]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.2.2-2.6.32.55-201201290114.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-2.6.32.55-201201290114.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index e1efc40..47f0daf 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -1,15 +1,19 @@
6 *.a
7 *.aux
8 *.bin
9 +*.cis
10 *.cpio
11 *.csp
12 +*.dbg
13 *.dsp
14 *.dvi
15 *.elf
16 *.eps
17 *.fw
18 +*.gcno
19 *.gen.S
20 *.gif
21 +*.gmo
22 *.grep
23 *.grp
24 *.gz
25 @@ -38,8 +42,10 @@
26 *.tab.h
27 *.tex
28 *.ver
29 +*.vim
30 *.xml
31 *_MODULES
32 +*_reg_safe.h
33 *_vga16.c
34 *~
35 *.9
36 @@ -49,11 +55,16 @@
37 53c700_d.h
38 CVS
39 ChangeSet
40 +GPATH
41 +GRTAGS
42 +GSYMS
43 +GTAGS
44 Image
45 Kerntypes
46 Module.markers
47 Module.symvers
48 PENDING
49 +PERF*
50 SCCS
51 System.map*
52 TAGS
53 @@ -76,7 +87,11 @@ btfixupprep
54 build
55 bvmlinux
56 bzImage*
57 +capability_names.h
58 +capflags.c
59 classlist.h*
60 +clut_vga16.c
61 +common-cmds.h
62 comp*.log
63 compile.h*
64 conf
65 @@ -84,6 +99,8 @@ config
66 config-*
67 config_data.h*
68 config_data.gz*
69 +config.c
70 +config.tmp
71 conmakehash
72 consolemap_deftbl.c*
73 cpustr.h
74 @@ -97,19 +114,23 @@ elfconfig.h*
75 fixdep
76 fore200e_mkfirm
77 fore200e_pca_fw.c*
78 +gate.lds
79 gconf
80 gen-devlist
81 gen_crc32table
82 gen_init_cpio
83 genksyms
84 *_gray256.c
85 +hash
86 +hid-example
87 ihex2fw
88 ikconfig.h*
89 initramfs_data.cpio
90 +initramfs_data.cpio.bz2
91 initramfs_data.cpio.gz
92 initramfs_list
93 kallsyms
94 -kconfig
95 +kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99 @@ -127,13 +148,16 @@ machtypes.h
100 map
101 maui_boot.h
102 mconf
103 +mdp
104 miboot*
105 mk_elfconfig
106 mkboot
107 mkbugboot
108 mkcpustr
109 mkdep
110 +mkpiggy
111 mkprep
112 +mkregtable
113 mktables
114 mktree
115 modpost
116 @@ -149,6 +173,7 @@ patches*
117 pca200e.bin
118 pca200e_ecd.bin2
119 piggy.gz
120 +piggy.S
121 piggyback
122 pnmtologo
123 ppc_defs.h*
124 @@ -157,12 +182,15 @@ qconf
125 raid6altivec*.c
126 raid6int*.c
127 raid6tables.c
128 +regdb.c
129 relocs
130 +rlim_names.h
131 series
132 setup
133 setup.bin
134 setup.elf
135 sImage
136 +slabinfo
137 sm_tbl*
138 split-include
139 syscalltab.h
140 @@ -171,6 +199,7 @@ tftpboot.img
141 timeconst.h
142 times.h*
143 trix_boot.h
144 +user_constants.h
145 utsrelease.h*
146 vdso-syms.lds
147 vdso.lds
148 @@ -186,14 +215,20 @@ version.h*
149 vmlinux
150 vmlinux-*
151 vmlinux.aout
152 +vmlinux.bin.all
153 +vmlinux.bin.bz2
154 vmlinux.lds
155 +vmlinux.relocs
156 +voffset.h
157 vsyscall.lds
158 vsyscall_32.lds
159 wanxlfw.inc
160 uImage
161 unifdef
162 +utsrelease.h
163 wakeup.bin
164 wakeup.elf
165 wakeup.lds
166 zImage*
167 zconf.hash.c
168 +zoffset.h
169 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
170 index c840e7d..f4c451c 100644
171 --- a/Documentation/kernel-parameters.txt
172 +++ b/Documentation/kernel-parameters.txt
173 @@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters. It is defined in the file
174 the specified number of seconds. This is to be used if
175 your oopses keep scrolling off the screen.
176
177 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
178 + virtualization environments that don't cope well with the
179 + expand down segment used by UDEREF on X86-32 or the frequent
180 + page table updates on X86-64.
181 +
182 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
183 +
184 pcbit= [HW,ISDN]
185
186 pcd. [PARIDE]
187 diff --git a/Makefile b/Makefile
188 index 64d4fc6..3b32f7f 100644
189 --- a/Makefile
190 +++ b/Makefile
191 @@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
192
193 HOSTCC = gcc
194 HOSTCXX = g++
195 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
196 -HOSTCXXFLAGS = -O2
197 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
198 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
199 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
200
201 # Decide whether to build built-in, modular, or both.
202 # Normally, just do built-in.
203 @@ -376,8 +377,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
204 # Rules shared between *config targets and build targets
205
206 # Basic helpers built in scripts/
207 -PHONY += scripts_basic
208 -scripts_basic:
209 +PHONY += scripts_basic gcc-plugins
210 +scripts_basic: gcc-plugins
211 $(Q)$(MAKE) $(build)=scripts/basic
212
213 # To avoid any implicit rule to kick in, define an empty command.
214 @@ -403,7 +404,7 @@ endif
215 # of make so .config is not included in this case either (for *config).
216
217 no-dot-config-targets := clean mrproper distclean \
218 - cscope TAGS tags help %docs check% \
219 + cscope gtags TAGS tags help %docs check% \
220 include/linux/version.h headers_% \
221 kernelrelease kernelversion
222
223 @@ -526,6 +527,46 @@ else
224 KBUILD_CFLAGS += -O2
225 endif
226
227 +ifndef DISABLE_PAX_PLUGINS
228 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
229 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
230 +CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
231 +endif
232 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
233 +STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
234 +STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
235 +endif
236 +ifdef CONFIG_KALLOCSTAT_PLUGIN
237 +KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
238 +endif
239 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
240 +KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
241 +KERNEXEC_PLUGIN += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD)
242 +endif
243 +ifdef CONFIG_CHECKER_PLUGIN
244 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
245 +CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
246 +endif
247 +endif
248 +GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
249 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
250 +ifeq ($(KBUILD_EXTMOD),)
251 +gcc-plugins:
252 + $(Q)$(MAKE) $(build)=tools/gcc
253 +else
254 +gcc-plugins: ;
255 +endif
256 +else
257 +gcc-plugins:
258 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
259 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
260 +else
261 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
262 +endif
263 + $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
264 +endif
265 +endif
266 +
267 include $(srctree)/arch/$(SRCARCH)/Makefile
268
269 ifneq ($(CONFIG_FRAME_WARN),0)
270 @@ -647,7 +688,7 @@ export mod_strip_cmd
271
272
273 ifeq ($(KBUILD_EXTMOD),)
274 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
275 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
276
277 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
278 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
279 @@ -868,6 +909,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
280
281 # The actual objects are generated when descending,
282 # make sure no implicit rule kicks in
283 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
284 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
285
286 # Handle descending into subdirectories listed in $(vmlinux-dirs)
287 @@ -877,7 +919,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
288 # Error messages still appears in the original language
289
290 PHONY += $(vmlinux-dirs)
291 -$(vmlinux-dirs): prepare scripts
292 +$(vmlinux-dirs): gcc-plugins prepare scripts
293 $(Q)$(MAKE) $(build)=$@
294
295 # Build the kernel release string
296 @@ -986,6 +1028,7 @@ prepare0: archprepare FORCE
297 $(Q)$(MAKE) $(build)=. missing-syscalls
298
299 # All the preparing..
300 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
301 prepare: prepare0
302
303 # The asm symlink changes when $(ARCH) changes.
304 @@ -1127,6 +1170,7 @@ all: modules
305 # using awk while concatenating to the final file.
306
307 PHONY += modules
308 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
309 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
310 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
311 @$(kecho) ' Building modules, stage 2.';
312 @@ -1136,7 +1180,7 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
313
314 # Target to prepare building external modules
315 PHONY += modules_prepare
316 -modules_prepare: prepare scripts
317 +modules_prepare: gcc-plugins prepare scripts
318
319 # Target to install modules
320 PHONY += modules_install
321 @@ -1201,7 +1245,7 @@ MRPROPER_FILES += .config .config.old include/asm .version .old_version \
322 include/linux/autoconf.h include/linux/version.h \
323 include/linux/utsrelease.h \
324 include/linux/bounds.h include/asm*/asm-offsets.h \
325 - Module.symvers Module.markers tags TAGS cscope*
326 + Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
327
328 # clean - Delete most, but leave enough to build external modules
329 #
330 @@ -1245,7 +1289,7 @@ distclean: mrproper
331 @find $(srctree) $(RCS_FIND_IGNORE) \
332 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
333 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
334 - -o -name '.*.rej' -o -size 0 \
335 + -o -name '.*.rej' -o -name '*.so' -o -size 0 \
336 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
337 -type f -print | xargs rm -f
338
339 @@ -1292,6 +1336,7 @@ help:
340 @echo ' modules_prepare - Set up for building external modules'
341 @echo ' tags/TAGS - Generate tags file for editors'
342 @echo ' cscope - Generate cscope index'
343 + @echo ' gtags - Generate GNU GLOBAL index'
344 @echo ' kernelrelease - Output the release version string'
345 @echo ' kernelversion - Output the version stored in Makefile'
346 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
347 @@ -1393,6 +1438,7 @@ PHONY += $(module-dirs) modules
348 $(module-dirs): crmodverdir $(objtree)/Module.symvers
349 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
350
351 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
352 modules: $(module-dirs)
353 @$(kecho) ' Building modules, stage 2.';
354 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
355 @@ -1448,7 +1494,7 @@ endif # KBUILD_EXTMOD
356 quiet_cmd_tags = GEN $@
357 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
358
359 -tags TAGS cscope: FORCE
360 +tags TAGS cscope gtags: FORCE
361 $(call cmd,tags)
362
363 # Scripts to check various things for consistency
364 @@ -1513,17 +1559,19 @@ else
365 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
366 endif
367
368 -%.s: %.c prepare scripts FORCE
369 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
370 +%.s: %.c gcc-plugins prepare scripts FORCE
371 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
372 %.i: %.c prepare scripts FORCE
373 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
374 -%.o: %.c prepare scripts FORCE
375 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
376 +%.o: %.c gcc-plugins prepare scripts FORCE
377 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
378 %.lst: %.c prepare scripts FORCE
379 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
380 -%.s: %.S prepare scripts FORCE
381 +%.s: %.S gcc-plugins prepare scripts FORCE
382 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
383 -%.o: %.S prepare scripts FORCE
384 +%.o: %.S gcc-plugins prepare scripts FORCE
385 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
386 %.symtypes: %.c prepare scripts FORCE
387 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
388 @@ -1533,11 +1581,13 @@ endif
389 $(cmd_crmodverdir)
390 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
391 $(build)=$(build-dir)
392 -%/: prepare scripts FORCE
393 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
394 +%/: gcc-plugins prepare scripts FORCE
395 $(cmd_crmodverdir)
396 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
397 $(build)=$(build-dir)
398 -%.ko: prepare scripts FORCE
399 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
400 +%.ko: gcc-plugins prepare scripts FORCE
401 $(cmd_crmodverdir)
402 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
403 $(build)=$(build-dir) $(@:.ko=.o)
404 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
405 index 5c75c1b..c82f878 100644
406 --- a/arch/alpha/include/asm/elf.h
407 +++ b/arch/alpha/include/asm/elf.h
408 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
409
410 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
411
412 +#ifdef CONFIG_PAX_ASLR
413 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
414 +
415 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
416 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
417 +#endif
418 +
419 /* $0 is set by ld.so to a pointer to a function which might be
420 registered using atexit. This provides a mean for the dynamic
421 linker to call DT_FINI functions for shared libraries that have
422 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
423 index 3f0c59f..cf1e100 100644
424 --- a/arch/alpha/include/asm/pgtable.h
425 +++ b/arch/alpha/include/asm/pgtable.h
426 @@ -101,6 +101,17 @@ struct vm_area_struct;
427 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
428 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
429 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
430 +
431 +#ifdef CONFIG_PAX_PAGEEXEC
432 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
433 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
434 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
435 +#else
436 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
437 +# define PAGE_COPY_NOEXEC PAGE_COPY
438 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
439 +#endif
440 +
441 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
442
443 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
444 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
445 index ebc3c89..20cfa63 100644
446 --- a/arch/alpha/kernel/module.c
447 +++ b/arch/alpha/kernel/module.c
448 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
449
450 /* The small sections were sorted to the end of the segment.
451 The following should definitely cover them. */
452 - gp = (u64)me->module_core + me->core_size - 0x8000;
453 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
454 got = sechdrs[me->arch.gotsecindex].sh_addr;
455
456 for (i = 0; i < n; i++) {
457 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
458 index a94e49c..d71dd44 100644
459 --- a/arch/alpha/kernel/osf_sys.c
460 +++ b/arch/alpha/kernel/osf_sys.c
461 @@ -1172,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
462 /* At this point: (!vma || addr < vma->vm_end). */
463 if (limit - len < addr)
464 return -ENOMEM;
465 - if (!vma || addr + len <= vma->vm_start)
466 + if (check_heap_stack_gap(vma, addr, len))
467 return addr;
468 addr = vma->vm_end;
469 vma = vma->vm_next;
470 @@ -1208,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
471 merely specific addresses, but regions of memory -- perhaps
472 this feature should be incorporated into all ports? */
473
474 +#ifdef CONFIG_PAX_RANDMMAP
475 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
476 +#endif
477 +
478 if (addr) {
479 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
480 if (addr != (unsigned long) -ENOMEM)
481 @@ -1215,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
482 }
483
484 /* Next, try allocating at TASK_UNMAPPED_BASE. */
485 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
486 - len, limit);
487 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
488 +
489 if (addr != (unsigned long) -ENOMEM)
490 return addr;
491
492 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
493 index 00a31de..2ded0f2 100644
494 --- a/arch/alpha/mm/fault.c
495 +++ b/arch/alpha/mm/fault.c
496 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
497 __reload_thread(pcb);
498 }
499
500 +#ifdef CONFIG_PAX_PAGEEXEC
501 +/*
502 + * PaX: decide what to do with offenders (regs->pc = fault address)
503 + *
504 + * returns 1 when task should be killed
505 + * 2 when patched PLT trampoline was detected
506 + * 3 when unpatched PLT trampoline was detected
507 + */
508 +static int pax_handle_fetch_fault(struct pt_regs *regs)
509 +{
510 +
511 +#ifdef CONFIG_PAX_EMUPLT
512 + int err;
513 +
514 + do { /* PaX: patched PLT emulation #1 */
515 + unsigned int ldah, ldq, jmp;
516 +
517 + err = get_user(ldah, (unsigned int *)regs->pc);
518 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
519 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
520 +
521 + if (err)
522 + break;
523 +
524 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
525 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
526 + jmp == 0x6BFB0000U)
527 + {
528 + unsigned long r27, addr;
529 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
530 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
531 +
532 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
533 + err = get_user(r27, (unsigned long *)addr);
534 + if (err)
535 + break;
536 +
537 + regs->r27 = r27;
538 + regs->pc = r27;
539 + return 2;
540 + }
541 + } while (0);
542 +
543 + do { /* PaX: patched PLT emulation #2 */
544 + unsigned int ldah, lda, br;
545 +
546 + err = get_user(ldah, (unsigned int *)regs->pc);
547 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
548 + err |= get_user(br, (unsigned int *)(regs->pc+8));
549 +
550 + if (err)
551 + break;
552 +
553 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
554 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
555 + (br & 0xFFE00000U) == 0xC3E00000U)
556 + {
557 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
558 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
559 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
560 +
561 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
562 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
563 + return 2;
564 + }
565 + } while (0);
566 +
567 + do { /* PaX: unpatched PLT emulation */
568 + unsigned int br;
569 +
570 + err = get_user(br, (unsigned int *)regs->pc);
571 +
572 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
573 + unsigned int br2, ldq, nop, jmp;
574 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
575 +
576 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
577 + err = get_user(br2, (unsigned int *)addr);
578 + err |= get_user(ldq, (unsigned int *)(addr+4));
579 + err |= get_user(nop, (unsigned int *)(addr+8));
580 + err |= get_user(jmp, (unsigned int *)(addr+12));
581 + err |= get_user(resolver, (unsigned long *)(addr+16));
582 +
583 + if (err)
584 + break;
585 +
586 + if (br2 == 0xC3600000U &&
587 + ldq == 0xA77B000CU &&
588 + nop == 0x47FF041FU &&
589 + jmp == 0x6B7B0000U)
590 + {
591 + regs->r28 = regs->pc+4;
592 + regs->r27 = addr+16;
593 + regs->pc = resolver;
594 + return 3;
595 + }
596 + }
597 + } while (0);
598 +#endif
599 +
600 + return 1;
601 +}
602 +
603 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
604 +{
605 + unsigned long i;
606 +
607 + printk(KERN_ERR "PAX: bytes at PC: ");
608 + for (i = 0; i < 5; i++) {
609 + unsigned int c;
610 + if (get_user(c, (unsigned int *)pc+i))
611 + printk(KERN_CONT "???????? ");
612 + else
613 + printk(KERN_CONT "%08x ", c);
614 + }
615 + printk("\n");
616 +}
617 +#endif
618
619 /*
620 * This routine handles page faults. It determines the address,
621 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
622 good_area:
623 si_code = SEGV_ACCERR;
624 if (cause < 0) {
625 - if (!(vma->vm_flags & VM_EXEC))
626 + if (!(vma->vm_flags & VM_EXEC)) {
627 +
628 +#ifdef CONFIG_PAX_PAGEEXEC
629 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
630 + goto bad_area;
631 +
632 + up_read(&mm->mmap_sem);
633 + switch (pax_handle_fetch_fault(regs)) {
634 +
635 +#ifdef CONFIG_PAX_EMUPLT
636 + case 2:
637 + case 3:
638 + return;
639 +#endif
640 +
641 + }
642 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
643 + do_group_exit(SIGKILL);
644 +#else
645 goto bad_area;
646 +#endif
647 +
648 + }
649 } else if (!cause) {
650 /* Allow reads even for write-only mappings */
651 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
652 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
653 index 6aac3f5..265536b 100644
654 --- a/arch/arm/include/asm/elf.h
655 +++ b/arch/arm/include/asm/elf.h
656 @@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
657 the loader. We need to make sure that it is out of the way of the program
658 that it will "exec", and that there is sufficient room for the brk. */
659
660 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
661 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
662 +
663 +#ifdef CONFIG_PAX_ASLR
664 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
665 +
666 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
667 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
668 +#endif
669
670 /* When the program starts, a1 contains a pointer to a function to be
671 registered with atexit, as per the SVR4 ABI. A value of 0 means we
672 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
673 index c019949..388fdd1 100644
674 --- a/arch/arm/include/asm/kmap_types.h
675 +++ b/arch/arm/include/asm/kmap_types.h
676 @@ -19,6 +19,7 @@ enum km_type {
677 KM_SOFTIRQ0,
678 KM_SOFTIRQ1,
679 KM_L2_CACHE,
680 + KM_CLEARPAGE,
681 KM_TYPE_NR
682 };
683
684 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
685 index 1d6bd40..fba0cb9 100644
686 --- a/arch/arm/include/asm/uaccess.h
687 +++ b/arch/arm/include/asm/uaccess.h
688 @@ -22,6 +22,8 @@
689 #define VERIFY_READ 0
690 #define VERIFY_WRITE 1
691
692 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
693 +
694 /*
695 * The exception table consists of pairs of addresses: the first is the
696 * address of an instruction that is allowed to fault, and the second is
697 @@ -387,8 +389,23 @@ do { \
698
699
700 #ifdef CONFIG_MMU
701 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
702 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
703 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
704 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
705 +
706 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
707 +{
708 + if (!__builtin_constant_p(n))
709 + check_object_size(to, n, false);
710 + return ___copy_from_user(to, from, n);
711 +}
712 +
713 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
714 +{
715 + if (!__builtin_constant_p(n))
716 + check_object_size(from, n, true);
717 + return ___copy_to_user(to, from, n);
718 +}
719 +
720 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
721 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
722 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
723 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
724
725 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
726 {
727 + if ((long)n < 0)
728 + return n;
729 +
730 if (access_ok(VERIFY_READ, from, n))
731 n = __copy_from_user(to, from, n);
732 else /* security hole - plug it */
733 @@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
734
735 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
736 {
737 + if ((long)n < 0)
738 + return n;
739 +
740 if (access_ok(VERIFY_WRITE, to, n))
741 n = __copy_to_user(to, from, n);
742 return n;
743 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
744 index 0e62770..e2c2cd6 100644
745 --- a/arch/arm/kernel/armksyms.c
746 +++ b/arch/arm/kernel/armksyms.c
747 @@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
748 #ifdef CONFIG_MMU
749 EXPORT_SYMBOL(copy_page);
750
751 -EXPORT_SYMBOL(__copy_from_user);
752 -EXPORT_SYMBOL(__copy_to_user);
753 +EXPORT_SYMBOL(___copy_from_user);
754 +EXPORT_SYMBOL(___copy_to_user);
755 EXPORT_SYMBOL(__clear_user);
756
757 EXPORT_SYMBOL(__get_user_1);
758 diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
759 index ba8ccfe..2dc34dc 100644
760 --- a/arch/arm/kernel/kgdb.c
761 +++ b/arch/arm/kernel/kgdb.c
762 @@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
763 * and we handle the normal undef case within the do_undefinstr
764 * handler.
765 */
766 -struct kgdb_arch arch_kgdb_ops = {
767 +const struct kgdb_arch arch_kgdb_ops = {
768 #ifndef __ARMEB__
769 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
770 #else /* ! __ARMEB__ */
771 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
772 index 3f361a7..6e806e1 100644
773 --- a/arch/arm/kernel/traps.c
774 +++ b/arch/arm/kernel/traps.c
775 @@ -247,6 +247,8 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p
776
777 DEFINE_SPINLOCK(die_lock);
778
779 +extern void gr_handle_kernel_exploit(void);
780 +
781 /*
782 * This function is protected against re-entrancy.
783 */
784 @@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
785 if (panic_on_oops)
786 panic("Fatal exception");
787
788 + gr_handle_kernel_exploit();
789 +
790 do_exit(SIGSEGV);
791 }
792
793 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
794 index e4fe124..0fc246b 100644
795 --- a/arch/arm/lib/copy_from_user.S
796 +++ b/arch/arm/lib/copy_from_user.S
797 @@ -16,7 +16,7 @@
798 /*
799 * Prototype:
800 *
801 - * size_t __copy_from_user(void *to, const void *from, size_t n)
802 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
803 *
804 * Purpose:
805 *
806 @@ -84,11 +84,11 @@
807
808 .text
809
810 -ENTRY(__copy_from_user)
811 +ENTRY(___copy_from_user)
812
813 #include "copy_template.S"
814
815 -ENDPROC(__copy_from_user)
816 +ENDPROC(___copy_from_user)
817
818 .section .fixup,"ax"
819 .align 0
820 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
821 index 1a71e15..ac7b258 100644
822 --- a/arch/arm/lib/copy_to_user.S
823 +++ b/arch/arm/lib/copy_to_user.S
824 @@ -16,7 +16,7 @@
825 /*
826 * Prototype:
827 *
828 - * size_t __copy_to_user(void *to, const void *from, size_t n)
829 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
830 *
831 * Purpose:
832 *
833 @@ -88,11 +88,11 @@
834 .text
835
836 ENTRY(__copy_to_user_std)
837 -WEAK(__copy_to_user)
838 +WEAK(___copy_to_user)
839
840 #include "copy_template.S"
841
842 -ENDPROC(__copy_to_user)
843 +ENDPROC(___copy_to_user)
844
845 .section .fixup,"ax"
846 .align 0
847 diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
848 index ffdd274..91017b6 100644
849 --- a/arch/arm/lib/uaccess.S
850 +++ b/arch/arm/lib/uaccess.S
851 @@ -19,7 +19,7 @@
852
853 #define PAGE_SHIFT 12
854
855 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
856 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
857 * Purpose : copy a block to user memory from kernel memory
858 * Params : to - user memory
859 * : from - kernel memory
860 @@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
861 sub r2, r2, ip
862 b .Lc2u_dest_aligned
863
864 -ENTRY(__copy_to_user)
865 +ENTRY(___copy_to_user)
866 stmfd sp!, {r2, r4 - r7, lr}
867 cmp r2, #4
868 blt .Lc2u_not_enough
869 @@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fault
870 ldrgtb r3, [r1], #0
871 USER( strgtbt r3, [r0], #1) @ May fault
872 b .Lc2u_finished
873 -ENDPROC(__copy_to_user)
874 +ENDPROC(___copy_to_user)
875
876 .section .fixup,"ax"
877 .align 0
878 9001: ldmfd sp!, {r0, r4 - r7, pc}
879 .previous
880
881 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
882 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
883 * Purpose : copy a block from user memory to kernel memory
884 * Params : to - kernel memory
885 * : from - user memory
886 @@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fault
887 sub r2, r2, ip
888 b .Lcfu_dest_aligned
889
890 -ENTRY(__copy_from_user)
891 +ENTRY(___copy_from_user)
892 stmfd sp!, {r0, r2, r4 - r7, lr}
893 cmp r2, #4
894 blt .Lcfu_not_enough
895 @@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fault
896 USER( ldrgtbt r3, [r1], #1) @ May fault
897 strgtb r3, [r0], #1
898 b .Lcfu_finished
899 -ENDPROC(__copy_from_user)
900 +ENDPROC(___copy_from_user)
901
902 .section .fixup,"ax"
903 .align 0
904 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
905 index 6b967ff..67d5b2b 100644
906 --- a/arch/arm/lib/uaccess_with_memcpy.c
907 +++ b/arch/arm/lib/uaccess_with_memcpy.c
908 @@ -97,7 +97,7 @@ out:
909 }
910
911 unsigned long
912 -__copy_to_user(void __user *to, const void *from, unsigned long n)
913 +___copy_to_user(void __user *to, const void *from, unsigned long n)
914 {
915 /*
916 * This test is stubbed out of the main function above to keep
917 diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
918 index 4028724..beec230 100644
919 --- a/arch/arm/mach-at91/pm.c
920 +++ b/arch/arm/mach-at91/pm.c
921 @@ -348,7 +348,7 @@ static void at91_pm_end(void)
922 }
923
924
925 -static struct platform_suspend_ops at91_pm_ops ={
926 +static const struct platform_suspend_ops at91_pm_ops ={
927 .valid = at91_pm_valid_state,
928 .begin = at91_pm_begin,
929 .enter = at91_pm_enter,
930 diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
931 index 5218943..0a34552 100644
932 --- a/arch/arm/mach-omap1/pm.c
933 +++ b/arch/arm/mach-omap1/pm.c
934 @@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq = {
935
936
937
938 -static struct platform_suspend_ops omap_pm_ops ={
939 +static const struct platform_suspend_ops omap_pm_ops ={
940 .prepare = omap_pm_prepare,
941 .enter = omap_pm_enter,
942 .finish = omap_pm_finish,
943 diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
944 index bff5c4e..d4c649b 100644
945 --- a/arch/arm/mach-omap2/pm24xx.c
946 +++ b/arch/arm/mach-omap2/pm24xx.c
947 @@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
948 enable_hlt();
949 }
950
951 -static struct platform_suspend_ops omap_pm_ops = {
952 +static const struct platform_suspend_ops omap_pm_ops = {
953 .prepare = omap2_pm_prepare,
954 .enter = omap2_pm_enter,
955 .finish = omap2_pm_finish,
956 diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
957 index 8946319..7d3e661 100644
958 --- a/arch/arm/mach-omap2/pm34xx.c
959 +++ b/arch/arm/mach-omap2/pm34xx.c
960 @@ -401,7 +401,7 @@ static void omap3_pm_end(void)
961 return;
962 }
963
964 -static struct platform_suspend_ops omap_pm_ops = {
965 +static const struct platform_suspend_ops omap_pm_ops = {
966 .begin = omap3_pm_begin,
967 .end = omap3_pm_end,
968 .prepare = omap3_pm_prepare,
969 diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c
970 index b3d8d53..6e68ebc 100644
971 --- a/arch/arm/mach-pnx4008/pm.c
972 +++ b/arch/arm/mach-pnx4008/pm.c
973 @@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_state_t state)
974 (state == PM_SUSPEND_MEM);
975 }
976
977 -static struct platform_suspend_ops pnx4008_pm_ops = {
978 +static const struct platform_suspend_ops pnx4008_pm_ops = {
979 .enter = pnx4008_pm_enter,
980 .valid = pnx4008_pm_valid,
981 };
982 diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
983 index 7693355..9beb00a 100644
984 --- a/arch/arm/mach-pxa/pm.c
985 +++ b/arch/arm/mach-pxa/pm.c
986 @@ -95,7 +95,7 @@ void pxa_pm_finish(void)
987 pxa_cpu_pm_fns->finish();
988 }
989
990 -static struct platform_suspend_ops pxa_pm_ops = {
991 +static const struct platform_suspend_ops pxa_pm_ops = {
992 .valid = pxa_pm_valid,
993 .enter = pxa_pm_enter,
994 .prepare = pxa_pm_prepare,
995 diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
996 index 629e05d..06be589 100644
997 --- a/arch/arm/mach-pxa/sharpsl_pm.c
998 +++ b/arch/arm/mach-pxa/sharpsl_pm.c
999 @@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status(struct apm_power_info *info)
1000 }
1001
1002 #ifdef CONFIG_PM
1003 -static struct platform_suspend_ops sharpsl_pm_ops = {
1004 +static const struct platform_suspend_ops sharpsl_pm_ops = {
1005 .prepare = pxa_pm_prepare,
1006 .finish = pxa_pm_finish,
1007 .enter = corgi_pxa_pm_enter,
1008 diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
1009 index c83fdc8..ab9fc44 100644
1010 --- a/arch/arm/mach-sa1100/pm.c
1011 +++ b/arch/arm/mach-sa1100/pm.c
1012 @@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
1013 return virt_to_phys(sp);
1014 }
1015
1016 -static struct platform_suspend_ops sa11x0_pm_ops = {
1017 +static const struct platform_suspend_ops sa11x0_pm_ops = {
1018 .enter = sa11x0_pm_enter,
1019 .valid = suspend_valid_only_mem,
1020 };
1021 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1022 index 3191cd6..c0739db 100644
1023 --- a/arch/arm/mm/fault.c
1024 +++ b/arch/arm/mm/fault.c
1025 @@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1026 }
1027 #endif
1028
1029 +#ifdef CONFIG_PAX_PAGEEXEC
1030 + if (fsr & FSR_LNX_PF) {
1031 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1032 + do_group_exit(SIGKILL);
1033 + }
1034 +#endif
1035 +
1036 tsk->thread.address = addr;
1037 tsk->thread.error_code = fsr;
1038 tsk->thread.trap_no = 14;
1039 @@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1040 }
1041 #endif /* CONFIG_MMU */
1042
1043 +#ifdef CONFIG_PAX_PAGEEXEC
1044 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1045 +{
1046 + long i;
1047 +
1048 + printk(KERN_ERR "PAX: bytes at PC: ");
1049 + for (i = 0; i < 20; i++) {
1050 + unsigned char c;
1051 + if (get_user(c, (__force unsigned char __user *)pc+i))
1052 + printk(KERN_CONT "?? ");
1053 + else
1054 + printk(KERN_CONT "%02x ", c);
1055 + }
1056 + printk("\n");
1057 +
1058 + printk(KERN_ERR "PAX: bytes at SP-4: ");
1059 + for (i = -1; i < 20; i++) {
1060 + unsigned long c;
1061 + if (get_user(c, (__force unsigned long __user *)sp+i))
1062 + printk(KERN_CONT "???????? ");
1063 + else
1064 + printk(KERN_CONT "%08lx ", c);
1065 + }
1066 + printk("\n");
1067 +}
1068 +#endif
1069 +
1070 /*
1071 * First Level Translation Fault Handler
1072 *
1073 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1074 index f5abc51..7ec524c 100644
1075 --- a/arch/arm/mm/mmap.c
1076 +++ b/arch/arm/mm/mmap.c
1077 @@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1078 if (len > TASK_SIZE)
1079 return -ENOMEM;
1080
1081 +#ifdef CONFIG_PAX_RANDMMAP
1082 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1083 +#endif
1084 +
1085 if (addr) {
1086 if (do_align)
1087 addr = COLOUR_ALIGN(addr, pgoff);
1088 @@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1089 addr = PAGE_ALIGN(addr);
1090
1091 vma = find_vma(mm, addr);
1092 - if (TASK_SIZE - len >= addr &&
1093 - (!vma || addr + len <= vma->vm_start))
1094 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1095 return addr;
1096 }
1097 if (len > mm->cached_hole_size) {
1098 - start_addr = addr = mm->free_area_cache;
1099 + start_addr = addr = mm->free_area_cache;
1100 } else {
1101 - start_addr = addr = TASK_UNMAPPED_BASE;
1102 - mm->cached_hole_size = 0;
1103 + start_addr = addr = mm->mmap_base;
1104 + mm->cached_hole_size = 0;
1105 }
1106
1107 full_search:
1108 @@ -94,14 +97,14 @@ full_search:
1109 * Start a new search - just in case we missed
1110 * some holes.
1111 */
1112 - if (start_addr != TASK_UNMAPPED_BASE) {
1113 - start_addr = addr = TASK_UNMAPPED_BASE;
1114 + if (start_addr != mm->mmap_base) {
1115 + start_addr = addr = mm->mmap_base;
1116 mm->cached_hole_size = 0;
1117 goto full_search;
1118 }
1119 return -ENOMEM;
1120 }
1121 - if (!vma || addr + len <= vma->vm_start) {
1122 + if (check_heap_stack_gap(vma, addr, len)) {
1123 /*
1124 * Remember the place where we stopped the search:
1125 */
1126 diff --git a/arch/arm/plat-s3c/pm.c b/arch/arm/plat-s3c/pm.c
1127 index 8d97db2..b66cfa5 100644
1128 --- a/arch/arm/plat-s3c/pm.c
1129 +++ b/arch/arm/plat-s3c/pm.c
1130 @@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
1131 s3c_pm_check_cleanup();
1132 }
1133
1134 -static struct platform_suspend_ops s3c_pm_ops = {
1135 +static const struct platform_suspend_ops s3c_pm_ops = {
1136 .enter = s3c_pm_enter,
1137 .prepare = s3c_pm_prepare,
1138 .finish = s3c_pm_finish,
1139 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1140 index d5d1d41..856e2ed 100644
1141 --- a/arch/avr32/include/asm/elf.h
1142 +++ b/arch/avr32/include/asm/elf.h
1143 @@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1144 the loader. We need to make sure that it is out of the way of the program
1145 that it will "exec", and that there is sufficient room for the brk. */
1146
1147 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1148 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1149
1150 +#ifdef CONFIG_PAX_ASLR
1151 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1152 +
1153 +#define PAX_DELTA_MMAP_LEN 15
1154 +#define PAX_DELTA_STACK_LEN 15
1155 +#endif
1156
1157 /* This yields a mask that user programs can use to figure out what
1158 instruction set this CPU supports. This could be done in user space,
1159 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1160 index b7f5c68..556135c 100644
1161 --- a/arch/avr32/include/asm/kmap_types.h
1162 +++ b/arch/avr32/include/asm/kmap_types.h
1163 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1164 D(11) KM_IRQ1,
1165 D(12) KM_SOFTIRQ0,
1166 D(13) KM_SOFTIRQ1,
1167 -D(14) KM_TYPE_NR
1168 +D(14) KM_CLEARPAGE,
1169 +D(15) KM_TYPE_NR
1170 };
1171
1172 #undef D
1173 diff --git a/arch/avr32/mach-at32ap/pm.c b/arch/avr32/mach-at32ap/pm.c
1174 index f021edf..32d680e 100644
1175 --- a/arch/avr32/mach-at32ap/pm.c
1176 +++ b/arch/avr32/mach-at32ap/pm.c
1177 @@ -176,7 +176,7 @@ out:
1178 return 0;
1179 }
1180
1181 -static struct platform_suspend_ops avr32_pm_ops = {
1182 +static const struct platform_suspend_ops avr32_pm_ops = {
1183 .valid = avr32_pm_valid_state,
1184 .enter = avr32_pm_enter,
1185 };
1186 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1187 index b61d86d..e292c7f 100644
1188 --- a/arch/avr32/mm/fault.c
1189 +++ b/arch/avr32/mm/fault.c
1190 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1191
1192 int exception_trace = 1;
1193
1194 +#ifdef CONFIG_PAX_PAGEEXEC
1195 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1196 +{
1197 + unsigned long i;
1198 +
1199 + printk(KERN_ERR "PAX: bytes at PC: ");
1200 + for (i = 0; i < 20; i++) {
1201 + unsigned char c;
1202 + if (get_user(c, (unsigned char *)pc+i))
1203 + printk(KERN_CONT "???????? ");
1204 + else
1205 + printk(KERN_CONT "%02x ", c);
1206 + }
1207 + printk("\n");
1208 +}
1209 +#endif
1210 +
1211 /*
1212 * This routine handles page faults. It determines the address and the
1213 * problem, and then passes it off to one of the appropriate routines.
1214 @@ -157,6 +174,16 @@ bad_area:
1215 up_read(&mm->mmap_sem);
1216
1217 if (user_mode(regs)) {
1218 +
1219 +#ifdef CONFIG_PAX_PAGEEXEC
1220 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1221 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1222 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1223 + do_group_exit(SIGKILL);
1224 + }
1225 + }
1226 +#endif
1227 +
1228 if (exception_trace && printk_ratelimit())
1229 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1230 "sp %08lx ecr %lu\n",
1231 diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
1232 index cce79d0..c406c85 100644
1233 --- a/arch/blackfin/kernel/kgdb.c
1234 +++ b/arch/blackfin/kernel/kgdb.c
1235 @@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vector, int signo,
1236 return -1; /* this means that we do not want to exit from the handler */
1237 }
1238
1239 -struct kgdb_arch arch_kgdb_ops = {
1240 +const struct kgdb_arch arch_kgdb_ops = {
1241 .gdb_bpt_instr = {0xa1},
1242 #ifdef CONFIG_SMP
1243 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
1244 diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
1245 index 8837be4..b2fb413 100644
1246 --- a/arch/blackfin/mach-common/pm.c
1247 +++ b/arch/blackfin/mach-common/pm.c
1248 @@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t state)
1249 return 0;
1250 }
1251
1252 -struct platform_suspend_ops bfin_pm_ops = {
1253 +const struct platform_suspend_ops bfin_pm_ops = {
1254 .enter = bfin_pm_enter,
1255 .valid = bfin_pm_valid,
1256 };
1257 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1258 index f8e16b2..c73ff79 100644
1259 --- a/arch/frv/include/asm/kmap_types.h
1260 +++ b/arch/frv/include/asm/kmap_types.h
1261 @@ -23,6 +23,7 @@ enum km_type {
1262 KM_IRQ1,
1263 KM_SOFTIRQ0,
1264 KM_SOFTIRQ1,
1265 + KM_CLEARPAGE,
1266 KM_TYPE_NR
1267 };
1268
1269 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1270 index 385fd30..6c3d97e 100644
1271 --- a/arch/frv/mm/elf-fdpic.c
1272 +++ b/arch/frv/mm/elf-fdpic.c
1273 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1274 if (addr) {
1275 addr = PAGE_ALIGN(addr);
1276 vma = find_vma(current->mm, addr);
1277 - if (TASK_SIZE - len >= addr &&
1278 - (!vma || addr + len <= vma->vm_start))
1279 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1280 goto success;
1281 }
1282
1283 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1284 for (; vma; vma = vma->vm_next) {
1285 if (addr > limit)
1286 break;
1287 - if (addr + len <= vma->vm_start)
1288 + if (check_heap_stack_gap(vma, addr, len))
1289 goto success;
1290 addr = vma->vm_end;
1291 }
1292 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1293 for (; vma; vma = vma->vm_next) {
1294 if (addr > limit)
1295 break;
1296 - if (addr + len <= vma->vm_start)
1297 + if (check_heap_stack_gap(vma, addr, len))
1298 goto success;
1299 addr = vma->vm_end;
1300 }
1301 diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
1302 index e4a80d8..11a7ea1 100644
1303 --- a/arch/ia64/hp/common/hwsw_iommu.c
1304 +++ b/arch/ia64/hp/common/hwsw_iommu.c
1305 @@ -17,7 +17,7 @@
1306 #include <linux/swiotlb.h>
1307 #include <asm/machvec.h>
1308
1309 -extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1310 +extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1311
1312 /* swiotlb declarations & definitions: */
1313 extern int swiotlb_late_init_with_default_size (size_t size);
1314 @@ -33,7 +33,7 @@ static inline int use_swiotlb(struct device *dev)
1315 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
1316 }
1317
1318 -struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1319 +const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1320 {
1321 if (use_swiotlb(dev))
1322 return &swiotlb_dma_ops;
1323 diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
1324 index 01ae69b..35752fd 100644
1325 --- a/arch/ia64/hp/common/sba_iommu.c
1326 +++ b/arch/ia64/hp/common/sba_iommu.c
1327 @@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_driver = {
1328 },
1329 };
1330
1331 -extern struct dma_map_ops swiotlb_dma_ops;
1332 +extern const struct dma_map_ops swiotlb_dma_ops;
1333
1334 static int __init
1335 sba_init(void)
1336 @@ -2211,7 +2211,7 @@ sba_page_override(char *str)
1337
1338 __setup("sbapagesize=",sba_page_override);
1339
1340 -struct dma_map_ops sba_dma_ops = {
1341 +const struct dma_map_ops sba_dma_ops = {
1342 .alloc_coherent = sba_alloc_coherent,
1343 .free_coherent = sba_free_coherent,
1344 .map_page = sba_map_page,
1345 diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
1346 index c69552b..c7122f4 100644
1347 --- a/arch/ia64/ia32/binfmt_elf32.c
1348 +++ b/arch/ia64/ia32/binfmt_elf32.c
1349 @@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_top);
1350
1351 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
1352
1353 +#ifdef CONFIG_PAX_ASLR
1354 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1355 +
1356 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1357 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1358 +#endif
1359 +
1360 /* Ugly but avoids duplication */
1361 #include "../../../fs/binfmt_elf.c"
1362
1363 diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
1364 index 0f15349..26b3429 100644
1365 --- a/arch/ia64/ia32/ia32priv.h
1366 +++ b/arch/ia64/ia32/ia32priv.h
1367 @@ -296,7 +296,14 @@ typedef struct compat_siginfo {
1368 #define ELF_DATA ELFDATA2LSB
1369 #define ELF_ARCH EM_386
1370
1371 -#define IA32_STACK_TOP IA32_PAGE_OFFSET
1372 +#ifdef CONFIG_PAX_RANDUSTACK
1373 +#define __IA32_DELTA_STACK (current->mm->delta_stack)
1374 +#else
1375 +#define __IA32_DELTA_STACK 0UL
1376 +#endif
1377 +
1378 +#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
1379 +
1380 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
1381 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
1382
1383 diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
1384 index 8d3c79c..71b3af6 100644
1385 --- a/arch/ia64/include/asm/dma-mapping.h
1386 +++ b/arch/ia64/include/asm/dma-mapping.h
1387 @@ -12,7 +12,7 @@
1388
1389 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
1390
1391 -extern struct dma_map_ops *dma_ops;
1392 +extern const struct dma_map_ops *dma_ops;
1393 extern struct ia64_machine_vector ia64_mv;
1394 extern void set_iommu_machvec(void);
1395
1396 @@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
1397 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1398 dma_addr_t *daddr, gfp_t gfp)
1399 {
1400 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1401 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1402 void *caddr;
1403
1404 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
1405 @@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1406 static inline void dma_free_coherent(struct device *dev, size_t size,
1407 void *caddr, dma_addr_t daddr)
1408 {
1409 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1410 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1411 debug_dma_free_coherent(dev, size, caddr, daddr);
1412 ops->free_coherent(dev, size, caddr, daddr);
1413 }
1414 @@ -49,13 +49,13 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
1415
1416 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
1417 {
1418 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1419 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1420 return ops->mapping_error(dev, daddr);
1421 }
1422
1423 static inline int dma_supported(struct device *dev, u64 mask)
1424 {
1425 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1426 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1427 return ops->dma_supported(dev, mask);
1428 }
1429
1430 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
1431 index 86eddee..b116bb4 100644
1432 --- a/arch/ia64/include/asm/elf.h
1433 +++ b/arch/ia64/include/asm/elf.h
1434 @@ -43,6 +43,13 @@
1435 */
1436 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1437
1438 +#ifdef CONFIG_PAX_ASLR
1439 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1440 +
1441 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1442 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1443 +#endif
1444 +
1445 #define PT_IA_64_UNWIND 0x70000001
1446
1447 /* IA-64 relocations: */
1448 diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
1449 index 367d299..9ad4279 100644
1450 --- a/arch/ia64/include/asm/machvec.h
1451 +++ b/arch/ia64/include/asm/machvec.h
1452 @@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event_t(void);
1453 /* DMA-mapping interface: */
1454 typedef void ia64_mv_dma_init (void);
1455 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
1456 -typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1457 +typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1458
1459 /*
1460 * WARNING: The legacy I/O space is _architected_. Platforms are
1461 @@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(const char *cmdline);
1462 # endif /* CONFIG_IA64_GENERIC */
1463
1464 extern void swiotlb_dma_init(void);
1465 -extern struct dma_map_ops *dma_get_ops(struct device *);
1466 +extern const struct dma_map_ops *dma_get_ops(struct device *);
1467
1468 /*
1469 * Define default versions so we can extend machvec for new platforms without having
1470 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
1471 index 8840a69..cdb63d9 100644
1472 --- a/arch/ia64/include/asm/pgtable.h
1473 +++ b/arch/ia64/include/asm/pgtable.h
1474 @@ -12,7 +12,7 @@
1475 * David Mosberger-Tang <davidm@hpl.hp.com>
1476 */
1477
1478 -
1479 +#include <linux/const.h>
1480 #include <asm/mman.h>
1481 #include <asm/page.h>
1482 #include <asm/processor.h>
1483 @@ -143,6 +143,17 @@
1484 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1485 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1486 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1487 +
1488 +#ifdef CONFIG_PAX_PAGEEXEC
1489 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1490 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1491 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1492 +#else
1493 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1494 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1495 +# define PAGE_COPY_NOEXEC PAGE_COPY
1496 +#endif
1497 +
1498 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1499 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1500 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1501 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
1502 index 239ecdc..f94170e 100644
1503 --- a/arch/ia64/include/asm/spinlock.h
1504 +++ b/arch/ia64/include/asm/spinlock.h
1505 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
1506 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1507
1508 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1509 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1510 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1511 }
1512
1513 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
1514 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
1515 index 449c8c0..432a3d2 100644
1516 --- a/arch/ia64/include/asm/uaccess.h
1517 +++ b/arch/ia64/include/asm/uaccess.h
1518 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1519 const void *__cu_from = (from); \
1520 long __cu_len = (n); \
1521 \
1522 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
1523 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1524 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1525 __cu_len; \
1526 })
1527 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1528 long __cu_len = (n); \
1529 \
1530 __chk_user_ptr(__cu_from); \
1531 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
1532 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1533 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1534 __cu_len; \
1535 })
1536 diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
1537 index f2c1600..969398a 100644
1538 --- a/arch/ia64/kernel/dma-mapping.c
1539 +++ b/arch/ia64/kernel/dma-mapping.c
1540 @@ -3,7 +3,7 @@
1541 /* Set this to 1 if there is a HW IOMMU in the system */
1542 int iommu_detected __read_mostly;
1543
1544 -struct dma_map_ops *dma_ops;
1545 +const struct dma_map_ops *dma_ops;
1546 EXPORT_SYMBOL(dma_ops);
1547
1548 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
1549 @@ -16,7 +16,7 @@ static int __init dma_init(void)
1550 }
1551 fs_initcall(dma_init);
1552
1553 -struct dma_map_ops *dma_get_ops(struct device *dev)
1554 +const struct dma_map_ops *dma_get_ops(struct device *dev)
1555 {
1556 return dma_ops;
1557 }
1558 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
1559 index 1481b0a..e7d38ff 100644
1560 --- a/arch/ia64/kernel/module.c
1561 +++ b/arch/ia64/kernel/module.c
1562 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1563 void
1564 module_free (struct module *mod, void *module_region)
1565 {
1566 - if (mod && mod->arch.init_unw_table &&
1567 - module_region == mod->module_init) {
1568 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1569 unw_remove_unwind_table(mod->arch.init_unw_table);
1570 mod->arch.init_unw_table = NULL;
1571 }
1572 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
1573 }
1574
1575 static inline int
1576 +in_init_rx (const struct module *mod, uint64_t addr)
1577 +{
1578 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1579 +}
1580 +
1581 +static inline int
1582 +in_init_rw (const struct module *mod, uint64_t addr)
1583 +{
1584 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1585 +}
1586 +
1587 +static inline int
1588 in_init (const struct module *mod, uint64_t addr)
1589 {
1590 - return addr - (uint64_t) mod->module_init < mod->init_size;
1591 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1592 +}
1593 +
1594 +static inline int
1595 +in_core_rx (const struct module *mod, uint64_t addr)
1596 +{
1597 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1598 +}
1599 +
1600 +static inline int
1601 +in_core_rw (const struct module *mod, uint64_t addr)
1602 +{
1603 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1604 }
1605
1606 static inline int
1607 in_core (const struct module *mod, uint64_t addr)
1608 {
1609 - return addr - (uint64_t) mod->module_core < mod->core_size;
1610 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1611 }
1612
1613 static inline int
1614 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
1615 break;
1616
1617 case RV_BDREL:
1618 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1619 + if (in_init_rx(mod, val))
1620 + val -= (uint64_t) mod->module_init_rx;
1621 + else if (in_init_rw(mod, val))
1622 + val -= (uint64_t) mod->module_init_rw;
1623 + else if (in_core_rx(mod, val))
1624 + val -= (uint64_t) mod->module_core_rx;
1625 + else if (in_core_rw(mod, val))
1626 + val -= (uint64_t) mod->module_core_rw;
1627 break;
1628
1629 case RV_LTV:
1630 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
1631 * addresses have been selected...
1632 */
1633 uint64_t gp;
1634 - if (mod->core_size > MAX_LTOFF)
1635 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1636 /*
1637 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1638 * at the end of the module.
1639 */
1640 - gp = mod->core_size - MAX_LTOFF / 2;
1641 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1642 else
1643 - gp = mod->core_size / 2;
1644 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1645 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1646 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1647 mod->arch.gp = gp;
1648 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1649 }
1650 diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
1651 index f6b1ff0..de773fb 100644
1652 --- a/arch/ia64/kernel/pci-dma.c
1653 +++ b/arch/ia64/kernel/pci-dma.c
1654 @@ -43,7 +43,7 @@ struct device fallback_dev = {
1655 .dma_mask = &fallback_dev.coherent_dma_mask,
1656 };
1657
1658 -extern struct dma_map_ops intel_dma_ops;
1659 +extern const struct dma_map_ops intel_dma_ops;
1660
1661 static int __init pci_iommu_init(void)
1662 {
1663 @@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *dev, u64 mask)
1664 }
1665 EXPORT_SYMBOL(iommu_dma_supported);
1666
1667 +extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1668 +extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1669 +extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1670 +extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1671 +extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1672 +extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1673 +extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1674 +
1675 +static const struct dma_map_ops intel_iommu_dma_ops = {
1676 + /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1677 + .alloc_coherent = intel_alloc_coherent,
1678 + .free_coherent = intel_free_coherent,
1679 + .map_sg = intel_map_sg,
1680 + .unmap_sg = intel_unmap_sg,
1681 + .map_page = intel_map_page,
1682 + .unmap_page = intel_unmap_page,
1683 + .mapping_error = intel_mapping_error,
1684 +
1685 + .sync_single_for_cpu = machvec_dma_sync_single,
1686 + .sync_sg_for_cpu = machvec_dma_sync_sg,
1687 + .sync_single_for_device = machvec_dma_sync_single,
1688 + .sync_sg_for_device = machvec_dma_sync_sg,
1689 + .dma_supported = iommu_dma_supported,
1690 +};
1691 +
1692 void __init pci_iommu_alloc(void)
1693 {
1694 - dma_ops = &intel_dma_ops;
1695 -
1696 - dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1697 - dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1698 - dma_ops->sync_single_for_device = machvec_dma_sync_single;
1699 - dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1700 - dma_ops->dma_supported = iommu_dma_supported;
1701 + dma_ops = &intel_iommu_dma_ops;
1702
1703 /*
1704 * The order of these functions is important for
1705 diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
1706 index 285aae8..61dbab6 100644
1707 --- a/arch/ia64/kernel/pci-swiotlb.c
1708 +++ b/arch/ia64/kernel/pci-swiotlb.c
1709 @@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
1710 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1711 }
1712
1713 -struct dma_map_ops swiotlb_dma_ops = {
1714 +const struct dma_map_ops swiotlb_dma_ops = {
1715 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1716 .free_coherent = swiotlb_free_coherent,
1717 .map_page = swiotlb_map_page,
1718 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
1719 index 609d500..7dde2a8 100644
1720 --- a/arch/ia64/kernel/sys_ia64.c
1721 +++ b/arch/ia64/kernel/sys_ia64.c
1722 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1723 if (REGION_NUMBER(addr) == RGN_HPAGE)
1724 addr = 0;
1725 #endif
1726 +
1727 +#ifdef CONFIG_PAX_RANDMMAP
1728 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1729 + addr = mm->free_area_cache;
1730 + else
1731 +#endif
1732 +
1733 if (!addr)
1734 addr = mm->free_area_cache;
1735
1736 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1737 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1738 /* At this point: (!vma || addr < vma->vm_end). */
1739 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1740 - if (start_addr != TASK_UNMAPPED_BASE) {
1741 + if (start_addr != mm->mmap_base) {
1742 /* Start a new search --- just in case we missed some holes. */
1743 - addr = TASK_UNMAPPED_BASE;
1744 + addr = mm->mmap_base;
1745 goto full_search;
1746 }
1747 return -ENOMEM;
1748 }
1749 - if (!vma || addr + len <= vma->vm_start) {
1750 + if (check_heap_stack_gap(vma, addr, len)) {
1751 /* Remember the address where we stopped this search: */
1752 mm->free_area_cache = addr + len;
1753 return addr;
1754 diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
1755 index 8f06035..b3a5818 100644
1756 --- a/arch/ia64/kernel/topology.c
1757 +++ b/arch/ia64/kernel/topology.c
1758 @@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char *
1759 return ret;
1760 }
1761
1762 -static struct sysfs_ops cache_sysfs_ops = {
1763 +static const struct sysfs_ops cache_sysfs_ops = {
1764 .show = cache_show
1765 };
1766
1767 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
1768 index 0a0c77b..8e55a81 100644
1769 --- a/arch/ia64/kernel/vmlinux.lds.S
1770 +++ b/arch/ia64/kernel/vmlinux.lds.S
1771 @@ -190,7 +190,7 @@ SECTIONS
1772 /* Per-cpu data: */
1773 . = ALIGN(PERCPU_PAGE_SIZE);
1774 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1775 - __phys_per_cpu_start = __per_cpu_load;
1776 + __phys_per_cpu_start = per_cpu_load;
1777 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1778 * into percpu page size
1779 */
1780 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
1781 index 19261a9..1611b7a 100644
1782 --- a/arch/ia64/mm/fault.c
1783 +++ b/arch/ia64/mm/fault.c
1784 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
1785 return pte_present(pte);
1786 }
1787
1788 +#ifdef CONFIG_PAX_PAGEEXEC
1789 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1790 +{
1791 + unsigned long i;
1792 +
1793 + printk(KERN_ERR "PAX: bytes at PC: ");
1794 + for (i = 0; i < 8; i++) {
1795 + unsigned int c;
1796 + if (get_user(c, (unsigned int *)pc+i))
1797 + printk(KERN_CONT "???????? ");
1798 + else
1799 + printk(KERN_CONT "%08x ", c);
1800 + }
1801 + printk("\n");
1802 +}
1803 +#endif
1804 +
1805 void __kprobes
1806 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1807 {
1808 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
1809 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1810 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1811
1812 - if ((vma->vm_flags & mask) != mask)
1813 + if ((vma->vm_flags & mask) != mask) {
1814 +
1815 +#ifdef CONFIG_PAX_PAGEEXEC
1816 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1817 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1818 + goto bad_area;
1819 +
1820 + up_read(&mm->mmap_sem);
1821 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1822 + do_group_exit(SIGKILL);
1823 + }
1824 +#endif
1825 +
1826 goto bad_area;
1827
1828 + }
1829 +
1830 survive:
1831 /*
1832 * If for any reason at all we couldn't handle the fault, make
1833 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
1834 index b0f6157..a082bbc 100644
1835 --- a/arch/ia64/mm/hugetlbpage.c
1836 +++ b/arch/ia64/mm/hugetlbpage.c
1837 @@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
1838 /* At this point: (!vmm || addr < vmm->vm_end). */
1839 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1840 return -ENOMEM;
1841 - if (!vmm || (addr + len) <= vmm->vm_start)
1842 + if (check_heap_stack_gap(vmm, addr, len))
1843 return addr;
1844 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1845 }
1846 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
1847 index 1857766..05cc6a3 100644
1848 --- a/arch/ia64/mm/init.c
1849 +++ b/arch/ia64/mm/init.c
1850 @@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1851 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1852 vma->vm_end = vma->vm_start + PAGE_SIZE;
1853 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1854 +
1855 +#ifdef CONFIG_PAX_PAGEEXEC
1856 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1857 + vma->vm_flags &= ~VM_EXEC;
1858 +
1859 +#ifdef CONFIG_PAX_MPROTECT
1860 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1861 + vma->vm_flags &= ~VM_MAYEXEC;
1862 +#endif
1863 +
1864 + }
1865 +#endif
1866 +
1867 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1868 down_write(&current->mm->mmap_sem);
1869 if (insert_vm_struct(current->mm, vma)) {
1870 diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
1871 index 98b6849..8046766 100644
1872 --- a/arch/ia64/sn/pci/pci_dma.c
1873 +++ b/arch/ia64/sn/pci/pci_dma.c
1874 @@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
1875 return ret;
1876 }
1877
1878 -static struct dma_map_ops sn_dma_ops = {
1879 +static const struct dma_map_ops sn_dma_ops = {
1880 .alloc_coherent = sn_dma_alloc_coherent,
1881 .free_coherent = sn_dma_free_coherent,
1882 .map_page = sn_dma_map_page,
1883 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
1884 index 82abd15..d95ae5d 100644
1885 --- a/arch/m32r/lib/usercopy.c
1886 +++ b/arch/m32r/lib/usercopy.c
1887 @@ -14,6 +14,9 @@
1888 unsigned long
1889 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1890 {
1891 + if ((long)n < 0)
1892 + return n;
1893 +
1894 prefetch(from);
1895 if (access_ok(VERIFY_WRITE, to, n))
1896 __copy_user(to,from,n);
1897 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1898 unsigned long
1899 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1900 {
1901 + if ((long)n < 0)
1902 + return n;
1903 +
1904 prefetchw(to);
1905 if (access_ok(VERIFY_READ, from, n))
1906 __copy_user_zeroing(to,from,n);
1907 diff --git a/arch/mips/Makefile b/arch/mips/Makefile
1908 index 77f5021..2b1db8a 100644
1909 --- a/arch/mips/Makefile
1910 +++ b/arch/mips/Makefile
1911 @@ -51,6 +51,8 @@ endif
1912 cflags-y := -ffunction-sections
1913 cflags-y += $(call cc-option, -mno-check-zero-division)
1914
1915 +cflags-y += -Wno-sign-compare -Wno-extra
1916 +
1917 ifdef CONFIG_32BIT
1918 ld-emul = $(32bit-emul)
1919 vmlinux-32 = vmlinux
1920 diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
1921 index 632f986..fd0378d 100644
1922 --- a/arch/mips/alchemy/devboards/pm.c
1923 +++ b/arch/mips/alchemy/devboards/pm.c
1924 @@ -78,7 +78,7 @@ static void db1x_pm_end(void)
1925
1926 }
1927
1928 -static struct platform_suspend_ops db1x_pm_ops = {
1929 +static const struct platform_suspend_ops db1x_pm_ops = {
1930 .valid = suspend_valid_only_mem,
1931 .begin = db1x_pm_begin,
1932 .enter = db1x_pm_enter,
1933 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
1934 index 7990694..4e93acf 100644
1935 --- a/arch/mips/include/asm/elf.h
1936 +++ b/arch/mips/include/asm/elf.h
1937 @@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
1938 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1939 #endif
1940
1941 +#ifdef CONFIG_PAX_ASLR
1942 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1943 +
1944 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1945 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1946 +#endif
1947 +
1948 #endif /* _ASM_ELF_H */
1949 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
1950 index f266295..627cfff 100644
1951 --- a/arch/mips/include/asm/page.h
1952 +++ b/arch/mips/include/asm/page.h
1953 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
1954 #ifdef CONFIG_CPU_MIPS32
1955 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1956 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1957 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1958 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1959 #else
1960 typedef struct { unsigned long long pte; } pte_t;
1961 #define pte_val(x) ((x).pte)
1962 diff --git a/arch/mips/include/asm/reboot.h b/arch/mips/include/asm/reboot.h
1963 index e48c0bf..f3acf65 100644
1964 --- a/arch/mips/include/asm/reboot.h
1965 +++ b/arch/mips/include/asm/reboot.h
1966 @@ -9,7 +9,7 @@
1967 #ifndef _ASM_REBOOT_H
1968 #define _ASM_REBOOT_H
1969
1970 -extern void (*_machine_restart)(char *command);
1971 -extern void (*_machine_halt)(void);
1972 +extern void (*__noreturn _machine_restart)(char *command);
1973 +extern void (*__noreturn _machine_halt)(void);
1974
1975 #endif /* _ASM_REBOOT_H */
1976 diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
1977 index 83b5509..9fa24a23 100644
1978 --- a/arch/mips/include/asm/system.h
1979 +++ b/arch/mips/include/asm/system.h
1980 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1981 */
1982 #define __ARCH_WANT_UNLOCKED_CTXSW
1983
1984 -extern unsigned long arch_align_stack(unsigned long sp);
1985 +#define arch_align_stack(x) ((x) & ~0xfUL)
1986
1987 #endif /* _ASM_SYSTEM_H */
1988 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
1989 index 9fdd8bc..fcf9d68 100644
1990 --- a/arch/mips/kernel/binfmt_elfn32.c
1991 +++ b/arch/mips/kernel/binfmt_elfn32.c
1992 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1993 #undef ELF_ET_DYN_BASE
1994 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1995
1996 +#ifdef CONFIG_PAX_ASLR
1997 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1998 +
1999 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2000 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2001 +#endif
2002 +
2003 #include <asm/processor.h>
2004 #include <linux/module.h>
2005 #include <linux/elfcore.h>
2006 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2007 index ff44823..cf0b48a 100644
2008 --- a/arch/mips/kernel/binfmt_elfo32.c
2009 +++ b/arch/mips/kernel/binfmt_elfo32.c
2010 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2011 #undef ELF_ET_DYN_BASE
2012 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2013
2014 +#ifdef CONFIG_PAX_ASLR
2015 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2016 +
2017 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2018 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2019 +#endif
2020 +
2021 #include <asm/processor.h>
2022
2023 /*
2024 diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
2025 index 50c9bb8..efdd5f8 100644
2026 --- a/arch/mips/kernel/kgdb.c
2027 +++ b/arch/mips/kernel/kgdb.c
2028 @@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
2029 return -1;
2030 }
2031
2032 +/* cannot be const */
2033 struct kgdb_arch arch_kgdb_ops;
2034
2035 /*
2036 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2037 index f3d73e1..bb3f57a 100644
2038 --- a/arch/mips/kernel/process.c
2039 +++ b/arch/mips/kernel/process.c
2040 @@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_struct *task)
2041 out:
2042 return pc;
2043 }
2044 -
2045 -/*
2046 - * Don't forget that the stack pointer must be aligned on a 8 bytes
2047 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2048 - */
2049 -unsigned long arch_align_stack(unsigned long sp)
2050 -{
2051 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2052 - sp -= get_random_int() & ~PAGE_MASK;
2053 -
2054 - return sp & ALMASK;
2055 -}
2056 diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
2057 index 060563a..7fbf310 100644
2058 --- a/arch/mips/kernel/reset.c
2059 +++ b/arch/mips/kernel/reset.c
2060 @@ -19,8 +19,8 @@
2061 * So handle all using function pointers to machine specific
2062 * functions.
2063 */
2064 -void (*_machine_restart)(char *command);
2065 -void (*_machine_halt)(void);
2066 +void (*__noreturn _machine_restart)(char *command);
2067 +void (*__noreturn _machine_halt)(void);
2068 void (*pm_power_off)(void);
2069
2070 EXPORT_SYMBOL(pm_power_off);
2071 @@ -29,16 +29,19 @@ void machine_restart(char *command)
2072 {
2073 if (_machine_restart)
2074 _machine_restart(command);
2075 + BUG();
2076 }
2077
2078 void machine_halt(void)
2079 {
2080 if (_machine_halt)
2081 _machine_halt();
2082 + BUG();
2083 }
2084
2085 void machine_power_off(void)
2086 {
2087 if (pm_power_off)
2088 pm_power_off();
2089 + BUG();
2090 }
2091 diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
2092 index 3f7f466..3abe0b5 100644
2093 --- a/arch/mips/kernel/syscall.c
2094 +++ b/arch/mips/kernel/syscall.c
2095 @@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2096 do_color_align = 0;
2097 if (filp || (flags & MAP_SHARED))
2098 do_color_align = 1;
2099 +
2100 +#ifdef CONFIG_PAX_RANDMMAP
2101 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
2102 +#endif
2103 +
2104 if (addr) {
2105 if (do_color_align)
2106 addr = COLOUR_ALIGN(addr, pgoff);
2107 else
2108 addr = PAGE_ALIGN(addr);
2109 vmm = find_vma(current->mm, addr);
2110 - if (task_size - len >= addr &&
2111 - (!vmm || addr + len <= vmm->vm_start))
2112 + if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
2113 return addr;
2114 }
2115 - addr = TASK_UNMAPPED_BASE;
2116 + addr = current->mm->mmap_base;
2117 if (do_color_align)
2118 addr = COLOUR_ALIGN(addr, pgoff);
2119 else
2120 @@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2121 /* At this point: (!vmm || addr < vmm->vm_end). */
2122 if (task_size - len < addr)
2123 return -ENOMEM;
2124 - if (!vmm || addr + len <= vmm->vm_start)
2125 + if (check_heap_stack_gap(vmm, addr, len))
2126 return addr;
2127 addr = vmm->vm_end;
2128 if (do_color_align)
2129 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
2130 index e97a7a2..f18f5b0 100644
2131 --- a/arch/mips/mm/fault.c
2132 +++ b/arch/mips/mm/fault.c
2133 @@ -26,6 +26,23 @@
2134 #include <asm/ptrace.h>
2135 #include <asm/highmem.h> /* For VMALLOC_END */
2136
2137 +#ifdef CONFIG_PAX_PAGEEXEC
2138 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2139 +{
2140 + unsigned long i;
2141 +
2142 + printk(KERN_ERR "PAX: bytes at PC: ");
2143 + for (i = 0; i < 5; i++) {
2144 + unsigned int c;
2145 + if (get_user(c, (unsigned int *)pc+i))
2146 + printk(KERN_CONT "???????? ");
2147 + else
2148 + printk(KERN_CONT "%08x ", c);
2149 + }
2150 + printk("\n");
2151 +}
2152 +#endif
2153 +
2154 /*
2155 * This routine handles page faults. It determines the address,
2156 * and the problem, and then passes it off to one of the appropriate
2157 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
2158 index 9c802eb..0592e41 100644
2159 --- a/arch/parisc/include/asm/elf.h
2160 +++ b/arch/parisc/include/asm/elf.h
2161 @@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration... */
2162
2163 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
2164
2165 +#ifdef CONFIG_PAX_ASLR
2166 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
2167 +
2168 +#define PAX_DELTA_MMAP_LEN 16
2169 +#define PAX_DELTA_STACK_LEN 16
2170 +#endif
2171 +
2172 /* This yields a mask that user programs can use to figure out what
2173 instruction set this CPU supports. This could be done in user space,
2174 but it's not easy, and we've already done it here. */
2175 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
2176 index a27d2e2..18fd845 100644
2177 --- a/arch/parisc/include/asm/pgtable.h
2178 +++ b/arch/parisc/include/asm/pgtable.h
2179 @@ -207,6 +207,17 @@
2180 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
2181 #define PAGE_COPY PAGE_EXECREAD
2182 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
2183 +
2184 +#ifdef CONFIG_PAX_PAGEEXEC
2185 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
2186 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2187 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2188 +#else
2189 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
2190 +# define PAGE_COPY_NOEXEC PAGE_COPY
2191 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
2192 +#endif
2193 +
2194 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
2195 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
2196 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
2197 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
2198 index 2120746..8d70a5e 100644
2199 --- a/arch/parisc/kernel/module.c
2200 +++ b/arch/parisc/kernel/module.c
2201 @@ -95,16 +95,38 @@
2202
2203 /* three functions to determine where in the module core
2204 * or init pieces the location is */
2205 +static inline int in_init_rx(struct module *me, void *loc)
2206 +{
2207 + return (loc >= me->module_init_rx &&
2208 + loc < (me->module_init_rx + me->init_size_rx));
2209 +}
2210 +
2211 +static inline int in_init_rw(struct module *me, void *loc)
2212 +{
2213 + return (loc >= me->module_init_rw &&
2214 + loc < (me->module_init_rw + me->init_size_rw));
2215 +}
2216 +
2217 static inline int in_init(struct module *me, void *loc)
2218 {
2219 - return (loc >= me->module_init &&
2220 - loc <= (me->module_init + me->init_size));
2221 + return in_init_rx(me, loc) || in_init_rw(me, loc);
2222 +}
2223 +
2224 +static inline int in_core_rx(struct module *me, void *loc)
2225 +{
2226 + return (loc >= me->module_core_rx &&
2227 + loc < (me->module_core_rx + me->core_size_rx));
2228 +}
2229 +
2230 +static inline int in_core_rw(struct module *me, void *loc)
2231 +{
2232 + return (loc >= me->module_core_rw &&
2233 + loc < (me->module_core_rw + me->core_size_rw));
2234 }
2235
2236 static inline int in_core(struct module *me, void *loc)
2237 {
2238 - return (loc >= me->module_core &&
2239 - loc <= (me->module_core + me->core_size));
2240 + return in_core_rx(me, loc) || in_core_rw(me, loc);
2241 }
2242
2243 static inline int in_local(struct module *me, void *loc)
2244 @@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
2245 }
2246
2247 /* align things a bit */
2248 - me->core_size = ALIGN(me->core_size, 16);
2249 - me->arch.got_offset = me->core_size;
2250 - me->core_size += gots * sizeof(struct got_entry);
2251 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
2252 + me->arch.got_offset = me->core_size_rw;
2253 + me->core_size_rw += gots * sizeof(struct got_entry);
2254
2255 - me->core_size = ALIGN(me->core_size, 16);
2256 - me->arch.fdesc_offset = me->core_size;
2257 - me->core_size += fdescs * sizeof(Elf_Fdesc);
2258 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
2259 + me->arch.fdesc_offset = me->core_size_rw;
2260 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
2261
2262 me->arch.got_max = gots;
2263 me->arch.fdesc_max = fdescs;
2264 @@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
2265
2266 BUG_ON(value == 0);
2267
2268 - got = me->module_core + me->arch.got_offset;
2269 + got = me->module_core_rw + me->arch.got_offset;
2270 for (i = 0; got[i].addr; i++)
2271 if (got[i].addr == value)
2272 goto out;
2273 @@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
2274 #ifdef CONFIG_64BIT
2275 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2276 {
2277 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
2278 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
2279
2280 if (!value) {
2281 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
2282 @@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2283
2284 /* Create new one */
2285 fdesc->addr = value;
2286 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2287 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2288 return (Elf_Addr)fdesc;
2289 }
2290 #endif /* CONFIG_64BIT */
2291 @@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
2292
2293 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
2294 end = table + sechdrs[me->arch.unwind_section].sh_size;
2295 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2296 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2297
2298 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
2299 me->arch.unwind_section, table, end, gp);
2300 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
2301 index 9147391..f3d949a 100644
2302 --- a/arch/parisc/kernel/sys_parisc.c
2303 +++ b/arch/parisc/kernel/sys_parisc.c
2304 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
2305 /* At this point: (!vma || addr < vma->vm_end). */
2306 if (TASK_SIZE - len < addr)
2307 return -ENOMEM;
2308 - if (!vma || addr + len <= vma->vm_start)
2309 + if (check_heap_stack_gap(vma, addr, len))
2310 return addr;
2311 addr = vma->vm_end;
2312 }
2313 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
2314 /* At this point: (!vma || addr < vma->vm_end). */
2315 if (TASK_SIZE - len < addr)
2316 return -ENOMEM;
2317 - if (!vma || addr + len <= vma->vm_start)
2318 + if (check_heap_stack_gap(vma, addr, len))
2319 return addr;
2320 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
2321 if (addr < vma->vm_end) /* handle wraparound */
2322 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2323 if (flags & MAP_FIXED)
2324 return addr;
2325 if (!addr)
2326 - addr = TASK_UNMAPPED_BASE;
2327 + addr = current->mm->mmap_base;
2328
2329 if (filp) {
2330 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
2331 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
2332 index 8b58bf0..7afff03 100644
2333 --- a/arch/parisc/kernel/traps.c
2334 +++ b/arch/parisc/kernel/traps.c
2335 @@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
2336
2337 down_read(&current->mm->mmap_sem);
2338 vma = find_vma(current->mm,regs->iaoq[0]);
2339 - if (vma && (regs->iaoq[0] >= vma->vm_start)
2340 - && (vma->vm_flags & VM_EXEC)) {
2341 -
2342 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
2343 fault_address = regs->iaoq[0];
2344 fault_space = regs->iasq[0];
2345
2346 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
2347 index c6afbfc..c5839f6 100644
2348 --- a/arch/parisc/mm/fault.c
2349 +++ b/arch/parisc/mm/fault.c
2350 @@ -15,6 +15,7 @@
2351 #include <linux/sched.h>
2352 #include <linux/interrupt.h>
2353 #include <linux/module.h>
2354 +#include <linux/unistd.h>
2355
2356 #include <asm/uaccess.h>
2357 #include <asm/traps.h>
2358 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
2359 static unsigned long
2360 parisc_acctyp(unsigned long code, unsigned int inst)
2361 {
2362 - if (code == 6 || code == 16)
2363 + if (code == 6 || code == 7 || code == 16)
2364 return VM_EXEC;
2365
2366 switch (inst & 0xf0000000) {
2367 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
2368 }
2369 #endif
2370
2371 +#ifdef CONFIG_PAX_PAGEEXEC
2372 +/*
2373 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
2374 + *
2375 + * returns 1 when task should be killed
2376 + * 2 when rt_sigreturn trampoline was detected
2377 + * 3 when unpatched PLT trampoline was detected
2378 + */
2379 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2380 +{
2381 +
2382 +#ifdef CONFIG_PAX_EMUPLT
2383 + int err;
2384 +
2385 + do { /* PaX: unpatched PLT emulation */
2386 + unsigned int bl, depwi;
2387 +
2388 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
2389 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
2390 +
2391 + if (err)
2392 + break;
2393 +
2394 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
2395 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
2396 +
2397 + err = get_user(ldw, (unsigned int *)addr);
2398 + err |= get_user(bv, (unsigned int *)(addr+4));
2399 + err |= get_user(ldw2, (unsigned int *)(addr+8));
2400 +
2401 + if (err)
2402 + break;
2403 +
2404 + if (ldw == 0x0E801096U &&
2405 + bv == 0xEAC0C000U &&
2406 + ldw2 == 0x0E881095U)
2407 + {
2408 + unsigned int resolver, map;
2409 +
2410 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
2411 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
2412 + if (err)
2413 + break;
2414 +
2415 + regs->gr[20] = instruction_pointer(regs)+8;
2416 + regs->gr[21] = map;
2417 + regs->gr[22] = resolver;
2418 + regs->iaoq[0] = resolver | 3UL;
2419 + regs->iaoq[1] = regs->iaoq[0] + 4;
2420 + return 3;
2421 + }
2422 + }
2423 + } while (0);
2424 +#endif
2425 +
2426 +#ifdef CONFIG_PAX_EMUTRAMP
2427 +
2428 +#ifndef CONFIG_PAX_EMUSIGRT
2429 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
2430 + return 1;
2431 +#endif
2432 +
2433 + do { /* PaX: rt_sigreturn emulation */
2434 + unsigned int ldi1, ldi2, bel, nop;
2435 +
2436 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
2437 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
2438 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
2439 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
2440 +
2441 + if (err)
2442 + break;
2443 +
2444 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
2445 + ldi2 == 0x3414015AU &&
2446 + bel == 0xE4008200U &&
2447 + nop == 0x08000240U)
2448 + {
2449 + regs->gr[25] = (ldi1 & 2) >> 1;
2450 + regs->gr[20] = __NR_rt_sigreturn;
2451 + regs->gr[31] = regs->iaoq[1] + 16;
2452 + regs->sr[0] = regs->iasq[1];
2453 + regs->iaoq[0] = 0x100UL;
2454 + regs->iaoq[1] = regs->iaoq[0] + 4;
2455 + regs->iasq[0] = regs->sr[2];
2456 + regs->iasq[1] = regs->sr[2];
2457 + return 2;
2458 + }
2459 + } while (0);
2460 +#endif
2461 +
2462 + return 1;
2463 +}
2464 +
2465 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2466 +{
2467 + unsigned long i;
2468 +
2469 + printk(KERN_ERR "PAX: bytes at PC: ");
2470 + for (i = 0; i < 5; i++) {
2471 + unsigned int c;
2472 + if (get_user(c, (unsigned int *)pc+i))
2473 + printk(KERN_CONT "???????? ");
2474 + else
2475 + printk(KERN_CONT "%08x ", c);
2476 + }
2477 + printk("\n");
2478 +}
2479 +#endif
2480 +
2481 int fixup_exception(struct pt_regs *regs)
2482 {
2483 const struct exception_table_entry *fix;
2484 @@ -192,8 +303,33 @@ good_area:
2485
2486 acc_type = parisc_acctyp(code,regs->iir);
2487
2488 - if ((vma->vm_flags & acc_type) != acc_type)
2489 + if ((vma->vm_flags & acc_type) != acc_type) {
2490 +
2491 +#ifdef CONFIG_PAX_PAGEEXEC
2492 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2493 + (address & ~3UL) == instruction_pointer(regs))
2494 + {
2495 + up_read(&mm->mmap_sem);
2496 + switch (pax_handle_fetch_fault(regs)) {
2497 +
2498 +#ifdef CONFIG_PAX_EMUPLT
2499 + case 3:
2500 + return;
2501 +#endif
2502 +
2503 +#ifdef CONFIG_PAX_EMUTRAMP
2504 + case 2:
2505 + return;
2506 +#endif
2507 +
2508 + }
2509 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2510 + do_group_exit(SIGKILL);
2511 + }
2512 +#endif
2513 +
2514 goto bad_area;
2515 + }
2516
2517 /*
2518 * If for any reason at all we couldn't handle the fault, make
2519 diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
2520 index c107b74..409dc0f 100644
2521 --- a/arch/powerpc/Makefile
2522 +++ b/arch/powerpc/Makefile
2523 @@ -74,6 +74,8 @@ KBUILD_AFLAGS += -Iarch/$(ARCH)
2524 KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
2525 CPP = $(CC) -E $(KBUILD_CFLAGS)
2526
2527 +cflags-y += -Wno-sign-compare -Wno-extra
2528 +
2529 CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
2530
2531 ifeq ($(CONFIG_PPC64),y)
2532 diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
2533 index 6d94d27..50d4cad 100644
2534 --- a/arch/powerpc/include/asm/device.h
2535 +++ b/arch/powerpc/include/asm/device.h
2536 @@ -14,7 +14,7 @@ struct dev_archdata {
2537 struct device_node *of_node;
2538
2539 /* DMA operations on that device */
2540 - struct dma_map_ops *dma_ops;
2541 + const struct dma_map_ops *dma_ops;
2542
2543 /*
2544 * When an iommu is in use, dma_data is used as a ptr to the base of the
2545 diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
2546 index e281dae..2b8a784 100644
2547 --- a/arch/powerpc/include/asm/dma-mapping.h
2548 +++ b/arch/powerpc/include/asm/dma-mapping.h
2549 @@ -69,9 +69,9 @@ static inline unsigned long device_to_mask(struct device *dev)
2550 #ifdef CONFIG_PPC64
2551 extern struct dma_map_ops dma_iommu_ops;
2552 #endif
2553 -extern struct dma_map_ops dma_direct_ops;
2554 +extern const struct dma_map_ops dma_direct_ops;
2555
2556 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2557 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
2558 {
2559 /* We don't handle the NULL dev case for ISA for now. We could
2560 * do it via an out of line call but it is not needed for now. The
2561 @@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2562 return dev->archdata.dma_ops;
2563 }
2564
2565 -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
2566 +static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
2567 {
2568 dev->archdata.dma_ops = ops;
2569 }
2570 @@ -118,7 +118,7 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
2571
2572 static inline int dma_supported(struct device *dev, u64 mask)
2573 {
2574 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2575 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2576
2577 if (unlikely(dma_ops == NULL))
2578 return 0;
2579 @@ -132,7 +132,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
2580
2581 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2582 {
2583 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2584 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2585
2586 if (unlikely(dma_ops == NULL))
2587 return -EIO;
2588 @@ -147,7 +147,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2589 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2590 dma_addr_t *dma_handle, gfp_t flag)
2591 {
2592 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2593 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2594 void *cpu_addr;
2595
2596 BUG_ON(!dma_ops);
2597 @@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2598 static inline void dma_free_coherent(struct device *dev, size_t size,
2599 void *cpu_addr, dma_addr_t dma_handle)
2600 {
2601 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2602 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2603
2604 BUG_ON(!dma_ops);
2605
2606 @@ -173,7 +173,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
2607
2608 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2609 {
2610 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2611 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2612
2613 if (dma_ops->mapping_error)
2614 return dma_ops->mapping_error(dev, dma_addr);
2615 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
2616 index 5698502..5db093c 100644
2617 --- a/arch/powerpc/include/asm/elf.h
2618 +++ b/arch/powerpc/include/asm/elf.h
2619 @@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
2620 the loader. We need to make sure that it is out of the way of the program
2621 that it will "exec", and that there is sufficient room for the brk. */
2622
2623 -extern unsigned long randomize_et_dyn(unsigned long base);
2624 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2625 +#define ELF_ET_DYN_BASE (0x20000000)
2626 +
2627 +#ifdef CONFIG_PAX_ASLR
2628 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2629 +
2630 +#ifdef __powerpc64__
2631 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2632 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2633 +#else
2634 +#define PAX_DELTA_MMAP_LEN 15
2635 +#define PAX_DELTA_STACK_LEN 15
2636 +#endif
2637 +#endif
2638
2639 /*
2640 * Our registers are always unsigned longs, whether we're a 32 bit
2641 @@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2642 (0x7ff >> (PAGE_SHIFT - 12)) : \
2643 (0x3ffff >> (PAGE_SHIFT - 12)))
2644
2645 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2646 -#define arch_randomize_brk arch_randomize_brk
2647 -
2648 #endif /* __KERNEL__ */
2649
2650 /*
2651 diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
2652 index edfc980..1766f59 100644
2653 --- a/arch/powerpc/include/asm/iommu.h
2654 +++ b/arch/powerpc/include/asm/iommu.h
2655 @@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(void);
2656 extern void iommu_init_early_dart(void);
2657 extern void iommu_init_early_pasemi(void);
2658
2659 +/* dma-iommu.c */
2660 +extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
2661 +
2662 #ifdef CONFIG_PCI
2663 extern void pci_iommu_init(void);
2664 extern void pci_direct_iommu_init(void);
2665 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
2666 index 9163695..5a00112 100644
2667 --- a/arch/powerpc/include/asm/kmap_types.h
2668 +++ b/arch/powerpc/include/asm/kmap_types.h
2669 @@ -26,6 +26,7 @@ enum km_type {
2670 KM_SOFTIRQ1,
2671 KM_PPC_SYNC_PAGE,
2672 KM_PPC_SYNC_ICACHE,
2673 + KM_CLEARPAGE,
2674 KM_TYPE_NR
2675 };
2676
2677 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
2678 index ff24254..fe45b21 100644
2679 --- a/arch/powerpc/include/asm/page.h
2680 +++ b/arch/powerpc/include/asm/page.h
2681 @@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2682 * and needs to be executable. This means the whole heap ends
2683 * up being executable.
2684 */
2685 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2686 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2687 +#define VM_DATA_DEFAULT_FLAGS32 \
2688 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2689 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2690
2691 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2692 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2693 @@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2694 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2695 #endif
2696
2697 +#define ktla_ktva(addr) (addr)
2698 +#define ktva_ktla(addr) (addr)
2699 +
2700 #ifndef __ASSEMBLY__
2701
2702 #undef STRICT_MM_TYPECHECKS
2703 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
2704 index 3f17b83..1f9e766 100644
2705 --- a/arch/powerpc/include/asm/page_64.h
2706 +++ b/arch/powerpc/include/asm/page_64.h
2707 @@ -180,15 +180,18 @@ do { \
2708 * stack by default, so in the absense of a PT_GNU_STACK program header
2709 * we turn execute permission off.
2710 */
2711 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2712 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2713 +#define VM_STACK_DEFAULT_FLAGS32 \
2714 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2715 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2716
2717 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2718 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2719
2720 +#ifndef CONFIG_PAX_PAGEEXEC
2721 #define VM_STACK_DEFAULT_FLAGS \
2722 (test_thread_flag(TIF_32BIT) ? \
2723 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2724 +#endif
2725
2726 #include <asm-generic/getorder.h>
2727
2728 diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
2729 index b5ea626..4030822 100644
2730 --- a/arch/powerpc/include/asm/pci.h
2731 +++ b/arch/powerpc/include/asm/pci.h
2732 @@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
2733 }
2734
2735 #ifdef CONFIG_PCI
2736 -extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2737 -extern struct dma_map_ops *get_pci_dma_ops(void);
2738 +extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2739 +extern const struct dma_map_ops *get_pci_dma_ops(void);
2740 #else /* CONFIG_PCI */
2741 #define set_pci_dma_ops(d)
2742 #define get_pci_dma_ops() NULL
2743 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
2744 index 2a5da06..d65bea2 100644
2745 --- a/arch/powerpc/include/asm/pgtable.h
2746 +++ b/arch/powerpc/include/asm/pgtable.h
2747 @@ -2,6 +2,7 @@
2748 #define _ASM_POWERPC_PGTABLE_H
2749 #ifdef __KERNEL__
2750
2751 +#include <linux/const.h>
2752 #ifndef __ASSEMBLY__
2753 #include <asm/processor.h> /* For TASK_SIZE */
2754 #include <asm/mmu.h>
2755 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
2756 index 4aad413..85d86bf 100644
2757 --- a/arch/powerpc/include/asm/pte-hash32.h
2758 +++ b/arch/powerpc/include/asm/pte-hash32.h
2759 @@ -21,6 +21,7 @@
2760 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2761 #define _PAGE_USER 0x004 /* usermode access allowed */
2762 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2763 +#define _PAGE_EXEC _PAGE_GUARDED
2764 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2765 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2766 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2767 diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
2768 index 8c34149..78f425a 100644
2769 --- a/arch/powerpc/include/asm/ptrace.h
2770 +++ b/arch/powerpc/include/asm/ptrace.h
2771 @@ -103,7 +103,7 @@ extern unsigned long profile_pc(struct pt_regs *regs);
2772 } while(0)
2773
2774 struct task_struct;
2775 -extern unsigned long ptrace_get_reg(struct task_struct *task, int regno);
2776 +extern unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno);
2777 extern int ptrace_put_reg(struct task_struct *task, int regno,
2778 unsigned long data);
2779
2780 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
2781 index 32a7c30..be3a8bb 100644
2782 --- a/arch/powerpc/include/asm/reg.h
2783 +++ b/arch/powerpc/include/asm/reg.h
2784 @@ -191,6 +191,7 @@
2785 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2786 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2787 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2788 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2789 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2790 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2791 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2792 diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
2793 index 8979d4c..d2fd0d3 100644
2794 --- a/arch/powerpc/include/asm/swiotlb.h
2795 +++ b/arch/powerpc/include/asm/swiotlb.h
2796 @@ -13,7 +13,7 @@
2797
2798 #include <linux/swiotlb.h>
2799
2800 -extern struct dma_map_ops swiotlb_dma_ops;
2801 +extern const struct dma_map_ops swiotlb_dma_ops;
2802
2803 static inline void dma_mark_clean(void *addr, size_t size) {}
2804
2805 diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
2806 index 094a12a..877a60a 100644
2807 --- a/arch/powerpc/include/asm/system.h
2808 +++ b/arch/powerpc/include/asm/system.h
2809 @@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
2810 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2811 #endif
2812
2813 -extern unsigned long arch_align_stack(unsigned long sp);
2814 +#define arch_align_stack(x) ((x) & ~0xfUL)
2815
2816 /* Used in very early kernel initialization. */
2817 extern unsigned long reloc_offset(void);
2818 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
2819 index bd0fb84..a42a14b 100644
2820 --- a/arch/powerpc/include/asm/uaccess.h
2821 +++ b/arch/powerpc/include/asm/uaccess.h
2822 @@ -13,6 +13,8 @@
2823 #define VERIFY_READ 0
2824 #define VERIFY_WRITE 1
2825
2826 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
2827 +
2828 /*
2829 * The fs value determines whether argument validity checking should be
2830 * performed or not. If get_fs() == USER_DS, checking is performed, with
2831 @@ -327,52 +329,6 @@ do { \
2832 extern unsigned long __copy_tofrom_user(void __user *to,
2833 const void __user *from, unsigned long size);
2834
2835 -#ifndef __powerpc64__
2836 -
2837 -static inline unsigned long copy_from_user(void *to,
2838 - const void __user *from, unsigned long n)
2839 -{
2840 - unsigned long over;
2841 -
2842 - if (access_ok(VERIFY_READ, from, n))
2843 - return __copy_tofrom_user((__force void __user *)to, from, n);
2844 - if ((unsigned long)from < TASK_SIZE) {
2845 - over = (unsigned long)from + n - TASK_SIZE;
2846 - return __copy_tofrom_user((__force void __user *)to, from,
2847 - n - over) + over;
2848 - }
2849 - return n;
2850 -}
2851 -
2852 -static inline unsigned long copy_to_user(void __user *to,
2853 - const void *from, unsigned long n)
2854 -{
2855 - unsigned long over;
2856 -
2857 - if (access_ok(VERIFY_WRITE, to, n))
2858 - return __copy_tofrom_user(to, (__force void __user *)from, n);
2859 - if ((unsigned long)to < TASK_SIZE) {
2860 - over = (unsigned long)to + n - TASK_SIZE;
2861 - return __copy_tofrom_user(to, (__force void __user *)from,
2862 - n - over) + over;
2863 - }
2864 - return n;
2865 -}
2866 -
2867 -#else /* __powerpc64__ */
2868 -
2869 -#define __copy_in_user(to, from, size) \
2870 - __copy_tofrom_user((to), (from), (size))
2871 -
2872 -extern unsigned long copy_from_user(void *to, const void __user *from,
2873 - unsigned long n);
2874 -extern unsigned long copy_to_user(void __user *to, const void *from,
2875 - unsigned long n);
2876 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
2877 - unsigned long n);
2878 -
2879 -#endif /* __powerpc64__ */
2880 -
2881 static inline unsigned long __copy_from_user_inatomic(void *to,
2882 const void __user *from, unsigned long n)
2883 {
2884 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
2885 if (ret == 0)
2886 return 0;
2887 }
2888 +
2889 + if (!__builtin_constant_p(n))
2890 + check_object_size(to, n, false);
2891 +
2892 return __copy_tofrom_user((__force void __user *)to, from, n);
2893 }
2894
2895 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
2896 if (ret == 0)
2897 return 0;
2898 }
2899 +
2900 + if (!__builtin_constant_p(n))
2901 + check_object_size(from, n, true);
2902 +
2903 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2904 }
2905
2906 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
2907 return __copy_to_user_inatomic(to, from, size);
2908 }
2909
2910 +#ifndef __powerpc64__
2911 +
2912 +static inline unsigned long __must_check copy_from_user(void *to,
2913 + const void __user *from, unsigned long n)
2914 +{
2915 + unsigned long over;
2916 +
2917 + if ((long)n < 0)
2918 + return n;
2919 +
2920 + if (access_ok(VERIFY_READ, from, n)) {
2921 + if (!__builtin_constant_p(n))
2922 + check_object_size(to, n, false);
2923 + return __copy_tofrom_user((__force void __user *)to, from, n);
2924 + }
2925 + if ((unsigned long)from < TASK_SIZE) {
2926 + over = (unsigned long)from + n - TASK_SIZE;
2927 + if (!__builtin_constant_p(n - over))
2928 + check_object_size(to, n - over, false);
2929 + return __copy_tofrom_user((__force void __user *)to, from,
2930 + n - over) + over;
2931 + }
2932 + return n;
2933 +}
2934 +
2935 +static inline unsigned long __must_check copy_to_user(void __user *to,
2936 + const void *from, unsigned long n)
2937 +{
2938 + unsigned long over;
2939 +
2940 + if ((long)n < 0)
2941 + return n;
2942 +
2943 + if (access_ok(VERIFY_WRITE, to, n)) {
2944 + if (!__builtin_constant_p(n))
2945 + check_object_size(from, n, true);
2946 + return __copy_tofrom_user(to, (__force void __user *)from, n);
2947 + }
2948 + if ((unsigned long)to < TASK_SIZE) {
2949 + over = (unsigned long)to + n - TASK_SIZE;
2950 + if (!__builtin_constant_p(n))
2951 + check_object_size(from, n - over, true);
2952 + return __copy_tofrom_user(to, (__force void __user *)from,
2953 + n - over) + over;
2954 + }
2955 + return n;
2956 +}
2957 +
2958 +#else /* __powerpc64__ */
2959 +
2960 +#define __copy_in_user(to, from, size) \
2961 + __copy_tofrom_user((to), (from), (size))
2962 +
2963 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2964 +{
2965 + if ((long)n < 0 || n > INT_MAX)
2966 + return n;
2967 +
2968 + if (!__builtin_constant_p(n))
2969 + check_object_size(to, n, false);
2970 +
2971 + if (likely(access_ok(VERIFY_READ, from, n)))
2972 + n = __copy_from_user(to, from, n);
2973 + else
2974 + memset(to, 0, n);
2975 + return n;
2976 +}
2977 +
2978 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2979 +{
2980 + if ((long)n < 0 || n > INT_MAX)
2981 + return n;
2982 +
2983 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
2984 + if (!__builtin_constant_p(n))
2985 + check_object_size(from, n, true);
2986 + n = __copy_to_user(to, from, n);
2987 + }
2988 + return n;
2989 +}
2990 +
2991 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
2992 + unsigned long n);
2993 +
2994 +#endif /* __powerpc64__ */
2995 +
2996 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2997
2998 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2999 diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
3000 index bb37b1d..01fe9ce 100644
3001 --- a/arch/powerpc/kernel/cacheinfo.c
3002 +++ b/arch/powerpc/kernel/cacheinfo.c
3003 @@ -642,7 +642,7 @@ static struct kobj_attribute *cache_index_opt_attrs[] = {
3004 &cache_assoc_attr,
3005 };
3006
3007 -static struct sysfs_ops cache_index_ops = {
3008 +static const struct sysfs_ops cache_index_ops = {
3009 .show = cache_index_show,
3010 };
3011
3012 diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
3013 index 37771a5..648530c 100644
3014 --- a/arch/powerpc/kernel/dma-iommu.c
3015 +++ b/arch/powerpc/kernel/dma-iommu.c
3016 @@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
3017 }
3018
3019 /* We support DMA to/from any memory page via the iommu */
3020 -static int dma_iommu_dma_supported(struct device *dev, u64 mask)
3021 +int dma_iommu_dma_supported(struct device *dev, u64 mask)
3022 {
3023 struct iommu_table *tbl = get_iommu_table_base(dev);
3024
3025 diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
3026 index e96cbbd..bdd6d41 100644
3027 --- a/arch/powerpc/kernel/dma-swiotlb.c
3028 +++ b/arch/powerpc/kernel/dma-swiotlb.c
3029 @@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
3030 * map_page, and unmap_page on highmem, use normal dma_ops
3031 * for everything else.
3032 */
3033 -struct dma_map_ops swiotlb_dma_ops = {
3034 +const struct dma_map_ops swiotlb_dma_ops = {
3035 .alloc_coherent = dma_direct_alloc_coherent,
3036 .free_coherent = dma_direct_free_coherent,
3037 .map_sg = swiotlb_map_sg_attrs,
3038 diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
3039 index 6215062..ebea59c 100644
3040 --- a/arch/powerpc/kernel/dma.c
3041 +++ b/arch/powerpc/kernel/dma.c
3042 @@ -134,7 +134,7 @@ static inline void dma_direct_sync_single_range(struct device *dev,
3043 }
3044 #endif
3045
3046 -struct dma_map_ops dma_direct_ops = {
3047 +const struct dma_map_ops dma_direct_ops = {
3048 .alloc_coherent = dma_direct_alloc_coherent,
3049 .free_coherent = dma_direct_free_coherent,
3050 .map_sg = dma_direct_map_sg,
3051 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
3052 index 24dcc0e..a300455 100644
3053 --- a/arch/powerpc/kernel/exceptions-64e.S
3054 +++ b/arch/powerpc/kernel/exceptions-64e.S
3055 @@ -455,6 +455,7 @@ storage_fault_common:
3056 std r14,_DAR(r1)
3057 std r15,_DSISR(r1)
3058 addi r3,r1,STACK_FRAME_OVERHEAD
3059 + bl .save_nvgprs
3060 mr r4,r14
3061 mr r5,r15
3062 ld r14,PACA_EXGEN+EX_R14(r13)
3063 @@ -464,8 +465,7 @@ storage_fault_common:
3064 cmpdi r3,0
3065 bne- 1f
3066 b .ret_from_except_lite
3067 -1: bl .save_nvgprs
3068 - mr r5,r3
3069 +1: mr r5,r3
3070 addi r3,r1,STACK_FRAME_OVERHEAD
3071 ld r4,_DAR(r1)
3072 bl .bad_page_fault
3073 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
3074 index 1808876..9fd206a 100644
3075 --- a/arch/powerpc/kernel/exceptions-64s.S
3076 +++ b/arch/powerpc/kernel/exceptions-64s.S
3077 @@ -818,10 +818,10 @@ handle_page_fault:
3078 11: ld r4,_DAR(r1)
3079 ld r5,_DSISR(r1)
3080 addi r3,r1,STACK_FRAME_OVERHEAD
3081 + bl .save_nvgprs
3082 bl .do_page_fault
3083 cmpdi r3,0
3084 beq+ 13f
3085 - bl .save_nvgprs
3086 mr r5,r3
3087 addi r3,r1,STACK_FRAME_OVERHEAD
3088 lwz r4,_DAR(r1)
3089 diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
3090 index a4c8b38..1b09ad9 100644
3091 --- a/arch/powerpc/kernel/ibmebus.c
3092 +++ b/arch/powerpc/kernel/ibmebus.c
3093 @@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct device *dev, u64 mask)
3094 return 1;
3095 }
3096
3097 -static struct dma_map_ops ibmebus_dma_ops = {
3098 +static const struct dma_map_ops ibmebus_dma_ops = {
3099 .alloc_coherent = ibmebus_alloc_coherent,
3100 .free_coherent = ibmebus_free_coherent,
3101 .map_sg = ibmebus_map_sg,
3102 diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
3103 index 641c74b..8339ad7 100644
3104 --- a/arch/powerpc/kernel/kgdb.c
3105 +++ b/arch/powerpc/kernel/kgdb.c
3106 @@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
3107 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
3108 return 0;
3109
3110 - if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
3111 + if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
3112 regs->nip += 4;
3113
3114 return 1;
3115 @@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
3116 /*
3117 * Global data
3118 */
3119 -struct kgdb_arch arch_kgdb_ops = {
3120 +const struct kgdb_arch arch_kgdb_ops = {
3121 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
3122 };
3123
3124 diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
3125 index 477c663..4f50234 100644
3126 --- a/arch/powerpc/kernel/module.c
3127 +++ b/arch/powerpc/kernel/module.c
3128 @@ -31,11 +31,24 @@
3129
3130 LIST_HEAD(module_bug_list);
3131
3132 +#ifdef CONFIG_PAX_KERNEXEC
3133 void *module_alloc(unsigned long size)
3134 {
3135 if (size == 0)
3136 return NULL;
3137
3138 + return vmalloc(size);
3139 +}
3140 +
3141 +void *module_alloc_exec(unsigned long size)
3142 +#else
3143 +void *module_alloc(unsigned long size)
3144 +#endif
3145 +
3146 +{
3147 + if (size == 0)
3148 + return NULL;
3149 +
3150 return vmalloc_exec(size);
3151 }
3152
3153 @@ -45,6 +58,13 @@ void module_free(struct module *mod, void *module_region)
3154 vfree(module_region);
3155 }
3156
3157 +#ifdef CONFIG_PAX_KERNEXEC
3158 +void module_free_exec(struct module *mod, void *module_region)
3159 +{
3160 + module_free(mod, module_region);
3161 +}
3162 +#endif
3163 +
3164 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
3165 const Elf_Shdr *sechdrs,
3166 const char *name)
3167 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
3168 index f832773..0507238 100644
3169 --- a/arch/powerpc/kernel/module_32.c
3170 +++ b/arch/powerpc/kernel/module_32.c
3171 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
3172 me->arch.core_plt_section = i;
3173 }
3174 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
3175 - printk("Module doesn't contain .plt or .init.plt sections.\n");
3176 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
3177 return -ENOEXEC;
3178 }
3179
3180 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *location,
3181
3182 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
3183 /* Init, or core PLT? */
3184 - if (location >= mod->module_core
3185 - && location < mod->module_core + mod->core_size)
3186 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
3187 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
3188 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
3189 - else
3190 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
3191 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
3192 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
3193 + else {
3194 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
3195 + return ~0UL;
3196 + }
3197
3198 /* Find this entry, or if that fails, the next avail. entry */
3199 while (entry->jump[0]) {
3200 diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
3201 index cadbed6..b9bbb00 100644
3202 --- a/arch/powerpc/kernel/pci-common.c
3203 +++ b/arch/powerpc/kernel/pci-common.c
3204 @@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
3205 unsigned int ppc_pci_flags = 0;
3206
3207
3208 -static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
3209 +static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
3210
3211 -void set_pci_dma_ops(struct dma_map_ops *dma_ops)
3212 +void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
3213 {
3214 pci_dma_ops = dma_ops;
3215 }
3216
3217 -struct dma_map_ops *get_pci_dma_ops(void)
3218 +const struct dma_map_ops *get_pci_dma_ops(void)
3219 {
3220 return pci_dma_ops;
3221 }
3222 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
3223 index 7b816da..8d5c277 100644
3224 --- a/arch/powerpc/kernel/process.c
3225 +++ b/arch/powerpc/kernel/process.c
3226 @@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
3227 * Lookup NIP late so we have the best change of getting the
3228 * above info out without failing
3229 */
3230 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
3231 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
3232 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
3233 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
3234 #endif
3235 show_stack(current, (unsigned long *) regs->gpr[1]);
3236 if (!user_mode(regs))
3237 @@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3238 newsp = stack[0];
3239 ip = stack[STACK_FRAME_LR_SAVE];
3240 if (!firstframe || ip != lr) {
3241 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
3242 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
3243 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3244 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
3245 - printk(" (%pS)",
3246 + printk(" (%pA)",
3247 (void *)current->ret_stack[curr_frame].ret);
3248 curr_frame--;
3249 }
3250 @@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3251 struct pt_regs *regs = (struct pt_regs *)
3252 (sp + STACK_FRAME_OVERHEAD);
3253 lr = regs->link;
3254 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
3255 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
3256 regs->trap, (void *)regs->nip, (void *)lr);
3257 firstframe = 1;
3258 }
3259 @@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
3260 }
3261
3262 #endif /* THREAD_SHIFT < PAGE_SHIFT */
3263 -
3264 -unsigned long arch_align_stack(unsigned long sp)
3265 -{
3266 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3267 - sp -= get_random_int() & ~PAGE_MASK;
3268 - return sp & ~0xf;
3269 -}
3270 -
3271 -static inline unsigned long brk_rnd(void)
3272 -{
3273 - unsigned long rnd = 0;
3274 -
3275 - /* 8MB for 32bit, 1GB for 64bit */
3276 - if (is_32bit_task())
3277 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
3278 - else
3279 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
3280 -
3281 - return rnd << PAGE_SHIFT;
3282 -}
3283 -
3284 -unsigned long arch_randomize_brk(struct mm_struct *mm)
3285 -{
3286 - unsigned long base = mm->brk;
3287 - unsigned long ret;
3288 -
3289 -#ifdef CONFIG_PPC_STD_MMU_64
3290 - /*
3291 - * If we are using 1TB segments and we are allowed to randomise
3292 - * the heap, we can put it above 1TB so it is backed by a 1TB
3293 - * segment. Otherwise the heap will be in the bottom 1TB
3294 - * which always uses 256MB segments and this may result in a
3295 - * performance penalty.
3296 - */
3297 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
3298 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
3299 -#endif
3300 -
3301 - ret = PAGE_ALIGN(base + brk_rnd());
3302 -
3303 - if (ret < mm->brk)
3304 - return mm->brk;
3305 -
3306 - return ret;
3307 -}
3308 -
3309 -unsigned long randomize_et_dyn(unsigned long base)
3310 -{
3311 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3312 -
3313 - if (ret < base)
3314 - return base;
3315 -
3316 - return ret;
3317 -}
3318 diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
3319 index ef14988..856c4bc 100644
3320 --- a/arch/powerpc/kernel/ptrace.c
3321 +++ b/arch/powerpc/kernel/ptrace.c
3322 @@ -86,7 +86,7 @@ static int set_user_trap(struct task_struct *task, unsigned long trap)
3323 /*
3324 * Get contents of register REGNO in task TASK.
3325 */
3326 -unsigned long ptrace_get_reg(struct task_struct *task, int regno)
3327 +unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno)
3328 {
3329 if (task->thread.regs == NULL)
3330 return -EIO;
3331 @@ -894,7 +894,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
3332
3333 CHECK_FULL_REGS(child->thread.regs);
3334 if (index < PT_FPR0) {
3335 - tmp = ptrace_get_reg(child, (int) index);
3336 + tmp = ptrace_get_reg(child, index);
3337 } else {
3338 flush_fp_to_thread(child);
3339 tmp = ((unsigned long *)child->thread.fpr)
3340 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
3341 index d670429..2bc59b2 100644
3342 --- a/arch/powerpc/kernel/signal_32.c
3343 +++ b/arch/powerpc/kernel/signal_32.c
3344 @@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
3345 /* Save user registers on the stack */
3346 frame = &rt_sf->uc.uc_mcontext;
3347 addr = frame;
3348 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
3349 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3350 if (save_user_regs(regs, frame, 0, 1))
3351 goto badframe;
3352 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
3353 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
3354 index 2fe6fc6..ada0d96 100644
3355 --- a/arch/powerpc/kernel/signal_64.c
3356 +++ b/arch/powerpc/kernel/signal_64.c
3357 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
3358 current->thread.fpscr.val = 0;
3359
3360 /* Set up to return from userspace. */
3361 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
3362 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3363 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
3364 } else {
3365 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
3366 diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
3367 index b97c2d6..dd01a6a 100644
3368 --- a/arch/powerpc/kernel/sys_ppc32.c
3369 +++ b/arch/powerpc/kernel/sys_ppc32.c
3370 @@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct __sysctl_args32 __user *args)
3371 if (oldlenp) {
3372 if (!error) {
3373 if (get_user(oldlen, oldlenp) ||
3374 - put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
3375 + put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
3376 + copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
3377 error = -EFAULT;
3378 }
3379 - copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
3380 }
3381 return error;
3382 }
3383 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
3384 index 6f0ae1a..e4b6a56 100644
3385 --- a/arch/powerpc/kernel/traps.c
3386 +++ b/arch/powerpc/kernel/traps.c
3387 @@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
3388 static inline void pmac_backlight_unblank(void) { }
3389 #endif
3390
3391 +extern void gr_handle_kernel_exploit(void);
3392 +
3393 int die(const char *str, struct pt_regs *regs, long err)
3394 {
3395 static struct {
3396 @@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs *regs, long err)
3397 if (panic_on_oops)
3398 panic("Fatal exception");
3399
3400 + gr_handle_kernel_exploit();
3401 +
3402 oops_exit();
3403 do_exit(err);
3404
3405 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
3406 index 137dc22..fe57a79 100644
3407 --- a/arch/powerpc/kernel/vdso.c
3408 +++ b/arch/powerpc/kernel/vdso.c
3409 @@ -36,6 +36,7 @@
3410 #include <asm/firmware.h>
3411 #include <asm/vdso.h>
3412 #include <asm/vdso_datapage.h>
3413 +#include <asm/mman.h>
3414
3415 #include "setup.h"
3416
3417 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3418 vdso_base = VDSO32_MBASE;
3419 #endif
3420
3421 - current->mm->context.vdso_base = 0;
3422 + current->mm->context.vdso_base = ~0UL;
3423
3424 /* vDSO has a problem and was disabled, just don't "enable" it for the
3425 * process
3426 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3427 vdso_base = get_unmapped_area(NULL, vdso_base,
3428 (vdso_pages << PAGE_SHIFT) +
3429 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
3430 - 0, 0);
3431 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
3432 if (IS_ERR_VALUE(vdso_base)) {
3433 rc = vdso_base;
3434 goto fail_mmapsem;
3435 diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
3436 index 77f6421..829564a 100644
3437 --- a/arch/powerpc/kernel/vio.c
3438 +++ b/arch/powerpc/kernel/vio.c
3439 @@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
3440 vio_cmo_dealloc(viodev, alloc_size);
3441 }
3442
3443 -struct dma_map_ops vio_dma_mapping_ops = {
3444 +static const struct dma_map_ops vio_dma_mapping_ops = {
3445 .alloc_coherent = vio_dma_iommu_alloc_coherent,
3446 .free_coherent = vio_dma_iommu_free_coherent,
3447 .map_sg = vio_dma_iommu_map_sg,
3448 .unmap_sg = vio_dma_iommu_unmap_sg,
3449 + .dma_supported = dma_iommu_dma_supported,
3450 .map_page = vio_dma_iommu_map_page,
3451 .unmap_page = vio_dma_iommu_unmap_page,
3452
3453 @@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vio_dev *viodev)
3454
3455 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
3456 {
3457 - vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
3458 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
3459 }
3460
3461 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
3462 index 5eea6f3..5d10396 100644
3463 --- a/arch/powerpc/lib/usercopy_64.c
3464 +++ b/arch/powerpc/lib/usercopy_64.c
3465 @@ -9,22 +9,6 @@
3466 #include <linux/module.h>
3467 #include <asm/uaccess.h>
3468
3469 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3470 -{
3471 - if (likely(access_ok(VERIFY_READ, from, n)))
3472 - n = __copy_from_user(to, from, n);
3473 - else
3474 - memset(to, 0, n);
3475 - return n;
3476 -}
3477 -
3478 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3479 -{
3480 - if (likely(access_ok(VERIFY_WRITE, to, n)))
3481 - n = __copy_to_user(to, from, n);
3482 - return n;
3483 -}
3484 -
3485 unsigned long copy_in_user(void __user *to, const void __user *from,
3486 unsigned long n)
3487 {
3488 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
3489 return n;
3490 }
3491
3492 -EXPORT_SYMBOL(copy_from_user);
3493 -EXPORT_SYMBOL(copy_to_user);
3494 EXPORT_SYMBOL(copy_in_user);
3495
3496 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
3497 index e7dae82..877ce0d 100644
3498 --- a/arch/powerpc/mm/fault.c
3499 +++ b/arch/powerpc/mm/fault.c
3500 @@ -30,6 +30,10 @@
3501 #include <linux/kprobes.h>
3502 #include <linux/kdebug.h>
3503 #include <linux/perf_event.h>
3504 +#include <linux/slab.h>
3505 +#include <linux/pagemap.h>
3506 +#include <linux/compiler.h>
3507 +#include <linux/unistd.h>
3508
3509 #include <asm/firmware.h>
3510 #include <asm/page.h>
3511 @@ -40,6 +44,7 @@
3512 #include <asm/uaccess.h>
3513 #include <asm/tlbflush.h>
3514 #include <asm/siginfo.h>
3515 +#include <asm/ptrace.h>
3516
3517
3518 #ifdef CONFIG_KPROBES
3519 @@ -64,6 +69,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
3520 }
3521 #endif
3522
3523 +#ifdef CONFIG_PAX_PAGEEXEC
3524 +/*
3525 + * PaX: decide what to do with offenders (regs->nip = fault address)
3526 + *
3527 + * returns 1 when task should be killed
3528 + */
3529 +static int pax_handle_fetch_fault(struct pt_regs *regs)
3530 +{
3531 + return 1;
3532 +}
3533 +
3534 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3535 +{
3536 + unsigned long i;
3537 +
3538 + printk(KERN_ERR "PAX: bytes at PC: ");
3539 + for (i = 0; i < 5; i++) {
3540 + unsigned int c;
3541 + if (get_user(c, (unsigned int __user *)pc+i))
3542 + printk(KERN_CONT "???????? ");
3543 + else
3544 + printk(KERN_CONT "%08x ", c);
3545 + }
3546 + printk("\n");
3547 +}
3548 +#endif
3549 +
3550 /*
3551 * Check whether the instruction at regs->nip is a store using
3552 * an update addressing form which will update r1.
3553 @@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
3554 * indicate errors in DSISR but can validly be set in SRR1.
3555 */
3556 if (trap == 0x400)
3557 - error_code &= 0x48200000;
3558 + error_code &= 0x58200000;
3559 else
3560 is_write = error_code & DSISR_ISSTORE;
3561 #else
3562 @@ -250,7 +282,7 @@ good_area:
3563 * "undefined". Of those that can be set, this is the only
3564 * one which seems bad.
3565 */
3566 - if (error_code & 0x10000000)
3567 + if (error_code & DSISR_GUARDED)
3568 /* Guarded storage error. */
3569 goto bad_area;
3570 #endif /* CONFIG_8xx */
3571 @@ -265,7 +297,7 @@ good_area:
3572 * processors use the same I/D cache coherency mechanism
3573 * as embedded.
3574 */
3575 - if (error_code & DSISR_PROTFAULT)
3576 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
3577 goto bad_area;
3578 #endif /* CONFIG_PPC_STD_MMU */
3579
3580 @@ -335,6 +367,23 @@ bad_area:
3581 bad_area_nosemaphore:
3582 /* User mode accesses cause a SIGSEGV */
3583 if (user_mode(regs)) {
3584 +
3585 +#ifdef CONFIG_PAX_PAGEEXEC
3586 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
3587 +#ifdef CONFIG_PPC_STD_MMU
3588 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
3589 +#else
3590 + if (is_exec && regs->nip == address) {
3591 +#endif
3592 + switch (pax_handle_fetch_fault(regs)) {
3593 + }
3594 +
3595 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
3596 + do_group_exit(SIGKILL);
3597 + }
3598 + }
3599 +#endif
3600 +
3601 _exception(SIGSEGV, regs, code, address);
3602 return 0;
3603 }
3604 diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
3605 index 5973631..ad617af 100644
3606 --- a/arch/powerpc/mm/mem.c
3607 +++ b/arch/powerpc/mm/mem.c
3608 @@ -250,7 +250,7 @@ static int __init mark_nonram_nosave(void)
3609 {
3610 unsigned long lmb_next_region_start_pfn,
3611 lmb_region_max_pfn;
3612 - int i;
3613 + unsigned int i;
3614
3615 for (i = 0; i < lmb.memory.cnt - 1; i++) {
3616 lmb_region_max_pfn =
3617 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
3618 index 0d957a4..26d968f 100644
3619 --- a/arch/powerpc/mm/mmap_64.c
3620 +++ b/arch/powerpc/mm/mmap_64.c
3621 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3622 */
3623 if (mmap_is_legacy()) {
3624 mm->mmap_base = TASK_UNMAPPED_BASE;
3625 +
3626 +#ifdef CONFIG_PAX_RANDMMAP
3627 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3628 + mm->mmap_base += mm->delta_mmap;
3629 +#endif
3630 +
3631 mm->get_unmapped_area = arch_get_unmapped_area;
3632 mm->unmap_area = arch_unmap_area;
3633 } else {
3634 mm->mmap_base = mmap_base();
3635 +
3636 +#ifdef CONFIG_PAX_RANDMMAP
3637 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3638 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3639 +#endif
3640 +
3641 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3642 mm->unmap_area = arch_unmap_area_topdown;
3643 }
3644 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
3645 index ba51948..23009d9 100644
3646 --- a/arch/powerpc/mm/slice.c
3647 +++ b/arch/powerpc/mm/slice.c
3648 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
3649 if ((mm->task_size - len) < addr)
3650 return 0;
3651 vma = find_vma(mm, addr);
3652 - return (!vma || (addr + len) <= vma->vm_start);
3653 + return check_heap_stack_gap(vma, addr, len);
3654 }
3655
3656 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3657 @@ -256,7 +256,7 @@ full_search:
3658 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3659 continue;
3660 }
3661 - if (!vma || addr + len <= vma->vm_start) {
3662 + if (check_heap_stack_gap(vma, addr, len)) {
3663 /*
3664 * Remember the place where we stopped the search:
3665 */
3666 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3667 }
3668 }
3669
3670 - addr = mm->mmap_base;
3671 - while (addr > len) {
3672 + if (mm->mmap_base < len)
3673 + addr = -ENOMEM;
3674 + else
3675 + addr = mm->mmap_base - len;
3676 +
3677 + while (!IS_ERR_VALUE(addr)) {
3678 /* Go down by chunk size */
3679 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3680 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
3681
3682 /* Check for hit with different page size */
3683 mask = slice_range_to_mask(addr, len);
3684 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3685 * return with success:
3686 */
3687 vma = find_vma(mm, addr);
3688 - if (!vma || (addr + len) <= vma->vm_start) {
3689 + if (check_heap_stack_gap(vma, addr, len)) {
3690 /* remember the address as a hint for next time */
3691 if (use_cache)
3692 mm->free_area_cache = addr;
3693 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3694 mm->cached_hole_size = vma->vm_start - addr;
3695
3696 /* try just below the current vma->vm_start */
3697 - addr = vma->vm_start;
3698 + addr = skip_heap_stack_gap(vma, len);
3699 }
3700
3701 /*
3702 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
3703 if (fixed && addr > (mm->task_size - len))
3704 return -EINVAL;
3705
3706 +#ifdef CONFIG_PAX_RANDMMAP
3707 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3708 + addr = 0;
3709 +#endif
3710 +
3711 /* If hint, make sure it matches our alignment restrictions */
3712 if (!fixed && addr) {
3713 addr = _ALIGN_UP(addr, 1ul << pshift);
3714 diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
3715 index b5c753d..8f01abe 100644
3716 --- a/arch/powerpc/platforms/52xx/lite5200_pm.c
3717 +++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
3718 @@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
3719 lite5200_pm_target_state = PM_SUSPEND_ON;
3720 }
3721
3722 -static struct platform_suspend_ops lite5200_pm_ops = {
3723 +static const struct platform_suspend_ops lite5200_pm_ops = {
3724 .valid = lite5200_pm_valid,
3725 .begin = lite5200_pm_begin,
3726 .prepare = lite5200_pm_prepare,
3727 diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3728 index a55b0b6..478c18e 100644
3729 --- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3730 +++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3731 @@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
3732 iounmap(mbar);
3733 }
3734
3735 -static struct platform_suspend_ops mpc52xx_pm_ops = {
3736 +static const struct platform_suspend_ops mpc52xx_pm_ops = {
3737 .valid = mpc52xx_pm_valid,
3738 .prepare = mpc52xx_pm_prepare,
3739 .enter = mpc52xx_pm_enter,
3740 diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
3741 index 08e65fc..643d3ac 100644
3742 --- a/arch/powerpc/platforms/83xx/suspend.c
3743 +++ b/arch/powerpc/platforms/83xx/suspend.c
3744 @@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
3745 return ret;
3746 }
3747
3748 -static struct platform_suspend_ops mpc83xx_suspend_ops = {
3749 +static const struct platform_suspend_ops mpc83xx_suspend_ops = {
3750 .valid = mpc83xx_suspend_valid,
3751 .begin = mpc83xx_suspend_begin,
3752 .enter = mpc83xx_suspend_enter,
3753 diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
3754 index ca5bfdf..1602e09 100644
3755 --- a/arch/powerpc/platforms/cell/iommu.c
3756 +++ b/arch/powerpc/platforms/cell/iommu.c
3757 @@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struct device *dev, u64 mask)
3758
3759 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
3760
3761 -struct dma_map_ops dma_iommu_fixed_ops = {
3762 +const struct dma_map_ops dma_iommu_fixed_ops = {
3763 .alloc_coherent = dma_fixed_alloc_coherent,
3764 .free_coherent = dma_fixed_free_coherent,
3765 .map_sg = dma_fixed_map_sg,
3766 diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
3767 index e34b305..20e48ec 100644
3768 --- a/arch/powerpc/platforms/ps3/system-bus.c
3769 +++ b/arch/powerpc/platforms/ps3/system-bus.c
3770 @@ -694,7 +694,7 @@ static int ps3_dma_supported(struct device *_dev, u64 mask)
3771 return mask >= DMA_BIT_MASK(32);
3772 }
3773
3774 -static struct dma_map_ops ps3_sb_dma_ops = {
3775 +static const struct dma_map_ops ps3_sb_dma_ops = {
3776 .alloc_coherent = ps3_alloc_coherent,
3777 .free_coherent = ps3_free_coherent,
3778 .map_sg = ps3_sb_map_sg,
3779 @@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops = {
3780 .unmap_page = ps3_unmap_page,
3781 };
3782
3783 -static struct dma_map_ops ps3_ioc0_dma_ops = {
3784 +static const struct dma_map_ops ps3_ioc0_dma_ops = {
3785 .alloc_coherent = ps3_alloc_coherent,
3786 .free_coherent = ps3_free_coherent,
3787 .map_sg = ps3_ioc0_map_sg,
3788 diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
3789 index f0e6f28..60d53ed 100644
3790 --- a/arch/powerpc/platforms/pseries/Kconfig
3791 +++ b/arch/powerpc/platforms/pseries/Kconfig
3792 @@ -2,6 +2,8 @@ config PPC_PSERIES
3793 depends on PPC64 && PPC_BOOK3S
3794 bool "IBM pSeries & new (POWER5-based) iSeries"
3795 select MPIC
3796 + select PCI_MSI
3797 + select XICS
3798 select PPC_I8259
3799 select PPC_RTAS
3800 select RTAS_ERROR_LOGGING
3801 diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
3802 index 43c0aca..42c045b 100644
3803 --- a/arch/s390/Kconfig
3804 +++ b/arch/s390/Kconfig
3805 @@ -194,28 +194,26 @@ config AUDIT_ARCH
3806
3807 config S390_SWITCH_AMODE
3808 bool "Switch kernel/user addressing modes"
3809 + default y
3810 help
3811 This option allows to switch the addressing modes of kernel and user
3812 - space. The kernel parameter switch_amode=on will enable this feature,
3813 - default is disabled. Enabling this (via kernel parameter) on machines
3814 - earlier than IBM System z9-109 EC/BC will reduce system performance.
3815 + space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3816 + will reduce system performance.
3817
3818 Note that this option will also be selected by selecting the execute
3819 - protection option below. Enabling the execute protection via the
3820 - noexec kernel parameter will also switch the addressing modes,
3821 - independent of the switch_amode kernel parameter.
3822 + protection option below. Enabling the execute protection will also
3823 + switch the addressing modes, independent of this option.
3824
3825
3826 config S390_EXEC_PROTECT
3827 bool "Data execute protection"
3828 + default y
3829 select S390_SWITCH_AMODE
3830 help
3831 This option allows to enable a buffer overflow protection for user
3832 space programs and it also selects the addressing mode option above.
3833 - The kernel parameter noexec=on will enable this feature and also
3834 - switch the addressing modes, default is disabled. Enabling this (via
3835 - kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3836 - will reduce system performance.
3837 + Enabling this on machines earlier than IBM System z9-109 EC/BC will
3838 + reduce system performance.
3839
3840 comment "Code generation options"
3841
3842 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
3843 index e885442..5e6c303 100644
3844 --- a/arch/s390/include/asm/elf.h
3845 +++ b/arch/s390/include/asm/elf.h
3846 @@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
3847 that it will "exec", and that there is sufficient room for the brk. */
3848 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3849
3850 +#ifdef CONFIG_PAX_ASLR
3851 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3852 +
3853 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3854 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3855 +#endif
3856 +
3857 /* This yields a mask that user programs can use to figure out what
3858 instruction set this CPU supports. */
3859
3860 diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
3861 index e37478e..9ce0e9f 100644
3862 --- a/arch/s390/include/asm/setup.h
3863 +++ b/arch/s390/include/asm/setup.h
3864 @@ -50,13 +50,13 @@ extern unsigned long memory_end;
3865 void detect_memory_layout(struct mem_chunk chunk[]);
3866
3867 #ifdef CONFIG_S390_SWITCH_AMODE
3868 -extern unsigned int switch_amode;
3869 +#define switch_amode (1)
3870 #else
3871 #define switch_amode (0)
3872 #endif
3873
3874 #ifdef CONFIG_S390_EXEC_PROTECT
3875 -extern unsigned int s390_noexec;
3876 +#define s390_noexec (1)
3877 #else
3878 #define s390_noexec (0)
3879 #endif
3880 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
3881 index 8377e91..e28e6f1 100644
3882 --- a/arch/s390/include/asm/uaccess.h
3883 +++ b/arch/s390/include/asm/uaccess.h
3884 @@ -232,6 +232,10 @@ static inline unsigned long __must_check
3885 copy_to_user(void __user *to, const void *from, unsigned long n)
3886 {
3887 might_fault();
3888 +
3889 + if ((long)n < 0)
3890 + return n;
3891 +
3892 if (access_ok(VERIFY_WRITE, to, n))
3893 n = __copy_to_user(to, from, n);
3894 return n;
3895 @@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
3896 static inline unsigned long __must_check
3897 __copy_from_user(void *to, const void __user *from, unsigned long n)
3898 {
3899 + if ((long)n < 0)
3900 + return n;
3901 +
3902 if (__builtin_constant_p(n) && (n <= 256))
3903 return uaccess.copy_from_user_small(n, from, to);
3904 else
3905 @@ -283,6 +290,10 @@ static inline unsigned long __must_check
3906 copy_from_user(void *to, const void __user *from, unsigned long n)
3907 {
3908 might_fault();
3909 +
3910 + if ((long)n < 0)
3911 + return n;
3912 +
3913 if (access_ok(VERIFY_READ, from, n))
3914 n = __copy_from_user(to, from, n);
3915 else
3916 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
3917 index 639380a..72e3c02 100644
3918 --- a/arch/s390/kernel/module.c
3919 +++ b/arch/s390/kernel/module.c
3920 @@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
3921
3922 /* Increase core size by size of got & plt and set start
3923 offsets for got and plt. */
3924 - me->core_size = ALIGN(me->core_size, 4);
3925 - me->arch.got_offset = me->core_size;
3926 - me->core_size += me->arch.got_size;
3927 - me->arch.plt_offset = me->core_size;
3928 - me->core_size += me->arch.plt_size;
3929 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
3930 + me->arch.got_offset = me->core_size_rw;
3931 + me->core_size_rw += me->arch.got_size;
3932 + me->arch.plt_offset = me->core_size_rx;
3933 + me->core_size_rx += me->arch.plt_size;
3934 return 0;
3935 }
3936
3937 @@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3938 if (info->got_initialized == 0) {
3939 Elf_Addr *gotent;
3940
3941 - gotent = me->module_core + me->arch.got_offset +
3942 + gotent = me->module_core_rw + me->arch.got_offset +
3943 info->got_offset;
3944 *gotent = val;
3945 info->got_initialized = 1;
3946 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3947 else if (r_type == R_390_GOTENT ||
3948 r_type == R_390_GOTPLTENT)
3949 *(unsigned int *) loc =
3950 - (val + (Elf_Addr) me->module_core - loc) >> 1;
3951 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3952 else if (r_type == R_390_GOT64 ||
3953 r_type == R_390_GOTPLT64)
3954 *(unsigned long *) loc = val;
3955 @@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3956 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3957 if (info->plt_initialized == 0) {
3958 unsigned int *ip;
3959 - ip = me->module_core + me->arch.plt_offset +
3960 + ip = me->module_core_rx + me->arch.plt_offset +
3961 info->plt_offset;
3962 #ifndef CONFIG_64BIT
3963 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3964 @@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3965 val - loc + 0xffffUL < 0x1ffffeUL) ||
3966 (r_type == R_390_PLT32DBL &&
3967 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3968 - val = (Elf_Addr) me->module_core +
3969 + val = (Elf_Addr) me->module_core_rx +
3970 me->arch.plt_offset +
3971 info->plt_offset;
3972 val += rela->r_addend - loc;
3973 @@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3974 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3975 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3976 val = val + rela->r_addend -
3977 - ((Elf_Addr) me->module_core + me->arch.got_offset);
3978 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3979 if (r_type == R_390_GOTOFF16)
3980 *(unsigned short *) loc = val;
3981 else if (r_type == R_390_GOTOFF32)
3982 @@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3983 break;
3984 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3985 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3986 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
3987 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3988 rela->r_addend - loc;
3989 if (r_type == R_390_GOTPC)
3990 *(unsigned int *) loc = val;
3991 diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
3992 index 061479f..dbfb08c 100644
3993 --- a/arch/s390/kernel/setup.c
3994 +++ b/arch/s390/kernel/setup.c
3995 @@ -306,9 +306,6 @@ static int __init early_parse_mem(char *p)
3996 early_param("mem", early_parse_mem);
3997
3998 #ifdef CONFIG_S390_SWITCH_AMODE
3999 -unsigned int switch_amode = 0;
4000 -EXPORT_SYMBOL_GPL(switch_amode);
4001 -
4002 static int set_amode_and_uaccess(unsigned long user_amode,
4003 unsigned long user32_amode)
4004 {
4005 @@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigned long user_amode,
4006 return 0;
4007 }
4008 }
4009 -
4010 -/*
4011 - * Switch kernel/user addressing modes?
4012 - */
4013 -static int __init early_parse_switch_amode(char *p)
4014 -{
4015 - switch_amode = 1;
4016 - return 0;
4017 -}
4018 -early_param("switch_amode", early_parse_switch_amode);
4019 -
4020 #else /* CONFIG_S390_SWITCH_AMODE */
4021 static inline int set_amode_and_uaccess(unsigned long user_amode,
4022 unsigned long user32_amode)
4023 @@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(unsigned long user_amode,
4024 }
4025 #endif /* CONFIG_S390_SWITCH_AMODE */
4026
4027 -#ifdef CONFIG_S390_EXEC_PROTECT
4028 -unsigned int s390_noexec = 0;
4029 -EXPORT_SYMBOL_GPL(s390_noexec);
4030 -
4031 -/*
4032 - * Enable execute protection?
4033 - */
4034 -static int __init early_parse_noexec(char *p)
4035 -{
4036 - if (!strncmp(p, "off", 3))
4037 - return 0;
4038 - switch_amode = 1;
4039 - s390_noexec = 1;
4040 - return 0;
4041 -}
4042 -early_param("noexec", early_parse_noexec);
4043 -#endif /* CONFIG_S390_EXEC_PROTECT */
4044 -
4045 static void setup_addressing_mode(void)
4046 {
4047 if (s390_noexec) {
4048 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4049 index f4558cc..e461f37 100644
4050 --- a/arch/s390/mm/mmap.c
4051 +++ b/arch/s390/mm/mmap.c
4052 @@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4053 */
4054 if (mmap_is_legacy()) {
4055 mm->mmap_base = TASK_UNMAPPED_BASE;
4056 +
4057 +#ifdef CONFIG_PAX_RANDMMAP
4058 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4059 + mm->mmap_base += mm->delta_mmap;
4060 +#endif
4061 +
4062 mm->get_unmapped_area = arch_get_unmapped_area;
4063 mm->unmap_area = arch_unmap_area;
4064 } else {
4065 mm->mmap_base = mmap_base();
4066 +
4067 +#ifdef CONFIG_PAX_RANDMMAP
4068 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4069 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4070 +#endif
4071 +
4072 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4073 mm->unmap_area = arch_unmap_area_topdown;
4074 }
4075 @@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4076 */
4077 if (mmap_is_legacy()) {
4078 mm->mmap_base = TASK_UNMAPPED_BASE;
4079 +
4080 +#ifdef CONFIG_PAX_RANDMMAP
4081 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4082 + mm->mmap_base += mm->delta_mmap;
4083 +#endif
4084 +
4085 mm->get_unmapped_area = s390_get_unmapped_area;
4086 mm->unmap_area = arch_unmap_area;
4087 } else {
4088 mm->mmap_base = mmap_base();
4089 +
4090 +#ifdef CONFIG_PAX_RANDMMAP
4091 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4092 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4093 +#endif
4094 +
4095 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4096 mm->unmap_area = arch_unmap_area_topdown;
4097 }
4098 diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
4099 index 589d5c7..669e274 100644
4100 --- a/arch/score/include/asm/system.h
4101 +++ b/arch/score/include/asm/system.h
4102 @@ -17,7 +17,7 @@ do { \
4103 #define finish_arch_switch(prev) do {} while (0)
4104
4105 typedef void (*vi_handler_t)(void);
4106 -extern unsigned long arch_align_stack(unsigned long sp);
4107 +#define arch_align_stack(x) (x)
4108
4109 #define mb() barrier()
4110 #define rmb() barrier()
4111 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
4112 index 25d0803..d6c8e36 100644
4113 --- a/arch/score/kernel/process.c
4114 +++ b/arch/score/kernel/process.c
4115 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
4116
4117 return task_pt_regs(task)->cp0_epc;
4118 }
4119 -
4120 -unsigned long arch_align_stack(unsigned long sp)
4121 -{
4122 - return sp;
4123 -}
4124 diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c
4125 index d936c1a..304a252 100644
4126 --- a/arch/sh/boards/mach-hp6xx/pm.c
4127 +++ b/arch/sh/boards/mach-hp6xx/pm.c
4128 @@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_t state)
4129 return 0;
4130 }
4131
4132 -static struct platform_suspend_ops hp6x0_pm_ops = {
4133 +static const struct platform_suspend_ops hp6x0_pm_ops = {
4134 .enter = hp6x0_pm_enter,
4135 .valid = suspend_valid_only_mem,
4136 };
4137 diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
4138 index 8a8a993..7b3079b 100644
4139 --- a/arch/sh/kernel/cpu/sh4/sq.c
4140 +++ b/arch/sh/kernel/cpu/sh4/sq.c
4141 @@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[] = {
4142 NULL,
4143 };
4144
4145 -static struct sysfs_ops sq_sysfs_ops = {
4146 +static const struct sysfs_ops sq_sysfs_ops = {
4147 .show = sq_sysfs_show,
4148 .store = sq_sysfs_store,
4149 };
4150 diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
4151 index ee3c2aa..c49cee6 100644
4152 --- a/arch/sh/kernel/cpu/shmobile/pm.c
4153 +++ b/arch/sh/kernel/cpu/shmobile/pm.c
4154 @@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t state)
4155 return 0;
4156 }
4157
4158 -static struct platform_suspend_ops sh_pm_ops = {
4159 +static const struct platform_suspend_ops sh_pm_ops = {
4160 .enter = sh_pm_enter,
4161 .valid = suspend_valid_only_mem,
4162 };
4163 diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
4164 index 3e532d0..9faa306 100644
4165 --- a/arch/sh/kernel/kgdb.c
4166 +++ b/arch/sh/kernel/kgdb.c
4167 @@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
4168 {
4169 }
4170
4171 -struct kgdb_arch arch_kgdb_ops = {
4172 +const struct kgdb_arch arch_kgdb_ops = {
4173 /* Breakpoint instruction: trapa #0x3c */
4174 #ifdef CONFIG_CPU_LITTLE_ENDIAN
4175 .gdb_bpt_instr = { 0x3c, 0xc3 },
4176 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
4177 index afeb710..d1d1289 100644
4178 --- a/arch/sh/mm/mmap.c
4179 +++ b/arch/sh/mm/mmap.c
4180 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4181 addr = PAGE_ALIGN(addr);
4182
4183 vma = find_vma(mm, addr);
4184 - if (TASK_SIZE - len >= addr &&
4185 - (!vma || addr + len <= vma->vm_start))
4186 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4187 return addr;
4188 }
4189
4190 @@ -106,7 +105,7 @@ full_search:
4191 }
4192 return -ENOMEM;
4193 }
4194 - if (likely(!vma || addr + len <= vma->vm_start)) {
4195 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4196 /*
4197 * Remember the place where we stopped the search:
4198 */
4199 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4200 addr = PAGE_ALIGN(addr);
4201
4202 vma = find_vma(mm, addr);
4203 - if (TASK_SIZE - len >= addr &&
4204 - (!vma || addr + len <= vma->vm_start))
4205 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4206 return addr;
4207 }
4208
4209 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4210 /* make sure it can fit in the remaining address space */
4211 if (likely(addr > len)) {
4212 vma = find_vma(mm, addr-len);
4213 - if (!vma || addr <= vma->vm_start) {
4214 + if (check_heap_stack_gap(vma, addr - len, len)) {
4215 /* remember the address as a hint for next time */
4216 return (mm->free_area_cache = addr-len);
4217 }
4218 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4219 if (unlikely(mm->mmap_base < len))
4220 goto bottomup;
4221
4222 - addr = mm->mmap_base-len;
4223 - if (do_colour_align)
4224 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4225 + addr = mm->mmap_base - len;
4226
4227 do {
4228 + if (do_colour_align)
4229 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4230 /*
4231 * Lookup failure means no vma is above this address,
4232 * else if new region fits below vma->vm_start,
4233 * return with success:
4234 */
4235 vma = find_vma(mm, addr);
4236 - if (likely(!vma || addr+len <= vma->vm_start)) {
4237 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4238 /* remember the address as a hint for next time */
4239 return (mm->free_area_cache = addr);
4240 }
4241 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4242 mm->cached_hole_size = vma->vm_start - addr;
4243
4244 /* try just below the current vma->vm_start */
4245 - addr = vma->vm_start-len;
4246 - if (do_colour_align)
4247 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4248 - } while (likely(len < vma->vm_start));
4249 + addr = skip_heap_stack_gap(vma, len);
4250 + } while (!IS_ERR_VALUE(addr));
4251
4252 bottomup:
4253 /*
4254 diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
4255 index 113225b..7fd04e7 100644
4256 --- a/arch/sparc/Makefile
4257 +++ b/arch/sparc/Makefile
4258 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
4259 # Export what is needed by arch/sparc/boot/Makefile
4260 export VMLINUX_INIT VMLINUX_MAIN
4261 VMLINUX_INIT := $(head-y) $(init-y)
4262 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4263 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4264 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4265 VMLINUX_MAIN += $(drivers-y) $(net-y)
4266
4267 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
4268 index f5cc06f..f858d47 100644
4269 --- a/arch/sparc/include/asm/atomic_64.h
4270 +++ b/arch/sparc/include/asm/atomic_64.h
4271 @@ -14,18 +14,40 @@
4272 #define ATOMIC64_INIT(i) { (i) }
4273
4274 #define atomic_read(v) ((v)->counter)
4275 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
4276 +{
4277 + return v->counter;
4278 +}
4279 #define atomic64_read(v) ((v)->counter)
4280 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
4281 +{
4282 + return v->counter;
4283 +}
4284
4285 #define atomic_set(v, i) (((v)->counter) = i)
4286 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
4287 +{
4288 + v->counter = i;
4289 +}
4290 #define atomic64_set(v, i) (((v)->counter) = i)
4291 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
4292 +{
4293 + v->counter = i;
4294 +}
4295
4296 extern void atomic_add(int, atomic_t *);
4297 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
4298 extern void atomic64_add(long, atomic64_t *);
4299 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
4300 extern void atomic_sub(int, atomic_t *);
4301 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
4302 extern void atomic64_sub(long, atomic64_t *);
4303 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
4304
4305 extern int atomic_add_ret(int, atomic_t *);
4306 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
4307 extern long atomic64_add_ret(long, atomic64_t *);
4308 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
4309 extern int atomic_sub_ret(int, atomic_t *);
4310 extern long atomic64_sub_ret(long, atomic64_t *);
4311
4312 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4313 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
4314
4315 #define atomic_inc_return(v) atomic_add_ret(1, v)
4316 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
4317 +{
4318 + return atomic_add_ret_unchecked(1, v);
4319 +}
4320 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
4321 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
4322 +{
4323 + return atomic64_add_ret_unchecked(1, v);
4324 +}
4325
4326 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
4327 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
4328
4329 #define atomic_add_return(i, v) atomic_add_ret(i, v)
4330 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
4331 +{
4332 + return atomic_add_ret_unchecked(i, v);
4333 +}
4334 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
4335 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
4336 +{
4337 + return atomic64_add_ret_unchecked(i, v);
4338 +}
4339
4340 /*
4341 * atomic_inc_and_test - increment and test
4342 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4343 * other cases.
4344 */
4345 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
4346 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
4347 +{
4348 + return atomic_inc_return_unchecked(v) == 0;
4349 +}
4350 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
4351
4352 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
4353 @@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4354 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
4355
4356 #define atomic_inc(v) atomic_add(1, v)
4357 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
4358 +{
4359 + atomic_add_unchecked(1, v);
4360 +}
4361 #define atomic64_inc(v) atomic64_add(1, v)
4362 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
4363 +{
4364 + atomic64_add_unchecked(1, v);
4365 +}
4366
4367 #define atomic_dec(v) atomic_sub(1, v)
4368 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
4369 +{
4370 + atomic_sub_unchecked(1, v);
4371 +}
4372 #define atomic64_dec(v) atomic64_sub(1, v)
4373 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
4374 +{
4375 + atomic64_sub_unchecked(1, v);
4376 +}
4377
4378 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
4379 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
4380
4381 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
4382 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
4383 +{
4384 + return cmpxchg(&v->counter, old, new);
4385 +}
4386 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
4387 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
4388 +{
4389 + return xchg(&v->counter, new);
4390 +}
4391
4392 static inline int atomic_add_unless(atomic_t *v, int a, int u)
4393 {
4394 - int c, old;
4395 + int c, old, new;
4396 c = atomic_read(v);
4397 for (;;) {
4398 - if (unlikely(c == (u)))
4399 + if (unlikely(c == u))
4400 break;
4401 - old = atomic_cmpxchg((v), c, c + (a));
4402 +
4403 + asm volatile("addcc %2, %0, %0\n"
4404 +
4405 +#ifdef CONFIG_PAX_REFCOUNT
4406 + "tvs %%icc, 6\n"
4407 +#endif
4408 +
4409 + : "=r" (new)
4410 + : "0" (c), "ir" (a)
4411 + : "cc");
4412 +
4413 + old = atomic_cmpxchg(v, c, new);
4414 if (likely(old == c))
4415 break;
4416 c = old;
4417 }
4418 - return c != (u);
4419 + return c != u;
4420 }
4421
4422 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
4423 @@ -90,20 +167,35 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
4424 #define atomic64_cmpxchg(v, o, n) \
4425 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
4426 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
4427 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
4428 +{
4429 + return xchg(&v->counter, new);
4430 +}
4431
4432 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
4433 {
4434 - long c, old;
4435 + long c, old, new;
4436 c = atomic64_read(v);
4437 for (;;) {
4438 - if (unlikely(c == (u)))
4439 + if (unlikely(c == u))
4440 break;
4441 - old = atomic64_cmpxchg((v), c, c + (a));
4442 +
4443 + asm volatile("addcc %2, %0, %0\n"
4444 +
4445 +#ifdef CONFIG_PAX_REFCOUNT
4446 + "tvs %%xcc, 6\n"
4447 +#endif
4448 +
4449 + : "=r" (new)
4450 + : "0" (c), "ir" (a)
4451 + : "cc");
4452 +
4453 + old = atomic64_cmpxchg(v, c, new);
4454 if (likely(old == c))
4455 break;
4456 c = old;
4457 }
4458 - return c != (u);
4459 + return c != u;
4460 }
4461
4462 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4463 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
4464 index 41f85ae..fb54d5e 100644
4465 --- a/arch/sparc/include/asm/cache.h
4466 +++ b/arch/sparc/include/asm/cache.h
4467 @@ -8,7 +8,7 @@
4468 #define _SPARC_CACHE_H
4469
4470 #define L1_CACHE_SHIFT 5
4471 -#define L1_CACHE_BYTES 32
4472 +#define L1_CACHE_BYTES 32UL
4473 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
4474
4475 #ifdef CONFIG_SPARC32
4476 diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
4477 index 5a8c308..38def92 100644
4478 --- a/arch/sparc/include/asm/dma-mapping.h
4479 +++ b/arch/sparc/include/asm/dma-mapping.h
4480 @@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask);
4481 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
4482 #define dma_is_consistent(d, h) (1)
4483
4484 -extern struct dma_map_ops *dma_ops, pci32_dma_ops;
4485 +extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
4486 extern struct bus_type pci_bus_type;
4487
4488 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
4489 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
4490 {
4491 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
4492 if (dev->bus == &pci_bus_type)
4493 @@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
4494 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
4495 dma_addr_t *dma_handle, gfp_t flag)
4496 {
4497 - struct dma_map_ops *ops = get_dma_ops(dev);
4498 + const struct dma_map_ops *ops = get_dma_ops(dev);
4499 void *cpu_addr;
4500
4501 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
4502 @@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
4503 static inline void dma_free_coherent(struct device *dev, size_t size,
4504 void *cpu_addr, dma_addr_t dma_handle)
4505 {
4506 - struct dma_map_ops *ops = get_dma_ops(dev);
4507 + const struct dma_map_ops *ops = get_dma_ops(dev);
4508
4509 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
4510 ops->free_coherent(dev, size, cpu_addr, dma_handle);
4511 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
4512 index 381a1b5..b97e3ff 100644
4513 --- a/arch/sparc/include/asm/elf_32.h
4514 +++ b/arch/sparc/include/asm/elf_32.h
4515 @@ -116,6 +116,13 @@ typedef struct {
4516
4517 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
4518
4519 +#ifdef CONFIG_PAX_ASLR
4520 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
4521 +
4522 +#define PAX_DELTA_MMAP_LEN 16
4523 +#define PAX_DELTA_STACK_LEN 16
4524 +#endif
4525 +
4526 /* This yields a mask that user programs can use to figure out what
4527 instruction set this cpu supports. This can NOT be done in userspace
4528 on Sparc. */
4529 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
4530 index 9968085..c2106ef 100644
4531 --- a/arch/sparc/include/asm/elf_64.h
4532 +++ b/arch/sparc/include/asm/elf_64.h
4533 @@ -163,6 +163,12 @@ typedef struct {
4534 #define ELF_ET_DYN_BASE 0x0000010000000000UL
4535 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
4536
4537 +#ifdef CONFIG_PAX_ASLR
4538 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
4539 +
4540 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
4541 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
4542 +#endif
4543
4544 /* This yields a mask that user programs can use to figure out what
4545 instruction set this cpu supports. */
4546 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
4547 index e0cabe7..efd60f1 100644
4548 --- a/arch/sparc/include/asm/pgtable_32.h
4549 +++ b/arch/sparc/include/asm/pgtable_32.h
4550 @@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
4551 BTFIXUPDEF_INT(page_none)
4552 BTFIXUPDEF_INT(page_copy)
4553 BTFIXUPDEF_INT(page_readonly)
4554 +
4555 +#ifdef CONFIG_PAX_PAGEEXEC
4556 +BTFIXUPDEF_INT(page_shared_noexec)
4557 +BTFIXUPDEF_INT(page_copy_noexec)
4558 +BTFIXUPDEF_INT(page_readonly_noexec)
4559 +#endif
4560 +
4561 BTFIXUPDEF_INT(page_kernel)
4562
4563 #define PMD_SHIFT SUN4C_PMD_SHIFT
4564 @@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
4565 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
4566 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
4567
4568 +#ifdef CONFIG_PAX_PAGEEXEC
4569 +extern pgprot_t PAGE_SHARED_NOEXEC;
4570 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
4571 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
4572 +#else
4573 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
4574 +# define PAGE_COPY_NOEXEC PAGE_COPY
4575 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
4576 +#endif
4577 +
4578 extern unsigned long page_kernel;
4579
4580 #ifdef MODULE
4581 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
4582 index 1407c07..7e10231 100644
4583 --- a/arch/sparc/include/asm/pgtsrmmu.h
4584 +++ b/arch/sparc/include/asm/pgtsrmmu.h
4585 @@ -115,6 +115,13 @@
4586 SRMMU_EXEC | SRMMU_REF)
4587 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
4588 SRMMU_EXEC | SRMMU_REF)
4589 +
4590 +#ifdef CONFIG_PAX_PAGEEXEC
4591 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
4592 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4593 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4594 +#endif
4595 +
4596 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
4597 SRMMU_DIRTY | SRMMU_REF)
4598
4599 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
4600 index 43e5147..47622a1 100644
4601 --- a/arch/sparc/include/asm/spinlock_64.h
4602 +++ b/arch/sparc/include/asm/spinlock_64.h
4603 @@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
4604
4605 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
4606
4607 -static void inline arch_read_lock(raw_rwlock_t *lock)
4608 +static inline void arch_read_lock(raw_rwlock_t *lock)
4609 {
4610 unsigned long tmp1, tmp2;
4611
4612 __asm__ __volatile__ (
4613 "1: ldsw [%2], %0\n"
4614 " brlz,pn %0, 2f\n"
4615 -"4: add %0, 1, %1\n"
4616 +"4: addcc %0, 1, %1\n"
4617 +
4618 +#ifdef CONFIG_PAX_REFCOUNT
4619 +" tvs %%icc, 6\n"
4620 +#endif
4621 +
4622 " cas [%2], %0, %1\n"
4623 " cmp %0, %1\n"
4624 " bne,pn %%icc, 1b\n"
4625 @@ -112,10 +117,10 @@ static void inline arch_read_lock(raw_rwlock_t *lock)
4626 " .previous"
4627 : "=&r" (tmp1), "=&r" (tmp2)
4628 : "r" (lock)
4629 - : "memory");
4630 + : "memory", "cc");
4631 }
4632
4633 -static int inline arch_read_trylock(raw_rwlock_t *lock)
4634 +static inline int arch_read_trylock(raw_rwlock_t *lock)
4635 {
4636 int tmp1, tmp2;
4637
4638 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
4639 "1: ldsw [%2], %0\n"
4640 " brlz,a,pn %0, 2f\n"
4641 " mov 0, %0\n"
4642 -" add %0, 1, %1\n"
4643 +" addcc %0, 1, %1\n"
4644 +
4645 +#ifdef CONFIG_PAX_REFCOUNT
4646 +" tvs %%icc, 6\n"
4647 +#endif
4648 +
4649 " cas [%2], %0, %1\n"
4650 " cmp %0, %1\n"
4651 " bne,pn %%icc, 1b\n"
4652 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
4653 return tmp1;
4654 }
4655
4656 -static void inline arch_read_unlock(raw_rwlock_t *lock)
4657 +static inline void arch_read_unlock(raw_rwlock_t *lock)
4658 {
4659 unsigned long tmp1, tmp2;
4660
4661 __asm__ __volatile__(
4662 "1: lduw [%2], %0\n"
4663 -" sub %0, 1, %1\n"
4664 +" subcc %0, 1, %1\n"
4665 +
4666 +#ifdef CONFIG_PAX_REFCOUNT
4667 +" tvs %%icc, 6\n"
4668 +#endif
4669 +
4670 " cas [%2], %0, %1\n"
4671 " cmp %0, %1\n"
4672 " bne,pn %%xcc, 1b\n"
4673 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock)
4674 : "memory");
4675 }
4676
4677 -static void inline arch_write_lock(raw_rwlock_t *lock)
4678 +static inline void arch_write_lock(raw_rwlock_t *lock)
4679 {
4680 unsigned long mask, tmp1, tmp2;
4681
4682 @@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock)
4683 : "memory");
4684 }
4685
4686 -static void inline arch_write_unlock(raw_rwlock_t *lock)
4687 +static inline void arch_write_unlock(raw_rwlock_t *lock)
4688 {
4689 __asm__ __volatile__(
4690 " stw %%g0, [%0]"
4691 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock)
4692 : "memory");
4693 }
4694
4695 -static int inline arch_write_trylock(raw_rwlock_t *lock)
4696 +static inline int arch_write_trylock(raw_rwlock_t *lock)
4697 {
4698 unsigned long mask, tmp1, tmp2, result;
4699
4700 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
4701 index 844d73a..f787fb9 100644
4702 --- a/arch/sparc/include/asm/thread_info_32.h
4703 +++ b/arch/sparc/include/asm/thread_info_32.h
4704 @@ -50,6 +50,8 @@ struct thread_info {
4705 unsigned long w_saved;
4706
4707 struct restart_block restart_block;
4708 +
4709 + unsigned long lowest_stack;
4710 };
4711
4712 /*
4713 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
4714 index f78ad9a..9f55fc7 100644
4715 --- a/arch/sparc/include/asm/thread_info_64.h
4716 +++ b/arch/sparc/include/asm/thread_info_64.h
4717 @@ -68,6 +68,8 @@ struct thread_info {
4718 struct pt_regs *kern_una_regs;
4719 unsigned int kern_una_insn;
4720
4721 + unsigned long lowest_stack;
4722 +
4723 unsigned long fpregs[0] __attribute__ ((aligned(64)));
4724 };
4725
4726 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
4727 index e88fbe5..96b0ce5 100644
4728 --- a/arch/sparc/include/asm/uaccess.h
4729 +++ b/arch/sparc/include/asm/uaccess.h
4730 @@ -1,5 +1,13 @@
4731 #ifndef ___ASM_SPARC_UACCESS_H
4732 #define ___ASM_SPARC_UACCESS_H
4733 +
4734 +#ifdef __KERNEL__
4735 +#ifndef __ASSEMBLY__
4736 +#include <linux/types.h>
4737 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
4738 +#endif
4739 +#endif
4740 +
4741 #if defined(__sparc__) && defined(__arch64__)
4742 #include <asm/uaccess_64.h>
4743 #else
4744 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
4745 index 8303ac4..07f333d 100644
4746 --- a/arch/sparc/include/asm/uaccess_32.h
4747 +++ b/arch/sparc/include/asm/uaccess_32.h
4748 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
4749
4750 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4751 {
4752 - if (n && __access_ok((unsigned long) to, n))
4753 + if ((long)n < 0)
4754 + return n;
4755 +
4756 + if (n && __access_ok((unsigned long) to, n)) {
4757 + if (!__builtin_constant_p(n))
4758 + check_object_size(from, n, true);
4759 return __copy_user(to, (__force void __user *) from, n);
4760 - else
4761 + } else
4762 return n;
4763 }
4764
4765 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
4766 {
4767 + if ((long)n < 0)
4768 + return n;
4769 +
4770 + if (!__builtin_constant_p(n))
4771 + check_object_size(from, n, true);
4772 +
4773 return __copy_user(to, (__force void __user *) from, n);
4774 }
4775
4776 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4777 {
4778 - if (n && __access_ok((unsigned long) from, n))
4779 + if ((long)n < 0)
4780 + return n;
4781 +
4782 + if (n && __access_ok((unsigned long) from, n)) {
4783 + if (!__builtin_constant_p(n))
4784 + check_object_size(to, n, false);
4785 return __copy_user((__force void __user *) to, from, n);
4786 - else
4787 + } else
4788 return n;
4789 }
4790
4791 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
4792 {
4793 + if ((long)n < 0)
4794 + return n;
4795 +
4796 return __copy_user((__force void __user *) to, from, n);
4797 }
4798
4799 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
4800 index 9ea271e..7b8a271 100644
4801 --- a/arch/sparc/include/asm/uaccess_64.h
4802 +++ b/arch/sparc/include/asm/uaccess_64.h
4803 @@ -9,6 +9,7 @@
4804 #include <linux/compiler.h>
4805 #include <linux/string.h>
4806 #include <linux/thread_info.h>
4807 +#include <linux/kernel.h>
4808 #include <asm/asi.h>
4809 #include <asm/system.h>
4810 #include <asm/spitfire.h>
4811 @@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
4812 static inline unsigned long __must_check
4813 copy_from_user(void *to, const void __user *from, unsigned long size)
4814 {
4815 - unsigned long ret = ___copy_from_user(to, from, size);
4816 + unsigned long ret;
4817
4818 + if ((long)size < 0 || size > INT_MAX)
4819 + return size;
4820 +
4821 + if (!__builtin_constant_p(size))
4822 + check_object_size(to, size, false);
4823 +
4824 + ret = ___copy_from_user(to, from, size);
4825 if (unlikely(ret))
4826 ret = copy_from_user_fixup(to, from, size);
4827 return ret;
4828 @@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
4829 static inline unsigned long __must_check
4830 copy_to_user(void __user *to, const void *from, unsigned long size)
4831 {
4832 - unsigned long ret = ___copy_to_user(to, from, size);
4833 + unsigned long ret;
4834
4835 + if ((long)size < 0 || size > INT_MAX)
4836 + return size;
4837 +
4838 + if (!__builtin_constant_p(size))
4839 + check_object_size(from, size, true);
4840 +
4841 + ret = ___copy_to_user(to, from, size);
4842 if (unlikely(ret))
4843 ret = copy_to_user_fixup(to, from, size);
4844 return ret;
4845 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
4846 index 2782681..77ded84 100644
4847 --- a/arch/sparc/kernel/Makefile
4848 +++ b/arch/sparc/kernel/Makefile
4849 @@ -3,7 +3,7 @@
4850 #
4851
4852 asflags-y := -ansi
4853 -ccflags-y := -Werror
4854 +#ccflags-y := -Werror
4855
4856 extra-y := head_$(BITS).o
4857 extra-y += init_task.o
4858 diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
4859 index 7690cc2..ece64c9 100644
4860 --- a/arch/sparc/kernel/iommu.c
4861 +++ b/arch/sparc/kernel/iommu.c
4862 @@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
4863 spin_unlock_irqrestore(&iommu->lock, flags);
4864 }
4865
4866 -static struct dma_map_ops sun4u_dma_ops = {
4867 +static const struct dma_map_ops sun4u_dma_ops = {
4868 .alloc_coherent = dma_4u_alloc_coherent,
4869 .free_coherent = dma_4u_free_coherent,
4870 .map_page = dma_4u_map_page,
4871 @@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops = {
4872 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
4873 };
4874
4875 -struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4876 +const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4877 EXPORT_SYMBOL(dma_ops);
4878
4879 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
4880 diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
4881 index 9f61fd8..bd048db 100644
4882 --- a/arch/sparc/kernel/ioport.c
4883 +++ b/arch/sparc/kernel/ioport.c
4884 @@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
4885 BUG();
4886 }
4887
4888 -struct dma_map_ops sbus_dma_ops = {
4889 +const struct dma_map_ops sbus_dma_ops = {
4890 .alloc_coherent = sbus_alloc_coherent,
4891 .free_coherent = sbus_free_coherent,
4892 .map_page = sbus_map_page,
4893 @@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
4894 .sync_sg_for_device = sbus_sync_sg_for_device,
4895 };
4896
4897 -struct dma_map_ops *dma_ops = &sbus_dma_ops;
4898 +const struct dma_map_ops *dma_ops = &sbus_dma_ops;
4899 EXPORT_SYMBOL(dma_ops);
4900
4901 static int __init sparc_register_ioport(void)
4902 @@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
4903 }
4904 }
4905
4906 -struct dma_map_ops pci32_dma_ops = {
4907 +const struct dma_map_ops pci32_dma_ops = {
4908 .alloc_coherent = pci32_alloc_coherent,
4909 .free_coherent = pci32_free_coherent,
4910 .map_page = pci32_map_page,
4911 diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
4912 index 04df4ed..55c4b6e 100644
4913 --- a/arch/sparc/kernel/kgdb_32.c
4914 +++ b/arch/sparc/kernel/kgdb_32.c
4915 @@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
4916 {
4917 }
4918
4919 -struct kgdb_arch arch_kgdb_ops = {
4920 +const struct kgdb_arch arch_kgdb_ops = {
4921 /* Breakpoint instruction: ta 0x7d */
4922 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
4923 };
4924 diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
4925 index f5a0fd4..d886f71 100644
4926 --- a/arch/sparc/kernel/kgdb_64.c
4927 +++ b/arch/sparc/kernel/kgdb_64.c
4928 @@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
4929 {
4930 }
4931
4932 -struct kgdb_arch arch_kgdb_ops = {
4933 +const struct kgdb_arch arch_kgdb_ops = {
4934 /* Breakpoint instruction: ta 0x72 */
4935 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
4936 };
4937 diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
4938 index 23c33ff..d137fbd 100644
4939 --- a/arch/sparc/kernel/pci_sun4v.c
4940 +++ b/arch/sparc/kernel/pci_sun4v.c
4941 @@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
4942 spin_unlock_irqrestore(&iommu->lock, flags);
4943 }
4944
4945 -static struct dma_map_ops sun4v_dma_ops = {
4946 +static const struct dma_map_ops sun4v_dma_ops = {
4947 .alloc_coherent = dma_4v_alloc_coherent,
4948 .free_coherent = dma_4v_free_coherent,
4949 .map_page = dma_4v_map_page,
4950 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
4951 index c49865b..b41a81b 100644
4952 --- a/arch/sparc/kernel/process_32.c
4953 +++ b/arch/sparc/kernel/process_32.c
4954 @@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
4955 rw->ins[4], rw->ins[5],
4956 rw->ins[6],
4957 rw->ins[7]);
4958 - printk("%pS\n", (void *) rw->ins[7]);
4959 + printk("%pA\n", (void *) rw->ins[7]);
4960 rw = (struct reg_window32 *) rw->ins[6];
4961 }
4962 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4963 @@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
4964
4965 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4966 r->psr, r->pc, r->npc, r->y, print_tainted());
4967 - printk("PC: <%pS>\n", (void *) r->pc);
4968 + printk("PC: <%pA>\n", (void *) r->pc);
4969 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4970 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4971 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4972 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4973 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4974 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4975 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4976 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4977
4978 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4979 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4980 @@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
4981 rw = (struct reg_window32 *) fp;
4982 pc = rw->ins[7];
4983 printk("[%08lx : ", pc);
4984 - printk("%pS ] ", (void *) pc);
4985 + printk("%pA ] ", (void *) pc);
4986 fp = rw->ins[6];
4987 } while (++count < 16);
4988 printk("\n");
4989 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
4990 index cb70476..3d0c191 100644
4991 --- a/arch/sparc/kernel/process_64.c
4992 +++ b/arch/sparc/kernel/process_64.c
4993 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
4994 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4995 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4996 if (regs->tstate & TSTATE_PRIV)
4997 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4998 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4999 }
5000
5001 void show_regs(struct pt_regs *regs)
5002 {
5003 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5004 regs->tpc, regs->tnpc, regs->y, print_tainted());
5005 - printk("TPC: <%pS>\n", (void *) regs->tpc);
5006 + printk("TPC: <%pA>\n", (void *) regs->tpc);
5007 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5008 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5009 regs->u_regs[3]);
5010 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
5011 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5012 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5013 regs->u_regs[15]);
5014 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5015 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5016 show_regwindow(regs);
5017 }
5018
5019 @@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void)
5020 ((tp && tp->task) ? tp->task->pid : -1));
5021
5022 if (gp->tstate & TSTATE_PRIV) {
5023 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5024 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5025 (void *) gp->tpc,
5026 (void *) gp->o7,
5027 (void *) gp->i7,
5028 diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
5029 index 6edc4e5..06a69b4 100644
5030 --- a/arch/sparc/kernel/sigutil_64.c
5031 +++ b/arch/sparc/kernel/sigutil_64.c
5032 @@ -2,6 +2,7 @@
5033 #include <linux/types.h>
5034 #include <linux/thread_info.h>
5035 #include <linux/uaccess.h>
5036 +#include <linux/errno.h>
5037
5038 #include <asm/sigcontext.h>
5039 #include <asm/fpumacro.h>
5040 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5041 index 3a82e65..ce0a53a 100644
5042 --- a/arch/sparc/kernel/sys_sparc_32.c
5043 +++ b/arch/sparc/kernel/sys_sparc_32.c
5044 @@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5045 if (ARCH_SUN4C && len > 0x20000000)
5046 return -ENOMEM;
5047 if (!addr)
5048 - addr = TASK_UNMAPPED_BASE;
5049 + addr = current->mm->mmap_base;
5050
5051 if (flags & MAP_SHARED)
5052 addr = COLOUR_ALIGN(addr);
5053 @@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5054 }
5055 if (TASK_SIZE - PAGE_SIZE - len < addr)
5056 return -ENOMEM;
5057 - if (!vmm || addr + len <= vmm->vm_start)
5058 + if (check_heap_stack_gap(vmm, addr, len))
5059 return addr;
5060 addr = vmm->vm_end;
5061 if (flags & MAP_SHARED)
5062 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5063 index cfa0e19..98972ac 100644
5064 --- a/arch/sparc/kernel/sys_sparc_64.c
5065 +++ b/arch/sparc/kernel/sys_sparc_64.c
5066 @@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5067 /* We do not accept a shared mapping if it would violate
5068 * cache aliasing constraints.
5069 */
5070 - if ((flags & MAP_SHARED) &&
5071 + if ((filp || (flags & MAP_SHARED)) &&
5072 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5073 return -EINVAL;
5074 return addr;
5075 @@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5076 if (filp || (flags & MAP_SHARED))
5077 do_color_align = 1;
5078
5079 +#ifdef CONFIG_PAX_RANDMMAP
5080 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5081 +#endif
5082 +
5083 if (addr) {
5084 if (do_color_align)
5085 addr = COLOUR_ALIGN(addr, pgoff);
5086 @@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5087 addr = PAGE_ALIGN(addr);
5088
5089 vma = find_vma(mm, addr);
5090 - if (task_size - len >= addr &&
5091 - (!vma || addr + len <= vma->vm_start))
5092 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5093 return addr;
5094 }
5095
5096 if (len > mm->cached_hole_size) {
5097 - start_addr = addr = mm->free_area_cache;
5098 + start_addr = addr = mm->free_area_cache;
5099 } else {
5100 - start_addr = addr = TASK_UNMAPPED_BASE;
5101 + start_addr = addr = mm->mmap_base;
5102 mm->cached_hole_size = 0;
5103 }
5104
5105 @@ -175,14 +178,14 @@ full_search:
5106 vma = find_vma(mm, VA_EXCLUDE_END);
5107 }
5108 if (unlikely(task_size < addr)) {
5109 - if (start_addr != TASK_UNMAPPED_BASE) {
5110 - start_addr = addr = TASK_UNMAPPED_BASE;
5111 + if (start_addr != mm->mmap_base) {
5112 + start_addr = addr = mm->mmap_base;
5113 mm->cached_hole_size = 0;
5114 goto full_search;
5115 }
5116 return -ENOMEM;
5117 }
5118 - if (likely(!vma || addr + len <= vma->vm_start)) {
5119 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5120 /*
5121 * Remember the place where we stopped the search:
5122 */
5123 @@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5124 /* We do not accept a shared mapping if it would violate
5125 * cache aliasing constraints.
5126 */
5127 - if ((flags & MAP_SHARED) &&
5128 + if ((filp || (flags & MAP_SHARED)) &&
5129 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5130 return -EINVAL;
5131 return addr;
5132 @@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5133 addr = PAGE_ALIGN(addr);
5134
5135 vma = find_vma(mm, addr);
5136 - if (task_size - len >= addr &&
5137 - (!vma || addr + len <= vma->vm_start))
5138 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5139 return addr;
5140 }
5141
5142 @@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5143 /* make sure it can fit in the remaining address space */
5144 if (likely(addr > len)) {
5145 vma = find_vma(mm, addr-len);
5146 - if (!vma || addr <= vma->vm_start) {
5147 + if (check_heap_stack_gap(vma, addr - len, len)) {
5148 /* remember the address as a hint for next time */
5149 return (mm->free_area_cache = addr-len);
5150 }
5151 @@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5152 if (unlikely(mm->mmap_base < len))
5153 goto bottomup;
5154
5155 - addr = mm->mmap_base-len;
5156 - if (do_color_align)
5157 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5158 + addr = mm->mmap_base - len;
5159
5160 do {
5161 + if (do_color_align)
5162 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5163 /*
5164 * Lookup failure means no vma is above this address,
5165 * else if new region fits below vma->vm_start,
5166 * return with success:
5167 */
5168 vma = find_vma(mm, addr);
5169 - if (likely(!vma || addr+len <= vma->vm_start)) {
5170 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5171 /* remember the address as a hint for next time */
5172 return (mm->free_area_cache = addr);
5173 }
5174 @@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5175 mm->cached_hole_size = vma->vm_start - addr;
5176
5177 /* try just below the current vma->vm_start */
5178 - addr = vma->vm_start-len;
5179 - if (do_color_align)
5180 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5181 - } while (likely(len < vma->vm_start));
5182 + addr = skip_heap_stack_gap(vma, len);
5183 + } while (!IS_ERR_VALUE(addr));
5184
5185 bottomup:
5186 /*
5187 @@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5188 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
5189 sysctl_legacy_va_layout) {
5190 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5191 +
5192 +#ifdef CONFIG_PAX_RANDMMAP
5193 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5194 + mm->mmap_base += mm->delta_mmap;
5195 +#endif
5196 +
5197 mm->get_unmapped_area = arch_get_unmapped_area;
5198 mm->unmap_area = arch_unmap_area;
5199 } else {
5200 @@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5201 gap = (task_size / 6 * 5);
5202
5203 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5204 +
5205 +#ifdef CONFIG_PAX_RANDMMAP
5206 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5207 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5208 +#endif
5209 +
5210 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5211 mm->unmap_area = arch_unmap_area_topdown;
5212 }
5213 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
5214 index c0490c7..84959d1 100644
5215 --- a/arch/sparc/kernel/traps_32.c
5216 +++ b/arch/sparc/kernel/traps_32.c
5217 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
5218 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
5219 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
5220
5221 +extern void gr_handle_kernel_exploit(void);
5222 +
5223 void die_if_kernel(char *str, struct pt_regs *regs)
5224 {
5225 static int die_counter;
5226 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5227 count++ < 30 &&
5228 (((unsigned long) rw) >= PAGE_OFFSET) &&
5229 !(((unsigned long) rw) & 0x7)) {
5230 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
5231 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
5232 (void *) rw->ins[7]);
5233 rw = (struct reg_window32 *)rw->ins[6];
5234 }
5235 }
5236 printk("Instruction DUMP:");
5237 instruction_dump ((unsigned long *) regs->pc);
5238 - if(regs->psr & PSR_PS)
5239 + if(regs->psr & PSR_PS) {
5240 + gr_handle_kernel_exploit();
5241 do_exit(SIGKILL);
5242 + }
5243 do_exit(SIGSEGV);
5244 }
5245
5246 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
5247 index 10f7bb9..cdb6793 100644
5248 --- a/arch/sparc/kernel/traps_64.c
5249 +++ b/arch/sparc/kernel/traps_64.c
5250 @@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
5251 i + 1,
5252 p->trapstack[i].tstate, p->trapstack[i].tpc,
5253 p->trapstack[i].tnpc, p->trapstack[i].tt);
5254 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
5255 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
5256 }
5257 }
5258
5259 @@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
5260
5261 lvl -= 0x100;
5262 if (regs->tstate & TSTATE_PRIV) {
5263 +
5264 +#ifdef CONFIG_PAX_REFCOUNT
5265 + if (lvl == 6)
5266 + pax_report_refcount_overflow(regs);
5267 +#endif
5268 +
5269 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
5270 die_if_kernel(buffer, regs);
5271 }
5272 @@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
5273 void bad_trap_tl1(struct pt_regs *regs, long lvl)
5274 {
5275 char buffer[32];
5276 -
5277 +
5278 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
5279 0, lvl, SIGTRAP) == NOTIFY_STOP)
5280 return;
5281
5282 +#ifdef CONFIG_PAX_REFCOUNT
5283 + if (lvl == 6)
5284 + pax_report_refcount_overflow(regs);
5285 +#endif
5286 +
5287 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
5288
5289 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
5290 @@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
5291 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
5292 printk("%s" "ERROR(%d): ",
5293 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
5294 - printk("TPC<%pS>\n", (void *) regs->tpc);
5295 + printk("TPC<%pA>\n", (void *) regs->tpc);
5296 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
5297 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
5298 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
5299 @@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5300 smp_processor_id(),
5301 (type & 0x1) ? 'I' : 'D',
5302 regs->tpc);
5303 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
5304 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
5305 panic("Irrecoverable Cheetah+ parity error.");
5306 }
5307
5308 @@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5309 smp_processor_id(),
5310 (type & 0x1) ? 'I' : 'D',
5311 regs->tpc);
5312 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
5313 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
5314 }
5315
5316 struct sun4v_error_entry {
5317 @@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
5318
5319 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
5320 regs->tpc, tl);
5321 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
5322 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
5323 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5324 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
5325 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
5326 (void *) regs->u_regs[UREG_I7]);
5327 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
5328 "pte[%lx] error[%lx]\n",
5329 @@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
5330
5331 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
5332 regs->tpc, tl);
5333 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
5334 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
5335 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5336 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
5337 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
5338 (void *) regs->u_regs[UREG_I7]);
5339 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
5340 "pte[%lx] error[%lx]\n",
5341 @@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5342 fp = (unsigned long)sf->fp + STACK_BIAS;
5343 }
5344
5345 - printk(" [%016lx] %pS\n", pc, (void *) pc);
5346 + printk(" [%016lx] %pA\n", pc, (void *) pc);
5347 } while (++count < 16);
5348 }
5349
5350 @@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
5351 return (struct reg_window *) (fp + STACK_BIAS);
5352 }
5353
5354 +extern void gr_handle_kernel_exploit(void);
5355 +
5356 void die_if_kernel(char *str, struct pt_regs *regs)
5357 {
5358 static int die_counter;
5359 @@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5360 while (rw &&
5361 count++ < 30&&
5362 is_kernel_stack(current, rw)) {
5363 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
5364 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
5365 (void *) rw->ins[7]);
5366
5367 rw = kernel_stack_up(rw);
5368 @@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5369 }
5370 user_instruction_dump ((unsigned int __user *) regs->tpc);
5371 }
5372 - if (regs->tstate & TSTATE_PRIV)
5373 + if (regs->tstate & TSTATE_PRIV) {
5374 + gr_handle_kernel_exploit();
5375 do_exit(SIGKILL);
5376 + }
5377 +
5378 do_exit(SIGSEGV);
5379 }
5380 EXPORT_SYMBOL(die_if_kernel);
5381 diff --git a/arch/sparc/kernel/una_asm_64.S b/arch/sparc/kernel/una_asm_64.S
5382 index be183fe..1c8d332 100644
5383 --- a/arch/sparc/kernel/una_asm_64.S
5384 +++ b/arch/sparc/kernel/una_asm_64.S
5385 @@ -127,7 +127,7 @@ do_int_load:
5386 wr %o5, 0x0, %asi
5387 retl
5388 mov 0, %o0
5389 - .size __do_int_load, .-__do_int_load
5390 + .size do_int_load, .-do_int_load
5391
5392 .section __ex_table,"a"
5393 .word 4b, __retl_efault
5394 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
5395 index 3792099..2af17d8 100644
5396 --- a/arch/sparc/kernel/unaligned_64.c
5397 +++ b/arch/sparc/kernel/unaligned_64.c
5398 @@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs *regs)
5399 if (count < 5) {
5400 last_time = jiffies;
5401 count++;
5402 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
5403 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
5404 regs->tpc, (void *) regs->tpc);
5405 }
5406 }
5407 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
5408 index e75faf0..24f12f9 100644
5409 --- a/arch/sparc/lib/Makefile
5410 +++ b/arch/sparc/lib/Makefile
5411 @@ -2,7 +2,7 @@
5412 #
5413
5414 asflags-y := -ansi -DST_DIV0=0x02
5415 -ccflags-y := -Werror
5416 +#ccflags-y := -Werror
5417
5418 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
5419 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
5420 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
5421 index 0268210..f0291ca 100644
5422 --- a/arch/sparc/lib/atomic_64.S
5423 +++ b/arch/sparc/lib/atomic_64.S
5424 @@ -18,7 +18,12 @@
5425 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5426 BACKOFF_SETUP(%o2)
5427 1: lduw [%o1], %g1
5428 - add %g1, %o0, %g7
5429 + addcc %g1, %o0, %g7
5430 +
5431 +#ifdef CONFIG_PAX_REFCOUNT
5432 + tvs %icc, 6
5433 +#endif
5434 +
5435 cas [%o1], %g1, %g7
5436 cmp %g1, %g7
5437 bne,pn %icc, 2f
5438 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5439 2: BACKOFF_SPIN(%o2, %o3, 1b)
5440 .size atomic_add, .-atomic_add
5441
5442 + .globl atomic_add_unchecked
5443 + .type atomic_add_unchecked,#function
5444 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5445 + BACKOFF_SETUP(%o2)
5446 +1: lduw [%o1], %g1
5447 + add %g1, %o0, %g7
5448 + cas [%o1], %g1, %g7
5449 + cmp %g1, %g7
5450 + bne,pn %icc, 2f
5451 + nop
5452 + retl
5453 + nop
5454 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5455 + .size atomic_add_unchecked, .-atomic_add_unchecked
5456 +
5457 .globl atomic_sub
5458 .type atomic_sub,#function
5459 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5460 BACKOFF_SETUP(%o2)
5461 1: lduw [%o1], %g1
5462 - sub %g1, %o0, %g7
5463 + subcc %g1, %o0, %g7
5464 +
5465 +#ifdef CONFIG_PAX_REFCOUNT
5466 + tvs %icc, 6
5467 +#endif
5468 +
5469 cas [%o1], %g1, %g7
5470 cmp %g1, %g7
5471 bne,pn %icc, 2f
5472 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5473 2: BACKOFF_SPIN(%o2, %o3, 1b)
5474 .size atomic_sub, .-atomic_sub
5475
5476 + .globl atomic_sub_unchecked
5477 + .type atomic_sub_unchecked,#function
5478 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5479 + BACKOFF_SETUP(%o2)
5480 +1: lduw [%o1], %g1
5481 + sub %g1, %o0, %g7
5482 + cas [%o1], %g1, %g7
5483 + cmp %g1, %g7
5484 + bne,pn %icc, 2f
5485 + nop
5486 + retl
5487 + nop
5488 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5489 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
5490 +
5491 .globl atomic_add_ret
5492 .type atomic_add_ret,#function
5493 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5494 BACKOFF_SETUP(%o2)
5495 1: lduw [%o1], %g1
5496 - add %g1, %o0, %g7
5497 + addcc %g1, %o0, %g7
5498 +
5499 +#ifdef CONFIG_PAX_REFCOUNT
5500 + tvs %icc, 6
5501 +#endif
5502 +
5503 cas [%o1], %g1, %g7
5504 cmp %g1, %g7
5505 bne,pn %icc, 2f
5506 @@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5507 2: BACKOFF_SPIN(%o2, %o3, 1b)
5508 .size atomic_add_ret, .-atomic_add_ret
5509
5510 + .globl atomic_add_ret_unchecked
5511 + .type atomic_add_ret_unchecked,#function
5512 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5513 + BACKOFF_SETUP(%o2)
5514 +1: lduw [%o1], %g1
5515 + addcc %g1, %o0, %g7
5516 + cas [%o1], %g1, %g7
5517 + cmp %g1, %g7
5518 + bne,pn %icc, 2f
5519 + add %g7, %o0, %g7
5520 + sra %g7, 0, %o0
5521 + retl
5522 + nop
5523 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5524 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
5525 +
5526 .globl atomic_sub_ret
5527 .type atomic_sub_ret,#function
5528 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5529 BACKOFF_SETUP(%o2)
5530 1: lduw [%o1], %g1
5531 - sub %g1, %o0, %g7
5532 + subcc %g1, %o0, %g7
5533 +
5534 +#ifdef CONFIG_PAX_REFCOUNT
5535 + tvs %icc, 6
5536 +#endif
5537 +
5538 cas [%o1], %g1, %g7
5539 cmp %g1, %g7
5540 bne,pn %icc, 2f
5541 @@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5542 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5543 BACKOFF_SETUP(%o2)
5544 1: ldx [%o1], %g1
5545 - add %g1, %o0, %g7
5546 + addcc %g1, %o0, %g7
5547 +
5548 +#ifdef CONFIG_PAX_REFCOUNT
5549 + tvs %xcc, 6
5550 +#endif
5551 +
5552 casx [%o1], %g1, %g7
5553 cmp %g1, %g7
5554 bne,pn %xcc, 2f
5555 @@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5556 2: BACKOFF_SPIN(%o2, %o3, 1b)
5557 .size atomic64_add, .-atomic64_add
5558
5559 + .globl atomic64_add_unchecked
5560 + .type atomic64_add_unchecked,#function
5561 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5562 + BACKOFF_SETUP(%o2)
5563 +1: ldx [%o1], %g1
5564 + addcc %g1, %o0, %g7
5565 + casx [%o1], %g1, %g7
5566 + cmp %g1, %g7
5567 + bne,pn %xcc, 2f
5568 + nop
5569 + retl
5570 + nop
5571 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5572 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
5573 +
5574 .globl atomic64_sub
5575 .type atomic64_sub,#function
5576 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5577 BACKOFF_SETUP(%o2)
5578 1: ldx [%o1], %g1
5579 - sub %g1, %o0, %g7
5580 + subcc %g1, %o0, %g7
5581 +
5582 +#ifdef CONFIG_PAX_REFCOUNT
5583 + tvs %xcc, 6
5584 +#endif
5585 +
5586 casx [%o1], %g1, %g7
5587 cmp %g1, %g7
5588 bne,pn %xcc, 2f
5589 @@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5590 2: BACKOFF_SPIN(%o2, %o3, 1b)
5591 .size atomic64_sub, .-atomic64_sub
5592
5593 + .globl atomic64_sub_unchecked
5594 + .type atomic64_sub_unchecked,#function
5595 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5596 + BACKOFF_SETUP(%o2)
5597 +1: ldx [%o1], %g1
5598 + subcc %g1, %o0, %g7
5599 + casx [%o1], %g1, %g7
5600 + cmp %g1, %g7
5601 + bne,pn %xcc, 2f
5602 + nop
5603 + retl
5604 + nop
5605 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5606 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
5607 +
5608 .globl atomic64_add_ret
5609 .type atomic64_add_ret,#function
5610 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5611 BACKOFF_SETUP(%o2)
5612 1: ldx [%o1], %g1
5613 - add %g1, %o0, %g7
5614 + addcc %g1, %o0, %g7
5615 +
5616 +#ifdef CONFIG_PAX_REFCOUNT
5617 + tvs %xcc, 6
5618 +#endif
5619 +
5620 casx [%o1], %g1, %g7
5621 cmp %g1, %g7
5622 bne,pn %xcc, 2f
5623 @@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5624 2: BACKOFF_SPIN(%o2, %o3, 1b)
5625 .size atomic64_add_ret, .-atomic64_add_ret
5626
5627 + .globl atomic64_add_ret_unchecked
5628 + .type atomic64_add_ret_unchecked,#function
5629 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5630 + BACKOFF_SETUP(%o2)
5631 +1: ldx [%o1], %g1
5632 + addcc %g1, %o0, %g7
5633 + casx [%o1], %g1, %g7
5634 + cmp %g1, %g7
5635 + bne,pn %xcc, 2f
5636 + add %g7, %o0, %g7
5637 + mov %g7, %o0
5638 + retl
5639 + nop
5640 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5641 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
5642 +
5643 .globl atomic64_sub_ret
5644 .type atomic64_sub_ret,#function
5645 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5646 BACKOFF_SETUP(%o2)
5647 1: ldx [%o1], %g1
5648 - sub %g1, %o0, %g7
5649 + subcc %g1, %o0, %g7
5650 +
5651 +#ifdef CONFIG_PAX_REFCOUNT
5652 + tvs %xcc, 6
5653 +#endif
5654 +
5655 casx [%o1], %g1, %g7
5656 cmp %g1, %g7
5657 bne,pn %xcc, 2f
5658 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
5659 index 704b126..2e79d76 100644
5660 --- a/arch/sparc/lib/ksyms.c
5661 +++ b/arch/sparc/lib/ksyms.c
5662 @@ -144,12 +144,18 @@ EXPORT_SYMBOL(__downgrade_write);
5663
5664 /* Atomic counter implementation. */
5665 EXPORT_SYMBOL(atomic_add);
5666 +EXPORT_SYMBOL(atomic_add_unchecked);
5667 EXPORT_SYMBOL(atomic_add_ret);
5668 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
5669 EXPORT_SYMBOL(atomic_sub);
5670 +EXPORT_SYMBOL(atomic_sub_unchecked);
5671 EXPORT_SYMBOL(atomic_sub_ret);
5672 EXPORT_SYMBOL(atomic64_add);
5673 +EXPORT_SYMBOL(atomic64_add_unchecked);
5674 EXPORT_SYMBOL(atomic64_add_ret);
5675 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
5676 EXPORT_SYMBOL(atomic64_sub);
5677 +EXPORT_SYMBOL(atomic64_sub_unchecked);
5678 EXPORT_SYMBOL(atomic64_sub_ret);
5679
5680 /* Atomic bit operations. */
5681 diff --git a/arch/sparc/lib/rwsem_64.S b/arch/sparc/lib/rwsem_64.S
5682 index 91a7d29..ce75c29 100644
5683 --- a/arch/sparc/lib/rwsem_64.S
5684 +++ b/arch/sparc/lib/rwsem_64.S
5685 @@ -11,7 +11,12 @@
5686 .globl __down_read
5687 __down_read:
5688 1: lduw [%o0], %g1
5689 - add %g1, 1, %g7
5690 + addcc %g1, 1, %g7
5691 +
5692 +#ifdef CONFIG_PAX_REFCOUNT
5693 + tvs %icc, 6
5694 +#endif
5695 +
5696 cas [%o0], %g1, %g7
5697 cmp %g1, %g7
5698 bne,pn %icc, 1b
5699 @@ -33,7 +38,12 @@ __down_read:
5700 .globl __down_read_trylock
5701 __down_read_trylock:
5702 1: lduw [%o0], %g1
5703 - add %g1, 1, %g7
5704 + addcc %g1, 1, %g7
5705 +
5706 +#ifdef CONFIG_PAX_REFCOUNT
5707 + tvs %icc, 6
5708 +#endif
5709 +
5710 cmp %g7, 0
5711 bl,pn %icc, 2f
5712 mov 0, %o1
5713 @@ -51,7 +61,12 @@ __down_write:
5714 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5715 1:
5716 lduw [%o0], %g3
5717 - add %g3, %g1, %g7
5718 + addcc %g3, %g1, %g7
5719 +
5720 +#ifdef CONFIG_PAX_REFCOUNT
5721 + tvs %icc, 6
5722 +#endif
5723 +
5724 cas [%o0], %g3, %g7
5725 cmp %g3, %g7
5726 bne,pn %icc, 1b
5727 @@ -77,7 +92,12 @@ __down_write_trylock:
5728 cmp %g3, 0
5729 bne,pn %icc, 2f
5730 mov 0, %o1
5731 - add %g3, %g1, %g7
5732 + addcc %g3, %g1, %g7
5733 +
5734 +#ifdef CONFIG_PAX_REFCOUNT
5735 + tvs %icc, 6
5736 +#endif
5737 +
5738 cas [%o0], %g3, %g7
5739 cmp %g3, %g7
5740 bne,pn %icc, 1b
5741 @@ -90,7 +110,12 @@ __down_write_trylock:
5742 __up_read:
5743 1:
5744 lduw [%o0], %g1
5745 - sub %g1, 1, %g7
5746 + subcc %g1, 1, %g7
5747 +
5748 +#ifdef CONFIG_PAX_REFCOUNT
5749 + tvs %icc, 6
5750 +#endif
5751 +
5752 cas [%o0], %g1, %g7
5753 cmp %g1, %g7
5754 bne,pn %icc, 1b
5755 @@ -118,7 +143,12 @@ __up_write:
5756 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5757 1:
5758 lduw [%o0], %g3
5759 - sub %g3, %g1, %g7
5760 + subcc %g3, %g1, %g7
5761 +
5762 +#ifdef CONFIG_PAX_REFCOUNT
5763 + tvs %icc, 6
5764 +#endif
5765 +
5766 cas [%o0], %g3, %g7
5767 cmp %g3, %g7
5768 bne,pn %icc, 1b
5769 @@ -143,7 +173,12 @@ __downgrade_write:
5770 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
5771 1:
5772 lduw [%o0], %g3
5773 - sub %g3, %g1, %g7
5774 + subcc %g3, %g1, %g7
5775 +
5776 +#ifdef CONFIG_PAX_REFCOUNT
5777 + tvs %icc, 6
5778 +#endif
5779 +
5780 cas [%o0], %g3, %g7
5781 cmp %g3, %g7
5782 bne,pn %icc, 1b
5783 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
5784 index 79836a7..62f47a2 100644
5785 --- a/arch/sparc/mm/Makefile
5786 +++ b/arch/sparc/mm/Makefile
5787 @@ -2,7 +2,7 @@
5788 #
5789
5790 asflags-y := -ansi
5791 -ccflags-y := -Werror
5792 +#ccflags-y := -Werror
5793
5794 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5795 obj-y += fault_$(BITS).o
5796 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
5797 index b99f81c..3453e93 100644
5798 --- a/arch/sparc/mm/fault_32.c
5799 +++ b/arch/sparc/mm/fault_32.c
5800 @@ -21,6 +21,9 @@
5801 #include <linux/interrupt.h>
5802 #include <linux/module.h>
5803 #include <linux/kdebug.h>
5804 +#include <linux/slab.h>
5805 +#include <linux/pagemap.h>
5806 +#include <linux/compiler.h>
5807
5808 #include <asm/system.h>
5809 #include <asm/page.h>
5810 @@ -167,6 +170,267 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
5811 return safe_compute_effective_address(regs, insn);
5812 }
5813
5814 +#ifdef CONFIG_PAX_PAGEEXEC
5815 +#ifdef CONFIG_PAX_DLRESOLVE
5816 +static void pax_emuplt_close(struct vm_area_struct *vma)
5817 +{
5818 + vma->vm_mm->call_dl_resolve = 0UL;
5819 +}
5820 +
5821 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5822 +{
5823 + unsigned int *kaddr;
5824 +
5825 + vmf->page = alloc_page(GFP_HIGHUSER);
5826 + if (!vmf->page)
5827 + return VM_FAULT_OOM;
5828 +
5829 + kaddr = kmap(vmf->page);
5830 + memset(kaddr, 0, PAGE_SIZE);
5831 + kaddr[0] = 0x9DE3BFA8U; /* save */
5832 + flush_dcache_page(vmf->page);
5833 + kunmap(vmf->page);
5834 + return VM_FAULT_MAJOR;
5835 +}
5836 +
5837 +static const struct vm_operations_struct pax_vm_ops = {
5838 + .close = pax_emuplt_close,
5839 + .fault = pax_emuplt_fault
5840 +};
5841 +
5842 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5843 +{
5844 + int ret;
5845 +
5846 + vma->vm_mm = current->mm;
5847 + vma->vm_start = addr;
5848 + vma->vm_end = addr + PAGE_SIZE;
5849 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5850 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5851 + vma->vm_ops = &pax_vm_ops;
5852 +
5853 + ret = insert_vm_struct(current->mm, vma);
5854 + if (ret)
5855 + return ret;
5856 +
5857 + ++current->mm->total_vm;
5858 + return 0;
5859 +}
5860 +#endif
5861 +
5862 +/*
5863 + * PaX: decide what to do with offenders (regs->pc = fault address)
5864 + *
5865 + * returns 1 when task should be killed
5866 + * 2 when patched PLT trampoline was detected
5867 + * 3 when unpatched PLT trampoline was detected
5868 + */
5869 +static int pax_handle_fetch_fault(struct pt_regs *regs)
5870 +{
5871 +
5872 +#ifdef CONFIG_PAX_EMUPLT
5873 + int err;
5874 +
5875 + do { /* PaX: patched PLT emulation #1 */
5876 + unsigned int sethi1, sethi2, jmpl;
5877 +
5878 + err = get_user(sethi1, (unsigned int *)regs->pc);
5879 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
5880 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
5881 +
5882 + if (err)
5883 + break;
5884 +
5885 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5886 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
5887 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
5888 + {
5889 + unsigned int addr;
5890 +
5891 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5892 + addr = regs->u_regs[UREG_G1];
5893 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5894 + regs->pc = addr;
5895 + regs->npc = addr+4;
5896 + return 2;
5897 + }
5898 + } while (0);
5899 +
5900 + { /* PaX: patched PLT emulation #2 */
5901 + unsigned int ba;
5902 +
5903 + err = get_user(ba, (unsigned int *)regs->pc);
5904 +
5905 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5906 + unsigned int addr;
5907 +
5908 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5909 + regs->pc = addr;
5910 + regs->npc = addr+4;
5911 + return 2;
5912 + }
5913 + }
5914 +
5915 + do { /* PaX: patched PLT emulation #3 */
5916 + unsigned int sethi, jmpl, nop;
5917 +
5918 + err = get_user(sethi, (unsigned int *)regs->pc);
5919 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
5920 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
5921 +
5922 + if (err)
5923 + break;
5924 +
5925 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5926 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5927 + nop == 0x01000000U)
5928 + {
5929 + unsigned int addr;
5930 +
5931 + addr = (sethi & 0x003FFFFFU) << 10;
5932 + regs->u_regs[UREG_G1] = addr;
5933 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5934 + regs->pc = addr;
5935 + regs->npc = addr+4;
5936 + return 2;
5937 + }
5938 + } while (0);
5939 +
5940 + do { /* PaX: unpatched PLT emulation step 1 */
5941 + unsigned int sethi, ba, nop;
5942 +
5943 + err = get_user(sethi, (unsigned int *)regs->pc);
5944 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
5945 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
5946 +
5947 + if (err)
5948 + break;
5949 +
5950 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5951 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5952 + nop == 0x01000000U)
5953 + {
5954 + unsigned int addr, save, call;
5955 +
5956 + if ((ba & 0xFFC00000U) == 0x30800000U)
5957 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5958 + else
5959 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
5960 +
5961 + err = get_user(save, (unsigned int *)addr);
5962 + err |= get_user(call, (unsigned int *)(addr+4));
5963 + err |= get_user(nop, (unsigned int *)(addr+8));
5964 + if (err)
5965 + break;
5966 +
5967 +#ifdef CONFIG_PAX_DLRESOLVE
5968 + if (save == 0x9DE3BFA8U &&
5969 + (call & 0xC0000000U) == 0x40000000U &&
5970 + nop == 0x01000000U)
5971 + {
5972 + struct vm_area_struct *vma;
5973 + unsigned long call_dl_resolve;
5974 +
5975 + down_read(&current->mm->mmap_sem);
5976 + call_dl_resolve = current->mm->call_dl_resolve;
5977 + up_read(&current->mm->mmap_sem);
5978 + if (likely(call_dl_resolve))
5979 + goto emulate;
5980 +
5981 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5982 +
5983 + down_write(&current->mm->mmap_sem);
5984 + if (current->mm->call_dl_resolve) {
5985 + call_dl_resolve = current->mm->call_dl_resolve;
5986 + up_write(&current->mm->mmap_sem);
5987 + if (vma)
5988 + kmem_cache_free(vm_area_cachep, vma);
5989 + goto emulate;
5990 + }
5991 +
5992 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5993 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5994 + up_write(&current->mm->mmap_sem);
5995 + if (vma)
5996 + kmem_cache_free(vm_area_cachep, vma);
5997 + return 1;
5998 + }
5999 +
6000 + if (pax_insert_vma(vma, call_dl_resolve)) {
6001 + up_write(&current->mm->mmap_sem);
6002 + kmem_cache_free(vm_area_cachep, vma);
6003 + return 1;
6004 + }
6005 +
6006 + current->mm->call_dl_resolve = call_dl_resolve;
6007 + up_write(&current->mm->mmap_sem);
6008 +
6009 +emulate:
6010 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6011 + regs->pc = call_dl_resolve;
6012 + regs->npc = addr+4;
6013 + return 3;
6014 + }
6015 +#endif
6016 +
6017 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6018 + if ((save & 0xFFC00000U) == 0x05000000U &&
6019 + (call & 0xFFFFE000U) == 0x85C0A000U &&
6020 + nop == 0x01000000U)
6021 + {
6022 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6023 + regs->u_regs[UREG_G2] = addr + 4;
6024 + addr = (save & 0x003FFFFFU) << 10;
6025 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6026 + regs->pc = addr;
6027 + regs->npc = addr+4;
6028 + return 3;
6029 + }
6030 + }
6031 + } while (0);
6032 +
6033 + do { /* PaX: unpatched PLT emulation step 2 */
6034 + unsigned int save, call, nop;
6035 +
6036 + err = get_user(save, (unsigned int *)(regs->pc-4));
6037 + err |= get_user(call, (unsigned int *)regs->pc);
6038 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
6039 + if (err)
6040 + break;
6041 +
6042 + if (save == 0x9DE3BFA8U &&
6043 + (call & 0xC0000000U) == 0x40000000U &&
6044 + nop == 0x01000000U)
6045 + {
6046 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6047 +
6048 + regs->u_regs[UREG_RETPC] = regs->pc;
6049 + regs->pc = dl_resolve;
6050 + regs->npc = dl_resolve+4;
6051 + return 3;
6052 + }
6053 + } while (0);
6054 +#endif
6055 +
6056 + return 1;
6057 +}
6058 +
6059 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6060 +{
6061 + unsigned long i;
6062 +
6063 + printk(KERN_ERR "PAX: bytes at PC: ");
6064 + for (i = 0; i < 8; i++) {
6065 + unsigned int c;
6066 + if (get_user(c, (unsigned int *)pc+i))
6067 + printk(KERN_CONT "???????? ");
6068 + else
6069 + printk(KERN_CONT "%08x ", c);
6070 + }
6071 + printk("\n");
6072 +}
6073 +#endif
6074 +
6075 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
6076 unsigned long address)
6077 {
6078 @@ -231,6 +495,24 @@ good_area:
6079 if(!(vma->vm_flags & VM_WRITE))
6080 goto bad_area;
6081 } else {
6082 +
6083 +#ifdef CONFIG_PAX_PAGEEXEC
6084 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6085 + up_read(&mm->mmap_sem);
6086 + switch (pax_handle_fetch_fault(regs)) {
6087 +
6088 +#ifdef CONFIG_PAX_EMUPLT
6089 + case 2:
6090 + case 3:
6091 + return;
6092 +#endif
6093 +
6094 + }
6095 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6096 + do_group_exit(SIGKILL);
6097 + }
6098 +#endif
6099 +
6100 /* Allow reads even for write-only mappings */
6101 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6102 goto bad_area;
6103 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6104 index 43b0da9..a0b78f9 100644
6105 --- a/arch/sparc/mm/fault_64.c
6106 +++ b/arch/sparc/mm/fault_64.c
6107 @@ -20,6 +20,9 @@
6108 #include <linux/kprobes.h>
6109 #include <linux/kdebug.h>
6110 #include <linux/percpu.h>
6111 +#include <linux/slab.h>
6112 +#include <linux/pagemap.h>
6113 +#include <linux/compiler.h>
6114
6115 #include <asm/page.h>
6116 #include <asm/pgtable.h>
6117 @@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6118 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6119 regs->tpc);
6120 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6121 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6122 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6123 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6124 dump_stack();
6125 unhandled_fault(regs->tpc, current, regs);
6126 @@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_address(struct pt_regs *regs,
6127 show_regs(regs);
6128 }
6129
6130 +#ifdef CONFIG_PAX_PAGEEXEC
6131 +#ifdef CONFIG_PAX_DLRESOLVE
6132 +static void pax_emuplt_close(struct vm_area_struct *vma)
6133 +{
6134 + vma->vm_mm->call_dl_resolve = 0UL;
6135 +}
6136 +
6137 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6138 +{
6139 + unsigned int *kaddr;
6140 +
6141 + vmf->page = alloc_page(GFP_HIGHUSER);
6142 + if (!vmf->page)
6143 + return VM_FAULT_OOM;
6144 +
6145 + kaddr = kmap(vmf->page);
6146 + memset(kaddr, 0, PAGE_SIZE);
6147 + kaddr[0] = 0x9DE3BFA8U; /* save */
6148 + flush_dcache_page(vmf->page);
6149 + kunmap(vmf->page);
6150 + return VM_FAULT_MAJOR;
6151 +}
6152 +
6153 +static const struct vm_operations_struct pax_vm_ops = {
6154 + .close = pax_emuplt_close,
6155 + .fault = pax_emuplt_fault
6156 +};
6157 +
6158 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6159 +{
6160 + int ret;
6161 +
6162 + vma->vm_mm = current->mm;
6163 + vma->vm_start = addr;
6164 + vma->vm_end = addr + PAGE_SIZE;
6165 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6166 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6167 + vma->vm_ops = &pax_vm_ops;
6168 +
6169 + ret = insert_vm_struct(current->mm, vma);
6170 + if (ret)
6171 + return ret;
6172 +
6173 + ++current->mm->total_vm;
6174 + return 0;
6175 +}
6176 +#endif
6177 +
6178 +/*
6179 + * PaX: decide what to do with offenders (regs->tpc = fault address)
6180 + *
6181 + * returns 1 when task should be killed
6182 + * 2 when patched PLT trampoline was detected
6183 + * 3 when unpatched PLT trampoline was detected
6184 + */
6185 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6186 +{
6187 +
6188 +#ifdef CONFIG_PAX_EMUPLT
6189 + int err;
6190 +
6191 + do { /* PaX: patched PLT emulation #1 */
6192 + unsigned int sethi1, sethi2, jmpl;
6193 +
6194 + err = get_user(sethi1, (unsigned int *)regs->tpc);
6195 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6196 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6197 +
6198 + if (err)
6199 + break;
6200 +
6201 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6202 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6203 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6204 + {
6205 + unsigned long addr;
6206 +
6207 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6208 + addr = regs->u_regs[UREG_G1];
6209 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6210 +
6211 + if (test_thread_flag(TIF_32BIT))
6212 + addr &= 0xFFFFFFFFUL;
6213 +
6214 + regs->tpc = addr;
6215 + regs->tnpc = addr+4;
6216 + return 2;
6217 + }
6218 + } while (0);
6219 +
6220 + { /* PaX: patched PLT emulation #2 */
6221 + unsigned int ba;
6222 +
6223 + err = get_user(ba, (unsigned int *)regs->tpc);
6224 +
6225 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6226 + unsigned long addr;
6227 +
6228 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6229 +
6230 + if (test_thread_flag(TIF_32BIT))
6231 + addr &= 0xFFFFFFFFUL;
6232 +
6233 + regs->tpc = addr;
6234 + regs->tnpc = addr+4;
6235 + return 2;
6236 + }
6237 + }
6238 +
6239 + do { /* PaX: patched PLT emulation #3 */
6240 + unsigned int sethi, jmpl, nop;
6241 +
6242 + err = get_user(sethi, (unsigned int *)regs->tpc);
6243 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
6244 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6245 +
6246 + if (err)
6247 + break;
6248 +
6249 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6250 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6251 + nop == 0x01000000U)
6252 + {
6253 + unsigned long addr;
6254 +
6255 + addr = (sethi & 0x003FFFFFU) << 10;
6256 + regs->u_regs[UREG_G1] = addr;
6257 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6258 +
6259 + if (test_thread_flag(TIF_32BIT))
6260 + addr &= 0xFFFFFFFFUL;
6261 +
6262 + regs->tpc = addr;
6263 + regs->tnpc = addr+4;
6264 + return 2;
6265 + }
6266 + } while (0);
6267 +
6268 + do { /* PaX: patched PLT emulation #4 */
6269 + unsigned int sethi, mov1, call, mov2;
6270 +
6271 + err = get_user(sethi, (unsigned int *)regs->tpc);
6272 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
6273 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
6274 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
6275 +
6276 + if (err)
6277 + break;
6278 +
6279 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6280 + mov1 == 0x8210000FU &&
6281 + (call & 0xC0000000U) == 0x40000000U &&
6282 + mov2 == 0x9E100001U)
6283 + {
6284 + unsigned long addr;
6285 +
6286 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
6287 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6288 +
6289 + if (test_thread_flag(TIF_32BIT))
6290 + addr &= 0xFFFFFFFFUL;
6291 +
6292 + regs->tpc = addr;
6293 + regs->tnpc = addr+4;
6294 + return 2;
6295 + }
6296 + } while (0);
6297 +
6298 + do { /* PaX: patched PLT emulation #5 */
6299 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
6300 +
6301 + err = get_user(sethi, (unsigned int *)regs->tpc);
6302 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6303 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6304 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
6305 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
6306 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
6307 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
6308 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
6309 +
6310 + if (err)
6311 + break;
6312 +
6313 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6314 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
6315 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6316 + (or1 & 0xFFFFE000U) == 0x82106000U &&
6317 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
6318 + sllx == 0x83287020U &&
6319 + jmpl == 0x81C04005U &&
6320 + nop == 0x01000000U)
6321 + {
6322 + unsigned long addr;
6323 +
6324 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6325 + regs->u_regs[UREG_G1] <<= 32;
6326 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6327 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6328 + regs->tpc = addr;
6329 + regs->tnpc = addr+4;
6330 + return 2;
6331 + }
6332 + } while (0);
6333 +
6334 + do { /* PaX: patched PLT emulation #6 */
6335 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
6336 +
6337 + err = get_user(sethi, (unsigned int *)regs->tpc);
6338 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6339 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6340 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
6341 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
6342 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
6343 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
6344 +
6345 + if (err)
6346 + break;
6347 +
6348 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6349 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
6350 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6351 + sllx == 0x83287020U &&
6352 + (or & 0xFFFFE000U) == 0x8A116000U &&
6353 + jmpl == 0x81C04005U &&
6354 + nop == 0x01000000U)
6355 + {
6356 + unsigned long addr;
6357 +
6358 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
6359 + regs->u_regs[UREG_G1] <<= 32;
6360 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
6361 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6362 + regs->tpc = addr;
6363 + regs->tnpc = addr+4;
6364 + return 2;
6365 + }
6366 + } while (0);
6367 +
6368 + do { /* PaX: unpatched PLT emulation step 1 */
6369 + unsigned int sethi, ba, nop;
6370 +
6371 + err = get_user(sethi, (unsigned int *)regs->tpc);
6372 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6373 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6374 +
6375 + if (err)
6376 + break;
6377 +
6378 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6379 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6380 + nop == 0x01000000U)
6381 + {
6382 + unsigned long addr;
6383 + unsigned int save, call;
6384 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
6385 +
6386 + if ((ba & 0xFFC00000U) == 0x30800000U)
6387 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6388 + else
6389 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6390 +
6391 + if (test_thread_flag(TIF_32BIT))
6392 + addr &= 0xFFFFFFFFUL;
6393 +
6394 + err = get_user(save, (unsigned int *)addr);
6395 + err |= get_user(call, (unsigned int *)(addr+4));
6396 + err |= get_user(nop, (unsigned int *)(addr+8));
6397 + if (err)
6398 + break;
6399 +
6400 +#ifdef CONFIG_PAX_DLRESOLVE
6401 + if (save == 0x9DE3BFA8U &&
6402 + (call & 0xC0000000U) == 0x40000000U &&
6403 + nop == 0x01000000U)
6404 + {
6405 + struct vm_area_struct *vma;
6406 + unsigned long call_dl_resolve;
6407 +
6408 + down_read(&current->mm->mmap_sem);
6409 + call_dl_resolve = current->mm->call_dl_resolve;
6410 + up_read(&current->mm->mmap_sem);
6411 + if (likely(call_dl_resolve))
6412 + goto emulate;
6413 +
6414 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6415 +
6416 + down_write(&current->mm->mmap_sem);
6417 + if (current->mm->call_dl_resolve) {
6418 + call_dl_resolve = current->mm->call_dl_resolve;
6419 + up_write(&current->mm->mmap_sem);
6420 + if (vma)
6421 + kmem_cache_free(vm_area_cachep, vma);
6422 + goto emulate;
6423 + }
6424 +
6425 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6426 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6427 + up_write(&current->mm->mmap_sem);
6428 + if (vma)
6429 + kmem_cache_free(vm_area_cachep, vma);
6430 + return 1;
6431 + }
6432 +
6433 + if (pax_insert_vma(vma, call_dl_resolve)) {
6434 + up_write(&current->mm->mmap_sem);
6435 + kmem_cache_free(vm_area_cachep, vma);
6436 + return 1;
6437 + }
6438 +
6439 + current->mm->call_dl_resolve = call_dl_resolve;
6440 + up_write(&current->mm->mmap_sem);
6441 +
6442 +emulate:
6443 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6444 + regs->tpc = call_dl_resolve;
6445 + regs->tnpc = addr+4;
6446 + return 3;
6447 + }
6448 +#endif
6449 +
6450 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6451 + if ((save & 0xFFC00000U) == 0x05000000U &&
6452 + (call & 0xFFFFE000U) == 0x85C0A000U &&
6453 + nop == 0x01000000U)
6454 + {
6455 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6456 + regs->u_regs[UREG_G2] = addr + 4;
6457 + addr = (save & 0x003FFFFFU) << 10;
6458 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6459 +
6460 + if (test_thread_flag(TIF_32BIT))
6461 + addr &= 0xFFFFFFFFUL;
6462 +
6463 + regs->tpc = addr;
6464 + regs->tnpc = addr+4;
6465 + return 3;
6466 + }
6467 +
6468 + /* PaX: 64-bit PLT stub */
6469 + err = get_user(sethi1, (unsigned int *)addr);
6470 + err |= get_user(sethi2, (unsigned int *)(addr+4));
6471 + err |= get_user(or1, (unsigned int *)(addr+8));
6472 + err |= get_user(or2, (unsigned int *)(addr+12));
6473 + err |= get_user(sllx, (unsigned int *)(addr+16));
6474 + err |= get_user(add, (unsigned int *)(addr+20));
6475 + err |= get_user(jmpl, (unsigned int *)(addr+24));
6476 + err |= get_user(nop, (unsigned int *)(addr+28));
6477 + if (err)
6478 + break;
6479 +
6480 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
6481 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6482 + (or1 & 0xFFFFE000U) == 0x88112000U &&
6483 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
6484 + sllx == 0x89293020U &&
6485 + add == 0x8A010005U &&
6486 + jmpl == 0x89C14000U &&
6487 + nop == 0x01000000U)
6488 + {
6489 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6490 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6491 + regs->u_regs[UREG_G4] <<= 32;
6492 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6493 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
6494 + regs->u_regs[UREG_G4] = addr + 24;
6495 + addr = regs->u_regs[UREG_G5];
6496 + regs->tpc = addr;
6497 + regs->tnpc = addr+4;
6498 + return 3;
6499 + }
6500 + }
6501 + } while (0);
6502 +
6503 +#ifdef CONFIG_PAX_DLRESOLVE
6504 + do { /* PaX: unpatched PLT emulation step 2 */
6505 + unsigned int save, call, nop;
6506 +
6507 + err = get_user(save, (unsigned int *)(regs->tpc-4));
6508 + err |= get_user(call, (unsigned int *)regs->tpc);
6509 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
6510 + if (err)
6511 + break;
6512 +
6513 + if (save == 0x9DE3BFA8U &&
6514 + (call & 0xC0000000U) == 0x40000000U &&
6515 + nop == 0x01000000U)
6516 + {
6517 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6518 +
6519 + if (test_thread_flag(TIF_32BIT))
6520 + dl_resolve &= 0xFFFFFFFFUL;
6521 +
6522 + regs->u_regs[UREG_RETPC] = regs->tpc;
6523 + regs->tpc = dl_resolve;
6524 + regs->tnpc = dl_resolve+4;
6525 + return 3;
6526 + }
6527 + } while (0);
6528 +#endif
6529 +
6530 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
6531 + unsigned int sethi, ba, nop;
6532 +
6533 + err = get_user(sethi, (unsigned int *)regs->tpc);
6534 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6535 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6536 +
6537 + if (err)
6538 + break;
6539 +
6540 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6541 + (ba & 0xFFF00000U) == 0x30600000U &&
6542 + nop == 0x01000000U)
6543 + {
6544 + unsigned long addr;
6545 +
6546 + addr = (sethi & 0x003FFFFFU) << 10;
6547 + regs->u_regs[UREG_G1] = addr;
6548 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6549 +
6550 + if (test_thread_flag(TIF_32BIT))
6551 + addr &= 0xFFFFFFFFUL;
6552 +
6553 + regs->tpc = addr;
6554 + regs->tnpc = addr+4;
6555 + return 2;
6556 + }
6557 + } while (0);
6558 +
6559 +#endif
6560 +
6561 + return 1;
6562 +}
6563 +
6564 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6565 +{
6566 + unsigned long i;
6567 +
6568 + printk(KERN_ERR "PAX: bytes at PC: ");
6569 + for (i = 0; i < 8; i++) {
6570 + unsigned int c;
6571 + if (get_user(c, (unsigned int *)pc+i))
6572 + printk(KERN_CONT "???????? ");
6573 + else
6574 + printk(KERN_CONT "%08x ", c);
6575 + }
6576 + printk("\n");
6577 +}
6578 +#endif
6579 +
6580 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6581 {
6582 struct mm_struct *mm = current->mm;
6583 @@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6584 if (!vma)
6585 goto bad_area;
6586
6587 +#ifdef CONFIG_PAX_PAGEEXEC
6588 + /* PaX: detect ITLB misses on non-exec pages */
6589 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
6590 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
6591 + {
6592 + if (address != regs->tpc)
6593 + goto good_area;
6594 +
6595 + up_read(&mm->mmap_sem);
6596 + switch (pax_handle_fetch_fault(regs)) {
6597 +
6598 +#ifdef CONFIG_PAX_EMUPLT
6599 + case 2:
6600 + case 3:
6601 + return;
6602 +#endif
6603 +
6604 + }
6605 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
6606 + do_group_exit(SIGKILL);
6607 + }
6608 +#endif
6609 +
6610 /* Pure DTLB misses do not tell us whether the fault causing
6611 * load/store/atomic was a write or not, it only says that there
6612 * was no match. So in such a case we (carefully) read the
6613 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
6614 index f27d103..1b06377 100644
6615 --- a/arch/sparc/mm/hugetlbpage.c
6616 +++ b/arch/sparc/mm/hugetlbpage.c
6617 @@ -69,7 +69,7 @@ full_search:
6618 }
6619 return -ENOMEM;
6620 }
6621 - if (likely(!vma || addr + len <= vma->vm_start)) {
6622 + if (likely(check_heap_stack_gap(vma, addr, len))) {
6623 /*
6624 * Remember the place where we stopped the search:
6625 */
6626 @@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6627 /* make sure it can fit in the remaining address space */
6628 if (likely(addr > len)) {
6629 vma = find_vma(mm, addr-len);
6630 - if (!vma || addr <= vma->vm_start) {
6631 + if (check_heap_stack_gap(vma, addr - len, len)) {
6632 /* remember the address as a hint for next time */
6633 return (mm->free_area_cache = addr-len);
6634 }
6635 @@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6636 if (unlikely(mm->mmap_base < len))
6637 goto bottomup;
6638
6639 - addr = (mm->mmap_base-len) & HPAGE_MASK;
6640 + addr = mm->mmap_base - len;
6641
6642 do {
6643 + addr &= HPAGE_MASK;
6644 /*
6645 * Lookup failure means no vma is above this address,
6646 * else if new region fits below vma->vm_start,
6647 * return with success:
6648 */
6649 vma = find_vma(mm, addr);
6650 - if (likely(!vma || addr+len <= vma->vm_start)) {
6651 + if (likely(check_heap_stack_gap(vma, addr, len))) {
6652 /* remember the address as a hint for next time */
6653 return (mm->free_area_cache = addr);
6654 }
6655 @@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6656 mm->cached_hole_size = vma->vm_start - addr;
6657
6658 /* try just below the current vma->vm_start */
6659 - addr = (vma->vm_start-len) & HPAGE_MASK;
6660 - } while (likely(len < vma->vm_start));
6661 + addr = skip_heap_stack_gap(vma, len);
6662 + } while (!IS_ERR_VALUE(addr));
6663
6664 bottomup:
6665 /*
6666 @@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
6667 if (addr) {
6668 addr = ALIGN(addr, HPAGE_SIZE);
6669 vma = find_vma(mm, addr);
6670 - if (task_size - len >= addr &&
6671 - (!vma || addr + len <= vma->vm_start))
6672 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6673 return addr;
6674 }
6675 if (mm->get_unmapped_area == arch_get_unmapped_area)
6676 diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
6677 index dc7c3b1..34c0070 100644
6678 --- a/arch/sparc/mm/init_32.c
6679 +++ b/arch/sparc/mm/init_32.c
6680 @@ -317,6 +317,9 @@ extern void device_scan(void);
6681 pgprot_t PAGE_SHARED __read_mostly;
6682 EXPORT_SYMBOL(PAGE_SHARED);
6683
6684 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
6685 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
6686 +
6687 void __init paging_init(void)
6688 {
6689 switch(sparc_cpu_model) {
6690 @@ -345,17 +348,17 @@ void __init paging_init(void)
6691
6692 /* Initialize the protection map with non-constant, MMU dependent values. */
6693 protection_map[0] = PAGE_NONE;
6694 - protection_map[1] = PAGE_READONLY;
6695 - protection_map[2] = PAGE_COPY;
6696 - protection_map[3] = PAGE_COPY;
6697 + protection_map[1] = PAGE_READONLY_NOEXEC;
6698 + protection_map[2] = PAGE_COPY_NOEXEC;
6699 + protection_map[3] = PAGE_COPY_NOEXEC;
6700 protection_map[4] = PAGE_READONLY;
6701 protection_map[5] = PAGE_READONLY;
6702 protection_map[6] = PAGE_COPY;
6703 protection_map[7] = PAGE_COPY;
6704 protection_map[8] = PAGE_NONE;
6705 - protection_map[9] = PAGE_READONLY;
6706 - protection_map[10] = PAGE_SHARED;
6707 - protection_map[11] = PAGE_SHARED;
6708 + protection_map[9] = PAGE_READONLY_NOEXEC;
6709 + protection_map[10] = PAGE_SHARED_NOEXEC;
6710 + protection_map[11] = PAGE_SHARED_NOEXEC;
6711 protection_map[12] = PAGE_READONLY;
6712 protection_map[13] = PAGE_READONLY;
6713 protection_map[14] = PAGE_SHARED;
6714 diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
6715 index 509b1ff..bfd7118 100644
6716 --- a/arch/sparc/mm/srmmu.c
6717 +++ b/arch/sparc/mm/srmmu.c
6718 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
6719 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
6720 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6721 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6722 +
6723 +#ifdef CONFIG_PAX_PAGEEXEC
6724 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6725 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6726 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6727 +#endif
6728 +
6729 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6730 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6731
6732 diff --git a/arch/um/Makefile b/arch/um/Makefile
6733 index fc633db..5e1a1c2 100644
6734 --- a/arch/um/Makefile
6735 +++ b/arch/um/Makefile
6736 @@ -49,6 +49,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
6737 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
6738 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64
6739
6740 +ifdef CONSTIFY_PLUGIN
6741 +USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6742 +endif
6743 +
6744 include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH)
6745
6746 #This will adjust *FLAGS accordingly to the platform.
6747 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
6748 index 6c03acd..a5e0215 100644
6749 --- a/arch/um/include/asm/kmap_types.h
6750 +++ b/arch/um/include/asm/kmap_types.h
6751 @@ -23,6 +23,7 @@ enum km_type {
6752 KM_IRQ1,
6753 KM_SOFTIRQ0,
6754 KM_SOFTIRQ1,
6755 + KM_CLEARPAGE,
6756 KM_TYPE_NR
6757 };
6758
6759 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
6760 index 4cc9b6c..02e5029 100644
6761 --- a/arch/um/include/asm/page.h
6762 +++ b/arch/um/include/asm/page.h
6763 @@ -14,6 +14,9 @@
6764 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6765 #define PAGE_MASK (~(PAGE_SIZE-1))
6766
6767 +#define ktla_ktva(addr) (addr)
6768 +#define ktva_ktla(addr) (addr)
6769 +
6770 #ifndef __ASSEMBLY__
6771
6772 struct page;
6773 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
6774 index 4a28a15..654dc2a 100644
6775 --- a/arch/um/kernel/process.c
6776 +++ b/arch/um/kernel/process.c
6777 @@ -393,22 +393,6 @@ int singlestepping(void * t)
6778 return 2;
6779 }
6780
6781 -/*
6782 - * Only x86 and x86_64 have an arch_align_stack().
6783 - * All other arches have "#define arch_align_stack(x) (x)"
6784 - * in their asm/system.h
6785 - * As this is included in UML from asm-um/system-generic.h,
6786 - * we can use it to behave as the subarch does.
6787 - */
6788 -#ifndef arch_align_stack
6789 -unsigned long arch_align_stack(unsigned long sp)
6790 -{
6791 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6792 - sp -= get_random_int() % 8192;
6793 - return sp & ~0xf;
6794 -}
6795 -#endif
6796 -
6797 unsigned long get_wchan(struct task_struct *p)
6798 {
6799 unsigned long stack_page, sp, ip;
6800 diff --git a/arch/um/sys-i386/shared/sysdep/system.h b/arch/um/sys-i386/shared/sysdep/system.h
6801 index d1b93c4..ae1b7fd 100644
6802 --- a/arch/um/sys-i386/shared/sysdep/system.h
6803 +++ b/arch/um/sys-i386/shared/sysdep/system.h
6804 @@ -17,7 +17,7 @@
6805 # define AT_VECTOR_SIZE_ARCH 1
6806 #endif
6807
6808 -extern unsigned long arch_align_stack(unsigned long sp);
6809 +#define arch_align_stack(x) ((x) & ~0xfUL)
6810
6811 void default_idle(void);
6812
6813 diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
6814 index 857ca0b..9a2669d 100644
6815 --- a/arch/um/sys-i386/syscalls.c
6816 +++ b/arch/um/sys-i386/syscalls.c
6817 @@ -11,6 +11,21 @@
6818 #include "asm/uaccess.h"
6819 #include "asm/unistd.h"
6820
6821 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
6822 +{
6823 + unsigned long pax_task_size = TASK_SIZE;
6824 +
6825 +#ifdef CONFIG_PAX_SEGMEXEC
6826 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
6827 + pax_task_size = SEGMEXEC_TASK_SIZE;
6828 +#endif
6829 +
6830 + if (len > pax_task_size || addr > pax_task_size - len)
6831 + return -EINVAL;
6832 +
6833 + return 0;
6834 +}
6835 +
6836 /*
6837 * Perform the select(nd, in, out, ex, tv) and mmap() system
6838 * calls. Linux/i386 didn't use to be able to handle more than
6839 diff --git a/arch/um/sys-x86_64/shared/sysdep/system.h b/arch/um/sys-x86_64/shared/sysdep/system.h
6840 index d1b93c4..ae1b7fd 100644
6841 --- a/arch/um/sys-x86_64/shared/sysdep/system.h
6842 +++ b/arch/um/sys-x86_64/shared/sysdep/system.h
6843 @@ -17,7 +17,7 @@
6844 # define AT_VECTOR_SIZE_ARCH 1
6845 #endif
6846
6847 -extern unsigned long arch_align_stack(unsigned long sp);
6848 +#define arch_align_stack(x) ((x) & ~0xfUL)
6849
6850 void default_idle(void);
6851
6852 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
6853 index 73ae02a..f932de5 100644
6854 --- a/arch/x86/Kconfig
6855 +++ b/arch/x86/Kconfig
6856 @@ -223,7 +223,7 @@ config X86_TRAMPOLINE
6857
6858 config X86_32_LAZY_GS
6859 def_bool y
6860 - depends on X86_32 && !CC_STACKPROTECTOR
6861 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
6862
6863 config KTIME_SCALAR
6864 def_bool X86_32
6865 @@ -1008,7 +1008,7 @@ choice
6866
6867 config NOHIGHMEM
6868 bool "off"
6869 - depends on !X86_NUMAQ
6870 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
6871 ---help---
6872 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
6873 However, the address space of 32-bit x86 processors is only 4
6874 @@ -1045,7 +1045,7 @@ config NOHIGHMEM
6875
6876 config HIGHMEM4G
6877 bool "4GB"
6878 - depends on !X86_NUMAQ
6879 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
6880 ---help---
6881 Select this if you have a 32-bit processor and between 1 and 4
6882 gigabytes of physical RAM.
6883 @@ -1099,7 +1099,7 @@ config PAGE_OFFSET
6884 hex
6885 default 0xB0000000 if VMSPLIT_3G_OPT
6886 default 0x80000000 if VMSPLIT_2G
6887 - default 0x78000000 if VMSPLIT_2G_OPT
6888 + default 0x70000000 if VMSPLIT_2G_OPT
6889 default 0x40000000 if VMSPLIT_1G
6890 default 0xC0000000
6891 depends on X86_32
6892 @@ -1460,6 +1460,7 @@ config SECCOMP
6893
6894 config CC_STACKPROTECTOR
6895 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
6896 + depends on X86_64 || !PAX_MEMORY_UDEREF
6897 ---help---
6898 This option turns on the -fstack-protector GCC feature. This
6899 feature puts, at the beginning of functions, a canary value on
6900 @@ -1517,6 +1518,7 @@ config KEXEC_JUMP
6901 config PHYSICAL_START
6902 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
6903 default "0x1000000"
6904 + range 0x400000 0x40000000
6905 ---help---
6906 This gives the physical address where the kernel is loaded.
6907
6908 @@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
6909 hex
6910 prompt "Alignment value to which kernel should be aligned" if X86_32
6911 default "0x1000000"
6912 + range 0x400000 0x1000000 if PAX_KERNEXEC
6913 range 0x2000 0x1000000
6914 ---help---
6915 This value puts the alignment restrictions on physical address
6916 @@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
6917 Say N if you want to disable CPU hotplug.
6918
6919 config COMPAT_VDSO
6920 - def_bool y
6921 + def_bool n
6922 prompt "Compat VDSO support"
6923 depends on X86_32 || IA32_EMULATION
6924 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
6925 ---help---
6926 Map the 32-bit VDSO to the predictable old-style address too.
6927 ---help---
6928 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
6929 index 0e566103..1a6b57e 100644
6930 --- a/arch/x86/Kconfig.cpu
6931 +++ b/arch/x86/Kconfig.cpu
6932 @@ -340,7 +340,7 @@ config X86_PPRO_FENCE
6933
6934 config X86_F00F_BUG
6935 def_bool y
6936 - depends on M586MMX || M586TSC || M586 || M486 || M386
6937 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
6938
6939 config X86_WP_WORKS_OK
6940 def_bool y
6941 @@ -360,7 +360,7 @@ config X86_POPAD_OK
6942
6943 config X86_ALIGNMENT_16
6944 def_bool y
6945 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
6946 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
6947
6948 config X86_INTEL_USERCOPY
6949 def_bool y
6950 @@ -406,7 +406,7 @@ config X86_CMPXCHG64
6951 # generates cmov.
6952 config X86_CMOV
6953 def_bool y
6954 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
6955 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
6956
6957 config X86_MINIMUM_CPU_FAMILY
6958 int
6959 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
6960 index d105f29..c928727 100644
6961 --- a/arch/x86/Kconfig.debug
6962 +++ b/arch/x86/Kconfig.debug
6963 @@ -99,7 +99,7 @@ config X86_PTDUMP
6964 config DEBUG_RODATA
6965 bool "Write protect kernel read-only data structures"
6966 default y
6967 - depends on DEBUG_KERNEL
6968 + depends on DEBUG_KERNEL && BROKEN
6969 ---help---
6970 Mark the kernel read-only data as write-protected in the pagetables,
6971 in order to catch accidental (and incorrect) writes to such const
6972 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
6973 index d2d24c9..0f21f8d 100644
6974 --- a/arch/x86/Makefile
6975 +++ b/arch/x86/Makefile
6976 @@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
6977 else
6978 BITS := 64
6979 UTS_MACHINE := x86_64
6980 + biarch := $(call cc-option,-m64)
6981 CHECKFLAGS += -D__x86_64__ -m64
6982
6983 KBUILD_AFLAGS += -m64
6984 @@ -189,3 +190,12 @@ define archhelp
6985 echo ' FDARGS="..." arguments for the booted kernel'
6986 echo ' FDINITRD=file initrd for the booted kernel'
6987 endef
6988 +
6989 +define OLD_LD
6990 +
6991 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
6992 +*** Please upgrade your binutils to 2.18 or newer
6993 +endef
6994 +
6995 +archprepare:
6996 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
6997 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
6998 index ec749c2..bbb5319 100644
6999 --- a/arch/x86/boot/Makefile
7000 +++ b/arch/x86/boot/Makefile
7001 @@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7002 $(call cc-option, -fno-stack-protector) \
7003 $(call cc-option, -mpreferred-stack-boundary=2)
7004 KBUILD_CFLAGS += $(call cc-option, -m32)
7005 +ifdef CONSTIFY_PLUGIN
7006 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7007 +endif
7008 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7009 GCOV_PROFILE := n
7010
7011 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7012 index 878e4b9..20537ab 100644
7013 --- a/arch/x86/boot/bitops.h
7014 +++ b/arch/x86/boot/bitops.h
7015 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7016 u8 v;
7017 const u32 *p = (const u32 *)addr;
7018
7019 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7020 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7021 return v;
7022 }
7023
7024 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7025
7026 static inline void set_bit(int nr, void *addr)
7027 {
7028 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7029 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7030 }
7031
7032 #endif /* BOOT_BITOPS_H */
7033 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7034 index 98239d2..f40214c 100644
7035 --- a/arch/x86/boot/boot.h
7036 +++ b/arch/x86/boot/boot.h
7037 @@ -82,7 +82,7 @@ static inline void io_delay(void)
7038 static inline u16 ds(void)
7039 {
7040 u16 seg;
7041 - asm("movw %%ds,%0" : "=rm" (seg));
7042 + asm volatile("movw %%ds,%0" : "=rm" (seg));
7043 return seg;
7044 }
7045
7046 @@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7047 static inline int memcmp(const void *s1, const void *s2, size_t len)
7048 {
7049 u8 diff;
7050 - asm("repe; cmpsb; setnz %0"
7051 + asm volatile("repe; cmpsb; setnz %0"
7052 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7053 return diff;
7054 }
7055 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7056 index f8ed065..5bf5ff3 100644
7057 --- a/arch/x86/boot/compressed/Makefile
7058 +++ b/arch/x86/boot/compressed/Makefile
7059 @@ -13,6 +13,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7060 KBUILD_CFLAGS += $(cflags-y)
7061 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7062 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7063 +ifdef CONSTIFY_PLUGIN
7064 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7065 +endif
7066
7067 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7068 GCOV_PROFILE := n
7069 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7070 index f543b70..b60fba8 100644
7071 --- a/arch/x86/boot/compressed/head_32.S
7072 +++ b/arch/x86/boot/compressed/head_32.S
7073 @@ -76,7 +76,7 @@ ENTRY(startup_32)
7074 notl %eax
7075 andl %eax, %ebx
7076 #else
7077 - movl $LOAD_PHYSICAL_ADDR, %ebx
7078 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7079 #endif
7080
7081 /* Target address to relocate to for decompression */
7082 @@ -149,7 +149,7 @@ relocated:
7083 * and where it was actually loaded.
7084 */
7085 movl %ebp, %ebx
7086 - subl $LOAD_PHYSICAL_ADDR, %ebx
7087 + subl $____LOAD_PHYSICAL_ADDR, %ebx
7088 jz 2f /* Nothing to be done if loaded at compiled addr. */
7089 /*
7090 * Process relocations.
7091 @@ -157,8 +157,7 @@ relocated:
7092
7093 1: subl $4, %edi
7094 movl (%edi), %ecx
7095 - testl %ecx, %ecx
7096 - jz 2f
7097 + jecxz 2f
7098 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7099 jmp 1b
7100 2:
7101 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7102 index 077e1b6..2c6b13b 100644
7103 --- a/arch/x86/boot/compressed/head_64.S
7104 +++ b/arch/x86/boot/compressed/head_64.S
7105 @@ -91,7 +91,7 @@ ENTRY(startup_32)
7106 notl %eax
7107 andl %eax, %ebx
7108 #else
7109 - movl $LOAD_PHYSICAL_ADDR, %ebx
7110 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7111 #endif
7112
7113 /* Target address to relocate to for decompression */
7114 @@ -183,7 +183,7 @@ no_longmode:
7115 hlt
7116 jmp 1b
7117
7118 -#include "../../kernel/verify_cpu_64.S"
7119 +#include "../../kernel/verify_cpu.S"
7120
7121 /*
7122 * Be careful here startup_64 needs to be at a predictable
7123 @@ -234,7 +234,7 @@ ENTRY(startup_64)
7124 notq %rax
7125 andq %rax, %rbp
7126 #else
7127 - movq $LOAD_PHYSICAL_ADDR, %rbp
7128 + movq $____LOAD_PHYSICAL_ADDR, %rbp
7129 #endif
7130
7131 /* Target address to relocate to for decompression */
7132 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7133 index 842b2a3..f00178b 100644
7134 --- a/arch/x86/boot/compressed/misc.c
7135 +++ b/arch/x86/boot/compressed/misc.c
7136 @@ -288,7 +288,7 @@ static void parse_elf(void *output)
7137 case PT_LOAD:
7138 #ifdef CONFIG_RELOCATABLE
7139 dest = output;
7140 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7141 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7142 #else
7143 dest = (void *)(phdr->p_paddr);
7144 #endif
7145 @@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7146 error("Destination address too large");
7147 #endif
7148 #ifndef CONFIG_RELOCATABLE
7149 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7150 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7151 error("Wrong destination address");
7152 #endif
7153
7154 diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
7155 index bcbd36c..b1754af 100644
7156 --- a/arch/x86/boot/compressed/mkpiggy.c
7157 +++ b/arch/x86/boot/compressed/mkpiggy.c
7158 @@ -74,7 +74,7 @@ int main(int argc, char *argv[])
7159
7160 offs = (olen > ilen) ? olen - ilen : 0;
7161 offs += olen >> 12; /* Add 8 bytes for each 32K block */
7162 - offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
7163 + offs += 64*1024; /* Add 64K bytes slack */
7164 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
7165
7166 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
7167 diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
7168 index bbeb0c3..f5167ab 100644
7169 --- a/arch/x86/boot/compressed/relocs.c
7170 +++ b/arch/x86/boot/compressed/relocs.c
7171 @@ -10,8 +10,11 @@
7172 #define USE_BSD
7173 #include <endian.h>
7174
7175 +#include "../../../../include/linux/autoconf.h"
7176 +
7177 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
7178 static Elf32_Ehdr ehdr;
7179 +static Elf32_Phdr *phdr;
7180 static unsigned long reloc_count, reloc_idx;
7181 static unsigned long *relocs;
7182
7183 @@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
7184
7185 static int is_safe_abs_reloc(const char* sym_name)
7186 {
7187 - int i;
7188 + unsigned int i;
7189
7190 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
7191 if (!strcmp(sym_name, safe_abs_relocs[i]))
7192 @@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
7193 }
7194 }
7195
7196 +static void read_phdrs(FILE *fp)
7197 +{
7198 + unsigned int i;
7199 +
7200 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
7201 + if (!phdr) {
7202 + die("Unable to allocate %d program headers\n",
7203 + ehdr.e_phnum);
7204 + }
7205 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
7206 + die("Seek to %d failed: %s\n",
7207 + ehdr.e_phoff, strerror(errno));
7208 + }
7209 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
7210 + die("Cannot read ELF program headers: %s\n",
7211 + strerror(errno));
7212 + }
7213 + for(i = 0; i < ehdr.e_phnum; i++) {
7214 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
7215 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
7216 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
7217 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
7218 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
7219 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
7220 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
7221 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
7222 + }
7223 +
7224 +}
7225 +
7226 static void read_shdrs(FILE *fp)
7227 {
7228 - int i;
7229 + unsigned int i;
7230 Elf32_Shdr shdr;
7231
7232 secs = calloc(ehdr.e_shnum, sizeof(struct section));
7233 @@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
7234
7235 static void read_strtabs(FILE *fp)
7236 {
7237 - int i;
7238 + unsigned int i;
7239 for (i = 0; i < ehdr.e_shnum; i++) {
7240 struct section *sec = &secs[i];
7241 if (sec->shdr.sh_type != SHT_STRTAB) {
7242 @@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
7243
7244 static void read_symtabs(FILE *fp)
7245 {
7246 - int i,j;
7247 + unsigned int i,j;
7248 for (i = 0; i < ehdr.e_shnum; i++) {
7249 struct section *sec = &secs[i];
7250 if (sec->shdr.sh_type != SHT_SYMTAB) {
7251 @@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
7252
7253 static void read_relocs(FILE *fp)
7254 {
7255 - int i,j;
7256 + unsigned int i,j;
7257 + uint32_t base;
7258 +
7259 for (i = 0; i < ehdr.e_shnum; i++) {
7260 struct section *sec = &secs[i];
7261 if (sec->shdr.sh_type != SHT_REL) {
7262 @@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
7263 die("Cannot read symbol table: %s\n",
7264 strerror(errno));
7265 }
7266 + base = 0;
7267 + for (j = 0; j < ehdr.e_phnum; j++) {
7268 + if (phdr[j].p_type != PT_LOAD )
7269 + continue;
7270 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
7271 + continue;
7272 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
7273 + break;
7274 + }
7275 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
7276 Elf32_Rel *rel = &sec->reltab[j];
7277 - rel->r_offset = elf32_to_cpu(rel->r_offset);
7278 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
7279 rel->r_info = elf32_to_cpu(rel->r_info);
7280 }
7281 }
7282 @@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
7283
7284 static void print_absolute_symbols(void)
7285 {
7286 - int i;
7287 + unsigned int i;
7288 printf("Absolute symbols\n");
7289 printf(" Num: Value Size Type Bind Visibility Name\n");
7290 for (i = 0; i < ehdr.e_shnum; i++) {
7291 struct section *sec = &secs[i];
7292 char *sym_strtab;
7293 Elf32_Sym *sh_symtab;
7294 - int j;
7295 + unsigned int j;
7296
7297 if (sec->shdr.sh_type != SHT_SYMTAB) {
7298 continue;
7299 @@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
7300
7301 static void print_absolute_relocs(void)
7302 {
7303 - int i, printed = 0;
7304 + unsigned int i, printed = 0;
7305
7306 for (i = 0; i < ehdr.e_shnum; i++) {
7307 struct section *sec = &secs[i];
7308 struct section *sec_applies, *sec_symtab;
7309 char *sym_strtab;
7310 Elf32_Sym *sh_symtab;
7311 - int j;
7312 + unsigned int j;
7313 if (sec->shdr.sh_type != SHT_REL) {
7314 continue;
7315 }
7316 @@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
7317
7318 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7319 {
7320 - int i;
7321 + unsigned int i;
7322 /* Walk through the relocations */
7323 for (i = 0; i < ehdr.e_shnum; i++) {
7324 char *sym_strtab;
7325 Elf32_Sym *sh_symtab;
7326 struct section *sec_applies, *sec_symtab;
7327 - int j;
7328 + unsigned int j;
7329 struct section *sec = &secs[i];
7330
7331 if (sec->shdr.sh_type != SHT_REL) {
7332 @@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7333 if (sym->st_shndx == SHN_ABS) {
7334 continue;
7335 }
7336 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
7337 + if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
7338 + continue;
7339 +
7340 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
7341 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
7342 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
7343 + continue;
7344 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
7345 + continue;
7346 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
7347 + continue;
7348 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
7349 + continue;
7350 +#endif
7351 if (r_type == R_386_NONE || r_type == R_386_PC32) {
7352 /*
7353 * NONE can be ignored and and PC relative
7354 @@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, const void *vb)
7355
7356 static void emit_relocs(int as_text)
7357 {
7358 - int i;
7359 + unsigned int i;
7360 /* Count how many relocations I have and allocate space for them. */
7361 reloc_count = 0;
7362 walk_relocs(count_reloc);
7363 @@ -634,6 +693,7 @@ int main(int argc, char **argv)
7364 fname, strerror(errno));
7365 }
7366 read_ehdr(fp);
7367 + read_phdrs(fp);
7368 read_shdrs(fp);
7369 read_strtabs(fp);
7370 read_symtabs(fp);
7371 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
7372 index 4d3ff03..e4972ff 100644
7373 --- a/arch/x86/boot/cpucheck.c
7374 +++ b/arch/x86/boot/cpucheck.c
7375 @@ -74,7 +74,7 @@ static int has_fpu(void)
7376 u16 fcw = -1, fsw = -1;
7377 u32 cr0;
7378
7379 - asm("movl %%cr0,%0" : "=r" (cr0));
7380 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
7381 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
7382 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
7383 asm volatile("movl %0,%%cr0" : : "r" (cr0));
7384 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
7385 {
7386 u32 f0, f1;
7387
7388 - asm("pushfl ; "
7389 + asm volatile("pushfl ; "
7390 "pushfl ; "
7391 "popl %0 ; "
7392 "movl %0,%1 ; "
7393 @@ -115,7 +115,7 @@ static void get_flags(void)
7394 set_bit(X86_FEATURE_FPU, cpu.flags);
7395
7396 if (has_eflag(X86_EFLAGS_ID)) {
7397 - asm("cpuid"
7398 + asm volatile("cpuid"
7399 : "=a" (max_intel_level),
7400 "=b" (cpu_vendor[0]),
7401 "=d" (cpu_vendor[1]),
7402 @@ -124,7 +124,7 @@ static void get_flags(void)
7403
7404 if (max_intel_level >= 0x00000001 &&
7405 max_intel_level <= 0x0000ffff) {
7406 - asm("cpuid"
7407 + asm volatile("cpuid"
7408 : "=a" (tfms),
7409 "=c" (cpu.flags[4]),
7410 "=d" (cpu.flags[0])
7411 @@ -136,7 +136,7 @@ static void get_flags(void)
7412 cpu.model += ((tfms >> 16) & 0xf) << 4;
7413 }
7414
7415 - asm("cpuid"
7416 + asm volatile("cpuid"
7417 : "=a" (max_amd_level)
7418 : "a" (0x80000000)
7419 : "ebx", "ecx", "edx");
7420 @@ -144,7 +144,7 @@ static void get_flags(void)
7421 if (max_amd_level >= 0x80000001 &&
7422 max_amd_level <= 0x8000ffff) {
7423 u32 eax = 0x80000001;
7424 - asm("cpuid"
7425 + asm volatile("cpuid"
7426 : "+a" (eax),
7427 "=c" (cpu.flags[6]),
7428 "=d" (cpu.flags[1])
7429 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7430 u32 ecx = MSR_K7_HWCR;
7431 u32 eax, edx;
7432
7433 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7434 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7435 eax &= ~(1 << 15);
7436 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7437 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7438
7439 get_flags(); /* Make sure it really did something */
7440 err = check_flags();
7441 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7442 u32 ecx = MSR_VIA_FCR;
7443 u32 eax, edx;
7444
7445 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7446 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7447 eax |= (1<<1)|(1<<7);
7448 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7449 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7450
7451 set_bit(X86_FEATURE_CX8, cpu.flags);
7452 err = check_flags();
7453 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7454 u32 eax, edx;
7455 u32 level = 1;
7456
7457 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7458 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7459 - asm("cpuid"
7460 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7461 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7462 + asm volatile("cpuid"
7463 : "+a" (level), "=d" (cpu.flags[0])
7464 : : "ecx", "ebx");
7465 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7466 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7467
7468 err = check_flags();
7469 }
7470 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
7471 index b31cc54..8d69237 100644
7472 --- a/arch/x86/boot/header.S
7473 +++ b/arch/x86/boot/header.S
7474 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
7475 # single linked list of
7476 # struct setup_data
7477
7478 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
7479 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
7480
7481 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
7482 #define VO_INIT_SIZE (VO__end - VO__text)
7483 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
7484 index cae3feb..ff8ff2a 100644
7485 --- a/arch/x86/boot/memory.c
7486 +++ b/arch/x86/boot/memory.c
7487 @@ -19,7 +19,7 @@
7488
7489 static int detect_memory_e820(void)
7490 {
7491 - int count = 0;
7492 + unsigned int count = 0;
7493 struct biosregs ireg, oreg;
7494 struct e820entry *desc = boot_params.e820_map;
7495 static struct e820entry buf; /* static so it is zeroed */
7496 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
7497 index 11e8c6e..fdbb1ed 100644
7498 --- a/arch/x86/boot/video-vesa.c
7499 +++ b/arch/x86/boot/video-vesa.c
7500 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
7501
7502 boot_params.screen_info.vesapm_seg = oreg.es;
7503 boot_params.screen_info.vesapm_off = oreg.di;
7504 + boot_params.screen_info.vesapm_size = oreg.cx;
7505 }
7506
7507 /*
7508 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
7509 index d42da38..787cdf3 100644
7510 --- a/arch/x86/boot/video.c
7511 +++ b/arch/x86/boot/video.c
7512 @@ -90,7 +90,7 @@ static void store_mode_params(void)
7513 static unsigned int get_entry(void)
7514 {
7515 char entry_buf[4];
7516 - int i, len = 0;
7517 + unsigned int i, len = 0;
7518 int key;
7519 unsigned int v;
7520
7521 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
7522 index 5b577d5..3c1fed4 100644
7523 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
7524 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
7525 @@ -8,6 +8,8 @@
7526 * including this sentence is retained in full.
7527 */
7528
7529 +#include <asm/alternative-asm.h>
7530 +
7531 .extern crypto_ft_tab
7532 .extern crypto_it_tab
7533 .extern crypto_fl_tab
7534 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
7535 je B192; \
7536 leaq 32(r9),r9;
7537
7538 +#define ret pax_force_retaddr 0, 1; ret
7539 +
7540 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
7541 movq r1,r2; \
7542 movq r3,r4; \
7543 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
7544 index eb0566e..e3ebad8 100644
7545 --- a/arch/x86/crypto/aesni-intel_asm.S
7546 +++ b/arch/x86/crypto/aesni-intel_asm.S
7547 @@ -16,6 +16,7 @@
7548 */
7549
7550 #include <linux/linkage.h>
7551 +#include <asm/alternative-asm.h>
7552
7553 .text
7554
7555 @@ -52,6 +53,7 @@ _key_expansion_256a:
7556 pxor %xmm1, %xmm0
7557 movaps %xmm0, (%rcx)
7558 add $0x10, %rcx
7559 + pax_force_retaddr_bts
7560 ret
7561
7562 _key_expansion_192a:
7563 @@ -75,6 +77,7 @@ _key_expansion_192a:
7564 shufps $0b01001110, %xmm2, %xmm1
7565 movaps %xmm1, 16(%rcx)
7566 add $0x20, %rcx
7567 + pax_force_retaddr_bts
7568 ret
7569
7570 _key_expansion_192b:
7571 @@ -93,6 +96,7 @@ _key_expansion_192b:
7572
7573 movaps %xmm0, (%rcx)
7574 add $0x10, %rcx
7575 + pax_force_retaddr_bts
7576 ret
7577
7578 _key_expansion_256b:
7579 @@ -104,6 +108,7 @@ _key_expansion_256b:
7580 pxor %xmm1, %xmm2
7581 movaps %xmm2, (%rcx)
7582 add $0x10, %rcx
7583 + pax_force_retaddr_bts
7584 ret
7585
7586 /*
7587 @@ -239,7 +244,9 @@ ENTRY(aesni_set_key)
7588 cmp %rcx, %rdi
7589 jb .Ldec_key_loop
7590 xor %rax, %rax
7591 + pax_force_retaddr 0, 1
7592 ret
7593 +ENDPROC(aesni_set_key)
7594
7595 /*
7596 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
7597 @@ -249,7 +256,9 @@ ENTRY(aesni_enc)
7598 movups (INP), STATE # input
7599 call _aesni_enc1
7600 movups STATE, (OUTP) # output
7601 + pax_force_retaddr 0, 1
7602 ret
7603 +ENDPROC(aesni_enc)
7604
7605 /*
7606 * _aesni_enc1: internal ABI
7607 @@ -319,6 +328,7 @@ _aesni_enc1:
7608 movaps 0x70(TKEYP), KEY
7609 # aesenclast KEY, STATE # last round
7610 .byte 0x66, 0x0f, 0x38, 0xdd, 0xc2
7611 + pax_force_retaddr_bts
7612 ret
7613
7614 /*
7615 @@ -482,6 +492,7 @@ _aesni_enc4:
7616 .byte 0x66, 0x0f, 0x38, 0xdd, 0xea
7617 # aesenclast KEY, STATE4
7618 .byte 0x66, 0x0f, 0x38, 0xdd, 0xf2
7619 + pax_force_retaddr_bts
7620 ret
7621
7622 /*
7623 @@ -493,7 +504,9 @@ ENTRY(aesni_dec)
7624 movups (INP), STATE # input
7625 call _aesni_dec1
7626 movups STATE, (OUTP) #output
7627 + pax_force_retaddr 0, 1
7628 ret
7629 +ENDPROC(aesni_dec)
7630
7631 /*
7632 * _aesni_dec1: internal ABI
7633 @@ -563,6 +576,7 @@ _aesni_dec1:
7634 movaps 0x70(TKEYP), KEY
7635 # aesdeclast KEY, STATE # last round
7636 .byte 0x66, 0x0f, 0x38, 0xdf, 0xc2
7637 + pax_force_retaddr_bts
7638 ret
7639
7640 /*
7641 @@ -726,6 +740,7 @@ _aesni_dec4:
7642 .byte 0x66, 0x0f, 0x38, 0xdf, 0xea
7643 # aesdeclast KEY, STATE4
7644 .byte 0x66, 0x0f, 0x38, 0xdf, 0xf2
7645 + pax_force_retaddr_bts
7646 ret
7647
7648 /*
7649 @@ -769,7 +784,9 @@ ENTRY(aesni_ecb_enc)
7650 cmp $16, LEN
7651 jge .Lecb_enc_loop1
7652 .Lecb_enc_ret:
7653 + pax_force_retaddr 0, 1
7654 ret
7655 +ENDPROC(aesni_ecb_enc)
7656
7657 /*
7658 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7659 @@ -813,7 +830,9 @@ ENTRY(aesni_ecb_dec)
7660 cmp $16, LEN
7661 jge .Lecb_dec_loop1
7662 .Lecb_dec_ret:
7663 + pax_force_retaddr 0, 1
7664 ret
7665 +ENDPROC(aesni_ecb_dec)
7666
7667 /*
7668 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7669 @@ -837,7 +856,9 @@ ENTRY(aesni_cbc_enc)
7670 jge .Lcbc_enc_loop
7671 movups STATE, (IVP)
7672 .Lcbc_enc_ret:
7673 + pax_force_retaddr 0, 1
7674 ret
7675 +ENDPROC(aesni_cbc_enc)
7676
7677 /*
7678 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7679 @@ -894,4 +915,6 @@ ENTRY(aesni_cbc_dec)
7680 .Lcbc_dec_ret:
7681 movups IV, (IVP)
7682 .Lcbc_dec_just_ret:
7683 + pax_force_retaddr 0, 1
7684 ret
7685 +ENDPROC(aesni_cbc_dec)
7686 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7687 index 6214a9b..1f4fc9a 100644
7688 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
7689 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7690 @@ -1,3 +1,5 @@
7691 +#include <asm/alternative-asm.h>
7692 +
7693 # enter ECRYPT_encrypt_bytes
7694 .text
7695 .p2align 5
7696 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
7697 add %r11,%rsp
7698 mov %rdi,%rax
7699 mov %rsi,%rdx
7700 + pax_force_retaddr 0, 1
7701 ret
7702 # bytesatleast65:
7703 ._bytesatleast65:
7704 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
7705 add %r11,%rsp
7706 mov %rdi,%rax
7707 mov %rsi,%rdx
7708 + pax_force_retaddr
7709 ret
7710 # enter ECRYPT_ivsetup
7711 .text
7712 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
7713 add %r11,%rsp
7714 mov %rdi,%rax
7715 mov %rsi,%rdx
7716 + pax_force_retaddr
7717 ret
7718 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
7719 index 35974a5..5662ae2 100644
7720 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
7721 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
7722 @@ -21,6 +21,7 @@
7723 .text
7724
7725 #include <asm/asm-offsets.h>
7726 +#include <asm/alternative-asm.h>
7727
7728 #define a_offset 0
7729 #define b_offset 4
7730 @@ -269,6 +270,7 @@ twofish_enc_blk:
7731
7732 popq R1
7733 movq $1,%rax
7734 + pax_force_retaddr 0, 1
7735 ret
7736
7737 twofish_dec_blk:
7738 @@ -321,4 +323,5 @@ twofish_dec_blk:
7739
7740 popq R1
7741 movq $1,%rax
7742 + pax_force_retaddr 0, 1
7743 ret
7744 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
7745 index 14531ab..a89a0c0 100644
7746 --- a/arch/x86/ia32/ia32_aout.c
7747 +++ b/arch/x86/ia32/ia32_aout.c
7748 @@ -169,6 +169,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
7749 unsigned long dump_start, dump_size;
7750 struct user32 dump;
7751
7752 + memset(&dump, 0, sizeof(dump));
7753 +
7754 fs = get_fs();
7755 set_fs(KERNEL_DS);
7756 has_dumped = 1;
7757 @@ -218,12 +220,6 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
7758 dump_size = dump.u_ssize << PAGE_SHIFT;
7759 DUMP_WRITE(dump_start, dump_size);
7760 }
7761 - /*
7762 - * Finally dump the task struct. Not be used by gdb, but
7763 - * could be useful
7764 - */
7765 - set_fs(KERNEL_DS);
7766 - DUMP_WRITE(current, sizeof(*current));
7767 end_coredump:
7768 set_fs(fs);
7769 return has_dumped;
7770 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
7771 index 588a7aa..a3468b0 100644
7772 --- a/arch/x86/ia32/ia32_signal.c
7773 +++ b/arch/x86/ia32/ia32_signal.c
7774 @@ -167,7 +167,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
7775 }
7776 seg = get_fs();
7777 set_fs(KERNEL_DS);
7778 - ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
7779 + ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
7780 set_fs(seg);
7781 if (ret >= 0 && uoss_ptr) {
7782 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
7783 @@ -374,7 +374,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
7784 */
7785 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
7786 size_t frame_size,
7787 - void **fpstate)
7788 + void __user **fpstate)
7789 {
7790 unsigned long sp;
7791
7792 @@ -395,7 +395,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
7793
7794 if (used_math()) {
7795 sp = sp - sig_xstate_ia32_size;
7796 - *fpstate = (struct _fpstate_ia32 *) sp;
7797 + *fpstate = (struct _fpstate_ia32 __user *) sp;
7798 if (save_i387_xstate_ia32(*fpstate) < 0)
7799 return (void __user *) -1L;
7800 }
7801 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
7802 sp -= frame_size;
7803 /* Align the stack pointer according to the i386 ABI,
7804 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
7805 - sp = ((sp + 4) & -16ul) - 4;
7806 + sp = ((sp - 12) & -16ul) - 4;
7807 return (void __user *) sp;
7808 }
7809
7810 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
7811 * These are actually not used anymore, but left because some
7812 * gdb versions depend on them as a marker.
7813 */
7814 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
7815 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
7816 } put_user_catch(err);
7817
7818 if (err)
7819 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
7820 0xb8,
7821 __NR_ia32_rt_sigreturn,
7822 0x80cd,
7823 - 0,
7824 + 0
7825 };
7826
7827 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
7828 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
7829
7830 if (ka->sa.sa_flags & SA_RESTORER)
7831 restorer = ka->sa.sa_restorer;
7832 + else if (current->mm->context.vdso)
7833 + /* Return stub is in 32bit vsyscall page */
7834 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
7835 else
7836 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
7837 - rt_sigreturn);
7838 + restorer = &frame->retcode;
7839 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
7840
7841 /*
7842 * Not actually used anymore, but left because some gdb
7843 * versions need it.
7844 */
7845 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
7846 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
7847 } put_user_catch(err);
7848
7849 if (err)
7850 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
7851 index 4edd8eb..29124b4 100644
7852 --- a/arch/x86/ia32/ia32entry.S
7853 +++ b/arch/x86/ia32/ia32entry.S
7854 @@ -13,7 +13,9 @@
7855 #include <asm/thread_info.h>
7856 #include <asm/segment.h>
7857 #include <asm/irqflags.h>
7858 +#include <asm/pgtable.h>
7859 #include <linux/linkage.h>
7860 +#include <asm/alternative-asm.h>
7861
7862 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
7863 #include <linux/elf-em.h>
7864 @@ -93,6 +95,32 @@ ENTRY(native_irq_enable_sysexit)
7865 ENDPROC(native_irq_enable_sysexit)
7866 #endif
7867
7868 + .macro pax_enter_kernel_user
7869 + pax_set_fptr_mask
7870 +#ifdef CONFIG_PAX_MEMORY_UDEREF
7871 + call pax_enter_kernel_user
7872 +#endif
7873 + .endm
7874 +
7875 + .macro pax_exit_kernel_user
7876 +#ifdef CONFIG_PAX_MEMORY_UDEREF
7877 + call pax_exit_kernel_user
7878 +#endif
7879 +#ifdef CONFIG_PAX_RANDKSTACK
7880 + pushq %rax
7881 + pushq %r11
7882 + call pax_randomize_kstack
7883 + popq %r11
7884 + popq %rax
7885 +#endif
7886 + .endm
7887 +
7888 +.macro pax_erase_kstack
7889 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
7890 + call pax_erase_kstack
7891 +#endif
7892 +.endm
7893 +
7894 /*
7895 * 32bit SYSENTER instruction entry.
7896 *
7897 @@ -119,12 +147,6 @@ ENTRY(ia32_sysenter_target)
7898 CFI_REGISTER rsp,rbp
7899 SWAPGS_UNSAFE_STACK
7900 movq PER_CPU_VAR(kernel_stack), %rsp
7901 - addq $(KERNEL_STACK_OFFSET),%rsp
7902 - /*
7903 - * No need to follow this irqs on/off section: the syscall
7904 - * disabled irqs, here we enable it straight after entry:
7905 - */
7906 - ENABLE_INTERRUPTS(CLBR_NONE)
7907 movl %ebp,%ebp /* zero extension */
7908 pushq $__USER32_DS
7909 CFI_ADJUST_CFA_OFFSET 8
7910 @@ -135,28 +157,42 @@ ENTRY(ia32_sysenter_target)
7911 pushfq
7912 CFI_ADJUST_CFA_OFFSET 8
7913 /*CFI_REL_OFFSET rflags,0*/
7914 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
7915 - CFI_REGISTER rip,r10
7916 + orl $X86_EFLAGS_IF,(%rsp)
7917 + GET_THREAD_INFO(%r11)
7918 + movl TI_sysenter_return(%r11), %r11d
7919 + CFI_REGISTER rip,r11
7920 pushq $__USER32_CS
7921 CFI_ADJUST_CFA_OFFSET 8
7922 /*CFI_REL_OFFSET cs,0*/
7923 movl %eax, %eax
7924 - pushq %r10
7925 + pushq %r11
7926 CFI_ADJUST_CFA_OFFSET 8
7927 CFI_REL_OFFSET rip,0
7928 pushq %rax
7929 CFI_ADJUST_CFA_OFFSET 8
7930 cld
7931 SAVE_ARGS 0,0,1
7932 + pax_enter_kernel_user
7933 + /*
7934 + * No need to follow this irqs on/off section: the syscall
7935 + * disabled irqs, here we enable it straight after entry:
7936 + */
7937 + ENABLE_INTERRUPTS(CLBR_NONE)
7938 /* no need to do an access_ok check here because rbp has been
7939 32bit zero extended */
7940 +
7941 +#ifdef CONFIG_PAX_MEMORY_UDEREF
7942 + mov $PAX_USER_SHADOW_BASE,%r11
7943 + add %r11,%rbp
7944 +#endif
7945 +
7946 1: movl (%rbp),%ebp
7947 .section __ex_table,"a"
7948 .quad 1b,ia32_badarg
7949 .previous
7950 - GET_THREAD_INFO(%r10)
7951 - orl $TS_COMPAT,TI_status(%r10)
7952 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
7953 + GET_THREAD_INFO(%r11)
7954 + orl $TS_COMPAT,TI_status(%r11)
7955 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
7956 CFI_REMEMBER_STATE
7957 jnz sysenter_tracesys
7958 cmpq $(IA32_NR_syscalls-1),%rax
7959 @@ -166,13 +202,15 @@ sysenter_do_call:
7960 sysenter_dispatch:
7961 call *ia32_sys_call_table(,%rax,8)
7962 movq %rax,RAX-ARGOFFSET(%rsp)
7963 - GET_THREAD_INFO(%r10)
7964 + GET_THREAD_INFO(%r11)
7965 DISABLE_INTERRUPTS(CLBR_NONE)
7966 TRACE_IRQS_OFF
7967 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
7968 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
7969 jnz sysexit_audit
7970 sysexit_from_sys_call:
7971 - andl $~TS_COMPAT,TI_status(%r10)
7972 + pax_exit_kernel_user
7973 + pax_erase_kstack
7974 + andl $~TS_COMPAT,TI_status(%r11)
7975 /* clear IF, that popfq doesn't enable interrupts early */
7976 andl $~0x200,EFLAGS-R11(%rsp)
7977 movl RIP-R11(%rsp),%edx /* User %eip */
7978 @@ -200,6 +238,9 @@ sysexit_from_sys_call:
7979 movl %eax,%esi /* 2nd arg: syscall number */
7980 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
7981 call audit_syscall_entry
7982 +
7983 + pax_erase_kstack
7984 +
7985 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
7986 cmpq $(IA32_NR_syscalls-1),%rax
7987 ja ia32_badsys
7988 @@ -211,7 +252,7 @@ sysexit_from_sys_call:
7989 .endm
7990
7991 .macro auditsys_exit exit
7992 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
7993 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
7994 jnz ia32_ret_from_sys_call
7995 TRACE_IRQS_ON
7996 sti
7997 @@ -221,12 +262,12 @@ sysexit_from_sys_call:
7998 movzbl %al,%edi /* zero-extend that into %edi */
7999 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
8000 call audit_syscall_exit
8001 - GET_THREAD_INFO(%r10)
8002 + GET_THREAD_INFO(%r11)
8003 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
8004 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8005 cli
8006 TRACE_IRQS_OFF
8007 - testl %edi,TI_flags(%r10)
8008 + testl %edi,TI_flags(%r11)
8009 jz \exit
8010 CLEAR_RREGS -ARGOFFSET
8011 jmp int_with_check
8012 @@ -244,7 +285,7 @@ sysexit_audit:
8013
8014 sysenter_tracesys:
8015 #ifdef CONFIG_AUDITSYSCALL
8016 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8017 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8018 jz sysenter_auditsys
8019 #endif
8020 SAVE_REST
8021 @@ -252,6 +293,9 @@ sysenter_tracesys:
8022 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8023 movq %rsp,%rdi /* &pt_regs -> arg1 */
8024 call syscall_trace_enter
8025 +
8026 + pax_erase_kstack
8027 +
8028 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8029 RESTORE_REST
8030 cmpq $(IA32_NR_syscalls-1),%rax
8031 @@ -283,19 +327,20 @@ ENDPROC(ia32_sysenter_target)
8032 ENTRY(ia32_cstar_target)
8033 CFI_STARTPROC32 simple
8034 CFI_SIGNAL_FRAME
8035 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8036 + CFI_DEF_CFA rsp,0
8037 CFI_REGISTER rip,rcx
8038 /*CFI_REGISTER rflags,r11*/
8039 SWAPGS_UNSAFE_STACK
8040 movl %esp,%r8d
8041 CFI_REGISTER rsp,r8
8042 movq PER_CPU_VAR(kernel_stack),%rsp
8043 + SAVE_ARGS 8*6,1,1
8044 + pax_enter_kernel_user
8045 /*
8046 * No need to follow this irqs on/off section: the syscall
8047 * disabled irqs and here we enable it straight after entry:
8048 */
8049 ENABLE_INTERRUPTS(CLBR_NONE)
8050 - SAVE_ARGS 8,1,1
8051 movl %eax,%eax /* zero extension */
8052 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8053 movq %rcx,RIP-ARGOFFSET(%rsp)
8054 @@ -311,13 +356,19 @@ ENTRY(ia32_cstar_target)
8055 /* no need to do an access_ok check here because r8 has been
8056 32bit zero extended */
8057 /* hardware stack frame is complete now */
8058 +
8059 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8060 + mov $PAX_USER_SHADOW_BASE,%r11
8061 + add %r11,%r8
8062 +#endif
8063 +
8064 1: movl (%r8),%r9d
8065 .section __ex_table,"a"
8066 .quad 1b,ia32_badarg
8067 .previous
8068 - GET_THREAD_INFO(%r10)
8069 - orl $TS_COMPAT,TI_status(%r10)
8070 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8071 + GET_THREAD_INFO(%r11)
8072 + orl $TS_COMPAT,TI_status(%r11)
8073 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8074 CFI_REMEMBER_STATE
8075 jnz cstar_tracesys
8076 cmpq $IA32_NR_syscalls-1,%rax
8077 @@ -327,13 +378,15 @@ cstar_do_call:
8078 cstar_dispatch:
8079 call *ia32_sys_call_table(,%rax,8)
8080 movq %rax,RAX-ARGOFFSET(%rsp)
8081 - GET_THREAD_INFO(%r10)
8082 + GET_THREAD_INFO(%r11)
8083 DISABLE_INTERRUPTS(CLBR_NONE)
8084 TRACE_IRQS_OFF
8085 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
8086 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8087 jnz sysretl_audit
8088 sysretl_from_sys_call:
8089 - andl $~TS_COMPAT,TI_status(%r10)
8090 + pax_exit_kernel_user
8091 + pax_erase_kstack
8092 + andl $~TS_COMPAT,TI_status(%r11)
8093 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
8094 movl RIP-ARGOFFSET(%rsp),%ecx
8095 CFI_REGISTER rip,rcx
8096 @@ -361,7 +414,7 @@ sysretl_audit:
8097
8098 cstar_tracesys:
8099 #ifdef CONFIG_AUDITSYSCALL
8100 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8101 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8102 jz cstar_auditsys
8103 #endif
8104 xchgl %r9d,%ebp
8105 @@ -370,6 +423,9 @@ cstar_tracesys:
8106 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8107 movq %rsp,%rdi /* &pt_regs -> arg1 */
8108 call syscall_trace_enter
8109 +
8110 + pax_erase_kstack
8111 +
8112 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
8113 RESTORE_REST
8114 xchgl %ebp,%r9d
8115 @@ -415,11 +471,6 @@ ENTRY(ia32_syscall)
8116 CFI_REL_OFFSET rip,RIP-RIP
8117 PARAVIRT_ADJUST_EXCEPTION_FRAME
8118 SWAPGS
8119 - /*
8120 - * No need to follow this irqs on/off section: the syscall
8121 - * disabled irqs and here we enable it straight after entry:
8122 - */
8123 - ENABLE_INTERRUPTS(CLBR_NONE)
8124 movl %eax,%eax
8125 pushq %rax
8126 CFI_ADJUST_CFA_OFFSET 8
8127 @@ -427,9 +478,15 @@ ENTRY(ia32_syscall)
8128 /* note the registers are not zero extended to the sf.
8129 this could be a problem. */
8130 SAVE_ARGS 0,0,1
8131 - GET_THREAD_INFO(%r10)
8132 - orl $TS_COMPAT,TI_status(%r10)
8133 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8134 + pax_enter_kernel_user
8135 + /*
8136 + * No need to follow this irqs on/off section: the syscall
8137 + * disabled irqs and here we enable it straight after entry:
8138 + */
8139 + ENABLE_INTERRUPTS(CLBR_NONE)
8140 + GET_THREAD_INFO(%r11)
8141 + orl $TS_COMPAT,TI_status(%r11)
8142 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8143 jnz ia32_tracesys
8144 cmpq $(IA32_NR_syscalls-1),%rax
8145 ja ia32_badsys
8146 @@ -448,6 +505,9 @@ ia32_tracesys:
8147 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8148 movq %rsp,%rdi /* &pt_regs -> arg1 */
8149 call syscall_trace_enter
8150 +
8151 + pax_erase_kstack
8152 +
8153 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8154 RESTORE_REST
8155 cmpq $(IA32_NR_syscalls-1),%rax
8156 @@ -462,6 +522,7 @@ ia32_badsys:
8157
8158 quiet_ni_syscall:
8159 movq $-ENOSYS,%rax
8160 + pax_force_retaddr
8161 ret
8162 CFI_ENDPROC
8163
8164 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
8165 index 016218c..47ccbdd 100644
8166 --- a/arch/x86/ia32/sys_ia32.c
8167 +++ b/arch/x86/ia32/sys_ia32.c
8168 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
8169 */
8170 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
8171 {
8172 - typeof(ubuf->st_uid) uid = 0;
8173 - typeof(ubuf->st_gid) gid = 0;
8174 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
8175 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
8176 SET_UID(uid, stat->uid);
8177 SET_GID(gid, stat->gid);
8178 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
8179 @@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
8180 }
8181 set_fs(KERNEL_DS);
8182 ret = sys_rt_sigprocmask(how,
8183 - set ? (sigset_t __user *)&s : NULL,
8184 - oset ? (sigset_t __user *)&s : NULL,
8185 + set ? (sigset_t __force_user *)&s : NULL,
8186 + oset ? (sigset_t __force_user *)&s : NULL,
8187 sigsetsize);
8188 set_fs(old_fs);
8189 if (ret)
8190 @@ -371,7 +371,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
8191 mm_segment_t old_fs = get_fs();
8192
8193 set_fs(KERNEL_DS);
8194 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
8195 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
8196 set_fs(old_fs);
8197 if (put_compat_timespec(&t, interval))
8198 return -EFAULT;
8199 @@ -387,7 +387,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
8200 mm_segment_t old_fs = get_fs();
8201
8202 set_fs(KERNEL_DS);
8203 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
8204 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
8205 set_fs(old_fs);
8206 if (!ret) {
8207 switch (_NSIG_WORDS) {
8208 @@ -412,7 +412,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
8209 if (copy_siginfo_from_user32(&info, uinfo))
8210 return -EFAULT;
8211 set_fs(KERNEL_DS);
8212 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
8213 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
8214 set_fs(old_fs);
8215 return ret;
8216 }
8217 @@ -513,7 +513,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
8218 return -EFAULT;
8219
8220 set_fs(KERNEL_DS);
8221 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
8222 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
8223 count);
8224 set_fs(old_fs);
8225
8226 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
8227 index e2077d3..b7a8919 100644
8228 --- a/arch/x86/include/asm/alternative-asm.h
8229 +++ b/arch/x86/include/asm/alternative-asm.h
8230 @@ -8,10 +8,10 @@
8231
8232 #ifdef CONFIG_SMP
8233 .macro LOCK_PREFIX
8234 -1: lock
8235 +672: lock
8236 .section .smp_locks,"a"
8237 .align 4
8238 - X86_ALIGN 1b
8239 + X86_ALIGN 672b
8240 .previous
8241 .endm
8242 #else
8243 @@ -19,4 +19,43 @@
8244 .endm
8245 #endif
8246
8247 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
8248 + .macro pax_force_retaddr_bts rip=0
8249 + btsq $63,\rip(%rsp)
8250 + .endm
8251 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
8252 + .macro pax_force_retaddr rip=0, reload=0
8253 + btsq $63,\rip(%rsp)
8254 + .endm
8255 + .macro pax_force_fptr ptr
8256 + btsq $63,\ptr
8257 + .endm
8258 + .macro pax_set_fptr_mask
8259 + .endm
8260 +#endif
8261 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
8262 + .macro pax_force_retaddr rip=0, reload=0
8263 + .if \reload
8264 + pax_set_fptr_mask
8265 + .endif
8266 + orq %r10,\rip(%rsp)
8267 + .endm
8268 + .macro pax_force_fptr ptr
8269 + orq %r10,\ptr
8270 + .endm
8271 + .macro pax_set_fptr_mask
8272 + movabs $0x8000000000000000,%r10
8273 + .endm
8274 +#endif
8275 +#else
8276 + .macro pax_force_retaddr rip=0, reload=0
8277 + .endm
8278 + .macro pax_force_fptr ptr
8279 + .endm
8280 + .macro pax_force_retaddr_bts rip=0
8281 + .endm
8282 + .macro pax_set_fptr_mask
8283 + .endm
8284 +#endif
8285 +
8286 #endif /* __ASSEMBLY__ */
8287 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
8288 index c240efc..fdfadf3 100644
8289 --- a/arch/x86/include/asm/alternative.h
8290 +++ b/arch/x86/include/asm/alternative.h
8291 @@ -85,7 +85,7 @@ static inline void alternatives_smp_switch(int smp) {}
8292 " .byte 662b-661b\n" /* sourcelen */ \
8293 " .byte 664f-663f\n" /* replacementlen */ \
8294 ".previous\n" \
8295 - ".section .altinstr_replacement, \"ax\"\n" \
8296 + ".section .altinstr_replacement, \"a\"\n" \
8297 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
8298 ".previous"
8299
8300 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
8301 index 474d80d..1f97d58 100644
8302 --- a/arch/x86/include/asm/apic.h
8303 +++ b/arch/x86/include/asm/apic.h
8304 @@ -46,7 +46,7 @@ static inline void generic_apic_probe(void)
8305
8306 #ifdef CONFIG_X86_LOCAL_APIC
8307
8308 -extern unsigned int apic_verbosity;
8309 +extern int apic_verbosity;
8310 extern int local_apic_timer_c2_ok;
8311
8312 extern int disable_apic;
8313 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
8314 index 20370c6..a2eb9b0 100644
8315 --- a/arch/x86/include/asm/apm.h
8316 +++ b/arch/x86/include/asm/apm.h
8317 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
8318 __asm__ __volatile__(APM_DO_ZERO_SEGS
8319 "pushl %%edi\n\t"
8320 "pushl %%ebp\n\t"
8321 - "lcall *%%cs:apm_bios_entry\n\t"
8322 + "lcall *%%ss:apm_bios_entry\n\t"
8323 "setc %%al\n\t"
8324 "popl %%ebp\n\t"
8325 "popl %%edi\n\t"
8326 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
8327 __asm__ __volatile__(APM_DO_ZERO_SEGS
8328 "pushl %%edi\n\t"
8329 "pushl %%ebp\n\t"
8330 - "lcall *%%cs:apm_bios_entry\n\t"
8331 + "lcall *%%ss:apm_bios_entry\n\t"
8332 "setc %%bl\n\t"
8333 "popl %%ebp\n\t"
8334 "popl %%edi\n\t"
8335 diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h
8336 index dc5a667..939040c 100644
8337 --- a/arch/x86/include/asm/atomic_32.h
8338 +++ b/arch/x86/include/asm/atomic_32.h
8339 @@ -25,6 +25,17 @@ static inline int atomic_read(const atomic_t *v)
8340 }
8341
8342 /**
8343 + * atomic_read_unchecked - read atomic variable
8344 + * @v: pointer of type atomic_unchecked_t
8345 + *
8346 + * Atomically reads the value of @v.
8347 + */
8348 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8349 +{
8350 + return v->counter;
8351 +}
8352 +
8353 +/**
8354 * atomic_set - set atomic variable
8355 * @v: pointer of type atomic_t
8356 * @i: required value
8357 @@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *v, int i)
8358 }
8359
8360 /**
8361 + * atomic_set_unchecked - set atomic variable
8362 + * @v: pointer of type atomic_unchecked_t
8363 + * @i: required value
8364 + *
8365 + * Atomically sets the value of @v to @i.
8366 + */
8367 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8368 +{
8369 + v->counter = i;
8370 +}
8371 +
8372 +/**
8373 * atomic_add - add integer to atomic variable
8374 * @i: integer value to add
8375 * @v: pointer of type atomic_t
8376 @@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *v, int i)
8377 */
8378 static inline void atomic_add(int i, atomic_t *v)
8379 {
8380 - asm volatile(LOCK_PREFIX "addl %1,%0"
8381 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
8382 +
8383 +#ifdef CONFIG_PAX_REFCOUNT
8384 + "jno 0f\n"
8385 + LOCK_PREFIX "subl %1,%0\n"
8386 + "int $4\n0:\n"
8387 + _ASM_EXTABLE(0b, 0b)
8388 +#endif
8389 +
8390 + : "+m" (v->counter)
8391 + : "ir" (i));
8392 +}
8393 +
8394 +/**
8395 + * atomic_add_unchecked - add integer to atomic variable
8396 + * @i: integer value to add
8397 + * @v: pointer of type atomic_unchecked_t
8398 + *
8399 + * Atomically adds @i to @v.
8400 + */
8401 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
8402 +{
8403 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
8404 : "+m" (v->counter)
8405 : "ir" (i));
8406 }
8407 @@ -59,7 +104,29 @@ static inline void atomic_add(int i, atomic_t *v)
8408 */
8409 static inline void atomic_sub(int i, atomic_t *v)
8410 {
8411 - asm volatile(LOCK_PREFIX "subl %1,%0"
8412 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
8413 +
8414 +#ifdef CONFIG_PAX_REFCOUNT
8415 + "jno 0f\n"
8416 + LOCK_PREFIX "addl %1,%0\n"
8417 + "int $4\n0:\n"
8418 + _ASM_EXTABLE(0b, 0b)
8419 +#endif
8420 +
8421 + : "+m" (v->counter)
8422 + : "ir" (i));
8423 +}
8424 +
8425 +/**
8426 + * atomic_sub_unchecked - subtract integer from atomic variable
8427 + * @i: integer value to subtract
8428 + * @v: pointer of type atomic_unchecked_t
8429 + *
8430 + * Atomically subtracts @i from @v.
8431 + */
8432 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
8433 +{
8434 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
8435 : "+m" (v->counter)
8436 : "ir" (i));
8437 }
8438 @@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8439 {
8440 unsigned char c;
8441
8442 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
8443 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
8444 +
8445 +#ifdef CONFIG_PAX_REFCOUNT
8446 + "jno 0f\n"
8447 + LOCK_PREFIX "addl %2,%0\n"
8448 + "int $4\n0:\n"
8449 + _ASM_EXTABLE(0b, 0b)
8450 +#endif
8451 +
8452 + "sete %1\n"
8453 : "+m" (v->counter), "=qm" (c)
8454 : "ir" (i) : "memory");
8455 return c;
8456 @@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8457 */
8458 static inline void atomic_inc(atomic_t *v)
8459 {
8460 - asm volatile(LOCK_PREFIX "incl %0"
8461 + asm volatile(LOCK_PREFIX "incl %0\n"
8462 +
8463 +#ifdef CONFIG_PAX_REFCOUNT
8464 + "jno 0f\n"
8465 + LOCK_PREFIX "decl %0\n"
8466 + "int $4\n0:\n"
8467 + _ASM_EXTABLE(0b, 0b)
8468 +#endif
8469 +
8470 + : "+m" (v->counter));
8471 +}
8472 +
8473 +/**
8474 + * atomic_inc_unchecked - increment atomic variable
8475 + * @v: pointer of type atomic_unchecked_t
8476 + *
8477 + * Atomically increments @v by 1.
8478 + */
8479 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
8480 +{
8481 + asm volatile(LOCK_PREFIX "incl %0\n"
8482 : "+m" (v->counter));
8483 }
8484
8485 @@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *v)
8486 */
8487 static inline void atomic_dec(atomic_t *v)
8488 {
8489 - asm volatile(LOCK_PREFIX "decl %0"
8490 + asm volatile(LOCK_PREFIX "decl %0\n"
8491 +
8492 +#ifdef CONFIG_PAX_REFCOUNT
8493 + "jno 0f\n"
8494 + LOCK_PREFIX "incl %0\n"
8495 + "int $4\n0:\n"
8496 + _ASM_EXTABLE(0b, 0b)
8497 +#endif
8498 +
8499 + : "+m" (v->counter));
8500 +}
8501 +
8502 +/**
8503 + * atomic_dec_unchecked - decrement atomic variable
8504 + * @v: pointer of type atomic_unchecked_t
8505 + *
8506 + * Atomically decrements @v by 1.
8507 + */
8508 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
8509 +{
8510 + asm volatile(LOCK_PREFIX "decl %0\n"
8511 : "+m" (v->counter));
8512 }
8513
8514 @@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
8515 {
8516 unsigned char c;
8517
8518 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
8519 + asm volatile(LOCK_PREFIX "decl %0\n"
8520 +
8521 +#ifdef CONFIG_PAX_REFCOUNT
8522 + "jno 0f\n"
8523 + LOCK_PREFIX "incl %0\n"
8524 + "int $4\n0:\n"
8525 + _ASM_EXTABLE(0b, 0b)
8526 +#endif
8527 +
8528 + "sete %1\n"
8529 : "+m" (v->counter), "=qm" (c)
8530 : : "memory");
8531 return c != 0;
8532 @@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
8533 {
8534 unsigned char c;
8535
8536 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
8537 + asm volatile(LOCK_PREFIX "incl %0\n"
8538 +
8539 +#ifdef CONFIG_PAX_REFCOUNT
8540 + "jno 0f\n"
8541 + LOCK_PREFIX "decl %0\n"
8542 + "into\n0:\n"
8543 + _ASM_EXTABLE(0b, 0b)
8544 +#endif
8545 +
8546 + "sete %1\n"
8547 + : "+m" (v->counter), "=qm" (c)
8548 + : : "memory");
8549 + return c != 0;
8550 +}
8551 +
8552 +/**
8553 + * atomic_inc_and_test_unchecked - increment and test
8554 + * @v: pointer of type atomic_unchecked_t
8555 + *
8556 + * Atomically increments @v by 1
8557 + * and returns true if the result is zero, or false for all
8558 + * other cases.
8559 + */
8560 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
8561 +{
8562 + unsigned char c;
8563 +
8564 + asm volatile(LOCK_PREFIX "incl %0\n"
8565 + "sete %1\n"
8566 : "+m" (v->counter), "=qm" (c)
8567 : : "memory");
8568 return c != 0;
8569 @@ -156,7 +309,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
8570 {
8571 unsigned char c;
8572
8573 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
8574 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
8575 +
8576 +#ifdef CONFIG_PAX_REFCOUNT
8577 + "jno 0f\n"
8578 + LOCK_PREFIX "subl %2,%0\n"
8579 + "int $4\n0:\n"
8580 + _ASM_EXTABLE(0b, 0b)
8581 +#endif
8582 +
8583 + "sets %1\n"
8584 : "+m" (v->counter), "=qm" (c)
8585 : "ir" (i) : "memory");
8586 return c;
8587 @@ -179,7 +341,15 @@ static inline int atomic_add_return(int i, atomic_t *v)
8588 #endif
8589 /* Modern 486+ processor */
8590 __i = i;
8591 - asm volatile(LOCK_PREFIX "xaddl %0, %1"
8592 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
8593 +
8594 +#ifdef CONFIG_PAX_REFCOUNT
8595 + "jno 0f\n"
8596 + "movl %0, %1\n"
8597 + "int $4\n0:\n"
8598 + _ASM_EXTABLE(0b, 0b)
8599 +#endif
8600 +
8601 : "+r" (i), "+m" (v->counter)
8602 : : "memory");
8603 return i + __i;
8604 @@ -195,6 +365,38 @@ no_xadd: /* Legacy 386 processor */
8605 }
8606
8607 /**
8608 + * atomic_add_return_unchecked - add integer and return
8609 + * @v: pointer of type atomic_unchecked_t
8610 + * @i: integer value to add
8611 + *
8612 + * Atomically adds @i to @v and returns @i + @v
8613 + */
8614 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
8615 +{
8616 + int __i;
8617 +#ifdef CONFIG_M386
8618 + unsigned long flags;
8619 + if (unlikely(boot_cpu_data.x86 <= 3))
8620 + goto no_xadd;
8621 +#endif
8622 + /* Modern 486+ processor */
8623 + __i = i;
8624 + asm volatile(LOCK_PREFIX "xaddl %0, %1"
8625 + : "+r" (i), "+m" (v->counter)
8626 + : : "memory");
8627 + return i + __i;
8628 +
8629 +#ifdef CONFIG_M386
8630 +no_xadd: /* Legacy 386 processor */
8631 + local_irq_save(flags);
8632 + __i = atomic_read_unchecked(v);
8633 + atomic_set_unchecked(v, i + __i);
8634 + local_irq_restore(flags);
8635 + return i + __i;
8636 +#endif
8637 +}
8638 +
8639 +/**
8640 * atomic_sub_return - subtract integer and return
8641 * @v: pointer of type atomic_t
8642 * @i: integer value to subtract
8643 @@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
8644 return cmpxchg(&v->counter, old, new);
8645 }
8646
8647 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8648 +{
8649 + return cmpxchg(&v->counter, old, new);
8650 +}
8651 +
8652 static inline int atomic_xchg(atomic_t *v, int new)
8653 {
8654 return xchg(&v->counter, new);
8655 }
8656
8657 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8658 +{
8659 + return xchg(&v->counter, new);
8660 +}
8661 +
8662 /**
8663 * atomic_add_unless - add unless the number is already a given value
8664 * @v: pointer of type atomic_t
8665 @@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *v, int new)
8666 */
8667 static inline int atomic_add_unless(atomic_t *v, int a, int u)
8668 {
8669 - int c, old;
8670 + int c, old, new;
8671 c = atomic_read(v);
8672 for (;;) {
8673 - if (unlikely(c == (u)))
8674 + if (unlikely(c == u))
8675 break;
8676 - old = atomic_cmpxchg((v), c, c + (a));
8677 +
8678 + asm volatile("addl %2,%0\n"
8679 +
8680 +#ifdef CONFIG_PAX_REFCOUNT
8681 + "jno 0f\n"
8682 + "subl %2,%0\n"
8683 + "int $4\n0:\n"
8684 + _ASM_EXTABLE(0b, 0b)
8685 +#endif
8686 +
8687 + : "=r" (new)
8688 + : "0" (c), "ir" (a));
8689 +
8690 + old = atomic_cmpxchg(v, c, new);
8691 if (likely(old == c))
8692 break;
8693 c = old;
8694 }
8695 - return c != (u);
8696 + return c != u;
8697 }
8698
8699 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
8700
8701 #define atomic_inc_return(v) (atomic_add_return(1, v))
8702 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
8703 +{
8704 + return atomic_add_return_unchecked(1, v);
8705 +}
8706 #define atomic_dec_return(v) (atomic_sub_return(1, v))
8707
8708 /* These are x86-specific, used by some header files */
8709 @@ -266,9 +495,18 @@ typedef struct {
8710 u64 __aligned(8) counter;
8711 } atomic64_t;
8712
8713 +#ifdef CONFIG_PAX_REFCOUNT
8714 +typedef struct {
8715 + u64 __aligned(8) counter;
8716 +} atomic64_unchecked_t;
8717 +#else
8718 +typedef atomic64_t atomic64_unchecked_t;
8719 +#endif
8720 +
8721 #define ATOMIC64_INIT(val) { (val) }
8722
8723 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
8724 +extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
8725
8726 /**
8727 * atomic64_xchg - xchg atomic64 variable
8728 @@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
8729 * the old value.
8730 */
8731 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
8732 +extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
8733
8734 /**
8735 * atomic64_set - set atomic64 variable
8736 @@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
8737 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
8738
8739 /**
8740 + * atomic64_unchecked_set - set atomic64 variable
8741 + * @ptr: pointer to type atomic64_unchecked_t
8742 + * @new_val: value to assign
8743 + *
8744 + * Atomically sets the value of @ptr to @new_val.
8745 + */
8746 +extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
8747 +
8748 +/**
8749 * atomic64_read - read atomic64 variable
8750 * @ptr: pointer to type atomic64_t
8751 *
8752 @@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64_t *ptr)
8753 return res;
8754 }
8755
8756 -extern u64 atomic64_read(atomic64_t *ptr);
8757 +/**
8758 + * atomic64_read_unchecked - read atomic64 variable
8759 + * @ptr: pointer to type atomic64_unchecked_t
8760 + *
8761 + * Atomically reads the value of @ptr and returns it.
8762 + */
8763 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
8764 +{
8765 + u64 res;
8766 +
8767 + /*
8768 + * Note, we inline this atomic64_unchecked_t primitive because
8769 + * it only clobbers EAX/EDX and leaves the others
8770 + * untouched. We also (somewhat subtly) rely on the
8771 + * fact that cmpxchg8b returns the current 64-bit value
8772 + * of the memory location we are touching:
8773 + */
8774 + asm volatile(
8775 + "mov %%ebx, %%eax\n\t"
8776 + "mov %%ecx, %%edx\n\t"
8777 + LOCK_PREFIX "cmpxchg8b %1\n"
8778 + : "=&A" (res)
8779 + : "m" (*ptr)
8780 + );
8781 +
8782 + return res;
8783 +}
8784
8785 /**
8786 * atomic64_add_return - add and return
8787 @@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
8788 * Other variants with different arithmetic operators:
8789 */
8790 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
8791 +extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
8792 extern u64 atomic64_inc_return(atomic64_t *ptr);
8793 +extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
8794 extern u64 atomic64_dec_return(atomic64_t *ptr);
8795 +extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
8796
8797 /**
8798 * atomic64_add - add integer to atomic64 variable
8799 @@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_t *ptr);
8800 extern void atomic64_add(u64 delta, atomic64_t *ptr);
8801
8802 /**
8803 + * atomic64_add_unchecked - add integer to atomic64 variable
8804 + * @delta: integer value to add
8805 + * @ptr: pointer to type atomic64_unchecked_t
8806 + *
8807 + * Atomically adds @delta to @ptr.
8808 + */
8809 +extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
8810 +
8811 +/**
8812 * atomic64_sub - subtract the atomic64 variable
8813 * @delta: integer value to subtract
8814 * @ptr: pointer to type atomic64_t
8815 @@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atomic64_t *ptr);
8816 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
8817
8818 /**
8819 + * atomic64_sub_unchecked - subtract the atomic64 variable
8820 + * @delta: integer value to subtract
8821 + * @ptr: pointer to type atomic64_unchecked_t
8822 + *
8823 + * Atomically subtracts @delta from @ptr.
8824 + */
8825 +extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
8826 +
8827 +/**
8828 * atomic64_sub_and_test - subtract value from variable and test result
8829 * @delta: integer value to subtract
8830 * @ptr: pointer to type atomic64_t
8831 @@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
8832 extern void atomic64_inc(atomic64_t *ptr);
8833
8834 /**
8835 + * atomic64_inc_unchecked - increment atomic64 variable
8836 + * @ptr: pointer to type atomic64_unchecked_t
8837 + *
8838 + * Atomically increments @ptr by 1.
8839 + */
8840 +extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
8841 +
8842 +/**
8843 * atomic64_dec - decrement atomic64 variable
8844 * @ptr: pointer to type atomic64_t
8845 *
8846 @@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr);
8847 extern void atomic64_dec(atomic64_t *ptr);
8848
8849 /**
8850 + * atomic64_dec_unchecked - decrement atomic64 variable
8851 + * @ptr: pointer to type atomic64_unchecked_t
8852 + *
8853 + * Atomically decrements @ptr by 1.
8854 + */
8855 +extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
8856 +
8857 +/**
8858 * atomic64_dec_and_test - decrement and test
8859 * @ptr: pointer to type atomic64_t
8860 *
8861 diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h
8862 index d605dc2..fafd7bd 100644
8863 --- a/arch/x86/include/asm/atomic_64.h
8864 +++ b/arch/x86/include/asm/atomic_64.h
8865 @@ -24,6 +24,17 @@ static inline int atomic_read(const atomic_t *v)
8866 }
8867
8868 /**
8869 + * atomic_read_unchecked - read atomic variable
8870 + * @v: pointer of type atomic_unchecked_t
8871 + *
8872 + * Atomically reads the value of @v.
8873 + */
8874 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8875 +{
8876 + return v->counter;
8877 +}
8878 +
8879 +/**
8880 * atomic_set - set atomic variable
8881 * @v: pointer of type atomic_t
8882 * @i: required value
8883 @@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *v, int i)
8884 }
8885
8886 /**
8887 + * atomic_set_unchecked - set atomic variable
8888 + * @v: pointer of type atomic_unchecked_t
8889 + * @i: required value
8890 + *
8891 + * Atomically sets the value of @v to @i.
8892 + */
8893 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8894 +{
8895 + v->counter = i;
8896 +}
8897 +
8898 +/**
8899 * atomic_add - add integer to atomic variable
8900 * @i: integer value to add
8901 * @v: pointer of type atomic_t
8902 @@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *v, int i)
8903 */
8904 static inline void atomic_add(int i, atomic_t *v)
8905 {
8906 - asm volatile(LOCK_PREFIX "addl %1,%0"
8907 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
8908 +
8909 +#ifdef CONFIG_PAX_REFCOUNT
8910 + "jno 0f\n"
8911 + LOCK_PREFIX "subl %1,%0\n"
8912 + "int $4\n0:\n"
8913 + _ASM_EXTABLE(0b, 0b)
8914 +#endif
8915 +
8916 + : "=m" (v->counter)
8917 + : "ir" (i), "m" (v->counter));
8918 +}
8919 +
8920 +/**
8921 + * atomic_add_unchecked - add integer to atomic variable
8922 + * @i: integer value to add
8923 + * @v: pointer of type atomic_unchecked_t
8924 + *
8925 + * Atomically adds @i to @v.
8926 + */
8927 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
8928 +{
8929 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
8930 : "=m" (v->counter)
8931 : "ir" (i), "m" (v->counter));
8932 }
8933 @@ -58,7 +103,29 @@ static inline void atomic_add(int i, atomic_t *v)
8934 */
8935 static inline void atomic_sub(int i, atomic_t *v)
8936 {
8937 - asm volatile(LOCK_PREFIX "subl %1,%0"
8938 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
8939 +
8940 +#ifdef CONFIG_PAX_REFCOUNT
8941 + "jno 0f\n"
8942 + LOCK_PREFIX "addl %1,%0\n"
8943 + "int $4\n0:\n"
8944 + _ASM_EXTABLE(0b, 0b)
8945 +#endif
8946 +
8947 + : "=m" (v->counter)
8948 + : "ir" (i), "m" (v->counter));
8949 +}
8950 +
8951 +/**
8952 + * atomic_sub_unchecked - subtract the atomic variable
8953 + * @i: integer value to subtract
8954 + * @v: pointer of type atomic_unchecked_t
8955 + *
8956 + * Atomically subtracts @i from @v.
8957 + */
8958 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
8959 +{
8960 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
8961 : "=m" (v->counter)
8962 : "ir" (i), "m" (v->counter));
8963 }
8964 @@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8965 {
8966 unsigned char c;
8967
8968 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
8969 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
8970 +
8971 +#ifdef CONFIG_PAX_REFCOUNT
8972 + "jno 0f\n"
8973 + LOCK_PREFIX "addl %2,%0\n"
8974 + "int $4\n0:\n"
8975 + _ASM_EXTABLE(0b, 0b)
8976 +#endif
8977 +
8978 + "sete %1\n"
8979 : "=m" (v->counter), "=qm" (c)
8980 : "ir" (i), "m" (v->counter) : "memory");
8981 return c;
8982 @@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8983 */
8984 static inline void atomic_inc(atomic_t *v)
8985 {
8986 - asm volatile(LOCK_PREFIX "incl %0"
8987 + asm volatile(LOCK_PREFIX "incl %0\n"
8988 +
8989 +#ifdef CONFIG_PAX_REFCOUNT
8990 + "jno 0f\n"
8991 + LOCK_PREFIX "decl %0\n"
8992 + "int $4\n0:\n"
8993 + _ASM_EXTABLE(0b, 0b)
8994 +#endif
8995 +
8996 + : "=m" (v->counter)
8997 + : "m" (v->counter));
8998 +}
8999 +
9000 +/**
9001 + * atomic_inc_unchecked - increment atomic variable
9002 + * @v: pointer of type atomic_unchecked_t
9003 + *
9004 + * Atomically increments @v by 1.
9005 + */
9006 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9007 +{
9008 + asm volatile(LOCK_PREFIX "incl %0\n"
9009 : "=m" (v->counter)
9010 : "m" (v->counter));
9011 }
9012 @@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *v)
9013 */
9014 static inline void atomic_dec(atomic_t *v)
9015 {
9016 - asm volatile(LOCK_PREFIX "decl %0"
9017 + asm volatile(LOCK_PREFIX "decl %0\n"
9018 +
9019 +#ifdef CONFIG_PAX_REFCOUNT
9020 + "jno 0f\n"
9021 + LOCK_PREFIX "incl %0\n"
9022 + "int $4\n0:\n"
9023 + _ASM_EXTABLE(0b, 0b)
9024 +#endif
9025 +
9026 + : "=m" (v->counter)
9027 + : "m" (v->counter));
9028 +}
9029 +
9030 +/**
9031 + * atomic_dec_unchecked - decrement atomic variable
9032 + * @v: pointer of type atomic_unchecked_t
9033 + *
9034 + * Atomically decrements @v by 1.
9035 + */
9036 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9037 +{
9038 + asm volatile(LOCK_PREFIX "decl %0\n"
9039 : "=m" (v->counter)
9040 : "m" (v->counter));
9041 }
9042 @@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9043 {
9044 unsigned char c;
9045
9046 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
9047 + asm volatile(LOCK_PREFIX "decl %0\n"
9048 +
9049 +#ifdef CONFIG_PAX_REFCOUNT
9050 + "jno 0f\n"
9051 + LOCK_PREFIX "incl %0\n"
9052 + "int $4\n0:\n"
9053 + _ASM_EXTABLE(0b, 0b)
9054 +#endif
9055 +
9056 + "sete %1\n"
9057 : "=m" (v->counter), "=qm" (c)
9058 : "m" (v->counter) : "memory");
9059 return c != 0;
9060 @@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9061 {
9062 unsigned char c;
9063
9064 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
9065 + asm volatile(LOCK_PREFIX "incl %0\n"
9066 +
9067 +#ifdef CONFIG_PAX_REFCOUNT
9068 + "jno 0f\n"
9069 + LOCK_PREFIX "decl %0\n"
9070 + "int $4\n0:\n"
9071 + _ASM_EXTABLE(0b, 0b)
9072 +#endif
9073 +
9074 + "sete %1\n"
9075 + : "=m" (v->counter), "=qm" (c)
9076 + : "m" (v->counter) : "memory");
9077 + return c != 0;
9078 +}
9079 +
9080 +/**
9081 + * atomic_inc_and_test_unchecked - increment and test
9082 + * @v: pointer of type atomic_unchecked_t
9083 + *
9084 + * Atomically increments @v by 1
9085 + * and returns true if the result is zero, or false for all
9086 + * other cases.
9087 + */
9088 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9089 +{
9090 + unsigned char c;
9091 +
9092 + asm volatile(LOCK_PREFIX "incl %0\n"
9093 + "sete %1\n"
9094 : "=m" (v->counter), "=qm" (c)
9095 : "m" (v->counter) : "memory");
9096 return c != 0;
9097 @@ -157,7 +312,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9098 {
9099 unsigned char c;
9100
9101 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9102 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
9103 +
9104 +#ifdef CONFIG_PAX_REFCOUNT
9105 + "jno 0f\n"
9106 + LOCK_PREFIX "subl %2,%0\n"
9107 + "int $4\n0:\n"
9108 + _ASM_EXTABLE(0b, 0b)
9109 +#endif
9110 +
9111 + "sets %1\n"
9112 : "=m" (v->counter), "=qm" (c)
9113 : "ir" (i), "m" (v->counter) : "memory");
9114 return c;
9115 @@ -173,7 +337,31 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9116 static inline int atomic_add_return(int i, atomic_t *v)
9117 {
9118 int __i = i;
9119 - asm volatile(LOCK_PREFIX "xaddl %0, %1"
9120 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9121 +
9122 +#ifdef CONFIG_PAX_REFCOUNT
9123 + "jno 0f\n"
9124 + "movl %0, %1\n"
9125 + "int $4\n0:\n"
9126 + _ASM_EXTABLE(0b, 0b)
9127 +#endif
9128 +
9129 + : "+r" (i), "+m" (v->counter)
9130 + : : "memory");
9131 + return i + __i;
9132 +}
9133 +
9134 +/**
9135 + * atomic_add_return_unchecked - add and return
9136 + * @i: integer value to add
9137 + * @v: pointer of type atomic_unchecked_t
9138 + *
9139 + * Atomically adds @i to @v and returns @i + @v
9140 + */
9141 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9142 +{
9143 + int __i = i;
9144 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9145 : "+r" (i), "+m" (v->counter)
9146 : : "memory");
9147 return i + __i;
9148 @@ -185,6 +373,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
9149 }
9150
9151 #define atomic_inc_return(v) (atomic_add_return(1, v))
9152 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9153 +{
9154 + return atomic_add_return_unchecked(1, v);
9155 +}
9156 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9157
9158 /* The 64-bit atomic type */
9159 @@ -204,6 +396,18 @@ static inline long atomic64_read(const atomic64_t *v)
9160 }
9161
9162 /**
9163 + * atomic64_read_unchecked - read atomic64 variable
9164 + * @v: pointer of type atomic64_unchecked_t
9165 + *
9166 + * Atomically reads the value of @v.
9167 + * Doesn't imply a read memory barrier.
9168 + */
9169 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9170 +{
9171 + return v->counter;
9172 +}
9173 +
9174 +/**
9175 * atomic64_set - set atomic64 variable
9176 * @v: pointer to type atomic64_t
9177 * @i: required value
9178 @@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9179 }
9180
9181 /**
9182 + * atomic64_set_unchecked - set atomic64 variable
9183 + * @v: pointer to type atomic64_unchecked_t
9184 + * @i: required value
9185 + *
9186 + * Atomically sets the value of @v to @i.
9187 + */
9188 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9189 +{
9190 + v->counter = i;
9191 +}
9192 +
9193 +/**
9194 * atomic64_add - add integer to atomic64 variable
9195 * @i: integer value to add
9196 * @v: pointer to type atomic64_t
9197 @@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9198 */
9199 static inline void atomic64_add(long i, atomic64_t *v)
9200 {
9201 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
9202 +
9203 +#ifdef CONFIG_PAX_REFCOUNT
9204 + "jno 0f\n"
9205 + LOCK_PREFIX "subq %1,%0\n"
9206 + "int $4\n0:\n"
9207 + _ASM_EXTABLE(0b, 0b)
9208 +#endif
9209 +
9210 + : "=m" (v->counter)
9211 + : "er" (i), "m" (v->counter));
9212 +}
9213 +
9214 +/**
9215 + * atomic64_add_unchecked - add integer to atomic64 variable
9216 + * @i: integer value to add
9217 + * @v: pointer to type atomic64_unchecked_t
9218 + *
9219 + * Atomically adds @i to @v.
9220 + */
9221 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9222 +{
9223 asm volatile(LOCK_PREFIX "addq %1,%0"
9224 : "=m" (v->counter)
9225 : "er" (i), "m" (v->counter));
9226 @@ -238,7 +476,15 @@ static inline void atomic64_add(long i, atomic64_t *v)
9227 */
9228 static inline void atomic64_sub(long i, atomic64_t *v)
9229 {
9230 - asm volatile(LOCK_PREFIX "subq %1,%0"
9231 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9232 +
9233 +#ifdef CONFIG_PAX_REFCOUNT
9234 + "jno 0f\n"
9235 + LOCK_PREFIX "addq %1,%0\n"
9236 + "int $4\n0:\n"
9237 + _ASM_EXTABLE(0b, 0b)
9238 +#endif
9239 +
9240 : "=m" (v->counter)
9241 : "er" (i), "m" (v->counter));
9242 }
9243 @@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9244 {
9245 unsigned char c;
9246
9247 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9248 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
9249 +
9250 +#ifdef CONFIG_PAX_REFCOUNT
9251 + "jno 0f\n"
9252 + LOCK_PREFIX "addq %2,%0\n"
9253 + "int $4\n0:\n"
9254 + _ASM_EXTABLE(0b, 0b)
9255 +#endif
9256 +
9257 + "sete %1\n"
9258 : "=m" (v->counter), "=qm" (c)
9259 : "er" (i), "m" (v->counter) : "memory");
9260 return c;
9261 @@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9262 */
9263 static inline void atomic64_inc(atomic64_t *v)
9264 {
9265 + asm volatile(LOCK_PREFIX "incq %0\n"
9266 +
9267 +#ifdef CONFIG_PAX_REFCOUNT
9268 + "jno 0f\n"
9269 + LOCK_PREFIX "decq %0\n"
9270 + "int $4\n0:\n"
9271 + _ASM_EXTABLE(0b, 0b)
9272 +#endif
9273 +
9274 + : "=m" (v->counter)
9275 + : "m" (v->counter));
9276 +}
9277 +
9278 +/**
9279 + * atomic64_inc_unchecked - increment atomic64 variable
9280 + * @v: pointer to type atomic64_unchecked_t
9281 + *
9282 + * Atomically increments @v by 1.
9283 + */
9284 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9285 +{
9286 asm volatile(LOCK_PREFIX "incq %0"
9287 : "=m" (v->counter)
9288 : "m" (v->counter));
9289 @@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64_t *v)
9290 */
9291 static inline void atomic64_dec(atomic64_t *v)
9292 {
9293 - asm volatile(LOCK_PREFIX "decq %0"
9294 + asm volatile(LOCK_PREFIX "decq %0\n"
9295 +
9296 +#ifdef CONFIG_PAX_REFCOUNT
9297 + "jno 0f\n"
9298 + LOCK_PREFIX "incq %0\n"
9299 + "int $4\n0:\n"
9300 + _ASM_EXTABLE(0b, 0b)
9301 +#endif
9302 +
9303 + : "=m" (v->counter)
9304 + : "m" (v->counter));
9305 +}
9306 +
9307 +/**
9308 + * atomic64_dec_unchecked - decrement atomic64 variable
9309 + * @v: pointer to type atomic64_t
9310 + *
9311 + * Atomically decrements @v by 1.
9312 + */
9313 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9314 +{
9315 + asm volatile(LOCK_PREFIX "decq %0\n"
9316 : "=m" (v->counter)
9317 : "m" (v->counter));
9318 }
9319 @@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9320 {
9321 unsigned char c;
9322
9323 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
9324 + asm volatile(LOCK_PREFIX "decq %0\n"
9325 +
9326 +#ifdef CONFIG_PAX_REFCOUNT
9327 + "jno 0f\n"
9328 + LOCK_PREFIX "incq %0\n"
9329 + "int $4\n0:\n"
9330 + _ASM_EXTABLE(0b, 0b)
9331 +#endif
9332 +
9333 + "sete %1\n"
9334 : "=m" (v->counter), "=qm" (c)
9335 : "m" (v->counter) : "memory");
9336 return c != 0;
9337 @@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9338 {
9339 unsigned char c;
9340
9341 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
9342 + asm volatile(LOCK_PREFIX "incq %0\n"
9343 +
9344 +#ifdef CONFIG_PAX_REFCOUNT
9345 + "jno 0f\n"
9346 + LOCK_PREFIX "decq %0\n"
9347 + "int $4\n0:\n"
9348 + _ASM_EXTABLE(0b, 0b)
9349 +#endif
9350 +
9351 + "sete %1\n"
9352 : "=m" (v->counter), "=qm" (c)
9353 : "m" (v->counter) : "memory");
9354 return c != 0;
9355 @@ -337,7 +652,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9356 {
9357 unsigned char c;
9358
9359 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9360 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
9361 +
9362 +#ifdef CONFIG_PAX_REFCOUNT
9363 + "jno 0f\n"
9364 + LOCK_PREFIX "subq %2,%0\n"
9365 + "int $4\n0:\n"
9366 + _ASM_EXTABLE(0b, 0b)
9367 +#endif
9368 +
9369 + "sets %1\n"
9370 : "=m" (v->counter), "=qm" (c)
9371 : "er" (i), "m" (v->counter) : "memory");
9372 return c;
9373 @@ -353,7 +677,31 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9374 static inline long atomic64_add_return(long i, atomic64_t *v)
9375 {
9376 long __i = i;
9377 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
9378 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
9379 +
9380 +#ifdef CONFIG_PAX_REFCOUNT
9381 + "jno 0f\n"
9382 + "movq %0, %1\n"
9383 + "int $4\n0:\n"
9384 + _ASM_EXTABLE(0b, 0b)
9385 +#endif
9386 +
9387 + : "+r" (i), "+m" (v->counter)
9388 + : : "memory");
9389 + return i + __i;
9390 +}
9391 +
9392 +/**
9393 + * atomic64_add_return_unchecked - add and return
9394 + * @i: integer value to add
9395 + * @v: pointer to type atomic64_unchecked_t
9396 + *
9397 + * Atomically adds @i to @v and returns @i + @v
9398 + */
9399 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9400 +{
9401 + long __i = i;
9402 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
9403 : "+r" (i), "+m" (v->counter)
9404 : : "memory");
9405 return i + __i;
9406 @@ -365,6 +713,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
9407 }
9408
9409 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
9410 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9411 +{
9412 + return atomic64_add_return_unchecked(1, v);
9413 +}
9414 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
9415
9416 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9417 @@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9418 return cmpxchg(&v->counter, old, new);
9419 }
9420
9421 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
9422 +{
9423 + return cmpxchg(&v->counter, old, new);
9424 +}
9425 +
9426 static inline long atomic64_xchg(atomic64_t *v, long new)
9427 {
9428 return xchg(&v->counter, new);
9429 }
9430
9431 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
9432 +{
9433 + return xchg(&v->counter, new);
9434 +}
9435 +
9436 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
9437 {
9438 return cmpxchg(&v->counter, old, new);
9439 }
9440
9441 +static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9442 +{
9443 + return cmpxchg(&v->counter, old, new);
9444 +}
9445 +
9446 static inline long atomic_xchg(atomic_t *v, int new)
9447 {
9448 return xchg(&v->counter, new);
9449 }
9450
9451 +static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9452 +{
9453 + return xchg(&v->counter, new);
9454 +}
9455 +
9456 /**
9457 * atomic_add_unless - add unless the number is a given value
9458 * @v: pointer of type atomic_t
9459 @@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t *v, int new)
9460 */
9461 static inline int atomic_add_unless(atomic_t *v, int a, int u)
9462 {
9463 - int c, old;
9464 + int c, old, new;
9465 c = atomic_read(v);
9466 for (;;) {
9467 - if (unlikely(c == (u)))
9468 + if (unlikely(c == u))
9469 break;
9470 - old = atomic_cmpxchg((v), c, c + (a));
9471 +
9472 + asm volatile("addl %2,%0\n"
9473 +
9474 +#ifdef CONFIG_PAX_REFCOUNT
9475 + "jno 0f\n"
9476 + "subl %2,%0\n"
9477 + "int $4\n0:\n"
9478 + _ASM_EXTABLE(0b, 0b)
9479 +#endif
9480 +
9481 + : "=r" (new)
9482 + : "0" (c), "ir" (a));
9483 +
9484 + old = atomic_cmpxchg(v, c, new);
9485 if (likely(old == c))
9486 break;
9487 c = old;
9488 }
9489 - return c != (u);
9490 + return c != u;
9491 }
9492
9493 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
9494 @@ -424,17 +809,30 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
9495 */
9496 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
9497 {
9498 - long c, old;
9499 + long c, old, new;
9500 c = atomic64_read(v);
9501 for (;;) {
9502 - if (unlikely(c == (u)))
9503 + if (unlikely(c == u))
9504 break;
9505 - old = atomic64_cmpxchg((v), c, c + (a));
9506 +
9507 + asm volatile("addq %2,%0\n"
9508 +
9509 +#ifdef CONFIG_PAX_REFCOUNT
9510 + "jno 0f\n"
9511 + "subq %2,%0\n"
9512 + "int $4\n0:\n"
9513 + _ASM_EXTABLE(0b, 0b)
9514 +#endif
9515 +
9516 + : "=r" (new)
9517 + : "0" (c), "er" (a));
9518 +
9519 + old = atomic64_cmpxchg(v, c, new);
9520 if (likely(old == c))
9521 break;
9522 c = old;
9523 }
9524 - return c != (u);
9525 + return c != u;
9526 }
9527
9528 /**
9529 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
9530 index 02b47a6..d5c4b15 100644
9531 --- a/arch/x86/include/asm/bitops.h
9532 +++ b/arch/x86/include/asm/bitops.h
9533 @@ -38,7 +38,7 @@
9534 * a mask operation on a byte.
9535 */
9536 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
9537 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
9538 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
9539 #define CONST_MASK(nr) (1 << ((nr) & 7))
9540
9541 /**
9542 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
9543 index 7a10659..8bbf355 100644
9544 --- a/arch/x86/include/asm/boot.h
9545 +++ b/arch/x86/include/asm/boot.h
9546 @@ -11,10 +11,15 @@
9547 #include <asm/pgtable_types.h>
9548
9549 /* Physical address where kernel should be loaded. */
9550 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9551 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9552 + (CONFIG_PHYSICAL_ALIGN - 1)) \
9553 & ~(CONFIG_PHYSICAL_ALIGN - 1))
9554
9555 +#ifndef __ASSEMBLY__
9556 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
9557 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
9558 +#endif
9559 +
9560 /* Minimum kernel alignment, as a power of two */
9561 #ifdef CONFIG_X86_64
9562 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
9563 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
9564 index 549860d..7d45f68 100644
9565 --- a/arch/x86/include/asm/cache.h
9566 +++ b/arch/x86/include/asm/cache.h
9567 @@ -5,9 +5,10 @@
9568
9569 /* L1 cache line size */
9570 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
9571 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9572 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9573
9574 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
9575 +#define __read_only __attribute__((__section__(".data.read_only")))
9576
9577 #ifdef CONFIG_X86_VSMP
9578 /* vSMP Internode cacheline shift */
9579 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
9580 index b54f6af..5b376a6 100644
9581 --- a/arch/x86/include/asm/cacheflush.h
9582 +++ b/arch/x86/include/asm/cacheflush.h
9583 @@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
9584 static inline unsigned long get_page_memtype(struct page *pg)
9585 {
9586 if (!PageUncached(pg) && !PageWC(pg))
9587 - return -1;
9588 + return ~0UL;
9589 else if (!PageUncached(pg) && PageWC(pg))
9590 return _PAGE_CACHE_WC;
9591 else if (PageUncached(pg) && !PageWC(pg))
9592 @@ -85,7 +85,7 @@ static inline void set_page_memtype(struct page *pg, unsigned long memtype)
9593 SetPageWC(pg);
9594 break;
9595 default:
9596 - case -1:
9597 + case ~0UL:
9598 ClearPageUncached(pg);
9599 ClearPageWC(pg);
9600 break;
9601 diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
9602 index 0e63c9a..ab8d972 100644
9603 --- a/arch/x86/include/asm/calling.h
9604 +++ b/arch/x86/include/asm/calling.h
9605 @@ -52,32 +52,32 @@ For 32-bit we have the following conventions - kernel is built with
9606 * for assembly code:
9607 */
9608
9609 -#define R15 0
9610 -#define R14 8
9611 -#define R13 16
9612 -#define R12 24
9613 -#define RBP 32
9614 -#define RBX 40
9615 +#define R15 (0)
9616 +#define R14 (8)
9617 +#define R13 (16)
9618 +#define R12 (24)
9619 +#define RBP (32)
9620 +#define RBX (40)
9621
9622 /* arguments: interrupts/non tracing syscalls only save up to here: */
9623 -#define R11 48
9624 -#define R10 56
9625 -#define R9 64
9626 -#define R8 72
9627 -#define RAX 80
9628 -#define RCX 88
9629 -#define RDX 96
9630 -#define RSI 104
9631 -#define RDI 112
9632 -#define ORIG_RAX 120 /* + error_code */
9633 +#define R11 (48)
9634 +#define R10 (56)
9635 +#define R9 (64)
9636 +#define R8 (72)
9637 +#define RAX (80)
9638 +#define RCX (88)
9639 +#define RDX (96)
9640 +#define RSI (104)
9641 +#define RDI (112)
9642 +#define ORIG_RAX (120) /* + error_code */
9643 /* end of arguments */
9644
9645 /* cpu exception frame or undefined in case of fast syscall: */
9646 -#define RIP 128
9647 -#define CS 136
9648 -#define EFLAGS 144
9649 -#define RSP 152
9650 -#define SS 160
9651 +#define RIP (128)
9652 +#define CS (136)
9653 +#define EFLAGS (144)
9654 +#define RSP (152)
9655 +#define SS (160)
9656
9657 #define ARGOFFSET R11
9658 #define SWFRAME ORIG_RAX
9659 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
9660 index 46fc474..b02b0f9 100644
9661 --- a/arch/x86/include/asm/checksum_32.h
9662 +++ b/arch/x86/include/asm/checksum_32.h
9663 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
9664 int len, __wsum sum,
9665 int *src_err_ptr, int *dst_err_ptr);
9666
9667 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
9668 + int len, __wsum sum,
9669 + int *src_err_ptr, int *dst_err_ptr);
9670 +
9671 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
9672 + int len, __wsum sum,
9673 + int *src_err_ptr, int *dst_err_ptr);
9674 +
9675 /*
9676 * Note: when you get a NULL pointer exception here this means someone
9677 * passed in an incorrect kernel address to one of these functions.
9678 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
9679 int *err_ptr)
9680 {
9681 might_sleep();
9682 - return csum_partial_copy_generic((__force void *)src, dst,
9683 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
9684 len, sum, err_ptr, NULL);
9685 }
9686
9687 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
9688 {
9689 might_sleep();
9690 if (access_ok(VERIFY_WRITE, dst, len))
9691 - return csum_partial_copy_generic(src, (__force void *)dst,
9692 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
9693 len, sum, NULL, err_ptr);
9694
9695 if (len)
9696 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
9697 index 617bd56..7b047a1 100644
9698 --- a/arch/x86/include/asm/desc.h
9699 +++ b/arch/x86/include/asm/desc.h
9700 @@ -4,6 +4,7 @@
9701 #include <asm/desc_defs.h>
9702 #include <asm/ldt.h>
9703 #include <asm/mmu.h>
9704 +#include <asm/pgtable.h>
9705 #include <linux/smp.h>
9706
9707 static inline void fill_ldt(struct desc_struct *desc,
9708 @@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_struct *desc,
9709 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
9710 desc->type = (info->read_exec_only ^ 1) << 1;
9711 desc->type |= info->contents << 2;
9712 + desc->type |= info->seg_not_present ^ 1;
9713 desc->s = 1;
9714 desc->dpl = 0x3;
9715 desc->p = info->seg_not_present ^ 1;
9716 @@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_struct *desc,
9717 }
9718
9719 extern struct desc_ptr idt_descr;
9720 -extern gate_desc idt_table[];
9721 -
9722 -struct gdt_page {
9723 - struct desc_struct gdt[GDT_ENTRIES];
9724 -} __attribute__((aligned(PAGE_SIZE)));
9725 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
9726 +extern gate_desc idt_table[256];
9727
9728 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
9729 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
9730 {
9731 - return per_cpu(gdt_page, cpu).gdt;
9732 + return cpu_gdt_table[cpu];
9733 }
9734
9735 #ifdef CONFIG_X86_64
9736 @@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
9737 unsigned long base, unsigned dpl, unsigned flags,
9738 unsigned short seg)
9739 {
9740 - gate->a = (seg << 16) | (base & 0xffff);
9741 - gate->b = (base & 0xffff0000) |
9742 - (((0x80 | type | (dpl << 5)) & 0xff) << 8);
9743 + gate->gate.offset_low = base;
9744 + gate->gate.seg = seg;
9745 + gate->gate.reserved = 0;
9746 + gate->gate.type = type;
9747 + gate->gate.s = 0;
9748 + gate->gate.dpl = dpl;
9749 + gate->gate.p = 1;
9750 + gate->gate.offset_high = base >> 16;
9751 }
9752
9753 #endif
9754 @@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
9755 static inline void native_write_idt_entry(gate_desc *idt, int entry,
9756 const gate_desc *gate)
9757 {
9758 + pax_open_kernel();
9759 memcpy(&idt[entry], gate, sizeof(*gate));
9760 + pax_close_kernel();
9761 }
9762
9763 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
9764 const void *desc)
9765 {
9766 + pax_open_kernel();
9767 memcpy(&ldt[entry], desc, 8);
9768 + pax_close_kernel();
9769 }
9770
9771 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
9772 @@ -139,7 +146,10 @@ static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
9773 size = sizeof(struct desc_struct);
9774 break;
9775 }
9776 +
9777 + pax_open_kernel();
9778 memcpy(&gdt[entry], desc, size);
9779 + pax_close_kernel();
9780 }
9781
9782 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
9783 @@ -211,7 +221,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
9784
9785 static inline void native_load_tr_desc(void)
9786 {
9787 + pax_open_kernel();
9788 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
9789 + pax_close_kernel();
9790 }
9791
9792 static inline void native_load_gdt(const struct desc_ptr *dtr)
9793 @@ -246,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
9794 unsigned int i;
9795 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
9796
9797 + pax_open_kernel();
9798 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
9799 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
9800 + pax_close_kernel();
9801 }
9802
9803 #define _LDT_empty(info) \
9804 @@ -309,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
9805 desc->limit = (limit >> 16) & 0xf;
9806 }
9807
9808 -static inline void _set_gate(int gate, unsigned type, void *addr,
9809 +static inline void _set_gate(int gate, unsigned type, const void *addr,
9810 unsigned dpl, unsigned ist, unsigned seg)
9811 {
9812 gate_desc s;
9813 @@ -327,7 +341,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
9814 * Pentium F0 0F bugfix can have resulted in the mapped
9815 * IDT being write-protected.
9816 */
9817 -static inline void set_intr_gate(unsigned int n, void *addr)
9818 +static inline void set_intr_gate(unsigned int n, const void *addr)
9819 {
9820 BUG_ON((unsigned)n > 0xFF);
9821 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
9822 @@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
9823 /*
9824 * This routine sets up an interrupt gate at directory privilege level 3.
9825 */
9826 -static inline void set_system_intr_gate(unsigned int n, void *addr)
9827 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
9828 {
9829 BUG_ON((unsigned)n > 0xFF);
9830 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
9831 }
9832
9833 -static inline void set_system_trap_gate(unsigned int n, void *addr)
9834 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
9835 {
9836 BUG_ON((unsigned)n > 0xFF);
9837 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
9838 }
9839
9840 -static inline void set_trap_gate(unsigned int n, void *addr)
9841 +static inline void set_trap_gate(unsigned int n, const void *addr)
9842 {
9843 BUG_ON((unsigned)n > 0xFF);
9844 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
9845 @@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
9846 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
9847 {
9848 BUG_ON((unsigned)n > 0xFF);
9849 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
9850 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
9851 }
9852
9853 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
9854 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
9855 {
9856 BUG_ON((unsigned)n > 0xFF);
9857 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
9858 }
9859
9860 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
9861 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
9862 {
9863 BUG_ON((unsigned)n > 0xFF);
9864 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
9865 }
9866
9867 +#ifdef CONFIG_X86_32
9868 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
9869 +{
9870 + struct desc_struct d;
9871 +
9872 + if (likely(limit))
9873 + limit = (limit - 1UL) >> PAGE_SHIFT;
9874 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
9875 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
9876 +}
9877 +#endif
9878 +
9879 #endif /* _ASM_X86_DESC_H */
9880 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
9881 index 9d66848..6b4a691 100644
9882 --- a/arch/x86/include/asm/desc_defs.h
9883 +++ b/arch/x86/include/asm/desc_defs.h
9884 @@ -31,6 +31,12 @@ struct desc_struct {
9885 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
9886 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
9887 };
9888 + struct {
9889 + u16 offset_low;
9890 + u16 seg;
9891 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
9892 + unsigned offset_high: 16;
9893 + } gate;
9894 };
9895 } __attribute__((packed));
9896
9897 diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
9898 index cee34e9..a7c3fa2 100644
9899 --- a/arch/x86/include/asm/device.h
9900 +++ b/arch/x86/include/asm/device.h
9901 @@ -6,7 +6,7 @@ struct dev_archdata {
9902 void *acpi_handle;
9903 #endif
9904 #ifdef CONFIG_X86_64
9905 -struct dma_map_ops *dma_ops;
9906 + const struct dma_map_ops *dma_ops;
9907 #endif
9908 #ifdef CONFIG_DMAR
9909 void *iommu; /* hook for IOMMU specific extension */
9910 diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
9911 index 6a25d5d..786b202 100644
9912 --- a/arch/x86/include/asm/dma-mapping.h
9913 +++ b/arch/x86/include/asm/dma-mapping.h
9914 @@ -25,9 +25,9 @@ extern int iommu_merge;
9915 extern struct device x86_dma_fallback_dev;
9916 extern int panic_on_overflow;
9917
9918 -extern struct dma_map_ops *dma_ops;
9919 +extern const struct dma_map_ops *dma_ops;
9920
9921 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
9922 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
9923 {
9924 #ifdef CONFIG_X86_32
9925 return dma_ops;
9926 @@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
9927 /* Make sure we keep the same behaviour */
9928 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
9929 {
9930 - struct dma_map_ops *ops = get_dma_ops(dev);
9931 + const struct dma_map_ops *ops = get_dma_ops(dev);
9932 if (ops->mapping_error)
9933 return ops->mapping_error(dev, dma_addr);
9934
9935 @@ -122,7 +122,7 @@ static inline void *
9936 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
9937 gfp_t gfp)
9938 {
9939 - struct dma_map_ops *ops = get_dma_ops(dev);
9940 + const struct dma_map_ops *ops = get_dma_ops(dev);
9941 void *memory;
9942
9943 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
9944 @@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
9945 static inline void dma_free_coherent(struct device *dev, size_t size,
9946 void *vaddr, dma_addr_t bus)
9947 {
9948 - struct dma_map_ops *ops = get_dma_ops(dev);
9949 + const struct dma_map_ops *ops = get_dma_ops(dev);
9950
9951 WARN_ON(irqs_disabled()); /* for portability */
9952
9953 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
9954 index 40b4e61..40d8133 100644
9955 --- a/arch/x86/include/asm/e820.h
9956 +++ b/arch/x86/include/asm/e820.h
9957 @@ -133,7 +133,7 @@ extern char *default_machine_specific_memory_setup(void);
9958 #define ISA_END_ADDRESS 0x100000
9959 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
9960
9961 -#define BIOS_BEGIN 0x000a0000
9962 +#define BIOS_BEGIN 0x000c0000
9963 #define BIOS_END 0x00100000
9964
9965 #ifdef __KERNEL__
9966 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
9967 index 8ac9d9a..0a6c96e 100644
9968 --- a/arch/x86/include/asm/elf.h
9969 +++ b/arch/x86/include/asm/elf.h
9970 @@ -257,7 +257,25 @@ extern int force_personality32;
9971 the loader. We need to make sure that it is out of the way of the program
9972 that it will "exec", and that there is sufficient room for the brk. */
9973
9974 +#ifdef CONFIG_PAX_SEGMEXEC
9975 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
9976 +#else
9977 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
9978 +#endif
9979 +
9980 +#ifdef CONFIG_PAX_ASLR
9981 +#ifdef CONFIG_X86_32
9982 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
9983 +
9984 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
9985 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
9986 +#else
9987 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
9988 +
9989 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
9990 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
9991 +#endif
9992 +#endif
9993
9994 /* This yields a mask that user programs can use to figure out what
9995 instruction set this CPU supports. This could be done in user space,
9996 @@ -310,9 +328,7 @@ do { \
9997
9998 #define ARCH_DLINFO \
9999 do { \
10000 - if (vdso_enabled) \
10001 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10002 - (unsigned long)current->mm->context.vdso); \
10003 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10004 } while (0)
10005
10006 #define AT_SYSINFO 32
10007 @@ -323,7 +339,7 @@ do { \
10008
10009 #endif /* !CONFIG_X86_32 */
10010
10011 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
10012 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
10013
10014 #define VDSO_ENTRY \
10015 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
10016 @@ -337,7 +353,4 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
10017 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
10018 #define compat_arch_setup_additional_pages syscall32_setup_pages
10019
10020 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
10021 -#define arch_randomize_brk arch_randomize_brk
10022 -
10023 #endif /* _ASM_X86_ELF_H */
10024 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
10025 index cc70c1c..d96d011 100644
10026 --- a/arch/x86/include/asm/emergency-restart.h
10027 +++ b/arch/x86/include/asm/emergency-restart.h
10028 @@ -15,6 +15,6 @@ enum reboot_type {
10029
10030 extern enum reboot_type reboot_type;
10031
10032 -extern void machine_emergency_restart(void);
10033 +extern void machine_emergency_restart(void) __noreturn;
10034
10035 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10036 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10037 index 1f11ce4..7caabd1 100644
10038 --- a/arch/x86/include/asm/futex.h
10039 +++ b/arch/x86/include/asm/futex.h
10040 @@ -12,16 +12,18 @@
10041 #include <asm/system.h>
10042
10043 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10044 + typecheck(u32 __user *, uaddr); \
10045 asm volatile("1:\t" insn "\n" \
10046 "2:\t.section .fixup,\"ax\"\n" \
10047 "3:\tmov\t%3, %1\n" \
10048 "\tjmp\t2b\n" \
10049 "\t.previous\n" \
10050 _ASM_EXTABLE(1b, 3b) \
10051 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10052 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10053 : "i" (-EFAULT), "0" (oparg), "1" (0))
10054
10055 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10056 + typecheck(u32 __user *, uaddr); \
10057 asm volatile("1:\tmovl %2, %0\n" \
10058 "\tmovl\t%0, %3\n" \
10059 "\t" insn "\n" \
10060 @@ -34,10 +36,10 @@
10061 _ASM_EXTABLE(1b, 4b) \
10062 _ASM_EXTABLE(2b, 4b) \
10063 : "=&a" (oldval), "=&r" (ret), \
10064 - "+m" (*uaddr), "=&r" (tem) \
10065 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10066 : "r" (oparg), "i" (-EFAULT), "1" (0))
10067
10068 -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10069 +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10070 {
10071 int op = (encoded_op >> 28) & 7;
10072 int cmp = (encoded_op >> 24) & 15;
10073 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10074
10075 switch (op) {
10076 case FUTEX_OP_SET:
10077 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10078 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10079 break;
10080 case FUTEX_OP_ADD:
10081 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10082 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10083 uaddr, oparg);
10084 break;
10085 case FUTEX_OP_OR:
10086 @@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10087 return ret;
10088 }
10089
10090 -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
10091 +static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
10092 int newval)
10093 {
10094
10095 @@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
10096 return -ENOSYS;
10097 #endif
10098
10099 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
10100 + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10101 return -EFAULT;
10102
10103 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
10104 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
10105 "2:\t.section .fixup, \"ax\"\n"
10106 "3:\tmov %2, %0\n"
10107 "\tjmp 2b\n"
10108 "\t.previous\n"
10109 _ASM_EXTABLE(1b, 3b)
10110 - : "=a" (oldval), "+m" (*uaddr)
10111 + : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
10112 : "i" (-EFAULT), "r" (newval), "0" (oldval)
10113 : "memory"
10114 );
10115 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10116 index ba180d9..3bad351 100644
10117 --- a/arch/x86/include/asm/hw_irq.h
10118 +++ b/arch/x86/include/asm/hw_irq.h
10119 @@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
10120 extern void enable_IO_APIC(void);
10121
10122 /* Statistics */
10123 -extern atomic_t irq_err_count;
10124 -extern atomic_t irq_mis_count;
10125 +extern atomic_unchecked_t irq_err_count;
10126 +extern atomic_unchecked_t irq_mis_count;
10127
10128 /* EISA */
10129 extern void eisa_set_level_irq(unsigned int irq);
10130 diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
10131 index 0b20bbb..4cb1396 100644
10132 --- a/arch/x86/include/asm/i387.h
10133 +++ b/arch/x86/include/asm/i387.h
10134 @@ -60,6 +60,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10135 {
10136 int err;
10137
10138 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10139 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10140 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
10141 +#endif
10142 +
10143 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
10144 "2:\n"
10145 ".section .fixup,\"ax\"\n"
10146 @@ -105,6 +110,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10147 {
10148 int err;
10149
10150 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10151 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10152 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10153 +#endif
10154 +
10155 asm volatile("1: rex64/fxsave (%[fx])\n\t"
10156 "2:\n"
10157 ".section .fixup,\"ax\"\n"
10158 @@ -195,13 +205,8 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10159 }
10160
10161 /* We need a safe address that is cheap to find and that is already
10162 - in L1 during context switch. The best choices are unfortunately
10163 - different for UP and SMP */
10164 -#ifdef CONFIG_SMP
10165 -#define safe_address (__per_cpu_offset[0])
10166 -#else
10167 -#define safe_address (kstat_cpu(0).cpustat.user)
10168 -#endif
10169 + in L1 during context switch. */
10170 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
10171
10172 /*
10173 * These must be called with preempt disabled
10174 @@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void)
10175 struct thread_info *me = current_thread_info();
10176 preempt_disable();
10177 if (me->status & TS_USEDFPU)
10178 - __save_init_fpu(me->task);
10179 + __save_init_fpu(current);
10180 else
10181 clts();
10182 }
10183 diff --git a/arch/x86/include/asm/io_32.h b/arch/x86/include/asm/io_32.h
10184 index a299900..15c5410 100644
10185 --- a/arch/x86/include/asm/io_32.h
10186 +++ b/arch/x86/include/asm/io_32.h
10187 @@ -3,6 +3,7 @@
10188
10189 #include <linux/string.h>
10190 #include <linux/compiler.h>
10191 +#include <asm/processor.h>
10192
10193 /*
10194 * This file contains the definitions for the x86 IO instructions
10195 @@ -42,6 +43,17 @@
10196
10197 #ifdef __KERNEL__
10198
10199 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10200 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10201 +{
10202 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10203 +}
10204 +
10205 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10206 +{
10207 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10208 +}
10209 +
10210 #include <asm-generic/iomap.h>
10211
10212 #include <linux/vmalloc.h>
10213 diff --git a/arch/x86/include/asm/io_64.h b/arch/x86/include/asm/io_64.h
10214 index 2440678..c158b88 100644
10215 --- a/arch/x86/include/asm/io_64.h
10216 +++ b/arch/x86/include/asm/io_64.h
10217 @@ -140,6 +140,17 @@ __OUTS(l)
10218
10219 #include <linux/vmalloc.h>
10220
10221 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10222 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10223 +{
10224 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10225 +}
10226 +
10227 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10228 +{
10229 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10230 +}
10231 +
10232 #include <asm-generic/iomap.h>
10233
10234 void __memcpy_fromio(void *, unsigned long, unsigned);
10235 diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
10236 index fd6d21b..8b13915 100644
10237 --- a/arch/x86/include/asm/iommu.h
10238 +++ b/arch/x86/include/asm/iommu.h
10239 @@ -3,7 +3,7 @@
10240
10241 extern void pci_iommu_shutdown(void);
10242 extern void no_iommu_init(void);
10243 -extern struct dma_map_ops nommu_dma_ops;
10244 +extern const struct dma_map_ops nommu_dma_ops;
10245 extern int force_iommu, no_iommu;
10246 extern int iommu_detected;
10247 extern int iommu_pass_through;
10248 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10249 index 9e2b952..557206e 100644
10250 --- a/arch/x86/include/asm/irqflags.h
10251 +++ b/arch/x86/include/asm/irqflags.h
10252 @@ -142,6 +142,11 @@ static inline unsigned long __raw_local_irq_save(void)
10253 sti; \
10254 sysexit
10255
10256 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
10257 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10258 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
10259 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10260 +
10261 #else
10262 #define INTERRUPT_RETURN iret
10263 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10264 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10265 index 4fe681d..bb6d40c 100644
10266 --- a/arch/x86/include/asm/kprobes.h
10267 +++ b/arch/x86/include/asm/kprobes.h
10268 @@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
10269 #define BREAKPOINT_INSTRUCTION 0xcc
10270 #define RELATIVEJUMP_INSTRUCTION 0xe9
10271 #define MAX_INSN_SIZE 16
10272 -#define MAX_STACK_SIZE 64
10273 -#define MIN_STACK_SIZE(ADDR) \
10274 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10275 - THREAD_SIZE - (unsigned long)(ADDR))) \
10276 - ? (MAX_STACK_SIZE) \
10277 - : (((unsigned long)current_thread_info()) + \
10278 - THREAD_SIZE - (unsigned long)(ADDR)))
10279 +#define MAX_STACK_SIZE 64UL
10280 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10281
10282 #define flush_insn_slot(p) do { } while (0)
10283
10284 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10285 index 08bc2ff..2e88d1f 100644
10286 --- a/arch/x86/include/asm/kvm_host.h
10287 +++ b/arch/x86/include/asm/kvm_host.h
10288 @@ -534,9 +534,9 @@ struct kvm_x86_ops {
10289 bool (*gb_page_enable)(void);
10290
10291 const struct trace_print_flags *exit_reasons_str;
10292 -};
10293 +} __do_const;
10294
10295 -extern struct kvm_x86_ops *kvm_x86_ops;
10296 +extern const struct kvm_x86_ops *kvm_x86_ops;
10297
10298 int kvm_mmu_module_init(void);
10299 void kvm_mmu_module_exit(void);
10300 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10301 index 47b9b6f..815aaa1 100644
10302 --- a/arch/x86/include/asm/local.h
10303 +++ b/arch/x86/include/asm/local.h
10304 @@ -18,26 +18,58 @@ typedef struct {
10305
10306 static inline void local_inc(local_t *l)
10307 {
10308 - asm volatile(_ASM_INC "%0"
10309 + asm volatile(_ASM_INC "%0\n"
10310 +
10311 +#ifdef CONFIG_PAX_REFCOUNT
10312 + "jno 0f\n"
10313 + _ASM_DEC "%0\n"
10314 + "int $4\n0:\n"
10315 + _ASM_EXTABLE(0b, 0b)
10316 +#endif
10317 +
10318 : "+m" (l->a.counter));
10319 }
10320
10321 static inline void local_dec(local_t *l)
10322 {
10323 - asm volatile(_ASM_DEC "%0"
10324 + asm volatile(_ASM_DEC "%0\n"
10325 +
10326 +#ifdef CONFIG_PAX_REFCOUNT
10327 + "jno 0f\n"
10328 + _ASM_INC "%0\n"
10329 + "int $4\n0:\n"
10330 + _ASM_EXTABLE(0b, 0b)
10331 +#endif
10332 +
10333 : "+m" (l->a.counter));
10334 }
10335
10336 static inline void local_add(long i, local_t *l)
10337 {
10338 - asm volatile(_ASM_ADD "%1,%0"
10339 + asm volatile(_ASM_ADD "%1,%0\n"
10340 +
10341 +#ifdef CONFIG_PAX_REFCOUNT
10342 + "jno 0f\n"
10343 + _ASM_SUB "%1,%0\n"
10344 + "int $4\n0:\n"
10345 + _ASM_EXTABLE(0b, 0b)
10346 +#endif
10347 +
10348 : "+m" (l->a.counter)
10349 : "ir" (i));
10350 }
10351
10352 static inline void local_sub(long i, local_t *l)
10353 {
10354 - asm volatile(_ASM_SUB "%1,%0"
10355 + asm volatile(_ASM_SUB "%1,%0\n"
10356 +
10357 +#ifdef CONFIG_PAX_REFCOUNT
10358 + "jno 0f\n"
10359 + _ASM_ADD "%1,%0\n"
10360 + "int $4\n0:\n"
10361 + _ASM_EXTABLE(0b, 0b)
10362 +#endif
10363 +
10364 : "+m" (l->a.counter)
10365 : "ir" (i));
10366 }
10367 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10368 {
10369 unsigned char c;
10370
10371 - asm volatile(_ASM_SUB "%2,%0; sete %1"
10372 + asm volatile(_ASM_SUB "%2,%0\n"
10373 +
10374 +#ifdef CONFIG_PAX_REFCOUNT
10375 + "jno 0f\n"
10376 + _ASM_ADD "%2,%0\n"
10377 + "int $4\n0:\n"
10378 + _ASM_EXTABLE(0b, 0b)
10379 +#endif
10380 +
10381 + "sete %1\n"
10382 : "+m" (l->a.counter), "=qm" (c)
10383 : "ir" (i) : "memory");
10384 return c;
10385 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
10386 {
10387 unsigned char c;
10388
10389 - asm volatile(_ASM_DEC "%0; sete %1"
10390 + asm volatile(_ASM_DEC "%0\n"
10391 +
10392 +#ifdef CONFIG_PAX_REFCOUNT
10393 + "jno 0f\n"
10394 + _ASM_INC "%0\n"
10395 + "int $4\n0:\n"
10396 + _ASM_EXTABLE(0b, 0b)
10397 +#endif
10398 +
10399 + "sete %1\n"
10400 : "+m" (l->a.counter), "=qm" (c)
10401 : : "memory");
10402 return c != 0;
10403 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
10404 {
10405 unsigned char c;
10406
10407 - asm volatile(_ASM_INC "%0; sete %1"
10408 + asm volatile(_ASM_INC "%0\n"
10409 +
10410 +#ifdef CONFIG_PAX_REFCOUNT
10411 + "jno 0f\n"
10412 + _ASM_DEC "%0\n"
10413 + "int $4\n0:\n"
10414 + _ASM_EXTABLE(0b, 0b)
10415 +#endif
10416 +
10417 + "sete %1\n"
10418 : "+m" (l->a.counter), "=qm" (c)
10419 : : "memory");
10420 return c != 0;
10421 @@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
10422 {
10423 unsigned char c;
10424
10425 - asm volatile(_ASM_ADD "%2,%0; sets %1"
10426 + asm volatile(_ASM_ADD "%2,%0\n"
10427 +
10428 +#ifdef CONFIG_PAX_REFCOUNT
10429 + "jno 0f\n"
10430 + _ASM_SUB "%2,%0\n"
10431 + "int $4\n0:\n"
10432 + _ASM_EXTABLE(0b, 0b)
10433 +#endif
10434 +
10435 + "sets %1\n"
10436 : "+m" (l->a.counter), "=qm" (c)
10437 : "ir" (i) : "memory");
10438 return c;
10439 @@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
10440 #endif
10441 /* Modern 486+ processor */
10442 __i = i;
10443 - asm volatile(_ASM_XADD "%0, %1;"
10444 + asm volatile(_ASM_XADD "%0, %1\n"
10445 +
10446 +#ifdef CONFIG_PAX_REFCOUNT
10447 + "jno 0f\n"
10448 + _ASM_MOV "%0,%1\n"
10449 + "int $4\n0:\n"
10450 + _ASM_EXTABLE(0b, 0b)
10451 +#endif
10452 +
10453 : "+r" (i), "+m" (l->a.counter)
10454 : : "memory");
10455 return i + __i;
10456 diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
10457 index ef51b50..514ba37 100644
10458 --- a/arch/x86/include/asm/microcode.h
10459 +++ b/arch/x86/include/asm/microcode.h
10460 @@ -12,13 +12,13 @@ struct device;
10461 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
10462
10463 struct microcode_ops {
10464 - enum ucode_state (*request_microcode_user) (int cpu,
10465 + enum ucode_state (* const request_microcode_user) (int cpu,
10466 const void __user *buf, size_t size);
10467
10468 - enum ucode_state (*request_microcode_fw) (int cpu,
10469 + enum ucode_state (* const request_microcode_fw) (int cpu,
10470 struct device *device);
10471
10472 - void (*microcode_fini_cpu) (int cpu);
10473 + void (* const microcode_fini_cpu) (int cpu);
10474
10475 /*
10476 * The generic 'microcode_core' part guarantees that
10477 @@ -38,18 +38,18 @@ struct ucode_cpu_info {
10478 extern struct ucode_cpu_info ucode_cpu_info[];
10479
10480 #ifdef CONFIG_MICROCODE_INTEL
10481 -extern struct microcode_ops * __init init_intel_microcode(void);
10482 +extern const struct microcode_ops * __init init_intel_microcode(void);
10483 #else
10484 -static inline struct microcode_ops * __init init_intel_microcode(void)
10485 +static inline const struct microcode_ops * __init init_intel_microcode(void)
10486 {
10487 return NULL;
10488 }
10489 #endif /* CONFIG_MICROCODE_INTEL */
10490
10491 #ifdef CONFIG_MICROCODE_AMD
10492 -extern struct microcode_ops * __init init_amd_microcode(void);
10493 +extern const struct microcode_ops * __init init_amd_microcode(void);
10494 #else
10495 -static inline struct microcode_ops * __init init_amd_microcode(void)
10496 +static inline const struct microcode_ops * __init init_amd_microcode(void)
10497 {
10498 return NULL;
10499 }
10500 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10501 index 593e51d..fa69c9a 100644
10502 --- a/arch/x86/include/asm/mman.h
10503 +++ b/arch/x86/include/asm/mman.h
10504 @@ -5,4 +5,14 @@
10505
10506 #include <asm-generic/mman.h>
10507
10508 +#ifdef __KERNEL__
10509 +#ifndef __ASSEMBLY__
10510 +#ifdef CONFIG_X86_32
10511 +#define arch_mmap_check i386_mmap_check
10512 +int i386_mmap_check(unsigned long addr, unsigned long len,
10513 + unsigned long flags);
10514 +#endif
10515 +#endif
10516 +#endif
10517 +
10518 #endif /* _ASM_X86_MMAN_H */
10519 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10520 index 80a1dee..239c67d 100644
10521 --- a/arch/x86/include/asm/mmu.h
10522 +++ b/arch/x86/include/asm/mmu.h
10523 @@ -9,10 +9,23 @@
10524 * we put the segment information here.
10525 */
10526 typedef struct {
10527 - void *ldt;
10528 + struct desc_struct *ldt;
10529 int size;
10530 struct mutex lock;
10531 - void *vdso;
10532 + unsigned long vdso;
10533 +
10534 +#ifdef CONFIG_X86_32
10535 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10536 + unsigned long user_cs_base;
10537 + unsigned long user_cs_limit;
10538 +
10539 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10540 + cpumask_t cpu_user_cs_mask;
10541 +#endif
10542 +
10543 +#endif
10544 +#endif
10545 +
10546 } mm_context_t;
10547
10548 #ifdef CONFIG_SMP
10549 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10550 index 8b5393e..8143173 100644
10551 --- a/arch/x86/include/asm/mmu_context.h
10552 +++ b/arch/x86/include/asm/mmu_context.h
10553 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
10554
10555 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10556 {
10557 +
10558 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10559 + unsigned int i;
10560 + pgd_t *pgd;
10561 +
10562 + pax_open_kernel();
10563 + pgd = get_cpu_pgd(smp_processor_id());
10564 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
10565 + set_pgd_batched(pgd+i, native_make_pgd(0));
10566 + pax_close_kernel();
10567 +#endif
10568 +
10569 #ifdef CONFIG_SMP
10570 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
10571 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
10572 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10573 struct task_struct *tsk)
10574 {
10575 unsigned cpu = smp_processor_id();
10576 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) && defined(CONFIG_SMP)
10577 + int tlbstate = TLBSTATE_OK;
10578 +#endif
10579
10580 if (likely(prev != next)) {
10581 #ifdef CONFIG_SMP
10582 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10583 + tlbstate = percpu_read(cpu_tlbstate.state);
10584 +#endif
10585 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10586 percpu_write(cpu_tlbstate.active_mm, next);
10587 #endif
10588 cpumask_set_cpu(cpu, mm_cpumask(next));
10589
10590 /* Re-load page tables */
10591 +#ifdef CONFIG_PAX_PER_CPU_PGD
10592 + pax_open_kernel();
10593 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10594 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10595 + pax_close_kernel();
10596 + load_cr3(get_cpu_pgd(cpu));
10597 +#else
10598 load_cr3(next->pgd);
10599 +#endif
10600
10601 /* stop flush ipis for the previous mm */
10602 cpumask_clear_cpu(cpu, mm_cpumask(prev));
10603 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10604 */
10605 if (unlikely(prev->context.ldt != next->context.ldt))
10606 load_LDT_nolock(&next->context);
10607 - }
10608 +
10609 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10610 + if (!nx_enabled) {
10611 + smp_mb__before_clear_bit();
10612 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
10613 + smp_mb__after_clear_bit();
10614 + cpu_set(cpu, next->context.cpu_user_cs_mask);
10615 + }
10616 +#endif
10617 +
10618 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10619 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
10620 + prev->context.user_cs_limit != next->context.user_cs_limit))
10621 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10622 #ifdef CONFIG_SMP
10623 + else if (unlikely(tlbstate != TLBSTATE_OK))
10624 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10625 +#endif
10626 +#endif
10627 +
10628 + }
10629 else {
10630 +
10631 +#ifdef CONFIG_PAX_PER_CPU_PGD
10632 + pax_open_kernel();
10633 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10634 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10635 + pax_close_kernel();
10636 + load_cr3(get_cpu_pgd(cpu));
10637 +#endif
10638 +
10639 +#ifdef CONFIG_SMP
10640 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10641 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
10642
10643 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10644 * tlb flush IPI delivery. We must reload CR3
10645 * to make sure to use no freed page tables.
10646 */
10647 +
10648 +#ifndef CONFIG_PAX_PER_CPU_PGD
10649 load_cr3(next->pgd);
10650 +#endif
10651 +
10652 load_LDT_nolock(&next->context);
10653 +
10654 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
10655 + if (!nx_enabled)
10656 + cpu_set(cpu, next->context.cpu_user_cs_mask);
10657 +#endif
10658 +
10659 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10660 +#ifdef CONFIG_PAX_PAGEEXEC
10661 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
10662 +#endif
10663 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10664 +#endif
10665 +
10666 }
10667 +#endif
10668 }
10669 -#endif
10670 }
10671
10672 #define activate_mm(prev, next) \
10673 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
10674 index 3e2ce58..caaf478 100644
10675 --- a/arch/x86/include/asm/module.h
10676 +++ b/arch/x86/include/asm/module.h
10677 @@ -5,6 +5,7 @@
10678
10679 #ifdef CONFIG_X86_64
10680 /* X86_64 does not define MODULE_PROC_FAMILY */
10681 +#define MODULE_PROC_FAMILY ""
10682 #elif defined CONFIG_M386
10683 #define MODULE_PROC_FAMILY "386 "
10684 #elif defined CONFIG_M486
10685 @@ -59,13 +60,26 @@
10686 #error unknown processor family
10687 #endif
10688
10689 -#ifdef CONFIG_X86_32
10690 -# ifdef CONFIG_4KSTACKS
10691 -# define MODULE_STACKSIZE "4KSTACKS "
10692 -# else
10693 -# define MODULE_STACKSIZE ""
10694 -# endif
10695 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
10696 +#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
10697 +#define MODULE_STACKSIZE "4KSTACKS "
10698 +#else
10699 +#define MODULE_STACKSIZE ""
10700 #endif
10701
10702 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
10703 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
10704 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
10705 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
10706 +#else
10707 +#define MODULE_PAX_KERNEXEC ""
10708 +#endif
10709 +
10710 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10711 +#define MODULE_PAX_UDEREF "UDEREF "
10712 +#else
10713 +#define MODULE_PAX_UDEREF ""
10714 +#endif
10715 +
10716 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
10717 +
10718 #endif /* _ASM_X86_MODULE_H */
10719 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
10720 index 7639dbf..e08a58c 100644
10721 --- a/arch/x86/include/asm/page_64_types.h
10722 +++ b/arch/x86/include/asm/page_64_types.h
10723 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
10724
10725 /* duplicated to the one in bootmem.h */
10726 extern unsigned long max_pfn;
10727 -extern unsigned long phys_base;
10728 +extern const unsigned long phys_base;
10729
10730 extern unsigned long __phys_addr(unsigned long);
10731 #define __phys_reloc_hide(x) (x)
10732 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
10733 index efb3899..ef30687 100644
10734 --- a/arch/x86/include/asm/paravirt.h
10735 +++ b/arch/x86/include/asm/paravirt.h
10736 @@ -648,6 +648,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
10737 val);
10738 }
10739
10740 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
10741 +{
10742 + pgdval_t val = native_pgd_val(pgd);
10743 +
10744 + if (sizeof(pgdval_t) > sizeof(long))
10745 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
10746 + val, (u64)val >> 32);
10747 + else
10748 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
10749 + val);
10750 +}
10751 +
10752 static inline void pgd_clear(pgd_t *pgdp)
10753 {
10754 set_pgd(pgdp, __pgd(0));
10755 @@ -729,6 +741,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
10756 pv_mmu_ops.set_fixmap(idx, phys, flags);
10757 }
10758
10759 +#ifdef CONFIG_PAX_KERNEXEC
10760 +static inline unsigned long pax_open_kernel(void)
10761 +{
10762 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
10763 +}
10764 +
10765 +static inline unsigned long pax_close_kernel(void)
10766 +{
10767 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
10768 +}
10769 +#else
10770 +static inline unsigned long pax_open_kernel(void) { return 0; }
10771 +static inline unsigned long pax_close_kernel(void) { return 0; }
10772 +#endif
10773 +
10774 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
10775
10776 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
10777 @@ -945,7 +972,7 @@ extern void default_banner(void);
10778
10779 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
10780 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
10781 -#define PARA_INDIRECT(addr) *%cs:addr
10782 +#define PARA_INDIRECT(addr) *%ss:addr
10783 #endif
10784
10785 #define INTERRUPT_RETURN \
10786 @@ -1022,6 +1049,21 @@ extern void default_banner(void);
10787 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
10788 CLBR_NONE, \
10789 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
10790 +
10791 +#define GET_CR0_INTO_RDI \
10792 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
10793 + mov %rax,%rdi
10794 +
10795 +#define SET_RDI_INTO_CR0 \
10796 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
10797 +
10798 +#define GET_CR3_INTO_RDI \
10799 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
10800 + mov %rax,%rdi
10801 +
10802 +#define SET_RDI_INTO_CR3 \
10803 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
10804 +
10805 #endif /* CONFIG_X86_32 */
10806
10807 #endif /* __ASSEMBLY__ */
10808 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
10809 index 9357473..aeb2de5 100644
10810 --- a/arch/x86/include/asm/paravirt_types.h
10811 +++ b/arch/x86/include/asm/paravirt_types.h
10812 @@ -78,19 +78,19 @@ struct pv_init_ops {
10813 */
10814 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
10815 unsigned long addr, unsigned len);
10816 -};
10817 +} __no_const;
10818
10819
10820 struct pv_lazy_ops {
10821 /* Set deferred update mode, used for batching operations. */
10822 void (*enter)(void);
10823 void (*leave)(void);
10824 -};
10825 +} __no_const;
10826
10827 struct pv_time_ops {
10828 unsigned long long (*sched_clock)(void);
10829 unsigned long (*get_tsc_khz)(void);
10830 -};
10831 +} __no_const;
10832
10833 struct pv_cpu_ops {
10834 /* hooks for various privileged instructions */
10835 @@ -186,7 +186,7 @@ struct pv_cpu_ops {
10836
10837 void (*start_context_switch)(struct task_struct *prev);
10838 void (*end_context_switch)(struct task_struct *next);
10839 -};
10840 +} __no_const;
10841
10842 struct pv_irq_ops {
10843 /*
10844 @@ -217,7 +217,7 @@ struct pv_apic_ops {
10845 unsigned long start_eip,
10846 unsigned long start_esp);
10847 #endif
10848 -};
10849 +} __no_const;
10850
10851 struct pv_mmu_ops {
10852 unsigned long (*read_cr2)(void);
10853 @@ -301,6 +301,7 @@ struct pv_mmu_ops {
10854 struct paravirt_callee_save make_pud;
10855
10856 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
10857 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
10858 #endif /* PAGETABLE_LEVELS == 4 */
10859 #endif /* PAGETABLE_LEVELS >= 3 */
10860
10861 @@ -316,6 +317,12 @@ struct pv_mmu_ops {
10862 an mfn. We can tell which is which from the index. */
10863 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
10864 phys_addr_t phys, pgprot_t flags);
10865 +
10866 +#ifdef CONFIG_PAX_KERNEXEC
10867 + unsigned long (*pax_open_kernel)(void);
10868 + unsigned long (*pax_close_kernel)(void);
10869 +#endif
10870 +
10871 };
10872
10873 struct raw_spinlock;
10874 @@ -326,7 +333,7 @@ struct pv_lock_ops {
10875 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
10876 int (*spin_trylock)(struct raw_spinlock *lock);
10877 void (*spin_unlock)(struct raw_spinlock *lock);
10878 -};
10879 +} __no_const;
10880
10881 /* This contains all the paravirt structures: we get a convenient
10882 * number for each function using the offset which we use to indicate
10883 diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
10884 index b399988..3f47c38 100644
10885 --- a/arch/x86/include/asm/pci_x86.h
10886 +++ b/arch/x86/include/asm/pci_x86.h
10887 @@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct pci_dev *dev);
10888 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
10889
10890 struct pci_raw_ops {
10891 - int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
10892 + int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
10893 int reg, int len, u32 *val);
10894 - int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
10895 + int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
10896 int reg, int len, u32 val);
10897 };
10898
10899 -extern struct pci_raw_ops *raw_pci_ops;
10900 -extern struct pci_raw_ops *raw_pci_ext_ops;
10901 +extern const struct pci_raw_ops *raw_pci_ops;
10902 +extern const struct pci_raw_ops *raw_pci_ext_ops;
10903
10904 -extern struct pci_raw_ops pci_direct_conf1;
10905 +extern const struct pci_raw_ops pci_direct_conf1;
10906 extern bool port_cf9_safe;
10907
10908 /* arch_initcall level */
10909 diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
10910 index b65a36d..50345a4 100644
10911 --- a/arch/x86/include/asm/percpu.h
10912 +++ b/arch/x86/include/asm/percpu.h
10913 @@ -78,6 +78,7 @@ do { \
10914 if (0) { \
10915 T__ tmp__; \
10916 tmp__ = (val); \
10917 + (void)tmp__; \
10918 } \
10919 switch (sizeof(var)) { \
10920 case 1: \
10921 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
10922 index 271de94..ef944d6 100644
10923 --- a/arch/x86/include/asm/pgalloc.h
10924 +++ b/arch/x86/include/asm/pgalloc.h
10925 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
10926 pmd_t *pmd, pte_t *pte)
10927 {
10928 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
10929 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
10930 +}
10931 +
10932 +static inline void pmd_populate_user(struct mm_struct *mm,
10933 + pmd_t *pmd, pte_t *pte)
10934 +{
10935 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
10936 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
10937 }
10938
10939 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
10940 index 2334982..70bc412 100644
10941 --- a/arch/x86/include/asm/pgtable-2level.h
10942 +++ b/arch/x86/include/asm/pgtable-2level.h
10943 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
10944
10945 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
10946 {
10947 + pax_open_kernel();
10948 *pmdp = pmd;
10949 + pax_close_kernel();
10950 }
10951
10952 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
10953 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
10954 index 33927d2..ccde329 100644
10955 --- a/arch/x86/include/asm/pgtable-3level.h
10956 +++ b/arch/x86/include/asm/pgtable-3level.h
10957 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
10958
10959 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
10960 {
10961 + pax_open_kernel();
10962 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
10963 + pax_close_kernel();
10964 }
10965
10966 static inline void native_set_pud(pud_t *pudp, pud_t pud)
10967 {
10968 + pax_open_kernel();
10969 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
10970 + pax_close_kernel();
10971 }
10972
10973 /*
10974 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
10975 index af6fd36..867ff74 100644
10976 --- a/arch/x86/include/asm/pgtable.h
10977 +++ b/arch/x86/include/asm/pgtable.h
10978 @@ -39,6 +39,7 @@ extern struct list_head pgd_list;
10979
10980 #ifndef __PAGETABLE_PUD_FOLDED
10981 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
10982 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
10983 #define pgd_clear(pgd) native_pgd_clear(pgd)
10984 #endif
10985
10986 @@ -74,12 +75,51 @@ extern struct list_head pgd_list;
10987
10988 #define arch_end_context_switch(prev) do {} while(0)
10989
10990 +#define pax_open_kernel() native_pax_open_kernel()
10991 +#define pax_close_kernel() native_pax_close_kernel()
10992 #endif /* CONFIG_PARAVIRT */
10993
10994 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
10995 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
10996 +
10997 +#ifdef CONFIG_PAX_KERNEXEC
10998 +static inline unsigned long native_pax_open_kernel(void)
10999 +{
11000 + unsigned long cr0;
11001 +
11002 + preempt_disable();
11003 + barrier();
11004 + cr0 = read_cr0() ^ X86_CR0_WP;
11005 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
11006 + write_cr0(cr0);
11007 + return cr0 ^ X86_CR0_WP;
11008 +}
11009 +
11010 +static inline unsigned long native_pax_close_kernel(void)
11011 +{
11012 + unsigned long cr0;
11013 +
11014 + cr0 = read_cr0() ^ X86_CR0_WP;
11015 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
11016 + write_cr0(cr0);
11017 + barrier();
11018 + preempt_enable_no_resched();
11019 + return cr0 ^ X86_CR0_WP;
11020 +}
11021 +#else
11022 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
11023 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
11024 +#endif
11025 +
11026 /*
11027 * The following only work if pte_present() is true.
11028 * Undefined behaviour if not..
11029 */
11030 +static inline int pte_user(pte_t pte)
11031 +{
11032 + return pte_val(pte) & _PAGE_USER;
11033 +}
11034 +
11035 static inline int pte_dirty(pte_t pte)
11036 {
11037 return pte_flags(pte) & _PAGE_DIRTY;
11038 @@ -167,9 +207,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
11039 return pte_clear_flags(pte, _PAGE_RW);
11040 }
11041
11042 +static inline pte_t pte_mkread(pte_t pte)
11043 +{
11044 + return __pte(pte_val(pte) | _PAGE_USER);
11045 +}
11046 +
11047 static inline pte_t pte_mkexec(pte_t pte)
11048 {
11049 - return pte_clear_flags(pte, _PAGE_NX);
11050 +#ifdef CONFIG_X86_PAE
11051 + if (__supported_pte_mask & _PAGE_NX)
11052 + return pte_clear_flags(pte, _PAGE_NX);
11053 + else
11054 +#endif
11055 + return pte_set_flags(pte, _PAGE_USER);
11056 +}
11057 +
11058 +static inline pte_t pte_exprotect(pte_t pte)
11059 +{
11060 +#ifdef CONFIG_X86_PAE
11061 + if (__supported_pte_mask & _PAGE_NX)
11062 + return pte_set_flags(pte, _PAGE_NX);
11063 + else
11064 +#endif
11065 + return pte_clear_flags(pte, _PAGE_USER);
11066 }
11067
11068 static inline pte_t pte_mkdirty(pte_t pte)
11069 @@ -302,6 +362,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
11070 #endif
11071
11072 #ifndef __ASSEMBLY__
11073 +
11074 +#ifdef CONFIG_PAX_PER_CPU_PGD
11075 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
11076 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
11077 +{
11078 + return cpu_pgd[cpu];
11079 +}
11080 +#endif
11081 +
11082 #include <linux/mm_types.h>
11083
11084 static inline int pte_none(pte_t pte)
11085 @@ -472,7 +541,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
11086
11087 static inline int pgd_bad(pgd_t pgd)
11088 {
11089 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
11090 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
11091 }
11092
11093 static inline int pgd_none(pgd_t pgd)
11094 @@ -495,7 +564,12 @@ static inline int pgd_none(pgd_t pgd)
11095 * pgd_offset() returns a (pgd_t *)
11096 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
11097 */
11098 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
11099 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
11100 +
11101 +#ifdef CONFIG_PAX_PER_CPU_PGD
11102 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
11103 +#endif
11104 +
11105 /*
11106 * a shortcut which implies the use of the kernel's pgd, instead
11107 * of a process's
11108 @@ -506,6 +580,20 @@ static inline int pgd_none(pgd_t pgd)
11109 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
11110 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
11111
11112 +#ifdef CONFIG_X86_32
11113 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
11114 +#else
11115 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
11116 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
11117 +
11118 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11119 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
11120 +#else
11121 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
11122 +#endif
11123 +
11124 +#endif
11125 +
11126 #ifndef __ASSEMBLY__
11127
11128 extern int direct_gbpages;
11129 @@ -611,11 +699,23 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
11130 * dst and src can be on the same page, but the range must not overlap,
11131 * and must not cross a page boundary.
11132 */
11133 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
11134 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
11135 {
11136 - memcpy(dst, src, count * sizeof(pgd_t));
11137 + pax_open_kernel();
11138 + while (count--)
11139 + *dst++ = *src++;
11140 + pax_close_kernel();
11141 }
11142
11143 +#ifdef CONFIG_PAX_PER_CPU_PGD
11144 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11145 +#endif
11146 +
11147 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11148 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11149 +#else
11150 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
11151 +#endif
11152
11153 #include <asm-generic/pgtable.h>
11154 #endif /* __ASSEMBLY__ */
11155 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
11156 index 750f1bf..971e839 100644
11157 --- a/arch/x86/include/asm/pgtable_32.h
11158 +++ b/arch/x86/include/asm/pgtable_32.h
11159 @@ -26,9 +26,6 @@
11160 struct mm_struct;
11161 struct vm_area_struct;
11162
11163 -extern pgd_t swapper_pg_dir[1024];
11164 -extern pgd_t trampoline_pg_dir[1024];
11165 -
11166 static inline void pgtable_cache_init(void) { }
11167 static inline void check_pgt_cache(void) { }
11168 void paging_init(void);
11169 @@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11170 # include <asm/pgtable-2level.h>
11171 #endif
11172
11173 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
11174 +extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
11175 +#ifdef CONFIG_X86_PAE
11176 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
11177 +#endif
11178 +
11179 #if defined(CONFIG_HIGHPTE)
11180 #define __KM_PTE \
11181 (in_nmi() ? KM_NMI_PTE : \
11182 @@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11183 /* Clear a kernel PTE and flush it from the TLB */
11184 #define kpte_clear_flush(ptep, vaddr) \
11185 do { \
11186 + pax_open_kernel(); \
11187 pte_clear(&init_mm, (vaddr), (ptep)); \
11188 + pax_close_kernel(); \
11189 __flush_tlb_one((vaddr)); \
11190 } while (0)
11191
11192 @@ -85,6 +90,9 @@ do { \
11193
11194 #endif /* !__ASSEMBLY__ */
11195
11196 +#define HAVE_ARCH_UNMAPPED_AREA
11197 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11198 +
11199 /*
11200 * kern_addr_valid() is (1) for FLATMEM and (0) for
11201 * SPARSEMEM and DISCONTIGMEM
11202 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11203 index 5e67c15..12d5c47 100644
11204 --- a/arch/x86/include/asm/pgtable_32_types.h
11205 +++ b/arch/x86/include/asm/pgtable_32_types.h
11206 @@ -8,7 +8,7 @@
11207 */
11208 #ifdef CONFIG_X86_PAE
11209 # include <asm/pgtable-3level_types.h>
11210 -# define PMD_SIZE (1UL << PMD_SHIFT)
11211 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11212 # define PMD_MASK (~(PMD_SIZE - 1))
11213 #else
11214 # include <asm/pgtable-2level_types.h>
11215 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11216 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11217 #endif
11218
11219 +#ifdef CONFIG_PAX_KERNEXEC
11220 +#ifndef __ASSEMBLY__
11221 +extern unsigned char MODULES_EXEC_VADDR[];
11222 +extern unsigned char MODULES_EXEC_END[];
11223 +#endif
11224 +#include <asm/boot.h>
11225 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11226 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11227 +#else
11228 +#define ktla_ktva(addr) (addr)
11229 +#define ktva_ktla(addr) (addr)
11230 +#endif
11231 +
11232 #define MODULES_VADDR VMALLOC_START
11233 #define MODULES_END VMALLOC_END
11234 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11235 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11236 index c57a301..6b414ff 100644
11237 --- a/arch/x86/include/asm/pgtable_64.h
11238 +++ b/arch/x86/include/asm/pgtable_64.h
11239 @@ -16,10 +16,14 @@
11240
11241 extern pud_t level3_kernel_pgt[512];
11242 extern pud_t level3_ident_pgt[512];
11243 +extern pud_t level3_vmalloc_start_pgt[512];
11244 +extern pud_t level3_vmalloc_end_pgt[512];
11245 +extern pud_t level3_vmemmap_pgt[512];
11246 +extern pud_t level2_vmemmap_pgt[512];
11247 extern pmd_t level2_kernel_pgt[512];
11248 extern pmd_t level2_fixmap_pgt[512];
11249 -extern pmd_t level2_ident_pgt[512];
11250 -extern pgd_t init_level4_pgt[];
11251 +extern pmd_t level2_ident_pgt[512*2];
11252 +extern pgd_t init_level4_pgt[512];
11253
11254 #define swapper_pg_dir init_level4_pgt
11255
11256 @@ -74,7 +78,9 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
11257
11258 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11259 {
11260 + pax_open_kernel();
11261 *pmdp = pmd;
11262 + pax_close_kernel();
11263 }
11264
11265 static inline void native_pmd_clear(pmd_t *pmd)
11266 @@ -94,6 +100,13 @@ static inline void native_pud_clear(pud_t *pud)
11267
11268 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11269 {
11270 + pax_open_kernel();
11271 + *pgdp = pgd;
11272 + pax_close_kernel();
11273 +}
11274 +
11275 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11276 +{
11277 *pgdp = pgd;
11278 }
11279
11280 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11281 index 766ea16..5b96cb3 100644
11282 --- a/arch/x86/include/asm/pgtable_64_types.h
11283 +++ b/arch/x86/include/asm/pgtable_64_types.h
11284 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11285 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11286 #define MODULES_END _AC(0xffffffffff000000, UL)
11287 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11288 +#define MODULES_EXEC_VADDR MODULES_VADDR
11289 +#define MODULES_EXEC_END MODULES_END
11290 +
11291 +#define ktla_ktva(addr) (addr)
11292 +#define ktva_ktla(addr) (addr)
11293
11294 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11295 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11296 index d1f4a76..2f46ba1 100644
11297 --- a/arch/x86/include/asm/pgtable_types.h
11298 +++ b/arch/x86/include/asm/pgtable_types.h
11299 @@ -16,12 +16,11 @@
11300 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11301 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11302 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11303 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11304 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11305 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11306 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11307 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11308 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11309 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11310 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11311 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11312
11313 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11314 @@ -39,7 +38,6 @@
11315 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11316 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11317 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11318 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11319 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11320 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11321 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11322 @@ -55,8 +53,10 @@
11323
11324 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11325 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11326 -#else
11327 +#elif defined(CONFIG_KMEMCHECK)
11328 #define _PAGE_NX (_AT(pteval_t, 0))
11329 +#else
11330 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11331 #endif
11332
11333 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11334 @@ -93,6 +93,9 @@
11335 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11336 _PAGE_ACCESSED)
11337
11338 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
11339 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
11340 +
11341 #define __PAGE_KERNEL_EXEC \
11342 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11343 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11344 @@ -103,8 +106,8 @@
11345 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11346 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11347 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11348 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11349 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
11350 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11351 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
11352 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11353 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
11354 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
11355 @@ -163,8 +166,8 @@
11356 * bits are combined, this will alow user to access the high address mapped
11357 * VDSO in the presence of CONFIG_COMPAT_VDSO
11358 */
11359 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11360 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11361 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11362 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11363 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11364 #endif
11365
11366 @@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11367 {
11368 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11369 }
11370 +#endif
11371
11372 +#if PAGETABLE_LEVELS == 3
11373 +#include <asm-generic/pgtable-nopud.h>
11374 +#endif
11375 +
11376 +#if PAGETABLE_LEVELS == 2
11377 +#include <asm-generic/pgtable-nopmd.h>
11378 +#endif
11379 +
11380 +#ifndef __ASSEMBLY__
11381 #if PAGETABLE_LEVELS > 3
11382 typedef struct { pudval_t pud; } pud_t;
11383
11384 @@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11385 return pud.pud;
11386 }
11387 #else
11388 -#include <asm-generic/pgtable-nopud.h>
11389 -
11390 static inline pudval_t native_pud_val(pud_t pud)
11391 {
11392 return native_pgd_val(pud.pgd);
11393 @@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11394 return pmd.pmd;
11395 }
11396 #else
11397 -#include <asm-generic/pgtable-nopmd.h>
11398 -
11399 static inline pmdval_t native_pmd_val(pmd_t pmd)
11400 {
11401 return native_pgd_val(pmd.pud.pgd);
11402 @@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
11403
11404 extern pteval_t __supported_pte_mask;
11405 extern void set_nx(void);
11406 +
11407 +#ifdef CONFIG_X86_32
11408 +#ifdef CONFIG_X86_PAE
11409 extern int nx_enabled;
11410 +#else
11411 +#define nx_enabled (0)
11412 +#endif
11413 +#else
11414 +#define nx_enabled (1)
11415 +#endif
11416
11417 #define pgprot_writecombine pgprot_writecombine
11418 extern pgprot_t pgprot_writecombine(pgprot_t prot);
11419 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11420 index fa04dea..5f823fc 100644
11421 --- a/arch/x86/include/asm/processor.h
11422 +++ b/arch/x86/include/asm/processor.h
11423 @@ -272,7 +272,7 @@ struct tss_struct {
11424
11425 } ____cacheline_aligned;
11426
11427 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11428 +extern struct tss_struct init_tss[NR_CPUS];
11429
11430 /*
11431 * Save the original ist values for checking stack pointers during debugging
11432 @@ -911,11 +911,18 @@ static inline void spin_lock_prefetch(const void *x)
11433 */
11434 #define TASK_SIZE PAGE_OFFSET
11435 #define TASK_SIZE_MAX TASK_SIZE
11436 +
11437 +#ifdef CONFIG_PAX_SEGMEXEC
11438 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11439 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11440 +#else
11441 #define STACK_TOP TASK_SIZE
11442 -#define STACK_TOP_MAX STACK_TOP
11443 +#endif
11444 +
11445 +#define STACK_TOP_MAX TASK_SIZE
11446
11447 #define INIT_THREAD { \
11448 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11449 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11450 .vm86_info = NULL, \
11451 .sysenter_cs = __KERNEL_CS, \
11452 .io_bitmap_ptr = NULL, \
11453 @@ -929,7 +936,7 @@ static inline void spin_lock_prefetch(const void *x)
11454 */
11455 #define INIT_TSS { \
11456 .x86_tss = { \
11457 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11458 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11459 .ss0 = __KERNEL_DS, \
11460 .ss1 = __KERNEL_CS, \
11461 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11462 @@ -940,11 +947,7 @@ static inline void spin_lock_prefetch(const void *x)
11463 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11464
11465 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11466 -#define KSTK_TOP(info) \
11467 -({ \
11468 - unsigned long *__ptr = (unsigned long *)(info); \
11469 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11470 -})
11471 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11472
11473 /*
11474 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11475 @@ -959,7 +962,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11476 #define task_pt_regs(task) \
11477 ({ \
11478 struct pt_regs *__regs__; \
11479 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11480 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11481 __regs__ - 1; \
11482 })
11483
11484 @@ -969,13 +972,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11485 /*
11486 * User space process size. 47bits minus one guard page.
11487 */
11488 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11489 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11490
11491 /* This decides where the kernel will search for a free chunk of vm
11492 * space during mmap's.
11493 */
11494 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11495 - 0xc0000000 : 0xFFFFe000)
11496 + 0xc0000000 : 0xFFFFf000)
11497
11498 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
11499 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11500 @@ -986,11 +989,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11501 #define STACK_TOP_MAX TASK_SIZE_MAX
11502
11503 #define INIT_THREAD { \
11504 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11505 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11506 }
11507
11508 #define INIT_TSS { \
11509 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11510 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11511 }
11512
11513 /*
11514 @@ -1012,6 +1015,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11515 */
11516 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11517
11518 +#ifdef CONFIG_PAX_SEGMEXEC
11519 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11520 +#endif
11521 +
11522 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11523
11524 /* Get/set a process' ability to use the timestamp counter instruction */
11525 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11526 index 0f0d908..f2e3da2 100644
11527 --- a/arch/x86/include/asm/ptrace.h
11528 +++ b/arch/x86/include/asm/ptrace.h
11529 @@ -151,28 +151,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
11530 }
11531
11532 /*
11533 - * user_mode_vm(regs) determines whether a register set came from user mode.
11534 + * user_mode(regs) determines whether a register set came from user mode.
11535 * This is true if V8086 mode was enabled OR if the register set was from
11536 * protected mode with RPL-3 CS value. This tricky test checks that with
11537 * one comparison. Many places in the kernel can bypass this full check
11538 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11539 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11540 + * be used.
11541 */
11542 -static inline int user_mode(struct pt_regs *regs)
11543 +static inline int user_mode_novm(struct pt_regs *regs)
11544 {
11545 #ifdef CONFIG_X86_32
11546 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11547 #else
11548 - return !!(regs->cs & 3);
11549 + return !!(regs->cs & SEGMENT_RPL_MASK);
11550 #endif
11551 }
11552
11553 -static inline int user_mode_vm(struct pt_regs *regs)
11554 +static inline int user_mode(struct pt_regs *regs)
11555 {
11556 #ifdef CONFIG_X86_32
11557 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11558 USER_RPL;
11559 #else
11560 - return user_mode(regs);
11561 + return user_mode_novm(regs);
11562 #endif
11563 }
11564
11565 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
11566 index 562d4fd..6e39df1 100644
11567 --- a/arch/x86/include/asm/reboot.h
11568 +++ b/arch/x86/include/asm/reboot.h
11569 @@ -6,19 +6,19 @@
11570 struct pt_regs;
11571
11572 struct machine_ops {
11573 - void (*restart)(char *cmd);
11574 - void (*halt)(void);
11575 - void (*power_off)(void);
11576 + void (* __noreturn restart)(char *cmd);
11577 + void (* __noreturn halt)(void);
11578 + void (* __noreturn power_off)(void);
11579 void (*shutdown)(void);
11580 void (*crash_shutdown)(struct pt_regs *);
11581 - void (*emergency_restart)(void);
11582 -};
11583 + void (* __noreturn emergency_restart)(void);
11584 +} __no_const;
11585
11586 extern struct machine_ops machine_ops;
11587
11588 void native_machine_crash_shutdown(struct pt_regs *regs);
11589 void native_machine_shutdown(void);
11590 -void machine_real_restart(const unsigned char *code, int length);
11591 +void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
11592
11593 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
11594 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
11595 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
11596 index 606ede1..dbfff37 100644
11597 --- a/arch/x86/include/asm/rwsem.h
11598 +++ b/arch/x86/include/asm/rwsem.h
11599 @@ -118,6 +118,14 @@ static inline void __down_read(struct rw_semaphore *sem)
11600 {
11601 asm volatile("# beginning down_read\n\t"
11602 LOCK_PREFIX _ASM_INC "(%1)\n\t"
11603 +
11604 +#ifdef CONFIG_PAX_REFCOUNT
11605 + "jno 0f\n"
11606 + LOCK_PREFIX _ASM_DEC "(%1)\n\t"
11607 + "int $4\n0:\n"
11608 + _ASM_EXTABLE(0b, 0b)
11609 +#endif
11610 +
11611 /* adds 0x00000001, returns the old value */
11612 " jns 1f\n"
11613 " call call_rwsem_down_read_failed\n"
11614 @@ -139,6 +147,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
11615 "1:\n\t"
11616 " mov %1,%2\n\t"
11617 " add %3,%2\n\t"
11618 +
11619 +#ifdef CONFIG_PAX_REFCOUNT
11620 + "jno 0f\n"
11621 + "sub %3,%2\n"
11622 + "int $4\n0:\n"
11623 + _ASM_EXTABLE(0b, 0b)
11624 +#endif
11625 +
11626 " jle 2f\n\t"
11627 LOCK_PREFIX " cmpxchg %2,%0\n\t"
11628 " jnz 1b\n\t"
11629 @@ -160,6 +176,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
11630 tmp = RWSEM_ACTIVE_WRITE_BIAS;
11631 asm volatile("# beginning down_write\n\t"
11632 LOCK_PREFIX " xadd %1,(%2)\n\t"
11633 +
11634 +#ifdef CONFIG_PAX_REFCOUNT
11635 + "jno 0f\n"
11636 + "mov %1,(%2)\n"
11637 + "int $4\n0:\n"
11638 + _ASM_EXTABLE(0b, 0b)
11639 +#endif
11640 +
11641 /* subtract 0x0000ffff, returns the old value */
11642 " test %1,%1\n\t"
11643 /* was the count 0 before? */
11644 @@ -198,6 +222,14 @@ static inline void __up_read(struct rw_semaphore *sem)
11645 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
11646 asm volatile("# beginning __up_read\n\t"
11647 LOCK_PREFIX " xadd %1,(%2)\n\t"
11648 +
11649 +#ifdef CONFIG_PAX_REFCOUNT
11650 + "jno 0f\n"
11651 + "mov %1,(%2)\n"
11652 + "int $4\n0:\n"
11653 + _ASM_EXTABLE(0b, 0b)
11654 +#endif
11655 +
11656 /* subtracts 1, returns the old value */
11657 " jns 1f\n\t"
11658 " call call_rwsem_wake\n"
11659 @@ -216,6 +248,14 @@ static inline void __up_write(struct rw_semaphore *sem)
11660 rwsem_count_t tmp;
11661 asm volatile("# beginning __up_write\n\t"
11662 LOCK_PREFIX " xadd %1,(%2)\n\t"
11663 +
11664 +#ifdef CONFIG_PAX_REFCOUNT
11665 + "jno 0f\n"
11666 + "mov %1,(%2)\n"
11667 + "int $4\n0:\n"
11668 + _ASM_EXTABLE(0b, 0b)
11669 +#endif
11670 +
11671 /* tries to transition
11672 0xffff0001 -> 0x00000000 */
11673 " jz 1f\n"
11674 @@ -234,6 +274,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11675 {
11676 asm volatile("# beginning __downgrade_write\n\t"
11677 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
11678 +
11679 +#ifdef CONFIG_PAX_REFCOUNT
11680 + "jno 0f\n"
11681 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
11682 + "int $4\n0:\n"
11683 + _ASM_EXTABLE(0b, 0b)
11684 +#endif
11685 +
11686 /*
11687 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
11688 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
11689 @@ -253,7 +301,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11690 static inline void rwsem_atomic_add(rwsem_count_t delta,
11691 struct rw_semaphore *sem)
11692 {
11693 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
11694 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
11695 +
11696 +#ifdef CONFIG_PAX_REFCOUNT
11697 + "jno 0f\n"
11698 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
11699 + "int $4\n0:\n"
11700 + _ASM_EXTABLE(0b, 0b)
11701 +#endif
11702 +
11703 : "+m" (sem->count)
11704 : "er" (delta));
11705 }
11706 @@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
11707 {
11708 rwsem_count_t tmp = delta;
11709
11710 - asm volatile(LOCK_PREFIX "xadd %0,%1"
11711 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
11712 +
11713 +#ifdef CONFIG_PAX_REFCOUNT
11714 + "jno 0f\n"
11715 + "mov %0,%1\n"
11716 + "int $4\n0:\n"
11717 + _ASM_EXTABLE(0b, 0b)
11718 +#endif
11719 +
11720 : "+r" (tmp), "+m" (sem->count)
11721 : : "memory");
11722
11723 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
11724 index 14e0ed8..7f7dd5e 100644
11725 --- a/arch/x86/include/asm/segment.h
11726 +++ b/arch/x86/include/asm/segment.h
11727 @@ -62,10 +62,15 @@
11728 * 26 - ESPFIX small SS
11729 * 27 - per-cpu [ offset to per-cpu data area ]
11730 * 28 - stack_canary-20 [ for stack protector ]
11731 - * 29 - unused
11732 - * 30 - unused
11733 + * 29 - PCI BIOS CS
11734 + * 30 - PCI BIOS DS
11735 * 31 - TSS for double fault handler
11736 */
11737 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
11738 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
11739 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
11740 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
11741 +
11742 #define GDT_ENTRY_TLS_MIN 6
11743 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
11744
11745 @@ -77,6 +82,8 @@
11746
11747 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
11748
11749 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
11750 +
11751 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
11752
11753 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
11754 @@ -88,7 +95,7 @@
11755 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
11756 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
11757
11758 -#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
11759 +#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
11760 #ifdef CONFIG_SMP
11761 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
11762 #else
11763 @@ -102,6 +109,12 @@
11764 #define __KERNEL_STACK_CANARY 0
11765 #endif
11766
11767 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
11768 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
11769 +
11770 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
11771 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
11772 +
11773 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
11774
11775 /*
11776 @@ -139,7 +152,7 @@
11777 */
11778
11779 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
11780 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
11781 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
11782
11783
11784 #else
11785 @@ -163,6 +176,8 @@
11786 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
11787 #define __USER32_DS __USER_DS
11788
11789 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
11790 +
11791 #define GDT_ENTRY_TSS 8 /* needs two entries */
11792 #define GDT_ENTRY_LDT 10 /* needs two entries */
11793 #define GDT_ENTRY_TLS_MIN 12
11794 @@ -183,6 +198,7 @@
11795 #endif
11796
11797 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
11798 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
11799 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
11800 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
11801 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
11802 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
11803 index 4c2f63c..5685db2 100644
11804 --- a/arch/x86/include/asm/smp.h
11805 +++ b/arch/x86/include/asm/smp.h
11806 @@ -24,7 +24,7 @@ extern unsigned int num_processors;
11807 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
11808 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
11809 DECLARE_PER_CPU(u16, cpu_llc_id);
11810 -DECLARE_PER_CPU(int, cpu_number);
11811 +DECLARE_PER_CPU(unsigned int, cpu_number);
11812
11813 static inline struct cpumask *cpu_sibling_mask(int cpu)
11814 {
11815 @@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
11816 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
11817
11818 /* Static state in head.S used to set up a CPU */
11819 -extern struct {
11820 - void *sp;
11821 - unsigned short ss;
11822 -} stack_start;
11823 +extern unsigned long stack_start; /* Initial stack pointer address */
11824
11825 struct smp_ops {
11826 void (*smp_prepare_boot_cpu)(void);
11827 @@ -60,7 +57,7 @@ struct smp_ops {
11828
11829 void (*send_call_func_ipi)(const struct cpumask *mask);
11830 void (*send_call_func_single_ipi)(int cpu);
11831 -};
11832 +} __no_const;
11833
11834 /* Globals due to paravirt */
11835 extern void set_cpu_sibling_map(int cpu);
11836 @@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitdata;
11837 extern int safe_smp_processor_id(void);
11838
11839 #elif defined(CONFIG_X86_64_SMP)
11840 -#define raw_smp_processor_id() (percpu_read(cpu_number))
11841 -
11842 -#define stack_smp_processor_id() \
11843 -({ \
11844 - struct thread_info *ti; \
11845 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
11846 - ti->cpu; \
11847 -})
11848 +#define raw_smp_processor_id() (percpu_read(cpu_number))
11849 +#define stack_smp_processor_id() raw_smp_processor_id()
11850 #define safe_smp_processor_id() smp_processor_id()
11851
11852 #endif
11853 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
11854 index 4e77853..4359783 100644
11855 --- a/arch/x86/include/asm/spinlock.h
11856 +++ b/arch/x86/include/asm/spinlock.h
11857 @@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(raw_rwlock_t *lock)
11858 static inline void __raw_read_lock(raw_rwlock_t *rw)
11859 {
11860 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
11861 +
11862 +#ifdef CONFIG_PAX_REFCOUNT
11863 + "jno 0f\n"
11864 + LOCK_PREFIX " addl $1,(%0)\n"
11865 + "int $4\n0:\n"
11866 + _ASM_EXTABLE(0b, 0b)
11867 +#endif
11868 +
11869 "jns 1f\n"
11870 "call __read_lock_failed\n\t"
11871 "1:\n"
11872 @@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
11873 static inline void __raw_write_lock(raw_rwlock_t *rw)
11874 {
11875 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
11876 +
11877 +#ifdef CONFIG_PAX_REFCOUNT
11878 + "jno 0f\n"
11879 + LOCK_PREFIX " addl %1,(%0)\n"
11880 + "int $4\n0:\n"
11881 + _ASM_EXTABLE(0b, 0b)
11882 +#endif
11883 +
11884 "jz 1f\n"
11885 "call __write_lock_failed\n\t"
11886 "1:\n"
11887 @@ -286,12 +302,29 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
11888
11889 static inline void __raw_read_unlock(raw_rwlock_t *rw)
11890 {
11891 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
11892 + asm volatile(LOCK_PREFIX "incl %0\n"
11893 +
11894 +#ifdef CONFIG_PAX_REFCOUNT
11895 + "jno 0f\n"
11896 + LOCK_PREFIX "decl %0\n"
11897 + "int $4\n0:\n"
11898 + _ASM_EXTABLE(0b, 0b)
11899 +#endif
11900 +
11901 + :"+m" (rw->lock) : : "memory");
11902 }
11903
11904 static inline void __raw_write_unlock(raw_rwlock_t *rw)
11905 {
11906 - asm volatile(LOCK_PREFIX "addl %1, %0"
11907 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
11908 +
11909 +#ifdef CONFIG_PAX_REFCOUNT
11910 + "jno 0f\n"
11911 + LOCK_PREFIX "subl %1, %0\n"
11912 + "int $4\n0:\n"
11913 + _ASM_EXTABLE(0b, 0b)
11914 +#endif
11915 +
11916 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
11917 }
11918
11919 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
11920 index 1575177..cb23f52 100644
11921 --- a/arch/x86/include/asm/stackprotector.h
11922 +++ b/arch/x86/include/asm/stackprotector.h
11923 @@ -48,7 +48,7 @@
11924 * head_32 for boot CPU and setup_per_cpu_areas() for others.
11925 */
11926 #define GDT_STACK_CANARY_INIT \
11927 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
11928 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
11929
11930 /*
11931 * Initialize the stackprotector canary value.
11932 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
11933
11934 static inline void load_stack_canary_segment(void)
11935 {
11936 -#ifdef CONFIG_X86_32
11937 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
11938 asm volatile ("mov %0, %%gs" : : "r" (0));
11939 #endif
11940 }
11941 diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
11942 index e0fbf29..858ef4a 100644
11943 --- a/arch/x86/include/asm/system.h
11944 +++ b/arch/x86/include/asm/system.h
11945 @@ -132,7 +132,7 @@ do { \
11946 "thread_return:\n\t" \
11947 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
11948 __switch_canary \
11949 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
11950 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
11951 "movq %%rax,%%rdi\n\t" \
11952 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
11953 "jnz ret_from_fork\n\t" \
11954 @@ -143,7 +143,7 @@ do { \
11955 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
11956 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
11957 [_tif_fork] "i" (_TIF_FORK), \
11958 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
11959 + [thread_info] "m" (per_cpu_var(current_tinfo)), \
11960 [current_task] "m" (per_cpu_var(current_task)) \
11961 __switch_canary_iparam \
11962 : "memory", "cc" __EXTRA_CLOBBER)
11963 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
11964 {
11965 unsigned long __limit;
11966 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
11967 - return __limit + 1;
11968 + return __limit;
11969 }
11970
11971 static inline void native_clts(void)
11972 @@ -340,12 +340,12 @@ void enable_hlt(void);
11973
11974 void cpu_idle_wait(void);
11975
11976 -extern unsigned long arch_align_stack(unsigned long sp);
11977 +#define arch_align_stack(x) ((x) & ~0xfUL)
11978 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
11979
11980 void default_idle(void);
11981
11982 -void stop_this_cpu(void *dummy);
11983 +void stop_this_cpu(void *dummy) __noreturn;
11984
11985 /*
11986 * Force strict CPU ordering.
11987 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
11988 index 19c3ce4..8962535 100644
11989 --- a/arch/x86/include/asm/thread_info.h
11990 +++ b/arch/x86/include/asm/thread_info.h
11991 @@ -10,6 +10,7 @@
11992 #include <linux/compiler.h>
11993 #include <asm/page.h>
11994 #include <asm/types.h>
11995 +#include <asm/percpu.h>
11996
11997 /*
11998 * low level task data that entry.S needs immediate access to
11999 @@ -24,7 +25,6 @@ struct exec_domain;
12000 #include <asm/atomic.h>
12001
12002 struct thread_info {
12003 - struct task_struct *task; /* main task structure */
12004 struct exec_domain *exec_domain; /* execution domain */
12005 __u32 flags; /* low level flags */
12006 __u32 status; /* thread synchronous flags */
12007 @@ -34,18 +34,12 @@ struct thread_info {
12008 mm_segment_t addr_limit;
12009 struct restart_block restart_block;
12010 void __user *sysenter_return;
12011 -#ifdef CONFIG_X86_32
12012 - unsigned long previous_esp; /* ESP of the previous stack in
12013 - case of nested (IRQ) stacks
12014 - */
12015 - __u8 supervisor_stack[0];
12016 -#endif
12017 + unsigned long lowest_stack;
12018 int uaccess_err;
12019 };
12020
12021 -#define INIT_THREAD_INFO(tsk) \
12022 +#define INIT_THREAD_INFO \
12023 { \
12024 - .task = &tsk, \
12025 .exec_domain = &default_exec_domain, \
12026 .flags = 0, \
12027 .cpu = 0, \
12028 @@ -56,7 +50,7 @@ struct thread_info {
12029 }, \
12030 }
12031
12032 -#define init_thread_info (init_thread_union.thread_info)
12033 +#define init_thread_info (init_thread_union.stack)
12034 #define init_stack (init_thread_union.stack)
12035
12036 #else /* !__ASSEMBLY__ */
12037 @@ -163,45 +157,40 @@ struct thread_info {
12038 #define alloc_thread_info(tsk) \
12039 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
12040
12041 -#ifdef CONFIG_X86_32
12042 -
12043 -#define STACK_WARN (THREAD_SIZE/8)
12044 -/*
12045 - * macros/functions for gaining access to the thread information structure
12046 - *
12047 - * preempt_count needs to be 1 initially, until the scheduler is functional.
12048 - */
12049 -#ifndef __ASSEMBLY__
12050 -
12051 -
12052 -/* how to get the current stack pointer from C */
12053 -register unsigned long current_stack_pointer asm("esp") __used;
12054 -
12055 -/* how to get the thread information struct from C */
12056 -static inline struct thread_info *current_thread_info(void)
12057 -{
12058 - return (struct thread_info *)
12059 - (current_stack_pointer & ~(THREAD_SIZE - 1));
12060 -}
12061 -
12062 -#else /* !__ASSEMBLY__ */
12063 -
12064 +#ifdef __ASSEMBLY__
12065 /* how to get the thread information struct from ASM */
12066 #define GET_THREAD_INFO(reg) \
12067 - movl $-THREAD_SIZE, reg; \
12068 - andl %esp, reg
12069 + mov PER_CPU_VAR(current_tinfo), reg
12070
12071 /* use this one if reg already contains %esp */
12072 -#define GET_THREAD_INFO_WITH_ESP(reg) \
12073 - andl $-THREAD_SIZE, reg
12074 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
12075 +#else
12076 +/* how to get the thread information struct from C */
12077 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
12078 +
12079 +static __always_inline struct thread_info *current_thread_info(void)
12080 +{
12081 + return percpu_read_stable(current_tinfo);
12082 +}
12083 +#endif
12084 +
12085 +#ifdef CONFIG_X86_32
12086 +
12087 +#define STACK_WARN (THREAD_SIZE/8)
12088 +/*
12089 + * macros/functions for gaining access to the thread information structure
12090 + *
12091 + * preempt_count needs to be 1 initially, until the scheduler is functional.
12092 + */
12093 +#ifndef __ASSEMBLY__
12094 +
12095 +/* how to get the current stack pointer from C */
12096 +register unsigned long current_stack_pointer asm("esp") __used;
12097
12098 #endif
12099
12100 #else /* X86_32 */
12101
12102 -#include <asm/percpu.h>
12103 -#define KERNEL_STACK_OFFSET (5*8)
12104 -
12105 /*
12106 * macros/functions for gaining access to the thread information structure
12107 * preempt_count needs to be 1 initially, until the scheduler is functional.
12108 @@ -209,21 +198,8 @@ static inline struct thread_info *current_thread_info(void)
12109 #ifndef __ASSEMBLY__
12110 DECLARE_PER_CPU(unsigned long, kernel_stack);
12111
12112 -static inline struct thread_info *current_thread_info(void)
12113 -{
12114 - struct thread_info *ti;
12115 - ti = (void *)(percpu_read_stable(kernel_stack) +
12116 - KERNEL_STACK_OFFSET - THREAD_SIZE);
12117 - return ti;
12118 -}
12119 -
12120 -#else /* !__ASSEMBLY__ */
12121 -
12122 -/* how to get the thread information struct from ASM */
12123 -#define GET_THREAD_INFO(reg) \
12124 - movq PER_CPU_VAR(kernel_stack),reg ; \
12125 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
12126 -
12127 +/* how to get the current stack pointer from C */
12128 +register unsigned long current_stack_pointer asm("rsp") __used;
12129 #endif
12130
12131 #endif /* !X86_32 */
12132 @@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
12133 extern void free_thread_info(struct thread_info *ti);
12134 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
12135 #define arch_task_cache_init arch_task_cache_init
12136 +
12137 +#define __HAVE_THREAD_FUNCTIONS
12138 +#define task_thread_info(task) (&(task)->tinfo)
12139 +#define task_stack_page(task) ((task)->stack)
12140 +#define setup_thread_stack(p, org) do {} while (0)
12141 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12142 +
12143 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
12144 +extern struct task_struct *alloc_task_struct(void);
12145 +extern void free_task_struct(struct task_struct *);
12146 +
12147 #endif
12148 #endif /* _ASM_X86_THREAD_INFO_H */
12149 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12150 index 61c5874..8a046e9 100644
12151 --- a/arch/x86/include/asm/uaccess.h
12152 +++ b/arch/x86/include/asm/uaccess.h
12153 @@ -8,12 +8,15 @@
12154 #include <linux/thread_info.h>
12155 #include <linux/prefetch.h>
12156 #include <linux/string.h>
12157 +#include <linux/sched.h>
12158 #include <asm/asm.h>
12159 #include <asm/page.h>
12160
12161 #define VERIFY_READ 0
12162 #define VERIFY_WRITE 1
12163
12164 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
12165 +
12166 /*
12167 * The fs value determines whether argument validity checking should be
12168 * performed or not. If get_fs() == USER_DS, checking is performed, with
12169 @@ -29,7 +32,12 @@
12170
12171 #define get_ds() (KERNEL_DS)
12172 #define get_fs() (current_thread_info()->addr_limit)
12173 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12174 +void __set_fs(mm_segment_t x);
12175 +void set_fs(mm_segment_t x);
12176 +#else
12177 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12178 +#endif
12179
12180 #define segment_eq(a, b) ((a).seg == (b).seg)
12181
12182 @@ -77,7 +85,33 @@
12183 * checks that the pointer is in the user space range - after calling
12184 * this function, memory access functions may still return -EFAULT.
12185 */
12186 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12187 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12188 +#define access_ok(type, addr, size) \
12189 +({ \
12190 + long __size = size; \
12191 + unsigned long __addr = (unsigned long)addr; \
12192 + unsigned long __addr_ao = __addr & PAGE_MASK; \
12193 + unsigned long __end_ao = __addr + __size - 1; \
12194 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12195 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12196 + while(__addr_ao <= __end_ao) { \
12197 + char __c_ao; \
12198 + __addr_ao += PAGE_SIZE; \
12199 + if (__size > PAGE_SIZE) \
12200 + cond_resched(); \
12201 + if (__get_user(__c_ao, (char __user *)__addr)) \
12202 + break; \
12203 + if (type != VERIFY_WRITE) { \
12204 + __addr = __addr_ao; \
12205 + continue; \
12206 + } \
12207 + if (__put_user(__c_ao, (char __user *)__addr)) \
12208 + break; \
12209 + __addr = __addr_ao; \
12210 + } \
12211 + } \
12212 + __ret_ao; \
12213 +})
12214
12215 /*
12216 * The exception table consists of pairs of addresses: the first is the
12217 @@ -183,12 +217,20 @@ extern int __get_user_bad(void);
12218 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12219 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12220
12221 -
12222 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12223 +#define __copyuser_seg "gs;"
12224 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12225 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12226 +#else
12227 +#define __copyuser_seg
12228 +#define __COPYUSER_SET_ES
12229 +#define __COPYUSER_RESTORE_ES
12230 +#endif
12231
12232 #ifdef CONFIG_X86_32
12233 #define __put_user_asm_u64(x, addr, err, errret) \
12234 - asm volatile("1: movl %%eax,0(%2)\n" \
12235 - "2: movl %%edx,4(%2)\n" \
12236 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12237 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12238 "3:\n" \
12239 ".section .fixup,\"ax\"\n" \
12240 "4: movl %3,%0\n" \
12241 @@ -200,8 +242,8 @@ extern int __get_user_bad(void);
12242 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12243
12244 #define __put_user_asm_ex_u64(x, addr) \
12245 - asm volatile("1: movl %%eax,0(%1)\n" \
12246 - "2: movl %%edx,4(%1)\n" \
12247 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12248 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12249 "3:\n" \
12250 _ASM_EXTABLE(1b, 2b - 1b) \
12251 _ASM_EXTABLE(2b, 3b - 2b) \
12252 @@ -253,7 +295,7 @@ extern void __put_user_8(void);
12253 __typeof__(*(ptr)) __pu_val; \
12254 __chk_user_ptr(ptr); \
12255 might_fault(); \
12256 - __pu_val = x; \
12257 + __pu_val = (x); \
12258 switch (sizeof(*(ptr))) { \
12259 case 1: \
12260 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12261 @@ -374,7 +416,7 @@ do { \
12262 } while (0)
12263
12264 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12265 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12266 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12267 "2:\n" \
12268 ".section .fixup,\"ax\"\n" \
12269 "3: mov %3,%0\n" \
12270 @@ -382,7 +424,7 @@ do { \
12271 " jmp 2b\n" \
12272 ".previous\n" \
12273 _ASM_EXTABLE(1b, 3b) \
12274 - : "=r" (err), ltype(x) \
12275 + : "=r" (err), ltype (x) \
12276 : "m" (__m(addr)), "i" (errret), "0" (err))
12277
12278 #define __get_user_size_ex(x, ptr, size) \
12279 @@ -407,7 +449,7 @@ do { \
12280 } while (0)
12281
12282 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12283 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12284 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12285 "2:\n" \
12286 _ASM_EXTABLE(1b, 2b - 1b) \
12287 : ltype(x) : "m" (__m(addr)))
12288 @@ -424,13 +466,24 @@ do { \
12289 int __gu_err; \
12290 unsigned long __gu_val; \
12291 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12292 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
12293 + (x) = (__typeof__(*(ptr)))__gu_val; \
12294 __gu_err; \
12295 })
12296
12297 /* FIXME: this hack is definitely wrong -AK */
12298 struct __large_struct { unsigned long buf[100]; };
12299 -#define __m(x) (*(struct __large_struct __user *)(x))
12300 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12301 +#define ____m(x) \
12302 +({ \
12303 + unsigned long ____x = (unsigned long)(x); \
12304 + if (____x < PAX_USER_SHADOW_BASE) \
12305 + ____x += PAX_USER_SHADOW_BASE; \
12306 + (void __user *)____x; \
12307 +})
12308 +#else
12309 +#define ____m(x) (x)
12310 +#endif
12311 +#define __m(x) (*(struct __large_struct __user *)____m(x))
12312
12313 /*
12314 * Tell gcc we read from memory instead of writing: this is because
12315 @@ -438,7 +491,7 @@ struct __large_struct { unsigned long buf[100]; };
12316 * aliasing issues.
12317 */
12318 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12319 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12320 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12321 "2:\n" \
12322 ".section .fixup,\"ax\"\n" \
12323 "3: mov %3,%0\n" \
12324 @@ -446,10 +499,10 @@ struct __large_struct { unsigned long buf[100]; };
12325 ".previous\n" \
12326 _ASM_EXTABLE(1b, 3b) \
12327 : "=r"(err) \
12328 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12329 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12330
12331 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12332 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12333 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12334 "2:\n" \
12335 _ASM_EXTABLE(1b, 2b - 1b) \
12336 : : ltype(x), "m" (__m(addr)))
12337 @@ -488,8 +541,12 @@ struct __large_struct { unsigned long buf[100]; };
12338 * On error, the variable @x is set to zero.
12339 */
12340
12341 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12342 +#define __get_user(x, ptr) get_user((x), (ptr))
12343 +#else
12344 #define __get_user(x, ptr) \
12345 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12346 +#endif
12347
12348 /**
12349 * __put_user: - Write a simple value into user space, with less checking.
12350 @@ -511,8 +568,12 @@ struct __large_struct { unsigned long buf[100]; };
12351 * Returns zero on success, or -EFAULT on error.
12352 */
12353
12354 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12355 +#define __put_user(x, ptr) put_user((x), (ptr))
12356 +#else
12357 #define __put_user(x, ptr) \
12358 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12359 +#endif
12360
12361 #define __get_user_unaligned __get_user
12362 #define __put_user_unaligned __put_user
12363 @@ -530,7 +591,7 @@ struct __large_struct { unsigned long buf[100]; };
12364 #define get_user_ex(x, ptr) do { \
12365 unsigned long __gue_val; \
12366 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12367 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
12368 + (x) = (__typeof__(*(ptr)))__gue_val; \
12369 } while (0)
12370
12371 #ifdef CONFIG_X86_WP_WORKS_OK
12372 @@ -567,6 +628,7 @@ extern struct movsl_mask {
12373
12374 #define ARCH_HAS_NOCACHE_UACCESS 1
12375
12376 +#define ARCH_HAS_SORT_EXTABLE
12377 #ifdef CONFIG_X86_32
12378 # include "uaccess_32.h"
12379 #else
12380 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12381 index 632fb44..e30e334 100644
12382 --- a/arch/x86/include/asm/uaccess_32.h
12383 +++ b/arch/x86/include/asm/uaccess_32.h
12384 @@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12385 static __always_inline unsigned long __must_check
12386 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12387 {
12388 + pax_track_stack();
12389 +
12390 + if ((long)n < 0)
12391 + return n;
12392 +
12393 if (__builtin_constant_p(n)) {
12394 unsigned long ret;
12395
12396 @@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12397 return ret;
12398 }
12399 }
12400 + if (!__builtin_constant_p(n))
12401 + check_object_size(from, n, true);
12402 return __copy_to_user_ll(to, from, n);
12403 }
12404
12405 @@ -83,12 +90,16 @@ static __always_inline unsigned long __must_check
12406 __copy_to_user(void __user *to, const void *from, unsigned long n)
12407 {
12408 might_fault();
12409 +
12410 return __copy_to_user_inatomic(to, from, n);
12411 }
12412
12413 static __always_inline unsigned long
12414 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12415 {
12416 + if ((long)n < 0)
12417 + return n;
12418 +
12419 /* Avoid zeroing the tail if the copy fails..
12420 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12421 * but as the zeroing behaviour is only significant when n is not
12422 @@ -138,6 +149,12 @@ static __always_inline unsigned long
12423 __copy_from_user(void *to, const void __user *from, unsigned long n)
12424 {
12425 might_fault();
12426 +
12427 + pax_track_stack();
12428 +
12429 + if ((long)n < 0)
12430 + return n;
12431 +
12432 if (__builtin_constant_p(n)) {
12433 unsigned long ret;
12434
12435 @@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
12436 return ret;
12437 }
12438 }
12439 + if (!__builtin_constant_p(n))
12440 + check_object_size(to, n, false);
12441 return __copy_from_user_ll(to, from, n);
12442 }
12443
12444 @@ -160,6 +179,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
12445 const void __user *from, unsigned long n)
12446 {
12447 might_fault();
12448 +
12449 + if ((long)n < 0)
12450 + return n;
12451 +
12452 if (__builtin_constant_p(n)) {
12453 unsigned long ret;
12454
12455 @@ -182,14 +205,62 @@ static __always_inline unsigned long
12456 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
12457 unsigned long n)
12458 {
12459 - return __copy_from_user_ll_nocache_nozero(to, from, n);
12460 + if ((long)n < 0)
12461 + return n;
12462 +
12463 + return __copy_from_user_ll_nocache_nozero(to, from, n);
12464 +}
12465 +
12466 +/**
12467 + * copy_to_user: - Copy a block of data into user space.
12468 + * @to: Destination address, in user space.
12469 + * @from: Source address, in kernel space.
12470 + * @n: Number of bytes to copy.
12471 + *
12472 + * Context: User context only. This function may sleep.
12473 + *
12474 + * Copy data from kernel space to user space.
12475 + *
12476 + * Returns number of bytes that could not be copied.
12477 + * On success, this will be zero.
12478 + */
12479 +static __always_inline unsigned long __must_check
12480 +copy_to_user(void __user *to, const void *from, unsigned long n)
12481 +{
12482 + if (access_ok(VERIFY_WRITE, to, n))
12483 + n = __copy_to_user(to, from, n);
12484 + return n;
12485 +}
12486 +
12487 +/**
12488 + * copy_from_user: - Copy a block of data from user space.
12489 + * @to: Destination address, in kernel space.
12490 + * @from: Source address, in user space.
12491 + * @n: Number of bytes to copy.
12492 + *
12493 + * Context: User context only. This function may sleep.
12494 + *
12495 + * Copy data from user space to kernel space.
12496 + *
12497 + * Returns number of bytes that could not be copied.
12498 + * On success, this will be zero.
12499 + *
12500 + * If some data could not be copied, this function will pad the copied
12501 + * data to the requested size using zero bytes.
12502 + */
12503 +static __always_inline unsigned long __must_check
12504 +copy_from_user(void *to, const void __user *from, unsigned long n)
12505 +{
12506 + if (access_ok(VERIFY_READ, from, n))
12507 + n = __copy_from_user(to, from, n);
12508 + else if ((long)n > 0) {
12509 + if (!__builtin_constant_p(n))
12510 + check_object_size(to, n, false);
12511 + memset(to, 0, n);
12512 + }
12513 + return n;
12514 }
12515
12516 -unsigned long __must_check copy_to_user(void __user *to,
12517 - const void *from, unsigned long n);
12518 -unsigned long __must_check copy_from_user(void *to,
12519 - const void __user *from,
12520 - unsigned long n);
12521 long __must_check strncpy_from_user(char *dst, const char __user *src,
12522 long count);
12523 long __must_check __strncpy_from_user(char *dst,
12524 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
12525 index db24b21..f595ae7 100644
12526 --- a/arch/x86/include/asm/uaccess_64.h
12527 +++ b/arch/x86/include/asm/uaccess_64.h
12528 @@ -9,6 +9,9 @@
12529 #include <linux/prefetch.h>
12530 #include <linux/lockdep.h>
12531 #include <asm/page.h>
12532 +#include <asm/pgtable.h>
12533 +
12534 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
12535
12536 /*
12537 * Copy To/From Userspace
12538 @@ -16,116 +19,205 @@
12539
12540 /* Handles exceptions in both to and from, but doesn't do access_ok */
12541 __must_check unsigned long
12542 -copy_user_generic(void *to, const void *from, unsigned len);
12543 +copy_user_generic(void *to, const void *from, unsigned long len);
12544
12545 __must_check unsigned long
12546 -copy_to_user(void __user *to, const void *from, unsigned len);
12547 -__must_check unsigned long
12548 -copy_from_user(void *to, const void __user *from, unsigned len);
12549 -__must_check unsigned long
12550 -copy_in_user(void __user *to, const void __user *from, unsigned len);
12551 +copy_in_user(void __user *to, const void __user *from, unsigned long len);
12552
12553 static __always_inline __must_check
12554 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
12555 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
12556 {
12557 - int ret = 0;
12558 + unsigned ret = 0;
12559
12560 might_fault();
12561 - if (!__builtin_constant_p(size))
12562 - return copy_user_generic(dst, (__force void *)src, size);
12563 +
12564 + if (size > INT_MAX)
12565 + return size;
12566 +
12567 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12568 + if (!__access_ok(VERIFY_READ, src, size))
12569 + return size;
12570 +#endif
12571 +
12572 + if (!__builtin_constant_p(size)) {
12573 + check_object_size(dst, size, false);
12574 +
12575 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12576 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12577 + src += PAX_USER_SHADOW_BASE;
12578 +#endif
12579 +
12580 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
12581 + }
12582 switch (size) {
12583 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
12584 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
12585 ret, "b", "b", "=q", 1);
12586 return ret;
12587 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
12588 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
12589 ret, "w", "w", "=r", 2);
12590 return ret;
12591 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
12592 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
12593 ret, "l", "k", "=r", 4);
12594 return ret;
12595 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
12596 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12597 ret, "q", "", "=r", 8);
12598 return ret;
12599 case 10:
12600 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12601 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12602 ret, "q", "", "=r", 10);
12603 if (unlikely(ret))
12604 return ret;
12605 __get_user_asm(*(u16 *)(8 + (char *)dst),
12606 - (u16 __user *)(8 + (char __user *)src),
12607 + (const u16 __user *)(8 + (const char __user *)src),
12608 ret, "w", "w", "=r", 2);
12609 return ret;
12610 case 16:
12611 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12612 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12613 ret, "q", "", "=r", 16);
12614 if (unlikely(ret))
12615 return ret;
12616 __get_user_asm(*(u64 *)(8 + (char *)dst),
12617 - (u64 __user *)(8 + (char __user *)src),
12618 + (const u64 __user *)(8 + (const char __user *)src),
12619 ret, "q", "", "=r", 8);
12620 return ret;
12621 default:
12622 - return copy_user_generic(dst, (__force void *)src, size);
12623 +
12624 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12625 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12626 + src += PAX_USER_SHADOW_BASE;
12627 +#endif
12628 +
12629 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
12630 }
12631 }
12632
12633 static __always_inline __must_check
12634 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
12635 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
12636 {
12637 - int ret = 0;
12638 + unsigned ret = 0;
12639
12640 might_fault();
12641 - if (!__builtin_constant_p(size))
12642 - return copy_user_generic((__force void *)dst, src, size);
12643 +
12644 + pax_track_stack();
12645 +
12646 + if (size > INT_MAX)
12647 + return size;
12648 +
12649 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12650 + if (!__access_ok(VERIFY_WRITE, dst, size))
12651 + return size;
12652 +#endif
12653 +
12654 + if (!__builtin_constant_p(size)) {
12655 + check_object_size(src, size, true);
12656 +
12657 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12658 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12659 + dst += PAX_USER_SHADOW_BASE;
12660 +#endif
12661 +
12662 + return copy_user_generic((__force_kernel void *)dst, src, size);
12663 + }
12664 switch (size) {
12665 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
12666 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
12667 ret, "b", "b", "iq", 1);
12668 return ret;
12669 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
12670 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
12671 ret, "w", "w", "ir", 2);
12672 return ret;
12673 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
12674 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
12675 ret, "l", "k", "ir", 4);
12676 return ret;
12677 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
12678 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12679 ret, "q", "", "er", 8);
12680 return ret;
12681 case 10:
12682 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12683 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12684 ret, "q", "", "er", 10);
12685 if (unlikely(ret))
12686 return ret;
12687 asm("":::"memory");
12688 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
12689 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
12690 ret, "w", "w", "ir", 2);
12691 return ret;
12692 case 16:
12693 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12694 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12695 ret, "q", "", "er", 16);
12696 if (unlikely(ret))
12697 return ret;
12698 asm("":::"memory");
12699 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
12700 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
12701 ret, "q", "", "er", 8);
12702 return ret;
12703 default:
12704 - return copy_user_generic((__force void *)dst, src, size);
12705 +
12706 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12707 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12708 + dst += PAX_USER_SHADOW_BASE;
12709 +#endif
12710 +
12711 + return copy_user_generic((__force_kernel void *)dst, src, size);
12712 + }
12713 +}
12714 +
12715 +static __always_inline __must_check
12716 +unsigned long copy_to_user(void __user *to, const void *from, unsigned long len)
12717 +{
12718 + if (access_ok(VERIFY_WRITE, to, len))
12719 + len = __copy_to_user(to, from, len);
12720 + return len;
12721 +}
12722 +
12723 +static __always_inline __must_check
12724 +unsigned long copy_from_user(void *to, const void __user *from, unsigned long len)
12725 +{
12726 + might_fault();
12727 +
12728 + if (access_ok(VERIFY_READ, from, len))
12729 + len = __copy_from_user(to, from, len);
12730 + else if (len < INT_MAX) {
12731 + if (!__builtin_constant_p(len))
12732 + check_object_size(to, len, false);
12733 + memset(to, 0, len);
12734 }
12735 + return len;
12736 }
12737
12738 static __always_inline __must_check
12739 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12740 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
12741 {
12742 - int ret = 0;
12743 + unsigned ret = 0;
12744
12745 might_fault();
12746 - if (!__builtin_constant_p(size))
12747 - return copy_user_generic((__force void *)dst,
12748 - (__force void *)src, size);
12749 +
12750 + pax_track_stack();
12751 +
12752 + if (size > INT_MAX)
12753 + return size;
12754 +
12755 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12756 + if (!__access_ok(VERIFY_READ, src, size))
12757 + return size;
12758 + if (!__access_ok(VERIFY_WRITE, dst, size))
12759 + return size;
12760 +#endif
12761 +
12762 + if (!__builtin_constant_p(size)) {
12763 +
12764 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12765 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12766 + src += PAX_USER_SHADOW_BASE;
12767 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12768 + dst += PAX_USER_SHADOW_BASE;
12769 +#endif
12770 +
12771 + return copy_user_generic((__force_kernel void *)dst,
12772 + (__force_kernel const void *)src, size);
12773 + }
12774 switch (size) {
12775 case 1: {
12776 u8 tmp;
12777 - __get_user_asm(tmp, (u8 __user *)src,
12778 + __get_user_asm(tmp, (const u8 __user *)src,
12779 ret, "b", "b", "=q", 1);
12780 if (likely(!ret))
12781 __put_user_asm(tmp, (u8 __user *)dst,
12782 @@ -134,7 +226,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12783 }
12784 case 2: {
12785 u16 tmp;
12786 - __get_user_asm(tmp, (u16 __user *)src,
12787 + __get_user_asm(tmp, (const u16 __user *)src,
12788 ret, "w", "w", "=r", 2);
12789 if (likely(!ret))
12790 __put_user_asm(tmp, (u16 __user *)dst,
12791 @@ -144,7 +236,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12792
12793 case 4: {
12794 u32 tmp;
12795 - __get_user_asm(tmp, (u32 __user *)src,
12796 + __get_user_asm(tmp, (const u32 __user *)src,
12797 ret, "l", "k", "=r", 4);
12798 if (likely(!ret))
12799 __put_user_asm(tmp, (u32 __user *)dst,
12800 @@ -153,7 +245,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12801 }
12802 case 8: {
12803 u64 tmp;
12804 - __get_user_asm(tmp, (u64 __user *)src,
12805 + __get_user_asm(tmp, (const u64 __user *)src,
12806 ret, "q", "", "=r", 8);
12807 if (likely(!ret))
12808 __put_user_asm(tmp, (u64 __user *)dst,
12809 @@ -161,8 +253,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12810 return ret;
12811 }
12812 default:
12813 - return copy_user_generic((__force void *)dst,
12814 - (__force void *)src, size);
12815 +
12816 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12817 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12818 + src += PAX_USER_SHADOW_BASE;
12819 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12820 + dst += PAX_USER_SHADOW_BASE;
12821 +#endif
12822 +
12823 + return copy_user_generic((__force_kernel void *)dst,
12824 + (__force_kernel const void *)src, size);
12825 }
12826 }
12827
12828 @@ -176,33 +276,75 @@ __must_check long strlen_user(const char __user *str);
12829 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
12830 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
12831
12832 -__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
12833 - unsigned size);
12834 +static __must_check __always_inline unsigned long
12835 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
12836 +{
12837 + pax_track_stack();
12838 +
12839 + if (size > INT_MAX)
12840 + return size;
12841 +
12842 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12843 + if (!__access_ok(VERIFY_READ, src, size))
12844 + return size;
12845
12846 -static __must_check __always_inline int
12847 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
12848 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12849 + src += PAX_USER_SHADOW_BASE;
12850 +#endif
12851 +
12852 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
12853 +}
12854 +
12855 +static __must_check __always_inline unsigned long
12856 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
12857 {
12858 - return copy_user_generic((__force void *)dst, src, size);
12859 + if (size > INT_MAX)
12860 + return size;
12861 +
12862 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12863 + if (!__access_ok(VERIFY_WRITE, dst, size))
12864 + return size;
12865 +
12866 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12867 + dst += PAX_USER_SHADOW_BASE;
12868 +#endif
12869 +
12870 + return copy_user_generic((__force_kernel void *)dst, src, size);
12871 }
12872
12873 -extern long __copy_user_nocache(void *dst, const void __user *src,
12874 - unsigned size, int zerorest);
12875 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
12876 + unsigned long size, int zerorest);
12877
12878 -static inline int
12879 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
12880 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
12881 {
12882 might_sleep();
12883 +
12884 + if (size > INT_MAX)
12885 + return size;
12886 +
12887 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12888 + if (!__access_ok(VERIFY_READ, src, size))
12889 + return size;
12890 +#endif
12891 +
12892 return __copy_user_nocache(dst, src, size, 1);
12893 }
12894
12895 -static inline int
12896 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
12897 - unsigned size)
12898 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
12899 + unsigned long size)
12900 {
12901 + if (size > INT_MAX)
12902 + return size;
12903 +
12904 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12905 + if (!__access_ok(VERIFY_READ, src, size))
12906 + return size;
12907 +#endif
12908 +
12909 return __copy_user_nocache(dst, src, size, 0);
12910 }
12911
12912 -unsigned long
12913 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
12914 +extern unsigned long
12915 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
12916
12917 #endif /* _ASM_X86_UACCESS_64_H */
12918 diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
12919 index 9064052..786cfbc 100644
12920 --- a/arch/x86/include/asm/vdso.h
12921 +++ b/arch/x86/include/asm/vdso.h
12922 @@ -25,7 +25,7 @@ extern const char VDSO32_PRELINK[];
12923 #define VDSO32_SYMBOL(base, name) \
12924 ({ \
12925 extern const char VDSO32_##name[]; \
12926 - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
12927 + (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
12928 })
12929 #endif
12930
12931 diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
12932 index 3d61e20..9507180 100644
12933 --- a/arch/x86/include/asm/vgtod.h
12934 +++ b/arch/x86/include/asm/vgtod.h
12935 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
12936 int sysctl_enabled;
12937 struct timezone sys_tz;
12938 struct { /* extract of a clocksource struct */
12939 + char name[8];
12940 cycle_t (*vread)(void);
12941 cycle_t cycle_last;
12942 cycle_t mask;
12943 diff --git a/arch/x86/include/asm/vmi.h b/arch/x86/include/asm/vmi.h
12944 index 61e08c0..b0da582 100644
12945 --- a/arch/x86/include/asm/vmi.h
12946 +++ b/arch/x86/include/asm/vmi.h
12947 @@ -191,6 +191,7 @@ struct vrom_header {
12948 u8 reserved[96]; /* Reserved for headers */
12949 char vmi_init[8]; /* VMI_Init jump point */
12950 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
12951 + char rom_data[8048]; /* rest of the option ROM */
12952 } __attribute__((packed));
12953
12954 struct pnp_header {
12955 diff --git a/arch/x86/include/asm/vmi_time.h b/arch/x86/include/asm/vmi_time.h
12956 index c6e0bee..fcb9f74 100644
12957 --- a/arch/x86/include/asm/vmi_time.h
12958 +++ b/arch/x86/include/asm/vmi_time.h
12959 @@ -43,7 +43,7 @@ extern struct vmi_timer_ops {
12960 int (*wallclock_updated)(void);
12961 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
12962 void (*cancel_alarm)(u32 flags);
12963 -} vmi_timer_ops;
12964 +} __no_const vmi_timer_ops;
12965
12966 /* Prototypes */
12967 extern void __init vmi_time_init(void);
12968 diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
12969 index d0983d2..1f7c9e9 100644
12970 --- a/arch/x86/include/asm/vsyscall.h
12971 +++ b/arch/x86/include/asm/vsyscall.h
12972 @@ -15,9 +15,10 @@ enum vsyscall_num {
12973
12974 #ifdef __KERNEL__
12975 #include <linux/seqlock.h>
12976 +#include <linux/getcpu.h>
12977 +#include <linux/time.h>
12978
12979 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
12980 -#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
12981
12982 /* Definitions for CONFIG_GENERIC_TIME definitions */
12983 #define __section_vsyscall_gtod_data __attribute__ \
12984 @@ -31,7 +32,6 @@ enum vsyscall_num {
12985 #define VGETCPU_LSL 2
12986
12987 extern int __vgetcpu_mode;
12988 -extern volatile unsigned long __jiffies;
12989
12990 /* kernel space (writeable) */
12991 extern int vgetcpu_mode;
12992 @@ -39,6 +39,9 @@ extern struct timezone sys_tz;
12993
12994 extern void map_vsyscall(void);
12995
12996 +extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
12997 +extern time_t vtime(time_t *t);
12998 +extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
12999 #endif /* __KERNEL__ */
13000
13001 #endif /* _ASM_X86_VSYSCALL_H */
13002 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
13003 index 2c756fd..3377e37 100644
13004 --- a/arch/x86/include/asm/x86_init.h
13005 +++ b/arch/x86/include/asm/x86_init.h
13006 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
13007 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
13008 void (*find_smp_config)(unsigned int reserve);
13009 void (*get_smp_config)(unsigned int early);
13010 -};
13011 +} __no_const;
13012
13013 /**
13014 * struct x86_init_resources - platform specific resource related ops
13015 @@ -42,7 +42,7 @@ struct x86_init_resources {
13016 void (*probe_roms)(void);
13017 void (*reserve_resources)(void);
13018 char *(*memory_setup)(void);
13019 -};
13020 +} __no_const;
13021
13022 /**
13023 * struct x86_init_irqs - platform specific interrupt setup
13024 @@ -55,7 +55,7 @@ struct x86_init_irqs {
13025 void (*pre_vector_init)(void);
13026 void (*intr_init)(void);
13027 void (*trap_init)(void);
13028 -};
13029 +} __no_const;
13030
13031 /**
13032 * struct x86_init_oem - oem platform specific customizing functions
13033 @@ -65,7 +65,7 @@ struct x86_init_irqs {
13034 struct x86_init_oem {
13035 void (*arch_setup)(void);
13036 void (*banner)(void);
13037 -};
13038 +} __no_const;
13039
13040 /**
13041 * struct x86_init_paging - platform specific paging functions
13042 @@ -75,7 +75,7 @@ struct x86_init_oem {
13043 struct x86_init_paging {
13044 void (*pagetable_setup_start)(pgd_t *base);
13045 void (*pagetable_setup_done)(pgd_t *base);
13046 -};
13047 +} __no_const;
13048
13049 /**
13050 * struct x86_init_timers - platform specific timer setup
13051 @@ -88,7 +88,7 @@ struct x86_init_timers {
13052 void (*setup_percpu_clockev)(void);
13053 void (*tsc_pre_init)(void);
13054 void (*timer_init)(void);
13055 -};
13056 +} __no_const;
13057
13058 /**
13059 * struct x86_init_ops - functions for platform specific setup
13060 @@ -101,7 +101,7 @@ struct x86_init_ops {
13061 struct x86_init_oem oem;
13062 struct x86_init_paging paging;
13063 struct x86_init_timers timers;
13064 -};
13065 +} __no_const;
13066
13067 /**
13068 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
13069 @@ -109,7 +109,7 @@ struct x86_init_ops {
13070 */
13071 struct x86_cpuinit_ops {
13072 void (*setup_percpu_clockev)(void);
13073 -};
13074 +} __no_const;
13075
13076 /**
13077 * struct x86_platform_ops - platform specific runtime functions
13078 @@ -121,7 +121,7 @@ struct x86_platform_ops {
13079 unsigned long (*calibrate_tsc)(void);
13080 unsigned long (*get_wallclock)(void);
13081 int (*set_wallclock)(unsigned long nowtime);
13082 -};
13083 +} __no_const;
13084
13085 extern struct x86_init_ops x86_init;
13086 extern struct x86_cpuinit_ops x86_cpuinit;
13087 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
13088 index 727acc1..554f3eb 100644
13089 --- a/arch/x86/include/asm/xsave.h
13090 +++ b/arch/x86/include/asm/xsave.h
13091 @@ -56,6 +56,12 @@ static inline int xrstor_checking(struct xsave_struct *fx)
13092 static inline int xsave_user(struct xsave_struct __user *buf)
13093 {
13094 int err;
13095 +
13096 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13097 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
13098 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
13099 +#endif
13100 +
13101 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
13102 "2:\n"
13103 ".section .fixup,\"ax\"\n"
13104 @@ -78,10 +84,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13105 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
13106 {
13107 int err;
13108 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
13109 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
13110 u32 lmask = mask;
13111 u32 hmask = mask >> 32;
13112
13113 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13114 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13115 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13116 +#endif
13117 +
13118 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13119 "2:\n"
13120 ".section .fixup,\"ax\"\n"
13121 diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
13122 index 6a564ac..9b1340c 100644
13123 --- a/arch/x86/kernel/acpi/realmode/Makefile
13124 +++ b/arch/x86/kernel/acpi/realmode/Makefile
13125 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
13126 $(call cc-option, -fno-stack-protector) \
13127 $(call cc-option, -mpreferred-stack-boundary=2)
13128 KBUILD_CFLAGS += $(call cc-option, -m32)
13129 +ifdef CONSTIFY_PLUGIN
13130 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13131 +endif
13132 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13133 GCOV_PROFILE := n
13134
13135 diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13136 index 580b4e2..d4129e4 100644
13137 --- a/arch/x86/kernel/acpi/realmode/wakeup.S
13138 +++ b/arch/x86/kernel/acpi/realmode/wakeup.S
13139 @@ -91,6 +91,9 @@ _start:
13140 /* Do any other stuff... */
13141
13142 #ifndef CONFIG_64BIT
13143 + /* Recheck NX bit overrides (64bit path does this in trampoline) */
13144 + call verify_cpu
13145 +
13146 /* This could also be done in C code... */
13147 movl pmode_cr3, %eax
13148 movl %eax, %cr3
13149 @@ -104,7 +107,7 @@ _start:
13150 movl %eax, %ecx
13151 orl %edx, %ecx
13152 jz 1f
13153 - movl $0xc0000080, %ecx
13154 + mov $MSR_EFER, %ecx
13155 wrmsr
13156 1:
13157
13158 @@ -114,6 +117,7 @@ _start:
13159 movl pmode_cr0, %eax
13160 movl %eax, %cr0
13161 jmp pmode_return
13162 +# include "../../verify_cpu.S"
13163 #else
13164 pushw $0
13165 pushw trampoline_segment
13166 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13167 index ca93638..7042f24 100644
13168 --- a/arch/x86/kernel/acpi/sleep.c
13169 +++ b/arch/x86/kernel/acpi/sleep.c
13170 @@ -11,11 +11,12 @@
13171 #include <linux/cpumask.h>
13172 #include <asm/segment.h>
13173 #include <asm/desc.h>
13174 +#include <asm/e820.h>
13175
13176 #include "realmode/wakeup.h"
13177 #include "sleep.h"
13178
13179 -unsigned long acpi_wakeup_address;
13180 +unsigned long acpi_wakeup_address = 0x2000;
13181 unsigned long acpi_realmode_flags;
13182
13183 /* address in low memory of the wakeup routine. */
13184 @@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
13185 #else /* CONFIG_64BIT */
13186 header->trampoline_segment = setup_trampoline() >> 4;
13187 #ifdef CONFIG_SMP
13188 - stack_start.sp = temp_stack + sizeof(temp_stack);
13189 + stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13190 +
13191 + pax_open_kernel();
13192 early_gdt_descr.address =
13193 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13194 + pax_close_kernel();
13195 +
13196 initial_gs = per_cpu_offset(smp_processor_id());
13197 #endif
13198 initial_code = (unsigned long)wakeup_long64;
13199 @@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
13200 return;
13201 }
13202
13203 - acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
13204 -
13205 - if (!acpi_realmode) {
13206 - printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
13207 - return;
13208 - }
13209 -
13210 - acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
13211 + reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
13212 + acpi_realmode = (unsigned long)__va(acpi_wakeup_address);;
13213 }
13214
13215
13216 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13217 index 8ded418..079961e 100644
13218 --- a/arch/x86/kernel/acpi/wakeup_32.S
13219 +++ b/arch/x86/kernel/acpi/wakeup_32.S
13220 @@ -30,13 +30,11 @@ wakeup_pmode_return:
13221 # and restore the stack ... but you need gdt for this to work
13222 movl saved_context_esp, %esp
13223
13224 - movl %cs:saved_magic, %eax
13225 - cmpl $0x12345678, %eax
13226 + cmpl $0x12345678, saved_magic
13227 jne bogus_magic
13228
13229 # jump to place where we left off
13230 - movl saved_eip, %eax
13231 - jmp *%eax
13232 + jmp *(saved_eip)
13233
13234 bogus_magic:
13235 jmp bogus_magic
13236 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13237 index de7353c..075da5f 100644
13238 --- a/arch/x86/kernel/alternative.c
13239 +++ b/arch/x86/kernel/alternative.c
13240 @@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13241
13242 BUG_ON(p->len > MAX_PATCH_LEN);
13243 /* prep the buffer with the original instructions */
13244 - memcpy(insnbuf, p->instr, p->len);
13245 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13246 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13247 (unsigned long)p->instr, p->len);
13248
13249 @@ -475,7 +475,7 @@ void __init alternative_instructions(void)
13250 if (smp_alt_once)
13251 free_init_pages("SMP alternatives",
13252 (unsigned long)__smp_locks,
13253 - (unsigned long)__smp_locks_end);
13254 + PAGE_ALIGN((unsigned long)__smp_locks_end));
13255
13256 restart_nmi();
13257 }
13258 @@ -492,13 +492,17 @@ void __init alternative_instructions(void)
13259 * instructions. And on the local CPU you need to be protected again NMI or MCE
13260 * handlers seeing an inconsistent instruction while you patch.
13261 */
13262 -static void *__init_or_module text_poke_early(void *addr, const void *opcode,
13263 +static void *__kprobes text_poke_early(void *addr, const void *opcode,
13264 size_t len)
13265 {
13266 unsigned long flags;
13267 local_irq_save(flags);
13268 - memcpy(addr, opcode, len);
13269 +
13270 + pax_open_kernel();
13271 + memcpy(ktla_ktva(addr), opcode, len);
13272 sync_core();
13273 + pax_close_kernel();
13274 +
13275 local_irq_restore(flags);
13276 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13277 that causes hangs on some VIA CPUs. */
13278 @@ -520,35 +524,21 @@ static void *__init_or_module text_poke_early(void *addr, const void *opcode,
13279 */
13280 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13281 {
13282 - unsigned long flags;
13283 - char *vaddr;
13284 + unsigned char *vaddr = ktla_ktva(addr);
13285 struct page *pages[2];
13286 - int i;
13287 + size_t i;
13288
13289 if (!core_kernel_text((unsigned long)addr)) {
13290 - pages[0] = vmalloc_to_page(addr);
13291 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13292 + pages[0] = vmalloc_to_page(vaddr);
13293 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13294 } else {
13295 - pages[0] = virt_to_page(addr);
13296 + pages[0] = virt_to_page(vaddr);
13297 WARN_ON(!PageReserved(pages[0]));
13298 - pages[1] = virt_to_page(addr + PAGE_SIZE);
13299 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13300 }
13301 BUG_ON(!pages[0]);
13302 - local_irq_save(flags);
13303 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13304 - if (pages[1])
13305 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13306 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13307 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13308 - clear_fixmap(FIX_TEXT_POKE0);
13309 - if (pages[1])
13310 - clear_fixmap(FIX_TEXT_POKE1);
13311 - local_flush_tlb();
13312 - sync_core();
13313 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
13314 - that causes hangs on some VIA CPUs. */
13315 + text_poke_early(addr, opcode, len);
13316 for (i = 0; i < len; i++)
13317 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13318 - local_irq_restore(flags);
13319 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13320 return addr;
13321 }
13322 diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
13323 index 3a44b75..1601800 100644
13324 --- a/arch/x86/kernel/amd_iommu.c
13325 +++ b/arch/x86/kernel/amd_iommu.c
13326 @@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(void)
13327 }
13328 }
13329
13330 -static struct dma_map_ops amd_iommu_dma_ops = {
13331 +static const struct dma_map_ops amd_iommu_dma_ops = {
13332 .alloc_coherent = alloc_coherent,
13333 .free_coherent = free_coherent,
13334 .map_page = map_page,
13335 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13336 index 1d2d670..8e3f477 100644
13337 --- a/arch/x86/kernel/apic/apic.c
13338 +++ b/arch/x86/kernel/apic/apic.c
13339 @@ -170,7 +170,7 @@ int first_system_vector = 0xfe;
13340 /*
13341 * Debug level, exported for io_apic.c
13342 */
13343 -unsigned int apic_verbosity;
13344 +int apic_verbosity;
13345
13346 int pic_mode;
13347
13348 @@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs *regs)
13349 apic_write(APIC_ESR, 0);
13350 v1 = apic_read(APIC_ESR);
13351 ack_APIC_irq();
13352 - atomic_inc(&irq_err_count);
13353 + atomic_inc_unchecked(&irq_err_count);
13354
13355 /*
13356 * Here is what the APIC error bits mean:
13357 @@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(void)
13358 u16 *bios_cpu_apicid;
13359 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
13360
13361 + pax_track_stack();
13362 +
13363 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
13364 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
13365
13366 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13367 index 8928d97..f799cea 100644
13368 --- a/arch/x86/kernel/apic/io_apic.c
13369 +++ b/arch/x86/kernel/apic/io_apic.c
13370 @@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapic_entries(void)
13371 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
13372 GFP_ATOMIC);
13373 if (!ioapic_entries)
13374 - return 0;
13375 + return NULL;
13376
13377 for (apic = 0; apic < nr_ioapics; apic++) {
13378 ioapic_entries[apic] =
13379 @@ -733,7 +733,7 @@ nomem:
13380 kfree(ioapic_entries[apic]);
13381 kfree(ioapic_entries);
13382
13383 - return 0;
13384 + return NULL;
13385 }
13386
13387 /*
13388 @@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
13389 }
13390 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
13391
13392 -void lock_vector_lock(void)
13393 +void lock_vector_lock(void) __acquires(vector_lock)
13394 {
13395 /* Used to the online set of cpus does not change
13396 * during assign_irq_vector.
13397 @@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
13398 spin_lock(&vector_lock);
13399 }
13400
13401 -void unlock_vector_lock(void)
13402 +void unlock_vector_lock(void) __releases(vector_lock)
13403 {
13404 spin_unlock(&vector_lock);
13405 }
13406 @@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int irq)
13407 ack_APIC_irq();
13408 }
13409
13410 -atomic_t irq_mis_count;
13411 +atomic_unchecked_t irq_mis_count;
13412
13413 static void ack_apic_level(unsigned int irq)
13414 {
13415 @@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int irq)
13416
13417 /* Tail end of version 0x11 I/O APIC bug workaround */
13418 if (!(v & (1 << (i & 0x1f)))) {
13419 - atomic_inc(&irq_mis_count);
13420 + atomic_inc_unchecked(&irq_mis_count);
13421 spin_lock(&ioapic_lock);
13422 __mask_and_edge_IO_APIC_irq(cfg);
13423 __unmask_and_level_IO_APIC_irq(cfg);
13424 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
13425 index 151ace6..f317474 100644
13426 --- a/arch/x86/kernel/apm_32.c
13427 +++ b/arch/x86/kernel/apm_32.c
13428 @@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
13429 * This is for buggy BIOS's that refer to (real mode) segment 0x40
13430 * even though they are called in protected mode.
13431 */
13432 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
13433 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
13434 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
13435
13436 static const char driver_version[] = "1.16ac"; /* no spaces */
13437 @@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
13438 BUG_ON(cpu != 0);
13439 gdt = get_cpu_gdt_table(cpu);
13440 save_desc_40 = gdt[0x40 / 8];
13441 +
13442 + pax_open_kernel();
13443 gdt[0x40 / 8] = bad_bios_desc;
13444 + pax_close_kernel();
13445
13446 apm_irq_save(flags);
13447 APM_DO_SAVE_SEGS;
13448 @@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
13449 &call->esi);
13450 APM_DO_RESTORE_SEGS;
13451 apm_irq_restore(flags);
13452 +
13453 + pax_open_kernel();
13454 gdt[0x40 / 8] = save_desc_40;
13455 + pax_close_kernel();
13456 +
13457 put_cpu();
13458
13459 return call->eax & 0xff;
13460 @@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
13461 BUG_ON(cpu != 0);
13462 gdt = get_cpu_gdt_table(cpu);
13463 save_desc_40 = gdt[0x40 / 8];
13464 +
13465 + pax_open_kernel();
13466 gdt[0x40 / 8] = bad_bios_desc;
13467 + pax_close_kernel();
13468
13469 apm_irq_save(flags);
13470 APM_DO_SAVE_SEGS;
13471 @@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
13472 &call->eax);
13473 APM_DO_RESTORE_SEGS;
13474 apm_irq_restore(flags);
13475 +
13476 + pax_open_kernel();
13477 gdt[0x40 / 8] = save_desc_40;
13478 + pax_close_kernel();
13479 +
13480 put_cpu();
13481 return error;
13482 }
13483 @@ -975,7 +989,7 @@ recalc:
13484
13485 static void apm_power_off(void)
13486 {
13487 - unsigned char po_bios_call[] = {
13488 + const unsigned char po_bios_call[] = {
13489 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
13490 0x8e, 0xd0, /* movw ax,ss */
13491 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
13492 @@ -2357,12 +2371,15 @@ static int __init apm_init(void)
13493 * code to that CPU.
13494 */
13495 gdt = get_cpu_gdt_table(0);
13496 +
13497 + pax_open_kernel();
13498 set_desc_base(&gdt[APM_CS >> 3],
13499 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
13500 set_desc_base(&gdt[APM_CS_16 >> 3],
13501 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
13502 set_desc_base(&gdt[APM_DS >> 3],
13503 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
13504 + pax_close_kernel();
13505
13506 proc_create("apm", 0, NULL, &apm_file_ops);
13507
13508 diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
13509 index dfdbf64..9b2b6ce 100644
13510 --- a/arch/x86/kernel/asm-offsets_32.c
13511 +++ b/arch/x86/kernel/asm-offsets_32.c
13512 @@ -51,7 +51,6 @@ void foo(void)
13513 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
13514 BLANK();
13515
13516 - OFFSET(TI_task, thread_info, task);
13517 OFFSET(TI_exec_domain, thread_info, exec_domain);
13518 OFFSET(TI_flags, thread_info, flags);
13519 OFFSET(TI_status, thread_info, status);
13520 @@ -60,6 +59,8 @@ void foo(void)
13521 OFFSET(TI_restart_block, thread_info, restart_block);
13522 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
13523 OFFSET(TI_cpu, thread_info, cpu);
13524 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
13525 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13526 BLANK();
13527
13528 OFFSET(GDS_size, desc_ptr, size);
13529 @@ -99,6 +100,7 @@ void foo(void)
13530
13531 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13532 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
13533 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13534 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
13535 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
13536 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
13537 @@ -115,6 +117,11 @@ void foo(void)
13538 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
13539 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13540 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13541 +
13542 +#ifdef CONFIG_PAX_KERNEXEC
13543 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13544 +#endif
13545 +
13546 #endif
13547
13548 #ifdef CONFIG_XEN
13549 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
13550 index 4a6aeed..371de20 100644
13551 --- a/arch/x86/kernel/asm-offsets_64.c
13552 +++ b/arch/x86/kernel/asm-offsets_64.c
13553 @@ -44,6 +44,8 @@ int main(void)
13554 ENTRY(addr_limit);
13555 ENTRY(preempt_count);
13556 ENTRY(status);
13557 + ENTRY(lowest_stack);
13558 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13559 #ifdef CONFIG_IA32_EMULATION
13560 ENTRY(sysenter_return);
13561 #endif
13562 @@ -63,6 +65,18 @@ int main(void)
13563 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13564 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
13565 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
13566 +
13567 +#ifdef CONFIG_PAX_KERNEXEC
13568 + OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13569 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13570 +#endif
13571 +
13572 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13573 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
13574 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
13575 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
13576 +#endif
13577 +
13578 #endif
13579
13580
13581 @@ -115,6 +129,7 @@ int main(void)
13582 ENTRY(cr8);
13583 BLANK();
13584 #undef ENTRY
13585 + DEFINE(TSS_size, sizeof(struct tss_struct));
13586 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
13587 BLANK();
13588 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
13589 @@ -130,6 +145,7 @@ int main(void)
13590
13591 BLANK();
13592 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13593 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13594 #ifdef CONFIG_XEN
13595 BLANK();
13596 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
13597 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
13598 index ff502cc..dc5133e 100644
13599 --- a/arch/x86/kernel/cpu/Makefile
13600 +++ b/arch/x86/kernel/cpu/Makefile
13601 @@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
13602 CFLAGS_REMOVE_common.o = -pg
13603 endif
13604
13605 -# Make sure load_percpu_segment has no stackprotector
13606 -nostackp := $(call cc-option, -fno-stack-protector)
13607 -CFLAGS_common.o := $(nostackp)
13608 -
13609 obj-y := intel_cacheinfo.o addon_cpuid_features.o
13610 obj-y += proc.o capflags.o powerflags.o common.o
13611 obj-y += vmware.o hypervisor.o sched.o
13612 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
13613 index 6e082dc..a0b5f36 100644
13614 --- a/arch/x86/kernel/cpu/amd.c
13615 +++ b/arch/x86/kernel/cpu/amd.c
13616 @@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
13617 unsigned int size)
13618 {
13619 /* AMD errata T13 (order #21922) */
13620 - if ((c->x86 == 6)) {
13621 + if (c->x86 == 6) {
13622 /* Duron Rev A0 */
13623 if (c->x86_model == 3 && c->x86_mask == 0)
13624 size = 64;
13625 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
13626 index 4e34d10..ba6bc97 100644
13627 --- a/arch/x86/kernel/cpu/common.c
13628 +++ b/arch/x86/kernel/cpu/common.c
13629 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
13630
13631 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
13632
13633 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
13634 -#ifdef CONFIG_X86_64
13635 - /*
13636 - * We need valid kernel segments for data and code in long mode too
13637 - * IRET will check the segment types kkeil 2000/10/28
13638 - * Also sysret mandates a special GDT layout
13639 - *
13640 - * TLS descriptors are currently at a different place compared to i386.
13641 - * Hopefully nobody expects them at a fixed place (Wine?)
13642 - */
13643 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
13644 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
13645 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
13646 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
13647 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
13648 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
13649 -#else
13650 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
13651 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13652 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
13653 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
13654 - /*
13655 - * Segments used for calling PnP BIOS have byte granularity.
13656 - * They code segments and data segments have fixed 64k limits,
13657 - * the transfer segment sizes are set at run time.
13658 - */
13659 - /* 32-bit code */
13660 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13661 - /* 16-bit code */
13662 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13663 - /* 16-bit data */
13664 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
13665 - /* 16-bit data */
13666 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
13667 - /* 16-bit data */
13668 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
13669 - /*
13670 - * The APM segments have byte granularity and their bases
13671 - * are set at run time. All have 64k limits.
13672 - */
13673 - /* 32-bit code */
13674 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13675 - /* 16-bit code */
13676 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13677 - /* data */
13678 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
13679 -
13680 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13681 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13682 - GDT_STACK_CANARY_INIT
13683 -#endif
13684 -} };
13685 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
13686 -
13687 static int __init x86_xsave_setup(char *s)
13688 {
13689 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
13690 @@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
13691 {
13692 struct desc_ptr gdt_descr;
13693
13694 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
13695 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
13696 gdt_descr.size = GDT_SIZE - 1;
13697 load_gdt(&gdt_descr);
13698 /* Reload the per-cpu base */
13699 @@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
13700 /* Filter out anything that depends on CPUID levels we don't have */
13701 filter_cpuid_features(c, true);
13702
13703 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
13704 + setup_clear_cpu_cap(X86_FEATURE_SEP);
13705 +#endif
13706 +
13707 /* If the model name is still unset, do table lookup. */
13708 if (!c->x86_model_id[0]) {
13709 const char *p;
13710 @@ -980,6 +930,9 @@ static __init int setup_disablecpuid(char *arg)
13711 }
13712 __setup("clearcpuid=", setup_disablecpuid);
13713
13714 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
13715 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
13716 +
13717 #ifdef CONFIG_X86_64
13718 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
13719
13720 @@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
13721 EXPORT_PER_CPU_SYMBOL(current_task);
13722
13723 DEFINE_PER_CPU(unsigned long, kernel_stack) =
13724 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
13725 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
13726 EXPORT_PER_CPU_SYMBOL(kernel_stack);
13727
13728 DEFINE_PER_CPU(char *, irq_stack_ptr) =
13729 @@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
13730 {
13731 memset(regs, 0, sizeof(struct pt_regs));
13732 regs->fs = __KERNEL_PERCPU;
13733 - regs->gs = __KERNEL_STACK_CANARY;
13734 + savesegment(gs, regs->gs);
13735
13736 return regs;
13737 }
13738 @@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
13739 int i;
13740
13741 cpu = stack_smp_processor_id();
13742 - t = &per_cpu(init_tss, cpu);
13743 + t = init_tss + cpu;
13744 orig_ist = &per_cpu(orig_ist, cpu);
13745
13746 #ifdef CONFIG_NUMA
13747 @@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
13748 switch_to_new_gdt(cpu);
13749 loadsegment(fs, 0);
13750
13751 - load_idt((const struct desc_ptr *)&idt_descr);
13752 + load_idt(&idt_descr);
13753
13754 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
13755 syscall_init();
13756 @@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
13757 wrmsrl(MSR_KERNEL_GS_BASE, 0);
13758 barrier();
13759
13760 - check_efer();
13761 if (cpu != 0)
13762 enable_x2apic();
13763
13764 @@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
13765 {
13766 int cpu = smp_processor_id();
13767 struct task_struct *curr = current;
13768 - struct tss_struct *t = &per_cpu(init_tss, cpu);
13769 + struct tss_struct *t = init_tss + cpu;
13770 struct thread_struct *thread = &curr->thread;
13771
13772 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
13773 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
13774 index 6a77cca..4f4fca0 100644
13775 --- a/arch/x86/kernel/cpu/intel.c
13776 +++ b/arch/x86/kernel/cpu/intel.c
13777 @@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug(void)
13778 * Update the IDT descriptor and reload the IDT so that
13779 * it uses the read-only mapped virtual address.
13780 */
13781 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
13782 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
13783 load_idt(&idt_descr);
13784 }
13785 #endif
13786 diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
13787 index 417990f..96dc36b 100644
13788 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c
13789 +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
13790 @@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
13791 return ret;
13792 }
13793
13794 -static struct sysfs_ops sysfs_ops = {
13795 +static const struct sysfs_ops sysfs_ops = {
13796 .show = show,
13797 .store = store,
13798 };
13799 diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
13800 index 472763d..9831e11 100644
13801 --- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
13802 +++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
13803 @@ -211,7 +211,9 @@ static ssize_t mce_write(struct file *filp, const char __user *ubuf,
13804 static int inject_init(void)
13805 {
13806 printk(KERN_INFO "Machine check injector initialized\n");
13807 - mce_chrdev_ops.write = mce_write;
13808 + pax_open_kernel();
13809 + *(void **)&mce_chrdev_ops.write = mce_write;
13810 + pax_close_kernel();
13811 register_die_notifier(&mce_raise_nb);
13812 return 0;
13813 }
13814 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
13815 index 0f16a2b..21740f5 100644
13816 --- a/arch/x86/kernel/cpu/mcheck/mce.c
13817 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
13818 @@ -43,6 +43,7 @@
13819 #include <asm/ipi.h>
13820 #include <asm/mce.h>
13821 #include <asm/msr.h>
13822 +#include <asm/local.h>
13823
13824 #include "mce-internal.h"
13825
13826 @@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
13827 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
13828 m->cs, m->ip);
13829
13830 - if (m->cs == __KERNEL_CS)
13831 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
13832 print_symbol("{%s}", m->ip);
13833 pr_cont("\n");
13834 }
13835 @@ -221,10 +222,10 @@ static void print_mce_tail(void)
13836
13837 #define PANIC_TIMEOUT 5 /* 5 seconds */
13838
13839 -static atomic_t mce_paniced;
13840 +static atomic_unchecked_t mce_paniced;
13841
13842 static int fake_panic;
13843 -static atomic_t mce_fake_paniced;
13844 +static atomic_unchecked_t mce_fake_paniced;
13845
13846 /* Panic in progress. Enable interrupts and wait for final IPI */
13847 static void wait_for_panic(void)
13848 @@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
13849 /*
13850 * Make sure only one CPU runs in machine check panic
13851 */
13852 - if (atomic_inc_return(&mce_paniced) > 1)
13853 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
13854 wait_for_panic();
13855 barrier();
13856
13857 @@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
13858 console_verbose();
13859 } else {
13860 /* Don't log too much for fake panic */
13861 - if (atomic_inc_return(&mce_fake_paniced) > 1)
13862 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
13863 return;
13864 }
13865 print_mce_head();
13866 @@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
13867 * might have been modified by someone else.
13868 */
13869 rmb();
13870 - if (atomic_read(&mce_paniced))
13871 + if (atomic_read_unchecked(&mce_paniced))
13872 wait_for_panic();
13873 if (!monarch_timeout)
13874 goto out;
13875 @@ -1394,7 +1395,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
13876 }
13877
13878 /* Call the installed machine check handler for this CPU setup. */
13879 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
13880 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
13881 unexpected_machine_check;
13882
13883 /*
13884 @@ -1416,7 +1417,9 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
13885 return;
13886 }
13887
13888 + pax_open_kernel();
13889 machine_check_vector = do_machine_check;
13890 + pax_close_kernel();
13891
13892 mce_init();
13893 mce_cpu_features(c);
13894 @@ -1429,14 +1432,14 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
13895 */
13896
13897 static DEFINE_SPINLOCK(mce_state_lock);
13898 -static int open_count; /* #times opened */
13899 +static local_t open_count; /* #times opened */
13900 static int open_exclu; /* already open exclusive? */
13901
13902 static int mce_open(struct inode *inode, struct file *file)
13903 {
13904 spin_lock(&mce_state_lock);
13905
13906 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
13907 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
13908 spin_unlock(&mce_state_lock);
13909
13910 return -EBUSY;
13911 @@ -1444,7 +1447,7 @@ static int mce_open(struct inode *inode, struct file *file)
13912
13913 if (file->f_flags & O_EXCL)
13914 open_exclu = 1;
13915 - open_count++;
13916 + local_inc(&open_count);
13917
13918 spin_unlock(&mce_state_lock);
13919
13920 @@ -1455,7 +1458,7 @@ static int mce_release(struct inode *inode, struct file *file)
13921 {
13922 spin_lock(&mce_state_lock);
13923
13924 - open_count--;
13925 + local_dec(&open_count);
13926 open_exclu = 0;
13927
13928 spin_unlock(&mce_state_lock);
13929 @@ -2082,7 +2085,7 @@ struct dentry *mce_get_debugfs_dir(void)
13930 static void mce_reset(void)
13931 {
13932 cpu_missing = 0;
13933 - atomic_set(&mce_fake_paniced, 0);
13934 + atomic_set_unchecked(&mce_fake_paniced, 0);
13935 atomic_set(&mce_executing, 0);
13936 atomic_set(&mce_callin, 0);
13937 atomic_set(&global_nwo, 0);
13938 diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
13939 index ef3cd31..9d2f6ab 100644
13940 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
13941 +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
13942 @@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
13943 return ret;
13944 }
13945
13946 -static struct sysfs_ops threshold_ops = {
13947 +static const struct sysfs_ops threshold_ops = {
13948 .show = show,
13949 .store = store,
13950 };
13951 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
13952 index 5c0e653..1e82c7c 100644
13953 --- a/arch/x86/kernel/cpu/mcheck/p5.c
13954 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
13955 @@ -50,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
13956 if (!cpu_has(c, X86_FEATURE_MCE))
13957 return;
13958
13959 + pax_open_kernel();
13960 machine_check_vector = pentium_machine_check;
13961 + pax_close_kernel();
13962 /* Make sure the vector pointer is visible before we enable MCEs: */
13963 wmb();
13964
13965 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
13966 index 54060f5..e6ba93d 100644
13967 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
13968 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
13969 @@ -24,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
13970 {
13971 u32 lo, hi;
13972
13973 + pax_open_kernel();
13974 machine_check_vector = winchip_machine_check;
13975 + pax_close_kernel();
13976 /* Make sure the vector pointer is visible before we enable MCEs: */
13977 wmb();
13978
13979 diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c
13980 index 33af141..92ba9cd 100644
13981 --- a/arch/x86/kernel/cpu/mtrr/amd.c
13982 +++ b/arch/x86/kernel/cpu/mtrr/amd.c
13983 @@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
13984 return 0;
13985 }
13986
13987 -static struct mtrr_ops amd_mtrr_ops = {
13988 +static const struct mtrr_ops amd_mtrr_ops = {
13989 .vendor = X86_VENDOR_AMD,
13990 .set = amd_set_mtrr,
13991 .get = amd_get_mtrr,
13992 diff --git a/arch/x86/kernel/cpu/mtrr/centaur.c b/arch/x86/kernel/cpu/mtrr/centaur.c
13993 index de89f14..316fe3e 100644
13994 --- a/arch/x86/kernel/cpu/mtrr/centaur.c
13995 +++ b/arch/x86/kernel/cpu/mtrr/centaur.c
13996 @@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t
13997 return 0;
13998 }
13999
14000 -static struct mtrr_ops centaur_mtrr_ops = {
14001 +static const struct mtrr_ops centaur_mtrr_ops = {
14002 .vendor = X86_VENDOR_CENTAUR,
14003 .set = centaur_set_mcr,
14004 .get = centaur_get_mcr,
14005 diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
14006 index 228d982..68a3343 100644
14007 --- a/arch/x86/kernel/cpu/mtrr/cyrix.c
14008 +++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
14009 @@ -265,7 +265,7 @@ static void cyrix_set_all(void)
14010 post_set();
14011 }
14012
14013 -static struct mtrr_ops cyrix_mtrr_ops = {
14014 +static const struct mtrr_ops cyrix_mtrr_ops = {
14015 .vendor = X86_VENDOR_CYRIX,
14016 .set_all = cyrix_set_all,
14017 .set = cyrix_set_arr,
14018 diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
14019 index 55da0c5..4d75584 100644
14020 --- a/arch/x86/kernel/cpu/mtrr/generic.c
14021 +++ b/arch/x86/kernel/cpu/mtrr/generic.c
14022 @@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
14023 /*
14024 * Generic structure...
14025 */
14026 -struct mtrr_ops generic_mtrr_ops = {
14027 +const struct mtrr_ops generic_mtrr_ops = {
14028 .use_intel_if = 1,
14029 .set_all = generic_set_all,
14030 .get = generic_get_mtrr,
14031 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
14032 index fd60f09..c94ef52 100644
14033 --- a/arch/x86/kernel/cpu/mtrr/main.c
14034 +++ b/arch/x86/kernel/cpu/mtrr/main.c
14035 @@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
14036 u64 size_or_mask, size_and_mask;
14037 static bool mtrr_aps_delayed_init;
14038
14039 -static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
14040 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
14041
14042 -struct mtrr_ops *mtrr_if;
14043 +const struct mtrr_ops *mtrr_if;
14044
14045 static void set_mtrr(unsigned int reg, unsigned long base,
14046 unsigned long size, mtrr_type type);
14047
14048 -void set_mtrr_ops(struct mtrr_ops *ops)
14049 +void set_mtrr_ops(const struct mtrr_ops *ops)
14050 {
14051 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
14052 mtrr_ops[ops->vendor] = ops;
14053 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
14054 index a501dee..816c719 100644
14055 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
14056 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
14057 @@ -25,14 +25,14 @@ struct mtrr_ops {
14058 int (*validate_add_page)(unsigned long base, unsigned long size,
14059 unsigned int type);
14060 int (*have_wrcomb)(void);
14061 -};
14062 +} __do_const;
14063
14064 extern int generic_get_free_region(unsigned long base, unsigned long size,
14065 int replace_reg);
14066 extern int generic_validate_add_page(unsigned long base, unsigned long size,
14067 unsigned int type);
14068
14069 -extern struct mtrr_ops generic_mtrr_ops;
14070 +extern const struct mtrr_ops generic_mtrr_ops;
14071
14072 extern int positive_have_wrcomb(void);
14073
14074 @@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int index,
14075 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
14076 void get_mtrr_state(void);
14077
14078 -extern void set_mtrr_ops(struct mtrr_ops *ops);
14079 +extern void set_mtrr_ops(const struct mtrr_ops *ops);
14080
14081 extern u64 size_or_mask, size_and_mask;
14082 -extern struct mtrr_ops *mtrr_if;
14083 +extern const struct mtrr_ops *mtrr_if;
14084
14085 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
14086 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
14087 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
14088 index 0ff02ca..fc49a60 100644
14089 --- a/arch/x86/kernel/cpu/perf_event.c
14090 +++ b/arch/x86/kernel/cpu/perf_event.c
14091 @@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event *event,
14092 * count to the generic event atomically:
14093 */
14094 again:
14095 - prev_raw_count = atomic64_read(&hwc->prev_count);
14096 + prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
14097 rdmsrl(hwc->event_base + idx, new_raw_count);
14098
14099 - if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
14100 + if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
14101 new_raw_count) != prev_raw_count)
14102 goto again;
14103
14104 @@ -741,7 +741,7 @@ again:
14105 delta = (new_raw_count << shift) - (prev_raw_count << shift);
14106 delta >>= shift;
14107
14108 - atomic64_add(delta, &event->count);
14109 + atomic64_add_unchecked(delta, &event->count);
14110 atomic64_sub(delta, &hwc->period_left);
14111
14112 return new_raw_count;
14113 @@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_event *event,
14114 * The hw event starts counting from this event offset,
14115 * mark it to be able to extra future deltas:
14116 */
14117 - atomic64_set(&hwc->prev_count, (u64)-left);
14118 + atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
14119
14120 err = checking_wrmsrl(hwc->event_base + idx,
14121 (u64)(-left) & x86_pmu.event_mask);
14122 @@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
14123 break;
14124
14125 callchain_store(entry, frame.return_address);
14126 - fp = frame.next_frame;
14127 + fp = (__force const void __user *)frame.next_frame;
14128 }
14129 }
14130
14131 diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
14132 index 898df97..9e82503 100644
14133 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c
14134 +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
14135 @@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
14136
14137 /* Interface defining a CPU specific perfctr watchdog */
14138 struct wd_ops {
14139 - int (*reserve)(void);
14140 - void (*unreserve)(void);
14141 - int (*setup)(unsigned nmi_hz);
14142 - void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
14143 - void (*stop)(void);
14144 + int (* const reserve)(void);
14145 + void (* const unreserve)(void);
14146 + int (* const setup)(unsigned nmi_hz);
14147 + void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
14148 + void (* const stop)(void);
14149 unsigned perfctr;
14150 unsigned evntsel;
14151 u64 checkbit;
14152 @@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
14153 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
14154 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
14155
14156 +/* cannot be const */
14157 static struct wd_ops intel_arch_wd_ops;
14158
14159 static int setup_intel_arch_watchdog(unsigned nmi_hz)
14160 @@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
14161 return 1;
14162 }
14163
14164 +/* cannot be const */
14165 static struct wd_ops intel_arch_wd_ops __read_mostly = {
14166 .reserve = single_msr_reserve,
14167 .unreserve = single_msr_unreserve,
14168 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
14169 index ff95824..2ffdcb5 100644
14170 --- a/arch/x86/kernel/crash.c
14171 +++ b/arch/x86/kernel/crash.c
14172 @@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu, struct die_args *args)
14173 regs = args->regs;
14174
14175 #ifdef CONFIG_X86_32
14176 - if (!user_mode_vm(regs)) {
14177 + if (!user_mode(regs)) {
14178 crash_fixup_ss_esp(&fixed_regs, regs);
14179 regs = &fixed_regs;
14180 }
14181 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
14182 index 37250fe..bf2ec74 100644
14183 --- a/arch/x86/kernel/doublefault_32.c
14184 +++ b/arch/x86/kernel/doublefault_32.c
14185 @@ -11,7 +11,7 @@
14186
14187 #define DOUBLEFAULT_STACKSIZE (1024)
14188 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
14189 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
14190 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
14191
14192 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
14193
14194 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
14195 unsigned long gdt, tss;
14196
14197 store_gdt(&gdt_desc);
14198 - gdt = gdt_desc.address;
14199 + gdt = (unsigned long)gdt_desc.address;
14200
14201 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
14202
14203 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
14204 /* 0x2 bit is always set */
14205 .flags = X86_EFLAGS_SF | 0x2,
14206 .sp = STACK_START,
14207 - .es = __USER_DS,
14208 + .es = __KERNEL_DS,
14209 .cs = __KERNEL_CS,
14210 .ss = __KERNEL_DS,
14211 - .ds = __USER_DS,
14212 + .ds = __KERNEL_DS,
14213 .fs = __KERNEL_PERCPU,
14214
14215 .__cr3 = __pa_nodebug(swapper_pg_dir),
14216 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
14217 index 2d8a371..4fa6ae6 100644
14218 --- a/arch/x86/kernel/dumpstack.c
14219 +++ b/arch/x86/kernel/dumpstack.c
14220 @@ -2,6 +2,9 @@
14221 * Copyright (C) 1991, 1992 Linus Torvalds
14222 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
14223 */
14224 +#ifdef CONFIG_GRKERNSEC_HIDESYM
14225 +#define __INCLUDED_BY_HIDESYM 1
14226 +#endif
14227 #include <linux/kallsyms.h>
14228 #include <linux/kprobes.h>
14229 #include <linux/uaccess.h>
14230 @@ -28,7 +31,7 @@ static int die_counter;
14231
14232 void printk_address(unsigned long address, int reliable)
14233 {
14234 - printk(" [<%p>] %s%pS\n", (void *) address,
14235 + printk(" [<%p>] %s%pA\n", (void *) address,
14236 reliable ? "" : "? ", (void *) address);
14237 }
14238
14239 @@ -36,9 +39,8 @@ void printk_address(unsigned long address, int reliable)
14240 static void
14241 print_ftrace_graph_addr(unsigned long addr, void *data,
14242 const struct stacktrace_ops *ops,
14243 - struct thread_info *tinfo, int *graph)
14244 + struct task_struct *task, int *graph)
14245 {
14246 - struct task_struct *task = tinfo->task;
14247 unsigned long ret_addr;
14248 int index = task->curr_ret_stack;
14249
14250 @@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14251 static inline void
14252 print_ftrace_graph_addr(unsigned long addr, void *data,
14253 const struct stacktrace_ops *ops,
14254 - struct thread_info *tinfo, int *graph)
14255 + struct task_struct *task, int *graph)
14256 { }
14257 #endif
14258
14259 @@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14260 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
14261 */
14262
14263 -static inline int valid_stack_ptr(struct thread_info *tinfo,
14264 - void *p, unsigned int size, void *end)
14265 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
14266 {
14267 - void *t = tinfo;
14268 if (end) {
14269 if (p < end && p >= (end-THREAD_SIZE))
14270 return 1;
14271 @@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
14272 }
14273
14274 unsigned long
14275 -print_context_stack(struct thread_info *tinfo,
14276 +print_context_stack(struct task_struct *task, void *stack_start,
14277 unsigned long *stack, unsigned long bp,
14278 const struct stacktrace_ops *ops, void *data,
14279 unsigned long *end, int *graph)
14280 {
14281 struct stack_frame *frame = (struct stack_frame *)bp;
14282
14283 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
14284 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
14285 unsigned long addr;
14286
14287 addr = *stack;
14288 @@ -103,7 +103,7 @@ print_context_stack(struct thread_info *tinfo,
14289 } else {
14290 ops->address(data, addr, 0);
14291 }
14292 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14293 + print_ftrace_graph_addr(addr, data, ops, task, graph);
14294 }
14295 stack++;
14296 }
14297 @@ -180,7 +180,7 @@ void dump_stack(void)
14298 #endif
14299
14300 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
14301 - current->pid, current->comm, print_tainted(),
14302 + task_pid_nr(current), current->comm, print_tainted(),
14303 init_utsname()->release,
14304 (int)strcspn(init_utsname()->version, " "),
14305 init_utsname()->version);
14306 @@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
14307 return flags;
14308 }
14309
14310 +extern void gr_handle_kernel_exploit(void);
14311 +
14312 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14313 {
14314 if (regs && kexec_should_crash(current))
14315 @@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14316 panic("Fatal exception in interrupt");
14317 if (panic_on_oops)
14318 panic("Fatal exception");
14319 - do_exit(signr);
14320 +
14321 + gr_handle_kernel_exploit();
14322 +
14323 + do_group_exit(signr);
14324 }
14325
14326 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14327 @@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs *regs, long err)
14328 unsigned long flags = oops_begin();
14329 int sig = SIGSEGV;
14330
14331 - if (!user_mode_vm(regs))
14332 + if (!user_mode(regs))
14333 report_bug(regs->ip, regs);
14334
14335 if (__die(str, regs, err))
14336 diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
14337 index 81086c2..13e8b17 100644
14338 --- a/arch/x86/kernel/dumpstack.h
14339 +++ b/arch/x86/kernel/dumpstack.h
14340 @@ -15,7 +15,7 @@
14341 #endif
14342
14343 extern unsigned long
14344 -print_context_stack(struct thread_info *tinfo,
14345 +print_context_stack(struct task_struct *task, void *stack_start,
14346 unsigned long *stack, unsigned long bp,
14347 const struct stacktrace_ops *ops, void *data,
14348 unsigned long *end, int *graph);
14349 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
14350 index f7dd2a7..504f53b 100644
14351 --- a/arch/x86/kernel/dumpstack_32.c
14352 +++ b/arch/x86/kernel/dumpstack_32.c
14353 @@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14354 #endif
14355
14356 for (;;) {
14357 - struct thread_info *context;
14358 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14359 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14360
14361 - context = (struct thread_info *)
14362 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14363 - bp = print_context_stack(context, stack, bp, ops,
14364 - data, NULL, &graph);
14365 -
14366 - stack = (unsigned long *)context->previous_esp;
14367 - if (!stack)
14368 + if (stack_start == task_stack_page(task))
14369 break;
14370 + stack = *(unsigned long **)stack_start;
14371 if (ops->stack(data, "IRQ") < 0)
14372 break;
14373 touch_nmi_watchdog();
14374 @@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs)
14375 * When in-kernel, we also print out the stack and code at the
14376 * time of the fault..
14377 */
14378 - if (!user_mode_vm(regs)) {
14379 + if (!user_mode(regs)) {
14380 unsigned int code_prologue = code_bytes * 43 / 64;
14381 unsigned int code_len = code_bytes;
14382 unsigned char c;
14383 u8 *ip;
14384 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14385
14386 printk(KERN_EMERG "Stack:\n");
14387 show_stack_log_lvl(NULL, regs, &regs->sp,
14388 @@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs)
14389
14390 printk(KERN_EMERG "Code: ");
14391
14392 - ip = (u8 *)regs->ip - code_prologue;
14393 + ip = (u8 *)regs->ip - code_prologue + cs_base;
14394 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14395 /* try starting at IP */
14396 - ip = (u8 *)regs->ip;
14397 + ip = (u8 *)regs->ip + cs_base;
14398 code_len = code_len - code_prologue + 1;
14399 }
14400 for (i = 0; i < code_len; i++, ip++) {
14401 @@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs)
14402 printk(" Bad EIP value.");
14403 break;
14404 }
14405 - if (ip == (u8 *)regs->ip)
14406 + if (ip == (u8 *)regs->ip + cs_base)
14407 printk("<%02x> ", c);
14408 else
14409 printk("%02x ", c);
14410 @@ -145,10 +142,23 @@ void show_registers(struct pt_regs *regs)
14411 printk("\n");
14412 }
14413
14414 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14415 +void pax_check_alloca(unsigned long size)
14416 +{
14417 + unsigned long sp = (unsigned long)&sp, stack_left;
14418 +
14419 + /* all kernel stacks are of the same size */
14420 + stack_left = sp & (THREAD_SIZE - 1);
14421 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14422 +}
14423 +EXPORT_SYMBOL(pax_check_alloca);
14424 +#endif
14425 +
14426 int is_valid_bugaddr(unsigned long ip)
14427 {
14428 unsigned short ud2;
14429
14430 + ip = ktla_ktva(ip);
14431 if (ip < PAGE_OFFSET)
14432 return 0;
14433 if (probe_kernel_address((unsigned short *)ip, ud2))
14434 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14435 index a071e6b..36cd585 100644
14436 --- a/arch/x86/kernel/dumpstack_64.c
14437 +++ b/arch/x86/kernel/dumpstack_64.c
14438 @@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14439 unsigned long *irq_stack_end =
14440 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14441 unsigned used = 0;
14442 - struct thread_info *tinfo;
14443 int graph = 0;
14444 + void *stack_start;
14445
14446 if (!task)
14447 task = current;
14448 @@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14449 * current stack address. If the stacks consist of nested
14450 * exceptions
14451 */
14452 - tinfo = task_thread_info(task);
14453 for (;;) {
14454 char *id;
14455 unsigned long *estack_end;
14456 +
14457 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14458 &used, &id);
14459
14460 @@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14461 if (ops->stack(data, id) < 0)
14462 break;
14463
14464 - bp = print_context_stack(tinfo, stack, bp, ops,
14465 + bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14466 data, estack_end, &graph);
14467 ops->stack(data, "<EOE>");
14468 /*
14469 @@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14470 if (stack >= irq_stack && stack < irq_stack_end) {
14471 if (ops->stack(data, "IRQ") < 0)
14472 break;
14473 - bp = print_context_stack(tinfo, stack, bp,
14474 + bp = print_context_stack(task, irq_stack, stack, bp,
14475 ops, data, irq_stack_end, &graph);
14476 /*
14477 * We link to the next stack (which would be
14478 @@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14479 /*
14480 * This handles the process stack:
14481 */
14482 - bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14483 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14484 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14485 put_cpu();
14486 }
14487 EXPORT_SYMBOL(dump_trace);
14488 @@ -304,3 +305,50 @@ int is_valid_bugaddr(unsigned long ip)
14489 return ud2 == 0x0b0f;
14490 }
14491
14492 +
14493 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14494 +void pax_check_alloca(unsigned long size)
14495 +{
14496 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14497 + unsigned cpu, used;
14498 + char *id;
14499 +
14500 + /* check the process stack first */
14501 + stack_start = (unsigned long)task_stack_page(current);
14502 + stack_end = stack_start + THREAD_SIZE;
14503 + if (likely(stack_start <= sp && sp < stack_end)) {
14504 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
14505 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14506 + return;
14507 + }
14508 +
14509 + cpu = get_cpu();
14510 +
14511 + /* check the irq stacks */
14512 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14513 + stack_start = stack_end - IRQ_STACK_SIZE;
14514 + if (stack_start <= sp && sp < stack_end) {
14515 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14516 + put_cpu();
14517 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14518 + return;
14519 + }
14520 +
14521 + /* check the exception stacks */
14522 + used = 0;
14523 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14524 + stack_start = stack_end - EXCEPTION_STKSZ;
14525 + if (stack_end && stack_start <= sp && sp < stack_end) {
14526 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14527 + put_cpu();
14528 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14529 + return;
14530 + }
14531 +
14532 + put_cpu();
14533 +
14534 + /* unknown stack */
14535 + BUG();
14536 +}
14537 +EXPORT_SYMBOL(pax_check_alloca);
14538 +#endif
14539 diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
14540 index a89739a..95e0c48 100644
14541 --- a/arch/x86/kernel/e820.c
14542 +++ b/arch/x86/kernel/e820.c
14543 @@ -733,7 +733,7 @@ struct early_res {
14544 };
14545 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
14546 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
14547 - {}
14548 + { 0, 0, {0}, 0 }
14549 };
14550
14551 static int __init find_overlapped_early(u64 start, u64 end)
14552 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14553 index b9c830c..1e41a96 100644
14554 --- a/arch/x86/kernel/early_printk.c
14555 +++ b/arch/x86/kernel/early_printk.c
14556 @@ -7,6 +7,7 @@
14557 #include <linux/pci_regs.h>
14558 #include <linux/pci_ids.h>
14559 #include <linux/errno.h>
14560 +#include <linux/sched.h>
14561 #include <asm/io.h>
14562 #include <asm/processor.h>
14563 #include <asm/fcntl.h>
14564 @@ -170,6 +171,8 @@ asmlinkage void early_printk(const char *fmt, ...)
14565 int n;
14566 va_list ap;
14567
14568 + pax_track_stack();
14569 +
14570 va_start(ap, fmt);
14571 n = vscnprintf(buf, sizeof(buf), fmt, ap);
14572 early_console->write(early_console, buf, n);
14573 diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c
14574 index 5cab48e..b025f9b 100644
14575 --- a/arch/x86/kernel/efi_32.c
14576 +++ b/arch/x86/kernel/efi_32.c
14577 @@ -38,70 +38,56 @@
14578 */
14579
14580 static unsigned long efi_rt_eflags;
14581 -static pgd_t efi_bak_pg_dir_pointer[2];
14582 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
14583
14584 -void efi_call_phys_prelog(void)
14585 +void __init efi_call_phys_prelog(void)
14586 {
14587 - unsigned long cr4;
14588 - unsigned long temp;
14589 struct desc_ptr gdt_descr;
14590
14591 +#ifdef CONFIG_PAX_KERNEXEC
14592 + struct desc_struct d;
14593 +#endif
14594 +
14595 local_irq_save(efi_rt_eflags);
14596
14597 - /*
14598 - * If I don't have PAE, I should just duplicate two entries in page
14599 - * directory. If I have PAE, I just need to duplicate one entry in
14600 - * page directory.
14601 - */
14602 - cr4 = read_cr4_safe();
14603 -
14604 - if (cr4 & X86_CR4_PAE) {
14605 - efi_bak_pg_dir_pointer[0].pgd =
14606 - swapper_pg_dir[pgd_index(0)].pgd;
14607 - swapper_pg_dir[0].pgd =
14608 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
14609 - } else {
14610 - efi_bak_pg_dir_pointer[0].pgd =
14611 - swapper_pg_dir[pgd_index(0)].pgd;
14612 - efi_bak_pg_dir_pointer[1].pgd =
14613 - swapper_pg_dir[pgd_index(0x400000)].pgd;
14614 - swapper_pg_dir[pgd_index(0)].pgd =
14615 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
14616 - temp = PAGE_OFFSET + 0x400000;
14617 - swapper_pg_dir[pgd_index(0x400000)].pgd =
14618 - swapper_pg_dir[pgd_index(temp)].pgd;
14619 - }
14620 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
14621 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
14622 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
14623
14624 /*
14625 * After the lock is released, the original page table is restored.
14626 */
14627 __flush_tlb_all();
14628
14629 +#ifdef CONFIG_PAX_KERNEXEC
14630 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
14631 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
14632 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
14633 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
14634 +#endif
14635 +
14636 gdt_descr.address = __pa(get_cpu_gdt_table(0));
14637 gdt_descr.size = GDT_SIZE - 1;
14638 load_gdt(&gdt_descr);
14639 }
14640
14641 -void efi_call_phys_epilog(void)
14642 +void __init efi_call_phys_epilog(void)
14643 {
14644 - unsigned long cr4;
14645 struct desc_ptr gdt_descr;
14646
14647 +#ifdef CONFIG_PAX_KERNEXEC
14648 + struct desc_struct d;
14649 +
14650 + memset(&d, 0, sizeof d);
14651 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
14652 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
14653 +#endif
14654 +
14655 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
14656 gdt_descr.size = GDT_SIZE - 1;
14657 load_gdt(&gdt_descr);
14658
14659 - cr4 = read_cr4_safe();
14660 -
14661 - if (cr4 & X86_CR4_PAE) {
14662 - swapper_pg_dir[pgd_index(0)].pgd =
14663 - efi_bak_pg_dir_pointer[0].pgd;
14664 - } else {
14665 - swapper_pg_dir[pgd_index(0)].pgd =
14666 - efi_bak_pg_dir_pointer[0].pgd;
14667 - swapper_pg_dir[pgd_index(0x400000)].pgd =
14668 - efi_bak_pg_dir_pointer[1].pgd;
14669 - }
14670 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
14671
14672 /*
14673 * After the lock is released, the original page table is restored.
14674 diff --git a/arch/x86/kernel/efi_stub_32.S b/arch/x86/kernel/efi_stub_32.S
14675 index fbe66e6..c5c0dd2 100644
14676 --- a/arch/x86/kernel/efi_stub_32.S
14677 +++ b/arch/x86/kernel/efi_stub_32.S
14678 @@ -6,7 +6,9 @@
14679 */
14680
14681 #include <linux/linkage.h>
14682 +#include <linux/init.h>
14683 #include <asm/page_types.h>
14684 +#include <asm/segment.h>
14685
14686 /*
14687 * efi_call_phys(void *, ...) is a function with variable parameters.
14688 @@ -20,7 +22,7 @@
14689 * service functions will comply with gcc calling convention, too.
14690 */
14691
14692 -.text
14693 +__INIT
14694 ENTRY(efi_call_phys)
14695 /*
14696 * 0. The function can only be called in Linux kernel. So CS has been
14697 @@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
14698 * The mapping of lower virtual memory has been created in prelog and
14699 * epilog.
14700 */
14701 - movl $1f, %edx
14702 - subl $__PAGE_OFFSET, %edx
14703 - jmp *%edx
14704 + movl $(__KERNEXEC_EFI_DS), %edx
14705 + mov %edx, %ds
14706 + mov %edx, %es
14707 + mov %edx, %ss
14708 + ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
14709 1:
14710
14711 /*
14712 @@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
14713 * parameter 2, ..., param n. To make things easy, we save the return
14714 * address of efi_call_phys in a global variable.
14715 */
14716 - popl %edx
14717 - movl %edx, saved_return_addr
14718 - /* get the function pointer into ECX*/
14719 - popl %ecx
14720 - movl %ecx, efi_rt_function_ptr
14721 - movl $2f, %edx
14722 - subl $__PAGE_OFFSET, %edx
14723 - pushl %edx
14724 + popl (saved_return_addr)
14725 + popl (efi_rt_function_ptr)
14726
14727 /*
14728 * 3. Clear PG bit in %CR0.
14729 @@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
14730 /*
14731 * 5. Call the physical function.
14732 */
14733 - jmp *%ecx
14734 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
14735
14736 -2:
14737 /*
14738 * 6. After EFI runtime service returns, control will return to
14739 * following instruction. We'd better readjust stack pointer first.
14740 @@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
14741 movl %cr0, %edx
14742 orl $0x80000000, %edx
14743 movl %edx, %cr0
14744 - jmp 1f
14745 -1:
14746 +
14747 /*
14748 * 8. Now restore the virtual mode from flat mode by
14749 * adding EIP with PAGE_OFFSET.
14750 */
14751 - movl $1f, %edx
14752 - jmp *%edx
14753 + ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
14754 1:
14755 + movl $(__KERNEL_DS), %edx
14756 + mov %edx, %ds
14757 + mov %edx, %es
14758 + mov %edx, %ss
14759
14760 /*
14761 * 9. Balance the stack. And because EAX contain the return value,
14762 * we'd better not clobber it.
14763 */
14764 - leal efi_rt_function_ptr, %edx
14765 - movl (%edx), %ecx
14766 - pushl %ecx
14767 + pushl (efi_rt_function_ptr)
14768
14769 /*
14770 - * 10. Push the saved return address onto the stack and return.
14771 + * 10. Return to the saved return address.
14772 */
14773 - leal saved_return_addr, %edx
14774 - movl (%edx), %ecx
14775 - pushl %ecx
14776 - ret
14777 + jmpl *(saved_return_addr)
14778 ENDPROC(efi_call_phys)
14779 .previous
14780
14781 -.data
14782 +__INITDATA
14783 saved_return_addr:
14784 .long 0
14785 efi_rt_function_ptr:
14786 diff --git a/arch/x86/kernel/efi_stub_64.S b/arch/x86/kernel/efi_stub_64.S
14787 index 4c07cca..2c8427d 100644
14788 --- a/arch/x86/kernel/efi_stub_64.S
14789 +++ b/arch/x86/kernel/efi_stub_64.S
14790 @@ -7,6 +7,7 @@
14791 */
14792
14793 #include <linux/linkage.h>
14794 +#include <asm/alternative-asm.h>
14795
14796 #define SAVE_XMM \
14797 mov %rsp, %rax; \
14798 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
14799 call *%rdi
14800 addq $32, %rsp
14801 RESTORE_XMM
14802 + pax_force_retaddr 0, 1
14803 ret
14804 ENDPROC(efi_call0)
14805
14806 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
14807 call *%rdi
14808 addq $32, %rsp
14809 RESTORE_XMM
14810 + pax_force_retaddr 0, 1
14811 ret
14812 ENDPROC(efi_call1)
14813
14814 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
14815 call *%rdi
14816 addq $32, %rsp
14817 RESTORE_XMM
14818 + pax_force_retaddr 0, 1
14819 ret
14820 ENDPROC(efi_call2)
14821
14822 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
14823 call *%rdi
14824 addq $32, %rsp
14825 RESTORE_XMM
14826 + pax_force_retaddr 0, 1
14827 ret
14828 ENDPROC(efi_call3)
14829
14830 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
14831 call *%rdi
14832 addq $32, %rsp
14833 RESTORE_XMM
14834 + pax_force_retaddr 0, 1
14835 ret
14836 ENDPROC(efi_call4)
14837
14838 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
14839 call *%rdi
14840 addq $48, %rsp
14841 RESTORE_XMM
14842 + pax_force_retaddr 0, 1
14843 ret
14844 ENDPROC(efi_call5)
14845
14846 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
14847 call *%rdi
14848 addq $48, %rsp
14849 RESTORE_XMM
14850 + pax_force_retaddr 0, 1
14851 ret
14852 ENDPROC(efi_call6)
14853 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
14854 index c097e7d..c689cf4 100644
14855 --- a/arch/x86/kernel/entry_32.S
14856 +++ b/arch/x86/kernel/entry_32.S
14857 @@ -185,13 +185,146 @@
14858 /*CFI_REL_OFFSET gs, PT_GS*/
14859 .endm
14860 .macro SET_KERNEL_GS reg
14861 +
14862 +#ifdef CONFIG_CC_STACKPROTECTOR
14863 movl $(__KERNEL_STACK_CANARY), \reg
14864 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14865 + movl $(__USER_DS), \reg
14866 +#else
14867 + xorl \reg, \reg
14868 +#endif
14869 +
14870 movl \reg, %gs
14871 .endm
14872
14873 #endif /* CONFIG_X86_32_LAZY_GS */
14874
14875 -.macro SAVE_ALL
14876 +.macro pax_enter_kernel
14877 +#ifdef CONFIG_PAX_KERNEXEC
14878 + call pax_enter_kernel
14879 +#endif
14880 +.endm
14881 +
14882 +.macro pax_exit_kernel
14883 +#ifdef CONFIG_PAX_KERNEXEC
14884 + call pax_exit_kernel
14885 +#endif
14886 +.endm
14887 +
14888 +#ifdef CONFIG_PAX_KERNEXEC
14889 +ENTRY(pax_enter_kernel)
14890 +#ifdef CONFIG_PARAVIRT
14891 + pushl %eax
14892 + pushl %ecx
14893 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
14894 + mov %eax, %esi
14895 +#else
14896 + mov %cr0, %esi
14897 +#endif
14898 + bts $16, %esi
14899 + jnc 1f
14900 + mov %cs, %esi
14901 + cmp $__KERNEL_CS, %esi
14902 + jz 3f
14903 + ljmp $__KERNEL_CS, $3f
14904 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
14905 +2:
14906 +#ifdef CONFIG_PARAVIRT
14907 + mov %esi, %eax
14908 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14909 +#else
14910 + mov %esi, %cr0
14911 +#endif
14912 +3:
14913 +#ifdef CONFIG_PARAVIRT
14914 + popl %ecx
14915 + popl %eax
14916 +#endif
14917 + ret
14918 +ENDPROC(pax_enter_kernel)
14919 +
14920 +ENTRY(pax_exit_kernel)
14921 +#ifdef CONFIG_PARAVIRT
14922 + pushl %eax
14923 + pushl %ecx
14924 +#endif
14925 + mov %cs, %esi
14926 + cmp $__KERNEXEC_KERNEL_CS, %esi
14927 + jnz 2f
14928 +#ifdef CONFIG_PARAVIRT
14929 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
14930 + mov %eax, %esi
14931 +#else
14932 + mov %cr0, %esi
14933 +#endif
14934 + btr $16, %esi
14935 + ljmp $__KERNEL_CS, $1f
14936 +1:
14937 +#ifdef CONFIG_PARAVIRT
14938 + mov %esi, %eax
14939 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
14940 +#else
14941 + mov %esi, %cr0
14942 +#endif
14943 +2:
14944 +#ifdef CONFIG_PARAVIRT
14945 + popl %ecx
14946 + popl %eax
14947 +#endif
14948 + ret
14949 +ENDPROC(pax_exit_kernel)
14950 +#endif
14951 +
14952 +.macro pax_erase_kstack
14953 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14954 + call pax_erase_kstack
14955 +#endif
14956 +.endm
14957 +
14958 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14959 +/*
14960 + * ebp: thread_info
14961 + * ecx, edx: can be clobbered
14962 + */
14963 +ENTRY(pax_erase_kstack)
14964 + pushl %edi
14965 + pushl %eax
14966 +
14967 + mov TI_lowest_stack(%ebp), %edi
14968 + mov $-0xBEEF, %eax
14969 + std
14970 +
14971 +1: mov %edi, %ecx
14972 + and $THREAD_SIZE_asm - 1, %ecx
14973 + shr $2, %ecx
14974 + repne scasl
14975 + jecxz 2f
14976 +
14977 + cmp $2*16, %ecx
14978 + jc 2f
14979 +
14980 + mov $2*16, %ecx
14981 + repe scasl
14982 + jecxz 2f
14983 + jne 1b
14984 +
14985 +2: cld
14986 + mov %esp, %ecx
14987 + sub %edi, %ecx
14988 + shr $2, %ecx
14989 + rep stosl
14990 +
14991 + mov TI_task_thread_sp0(%ebp), %edi
14992 + sub $128, %edi
14993 + mov %edi, TI_lowest_stack(%ebp)
14994 +
14995 + popl %eax
14996 + popl %edi
14997 + ret
14998 +ENDPROC(pax_erase_kstack)
14999 +#endif
15000 +
15001 +.macro __SAVE_ALL _DS
15002 cld
15003 PUSH_GS
15004 pushl %fs
15005 @@ -224,7 +357,7 @@
15006 pushl %ebx
15007 CFI_ADJUST_CFA_OFFSET 4
15008 CFI_REL_OFFSET ebx, 0
15009 - movl $(__USER_DS), %edx
15010 + movl $\_DS, %edx
15011 movl %edx, %ds
15012 movl %edx, %es
15013 movl $(__KERNEL_PERCPU), %edx
15014 @@ -232,6 +365,15 @@
15015 SET_KERNEL_GS %edx
15016 .endm
15017
15018 +.macro SAVE_ALL
15019 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
15020 + __SAVE_ALL __KERNEL_DS
15021 + pax_enter_kernel
15022 +#else
15023 + __SAVE_ALL __USER_DS
15024 +#endif
15025 +.endm
15026 +
15027 .macro RESTORE_INT_REGS
15028 popl %ebx
15029 CFI_ADJUST_CFA_OFFSET -4
15030 @@ -331,7 +473,7 @@ ENTRY(ret_from_fork)
15031 CFI_ADJUST_CFA_OFFSET -4
15032 jmp syscall_exit
15033 CFI_ENDPROC
15034 -END(ret_from_fork)
15035 +ENDPROC(ret_from_fork)
15036
15037 /*
15038 * Return to user mode is not as complex as all this looks,
15039 @@ -352,7 +494,15 @@ check_userspace:
15040 movb PT_CS(%esp), %al
15041 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
15042 cmpl $USER_RPL, %eax
15043 +
15044 +#ifdef CONFIG_PAX_KERNEXEC
15045 + jae resume_userspace
15046 +
15047 + PAX_EXIT_KERNEL
15048 + jmp resume_kernel
15049 +#else
15050 jb resume_kernel # not returning to v8086 or userspace
15051 +#endif
15052
15053 ENTRY(resume_userspace)
15054 LOCKDEP_SYS_EXIT
15055 @@ -364,8 +514,8 @@ ENTRY(resume_userspace)
15056 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
15057 # int/exception return?
15058 jne work_pending
15059 - jmp restore_all
15060 -END(ret_from_exception)
15061 + jmp restore_all_pax
15062 +ENDPROC(ret_from_exception)
15063
15064 #ifdef CONFIG_PREEMPT
15065 ENTRY(resume_kernel)
15066 @@ -380,7 +530,7 @@ need_resched:
15067 jz restore_all
15068 call preempt_schedule_irq
15069 jmp need_resched
15070 -END(resume_kernel)
15071 +ENDPROC(resume_kernel)
15072 #endif
15073 CFI_ENDPROC
15074
15075 @@ -414,25 +564,36 @@ sysenter_past_esp:
15076 /*CFI_REL_OFFSET cs, 0*/
15077 /*
15078 * Push current_thread_info()->sysenter_return to the stack.
15079 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
15080 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
15081 */
15082 - pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
15083 + pushl $0
15084 CFI_ADJUST_CFA_OFFSET 4
15085 CFI_REL_OFFSET eip, 0
15086
15087 pushl %eax
15088 CFI_ADJUST_CFA_OFFSET 4
15089 SAVE_ALL
15090 + GET_THREAD_INFO(%ebp)
15091 + movl TI_sysenter_return(%ebp),%ebp
15092 + movl %ebp,PT_EIP(%esp)
15093 ENABLE_INTERRUPTS(CLBR_NONE)
15094
15095 /*
15096 * Load the potential sixth argument from user stack.
15097 * Careful about security.
15098 */
15099 + movl PT_OLDESP(%esp),%ebp
15100 +
15101 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15102 + mov PT_OLDSS(%esp),%ds
15103 +1: movl %ds:(%ebp),%ebp
15104 + push %ss
15105 + pop %ds
15106 +#else
15107 cmpl $__PAGE_OFFSET-3,%ebp
15108 jae syscall_fault
15109 1: movl (%ebp),%ebp
15110 +#endif
15111 +
15112 movl %ebp,PT_EBP(%esp)
15113 .section __ex_table,"a"
15114 .align 4
15115 @@ -455,12 +616,24 @@ sysenter_do_call:
15116 testl $_TIF_ALLWORK_MASK, %ecx
15117 jne sysexit_audit
15118 sysenter_exit:
15119 +
15120 +#ifdef CONFIG_PAX_RANDKSTACK
15121 + pushl_cfi %eax
15122 + movl %esp, %eax
15123 + call pax_randomize_kstack
15124 + popl_cfi %eax
15125 +#endif
15126 +
15127 + pax_erase_kstack
15128 +
15129 /* if something modifies registers it must also disable sysexit */
15130 movl PT_EIP(%esp), %edx
15131 movl PT_OLDESP(%esp), %ecx
15132 xorl %ebp,%ebp
15133 TRACE_IRQS_ON
15134 1: mov PT_FS(%esp), %fs
15135 +2: mov PT_DS(%esp), %ds
15136 +3: mov PT_ES(%esp), %es
15137 PTGS_TO_GS
15138 ENABLE_INTERRUPTS_SYSEXIT
15139
15140 @@ -477,6 +650,9 @@ sysenter_audit:
15141 movl %eax,%edx /* 2nd arg: syscall number */
15142 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
15143 call audit_syscall_entry
15144 +
15145 + pax_erase_kstack
15146 +
15147 pushl %ebx
15148 CFI_ADJUST_CFA_OFFSET 4
15149 movl PT_EAX(%esp),%eax /* reload syscall number */
15150 @@ -504,11 +680,17 @@ sysexit_audit:
15151
15152 CFI_ENDPROC
15153 .pushsection .fixup,"ax"
15154 -2: movl $0,PT_FS(%esp)
15155 +4: movl $0,PT_FS(%esp)
15156 + jmp 1b
15157 +5: movl $0,PT_DS(%esp)
15158 + jmp 1b
15159 +6: movl $0,PT_ES(%esp)
15160 jmp 1b
15161 .section __ex_table,"a"
15162 .align 4
15163 - .long 1b,2b
15164 + .long 1b,4b
15165 + .long 2b,5b
15166 + .long 3b,6b
15167 .popsection
15168 PTGS_TO_GS_EX
15169 ENDPROC(ia32_sysenter_target)
15170 @@ -538,6 +720,15 @@ syscall_exit:
15171 testl $_TIF_ALLWORK_MASK, %ecx # current->work
15172 jne syscall_exit_work
15173
15174 +restore_all_pax:
15175 +
15176 +#ifdef CONFIG_PAX_RANDKSTACK
15177 + movl %esp, %eax
15178 + call pax_randomize_kstack
15179 +#endif
15180 +
15181 + pax_erase_kstack
15182 +
15183 restore_all:
15184 TRACE_IRQS_IRET
15185 restore_all_notrace:
15186 @@ -602,10 +793,29 @@ ldt_ss:
15187 mov PT_OLDESP(%esp), %eax /* load userspace esp */
15188 mov %dx, %ax /* eax: new kernel esp */
15189 sub %eax, %edx /* offset (low word is 0) */
15190 - PER_CPU(gdt_page, %ebx)
15191 +#ifdef CONFIG_SMP
15192 + movl PER_CPU_VAR(cpu_number), %ebx
15193 + shll $PAGE_SHIFT_asm, %ebx
15194 + addl $cpu_gdt_table, %ebx
15195 +#else
15196 + movl $cpu_gdt_table, %ebx
15197 +#endif
15198 shr $16, %edx
15199 +
15200 +#ifdef CONFIG_PAX_KERNEXEC
15201 + mov %cr0, %esi
15202 + btr $16, %esi
15203 + mov %esi, %cr0
15204 +#endif
15205 +
15206 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
15207 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
15208 +
15209 +#ifdef CONFIG_PAX_KERNEXEC
15210 + bts $16, %esi
15211 + mov %esi, %cr0
15212 +#endif
15213 +
15214 pushl $__ESPFIX_SS
15215 CFI_ADJUST_CFA_OFFSET 4
15216 push %eax /* new kernel esp */
15217 @@ -636,36 +846,30 @@ work_resched:
15218 movl TI_flags(%ebp), %ecx
15219 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
15220 # than syscall tracing?
15221 - jz restore_all
15222 + jz restore_all_pax
15223 testb $_TIF_NEED_RESCHED, %cl
15224 jnz work_resched
15225
15226 work_notifysig: # deal with pending signals and
15227 # notify-resume requests
15228 + movl %esp, %eax
15229 #ifdef CONFIG_VM86
15230 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
15231 - movl %esp, %eax
15232 - jne work_notifysig_v86 # returning to kernel-space or
15233 + jz 1f # returning to kernel-space or
15234 # vm86-space
15235 - xorl %edx, %edx
15236 - call do_notify_resume
15237 - jmp resume_userspace_sig
15238
15239 - ALIGN
15240 -work_notifysig_v86:
15241 pushl %ecx # save ti_flags for do_notify_resume
15242 CFI_ADJUST_CFA_OFFSET 4
15243 call save_v86_state # %eax contains pt_regs pointer
15244 popl %ecx
15245 CFI_ADJUST_CFA_OFFSET -4
15246 movl %eax, %esp
15247 -#else
15248 - movl %esp, %eax
15249 +1:
15250 #endif
15251 xorl %edx, %edx
15252 call do_notify_resume
15253 jmp resume_userspace_sig
15254 -END(work_pending)
15255 +ENDPROC(work_pending)
15256
15257 # perform syscall exit tracing
15258 ALIGN
15259 @@ -673,11 +877,14 @@ syscall_trace_entry:
15260 movl $-ENOSYS,PT_EAX(%esp)
15261 movl %esp, %eax
15262 call syscall_trace_enter
15263 +
15264 + pax_erase_kstack
15265 +
15266 /* What it returned is what we'll actually use. */
15267 cmpl $(nr_syscalls), %eax
15268 jnae syscall_call
15269 jmp syscall_exit
15270 -END(syscall_trace_entry)
15271 +ENDPROC(syscall_trace_entry)
15272
15273 # perform syscall exit tracing
15274 ALIGN
15275 @@ -690,20 +897,24 @@ syscall_exit_work:
15276 movl %esp, %eax
15277 call syscall_trace_leave
15278 jmp resume_userspace
15279 -END(syscall_exit_work)
15280 +ENDPROC(syscall_exit_work)
15281 CFI_ENDPROC
15282
15283 RING0_INT_FRAME # can't unwind into user space anyway
15284 syscall_fault:
15285 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15286 + push %ss
15287 + pop %ds
15288 +#endif
15289 GET_THREAD_INFO(%ebp)
15290 movl $-EFAULT,PT_EAX(%esp)
15291 jmp resume_userspace
15292 -END(syscall_fault)
15293 +ENDPROC(syscall_fault)
15294
15295 syscall_badsys:
15296 movl $-ENOSYS,PT_EAX(%esp)
15297 jmp resume_userspace
15298 -END(syscall_badsys)
15299 +ENDPROC(syscall_badsys)
15300 CFI_ENDPROC
15301
15302 /*
15303 @@ -726,6 +937,33 @@ PTREGSCALL(rt_sigreturn)
15304 PTREGSCALL(vm86)
15305 PTREGSCALL(vm86old)
15306
15307 + ALIGN;
15308 +ENTRY(kernel_execve)
15309 + push %ebp
15310 + sub $PT_OLDSS+4,%esp
15311 + push %edi
15312 + push %ecx
15313 + push %eax
15314 + lea 3*4(%esp),%edi
15315 + mov $PT_OLDSS/4+1,%ecx
15316 + xorl %eax,%eax
15317 + rep stosl
15318 + pop %eax
15319 + pop %ecx
15320 + pop %edi
15321 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
15322 + mov %eax,PT_EBX(%esp)
15323 + mov %edx,PT_ECX(%esp)
15324 + mov %ecx,PT_EDX(%esp)
15325 + mov %esp,%eax
15326 + call sys_execve
15327 + GET_THREAD_INFO(%ebp)
15328 + test %eax,%eax
15329 + jz syscall_exit
15330 + add $PT_OLDSS+4,%esp
15331 + pop %ebp
15332 + ret
15333 +
15334 .macro FIXUP_ESPFIX_STACK
15335 /*
15336 * Switch back for ESPFIX stack to the normal zerobased stack
15337 @@ -735,7 +973,13 @@ PTREGSCALL(vm86old)
15338 * normal stack and adjusts ESP with the matching offset.
15339 */
15340 /* fixup the stack */
15341 - PER_CPU(gdt_page, %ebx)
15342 +#ifdef CONFIG_SMP
15343 + movl PER_CPU_VAR(cpu_number), %ebx
15344 + shll $PAGE_SHIFT_asm, %ebx
15345 + addl $cpu_gdt_table, %ebx
15346 +#else
15347 + movl $cpu_gdt_table, %ebx
15348 +#endif
15349 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
15350 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
15351 shl $16, %eax
15352 @@ -793,7 +1037,7 @@ vector=vector+1
15353 .endr
15354 2: jmp common_interrupt
15355 .endr
15356 -END(irq_entries_start)
15357 +ENDPROC(irq_entries_start)
15358
15359 .previous
15360 END(interrupt)
15361 @@ -840,7 +1084,7 @@ ENTRY(coprocessor_error)
15362 CFI_ADJUST_CFA_OFFSET 4
15363 jmp error_code
15364 CFI_ENDPROC
15365 -END(coprocessor_error)
15366 +ENDPROC(coprocessor_error)
15367
15368 ENTRY(simd_coprocessor_error)
15369 RING0_INT_FRAME
15370 @@ -850,7 +1094,7 @@ ENTRY(simd_coprocessor_error)
15371 CFI_ADJUST_CFA_OFFSET 4
15372 jmp error_code
15373 CFI_ENDPROC
15374 -END(simd_coprocessor_error)
15375 +ENDPROC(simd_coprocessor_error)
15376
15377 ENTRY(device_not_available)
15378 RING0_INT_FRAME
15379 @@ -860,7 +1104,7 @@ ENTRY(device_not_available)
15380 CFI_ADJUST_CFA_OFFSET 4
15381 jmp error_code
15382 CFI_ENDPROC
15383 -END(device_not_available)
15384 +ENDPROC(device_not_available)
15385
15386 #ifdef CONFIG_PARAVIRT
15387 ENTRY(native_iret)
15388 @@ -869,12 +1113,12 @@ ENTRY(native_iret)
15389 .align 4
15390 .long native_iret, iret_exc
15391 .previous
15392 -END(native_iret)
15393 +ENDPROC(native_iret)
15394
15395 ENTRY(native_irq_enable_sysexit)
15396 sti
15397 sysexit
15398 -END(native_irq_enable_sysexit)
15399 +ENDPROC(native_irq_enable_sysexit)
15400 #endif
15401
15402 ENTRY(overflow)
15403 @@ -885,7 +1129,7 @@ ENTRY(overflow)
15404 CFI_ADJUST_CFA_OFFSET 4
15405 jmp error_code
15406 CFI_ENDPROC
15407 -END(overflow)
15408 +ENDPROC(overflow)
15409
15410 ENTRY(bounds)
15411 RING0_INT_FRAME
15412 @@ -895,7 +1139,7 @@ ENTRY(bounds)
15413 CFI_ADJUST_CFA_OFFSET 4
15414 jmp error_code
15415 CFI_ENDPROC
15416 -END(bounds)
15417 +ENDPROC(bounds)
15418
15419 ENTRY(invalid_op)
15420 RING0_INT_FRAME
15421 @@ -905,7 +1149,7 @@ ENTRY(invalid_op)
15422 CFI_ADJUST_CFA_OFFSET 4
15423 jmp error_code
15424 CFI_ENDPROC
15425 -END(invalid_op)
15426 +ENDPROC(invalid_op)
15427
15428 ENTRY(coprocessor_segment_overrun)
15429 RING0_INT_FRAME
15430 @@ -915,7 +1159,7 @@ ENTRY(coprocessor_segment_overrun)
15431 CFI_ADJUST_CFA_OFFSET 4
15432 jmp error_code
15433 CFI_ENDPROC
15434 -END(coprocessor_segment_overrun)
15435 +ENDPROC(coprocessor_segment_overrun)
15436
15437 ENTRY(invalid_TSS)
15438 RING0_EC_FRAME
15439 @@ -923,7 +1167,7 @@ ENTRY(invalid_TSS)
15440 CFI_ADJUST_CFA_OFFSET 4
15441 jmp error_code
15442 CFI_ENDPROC
15443 -END(invalid_TSS)
15444 +ENDPROC(invalid_TSS)
15445
15446 ENTRY(segment_not_present)
15447 RING0_EC_FRAME
15448 @@ -931,7 +1175,7 @@ ENTRY(segment_not_present)
15449 CFI_ADJUST_CFA_OFFSET 4
15450 jmp error_code
15451 CFI_ENDPROC
15452 -END(segment_not_present)
15453 +ENDPROC(segment_not_present)
15454
15455 ENTRY(stack_segment)
15456 RING0_EC_FRAME
15457 @@ -939,7 +1183,7 @@ ENTRY(stack_segment)
15458 CFI_ADJUST_CFA_OFFSET 4
15459 jmp error_code
15460 CFI_ENDPROC
15461 -END(stack_segment)
15462 +ENDPROC(stack_segment)
15463
15464 ENTRY(alignment_check)
15465 RING0_EC_FRAME
15466 @@ -947,7 +1191,7 @@ ENTRY(alignment_check)
15467 CFI_ADJUST_CFA_OFFSET 4
15468 jmp error_code
15469 CFI_ENDPROC
15470 -END(alignment_check)
15471 +ENDPROC(alignment_check)
15472
15473 ENTRY(divide_error)
15474 RING0_INT_FRAME
15475 @@ -957,7 +1201,7 @@ ENTRY(divide_error)
15476 CFI_ADJUST_CFA_OFFSET 4
15477 jmp error_code
15478 CFI_ENDPROC
15479 -END(divide_error)
15480 +ENDPROC(divide_error)
15481
15482 #ifdef CONFIG_X86_MCE
15483 ENTRY(machine_check)
15484 @@ -968,7 +1212,7 @@ ENTRY(machine_check)
15485 CFI_ADJUST_CFA_OFFSET 4
15486 jmp error_code
15487 CFI_ENDPROC
15488 -END(machine_check)
15489 +ENDPROC(machine_check)
15490 #endif
15491
15492 ENTRY(spurious_interrupt_bug)
15493 @@ -979,7 +1223,7 @@ ENTRY(spurious_interrupt_bug)
15494 CFI_ADJUST_CFA_OFFSET 4
15495 jmp error_code
15496 CFI_ENDPROC
15497 -END(spurious_interrupt_bug)
15498 +ENDPROC(spurious_interrupt_bug)
15499
15500 ENTRY(kernel_thread_helper)
15501 pushl $0 # fake return address for unwinder
15502 @@ -1095,7 +1339,7 @@ ENDPROC(xen_failsafe_callback)
15503
15504 ENTRY(mcount)
15505 ret
15506 -END(mcount)
15507 +ENDPROC(mcount)
15508
15509 ENTRY(ftrace_caller)
15510 cmpl $0, function_trace_stop
15511 @@ -1124,7 +1368,7 @@ ftrace_graph_call:
15512 .globl ftrace_stub
15513 ftrace_stub:
15514 ret
15515 -END(ftrace_caller)
15516 +ENDPROC(ftrace_caller)
15517
15518 #else /* ! CONFIG_DYNAMIC_FTRACE */
15519
15520 @@ -1160,7 +1404,7 @@ trace:
15521 popl %ecx
15522 popl %eax
15523 jmp ftrace_stub
15524 -END(mcount)
15525 +ENDPROC(mcount)
15526 #endif /* CONFIG_DYNAMIC_FTRACE */
15527 #endif /* CONFIG_FUNCTION_TRACER */
15528
15529 @@ -1181,7 +1425,7 @@ ENTRY(ftrace_graph_caller)
15530 popl %ecx
15531 popl %eax
15532 ret
15533 -END(ftrace_graph_caller)
15534 +ENDPROC(ftrace_graph_caller)
15535
15536 .globl return_to_handler
15537 return_to_handler:
15538 @@ -1198,7 +1442,6 @@ return_to_handler:
15539 ret
15540 #endif
15541
15542 -.section .rodata,"a"
15543 #include "syscall_table_32.S"
15544
15545 syscall_table_size=(.-sys_call_table)
15546 @@ -1255,15 +1498,18 @@ error_code:
15547 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
15548 REG_TO_PTGS %ecx
15549 SET_KERNEL_GS %ecx
15550 - movl $(__USER_DS), %ecx
15551 + movl $(__KERNEL_DS), %ecx
15552 movl %ecx, %ds
15553 movl %ecx, %es
15554 +
15555 + pax_enter_kernel
15556 +
15557 TRACE_IRQS_OFF
15558 movl %esp,%eax # pt_regs pointer
15559 call *%edi
15560 jmp ret_from_exception
15561 CFI_ENDPROC
15562 -END(page_fault)
15563 +ENDPROC(page_fault)
15564
15565 /*
15566 * Debug traps and NMI can happen at the one SYSENTER instruction
15567 @@ -1309,7 +1555,7 @@ debug_stack_correct:
15568 call do_debug
15569 jmp ret_from_exception
15570 CFI_ENDPROC
15571 -END(debug)
15572 +ENDPROC(debug)
15573
15574 /*
15575 * NMI is doubly nasty. It can happen _while_ we're handling
15576 @@ -1351,6 +1597,9 @@ nmi_stack_correct:
15577 xorl %edx,%edx # zero error code
15578 movl %esp,%eax # pt_regs pointer
15579 call do_nmi
15580 +
15581 + pax_exit_kernel
15582 +
15583 jmp restore_all_notrace
15584 CFI_ENDPROC
15585
15586 @@ -1391,12 +1640,15 @@ nmi_espfix_stack:
15587 FIXUP_ESPFIX_STACK # %eax == %esp
15588 xorl %edx,%edx # zero error code
15589 call do_nmi
15590 +
15591 + pax_exit_kernel
15592 +
15593 RESTORE_REGS
15594 lss 12+4(%esp), %esp # back to espfix stack
15595 CFI_ADJUST_CFA_OFFSET -24
15596 jmp irq_return
15597 CFI_ENDPROC
15598 -END(nmi)
15599 +ENDPROC(nmi)
15600
15601 ENTRY(int3)
15602 RING0_INT_FRAME
15603 @@ -1409,7 +1661,7 @@ ENTRY(int3)
15604 call do_int3
15605 jmp ret_from_exception
15606 CFI_ENDPROC
15607 -END(int3)
15608 +ENDPROC(int3)
15609
15610 ENTRY(general_protection)
15611 RING0_EC_FRAME
15612 @@ -1417,7 +1669,7 @@ ENTRY(general_protection)
15613 CFI_ADJUST_CFA_OFFSET 4
15614 jmp error_code
15615 CFI_ENDPROC
15616 -END(general_protection)
15617 +ENDPROC(general_protection)
15618
15619 /*
15620 * End of kprobes section
15621 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
15622 index 34a56a9..4aa5c8b 100644
15623 --- a/arch/x86/kernel/entry_64.S
15624 +++ b/arch/x86/kernel/entry_64.S
15625 @@ -53,6 +53,8 @@
15626 #include <asm/paravirt.h>
15627 #include <asm/ftrace.h>
15628 #include <asm/percpu.h>
15629 +#include <asm/pgtable.h>
15630 +#include <asm/alternative-asm.h>
15631
15632 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15633 #include <linux/elf-em.h>
15634 @@ -64,8 +66,9 @@
15635 #ifdef CONFIG_FUNCTION_TRACER
15636 #ifdef CONFIG_DYNAMIC_FTRACE
15637 ENTRY(mcount)
15638 + pax_force_retaddr
15639 retq
15640 -END(mcount)
15641 +ENDPROC(mcount)
15642
15643 ENTRY(ftrace_caller)
15644 cmpl $0, function_trace_stop
15645 @@ -88,8 +91,9 @@ GLOBAL(ftrace_graph_call)
15646 #endif
15647
15648 GLOBAL(ftrace_stub)
15649 + pax_force_retaddr
15650 retq
15651 -END(ftrace_caller)
15652 +ENDPROC(ftrace_caller)
15653
15654 #else /* ! CONFIG_DYNAMIC_FTRACE */
15655 ENTRY(mcount)
15656 @@ -108,6 +112,7 @@ ENTRY(mcount)
15657 #endif
15658
15659 GLOBAL(ftrace_stub)
15660 + pax_force_retaddr
15661 retq
15662
15663 trace:
15664 @@ -117,12 +122,13 @@ trace:
15665 movq 8(%rbp), %rsi
15666 subq $MCOUNT_INSN_SIZE, %rdi
15667
15668 + pax_force_fptr ftrace_trace_function
15669 call *ftrace_trace_function
15670
15671 MCOUNT_RESTORE_FRAME
15672
15673 jmp ftrace_stub
15674 -END(mcount)
15675 +ENDPROC(mcount)
15676 #endif /* CONFIG_DYNAMIC_FTRACE */
15677 #endif /* CONFIG_FUNCTION_TRACER */
15678
15679 @@ -142,8 +148,9 @@ ENTRY(ftrace_graph_caller)
15680
15681 MCOUNT_RESTORE_FRAME
15682
15683 + pax_force_retaddr
15684 retq
15685 -END(ftrace_graph_caller)
15686 +ENDPROC(ftrace_graph_caller)
15687
15688 GLOBAL(return_to_handler)
15689 subq $24, %rsp
15690 @@ -159,6 +166,7 @@ GLOBAL(return_to_handler)
15691 movq 8(%rsp), %rdx
15692 movq (%rsp), %rax
15693 addq $16, %rsp
15694 + pax_force_retaddr
15695 retq
15696 #endif
15697
15698 @@ -174,6 +182,282 @@ ENTRY(native_usergs_sysret64)
15699 ENDPROC(native_usergs_sysret64)
15700 #endif /* CONFIG_PARAVIRT */
15701
15702 + .macro ljmpq sel, off
15703 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15704 + .byte 0x48; ljmp *1234f(%rip)
15705 + .pushsection .rodata
15706 + .align 16
15707 + 1234: .quad \off; .word \sel
15708 + .popsection
15709 +#else
15710 + pushq $\sel
15711 + pushq $\off
15712 + lretq
15713 +#endif
15714 + .endm
15715 +
15716 + .macro pax_enter_kernel
15717 + pax_set_fptr_mask
15718 +#ifdef CONFIG_PAX_KERNEXEC
15719 + call pax_enter_kernel
15720 +#endif
15721 + .endm
15722 +
15723 + .macro pax_exit_kernel
15724 +#ifdef CONFIG_PAX_KERNEXEC
15725 + call pax_exit_kernel
15726 +#endif
15727 + .endm
15728 +
15729 +#ifdef CONFIG_PAX_KERNEXEC
15730 +ENTRY(pax_enter_kernel)
15731 + pushq %rdi
15732 +
15733 +#ifdef CONFIG_PARAVIRT
15734 + PV_SAVE_REGS(CLBR_RDI)
15735 +#endif
15736 +
15737 + GET_CR0_INTO_RDI
15738 + bts $16,%rdi
15739 + jnc 3f
15740 + mov %cs,%edi
15741 + cmp $__KERNEL_CS,%edi
15742 + jnz 2f
15743 +1:
15744 +
15745 +#ifdef CONFIG_PARAVIRT
15746 + PV_RESTORE_REGS(CLBR_RDI)
15747 +#endif
15748 +
15749 + popq %rdi
15750 + pax_force_retaddr
15751 + retq
15752 +
15753 +2: ljmpq __KERNEL_CS,1f
15754 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
15755 +4: SET_RDI_INTO_CR0
15756 + jmp 1b
15757 +ENDPROC(pax_enter_kernel)
15758 +
15759 +ENTRY(pax_exit_kernel)
15760 + pushq %rdi
15761 +
15762 +#ifdef CONFIG_PARAVIRT
15763 + PV_SAVE_REGS(CLBR_RDI)
15764 +#endif
15765 +
15766 + mov %cs,%rdi
15767 + cmp $__KERNEXEC_KERNEL_CS,%edi
15768 + jz 2f
15769 +1:
15770 +
15771 +#ifdef CONFIG_PARAVIRT
15772 + PV_RESTORE_REGS(CLBR_RDI);
15773 +#endif
15774 +
15775 + popq %rdi
15776 + pax_force_retaddr
15777 + retq
15778 +
15779 +2: GET_CR0_INTO_RDI
15780 + btr $16,%rdi
15781 + ljmpq __KERNEL_CS,3f
15782 +3: SET_RDI_INTO_CR0
15783 + jmp 1b
15784 +#ifdef CONFIG_PARAVIRT
15785 + PV_RESTORE_REGS(CLBR_RDI);
15786 +#endif
15787 +
15788 + popq %rdi
15789 + pax_force_retaddr
15790 + retq
15791 +ENDPROC(pax_exit_kernel)
15792 +#endif
15793 +
15794 + .macro pax_enter_kernel_user
15795 + pax_set_fptr_mask
15796 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15797 + call pax_enter_kernel_user
15798 +#endif
15799 + .endm
15800 +
15801 + .macro pax_exit_kernel_user
15802 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15803 + call pax_exit_kernel_user
15804 +#endif
15805 +#ifdef CONFIG_PAX_RANDKSTACK
15806 + pushq %rax
15807 + call pax_randomize_kstack
15808 + popq %rax
15809 +#endif
15810 + .endm
15811 +
15812 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15813 +ENTRY(pax_enter_kernel_user)
15814 + pushq %rdi
15815 + pushq %rbx
15816 +
15817 +#ifdef CONFIG_PARAVIRT
15818 + PV_SAVE_REGS(CLBR_RDI)
15819 +#endif
15820 +
15821 + GET_CR3_INTO_RDI
15822 + mov %rdi,%rbx
15823 + add $__START_KERNEL_map,%rbx
15824 + sub phys_base(%rip),%rbx
15825 +
15826 +#ifdef CONFIG_PARAVIRT
15827 + pushq %rdi
15828 + cmpl $0, pv_info+PARAVIRT_enabled
15829 + jz 1f
15830 + i = 0
15831 + .rept USER_PGD_PTRS
15832 + mov i*8(%rbx),%rsi
15833 + mov $0,%sil
15834 + lea i*8(%rbx),%rdi
15835 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15836 + i = i + 1
15837 + .endr
15838 + jmp 2f
15839 +1:
15840 +#endif
15841 +
15842 + i = 0
15843 + .rept USER_PGD_PTRS
15844 + movb $0,i*8(%rbx)
15845 + i = i + 1
15846 + .endr
15847 +
15848 +#ifdef CONFIG_PARAVIRT
15849 +2: popq %rdi
15850 +#endif
15851 + SET_RDI_INTO_CR3
15852 +
15853 +#ifdef CONFIG_PAX_KERNEXEC
15854 + GET_CR0_INTO_RDI
15855 + bts $16,%rdi
15856 + SET_RDI_INTO_CR0
15857 +#endif
15858 +
15859 +#ifdef CONFIG_PARAVIRT
15860 + PV_RESTORE_REGS(CLBR_RDI)
15861 +#endif
15862 +
15863 + popq %rbx
15864 + popq %rdi
15865 + pax_force_retaddr
15866 + retq
15867 +ENDPROC(pax_enter_kernel_user)
15868 +
15869 +ENTRY(pax_exit_kernel_user)
15870 + push %rdi
15871 +
15872 +#ifdef CONFIG_PARAVIRT
15873 + pushq %rbx
15874 + PV_SAVE_REGS(CLBR_RDI)
15875 +#endif
15876 +
15877 +#ifdef CONFIG_PAX_KERNEXEC
15878 + GET_CR0_INTO_RDI
15879 + btr $16,%rdi
15880 + SET_RDI_INTO_CR0
15881 +#endif
15882 +
15883 + GET_CR3_INTO_RDI
15884 + add $__START_KERNEL_map,%rdi
15885 + sub phys_base(%rip),%rdi
15886 +
15887 +#ifdef CONFIG_PARAVIRT
15888 + cmpl $0, pv_info+PARAVIRT_enabled
15889 + jz 1f
15890 + mov %rdi,%rbx
15891 + i = 0
15892 + .rept USER_PGD_PTRS
15893 + mov i*8(%rbx),%rsi
15894 + mov $0x67,%sil
15895 + lea i*8(%rbx),%rdi
15896 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15897 + i = i + 1
15898 + .endr
15899 + jmp 2f
15900 +1:
15901 +#endif
15902 +
15903 + i = 0
15904 + .rept USER_PGD_PTRS
15905 + movb $0x67,i*8(%rdi)
15906 + i = i + 1
15907 + .endr
15908 +
15909 +#ifdef CONFIG_PARAVIRT
15910 +2: PV_RESTORE_REGS(CLBR_RDI)
15911 + popq %rbx
15912 +#endif
15913 +
15914 + popq %rdi
15915 + pax_force_retaddr
15916 + retq
15917 +ENDPROC(pax_exit_kernel_user)
15918 +#endif
15919 +
15920 +.macro pax_erase_kstack
15921 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15922 + call pax_erase_kstack
15923 +#endif
15924 +.endm
15925 +
15926 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15927 +/*
15928 + * r11: thread_info
15929 + * rcx, rdx: can be clobbered
15930 + */
15931 +ENTRY(pax_erase_kstack)
15932 + pushq %rdi
15933 + pushq %rax
15934 + pushq %r11
15935 +
15936 + GET_THREAD_INFO(%r11)
15937 + mov TI_lowest_stack(%r11), %rdi
15938 + mov $-0xBEEF, %rax
15939 + std
15940 +
15941 +1: mov %edi, %ecx
15942 + and $THREAD_SIZE_asm - 1, %ecx
15943 + shr $3, %ecx
15944 + repne scasq
15945 + jecxz 2f
15946 +
15947 + cmp $2*8, %ecx
15948 + jc 2f
15949 +
15950 + mov $2*8, %ecx
15951 + repe scasq
15952 + jecxz 2f
15953 + jne 1b
15954 +
15955 +2: cld
15956 + mov %esp, %ecx
15957 + sub %edi, %ecx
15958 +
15959 + cmp $THREAD_SIZE_asm, %rcx
15960 + jb 3f
15961 + ud2
15962 +3:
15963 +
15964 + shr $3, %ecx
15965 + rep stosq
15966 +
15967 + mov TI_task_thread_sp0(%r11), %rdi
15968 + sub $256, %rdi
15969 + mov %rdi, TI_lowest_stack(%r11)
15970 +
15971 + popq %r11
15972 + popq %rax
15973 + popq %rdi
15974 + pax_force_retaddr
15975 + ret
15976 +ENDPROC(pax_erase_kstack)
15977 +#endif
15978
15979 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
15980 #ifdef CONFIG_TRACE_IRQFLAGS
15981 @@ -233,8 +517,8 @@ ENDPROC(native_usergs_sysret64)
15982 .endm
15983
15984 .macro UNFAKE_STACK_FRAME
15985 - addq $8*6, %rsp
15986 - CFI_ADJUST_CFA_OFFSET -(6*8)
15987 + addq $8*6 + ARG_SKIP, %rsp
15988 + CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
15989 .endm
15990
15991 /*
15992 @@ -317,7 +601,7 @@ ENTRY(save_args)
15993 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
15994 movq_cfi rbp, 8 /* push %rbp */
15995 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
15996 - testl $3, CS(%rdi)
15997 + testb $3, CS(%rdi)
15998 je 1f
15999 SWAPGS
16000 /*
16001 @@ -337,9 +621,10 @@ ENTRY(save_args)
16002 * We entered an interrupt context - irqs are off:
16003 */
16004 2: TRACE_IRQS_OFF
16005 + pax_force_retaddr
16006 ret
16007 CFI_ENDPROC
16008 -END(save_args)
16009 +ENDPROC(save_args)
16010
16011 ENTRY(save_rest)
16012 PARTIAL_FRAME 1 REST_SKIP+8
16013 @@ -352,9 +637,10 @@ ENTRY(save_rest)
16014 movq_cfi r15, R15+16
16015 movq %r11, 8(%rsp) /* return address */
16016 FIXUP_TOP_OF_STACK %r11, 16
16017 + pax_force_retaddr
16018 ret
16019 CFI_ENDPROC
16020 -END(save_rest)
16021 +ENDPROC(save_rest)
16022
16023 /* save complete stack frame */
16024 .pushsection .kprobes.text, "ax"
16025 @@ -383,9 +669,10 @@ ENTRY(save_paranoid)
16026 js 1f /* negative -> in kernel */
16027 SWAPGS
16028 xorl %ebx,%ebx
16029 -1: ret
16030 +1: pax_force_retaddr_bts
16031 + ret
16032 CFI_ENDPROC
16033 -END(save_paranoid)
16034 +ENDPROC(save_paranoid)
16035 .popsection
16036
16037 /*
16038 @@ -409,7 +696,7 @@ ENTRY(ret_from_fork)
16039
16040 RESTORE_REST
16041
16042 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16043 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16044 je int_ret_from_sys_call
16045
16046 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
16047 @@ -419,7 +706,7 @@ ENTRY(ret_from_fork)
16048 jmp ret_from_sys_call # go to the SYSRET fastpath
16049
16050 CFI_ENDPROC
16051 -END(ret_from_fork)
16052 +ENDPROC(ret_from_fork)
16053
16054 /*
16055 * System call entry. Upto 6 arguments in registers are supported.
16056 @@ -455,7 +742,7 @@ END(ret_from_fork)
16057 ENTRY(system_call)
16058 CFI_STARTPROC simple
16059 CFI_SIGNAL_FRAME
16060 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
16061 + CFI_DEF_CFA rsp,0
16062 CFI_REGISTER rip,rcx
16063 /*CFI_REGISTER rflags,r11*/
16064 SWAPGS_UNSAFE_STACK
16065 @@ -468,12 +755,13 @@ ENTRY(system_call_after_swapgs)
16066
16067 movq %rsp,PER_CPU_VAR(old_rsp)
16068 movq PER_CPU_VAR(kernel_stack),%rsp
16069 + SAVE_ARGS 8*6,1
16070 + pax_enter_kernel_user
16071 /*
16072 * No need to follow this irqs off/on section - it's straight
16073 * and short:
16074 */
16075 ENABLE_INTERRUPTS(CLBR_NONE)
16076 - SAVE_ARGS 8,1
16077 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
16078 movq %rcx,RIP-ARGOFFSET(%rsp)
16079 CFI_REL_OFFSET rip,RIP-ARGOFFSET
16080 @@ -483,7 +771,7 @@ ENTRY(system_call_after_swapgs)
16081 system_call_fastpath:
16082 cmpq $__NR_syscall_max,%rax
16083 ja badsys
16084 - movq %r10,%rcx
16085 + movq R10-ARGOFFSET(%rsp),%rcx
16086 call *sys_call_table(,%rax,8) # XXX: rip relative
16087 movq %rax,RAX-ARGOFFSET(%rsp)
16088 /*
16089 @@ -502,6 +790,8 @@ sysret_check:
16090 andl %edi,%edx
16091 jnz sysret_careful
16092 CFI_REMEMBER_STATE
16093 + pax_exit_kernel_user
16094 + pax_erase_kstack
16095 /*
16096 * sysretq will re-enable interrupts:
16097 */
16098 @@ -555,14 +845,18 @@ badsys:
16099 * jump back to the normal fast path.
16100 */
16101 auditsys:
16102 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
16103 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
16104 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
16105 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
16106 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
16107 movq %rax,%rsi /* 2nd arg: syscall number */
16108 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
16109 call audit_syscall_entry
16110 +
16111 + pax_erase_kstack
16112 +
16113 LOAD_ARGS 0 /* reload call-clobbered registers */
16114 + pax_set_fptr_mask
16115 jmp system_call_fastpath
16116
16117 /*
16118 @@ -592,16 +886,20 @@ tracesys:
16119 FIXUP_TOP_OF_STACK %rdi
16120 movq %rsp,%rdi
16121 call syscall_trace_enter
16122 +
16123 + pax_erase_kstack
16124 +
16125 /*
16126 * Reload arg registers from stack in case ptrace changed them.
16127 * We don't reload %rax because syscall_trace_enter() returned
16128 * the value it wants us to use in the table lookup.
16129 */
16130 LOAD_ARGS ARGOFFSET, 1
16131 + pax_set_fptr_mask
16132 RESTORE_REST
16133 cmpq $__NR_syscall_max,%rax
16134 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
16135 - movq %r10,%rcx /* fixup for C */
16136 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
16137 call *sys_call_table(,%rax,8)
16138 movq %rax,RAX-ARGOFFSET(%rsp)
16139 /* Use IRET because user could have changed frame */
16140 @@ -613,7 +911,7 @@ tracesys:
16141 GLOBAL(int_ret_from_sys_call)
16142 DISABLE_INTERRUPTS(CLBR_NONE)
16143 TRACE_IRQS_OFF
16144 - testl $3,CS-ARGOFFSET(%rsp)
16145 + testb $3,CS-ARGOFFSET(%rsp)
16146 je retint_restore_args
16147 movl $_TIF_ALLWORK_MASK,%edi
16148 /* edi: mask to check */
16149 @@ -674,7 +972,7 @@ int_restore_rest:
16150 TRACE_IRQS_OFF
16151 jmp int_with_check
16152 CFI_ENDPROC
16153 -END(system_call)
16154 +ENDPROC(system_call)
16155
16156 /*
16157 * Certain special system calls that need to save a complete full stack frame.
16158 @@ -690,7 +988,7 @@ ENTRY(\label)
16159 call \func
16160 jmp ptregscall_common
16161 CFI_ENDPROC
16162 -END(\label)
16163 +ENDPROC(\label)
16164 .endm
16165
16166 PTREGSCALL stub_clone, sys_clone, %r8
16167 @@ -708,9 +1006,10 @@ ENTRY(ptregscall_common)
16168 movq_cfi_restore R12+8, r12
16169 movq_cfi_restore RBP+8, rbp
16170 movq_cfi_restore RBX+8, rbx
16171 + pax_force_retaddr
16172 ret $REST_SKIP /* pop extended registers */
16173 CFI_ENDPROC
16174 -END(ptregscall_common)
16175 +ENDPROC(ptregscall_common)
16176
16177 ENTRY(stub_execve)
16178 CFI_STARTPROC
16179 @@ -726,7 +1025,7 @@ ENTRY(stub_execve)
16180 RESTORE_REST
16181 jmp int_ret_from_sys_call
16182 CFI_ENDPROC
16183 -END(stub_execve)
16184 +ENDPROC(stub_execve)
16185
16186 /*
16187 * sigreturn is special because it needs to restore all registers on return.
16188 @@ -744,7 +1043,7 @@ ENTRY(stub_rt_sigreturn)
16189 RESTORE_REST
16190 jmp int_ret_from_sys_call
16191 CFI_ENDPROC
16192 -END(stub_rt_sigreturn)
16193 +ENDPROC(stub_rt_sigreturn)
16194
16195 /*
16196 * Build the entry stubs and pointer table with some assembler magic.
16197 @@ -780,7 +1079,7 @@ vector=vector+1
16198 2: jmp common_interrupt
16199 .endr
16200 CFI_ENDPROC
16201 -END(irq_entries_start)
16202 +ENDPROC(irq_entries_start)
16203
16204 .previous
16205 END(interrupt)
16206 @@ -800,6 +1099,16 @@ END(interrupt)
16207 CFI_ADJUST_CFA_OFFSET 10*8
16208 call save_args
16209 PARTIAL_FRAME 0
16210 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16211 + testb $3, CS(%rdi)
16212 + jnz 1f
16213 + pax_enter_kernel
16214 + jmp 2f
16215 +1: pax_enter_kernel_user
16216 +2:
16217 +#else
16218 + pax_enter_kernel
16219 +#endif
16220 call \func
16221 .endm
16222
16223 @@ -822,7 +1131,7 @@ ret_from_intr:
16224 CFI_ADJUST_CFA_OFFSET -8
16225 exit_intr:
16226 GET_THREAD_INFO(%rcx)
16227 - testl $3,CS-ARGOFFSET(%rsp)
16228 + testb $3,CS-ARGOFFSET(%rsp)
16229 je retint_kernel
16230
16231 /* Interrupt came from user space */
16232 @@ -844,12 +1153,16 @@ retint_swapgs: /* return to user-space */
16233 * The iretq could re-enable interrupts:
16234 */
16235 DISABLE_INTERRUPTS(CLBR_ANY)
16236 + pax_exit_kernel_user
16237 + pax_erase_kstack
16238 TRACE_IRQS_IRETQ
16239 SWAPGS
16240 jmp restore_args
16241
16242 retint_restore_args: /* return to kernel space */
16243 DISABLE_INTERRUPTS(CLBR_ANY)
16244 + pax_exit_kernel
16245 + pax_force_retaddr RIP-ARGOFFSET
16246 /*
16247 * The iretq could re-enable interrupts:
16248 */
16249 @@ -940,7 +1253,7 @@ ENTRY(retint_kernel)
16250 #endif
16251
16252 CFI_ENDPROC
16253 -END(common_interrupt)
16254 +ENDPROC(common_interrupt)
16255
16256 /*
16257 * APIC interrupts.
16258 @@ -953,7 +1266,7 @@ ENTRY(\sym)
16259 interrupt \do_sym
16260 jmp ret_from_intr
16261 CFI_ENDPROC
16262 -END(\sym)
16263 +ENDPROC(\sym)
16264 .endm
16265
16266 #ifdef CONFIG_SMP
16267 @@ -1032,12 +1345,22 @@ ENTRY(\sym)
16268 CFI_ADJUST_CFA_OFFSET 15*8
16269 call error_entry
16270 DEFAULT_FRAME 0
16271 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16272 + testb $3, CS(%rsp)
16273 + jnz 1f
16274 + pax_enter_kernel
16275 + jmp 2f
16276 +1: pax_enter_kernel_user
16277 +2:
16278 +#else
16279 + pax_enter_kernel
16280 +#endif
16281 movq %rsp,%rdi /* pt_regs pointer */
16282 xorl %esi,%esi /* no error code */
16283 call \do_sym
16284 jmp error_exit /* %ebx: no swapgs flag */
16285 CFI_ENDPROC
16286 -END(\sym)
16287 +ENDPROC(\sym)
16288 .endm
16289
16290 .macro paranoidzeroentry sym do_sym
16291 @@ -1049,12 +1372,22 @@ ENTRY(\sym)
16292 subq $15*8, %rsp
16293 call save_paranoid
16294 TRACE_IRQS_OFF
16295 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16296 + testb $3, CS(%rsp)
16297 + jnz 1f
16298 + pax_enter_kernel
16299 + jmp 2f
16300 +1: pax_enter_kernel_user
16301 +2:
16302 +#else
16303 + pax_enter_kernel
16304 +#endif
16305 movq %rsp,%rdi /* pt_regs pointer */
16306 xorl %esi,%esi /* no error code */
16307 call \do_sym
16308 jmp paranoid_exit /* %ebx: no swapgs flag */
16309 CFI_ENDPROC
16310 -END(\sym)
16311 +ENDPROC(\sym)
16312 .endm
16313
16314 .macro paranoidzeroentry_ist sym do_sym ist
16315 @@ -1066,15 +1399,30 @@ ENTRY(\sym)
16316 subq $15*8, %rsp
16317 call save_paranoid
16318 TRACE_IRQS_OFF
16319 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16320 + testb $3, CS(%rsp)
16321 + jnz 1f
16322 + pax_enter_kernel
16323 + jmp 2f
16324 +1: pax_enter_kernel_user
16325 +2:
16326 +#else
16327 + pax_enter_kernel
16328 +#endif
16329 movq %rsp,%rdi /* pt_regs pointer */
16330 xorl %esi,%esi /* no error code */
16331 - PER_CPU(init_tss, %rbp)
16332 +#ifdef CONFIG_SMP
16333 + imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
16334 + lea init_tss(%rbp), %rbp
16335 +#else
16336 + lea init_tss(%rip), %rbp
16337 +#endif
16338 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
16339 call \do_sym
16340 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
16341 jmp paranoid_exit /* %ebx: no swapgs flag */
16342 CFI_ENDPROC
16343 -END(\sym)
16344 +ENDPROC(\sym)
16345 .endm
16346
16347 .macro errorentry sym do_sym
16348 @@ -1085,13 +1433,23 @@ ENTRY(\sym)
16349 CFI_ADJUST_CFA_OFFSET 15*8
16350 call error_entry
16351 DEFAULT_FRAME 0
16352 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16353 + testb $3, CS(%rsp)
16354 + jnz 1f
16355 + pax_enter_kernel
16356 + jmp 2f
16357 +1: pax_enter_kernel_user
16358 +2:
16359 +#else
16360 + pax_enter_kernel
16361 +#endif
16362 movq %rsp,%rdi /* pt_regs pointer */
16363 movq ORIG_RAX(%rsp),%rsi /* get error code */
16364 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16365 call \do_sym
16366 jmp error_exit /* %ebx: no swapgs flag */
16367 CFI_ENDPROC
16368 -END(\sym)
16369 +ENDPROC(\sym)
16370 .endm
16371
16372 /* error code is on the stack already */
16373 @@ -1104,13 +1462,23 @@ ENTRY(\sym)
16374 call save_paranoid
16375 DEFAULT_FRAME 0
16376 TRACE_IRQS_OFF
16377 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16378 + testb $3, CS(%rsp)
16379 + jnz 1f
16380 + pax_enter_kernel
16381 + jmp 2f
16382 +1: pax_enter_kernel_user
16383 +2:
16384 +#else
16385 + pax_enter_kernel
16386 +#endif
16387 movq %rsp,%rdi /* pt_regs pointer */
16388 movq ORIG_RAX(%rsp),%rsi /* get error code */
16389 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16390 call \do_sym
16391 jmp paranoid_exit /* %ebx: no swapgs flag */
16392 CFI_ENDPROC
16393 -END(\sym)
16394 +ENDPROC(\sym)
16395 .endm
16396
16397 zeroentry divide_error do_divide_error
16398 @@ -1141,9 +1509,10 @@ gs_change:
16399 SWAPGS
16400 popf
16401 CFI_ADJUST_CFA_OFFSET -8
16402 + pax_force_retaddr
16403 ret
16404 CFI_ENDPROC
16405 -END(native_load_gs_index)
16406 +ENDPROC(native_load_gs_index)
16407
16408 .section __ex_table,"a"
16409 .align 8
16410 @@ -1193,11 +1562,12 @@ ENTRY(kernel_thread)
16411 * of hacks for example to fork off the per-CPU idle tasks.
16412 * [Hopefully no generic code relies on the reschedule -AK]
16413 */
16414 - RESTORE_ALL
16415 + RESTORE_REST
16416 UNFAKE_STACK_FRAME
16417 + pax_force_retaddr
16418 ret
16419 CFI_ENDPROC
16420 -END(kernel_thread)
16421 +ENDPROC(kernel_thread)
16422
16423 ENTRY(child_rip)
16424 pushq $0 # fake return address
16425 @@ -1208,13 +1578,14 @@ ENTRY(child_rip)
16426 */
16427 movq %rdi, %rax
16428 movq %rsi, %rdi
16429 + pax_force_fptr %rax
16430 call *%rax
16431 # exit
16432 mov %eax, %edi
16433 call do_exit
16434 ud2 # padding for call trace
16435 CFI_ENDPROC
16436 -END(child_rip)
16437 +ENDPROC(child_rip)
16438
16439 /*
16440 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
16441 @@ -1241,11 +1612,11 @@ ENTRY(kernel_execve)
16442 RESTORE_REST
16443 testq %rax,%rax
16444 je int_ret_from_sys_call
16445 - RESTORE_ARGS
16446 UNFAKE_STACK_FRAME
16447 + pax_force_retaddr
16448 ret
16449 CFI_ENDPROC
16450 -END(kernel_execve)
16451 +ENDPROC(kernel_execve)
16452
16453 /* Call softirq on interrupt stack. Interrupts are off. */
16454 ENTRY(call_softirq)
16455 @@ -1263,9 +1634,10 @@ ENTRY(call_softirq)
16456 CFI_DEF_CFA_REGISTER rsp
16457 CFI_ADJUST_CFA_OFFSET -8
16458 decl PER_CPU_VAR(irq_count)
16459 + pax_force_retaddr
16460 ret
16461 CFI_ENDPROC
16462 -END(call_softirq)
16463 +ENDPROC(call_softirq)
16464
16465 #ifdef CONFIG_XEN
16466 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
16467 @@ -1303,7 +1675,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
16468 decl PER_CPU_VAR(irq_count)
16469 jmp error_exit
16470 CFI_ENDPROC
16471 -END(xen_do_hypervisor_callback)
16472 +ENDPROC(xen_do_hypervisor_callback)
16473
16474 /*
16475 * Hypervisor uses this for application faults while it executes.
16476 @@ -1362,7 +1734,7 @@ ENTRY(xen_failsafe_callback)
16477 SAVE_ALL
16478 jmp error_exit
16479 CFI_ENDPROC
16480 -END(xen_failsafe_callback)
16481 +ENDPROC(xen_failsafe_callback)
16482
16483 #endif /* CONFIG_XEN */
16484
16485 @@ -1405,16 +1777,31 @@ ENTRY(paranoid_exit)
16486 TRACE_IRQS_OFF
16487 testl %ebx,%ebx /* swapgs needed? */
16488 jnz paranoid_restore
16489 - testl $3,CS(%rsp)
16490 + testb $3,CS(%rsp)
16491 jnz paranoid_userspace
16492 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16493 + pax_exit_kernel
16494 + TRACE_IRQS_IRETQ 0
16495 + SWAPGS_UNSAFE_STACK
16496 + RESTORE_ALL 8
16497 + pax_force_retaddr_bts
16498 + jmp irq_return
16499 +#endif
16500 paranoid_swapgs:
16501 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16502 + pax_exit_kernel_user
16503 +#else
16504 + pax_exit_kernel
16505 +#endif
16506 TRACE_IRQS_IRETQ 0
16507 SWAPGS_UNSAFE_STACK
16508 RESTORE_ALL 8
16509 jmp irq_return
16510 paranoid_restore:
16511 + pax_exit_kernel
16512 TRACE_IRQS_IRETQ 0
16513 RESTORE_ALL 8
16514 + pax_force_retaddr_bts
16515 jmp irq_return
16516 paranoid_userspace:
16517 GET_THREAD_INFO(%rcx)
16518 @@ -1443,7 +1830,7 @@ paranoid_schedule:
16519 TRACE_IRQS_OFF
16520 jmp paranoid_userspace
16521 CFI_ENDPROC
16522 -END(paranoid_exit)
16523 +ENDPROC(paranoid_exit)
16524
16525 /*
16526 * Exception entry point. This expects an error code/orig_rax on the stack.
16527 @@ -1470,12 +1857,13 @@ ENTRY(error_entry)
16528 movq_cfi r14, R14+8
16529 movq_cfi r15, R15+8
16530 xorl %ebx,%ebx
16531 - testl $3,CS+8(%rsp)
16532 + testb $3,CS+8(%rsp)
16533 je error_kernelspace
16534 error_swapgs:
16535 SWAPGS
16536 error_sti:
16537 TRACE_IRQS_OFF
16538 + pax_force_retaddr_bts
16539 ret
16540 CFI_ENDPROC
16541
16542 @@ -1497,7 +1885,7 @@ error_kernelspace:
16543 cmpq $gs_change,RIP+8(%rsp)
16544 je error_swapgs
16545 jmp error_sti
16546 -END(error_entry)
16547 +ENDPROC(error_entry)
16548
16549
16550 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
16551 @@ -1517,7 +1905,7 @@ ENTRY(error_exit)
16552 jnz retint_careful
16553 jmp retint_swapgs
16554 CFI_ENDPROC
16555 -END(error_exit)
16556 +ENDPROC(error_exit)
16557
16558
16559 /* runs on exception stack */
16560 @@ -1529,6 +1917,16 @@ ENTRY(nmi)
16561 CFI_ADJUST_CFA_OFFSET 15*8
16562 call save_paranoid
16563 DEFAULT_FRAME 0
16564 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16565 + testb $3, CS(%rsp)
16566 + jnz 1f
16567 + pax_enter_kernel
16568 + jmp 2f
16569 +1: pax_enter_kernel_user
16570 +2:
16571 +#else
16572 + pax_enter_kernel
16573 +#endif
16574 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
16575 movq %rsp,%rdi
16576 movq $-1,%rsi
16577 @@ -1539,12 +1937,28 @@ ENTRY(nmi)
16578 DISABLE_INTERRUPTS(CLBR_NONE)
16579 testl %ebx,%ebx /* swapgs needed? */
16580 jnz nmi_restore
16581 - testl $3,CS(%rsp)
16582 + testb $3,CS(%rsp)
16583 jnz nmi_userspace
16584 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16585 + pax_exit_kernel
16586 + SWAPGS_UNSAFE_STACK
16587 + RESTORE_ALL 8
16588 + pax_force_retaddr_bts
16589 + jmp irq_return
16590 +#endif
16591 nmi_swapgs:
16592 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16593 + pax_exit_kernel_user
16594 +#else
16595 + pax_exit_kernel
16596 +#endif
16597 SWAPGS_UNSAFE_STACK
16598 + RESTORE_ALL 8
16599 + jmp irq_return
16600 nmi_restore:
16601 + pax_exit_kernel
16602 RESTORE_ALL 8
16603 + pax_force_retaddr_bts
16604 jmp irq_return
16605 nmi_userspace:
16606 GET_THREAD_INFO(%rcx)
16607 @@ -1573,14 +1987,14 @@ nmi_schedule:
16608 jmp paranoid_exit
16609 CFI_ENDPROC
16610 #endif
16611 -END(nmi)
16612 +ENDPROC(nmi)
16613
16614 ENTRY(ignore_sysret)
16615 CFI_STARTPROC
16616 mov $-ENOSYS,%eax
16617 sysret
16618 CFI_ENDPROC
16619 -END(ignore_sysret)
16620 +ENDPROC(ignore_sysret)
16621
16622 /*
16623 * End of kprobes section
16624 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
16625 index 9dbb527..7b3615a 100644
16626 --- a/arch/x86/kernel/ftrace.c
16627 +++ b/arch/x86/kernel/ftrace.c
16628 @@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the IP to write to */
16629 static void *mod_code_newcode; /* holds the text to write to the IP */
16630
16631 static unsigned nmi_wait_count;
16632 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
16633 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
16634
16635 int ftrace_arch_read_dyn_info(char *buf, int size)
16636 {
16637 @@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
16638
16639 r = snprintf(buf, size, "%u %u",
16640 nmi_wait_count,
16641 - atomic_read(&nmi_update_count));
16642 + atomic_read_unchecked(&nmi_update_count));
16643 return r;
16644 }
16645
16646 @@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
16647 {
16648 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
16649 smp_rmb();
16650 + pax_open_kernel();
16651 ftrace_mod_code();
16652 - atomic_inc(&nmi_update_count);
16653 + pax_close_kernel();
16654 + atomic_inc_unchecked(&nmi_update_count);
16655 }
16656 /* Must have previous changes seen before executions */
16657 smp_mb();
16658 @@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
16659
16660
16661
16662 -static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
16663 +static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
16664
16665 static unsigned char *ftrace_nop_replace(void)
16666 {
16667 @@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
16668 {
16669 unsigned char replaced[MCOUNT_INSN_SIZE];
16670
16671 + ip = ktla_ktva(ip);
16672 +
16673 /*
16674 * Note: Due to modules and __init, code can
16675 * disappear and change, we need to protect against faulting
16676 @@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16677 unsigned char old[MCOUNT_INSN_SIZE], *new;
16678 int ret;
16679
16680 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16681 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16682 new = ftrace_call_replace(ip, (unsigned long)func);
16683 ret = ftrace_modify_code(ip, old, new);
16684
16685 @@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *data)
16686 switch (faulted) {
16687 case 0:
16688 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
16689 - memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
16690 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
16691 break;
16692 case 1:
16693 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
16694 - memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
16695 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
16696 break;
16697 case 2:
16698 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
16699 - memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
16700 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
16701 break;
16702 }
16703
16704 @@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16705 {
16706 unsigned char code[MCOUNT_INSN_SIZE];
16707
16708 + ip = ktla_ktva(ip);
16709 +
16710 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16711 return -EFAULT;
16712
16713 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16714 index 4f8e250..df24706 100644
16715 --- a/arch/x86/kernel/head32.c
16716 +++ b/arch/x86/kernel/head32.c
16717 @@ -16,6 +16,7 @@
16718 #include <asm/apic.h>
16719 #include <asm/io_apic.h>
16720 #include <asm/bios_ebda.h>
16721 +#include <asm/boot.h>
16722
16723 static void __init i386_default_early_setup(void)
16724 {
16725 @@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
16726 {
16727 reserve_trampoline_memory();
16728
16729 - reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
16730 + reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
16731
16732 #ifdef CONFIG_BLK_DEV_INITRD
16733 /* Reserve INITRD */
16734 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16735 index 34c3308..6fc4e76 100644
16736 --- a/arch/x86/kernel/head_32.S
16737 +++ b/arch/x86/kernel/head_32.S
16738 @@ -19,10 +19,17 @@
16739 #include <asm/setup.h>
16740 #include <asm/processor-flags.h>
16741 #include <asm/percpu.h>
16742 +#include <asm/msr-index.h>
16743
16744 /* Physical address */
16745 #define pa(X) ((X) - __PAGE_OFFSET)
16746
16747 +#ifdef CONFIG_PAX_KERNEXEC
16748 +#define ta(X) (X)
16749 +#else
16750 +#define ta(X) ((X) - __PAGE_OFFSET)
16751 +#endif
16752 +
16753 /*
16754 * References to members of the new_cpu_data structure.
16755 */
16756 @@ -52,11 +59,7 @@
16757 * and small than max_low_pfn, otherwise will waste some page table entries
16758 */
16759
16760 -#if PTRS_PER_PMD > 1
16761 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
16762 -#else
16763 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
16764 -#endif
16765 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
16766
16767 /* Enough space to fit pagetables for the low memory linear map */
16768 MAPPING_BEYOND_END = \
16769 @@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm
16770 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16771
16772 /*
16773 + * Real beginning of normal "text" segment
16774 + */
16775 +ENTRY(stext)
16776 +ENTRY(_stext)
16777 +
16778 +/*
16779 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
16780 * %esi points to the real-mode code as a 32-bit pointer.
16781 * CS and DS must be 4 GB flat segments, but we don't depend on
16782 @@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16783 * can.
16784 */
16785 __HEAD
16786 +
16787 +#ifdef CONFIG_PAX_KERNEXEC
16788 + jmp startup_32
16789 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
16790 +.fill PAGE_SIZE-5,1,0xcc
16791 +#endif
16792 +
16793 ENTRY(startup_32)
16794 + movl pa(stack_start),%ecx
16795 +
16796 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
16797 us to not reload segments */
16798 testb $(1<<6), BP_loadflags(%esi)
16799 @@ -95,7 +113,60 @@ ENTRY(startup_32)
16800 movl %eax,%es
16801 movl %eax,%fs
16802 movl %eax,%gs
16803 + movl %eax,%ss
16804 2:
16805 + leal -__PAGE_OFFSET(%ecx),%esp
16806 +
16807 +#ifdef CONFIG_SMP
16808 + movl $pa(cpu_gdt_table),%edi
16809 + movl $__per_cpu_load,%eax
16810 + movw %ax,__KERNEL_PERCPU + 2(%edi)
16811 + rorl $16,%eax
16812 + movb %al,__KERNEL_PERCPU + 4(%edi)
16813 + movb %ah,__KERNEL_PERCPU + 7(%edi)
16814 + movl $__per_cpu_end - 1,%eax
16815 + subl $__per_cpu_start,%eax
16816 + movw %ax,__KERNEL_PERCPU + 0(%edi)
16817 +#endif
16818 +
16819 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16820 + movl $NR_CPUS,%ecx
16821 + movl $pa(cpu_gdt_table),%edi
16822 +1:
16823 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
16824 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
16825 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
16826 + addl $PAGE_SIZE_asm,%edi
16827 + loop 1b
16828 +#endif
16829 +
16830 +#ifdef CONFIG_PAX_KERNEXEC
16831 + movl $pa(boot_gdt),%edi
16832 + movl $__LOAD_PHYSICAL_ADDR,%eax
16833 + movw %ax,__BOOT_CS + 2(%edi)
16834 + rorl $16,%eax
16835 + movb %al,__BOOT_CS + 4(%edi)
16836 + movb %ah,__BOOT_CS + 7(%edi)
16837 + rorl $16,%eax
16838 +
16839 + ljmp $(__BOOT_CS),$1f
16840 +1:
16841 +
16842 + movl $NR_CPUS,%ecx
16843 + movl $pa(cpu_gdt_table),%edi
16844 + addl $__PAGE_OFFSET,%eax
16845 +1:
16846 + movw %ax,__KERNEL_CS + 2(%edi)
16847 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
16848 + rorl $16,%eax
16849 + movb %al,__KERNEL_CS + 4(%edi)
16850 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
16851 + movb %ah,__KERNEL_CS + 7(%edi)
16852 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
16853 + rorl $16,%eax
16854 + addl $PAGE_SIZE_asm,%edi
16855 + loop 1b
16856 +#endif
16857
16858 /*
16859 * Clear BSS first so that there are no surprises...
16860 @@ -140,9 +211,7 @@ ENTRY(startup_32)
16861 cmpl $num_subarch_entries, %eax
16862 jae bad_subarch
16863
16864 - movl pa(subarch_entries)(,%eax,4), %eax
16865 - subl $__PAGE_OFFSET, %eax
16866 - jmp *%eax
16867 + jmp *pa(subarch_entries)(,%eax,4)
16868
16869 bad_subarch:
16870 WEAK(lguest_entry)
16871 @@ -154,10 +223,10 @@ WEAK(xen_entry)
16872 __INITDATA
16873
16874 subarch_entries:
16875 - .long default_entry /* normal x86/PC */
16876 - .long lguest_entry /* lguest hypervisor */
16877 - .long xen_entry /* Xen hypervisor */
16878 - .long default_entry /* Moorestown MID */
16879 + .long ta(default_entry) /* normal x86/PC */
16880 + .long ta(lguest_entry) /* lguest hypervisor */
16881 + .long ta(xen_entry) /* Xen hypervisor */
16882 + .long ta(default_entry) /* Moorestown MID */
16883 num_subarch_entries = (. - subarch_entries) / 4
16884 .previous
16885 #endif /* CONFIG_PARAVIRT */
16886 @@ -218,8 +287,11 @@ default_entry:
16887 movl %eax, pa(max_pfn_mapped)
16888
16889 /* Do early initialization of the fixmap area */
16890 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
16891 - movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
16892 +#ifdef CONFIG_COMPAT_VDSO
16893 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
16894 +#else
16895 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
16896 +#endif
16897 #else /* Not PAE */
16898
16899 page_pde_offset = (__PAGE_OFFSET >> 20);
16900 @@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16901 movl %eax, pa(max_pfn_mapped)
16902
16903 /* Do early initialization of the fixmap area */
16904 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
16905 - movl %eax,pa(swapper_pg_dir+0xffc)
16906 +#ifdef CONFIG_COMPAT_VDSO
16907 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
16908 +#else
16909 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
16910 +#endif
16911 #endif
16912 jmp 3f
16913 /*
16914 @@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
16915 movl %eax,%es
16916 movl %eax,%fs
16917 movl %eax,%gs
16918 + movl pa(stack_start),%ecx
16919 + movl %eax,%ss
16920 + leal -__PAGE_OFFSET(%ecx),%esp
16921 #endif /* CONFIG_SMP */
16922 3:
16923
16924 @@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
16925 orl %edx,%eax
16926 movl %eax,%cr4
16927
16928 +#ifdef CONFIG_X86_PAE
16929 btl $5, %eax # check if PAE is enabled
16930 jnc 6f
16931
16932 @@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
16933 cpuid
16934 cmpl $0x80000000, %eax
16935 jbe 6f
16936 +
16937 + /* Clear bogus XD_DISABLE bits */
16938 + call verify_cpu
16939 +
16940 mov $0x80000001, %eax
16941 cpuid
16942 /* Execute Disable bit supported? */
16943 @@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
16944 jnc 6f
16945
16946 /* Setup EFER (Extended Feature Enable Register) */
16947 - movl $0xc0000080, %ecx
16948 + movl $MSR_EFER, %ecx
16949 rdmsr
16950
16951 btsl $11, %eax
16952 /* Make changes effective */
16953 wrmsr
16954
16955 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
16956 + movl $1,pa(nx_enabled)
16957 +#endif
16958 +
16959 6:
16960
16961 /*
16962 @@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
16963 movl %eax,%cr0 /* ..and set paging (PG) bit */
16964 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
16965 1:
16966 - /* Set up the stack pointer */
16967 - lss stack_start,%esp
16968 + /* Shift the stack pointer to a virtual address */
16969 + addl $__PAGE_OFFSET, %esp
16970
16971 /*
16972 * Initialize eflags. Some BIOS's leave bits like NT set. This would
16973 @@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
16974
16975 #ifdef CONFIG_SMP
16976 cmpb $0, ready
16977 - jz 1f /* Initial CPU cleans BSS */
16978 - jmp checkCPUtype
16979 -1:
16980 + jnz checkCPUtype
16981 #endif /* CONFIG_SMP */
16982
16983 /*
16984 @@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
16985 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
16986 movl %eax,%ss # after changing gdt.
16987
16988 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
16989 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
16990 movl %eax,%ds
16991 movl %eax,%es
16992
16993 @@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
16994 */
16995 cmpb $0,ready
16996 jne 1f
16997 - movl $per_cpu__gdt_page,%eax
16998 + movl $cpu_gdt_table,%eax
16999 movl $per_cpu__stack_canary,%ecx
17000 +#ifdef CONFIG_SMP
17001 + addl $__per_cpu_load,%ecx
17002 +#endif
17003 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
17004 shrl $16, %ecx
17005 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
17006 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
17007 1:
17008 -#endif
17009 movl $(__KERNEL_STACK_CANARY),%eax
17010 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
17011 + movl $(__USER_DS),%eax
17012 +#else
17013 + xorl %eax,%eax
17014 +#endif
17015 movl %eax,%gs
17016
17017 xorl %eax,%eax # Clear LDT
17018 @@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
17019
17020 cld # gcc2 wants the direction flag cleared at all times
17021 pushl $0 # fake return address for unwinder
17022 -#ifdef CONFIG_SMP
17023 - movb ready, %cl
17024 movb $1, ready
17025 - cmpb $0,%cl # the first CPU calls start_kernel
17026 - je 1f
17027 - movl (stack_start), %esp
17028 -1:
17029 -#endif /* CONFIG_SMP */
17030 jmp *(initial_code)
17031
17032 /*
17033 @@ -546,22 +631,22 @@ early_page_fault:
17034 jmp early_fault
17035
17036 early_fault:
17037 - cld
17038 #ifdef CONFIG_PRINTK
17039 + cmpl $1,%ss:early_recursion_flag
17040 + je hlt_loop
17041 + incl %ss:early_recursion_flag
17042 + cld
17043 pusha
17044 movl $(__KERNEL_DS),%eax
17045 movl %eax,%ds
17046 movl %eax,%es
17047 - cmpl $2,early_recursion_flag
17048 - je hlt_loop
17049 - incl early_recursion_flag
17050 movl %cr2,%eax
17051 pushl %eax
17052 pushl %edx /* trapno */
17053 pushl $fault_msg
17054 call printk
17055 +; call dump_stack
17056 #endif
17057 - call dump_stack
17058 hlt_loop:
17059 hlt
17060 jmp hlt_loop
17061 @@ -569,8 +654,11 @@ hlt_loop:
17062 /* This is the default interrupt "handler" :-) */
17063 ALIGN
17064 ignore_int:
17065 - cld
17066 #ifdef CONFIG_PRINTK
17067 + cmpl $2,%ss:early_recursion_flag
17068 + je hlt_loop
17069 + incl %ss:early_recursion_flag
17070 + cld
17071 pushl %eax
17072 pushl %ecx
17073 pushl %edx
17074 @@ -579,9 +667,6 @@ ignore_int:
17075 movl $(__KERNEL_DS),%eax
17076 movl %eax,%ds
17077 movl %eax,%es
17078 - cmpl $2,early_recursion_flag
17079 - je hlt_loop
17080 - incl early_recursion_flag
17081 pushl 16(%esp)
17082 pushl 24(%esp)
17083 pushl 32(%esp)
17084 @@ -600,6 +685,8 @@ ignore_int:
17085 #endif
17086 iret
17087
17088 +#include "verify_cpu.S"
17089 +
17090 __REFDATA
17091 .align 4
17092 ENTRY(initial_code)
17093 @@ -610,31 +697,47 @@ ENTRY(initial_page_table)
17094 /*
17095 * BSS section
17096 */
17097 -__PAGE_ALIGNED_BSS
17098 - .align PAGE_SIZE_asm
17099 #ifdef CONFIG_X86_PAE
17100 +.section .swapper_pg_pmd,"a",@progbits
17101 swapper_pg_pmd:
17102 .fill 1024*KPMDS,4,0
17103 #else
17104 +.section .swapper_pg_dir,"a",@progbits
17105 ENTRY(swapper_pg_dir)
17106 .fill 1024,4,0
17107 #endif
17108 +.section .swapper_pg_fixmap,"a",@progbits
17109 swapper_pg_fixmap:
17110 .fill 1024,4,0
17111 #ifdef CONFIG_X86_TRAMPOLINE
17112 +.section .trampoline_pg_dir,"a",@progbits
17113 ENTRY(trampoline_pg_dir)
17114 +#ifdef CONFIG_X86_PAE
17115 + .fill 4,8,0
17116 +#else
17117 .fill 1024,4,0
17118 #endif
17119 +#endif
17120 +
17121 +.section .empty_zero_page,"a",@progbits
17122 ENTRY(empty_zero_page)
17123 .fill 4096,1,0
17124
17125 /*
17126 + * The IDT has to be page-aligned to simplify the Pentium
17127 + * F0 0F bug workaround.. We have a special link segment
17128 + * for this.
17129 + */
17130 +.section .idt,"a",@progbits
17131 +ENTRY(idt_table)
17132 + .fill 256,8,0
17133 +
17134 +/*
17135 * This starts the data section.
17136 */
17137 #ifdef CONFIG_X86_PAE
17138 -__PAGE_ALIGNED_DATA
17139 - /* Page-aligned for the benefit of paravirt? */
17140 - .align PAGE_SIZE_asm
17141 +.section .swapper_pg_dir,"a",@progbits
17142 +
17143 ENTRY(swapper_pg_dir)
17144 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
17145 # if KPMDS == 3
17146 @@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
17147 # error "Kernel PMDs should be 1, 2 or 3"
17148 # endif
17149 .align PAGE_SIZE_asm /* needs to be page-sized too */
17150 +
17151 +#ifdef CONFIG_PAX_PER_CPU_PGD
17152 +ENTRY(cpu_pgd)
17153 + .rept NR_CPUS
17154 + .fill 4,8,0
17155 + .endr
17156 +#endif
17157 +
17158 #endif
17159
17160 .data
17161 +.balign 4
17162 ENTRY(stack_start)
17163 - .long init_thread_union+THREAD_SIZE
17164 - .long __BOOT_DS
17165 + .long init_thread_union+THREAD_SIZE-8
17166
17167 ready: .byte 0
17168
17169 +.section .rodata,"a",@progbits
17170 early_recursion_flag:
17171 .long 0
17172
17173 @@ -697,7 +809,7 @@ fault_msg:
17174 .word 0 # 32 bit align gdt_desc.address
17175 boot_gdt_descr:
17176 .word __BOOT_DS+7
17177 - .long boot_gdt - __PAGE_OFFSET
17178 + .long pa(boot_gdt)
17179
17180 .word 0 # 32-bit align idt_desc.address
17181 idt_descr:
17182 @@ -708,7 +820,7 @@ idt_descr:
17183 .word 0 # 32 bit align gdt_desc.address
17184 ENTRY(early_gdt_descr)
17185 .word GDT_ENTRIES*8-1
17186 - .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
17187 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
17188
17189 /*
17190 * The boot_gdt must mirror the equivalent in setup.S and is
17191 @@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
17192 .align L1_CACHE_BYTES
17193 ENTRY(boot_gdt)
17194 .fill GDT_ENTRY_BOOT_CS,8,0
17195 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
17196 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
17197 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
17198 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
17199 +
17200 + .align PAGE_SIZE_asm
17201 +ENTRY(cpu_gdt_table)
17202 + .rept NR_CPUS
17203 + .quad 0x0000000000000000 /* NULL descriptor */
17204 + .quad 0x0000000000000000 /* 0x0b reserved */
17205 + .quad 0x0000000000000000 /* 0x13 reserved */
17206 + .quad 0x0000000000000000 /* 0x1b reserved */
17207 +
17208 +#ifdef CONFIG_PAX_KERNEXEC
17209 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
17210 +#else
17211 + .quad 0x0000000000000000 /* 0x20 unused */
17212 +#endif
17213 +
17214 + .quad 0x0000000000000000 /* 0x28 unused */
17215 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
17216 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
17217 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
17218 + .quad 0x0000000000000000 /* 0x4b reserved */
17219 + .quad 0x0000000000000000 /* 0x53 reserved */
17220 + .quad 0x0000000000000000 /* 0x5b reserved */
17221 +
17222 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
17223 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
17224 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
17225 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
17226 +
17227 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
17228 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
17229 +
17230 + /*
17231 + * Segments used for calling PnP BIOS have byte granularity.
17232 + * The code segments and data segments have fixed 64k limits,
17233 + * the transfer segment sizes are set at run time.
17234 + */
17235 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
17236 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
17237 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
17238 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
17239 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
17240 +
17241 + /*
17242 + * The APM segments have byte granularity and their bases
17243 + * are set at run time. All have 64k limits.
17244 + */
17245 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
17246 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
17247 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
17248 +
17249 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
17250 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
17251 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
17252 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
17253 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
17254 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
17255 +
17256 + /* Be sure this is zeroed to avoid false validations in Xen */
17257 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
17258 + .endr
17259 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
17260 index 780cd92..758b2a6 100644
17261 --- a/arch/x86/kernel/head_64.S
17262 +++ b/arch/x86/kernel/head_64.S
17263 @@ -19,6 +19,8 @@
17264 #include <asm/cache.h>
17265 #include <asm/processor-flags.h>
17266 #include <asm/percpu.h>
17267 +#include <asm/cpufeature.h>
17268 +#include <asm/alternative-asm.h>
17269
17270 #ifdef CONFIG_PARAVIRT
17271 #include <asm/asm-offsets.h>
17272 @@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
17273 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
17274 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
17275 L3_START_KERNEL = pud_index(__START_KERNEL_map)
17276 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
17277 +L3_VMALLOC_START = pud_index(VMALLOC_START)
17278 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
17279 +L3_VMALLOC_END = pud_index(VMALLOC_END)
17280 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
17281 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
17282
17283 .text
17284 __HEAD
17285 @@ -85,35 +93,23 @@ startup_64:
17286 */
17287 addq %rbp, init_level4_pgt + 0(%rip)
17288 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
17289 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
17290 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
17291 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
17292 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
17293
17294 addq %rbp, level3_ident_pgt + 0(%rip)
17295 +#ifndef CONFIG_XEN
17296 + addq %rbp, level3_ident_pgt + 8(%rip)
17297 +#endif
17298
17299 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
17300 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
17301 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
17302 +
17303 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
17304 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
17305
17306 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
17307 -
17308 - /* Add an Identity mapping if I am above 1G */
17309 - leaq _text(%rip), %rdi
17310 - andq $PMD_PAGE_MASK, %rdi
17311 -
17312 - movq %rdi, %rax
17313 - shrq $PUD_SHIFT, %rax
17314 - andq $(PTRS_PER_PUD - 1), %rax
17315 - jz ident_complete
17316 -
17317 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
17318 - leaq level3_ident_pgt(%rip), %rbx
17319 - movq %rdx, 0(%rbx, %rax, 8)
17320 -
17321 - movq %rdi, %rax
17322 - shrq $PMD_SHIFT, %rax
17323 - andq $(PTRS_PER_PMD - 1), %rax
17324 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
17325 - leaq level2_spare_pgt(%rip), %rbx
17326 - movq %rdx, 0(%rbx, %rax, 8)
17327 -ident_complete:
17328 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
17329
17330 /*
17331 * Fixup the kernel text+data virtual addresses. Note that
17332 @@ -161,8 +157,8 @@ ENTRY(secondary_startup_64)
17333 * after the boot processor executes this code.
17334 */
17335
17336 - /* Enable PAE mode and PGE */
17337 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
17338 + /* Enable PAE mode and PSE/PGE */
17339 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17340 movq %rax, %cr4
17341
17342 /* Setup early boot stage 4 level pagetables. */
17343 @@ -184,9 +180,16 @@ ENTRY(secondary_startup_64)
17344 movl $MSR_EFER, %ecx
17345 rdmsr
17346 btsl $_EFER_SCE, %eax /* Enable System Call */
17347 - btl $20,%edi /* No Execute supported? */
17348 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
17349 jnc 1f
17350 btsl $_EFER_NX, %eax
17351 + leaq init_level4_pgt(%rip), %rdi
17352 +#ifndef CONFIG_EFI
17353 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
17354 +#endif
17355 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
17356 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
17357 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
17358 1: wrmsr /* Make changes effective */
17359
17360 /* Setup cr0 */
17361 @@ -249,6 +252,7 @@ ENTRY(secondary_startup_64)
17362 * jump. In addition we need to ensure %cs is set so we make this
17363 * a far return.
17364 */
17365 + pax_set_fptr_mask
17366 movq initial_code(%rip),%rax
17367 pushq $0 # fake return address to stop unwinder
17368 pushq $__KERNEL_CS # set correct cs
17369 @@ -262,16 +266,16 @@ ENTRY(secondary_startup_64)
17370 .quad x86_64_start_kernel
17371 ENTRY(initial_gs)
17372 .quad INIT_PER_CPU_VAR(irq_stack_union)
17373 - __FINITDATA
17374
17375 ENTRY(stack_start)
17376 .quad init_thread_union+THREAD_SIZE-8
17377 .word 0
17378 + __FINITDATA
17379
17380 bad_address:
17381 jmp bad_address
17382
17383 - .section ".init.text","ax"
17384 + __INIT
17385 #ifdef CONFIG_EARLY_PRINTK
17386 .globl early_idt_handlers
17387 early_idt_handlers:
17388 @@ -316,18 +320,23 @@ ENTRY(early_idt_handler)
17389 #endif /* EARLY_PRINTK */
17390 1: hlt
17391 jmp 1b
17392 + .previous
17393
17394 #ifdef CONFIG_EARLY_PRINTK
17395 + __INITDATA
17396 early_recursion_flag:
17397 .long 0
17398 + .previous
17399
17400 + .section .rodata,"a",@progbits
17401 early_idt_msg:
17402 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
17403 early_idt_ripmsg:
17404 .asciz "RIP %s\n"
17405 + .previous
17406 #endif /* CONFIG_EARLY_PRINTK */
17407 - .previous
17408
17409 + .section .rodata,"a",@progbits
17410 #define NEXT_PAGE(name) \
17411 .balign PAGE_SIZE; \
17412 ENTRY(name)
17413 @@ -350,13 +359,41 @@ NEXT_PAGE(init_level4_pgt)
17414 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17415 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
17416 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17417 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
17418 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
17419 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
17420 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
17421 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
17422 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17423 .org init_level4_pgt + L4_START_KERNEL*8, 0
17424 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
17425 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
17426
17427 +#ifdef CONFIG_PAX_PER_CPU_PGD
17428 +NEXT_PAGE(cpu_pgd)
17429 + .rept NR_CPUS
17430 + .fill 512,8,0
17431 + .endr
17432 +#endif
17433 +
17434 NEXT_PAGE(level3_ident_pgt)
17435 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17436 +#ifdef CONFIG_XEN
17437 .fill 511,8,0
17438 +#else
17439 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
17440 + .fill 510,8,0
17441 +#endif
17442 +
17443 +NEXT_PAGE(level3_vmalloc_start_pgt)
17444 + .fill 512,8,0
17445 +
17446 +NEXT_PAGE(level3_vmalloc_end_pgt)
17447 + .fill 512,8,0
17448 +
17449 +NEXT_PAGE(level3_vmemmap_pgt)
17450 + .fill L3_VMEMMAP_START,8,0
17451 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17452
17453 NEXT_PAGE(level3_kernel_pgt)
17454 .fill L3_START_KERNEL,8,0
17455 @@ -364,20 +401,23 @@ NEXT_PAGE(level3_kernel_pgt)
17456 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
17457 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17458
17459 +NEXT_PAGE(level2_vmemmap_pgt)
17460 + .fill 512,8,0
17461 +
17462 NEXT_PAGE(level2_fixmap_pgt)
17463 - .fill 506,8,0
17464 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17465 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
17466 - .fill 5,8,0
17467 + .fill 507,8,0
17468 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
17469 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
17470 + .fill 4,8,0
17471
17472 -NEXT_PAGE(level1_fixmap_pgt)
17473 +NEXT_PAGE(level1_vsyscall_pgt)
17474 .fill 512,8,0
17475
17476 -NEXT_PAGE(level2_ident_pgt)
17477 - /* Since I easily can, map the first 1G.
17478 + /* Since I easily can, map the first 2G.
17479 * Don't set NX because code runs from these pages.
17480 */
17481 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
17482 +NEXT_PAGE(level2_ident_pgt)
17483 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
17484
17485 NEXT_PAGE(level2_kernel_pgt)
17486 /*
17487 @@ -390,33 +430,55 @@ NEXT_PAGE(level2_kernel_pgt)
17488 * If you want to increase this then increase MODULES_VADDR
17489 * too.)
17490 */
17491 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
17492 - KERNEL_IMAGE_SIZE/PMD_SIZE)
17493 -
17494 -NEXT_PAGE(level2_spare_pgt)
17495 - .fill 512, 8, 0
17496 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
17497
17498 #undef PMDS
17499 #undef NEXT_PAGE
17500
17501 - .data
17502 + .align PAGE_SIZE
17503 +ENTRY(cpu_gdt_table)
17504 + .rept NR_CPUS
17505 + .quad 0x0000000000000000 /* NULL descriptor */
17506 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
17507 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
17508 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
17509 + .quad 0x00cffb000000ffff /* __USER32_CS */
17510 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
17511 + .quad 0x00affb000000ffff /* __USER_CS */
17512 +
17513 +#ifdef CONFIG_PAX_KERNEXEC
17514 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
17515 +#else
17516 + .quad 0x0 /* unused */
17517 +#endif
17518 +
17519 + .quad 0,0 /* TSS */
17520 + .quad 0,0 /* LDT */
17521 + .quad 0,0,0 /* three TLS descriptors */
17522 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
17523 + /* asm/segment.h:GDT_ENTRIES must match this */
17524 +
17525 + /* zero the remaining page */
17526 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
17527 + .endr
17528 +
17529 .align 16
17530 .globl early_gdt_descr
17531 early_gdt_descr:
17532 .word GDT_ENTRIES*8-1
17533 early_gdt_descr_base:
17534 - .quad INIT_PER_CPU_VAR(gdt_page)
17535 + .quad cpu_gdt_table
17536
17537 ENTRY(phys_base)
17538 /* This must match the first entry in level2_kernel_pgt */
17539 .quad 0x0000000000000000
17540
17541 #include "../../x86/xen/xen-head.S"
17542 -
17543 - .section .bss, "aw", @nobits
17544 +
17545 + .section .rodata,"a",@progbits
17546 .align L1_CACHE_BYTES
17547 ENTRY(idt_table)
17548 - .skip IDT_ENTRIES * 16
17549 + .fill 512,8,0
17550
17551 __PAGE_ALIGNED_BSS
17552 .align PAGE_SIZE
17553 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
17554 index 9c3bd4a..e1d9b35 100644
17555 --- a/arch/x86/kernel/i386_ksyms_32.c
17556 +++ b/arch/x86/kernel/i386_ksyms_32.c
17557 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
17558 EXPORT_SYMBOL(cmpxchg8b_emu);
17559 #endif
17560
17561 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
17562 +
17563 /* Networking helper routines. */
17564 EXPORT_SYMBOL(csum_partial_copy_generic);
17565 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
17566 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
17567
17568 EXPORT_SYMBOL(__get_user_1);
17569 EXPORT_SYMBOL(__get_user_2);
17570 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
17571
17572 EXPORT_SYMBOL(csum_partial);
17573 EXPORT_SYMBOL(empty_zero_page);
17574 +
17575 +#ifdef CONFIG_PAX_KERNEXEC
17576 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
17577 +#endif
17578 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
17579 index df89102..a244320 100644
17580 --- a/arch/x86/kernel/i8259.c
17581 +++ b/arch/x86/kernel/i8259.c
17582 @@ -208,7 +208,7 @@ spurious_8259A_irq:
17583 "spurious 8259A interrupt: IRQ%d.\n", irq);
17584 spurious_irq_mask |= irqmask;
17585 }
17586 - atomic_inc(&irq_err_count);
17587 + atomic_inc_unchecked(&irq_err_count);
17588 /*
17589 * Theoretically we do not have to handle this IRQ,
17590 * but in Linux this does not cause problems and is
17591 diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
17592 index 3a54dcb..1c22348 100644
17593 --- a/arch/x86/kernel/init_task.c
17594 +++ b/arch/x86/kernel/init_task.c
17595 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
17596 * way process stacks are handled. This is done by having a special
17597 * "init_task" linker map entry..
17598 */
17599 -union thread_union init_thread_union __init_task_data =
17600 - { INIT_THREAD_INFO(init_task) };
17601 +union thread_union init_thread_union __init_task_data;
17602
17603 /*
17604 * Initial task structure.
17605 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
17606 * section. Since TSS's are completely CPU-local, we want them
17607 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
17608 */
17609 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
17610 -
17611 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
17612 +EXPORT_SYMBOL(init_tss);
17613 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
17614 index 99c4d30..74c84e9 100644
17615 --- a/arch/x86/kernel/ioport.c
17616 +++ b/arch/x86/kernel/ioport.c
17617 @@ -6,6 +6,7 @@
17618 #include <linux/sched.h>
17619 #include <linux/kernel.h>
17620 #include <linux/capability.h>
17621 +#include <linux/security.h>
17622 #include <linux/errno.h>
17623 #include <linux/types.h>
17624 #include <linux/ioport.h>
17625 @@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17626
17627 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
17628 return -EINVAL;
17629 +#ifdef CONFIG_GRKERNSEC_IO
17630 + if (turn_on && grsec_disable_privio) {
17631 + gr_handle_ioperm();
17632 + return -EPERM;
17633 + }
17634 +#endif
17635 if (turn_on && !capable(CAP_SYS_RAWIO))
17636 return -EPERM;
17637
17638 @@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17639 * because the ->io_bitmap_max value must match the bitmap
17640 * contents:
17641 */
17642 - tss = &per_cpu(init_tss, get_cpu());
17643 + tss = init_tss + get_cpu();
17644
17645 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
17646
17647 @@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, struct pt_regs *regs)
17648 return -EINVAL;
17649 /* Trying to gain more privileges? */
17650 if (level > old) {
17651 +#ifdef CONFIG_GRKERNSEC_IO
17652 + if (grsec_disable_privio) {
17653 + gr_handle_iopl();
17654 + return -EPERM;
17655 + }
17656 +#endif
17657 if (!capable(CAP_SYS_RAWIO))
17658 return -EPERM;
17659 }
17660 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
17661 index 04bbd52..83a07d9 100644
17662 --- a/arch/x86/kernel/irq.c
17663 +++ b/arch/x86/kernel/irq.c
17664 @@ -15,7 +15,7 @@
17665 #include <asm/mce.h>
17666 #include <asm/hw_irq.h>
17667
17668 -atomic_t irq_err_count;
17669 +atomic_unchecked_t irq_err_count;
17670
17671 /* Function pointer for generic interrupt vector handling */
17672 void (*generic_interrupt_extension)(void) = NULL;
17673 @@ -114,9 +114,9 @@ static int show_other_interrupts(struct seq_file *p, int prec)
17674 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
17675 seq_printf(p, " Machine check polls\n");
17676 #endif
17677 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
17678 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
17679 #if defined(CONFIG_X86_IO_APIC)
17680 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
17681 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
17682 #endif
17683 return 0;
17684 }
17685 @@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
17686
17687 u64 arch_irq_stat(void)
17688 {
17689 - u64 sum = atomic_read(&irq_err_count);
17690 + u64 sum = atomic_read_unchecked(&irq_err_count);
17691
17692 #ifdef CONFIG_X86_IO_APIC
17693 - sum += atomic_read(&irq_mis_count);
17694 + sum += atomic_read_unchecked(&irq_mis_count);
17695 #endif
17696 return sum;
17697 }
17698 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
17699 index 7d35d0f..03f1d52 100644
17700 --- a/arch/x86/kernel/irq_32.c
17701 +++ b/arch/x86/kernel/irq_32.c
17702 @@ -35,7 +35,7 @@ static int check_stack_overflow(void)
17703 __asm__ __volatile__("andl %%esp,%0" :
17704 "=r" (sp) : "0" (THREAD_SIZE - 1));
17705
17706 - return sp < (sizeof(struct thread_info) + STACK_WARN);
17707 + return sp < STACK_WARN;
17708 }
17709
17710 static void print_stack_overflow(void)
17711 @@ -54,9 +54,9 @@ static inline void print_stack_overflow(void) { }
17712 * per-CPU IRQ handling contexts (thread information and stack)
17713 */
17714 union irq_ctx {
17715 - struct thread_info tinfo;
17716 - u32 stack[THREAD_SIZE/sizeof(u32)];
17717 -} __attribute__((aligned(PAGE_SIZE)));
17718 + unsigned long previous_esp;
17719 + u32 stack[THREAD_SIZE/sizeof(u32)];
17720 +} __attribute__((aligned(THREAD_SIZE)));
17721
17722 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
17723 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
17724 @@ -78,10 +78,9 @@ static void call_on_stack(void *func, void *stack)
17725 static inline int
17726 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17727 {
17728 - union irq_ctx *curctx, *irqctx;
17729 + union irq_ctx *irqctx;
17730 u32 *isp, arg1, arg2;
17731
17732 - curctx = (union irq_ctx *) current_thread_info();
17733 irqctx = __get_cpu_var(hardirq_ctx);
17734
17735 /*
17736 @@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17737 * handler) we can't do that and just have to keep using the
17738 * current stack (which is the irq stack already after all)
17739 */
17740 - if (unlikely(curctx == irqctx))
17741 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
17742 return 0;
17743
17744 /* build the stack frame on the IRQ stack */
17745 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17746 - irqctx->tinfo.task = curctx->tinfo.task;
17747 - irqctx->tinfo.previous_esp = current_stack_pointer;
17748 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17749 + irqctx->previous_esp = current_stack_pointer;
17750
17751 - /*
17752 - * Copy the softirq bits in preempt_count so that the
17753 - * softirq checks work in the hardirq context.
17754 - */
17755 - irqctx->tinfo.preempt_count =
17756 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
17757 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
17758 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17759 + __set_fs(MAKE_MM_SEG(0));
17760 +#endif
17761
17762 if (unlikely(overflow))
17763 call_on_stack(print_stack_overflow, isp);
17764 @@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17765 : "0" (irq), "1" (desc), "2" (isp),
17766 "D" (desc->handle_irq)
17767 : "memory", "cc", "ecx");
17768 +
17769 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17770 + __set_fs(current_thread_info()->addr_limit);
17771 +#endif
17772 +
17773 return 1;
17774 }
17775
17776 @@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17777 */
17778 void __cpuinit irq_ctx_init(int cpu)
17779 {
17780 - union irq_ctx *irqctx;
17781 -
17782 if (per_cpu(hardirq_ctx, cpu))
17783 return;
17784
17785 - irqctx = &per_cpu(hardirq_stack, cpu);
17786 - irqctx->tinfo.task = NULL;
17787 - irqctx->tinfo.exec_domain = NULL;
17788 - irqctx->tinfo.cpu = cpu;
17789 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
17790 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17791 -
17792 - per_cpu(hardirq_ctx, cpu) = irqctx;
17793 -
17794 - irqctx = &per_cpu(softirq_stack, cpu);
17795 - irqctx->tinfo.task = NULL;
17796 - irqctx->tinfo.exec_domain = NULL;
17797 - irqctx->tinfo.cpu = cpu;
17798 - irqctx->tinfo.preempt_count = 0;
17799 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17800 -
17801 - per_cpu(softirq_ctx, cpu) = irqctx;
17802 + per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
17803 + per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
17804
17805 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
17806 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
17807 @@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
17808 asmlinkage void do_softirq(void)
17809 {
17810 unsigned long flags;
17811 - struct thread_info *curctx;
17812 union irq_ctx *irqctx;
17813 u32 *isp;
17814
17815 @@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
17816 local_irq_save(flags);
17817
17818 if (local_softirq_pending()) {
17819 - curctx = current_thread_info();
17820 irqctx = __get_cpu_var(softirq_ctx);
17821 - irqctx->tinfo.task = curctx->task;
17822 - irqctx->tinfo.previous_esp = current_stack_pointer;
17823 + irqctx->previous_esp = current_stack_pointer;
17824
17825 /* build the stack frame on the softirq stack */
17826 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17827 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17828 +
17829 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17830 + __set_fs(MAKE_MM_SEG(0));
17831 +#endif
17832
17833 call_on_stack(__do_softirq, isp);
17834 +
17835 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17836 + __set_fs(current_thread_info()->addr_limit);
17837 +#endif
17838 +
17839 /*
17840 * Shouldnt happen, we returned above if in_interrupt():
17841 */
17842 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
17843 index 8d82a77..0baf312 100644
17844 --- a/arch/x86/kernel/kgdb.c
17845 +++ b/arch/x86/kernel/kgdb.c
17846 @@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
17847
17848 /* clear the trace bit */
17849 linux_regs->flags &= ~X86_EFLAGS_TF;
17850 - atomic_set(&kgdb_cpu_doing_single_step, -1);
17851 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
17852
17853 /* set the trace bit if we're stepping */
17854 if (remcomInBuffer[0] == 's') {
17855 linux_regs->flags |= X86_EFLAGS_TF;
17856 kgdb_single_step = 1;
17857 - atomic_set(&kgdb_cpu_doing_single_step,
17858 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
17859 raw_smp_processor_id());
17860 }
17861
17862 @@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
17863 break;
17864
17865 case DIE_DEBUG:
17866 - if (atomic_read(&kgdb_cpu_doing_single_step) ==
17867 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
17868 raw_smp_processor_id()) {
17869 if (user_mode(regs))
17870 return single_step_cont(regs, args);
17871 @@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
17872 return instruction_pointer(regs);
17873 }
17874
17875 -struct kgdb_arch arch_kgdb_ops = {
17876 +const struct kgdb_arch arch_kgdb_ops = {
17877 /* Breakpoint instruction: */
17878 .gdb_bpt_instr = { 0xcc },
17879 .flags = KGDB_HW_BREAKPOINT,
17880 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
17881 index 7a67820..8d15b75 100644
17882 --- a/arch/x86/kernel/kprobes.c
17883 +++ b/arch/x86/kernel/kprobes.c
17884 @@ -168,9 +168,13 @@ static void __kprobes set_jmp_op(void *from, void *to)
17885 char op;
17886 s32 raddr;
17887 } __attribute__((packed)) * jop;
17888 - jop = (struct __arch_jmp_op *)from;
17889 +
17890 + jop = (struct __arch_jmp_op *)(ktla_ktva(from));
17891 +
17892 + pax_open_kernel();
17893 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
17894 jop->op = RELATIVEJUMP_INSTRUCTION;
17895 + pax_close_kernel();
17896 }
17897
17898 /*
17899 @@ -195,7 +199,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
17900 kprobe_opcode_t opcode;
17901 kprobe_opcode_t *orig_opcodes = opcodes;
17902
17903 - if (search_exception_tables((unsigned long)opcodes))
17904 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
17905 return 0; /* Page fault may occur on this address. */
17906
17907 retry:
17908 @@ -339,7 +343,9 @@ static void __kprobes fix_riprel(struct kprobe *p)
17909 disp = (u8 *) p->addr + *((s32 *) insn) -
17910 (u8 *) p->ainsn.insn;
17911 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
17912 + pax_open_kernel();
17913 *(s32 *)insn = (s32) disp;
17914 + pax_close_kernel();
17915 }
17916 }
17917 #endif
17918 @@ -347,16 +353,18 @@ static void __kprobes fix_riprel(struct kprobe *p)
17919
17920 static void __kprobes arch_copy_kprobe(struct kprobe *p)
17921 {
17922 - memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
17923 + pax_open_kernel();
17924 + memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
17925 + pax_close_kernel();
17926
17927 fix_riprel(p);
17928
17929 - if (can_boost(p->addr))
17930 + if (can_boost(ktla_ktva(p->addr)))
17931 p->ainsn.boostable = 0;
17932 else
17933 p->ainsn.boostable = -1;
17934
17935 - p->opcode = *p->addr;
17936 + p->opcode = *(ktla_ktva(p->addr));
17937 }
17938
17939 int __kprobes arch_prepare_kprobe(struct kprobe *p)
17940 @@ -434,7 +442,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
17941 if (p->opcode == BREAKPOINT_INSTRUCTION)
17942 regs->ip = (unsigned long)p->addr;
17943 else
17944 - regs->ip = (unsigned long)p->ainsn.insn;
17945 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17946 }
17947
17948 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
17949 @@ -455,7 +463,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
17950 if (p->ainsn.boostable == 1 && !p->post_handler) {
17951 /* Boost up -- we can execute copied instructions directly */
17952 reset_current_kprobe();
17953 - regs->ip = (unsigned long)p->ainsn.insn;
17954 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17955 preempt_enable_no_resched();
17956 return;
17957 }
17958 @@ -525,7 +533,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
17959 struct kprobe_ctlblk *kcb;
17960
17961 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
17962 - if (*addr != BREAKPOINT_INSTRUCTION) {
17963 + if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
17964 /*
17965 * The breakpoint instruction was removed right
17966 * after we hit it. Another cpu has removed
17967 @@ -637,6 +645,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
17968 /* Skip orig_ax, ip, cs */
17969 " addq $24, %rsp\n"
17970 " popfq\n"
17971 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17972 + " btsq $63,(%rsp)\n"
17973 +#endif
17974 #else
17975 " pushf\n"
17976 /*
17977 @@ -777,7 +788,7 @@ static void __kprobes resume_execution(struct kprobe *p,
17978 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
17979 {
17980 unsigned long *tos = stack_addr(regs);
17981 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
17982 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
17983 unsigned long orig_ip = (unsigned long)p->addr;
17984 kprobe_opcode_t *insn = p->ainsn.insn;
17985
17986 @@ -960,7 +971,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
17987 struct die_args *args = data;
17988 int ret = NOTIFY_DONE;
17989
17990 - if (args->regs && user_mode_vm(args->regs))
17991 + if (args->regs && user_mode(args->regs))
17992 return ret;
17993
17994 switch (val) {
17995 diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
17996 index 63b0ec8..6d92227 100644
17997 --- a/arch/x86/kernel/kvm.c
17998 +++ b/arch/x86/kernel/kvm.c
17999 @@ -216,6 +216,7 @@ static void __init paravirt_ops_setup(void)
18000 pv_mmu_ops.set_pud = kvm_set_pud;
18001 #if PAGETABLE_LEVELS == 4
18002 pv_mmu_ops.set_pgd = kvm_set_pgd;
18003 + pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
18004 #endif
18005 #endif
18006 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
18007 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
18008 index ec6ef60..ab2c824 100644
18009 --- a/arch/x86/kernel/ldt.c
18010 +++ b/arch/x86/kernel/ldt.c
18011 @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
18012 if (reload) {
18013 #ifdef CONFIG_SMP
18014 preempt_disable();
18015 - load_LDT(pc);
18016 + load_LDT_nolock(pc);
18017 if (!cpumask_equal(mm_cpumask(current->mm),
18018 cpumask_of(smp_processor_id())))
18019 smp_call_function(flush_ldt, current->mm, 1);
18020 preempt_enable();
18021 #else
18022 - load_LDT(pc);
18023 + load_LDT_nolock(pc);
18024 #endif
18025 }
18026 if (oldsize) {
18027 @@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
18028 return err;
18029
18030 for (i = 0; i < old->size; i++)
18031 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
18032 + write_ldt_entry(new->ldt, i, old->ldt + i);
18033 return 0;
18034 }
18035
18036 @@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
18037 retval = copy_ldt(&mm->context, &old_mm->context);
18038 mutex_unlock(&old_mm->context.lock);
18039 }
18040 +
18041 + if (tsk == current) {
18042 + mm->context.vdso = 0;
18043 +
18044 +#ifdef CONFIG_X86_32
18045 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18046 + mm->context.user_cs_base = 0UL;
18047 + mm->context.user_cs_limit = ~0UL;
18048 +
18049 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
18050 + cpus_clear(mm->context.cpu_user_cs_mask);
18051 +#endif
18052 +
18053 +#endif
18054 +#endif
18055 +
18056 + }
18057 +
18058 return retval;
18059 }
18060
18061 @@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
18062 }
18063 }
18064
18065 +#ifdef CONFIG_PAX_SEGMEXEC
18066 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
18067 + error = -EINVAL;
18068 + goto out_unlock;
18069 + }
18070 +#endif
18071 +
18072 fill_ldt(&ldt, &ldt_info);
18073 if (oldmode)
18074 ldt.avl = 0;
18075 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
18076 index c1c429d..f02eaf9 100644
18077 --- a/arch/x86/kernel/machine_kexec_32.c
18078 +++ b/arch/x86/kernel/machine_kexec_32.c
18079 @@ -26,7 +26,7 @@
18080 #include <asm/system.h>
18081 #include <asm/cacheflush.h>
18082
18083 -static void set_idt(void *newidt, __u16 limit)
18084 +static void set_idt(struct desc_struct *newidt, __u16 limit)
18085 {
18086 struct desc_ptr curidt;
18087
18088 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
18089 }
18090
18091
18092 -static void set_gdt(void *newgdt, __u16 limit)
18093 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
18094 {
18095 struct desc_ptr curgdt;
18096
18097 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
18098 }
18099
18100 control_page = page_address(image->control_code_page);
18101 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
18102 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
18103
18104 relocate_kernel_ptr = control_page;
18105 page_list[PA_CONTROL_PAGE] = __pa(control_page);
18106 diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
18107 index 1e47679..e73449d 100644
18108 --- a/arch/x86/kernel/microcode_amd.c
18109 +++ b/arch/x86/kernel/microcode_amd.c
18110 @@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int cpu)
18111 uci->mc = NULL;
18112 }
18113
18114 -static struct microcode_ops microcode_amd_ops = {
18115 +static const struct microcode_ops microcode_amd_ops = {
18116 .request_microcode_user = request_microcode_user,
18117 .request_microcode_fw = request_microcode_fw,
18118 .collect_cpu_info = collect_cpu_info_amd,
18119 @@ -372,7 +372,7 @@ static struct microcode_ops microcode_amd_ops = {
18120 .microcode_fini_cpu = microcode_fini_cpu_amd,
18121 };
18122
18123 -struct microcode_ops * __init init_amd_microcode(void)
18124 +const struct microcode_ops * __init init_amd_microcode(void)
18125 {
18126 return &microcode_amd_ops;
18127 }
18128 diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
18129 index 378e9a8..b5a6ea9 100644
18130 --- a/arch/x86/kernel/microcode_core.c
18131 +++ b/arch/x86/kernel/microcode_core.c
18132 @@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
18133
18134 #define MICROCODE_VERSION "2.00"
18135
18136 -static struct microcode_ops *microcode_ops;
18137 +static const struct microcode_ops *microcode_ops;
18138
18139 /*
18140 * Synchronization.
18141 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
18142 index 0d334dd..14cedaf 100644
18143 --- a/arch/x86/kernel/microcode_intel.c
18144 +++ b/arch/x86/kernel/microcode_intel.c
18145 @@ -443,13 +443,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
18146
18147 static int get_ucode_user(void *to, const void *from, size_t n)
18148 {
18149 - return copy_from_user(to, from, n);
18150 + return copy_from_user(to, (const void __force_user *)from, n);
18151 }
18152
18153 static enum ucode_state
18154 request_microcode_user(int cpu, const void __user *buf, size_t size)
18155 {
18156 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
18157 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
18158 }
18159
18160 static void microcode_fini_cpu(int cpu)
18161 @@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
18162 uci->mc = NULL;
18163 }
18164
18165 -static struct microcode_ops microcode_intel_ops = {
18166 +static const struct microcode_ops microcode_intel_ops = {
18167 .request_microcode_user = request_microcode_user,
18168 .request_microcode_fw = request_microcode_fw,
18169 .collect_cpu_info = collect_cpu_info,
18170 @@ -468,7 +468,7 @@ static struct microcode_ops microcode_intel_ops = {
18171 .microcode_fini_cpu = microcode_fini_cpu,
18172 };
18173
18174 -struct microcode_ops * __init init_intel_microcode(void)
18175 +const struct microcode_ops * __init init_intel_microcode(void)
18176 {
18177 return &microcode_intel_ops;
18178 }
18179 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
18180 index 89f386f..9028f51 100644
18181 --- a/arch/x86/kernel/module.c
18182 +++ b/arch/x86/kernel/module.c
18183 @@ -34,7 +34,7 @@
18184 #define DEBUGP(fmt...)
18185 #endif
18186
18187 -void *module_alloc(unsigned long size)
18188 +static void *__module_alloc(unsigned long size, pgprot_t prot)
18189 {
18190 struct vm_struct *area;
18191
18192 @@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
18193 if (!area)
18194 return NULL;
18195
18196 - return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
18197 - PAGE_KERNEL_EXEC);
18198 + return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
18199 +}
18200 +
18201 +void *module_alloc(unsigned long size)
18202 +{
18203 +
18204 +#ifdef CONFIG_PAX_KERNEXEC
18205 + return __module_alloc(size, PAGE_KERNEL);
18206 +#else
18207 + return __module_alloc(size, PAGE_KERNEL_EXEC);
18208 +#endif
18209 +
18210 }
18211
18212 /* Free memory returned from module_alloc */
18213 @@ -58,6 +68,40 @@ void module_free(struct module *mod, void *module_region)
18214 vfree(module_region);
18215 }
18216
18217 +#ifdef CONFIG_PAX_KERNEXEC
18218 +#ifdef CONFIG_X86_32
18219 +void *module_alloc_exec(unsigned long size)
18220 +{
18221 + struct vm_struct *area;
18222 +
18223 + if (size == 0)
18224 + return NULL;
18225 +
18226 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
18227 + return area ? area->addr : NULL;
18228 +}
18229 +EXPORT_SYMBOL(module_alloc_exec);
18230 +
18231 +void module_free_exec(struct module *mod, void *module_region)
18232 +{
18233 + vunmap(module_region);
18234 +}
18235 +EXPORT_SYMBOL(module_free_exec);
18236 +#else
18237 +void module_free_exec(struct module *mod, void *module_region)
18238 +{
18239 + module_free(mod, module_region);
18240 +}
18241 +EXPORT_SYMBOL(module_free_exec);
18242 +
18243 +void *module_alloc_exec(unsigned long size)
18244 +{
18245 + return __module_alloc(size, PAGE_KERNEL_RX);
18246 +}
18247 +EXPORT_SYMBOL(module_alloc_exec);
18248 +#endif
18249 +#endif
18250 +
18251 /* We don't need anything special. */
18252 int module_frob_arch_sections(Elf_Ehdr *hdr,
18253 Elf_Shdr *sechdrs,
18254 @@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18255 unsigned int i;
18256 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
18257 Elf32_Sym *sym;
18258 - uint32_t *location;
18259 + uint32_t *plocation, location;
18260
18261 DEBUGP("Applying relocate section %u to %u\n", relsec,
18262 sechdrs[relsec].sh_info);
18263 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
18264 /* This is where to make the change */
18265 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
18266 - + rel[i].r_offset;
18267 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
18268 + location = (uint32_t)plocation;
18269 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
18270 + plocation = ktla_ktva((void *)plocation);
18271 /* This is the symbol it is referring to. Note that all
18272 undefined symbols have been resolved. */
18273 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
18274 @@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18275 switch (ELF32_R_TYPE(rel[i].r_info)) {
18276 case R_386_32:
18277 /* We add the value into the location given */
18278 - *location += sym->st_value;
18279 + pax_open_kernel();
18280 + *plocation += sym->st_value;
18281 + pax_close_kernel();
18282 break;
18283 case R_386_PC32:
18284 /* Add the value, subtract its postition */
18285 - *location += sym->st_value - (uint32_t)location;
18286 + pax_open_kernel();
18287 + *plocation += sym->st_value - location;
18288 + pax_close_kernel();
18289 break;
18290 default:
18291 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
18292 @@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
18293 case R_X86_64_NONE:
18294 break;
18295 case R_X86_64_64:
18296 + pax_open_kernel();
18297 *(u64 *)loc = val;
18298 + pax_close_kernel();
18299 break;
18300 case R_X86_64_32:
18301 + pax_open_kernel();
18302 *(u32 *)loc = val;
18303 + pax_close_kernel();
18304 if (val != *(u32 *)loc)
18305 goto overflow;
18306 break;
18307 case R_X86_64_32S:
18308 + pax_open_kernel();
18309 *(s32 *)loc = val;
18310 + pax_close_kernel();
18311 if ((s64)val != *(s32 *)loc)
18312 goto overflow;
18313 break;
18314 case R_X86_64_PC32:
18315 val -= (u64)loc;
18316 + pax_open_kernel();
18317 *(u32 *)loc = val;
18318 + pax_close_kernel();
18319 +
18320 #if 0
18321 if ((s64)val != *(s32 *)loc)
18322 goto overflow;
18323 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
18324 index 3a7c5a4..9191528 100644
18325 --- a/arch/x86/kernel/paravirt-spinlocks.c
18326 +++ b/arch/x86/kernel/paravirt-spinlocks.c
18327 @@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
18328 __raw_spin_lock(lock);
18329 }
18330
18331 -struct pv_lock_ops pv_lock_ops = {
18332 +struct pv_lock_ops pv_lock_ops __read_only = {
18333 #ifdef CONFIG_SMP
18334 .spin_is_locked = __ticket_spin_is_locked,
18335 .spin_is_contended = __ticket_spin_is_contended,
18336 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
18337 index 1b1739d..dea6077 100644
18338 --- a/arch/x86/kernel/paravirt.c
18339 +++ b/arch/x86/kernel/paravirt.c
18340 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
18341 {
18342 return x;
18343 }
18344 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18345 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
18346 +#endif
18347
18348 void __init default_banner(void)
18349 {
18350 @@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
18351 * corresponding structure. */
18352 static void *get_call_destination(u8 type)
18353 {
18354 - struct paravirt_patch_template tmpl = {
18355 + const struct paravirt_patch_template tmpl = {
18356 .pv_init_ops = pv_init_ops,
18357 .pv_time_ops = pv_time_ops,
18358 .pv_cpu_ops = pv_cpu_ops,
18359 @@ -133,6 +136,8 @@ static void *get_call_destination(u8 type)
18360 .pv_lock_ops = pv_lock_ops,
18361 #endif
18362 };
18363 +
18364 + pax_track_stack();
18365 return *((void **)&tmpl + type);
18366 }
18367
18368 @@ -145,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
18369 if (opfunc == NULL)
18370 /* If there's no function, patch it with a ud2a (BUG) */
18371 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
18372 - else if (opfunc == _paravirt_nop)
18373 + else if (opfunc == (void *)_paravirt_nop)
18374 /* If the operation is a nop, then nop the callsite */
18375 ret = paravirt_patch_nop();
18376
18377 /* identity functions just return their single argument */
18378 - else if (opfunc == _paravirt_ident_32)
18379 + else if (opfunc == (void *)_paravirt_ident_32)
18380 ret = paravirt_patch_ident_32(insnbuf, len);
18381 - else if (opfunc == _paravirt_ident_64)
18382 + else if (opfunc == (void *)_paravirt_ident_64)
18383 ret = paravirt_patch_ident_64(insnbuf, len);
18384 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18385 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
18386 + ret = paravirt_patch_ident_64(insnbuf, len);
18387 +#endif
18388
18389 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
18390 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
18391 @@ -178,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
18392 if (insn_len > len || start == NULL)
18393 insn_len = len;
18394 else
18395 - memcpy(insnbuf, start, insn_len);
18396 + memcpy(insnbuf, ktla_ktva(start), insn_len);
18397
18398 return insn_len;
18399 }
18400 @@ -294,22 +303,22 @@ void arch_flush_lazy_mmu_mode(void)
18401 preempt_enable();
18402 }
18403
18404 -struct pv_info pv_info = {
18405 +struct pv_info pv_info __read_only = {
18406 .name = "bare hardware",
18407 .paravirt_enabled = 0,
18408 .kernel_rpl = 0,
18409 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
18410 };
18411
18412 -struct pv_init_ops pv_init_ops = {
18413 +struct pv_init_ops pv_init_ops __read_only = {
18414 .patch = native_patch,
18415 };
18416
18417 -struct pv_time_ops pv_time_ops = {
18418 +struct pv_time_ops pv_time_ops __read_only = {
18419 .sched_clock = native_sched_clock,
18420 };
18421
18422 -struct pv_irq_ops pv_irq_ops = {
18423 +struct pv_irq_ops pv_irq_ops __read_only = {
18424 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
18425 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
18426 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
18427 @@ -321,7 +330,7 @@ struct pv_irq_ops pv_irq_ops = {
18428 #endif
18429 };
18430
18431 -struct pv_cpu_ops pv_cpu_ops = {
18432 +struct pv_cpu_ops pv_cpu_ops __read_only = {
18433 .cpuid = native_cpuid,
18434 .get_debugreg = native_get_debugreg,
18435 .set_debugreg = native_set_debugreg,
18436 @@ -382,21 +391,26 @@ struct pv_cpu_ops pv_cpu_ops = {
18437 .end_context_switch = paravirt_nop,
18438 };
18439
18440 -struct pv_apic_ops pv_apic_ops = {
18441 +struct pv_apic_ops pv_apic_ops __read_only = {
18442 #ifdef CONFIG_X86_LOCAL_APIC
18443 .startup_ipi_hook = paravirt_nop,
18444 #endif
18445 };
18446
18447 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
18448 +#ifdef CONFIG_X86_32
18449 +#ifdef CONFIG_X86_PAE
18450 +/* 64-bit pagetable entries */
18451 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
18452 +#else
18453 /* 32-bit pagetable entries */
18454 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
18455 +#endif
18456 #else
18457 /* 64-bit pagetable entries */
18458 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
18459 #endif
18460
18461 -struct pv_mmu_ops pv_mmu_ops = {
18462 +struct pv_mmu_ops pv_mmu_ops __read_only = {
18463
18464 .read_cr2 = native_read_cr2,
18465 .write_cr2 = native_write_cr2,
18466 @@ -448,6 +462,7 @@ struct pv_mmu_ops pv_mmu_ops = {
18467 .make_pud = PTE_IDENT,
18468
18469 .set_pgd = native_set_pgd,
18470 + .set_pgd_batched = native_set_pgd_batched,
18471 #endif
18472 #endif /* PAGETABLE_LEVELS >= 3 */
18473
18474 @@ -467,6 +482,12 @@ struct pv_mmu_ops pv_mmu_ops = {
18475 },
18476
18477 .set_fixmap = native_set_fixmap,
18478 +
18479 +#ifdef CONFIG_PAX_KERNEXEC
18480 + .pax_open_kernel = native_pax_open_kernel,
18481 + .pax_close_kernel = native_pax_close_kernel,
18482 +#endif
18483 +
18484 };
18485
18486 EXPORT_SYMBOL_GPL(pv_time_ops);
18487 diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
18488 index 1a2d4b1..6a0dd55 100644
18489 --- a/arch/x86/kernel/pci-calgary_64.c
18490 +++ b/arch/x86/kernel/pci-calgary_64.c
18491 @@ -477,7 +477,7 @@ static void calgary_free_coherent(struct device *dev, size_t size,
18492 free_pages((unsigned long)vaddr, get_order(size));
18493 }
18494
18495 -static struct dma_map_ops calgary_dma_ops = {
18496 +static const struct dma_map_ops calgary_dma_ops = {
18497 .alloc_coherent = calgary_alloc_coherent,
18498 .free_coherent = calgary_free_coherent,
18499 .map_sg = calgary_map_sg,
18500 diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
18501 index 6ac3931..42b4414 100644
18502 --- a/arch/x86/kernel/pci-dma.c
18503 +++ b/arch/x86/kernel/pci-dma.c
18504 @@ -14,7 +14,7 @@
18505
18506 static int forbid_dac __read_mostly;
18507
18508 -struct dma_map_ops *dma_ops;
18509 +const struct dma_map_ops *dma_ops;
18510 EXPORT_SYMBOL(dma_ops);
18511
18512 static int iommu_sac_force __read_mostly;
18513 @@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
18514
18515 int dma_supported(struct device *dev, u64 mask)
18516 {
18517 - struct dma_map_ops *ops = get_dma_ops(dev);
18518 + const struct dma_map_ops *ops = get_dma_ops(dev);
18519
18520 #ifdef CONFIG_PCI
18521 if (mask > 0xffffffff && forbid_dac > 0) {
18522 diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
18523 index 1c76691..e3632db 100644
18524 --- a/arch/x86/kernel/pci-gart_64.c
18525 +++ b/arch/x86/kernel/pci-gart_64.c
18526 @@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
18527 return -1;
18528 }
18529
18530 -static struct dma_map_ops gart_dma_ops = {
18531 +static const struct dma_map_ops gart_dma_ops = {
18532 .map_sg = gart_map_sg,
18533 .unmap_sg = gart_unmap_sg,
18534 .map_page = gart_map_page,
18535 diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
18536 index a3933d4..c898869 100644
18537 --- a/arch/x86/kernel/pci-nommu.c
18538 +++ b/arch/x86/kernel/pci-nommu.c
18539 @@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(struct device *dev,
18540 flush_write_buffers();
18541 }
18542
18543 -struct dma_map_ops nommu_dma_ops = {
18544 +const struct dma_map_ops nommu_dma_ops = {
18545 .alloc_coherent = dma_generic_alloc_coherent,
18546 .free_coherent = nommu_free_coherent,
18547 .map_sg = nommu_map_sg,
18548 diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
18549 index aaa6b78..4de1881 100644
18550 --- a/arch/x86/kernel/pci-swiotlb.c
18551 +++ b/arch/x86/kernel/pci-swiotlb.c
18552 @@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
18553 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
18554 }
18555
18556 -static struct dma_map_ops swiotlb_dma_ops = {
18557 +static const struct dma_map_ops swiotlb_dma_ops = {
18558 .mapping_error = swiotlb_dma_mapping_error,
18559 .alloc_coherent = x86_swiotlb_alloc_coherent,
18560 .free_coherent = swiotlb_free_coherent,
18561 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
18562 index fc6c84d..0312ca2 100644
18563 --- a/arch/x86/kernel/process.c
18564 +++ b/arch/x86/kernel/process.c
18565 @@ -51,16 +51,33 @@ void free_thread_xstate(struct task_struct *tsk)
18566
18567 void free_thread_info(struct thread_info *ti)
18568 {
18569 - free_thread_xstate(ti->task);
18570 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
18571 }
18572
18573 +static struct kmem_cache *task_struct_cachep;
18574 +
18575 void arch_task_cache_init(void)
18576 {
18577 - task_xstate_cachep =
18578 - kmem_cache_create("task_xstate", xstate_size,
18579 + /* create a slab on which task_structs can be allocated */
18580 + task_struct_cachep =
18581 + kmem_cache_create("task_struct", sizeof(struct task_struct),
18582 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
18583 +
18584 + task_xstate_cachep =
18585 + kmem_cache_create("task_xstate", xstate_size,
18586 __alignof__(union thread_xstate),
18587 - SLAB_PANIC | SLAB_NOTRACK, NULL);
18588 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
18589 +}
18590 +
18591 +struct task_struct *alloc_task_struct(void)
18592 +{
18593 + return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
18594 +}
18595 +
18596 +void free_task_struct(struct task_struct *task)
18597 +{
18598 + free_thread_xstate(task);
18599 + kmem_cache_free(task_struct_cachep, task);
18600 }
18601
18602 /*
18603 @@ -73,7 +90,7 @@ void exit_thread(void)
18604 unsigned long *bp = t->io_bitmap_ptr;
18605
18606 if (bp) {
18607 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
18608 + struct tss_struct *tss = init_tss + get_cpu();
18609
18610 t->io_bitmap_ptr = NULL;
18611 clear_thread_flag(TIF_IO_BITMAP);
18612 @@ -93,6 +110,9 @@ void flush_thread(void)
18613
18614 clear_tsk_thread_flag(tsk, TIF_DEBUG);
18615
18616 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18617 + loadsegment(gs, 0);
18618 +#endif
18619 tsk->thread.debugreg0 = 0;
18620 tsk->thread.debugreg1 = 0;
18621 tsk->thread.debugreg2 = 0;
18622 @@ -307,7 +327,7 @@ void default_idle(void)
18623 EXPORT_SYMBOL(default_idle);
18624 #endif
18625
18626 -void stop_this_cpu(void *dummy)
18627 +__noreturn void stop_this_cpu(void *dummy)
18628 {
18629 local_irq_disable();
18630 /*
18631 @@ -568,16 +588,38 @@ static int __init idle_setup(char *str)
18632 }
18633 early_param("idle", idle_setup);
18634
18635 -unsigned long arch_align_stack(unsigned long sp)
18636 +#ifdef CONFIG_PAX_RANDKSTACK
18637 +void pax_randomize_kstack(struct pt_regs *regs)
18638 {
18639 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
18640 - sp -= get_random_int() % 8192;
18641 - return sp & ~0xf;
18642 -}
18643 + struct thread_struct *thread = &current->thread;
18644 + unsigned long time;
18645
18646 -unsigned long arch_randomize_brk(struct mm_struct *mm)
18647 -{
18648 - unsigned long range_end = mm->brk + 0x02000000;
18649 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
18650 + if (!randomize_va_space)
18651 + return;
18652 +
18653 + if (v8086_mode(regs))
18654 + return;
18655 +
18656 + rdtscl(time);
18657 +
18658 + /* P4 seems to return a 0 LSB, ignore it */
18659 +#ifdef CONFIG_MPENTIUM4
18660 + time &= 0x3EUL;
18661 + time <<= 2;
18662 +#elif defined(CONFIG_X86_64)
18663 + time &= 0xFUL;
18664 + time <<= 4;
18665 +#else
18666 + time &= 0x1FUL;
18667 + time <<= 3;
18668 +#endif
18669 +
18670 + thread->sp0 ^= time;
18671 + load_sp0(init_tss + smp_processor_id(), thread);
18672 +
18673 +#ifdef CONFIG_X86_64
18674 + percpu_write(kernel_stack, thread->sp0);
18675 +#endif
18676 }
18677 +#endif
18678
18679 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
18680 index c40c432..6e1df72 100644
18681 --- a/arch/x86/kernel/process_32.c
18682 +++ b/arch/x86/kernel/process_32.c
18683 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
18684 unsigned long thread_saved_pc(struct task_struct *tsk)
18685 {
18686 return ((unsigned long *)tsk->thread.sp)[3];
18687 +//XXX return tsk->thread.eip;
18688 }
18689
18690 #ifndef CONFIG_SMP
18691 @@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, int all)
18692 unsigned short ss, gs;
18693 const char *board;
18694
18695 - if (user_mode_vm(regs)) {
18696 + if (user_mode(regs)) {
18697 sp = regs->sp;
18698 ss = regs->ss & 0xffff;
18699 - gs = get_user_gs(regs);
18700 } else {
18701 sp = (unsigned long) (&regs->sp);
18702 savesegment(ss, ss);
18703 - savesegment(gs, gs);
18704 }
18705 + gs = get_user_gs(regs);
18706
18707 printk("\n");
18708
18709 @@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
18710 regs.bx = (unsigned long) fn;
18711 regs.dx = (unsigned long) arg;
18712
18713 - regs.ds = __USER_DS;
18714 - regs.es = __USER_DS;
18715 + regs.ds = __KERNEL_DS;
18716 + regs.es = __KERNEL_DS;
18717 regs.fs = __KERNEL_PERCPU;
18718 - regs.gs = __KERNEL_STACK_CANARY;
18719 + savesegment(gs, regs.gs);
18720 regs.orig_ax = -1;
18721 regs.ip = (unsigned long) kernel_thread_helper;
18722 regs.cs = __KERNEL_CS | get_kernel_rpl();
18723 @@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18724 struct task_struct *tsk;
18725 int err;
18726
18727 - childregs = task_pt_regs(p);
18728 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
18729 *childregs = *regs;
18730 childregs->ax = 0;
18731 childregs->sp = sp;
18732
18733 p->thread.sp = (unsigned long) childregs;
18734 p->thread.sp0 = (unsigned long) (childregs+1);
18735 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18736
18737 p->thread.ip = (unsigned long) ret_from_fork;
18738
18739 @@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18740 struct thread_struct *prev = &prev_p->thread,
18741 *next = &next_p->thread;
18742 int cpu = smp_processor_id();
18743 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
18744 + struct tss_struct *tss = init_tss + cpu;
18745 bool preload_fpu;
18746
18747 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
18748 @@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18749 */
18750 lazy_save_gs(prev->gs);
18751
18752 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18753 + __set_fs(task_thread_info(next_p)->addr_limit);
18754 +#endif
18755 +
18756 /*
18757 * Load the per-thread Thread-Local Storage descriptor.
18758 */
18759 @@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18760 */
18761 arch_end_context_switch(next_p);
18762
18763 + percpu_write(current_task, next_p);
18764 + percpu_write(current_tinfo, &next_p->tinfo);
18765 +
18766 if (preload_fpu)
18767 __math_state_restore();
18768
18769 @@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18770 if (prev->gs | next->gs)
18771 lazy_load_gs(next->gs);
18772
18773 - percpu_write(current_task, next_p);
18774 -
18775 return prev_p;
18776 }
18777
18778 @@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_struct *p)
18779 } while (count++ < 16);
18780 return 0;
18781 }
18782 -
18783 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
18784 index 39493bc..196816d 100644
18785 --- a/arch/x86/kernel/process_64.c
18786 +++ b/arch/x86/kernel/process_64.c
18787 @@ -91,7 +91,7 @@ static void __exit_idle(void)
18788 void exit_idle(void)
18789 {
18790 /* idle loop has pid 0 */
18791 - if (current->pid)
18792 + if (task_pid_nr(current))
18793 return;
18794 __exit_idle();
18795 }
18796 @@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, int all)
18797 if (!board)
18798 board = "";
18799 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
18800 - current->pid, current->comm, print_tainted(),
18801 + task_pid_nr(current), current->comm, print_tainted(),
18802 init_utsname()->release,
18803 (int)strcspn(init_utsname()->version, " "),
18804 init_utsname()->version, board);
18805 @@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18806 struct pt_regs *childregs;
18807 struct task_struct *me = current;
18808
18809 - childregs = ((struct pt_regs *)
18810 - (THREAD_SIZE + task_stack_page(p))) - 1;
18811 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
18812 *childregs = *regs;
18813
18814 childregs->ax = 0;
18815 @@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18816 p->thread.sp = (unsigned long) childregs;
18817 p->thread.sp0 = (unsigned long) (childregs+1);
18818 p->thread.usersp = me->thread.usersp;
18819 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18820
18821 set_tsk_thread_flag(p, TIF_FORK);
18822
18823 @@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18824 struct thread_struct *prev = &prev_p->thread;
18825 struct thread_struct *next = &next_p->thread;
18826 int cpu = smp_processor_id();
18827 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
18828 + struct tss_struct *tss = init_tss + cpu;
18829 unsigned fsindex, gsindex;
18830 bool preload_fpu;
18831
18832 @@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18833 prev->usersp = percpu_read(old_rsp);
18834 percpu_write(old_rsp, next->usersp);
18835 percpu_write(current_task, next_p);
18836 + percpu_write(current_tinfo, &next_p->tinfo);
18837
18838 - percpu_write(kernel_stack,
18839 - (unsigned long)task_stack_page(next_p) +
18840 - THREAD_SIZE - KERNEL_STACK_OFFSET);
18841 + percpu_write(kernel_stack, next->sp0);
18842
18843 /*
18844 * Now maybe reload the debug registers and handle I/O bitmaps
18845 @@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_struct *p)
18846 if (!p || p == current || p->state == TASK_RUNNING)
18847 return 0;
18848 stack = (unsigned long)task_stack_page(p);
18849 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
18850 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
18851 return 0;
18852 fp = *(u64 *)(p->thread.sp);
18853 do {
18854 - if (fp < (unsigned long)stack ||
18855 - fp >= (unsigned long)stack+THREAD_SIZE)
18856 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
18857 return 0;
18858 ip = *(u64 *)(fp+8);
18859 if (!in_sched_functions(ip))
18860 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
18861 index c06acdd..3f5fff5 100644
18862 --- a/arch/x86/kernel/ptrace.c
18863 +++ b/arch/x86/kernel/ptrace.c
18864 @@ -925,7 +925,7 @@ static const struct user_regset_view user_x86_32_view; /* Initialized below. */
18865 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18866 {
18867 int ret;
18868 - unsigned long __user *datap = (unsigned long __user *)data;
18869 + unsigned long __user *datap = (__force unsigned long __user *)data;
18870
18871 switch (request) {
18872 /* read the word at location addr in the USER area. */
18873 @@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18874 if (addr < 0)
18875 return -EIO;
18876 ret = do_get_thread_area(child, addr,
18877 - (struct user_desc __user *) data);
18878 + (__force struct user_desc __user *) data);
18879 break;
18880
18881 case PTRACE_SET_THREAD_AREA:
18882 if (addr < 0)
18883 return -EIO;
18884 ret = do_set_thread_area(child, addr,
18885 - (struct user_desc __user *) data, 0);
18886 + (__force struct user_desc __user *) data, 0);
18887 break;
18888 #endif
18889
18890 @@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18891 #ifdef CONFIG_X86_PTRACE_BTS
18892 case PTRACE_BTS_CONFIG:
18893 ret = ptrace_bts_config
18894 - (child, data, (struct ptrace_bts_config __user *)addr);
18895 + (child, data, (__force struct ptrace_bts_config __user *)addr);
18896 break;
18897
18898 case PTRACE_BTS_STATUS:
18899 ret = ptrace_bts_status
18900 - (child, data, (struct ptrace_bts_config __user *)addr);
18901 + (child, data, (__force struct ptrace_bts_config __user *)addr);
18902 break;
18903
18904 case PTRACE_BTS_SIZE:
18905 @@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18906
18907 case PTRACE_BTS_GET:
18908 ret = ptrace_bts_read_record
18909 - (child, data, (struct bts_struct __user *) addr);
18910 + (child, data, (__force struct bts_struct __user *) addr);
18911 break;
18912
18913 case PTRACE_BTS_CLEAR:
18914 @@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18915
18916 case PTRACE_BTS_DRAIN:
18917 ret = ptrace_bts_drain
18918 - (child, data, (struct bts_struct __user *) addr);
18919 + (child, data, (__force struct bts_struct __user *) addr);
18920 break;
18921 #endif /* CONFIG_X86_PTRACE_BTS */
18922
18923 @@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
18924 info.si_code = si_code;
18925
18926 /* User-mode ip? */
18927 - info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
18928 + info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
18929
18930 /* Send us the fake SIGTRAP */
18931 force_sig_info(SIGTRAP, &info, tsk);
18932 @@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
18933 * We must return the syscall number to actually look up in the table.
18934 * This can be -1L to skip running any syscall at all.
18935 */
18936 -asmregparm long syscall_trace_enter(struct pt_regs *regs)
18937 +long syscall_trace_enter(struct pt_regs *regs)
18938 {
18939 long ret = 0;
18940
18941 @@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
18942 return ret ?: regs->orig_ax;
18943 }
18944
18945 -asmregparm void syscall_trace_leave(struct pt_regs *regs)
18946 +void syscall_trace_leave(struct pt_regs *regs)
18947 {
18948 if (unlikely(current->audit_context))
18949 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
18950 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
18951 index cf98100..e76e03d 100644
18952 --- a/arch/x86/kernel/reboot.c
18953 +++ b/arch/x86/kernel/reboot.c
18954 @@ -33,7 +33,7 @@ void (*pm_power_off)(void);
18955 EXPORT_SYMBOL(pm_power_off);
18956
18957 static const struct desc_ptr no_idt = {};
18958 -static int reboot_mode;
18959 +static unsigned short reboot_mode;
18960 enum reboot_type reboot_type = BOOT_KBD;
18961 int reboot_force;
18962
18963 @@ -292,12 +292,12 @@ core_initcall(reboot_init);
18964 controller to pulse the CPU reset line, which is more thorough, but
18965 doesn't work with at least one type of 486 motherboard. It is easy
18966 to stop this code working; hence the copious comments. */
18967 -static const unsigned long long
18968 -real_mode_gdt_entries [3] =
18969 +static struct desc_struct
18970 +real_mode_gdt_entries [3] __read_only =
18971 {
18972 - 0x0000000000000000ULL, /* Null descriptor */
18973 - 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
18974 - 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
18975 + GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
18976 + GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
18977 + GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
18978 };
18979
18980 static const struct desc_ptr
18981 @@ -346,7 +346,7 @@ static const unsigned char jump_to_bios [] =
18982 * specified by the code and length parameters.
18983 * We assume that length will aways be less that 100!
18984 */
18985 -void machine_real_restart(const unsigned char *code, int length)
18986 +__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
18987 {
18988 local_irq_disable();
18989
18990 @@ -366,8 +366,8 @@ void machine_real_restart(const unsigned char *code, int length)
18991 /* Remap the kernel at virtual address zero, as well as offset zero
18992 from the kernel segment. This assumes the kernel segment starts at
18993 virtual address PAGE_OFFSET. */
18994 - memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
18995 - sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
18996 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
18997 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
18998
18999 /*
19000 * Use `swapper_pg_dir' as our page directory.
19001 @@ -379,16 +379,15 @@ void machine_real_restart(const unsigned char *code, int length)
19002 boot)". This seems like a fairly standard thing that gets set by
19003 REBOOT.COM programs, and the previous reset routine did this
19004 too. */
19005 - *((unsigned short *)0x472) = reboot_mode;
19006 + *(unsigned short *)(__va(0x472)) = reboot_mode;
19007
19008 /* For the switch to real mode, copy some code to low memory. It has
19009 to be in the first 64k because it is running in 16-bit mode, and it
19010 has to have the same physical and virtual address, because it turns
19011 off paging. Copy it near the end of the first page, out of the way
19012 of BIOS variables. */
19013 - memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
19014 - real_mode_switch, sizeof (real_mode_switch));
19015 - memcpy((void *)(0x1000 - 100), code, length);
19016 + memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
19017 + memcpy(__va(0x1000 - 100), code, length);
19018
19019 /* Set up the IDT for real mode. */
19020 load_idt(&real_mode_idt);
19021 @@ -416,6 +415,7 @@ void machine_real_restart(const unsigned char *code, int length)
19022 __asm__ __volatile__ ("ljmp $0x0008,%0"
19023 :
19024 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
19025 + do { } while (1);
19026 }
19027 #ifdef CONFIG_APM_MODULE
19028 EXPORT_SYMBOL(machine_real_restart);
19029 @@ -544,7 +544,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
19030 {
19031 }
19032
19033 -static void native_machine_emergency_restart(void)
19034 +__noreturn static void native_machine_emergency_restart(void)
19035 {
19036 int i;
19037
19038 @@ -659,13 +659,13 @@ void native_machine_shutdown(void)
19039 #endif
19040 }
19041
19042 -static void __machine_emergency_restart(int emergency)
19043 +static __noreturn void __machine_emergency_restart(int emergency)
19044 {
19045 reboot_emergency = emergency;
19046 machine_ops.emergency_restart();
19047 }
19048
19049 -static void native_machine_restart(char *__unused)
19050 +static __noreturn void native_machine_restart(char *__unused)
19051 {
19052 printk("machine restart\n");
19053
19054 @@ -674,7 +674,7 @@ static void native_machine_restart(char *__unused)
19055 __machine_emergency_restart(0);
19056 }
19057
19058 -static void native_machine_halt(void)
19059 +static __noreturn void native_machine_halt(void)
19060 {
19061 /* stop other cpus and apics */
19062 machine_shutdown();
19063 @@ -685,7 +685,7 @@ static void native_machine_halt(void)
19064 stop_this_cpu(NULL);
19065 }
19066
19067 -static void native_machine_power_off(void)
19068 +__noreturn static void native_machine_power_off(void)
19069 {
19070 if (pm_power_off) {
19071 if (!reboot_force)
19072 @@ -694,6 +694,7 @@ static void native_machine_power_off(void)
19073 }
19074 /* a fallback in case there is no PM info available */
19075 tboot_shutdown(TB_SHUTDOWN_HALT);
19076 + do { } while (1);
19077 }
19078
19079 struct machine_ops machine_ops = {
19080 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
19081 index 7a6f3b3..976a959 100644
19082 --- a/arch/x86/kernel/relocate_kernel_64.S
19083 +++ b/arch/x86/kernel/relocate_kernel_64.S
19084 @@ -11,6 +11,7 @@
19085 #include <asm/kexec.h>
19086 #include <asm/processor-flags.h>
19087 #include <asm/pgtable_types.h>
19088 +#include <asm/alternative-asm.h>
19089
19090 /*
19091 * Must be relocatable PIC code callable as a C function
19092 @@ -167,6 +168,7 @@ identity_mapped:
19093 xorq %r14, %r14
19094 xorq %r15, %r15
19095
19096 + pax_force_retaddr 0, 1
19097 ret
19098
19099 1:
19100 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
19101 index 5449a26..0b6c759 100644
19102 --- a/arch/x86/kernel/setup.c
19103 +++ b/arch/x86/kernel/setup.c
19104 @@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
19105
19106 if (!boot_params.hdr.root_flags)
19107 root_mountflags &= ~MS_RDONLY;
19108 - init_mm.start_code = (unsigned long) _text;
19109 - init_mm.end_code = (unsigned long) _etext;
19110 + init_mm.start_code = ktla_ktva((unsigned long) _text);
19111 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
19112 init_mm.end_data = (unsigned long) _edata;
19113 init_mm.brk = _brk_end;
19114
19115 - code_resource.start = virt_to_phys(_text);
19116 - code_resource.end = virt_to_phys(_etext)-1;
19117 - data_resource.start = virt_to_phys(_etext);
19118 + code_resource.start = virt_to_phys(ktla_ktva(_text));
19119 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
19120 + data_resource.start = virt_to_phys(_sdata);
19121 data_resource.end = virt_to_phys(_edata)-1;
19122 bss_resource.start = virt_to_phys(&__bss_start);
19123 bss_resource.end = virt_to_phys(&__bss_stop)-1;
19124 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
19125 index d559af9..524c6ad 100644
19126 --- a/arch/x86/kernel/setup_percpu.c
19127 +++ b/arch/x86/kernel/setup_percpu.c
19128 @@ -25,19 +25,17 @@
19129 # define DBG(x...)
19130 #endif
19131
19132 -DEFINE_PER_CPU(int, cpu_number);
19133 +#ifdef CONFIG_SMP
19134 +DEFINE_PER_CPU(unsigned int, cpu_number);
19135 EXPORT_PER_CPU_SYMBOL(cpu_number);
19136 +#endif
19137
19138 -#ifdef CONFIG_X86_64
19139 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
19140 -#else
19141 -#define BOOT_PERCPU_OFFSET 0
19142 -#endif
19143
19144 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
19145 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
19146
19147 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
19148 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
19149 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
19150 };
19151 EXPORT_SYMBOL(__per_cpu_offset);
19152 @@ -159,10 +157,10 @@ static inline void setup_percpu_segment(int cpu)
19153 {
19154 #ifdef CONFIG_X86_32
19155 struct desc_struct gdt;
19156 + unsigned long base = per_cpu_offset(cpu);
19157
19158 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
19159 - 0x2 | DESCTYPE_S, 0x8);
19160 - gdt.s = 1;
19161 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
19162 + 0x83 | DESCTYPE_S, 0xC);
19163 write_gdt_entry(get_cpu_gdt_table(cpu),
19164 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
19165 #endif
19166 @@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
19167 /* alrighty, percpu areas up and running */
19168 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
19169 for_each_possible_cpu(cpu) {
19170 +#ifdef CONFIG_CC_STACKPROTECTOR
19171 +#ifdef CONFIG_X86_32
19172 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
19173 +#endif
19174 +#endif
19175 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
19176 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
19177 per_cpu(cpu_number, cpu) = cpu;
19178 @@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
19179 early_per_cpu_map(x86_cpu_to_node_map, cpu);
19180 #endif
19181 #endif
19182 +#ifdef CONFIG_CC_STACKPROTECTOR
19183 +#ifdef CONFIG_X86_32
19184 + if (!cpu)
19185 + per_cpu(stack_canary.canary, cpu) = canary;
19186 +#endif
19187 +#endif
19188 /*
19189 * Up to this point, the boot CPU has been using .data.init
19190 * area. Reload any changed state for the boot CPU.
19191 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
19192 index 6a44a76..a9287a1 100644
19193 --- a/arch/x86/kernel/signal.c
19194 +++ b/arch/x86/kernel/signal.c
19195 @@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsigned long sp)
19196 * Align the stack pointer according to the i386 ABI,
19197 * i.e. so that on function entry ((sp + 4) & 15) == 0.
19198 */
19199 - sp = ((sp + 4) & -16ul) - 4;
19200 + sp = ((sp - 12) & -16ul) - 4;
19201 #else /* !CONFIG_X86_32 */
19202 sp = round_down(sp, 16) - 8;
19203 #endif
19204 @@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
19205 * Return an always-bogus address instead so we will die with SIGSEGV.
19206 */
19207 if (onsigstack && !likely(on_sig_stack(sp)))
19208 - return (void __user *)-1L;
19209 + return (__force void __user *)-1L;
19210
19211 /* save i387 state */
19212 if (used_math() && save_i387_xstate(*fpstate) < 0)
19213 - return (void __user *)-1L;
19214 + return (__force void __user *)-1L;
19215
19216 return (void __user *)sp;
19217 }
19218 @@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19219 }
19220
19221 if (current->mm->context.vdso)
19222 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19223 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19224 else
19225 - restorer = &frame->retcode;
19226 + restorer = (void __user *)&frame->retcode;
19227 if (ka->sa.sa_flags & SA_RESTORER)
19228 restorer = ka->sa.sa_restorer;
19229
19230 @@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19231 * reasons and because gdb uses it as a signature to notice
19232 * signal handler stack frames.
19233 */
19234 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
19235 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
19236
19237 if (err)
19238 return -EFAULT;
19239 @@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19240 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
19241
19242 /* Set up to return from userspace. */
19243 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19244 + if (current->mm->context.vdso)
19245 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19246 + else
19247 + restorer = (void __user *)&frame->retcode;
19248 if (ka->sa.sa_flags & SA_RESTORER)
19249 restorer = ka->sa.sa_restorer;
19250 put_user_ex(restorer, &frame->pretcode);
19251 @@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19252 * reasons and because gdb uses it as a signature to notice
19253 * signal handler stack frames.
19254 */
19255 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
19256 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
19257 } put_user_catch(err);
19258
19259 if (err)
19260 @@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *regs)
19261 int signr;
19262 sigset_t *oldset;
19263
19264 + pax_track_stack();
19265 +
19266 /*
19267 * We want the common case to go fast, which is why we may in certain
19268 * cases get here from kernel mode. Just return without doing anything
19269 @@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *regs)
19270 * X86_32: vm86 regs switched out by assembly code before reaching
19271 * here, so testing against kernel CS suffices.
19272 */
19273 - if (!user_mode(regs))
19274 + if (!user_mode_novm(regs))
19275 return;
19276
19277 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
19278 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
19279 index 7e8e905..64d5c32 100644
19280 --- a/arch/x86/kernel/smpboot.c
19281 +++ b/arch/x86/kernel/smpboot.c
19282 @@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
19283 */
19284 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
19285
19286 -void cpu_hotplug_driver_lock()
19287 +void cpu_hotplug_driver_lock(void)
19288 {
19289 - mutex_lock(&x86_cpu_hotplug_driver_mutex);
19290 + mutex_lock(&x86_cpu_hotplug_driver_mutex);
19291 }
19292
19293 -void cpu_hotplug_driver_unlock()
19294 +void cpu_hotplug_driver_unlock(void)
19295 {
19296 - mutex_unlock(&x86_cpu_hotplug_driver_mutex);
19297 + mutex_unlock(&x86_cpu_hotplug_driver_mutex);
19298 }
19299
19300 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
19301 @@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
19302 * target processor state.
19303 */
19304 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
19305 - (unsigned long)stack_start.sp);
19306 + stack_start);
19307
19308 /*
19309 * Run STARTUP IPI loop.
19310 @@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
19311 set_idle_for_cpu(cpu, c_idle.idle);
19312 do_rest:
19313 per_cpu(current_task, cpu) = c_idle.idle;
19314 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
19315 #ifdef CONFIG_X86_32
19316 /* Stack for startup_32 can be just as for start_secondary onwards */
19317 irq_ctx_init(cpu);
19318 @@ -750,13 +751,15 @@ do_rest:
19319 #else
19320 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
19321 initial_gs = per_cpu_offset(cpu);
19322 - per_cpu(kernel_stack, cpu) =
19323 - (unsigned long)task_stack_page(c_idle.idle) -
19324 - KERNEL_STACK_OFFSET + THREAD_SIZE;
19325 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
19326 #endif
19327 +
19328 + pax_open_kernel();
19329 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
19330 + pax_close_kernel();
19331 +
19332 initial_code = (unsigned long)start_secondary;
19333 - stack_start.sp = (void *) c_idle.idle->thread.sp;
19334 + stack_start = c_idle.idle->thread.sp;
19335
19336 /* start_ip had better be page-aligned! */
19337 start_ip = setup_trampoline();
19338 @@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
19339
19340 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
19341
19342 +#ifdef CONFIG_PAX_PER_CPU_PGD
19343 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
19344 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19345 + KERNEL_PGD_PTRS);
19346 +#endif
19347 +
19348 err = do_boot_cpu(apicid, cpu);
19349
19350 if (err) {
19351 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
19352 index 3149032..14f1053 100644
19353 --- a/arch/x86/kernel/step.c
19354 +++ b/arch/x86/kernel/step.c
19355 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19356 struct desc_struct *desc;
19357 unsigned long base;
19358
19359 - seg &= ~7UL;
19360 + seg >>= 3;
19361
19362 mutex_lock(&child->mm->context.lock);
19363 - if (unlikely((seg >> 3) >= child->mm->context.size))
19364 + if (unlikely(seg >= child->mm->context.size))
19365 addr = -1L; /* bogus selector, access would fault */
19366 else {
19367 desc = child->mm->context.ldt + seg;
19368 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19369 addr += base;
19370 }
19371 mutex_unlock(&child->mm->context.lock);
19372 - }
19373 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
19374 + addr = ktla_ktva(addr);
19375
19376 return addr;
19377 }
19378 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19379 unsigned char opcode[15];
19380 unsigned long addr = convert_ip_to_linear(child, regs);
19381
19382 + if (addr == -EINVAL)
19383 + return 0;
19384 +
19385 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
19386 for (i = 0; i < copied; i++) {
19387 switch (opcode[i]) {
19388 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19389
19390 #ifdef CONFIG_X86_64
19391 case 0x40 ... 0x4f:
19392 - if (regs->cs != __USER_CS)
19393 + if ((regs->cs & 0xffff) != __USER_CS)
19394 /* 32-bit mode: register increment */
19395 return 0;
19396 /* 64-bit mode: REX prefix */
19397 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
19398 index dee1ff7..a397f7f 100644
19399 --- a/arch/x86/kernel/sys_i386_32.c
19400 +++ b/arch/x86/kernel/sys_i386_32.c
19401 @@ -24,6 +24,21 @@
19402
19403 #include <asm/syscalls.h>
19404
19405 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
19406 +{
19407 + unsigned long pax_task_size = TASK_SIZE;
19408 +
19409 +#ifdef CONFIG_PAX_SEGMEXEC
19410 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
19411 + pax_task_size = SEGMEXEC_TASK_SIZE;
19412 +#endif
19413 +
19414 + if (len > pax_task_size || addr > pax_task_size - len)
19415 + return -EINVAL;
19416 +
19417 + return 0;
19418 +}
19419 +
19420 /*
19421 * Perform the select(nd, in, out, ex, tv) and mmap() system
19422 * calls. Linux/i386 didn't use to be able to handle more than
19423 @@ -58,6 +73,212 @@ out:
19424 return err;
19425 }
19426
19427 +unsigned long
19428 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
19429 + unsigned long len, unsigned long pgoff, unsigned long flags)
19430 +{
19431 + struct mm_struct *mm = current->mm;
19432 + struct vm_area_struct *vma;
19433 + unsigned long start_addr, pax_task_size = TASK_SIZE;
19434 +
19435 +#ifdef CONFIG_PAX_SEGMEXEC
19436 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19437 + pax_task_size = SEGMEXEC_TASK_SIZE;
19438 +#endif
19439 +
19440 + pax_task_size -= PAGE_SIZE;
19441 +
19442 + if (len > pax_task_size)
19443 + return -ENOMEM;
19444 +
19445 + if (flags & MAP_FIXED)
19446 + return addr;
19447 +
19448 +#ifdef CONFIG_PAX_RANDMMAP
19449 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19450 +#endif
19451 +
19452 + if (addr) {
19453 + addr = PAGE_ALIGN(addr);
19454 + if (pax_task_size - len >= addr) {
19455 + vma = find_vma(mm, addr);
19456 + if (check_heap_stack_gap(vma, addr, len))
19457 + return addr;
19458 + }
19459 + }
19460 + if (len > mm->cached_hole_size) {
19461 + start_addr = addr = mm->free_area_cache;
19462 + } else {
19463 + start_addr = addr = mm->mmap_base;
19464 + mm->cached_hole_size = 0;
19465 + }
19466 +
19467 +#ifdef CONFIG_PAX_PAGEEXEC
19468 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
19469 + start_addr = 0x00110000UL;
19470 +
19471 +#ifdef CONFIG_PAX_RANDMMAP
19472 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19473 + start_addr += mm->delta_mmap & 0x03FFF000UL;
19474 +#endif
19475 +
19476 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
19477 + start_addr = addr = mm->mmap_base;
19478 + else
19479 + addr = start_addr;
19480 + }
19481 +#endif
19482 +
19483 +full_search:
19484 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19485 + /* At this point: (!vma || addr < vma->vm_end). */
19486 + if (pax_task_size - len < addr) {
19487 + /*
19488 + * Start a new search - just in case we missed
19489 + * some holes.
19490 + */
19491 + if (start_addr != mm->mmap_base) {
19492 + start_addr = addr = mm->mmap_base;
19493 + mm->cached_hole_size = 0;
19494 + goto full_search;
19495 + }
19496 + return -ENOMEM;
19497 + }
19498 + if (check_heap_stack_gap(vma, addr, len))
19499 + break;
19500 + if (addr + mm->cached_hole_size < vma->vm_start)
19501 + mm->cached_hole_size = vma->vm_start - addr;
19502 + addr = vma->vm_end;
19503 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
19504 + start_addr = addr = mm->mmap_base;
19505 + mm->cached_hole_size = 0;
19506 + goto full_search;
19507 + }
19508 + }
19509 +
19510 + /*
19511 + * Remember the place where we stopped the search:
19512 + */
19513 + mm->free_area_cache = addr + len;
19514 + return addr;
19515 +}
19516 +
19517 +unsigned long
19518 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19519 + const unsigned long len, const unsigned long pgoff,
19520 + const unsigned long flags)
19521 +{
19522 + struct vm_area_struct *vma;
19523 + struct mm_struct *mm = current->mm;
19524 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
19525 +
19526 +#ifdef CONFIG_PAX_SEGMEXEC
19527 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19528 + pax_task_size = SEGMEXEC_TASK_SIZE;
19529 +#endif
19530 +
19531 + pax_task_size -= PAGE_SIZE;
19532 +
19533 + /* requested length too big for entire address space */
19534 + if (len > pax_task_size)
19535 + return -ENOMEM;
19536 +
19537 + if (flags & MAP_FIXED)
19538 + return addr;
19539 +
19540 +#ifdef CONFIG_PAX_PAGEEXEC
19541 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
19542 + goto bottomup;
19543 +#endif
19544 +
19545 +#ifdef CONFIG_PAX_RANDMMAP
19546 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19547 +#endif
19548 +
19549 + /* requesting a specific address */
19550 + if (addr) {
19551 + addr = PAGE_ALIGN(addr);
19552 + if (pax_task_size - len >= addr) {
19553 + vma = find_vma(mm, addr);
19554 + if (check_heap_stack_gap(vma, addr, len))
19555 + return addr;
19556 + }
19557 + }
19558 +
19559 + /* check if free_area_cache is useful for us */
19560 + if (len <= mm->cached_hole_size) {
19561 + mm->cached_hole_size = 0;
19562 + mm->free_area_cache = mm->mmap_base;
19563 + }
19564 +
19565 + /* either no address requested or can't fit in requested address hole */
19566 + addr = mm->free_area_cache;
19567 +
19568 + /* make sure it can fit in the remaining address space */
19569 + if (addr > len) {
19570 + vma = find_vma(mm, addr-len);
19571 + if (check_heap_stack_gap(vma, addr - len, len))
19572 + /* remember the address as a hint for next time */
19573 + return (mm->free_area_cache = addr-len);
19574 + }
19575 +
19576 + if (mm->mmap_base < len)
19577 + goto bottomup;
19578 +
19579 + addr = mm->mmap_base-len;
19580 +
19581 + do {
19582 + /*
19583 + * Lookup failure means no vma is above this address,
19584 + * else if new region fits below vma->vm_start,
19585 + * return with success:
19586 + */
19587 + vma = find_vma(mm, addr);
19588 + if (check_heap_stack_gap(vma, addr, len))
19589 + /* remember the address as a hint for next time */
19590 + return (mm->free_area_cache = addr);
19591 +
19592 + /* remember the largest hole we saw so far */
19593 + if (addr + mm->cached_hole_size < vma->vm_start)
19594 + mm->cached_hole_size = vma->vm_start - addr;
19595 +
19596 + /* try just below the current vma->vm_start */
19597 + addr = skip_heap_stack_gap(vma, len);
19598 + } while (!IS_ERR_VALUE(addr));
19599 +
19600 +bottomup:
19601 + /*
19602 + * A failed mmap() very likely causes application failure,
19603 + * so fall back to the bottom-up function here. This scenario
19604 + * can happen with large stack limits and large mmap()
19605 + * allocations.
19606 + */
19607 +
19608 +#ifdef CONFIG_PAX_SEGMEXEC
19609 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19610 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19611 + else
19612 +#endif
19613 +
19614 + mm->mmap_base = TASK_UNMAPPED_BASE;
19615 +
19616 +#ifdef CONFIG_PAX_RANDMMAP
19617 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19618 + mm->mmap_base += mm->delta_mmap;
19619 +#endif
19620 +
19621 + mm->free_area_cache = mm->mmap_base;
19622 + mm->cached_hole_size = ~0UL;
19623 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19624 + /*
19625 + * Restore the topdown base:
19626 + */
19627 + mm->mmap_base = base;
19628 + mm->free_area_cache = base;
19629 + mm->cached_hole_size = ~0UL;
19630 +
19631 + return addr;
19632 +}
19633
19634 struct sel_arg_struct {
19635 unsigned long n;
19636 @@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
19637 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
19638 case SEMTIMEDOP:
19639 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
19640 - (const struct timespec __user *)fifth);
19641 + (__force const struct timespec __user *)fifth);
19642
19643 case SEMGET:
19644 return sys_semget(first, second, third);
19645 @@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
19646 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
19647 if (ret)
19648 return ret;
19649 - return put_user(raddr, (ulong __user *) third);
19650 + return put_user(raddr, (__force ulong __user *) third);
19651 }
19652 case 1: /* iBCS2 emulator entry point */
19653 if (!segment_eq(get_fs(), get_ds()))
19654 @@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldold_utsname __user *name)
19655
19656 return error;
19657 }
19658 -
19659 -
19660 -/*
19661 - * Do a system call from kernel instead of calling sys_execve so we
19662 - * end up with proper pt_regs.
19663 - */
19664 -int kernel_execve(const char *filename, char *const argv[], char *const envp[])
19665 -{
19666 - long __res;
19667 - asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
19668 - : "=a" (__res)
19669 - : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
19670 - return __res;
19671 -}
19672 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
19673 index 8aa2057..b604bc1 100644
19674 --- a/arch/x86/kernel/sys_x86_64.c
19675 +++ b/arch/x86/kernel/sys_x86_64.c
19676 @@ -32,8 +32,8 @@ out:
19677 return error;
19678 }
19679
19680 -static void find_start_end(unsigned long flags, unsigned long *begin,
19681 - unsigned long *end)
19682 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
19683 + unsigned long *begin, unsigned long *end)
19684 {
19685 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
19686 unsigned long new_begin;
19687 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
19688 *begin = new_begin;
19689 }
19690 } else {
19691 - *begin = TASK_UNMAPPED_BASE;
19692 + *begin = mm->mmap_base;
19693 *end = TASK_SIZE;
19694 }
19695 }
19696 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
19697 if (flags & MAP_FIXED)
19698 return addr;
19699
19700 - find_start_end(flags, &begin, &end);
19701 + find_start_end(mm, flags, &begin, &end);
19702
19703 if (len > end)
19704 return -ENOMEM;
19705
19706 +#ifdef CONFIG_PAX_RANDMMAP
19707 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19708 +#endif
19709 +
19710 if (addr) {
19711 addr = PAGE_ALIGN(addr);
19712 vma = find_vma(mm, addr);
19713 - if (end - len >= addr &&
19714 - (!vma || addr + len <= vma->vm_start))
19715 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
19716 return addr;
19717 }
19718 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
19719 @@ -106,7 +109,7 @@ full_search:
19720 }
19721 return -ENOMEM;
19722 }
19723 - if (!vma || addr + len <= vma->vm_start) {
19724 + if (check_heap_stack_gap(vma, addr, len)) {
19725 /*
19726 * Remember the place where we stopped the search:
19727 */
19728 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19729 {
19730 struct vm_area_struct *vma;
19731 struct mm_struct *mm = current->mm;
19732 - unsigned long addr = addr0;
19733 + unsigned long base = mm->mmap_base, addr = addr0;
19734
19735 /* requested length too big for entire address space */
19736 if (len > TASK_SIZE)
19737 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19738 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
19739 goto bottomup;
19740
19741 +#ifdef CONFIG_PAX_RANDMMAP
19742 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19743 +#endif
19744 +
19745 /* requesting a specific address */
19746 if (addr) {
19747 addr = PAGE_ALIGN(addr);
19748 - vma = find_vma(mm, addr);
19749 - if (TASK_SIZE - len >= addr &&
19750 - (!vma || addr + len <= vma->vm_start))
19751 - return addr;
19752 + if (TASK_SIZE - len >= addr) {
19753 + vma = find_vma(mm, addr);
19754 + if (check_heap_stack_gap(vma, addr, len))
19755 + return addr;
19756 + }
19757 }
19758
19759 /* check if free_area_cache is useful for us */
19760 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19761 /* make sure it can fit in the remaining address space */
19762 if (addr > len) {
19763 vma = find_vma(mm, addr-len);
19764 - if (!vma || addr <= vma->vm_start)
19765 + if (check_heap_stack_gap(vma, addr - len, len))
19766 /* remember the address as a hint for next time */
19767 return mm->free_area_cache = addr-len;
19768 }
19769 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19770 * return with success:
19771 */
19772 vma = find_vma(mm, addr);
19773 - if (!vma || addr+len <= vma->vm_start)
19774 + if (check_heap_stack_gap(vma, addr, len))
19775 /* remember the address as a hint for next time */
19776 return mm->free_area_cache = addr;
19777
19778 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19779 mm->cached_hole_size = vma->vm_start - addr;
19780
19781 /* try just below the current vma->vm_start */
19782 - addr = vma->vm_start-len;
19783 - } while (len < vma->vm_start);
19784 + addr = skip_heap_stack_gap(vma, len);
19785 + } while (!IS_ERR_VALUE(addr));
19786
19787 bottomup:
19788 /*
19789 @@ -198,13 +206,21 @@ bottomup:
19790 * can happen with large stack limits and large mmap()
19791 * allocations.
19792 */
19793 + mm->mmap_base = TASK_UNMAPPED_BASE;
19794 +
19795 +#ifdef CONFIG_PAX_RANDMMAP
19796 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19797 + mm->mmap_base += mm->delta_mmap;
19798 +#endif
19799 +
19800 + mm->free_area_cache = mm->mmap_base;
19801 mm->cached_hole_size = ~0UL;
19802 - mm->free_area_cache = TASK_UNMAPPED_BASE;
19803 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19804 /*
19805 * Restore the topdown base:
19806 */
19807 - mm->free_area_cache = mm->mmap_base;
19808 + mm->mmap_base = base;
19809 + mm->free_area_cache = base;
19810 mm->cached_hole_size = ~0UL;
19811
19812 return addr;
19813 diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
19814 index 76d70a4..4c94a44 100644
19815 --- a/arch/x86/kernel/syscall_table_32.S
19816 +++ b/arch/x86/kernel/syscall_table_32.S
19817 @@ -1,3 +1,4 @@
19818 +.section .rodata,"a",@progbits
19819 ENTRY(sys_call_table)
19820 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
19821 .long sys_exit
19822 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
19823 index 46b8277..3349d55 100644
19824 --- a/arch/x86/kernel/tboot.c
19825 +++ b/arch/x86/kernel/tboot.c
19826 @@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
19827
19828 void tboot_shutdown(u32 shutdown_type)
19829 {
19830 - void (*shutdown)(void);
19831 + void (* __noreturn shutdown)(void);
19832
19833 if (!tboot_enabled())
19834 return;
19835 @@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
19836
19837 switch_to_tboot_pt();
19838
19839 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
19840 + shutdown = (void *)tboot->shutdown_entry;
19841 shutdown();
19842
19843 /* should not reach here */
19844 @@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
19845 tboot_shutdown(acpi_shutdown_map[sleep_state]);
19846 }
19847
19848 -static atomic_t ap_wfs_count;
19849 +static atomic_unchecked_t ap_wfs_count;
19850
19851 static int tboot_wait_for_aps(int num_aps)
19852 {
19853 @@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
19854 {
19855 switch (action) {
19856 case CPU_DYING:
19857 - atomic_inc(&ap_wfs_count);
19858 + atomic_inc_unchecked(&ap_wfs_count);
19859 if (num_online_cpus() == 1)
19860 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
19861 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
19862 return NOTIFY_BAD;
19863 break;
19864 }
19865 @@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
19866
19867 tboot_create_trampoline();
19868
19869 - atomic_set(&ap_wfs_count, 0);
19870 + atomic_set_unchecked(&ap_wfs_count, 0);
19871 register_hotcpu_notifier(&tboot_cpu_notifier);
19872 return 0;
19873 }
19874 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
19875 index be25734..87fe232 100644
19876 --- a/arch/x86/kernel/time.c
19877 +++ b/arch/x86/kernel/time.c
19878 @@ -26,17 +26,13 @@
19879 int timer_ack;
19880 #endif
19881
19882 -#ifdef CONFIG_X86_64
19883 -volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
19884 -#endif
19885 -
19886 unsigned long profile_pc(struct pt_regs *regs)
19887 {
19888 unsigned long pc = instruction_pointer(regs);
19889
19890 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
19891 + if (!user_mode(regs) && in_lock_functions(pc)) {
19892 #ifdef CONFIG_FRAME_POINTER
19893 - return *(unsigned long *)(regs->bp + sizeof(long));
19894 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
19895 #else
19896 unsigned long *sp =
19897 (unsigned long *)kernel_stack_pointer(regs);
19898 @@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
19899 * or above a saved flags. Eflags has bits 22-31 zero,
19900 * kernel addresses don't.
19901 */
19902 +
19903 +#ifdef CONFIG_PAX_KERNEXEC
19904 + return ktla_ktva(sp[0]);
19905 +#else
19906 if (sp[0] >> 22)
19907 return sp[0];
19908 if (sp[1] >> 22)
19909 return sp[1];
19910 #endif
19911 +
19912 +#endif
19913 }
19914 return pc;
19915 }
19916 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
19917 index 6bb7b85..dd853e1 100644
19918 --- a/arch/x86/kernel/tls.c
19919 +++ b/arch/x86/kernel/tls.c
19920 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
19921 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
19922 return -EINVAL;
19923
19924 +#ifdef CONFIG_PAX_SEGMEXEC
19925 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
19926 + return -EINVAL;
19927 +#endif
19928 +
19929 set_tls_desc(p, idx, &info, 1);
19930
19931 return 0;
19932 diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
19933 index 8508237..229b664 100644
19934 --- a/arch/x86/kernel/trampoline_32.S
19935 +++ b/arch/x86/kernel/trampoline_32.S
19936 @@ -32,6 +32,12 @@
19937 #include <asm/segment.h>
19938 #include <asm/page_types.h>
19939
19940 +#ifdef CONFIG_PAX_KERNEXEC
19941 +#define ta(X) (X)
19942 +#else
19943 +#define ta(X) ((X) - __PAGE_OFFSET)
19944 +#endif
19945 +
19946 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
19947 __CPUINITRODATA
19948 .code16
19949 @@ -60,7 +66,7 @@ r_base = .
19950 inc %ax # protected mode (PE) bit
19951 lmsw %ax # into protected mode
19952 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
19953 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
19954 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
19955
19956 # These need to be in the same 64K segment as the above;
19957 # hence we don't use the boot_gdt_descr defined in head.S
19958 diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
19959 index 3af2dff..ba8aa49 100644
19960 --- a/arch/x86/kernel/trampoline_64.S
19961 +++ b/arch/x86/kernel/trampoline_64.S
19962 @@ -91,7 +91,7 @@ startup_32:
19963 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
19964 movl %eax, %ds
19965
19966 - movl $X86_CR4_PAE, %eax
19967 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
19968 movl %eax, %cr4 # Enable PAE mode
19969
19970 # Setup trampoline 4 level pagetables
19971 @@ -127,7 +127,7 @@ startup_64:
19972 no_longmode:
19973 hlt
19974 jmp no_longmode
19975 -#include "verify_cpu_64.S"
19976 +#include "verify_cpu.S"
19977
19978 # Careful these need to be in the same 64K segment as the above;
19979 tidt:
19980 @@ -138,7 +138,7 @@ tidt:
19981 # so the kernel can live anywhere
19982 .balign 4
19983 tgdt:
19984 - .short tgdt_end - tgdt # gdt limit
19985 + .short tgdt_end - tgdt - 1 # gdt limit
19986 .long tgdt - r_base
19987 .short 0
19988 .quad 0x00cf9b000000ffff # __KERNEL32_CS
19989 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
19990 index 7e37dce..ec3f8e5 100644
19991 --- a/arch/x86/kernel/traps.c
19992 +++ b/arch/x86/kernel/traps.c
19993 @@ -69,12 +69,6 @@ asmlinkage int system_call(void);
19994
19995 /* Do we ignore FPU interrupts ? */
19996 char ignore_fpu_irq;
19997 -
19998 -/*
19999 - * The IDT has to be page-aligned to simplify the Pentium
20000 - * F0 0F bug workaround.
20001 - */
20002 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
20003 #endif
20004
20005 DECLARE_BITMAP(used_vectors, NR_VECTORS);
20006 @@ -112,19 +106,19 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
20007 static inline void
20008 die_if_kernel(const char *str, struct pt_regs *regs, long err)
20009 {
20010 - if (!user_mode_vm(regs))
20011 + if (!user_mode(regs))
20012 die(str, regs, err);
20013 }
20014 #endif
20015
20016 static void __kprobes
20017 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
20018 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
20019 long error_code, siginfo_t *info)
20020 {
20021 struct task_struct *tsk = current;
20022
20023 #ifdef CONFIG_X86_32
20024 - if (regs->flags & X86_VM_MASK) {
20025 + if (v8086_mode(regs)) {
20026 /*
20027 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
20028 * On nmi (interrupt 2), do_trap should not be called.
20029 @@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
20030 }
20031 #endif
20032
20033 - if (!user_mode(regs))
20034 + if (!user_mode_novm(regs))
20035 goto kernel_trap;
20036
20037 #ifdef CONFIG_X86_32
20038 @@ -158,7 +152,7 @@ trap_signal:
20039 printk_ratelimit()) {
20040 printk(KERN_INFO
20041 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
20042 - tsk->comm, tsk->pid, str,
20043 + tsk->comm, task_pid_nr(tsk), str,
20044 regs->ip, regs->sp, error_code);
20045 print_vma_addr(" in ", regs->ip);
20046 printk("\n");
20047 @@ -175,8 +169,20 @@ kernel_trap:
20048 if (!fixup_exception(regs)) {
20049 tsk->thread.error_code = error_code;
20050 tsk->thread.trap_no = trapnr;
20051 +
20052 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20053 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
20054 + str = "PAX: suspicious stack segment fault";
20055 +#endif
20056 +
20057 die(str, regs, error_code);
20058 }
20059 +
20060 +#ifdef CONFIG_PAX_REFCOUNT
20061 + if (trapnr == 4)
20062 + pax_report_refcount_overflow(regs);
20063 +#endif
20064 +
20065 return;
20066
20067 #ifdef CONFIG_X86_32
20068 @@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
20069 conditional_sti(regs);
20070
20071 #ifdef CONFIG_X86_32
20072 - if (regs->flags & X86_VM_MASK)
20073 + if (v8086_mode(regs))
20074 goto gp_in_vm86;
20075 #endif
20076
20077 tsk = current;
20078 - if (!user_mode(regs))
20079 + if (!user_mode_novm(regs))
20080 goto gp_in_kernel;
20081
20082 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20083 + if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
20084 + struct mm_struct *mm = tsk->mm;
20085 + unsigned long limit;
20086 +
20087 + down_write(&mm->mmap_sem);
20088 + limit = mm->context.user_cs_limit;
20089 + if (limit < TASK_SIZE) {
20090 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
20091 + up_write(&mm->mmap_sem);
20092 + return;
20093 + }
20094 + up_write(&mm->mmap_sem);
20095 + }
20096 +#endif
20097 +
20098 tsk->thread.error_code = error_code;
20099 tsk->thread.trap_no = 13;
20100
20101 @@ -305,6 +327,13 @@ gp_in_kernel:
20102 if (notify_die(DIE_GPF, "general protection fault", regs,
20103 error_code, 13, SIGSEGV) == NOTIFY_STOP)
20104 return;
20105 +
20106 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20107 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
20108 + die("PAX: suspicious general protection fault", regs, error_code);
20109 + else
20110 +#endif
20111 +
20112 die("general protection fault", regs, error_code);
20113 }
20114
20115 @@ -435,6 +464,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
20116 dotraplinkage notrace __kprobes void
20117 do_nmi(struct pt_regs *regs, long error_code)
20118 {
20119 +
20120 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20121 + if (!user_mode(regs)) {
20122 + unsigned long cs = regs->cs & 0xFFFF;
20123 + unsigned long ip = ktva_ktla(regs->ip);
20124 +
20125 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
20126 + regs->ip = ip;
20127 + }
20128 +#endif
20129 +
20130 nmi_enter();
20131
20132 inc_irq_stat(__nmi_count);
20133 @@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20134 }
20135
20136 #ifdef CONFIG_X86_32
20137 - if (regs->flags & X86_VM_MASK)
20138 + if (v8086_mode(regs))
20139 goto debug_vm86;
20140 #endif
20141
20142 @@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20143 * kernel space (but re-enable TF when returning to user mode).
20144 */
20145 if (condition & DR_STEP) {
20146 - if (!user_mode(regs))
20147 + if (!user_mode_novm(regs))
20148 goto clear_TF_reenable;
20149 }
20150
20151 @@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
20152 * Handle strange cache flush from user space exception
20153 * in all other cases. This is undocumented behaviour.
20154 */
20155 - if (regs->flags & X86_VM_MASK) {
20156 + if (v8086_mode(regs)) {
20157 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
20158 return;
20159 }
20160 @@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
20161 void __math_state_restore(void)
20162 {
20163 struct thread_info *thread = current_thread_info();
20164 - struct task_struct *tsk = thread->task;
20165 + struct task_struct *tsk = current;
20166
20167 /*
20168 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
20169 @@ -825,8 +865,7 @@ void __math_state_restore(void)
20170 */
20171 asmlinkage void math_state_restore(void)
20172 {
20173 - struct thread_info *thread = current_thread_info();
20174 - struct task_struct *tsk = thread->task;
20175 + struct task_struct *tsk = current;
20176
20177 if (!tsk_used_math(tsk)) {
20178 local_irq_enable();
20179 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
20180 new file mode 100644
20181 index 0000000..50c5edd
20182 --- /dev/null
20183 +++ b/arch/x86/kernel/verify_cpu.S
20184 @@ -0,0 +1,140 @@
20185 +/*
20186 + *
20187 + * verify_cpu.S - Code for cpu long mode and SSE verification. This
20188 + * code has been borrowed from boot/setup.S and was introduced by
20189 + * Andi Kleen.
20190 + *
20191 + * Copyright (c) 2007 Andi Kleen (ak@suse.de)
20192 + * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
20193 + * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
20194 + * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
20195 + *
20196 + * This source code is licensed under the GNU General Public License,
20197 + * Version 2. See the file COPYING for more details.
20198 + *
20199 + * This is a common code for verification whether CPU supports
20200 + * long mode and SSE or not. It is not called directly instead this
20201 + * file is included at various places and compiled in that context.
20202 + * This file is expected to run in 32bit code. Currently:
20203 + *
20204 + * arch/x86/boot/compressed/head_64.S: Boot cpu verification
20205 + * arch/x86/kernel/trampoline_64.S: secondary processor verification
20206 + * arch/x86/kernel/head_32.S: processor startup
20207 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
20208 + *
20209 + * verify_cpu, returns the status of longmode and SSE in register %eax.
20210 + * 0: Success 1: Failure
20211 + *
20212 + * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
20213 + *
20214 + * The caller needs to check for the error code and take the action
20215 + * appropriately. Either display a message or halt.
20216 + */
20217 +
20218 +#include <asm/cpufeature.h>
20219 +#include <asm/msr-index.h>
20220 +
20221 +verify_cpu:
20222 + pushfl # Save caller passed flags
20223 + pushl $0 # Kill any dangerous flags
20224 + popfl
20225 +
20226 + pushfl # standard way to check for cpuid
20227 + popl %eax
20228 + movl %eax,%ebx
20229 + xorl $0x200000,%eax
20230 + pushl %eax
20231 + popfl
20232 + pushfl
20233 + popl %eax
20234 + cmpl %eax,%ebx
20235 + jz verify_cpu_no_longmode # cpu has no cpuid
20236 +
20237 + movl $0x0,%eax # See if cpuid 1 is implemented
20238 + cpuid
20239 + cmpl $0x1,%eax
20240 + jb verify_cpu_no_longmode # no cpuid 1
20241 +
20242 + xor %di,%di
20243 + cmpl $0x68747541,%ebx # AuthenticAMD
20244 + jnz verify_cpu_noamd
20245 + cmpl $0x69746e65,%edx
20246 + jnz verify_cpu_noamd
20247 + cmpl $0x444d4163,%ecx
20248 + jnz verify_cpu_noamd
20249 + mov $1,%di # cpu is from AMD
20250 + jmp verify_cpu_check
20251 +
20252 +verify_cpu_noamd:
20253 + cmpl $0x756e6547,%ebx # GenuineIntel?
20254 + jnz verify_cpu_check
20255 + cmpl $0x49656e69,%edx
20256 + jnz verify_cpu_check
20257 + cmpl $0x6c65746e,%ecx
20258 + jnz verify_cpu_check
20259 +
20260 + # only call IA32_MISC_ENABLE when:
20261 + # family > 6 || (family == 6 && model >= 0xd)
20262 + movl $0x1, %eax # check CPU family and model
20263 + cpuid
20264 + movl %eax, %ecx
20265 +
20266 + andl $0x0ff00f00, %eax # mask family and extended family
20267 + shrl $8, %eax
20268 + cmpl $6, %eax
20269 + ja verify_cpu_clear_xd # family > 6, ok
20270 + jb verify_cpu_check # family < 6, skip
20271 +
20272 + andl $0x000f00f0, %ecx # mask model and extended model
20273 + shrl $4, %ecx
20274 + cmpl $0xd, %ecx
20275 + jb verify_cpu_check # family == 6, model < 0xd, skip
20276 +
20277 +verify_cpu_clear_xd:
20278 + movl $MSR_IA32_MISC_ENABLE, %ecx
20279 + rdmsr
20280 + btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
20281 + jnc verify_cpu_check # only write MSR if bit was changed
20282 + wrmsr
20283 +
20284 +verify_cpu_check:
20285 + movl $0x1,%eax # Does the cpu have what it takes
20286 + cpuid
20287 + andl $REQUIRED_MASK0,%edx
20288 + xorl $REQUIRED_MASK0,%edx
20289 + jnz verify_cpu_no_longmode
20290 +
20291 + movl $0x80000000,%eax # See if extended cpuid is implemented
20292 + cpuid
20293 + cmpl $0x80000001,%eax
20294 + jb verify_cpu_no_longmode # no extended cpuid
20295 +
20296 + movl $0x80000001,%eax # Does the cpu have what it takes
20297 + cpuid
20298 + andl $REQUIRED_MASK1,%edx
20299 + xorl $REQUIRED_MASK1,%edx
20300 + jnz verify_cpu_no_longmode
20301 +
20302 +verify_cpu_sse_test:
20303 + movl $1,%eax
20304 + cpuid
20305 + andl $SSE_MASK,%edx
20306 + cmpl $SSE_MASK,%edx
20307 + je verify_cpu_sse_ok
20308 + test %di,%di
20309 + jz verify_cpu_no_longmode # only try to force SSE on AMD
20310 + movl $MSR_K7_HWCR,%ecx
20311 + rdmsr
20312 + btr $15,%eax # enable SSE
20313 + wrmsr
20314 + xor %di,%di # don't loop
20315 + jmp verify_cpu_sse_test # try again
20316 +
20317 +verify_cpu_no_longmode:
20318 + popfl # Restore caller passed flags
20319 + movl $1,%eax
20320 + ret
20321 +verify_cpu_sse_ok:
20322 + popfl # Restore caller passed flags
20323 + xorl %eax, %eax
20324 + ret
20325 diff --git a/arch/x86/kernel/verify_cpu_64.S b/arch/x86/kernel/verify_cpu_64.S
20326 deleted file mode 100644
20327 index 45b6f8a..0000000
20328 --- a/arch/x86/kernel/verify_cpu_64.S
20329 +++ /dev/null
20330 @@ -1,105 +0,0 @@
20331 -/*
20332 - *
20333 - * verify_cpu.S - Code for cpu long mode and SSE verification. This
20334 - * code has been borrowed from boot/setup.S and was introduced by
20335 - * Andi Kleen.
20336 - *
20337 - * Copyright (c) 2007 Andi Kleen (ak@suse.de)
20338 - * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
20339 - * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
20340 - *
20341 - * This source code is licensed under the GNU General Public License,
20342 - * Version 2. See the file COPYING for more details.
20343 - *
20344 - * This is a common code for verification whether CPU supports
20345 - * long mode and SSE or not. It is not called directly instead this
20346 - * file is included at various places and compiled in that context.
20347 - * Following are the current usage.
20348 - *
20349 - * This file is included by both 16bit and 32bit code.
20350 - *
20351 - * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
20352 - * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
20353 - * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
20354 - * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
20355 - *
20356 - * verify_cpu, returns the status of cpu check in register %eax.
20357 - * 0: Success 1: Failure
20358 - *
20359 - * The caller needs to check for the error code and take the action
20360 - * appropriately. Either display a message or halt.
20361 - */
20362 -
20363 -#include <asm/cpufeature.h>
20364 -
20365 -verify_cpu:
20366 - pushfl # Save caller passed flags
20367 - pushl $0 # Kill any dangerous flags
20368 - popfl
20369 -
20370 - pushfl # standard way to check for cpuid
20371 - popl %eax
20372 - movl %eax,%ebx
20373 - xorl $0x200000,%eax
20374 - pushl %eax
20375 - popfl
20376 - pushfl
20377 - popl %eax
20378 - cmpl %eax,%ebx
20379 - jz verify_cpu_no_longmode # cpu has no cpuid
20380 -
20381 - movl $0x0,%eax # See if cpuid 1 is implemented
20382 - cpuid
20383 - cmpl $0x1,%eax
20384 - jb verify_cpu_no_longmode # no cpuid 1
20385 -
20386 - xor %di,%di
20387 - cmpl $0x68747541,%ebx # AuthenticAMD
20388 - jnz verify_cpu_noamd
20389 - cmpl $0x69746e65,%edx
20390 - jnz verify_cpu_noamd
20391 - cmpl $0x444d4163,%ecx
20392 - jnz verify_cpu_noamd
20393 - mov $1,%di # cpu is from AMD
20394 -
20395 -verify_cpu_noamd:
20396 - movl $0x1,%eax # Does the cpu have what it takes
20397 - cpuid
20398 - andl $REQUIRED_MASK0,%edx
20399 - xorl $REQUIRED_MASK0,%edx
20400 - jnz verify_cpu_no_longmode
20401 -
20402 - movl $0x80000000,%eax # See if extended cpuid is implemented
20403 - cpuid
20404 - cmpl $0x80000001,%eax
20405 - jb verify_cpu_no_longmode # no extended cpuid
20406 -
20407 - movl $0x80000001,%eax # Does the cpu have what it takes
20408 - cpuid
20409 - andl $REQUIRED_MASK1,%edx
20410 - xorl $REQUIRED_MASK1,%edx
20411 - jnz verify_cpu_no_longmode
20412 -
20413 -verify_cpu_sse_test:
20414 - movl $1,%eax
20415 - cpuid
20416 - andl $SSE_MASK,%edx
20417 - cmpl $SSE_MASK,%edx
20418 - je verify_cpu_sse_ok
20419 - test %di,%di
20420 - jz verify_cpu_no_longmode # only try to force SSE on AMD
20421 - movl $0xc0010015,%ecx # HWCR
20422 - rdmsr
20423 - btr $15,%eax # enable SSE
20424 - wrmsr
20425 - xor %di,%di # don't loop
20426 - jmp verify_cpu_sse_test # try again
20427 -
20428 -verify_cpu_no_longmode:
20429 - popfl # Restore caller passed flags
20430 - movl $1,%eax
20431 - ret
20432 -verify_cpu_sse_ok:
20433 - popfl # Restore caller passed flags
20434 - xorl %eax, %eax
20435 - ret
20436 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
20437 index 9c4e625..c992817 100644
20438 --- a/arch/x86/kernel/vm86_32.c
20439 +++ b/arch/x86/kernel/vm86_32.c
20440 @@ -41,6 +41,7 @@
20441 #include <linux/ptrace.h>
20442 #include <linux/audit.h>
20443 #include <linux/stddef.h>
20444 +#include <linux/grsecurity.h>
20445
20446 #include <asm/uaccess.h>
20447 #include <asm/io.h>
20448 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
20449 do_exit(SIGSEGV);
20450 }
20451
20452 - tss = &per_cpu(init_tss, get_cpu());
20453 + tss = init_tss + get_cpu();
20454 current->thread.sp0 = current->thread.saved_sp0;
20455 current->thread.sysenter_cs = __KERNEL_CS;
20456 load_sp0(tss, &current->thread);
20457 @@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
20458 struct task_struct *tsk;
20459 int tmp, ret = -EPERM;
20460
20461 +#ifdef CONFIG_GRKERNSEC_VM86
20462 + if (!capable(CAP_SYS_RAWIO)) {
20463 + gr_handle_vm86();
20464 + goto out;
20465 + }
20466 +#endif
20467 +
20468 tsk = current;
20469 if (tsk->thread.saved_sp0)
20470 goto out;
20471 @@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
20472 int tmp, ret;
20473 struct vm86plus_struct __user *v86;
20474
20475 +#ifdef CONFIG_GRKERNSEC_VM86
20476 + if (!capable(CAP_SYS_RAWIO)) {
20477 + gr_handle_vm86();
20478 + ret = -EPERM;
20479 + goto out;
20480 + }
20481 +#endif
20482 +
20483 tsk = current;
20484 switch (regs->bx) {
20485 case VM86_REQUEST_IRQ:
20486 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
20487 tsk->thread.saved_fs = info->regs32->fs;
20488 tsk->thread.saved_gs = get_user_gs(info->regs32);
20489
20490 - tss = &per_cpu(init_tss, get_cpu());
20491 + tss = init_tss + get_cpu();
20492 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
20493 if (cpu_has_sep)
20494 tsk->thread.sysenter_cs = 0;
20495 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
20496 goto cannot_handle;
20497 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
20498 goto cannot_handle;
20499 - intr_ptr = (unsigned long __user *) (i << 2);
20500 + intr_ptr = (__force unsigned long __user *) (i << 2);
20501 if (get_user(segoffs, intr_ptr))
20502 goto cannot_handle;
20503 if ((segoffs >> 16) == BIOSSEG)
20504 diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
20505 index d430e4c..831f817 100644
20506 --- a/arch/x86/kernel/vmi_32.c
20507 +++ b/arch/x86/kernel/vmi_32.c
20508 @@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
20509 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
20510
20511 #define call_vrom_func(rom,func) \
20512 - (((VROMFUNC *)(rom->func))())
20513 + (((VROMFUNC *)(ktva_ktla(rom.func)))())
20514
20515 #define call_vrom_long_func(rom,func,arg) \
20516 - (((VROMLONGFUNC *)(rom->func)) (arg))
20517 +({\
20518 + u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
20519 + struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
20520 + __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
20521 + __reloc;\
20522 +})
20523
20524 -static struct vrom_header *vmi_rom;
20525 +static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
20526 static int disable_pge;
20527 static int disable_pse;
20528 static int disable_sep;
20529 @@ -76,10 +81,10 @@ static struct {
20530 void (*set_initial_ap_state)(int, int);
20531 void (*halt)(void);
20532 void (*set_lazy_mode)(int mode);
20533 -} vmi_ops;
20534 +} __no_const vmi_ops __read_only;
20535
20536 /* Cached VMI operations */
20537 -struct vmi_timer_ops vmi_timer_ops;
20538 +struct vmi_timer_ops vmi_timer_ops __read_only;
20539
20540 /*
20541 * VMI patching routines.
20542 @@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
20543 static inline void patch_offset(void *insnbuf,
20544 unsigned long ip, unsigned long dest)
20545 {
20546 - *(unsigned long *)(insnbuf+1) = dest-ip-5;
20547 + *(unsigned long *)(insnbuf+1) = dest-ip-5;
20548 }
20549
20550 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
20551 @@ -102,6 +107,7 @@ static unsigned patch_internal(int call, unsigned len, void *insnbuf,
20552 {
20553 u64 reloc;
20554 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
20555 +
20556 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
20557 switch(rel->type) {
20558 case VMI_RELOCATION_CALL_REL:
20559 @@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud_t pudval)
20560
20561 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
20562 {
20563 - const pte_t pte = { .pte = 0 };
20564 + const pte_t pte = __pte(0ULL);
20565 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
20566 }
20567
20568 static void vmi_pmd_clear(pmd_t *pmd)
20569 {
20570 - const pte_t pte = { .pte = 0 };
20571 + const pte_t pte = __pte(0ULL);
20572 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
20573 }
20574 #endif
20575 @@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
20576 ap.ss = __KERNEL_DS;
20577 ap.esp = (unsigned long) start_esp;
20578
20579 - ap.ds = __USER_DS;
20580 - ap.es = __USER_DS;
20581 + ap.ds = __KERNEL_DS;
20582 + ap.es = __KERNEL_DS;
20583 ap.fs = __KERNEL_PERCPU;
20584 - ap.gs = __KERNEL_STACK_CANARY;
20585 + savesegment(gs, ap.gs);
20586
20587 ap.eflags = 0;
20588
20589 @@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
20590 paravirt_leave_lazy_mmu();
20591 }
20592
20593 +#ifdef CONFIG_PAX_KERNEXEC
20594 +static unsigned long vmi_pax_open_kernel(void)
20595 +{
20596 + return 0;
20597 +}
20598 +
20599 +static unsigned long vmi_pax_close_kernel(void)
20600 +{
20601 + return 0;
20602 +}
20603 +#endif
20604 +
20605 static inline int __init check_vmi_rom(struct vrom_header *rom)
20606 {
20607 struct pci_header *pci;
20608 @@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(struct vrom_header *rom)
20609 return 0;
20610 if (rom->vrom_signature != VMI_SIGNATURE)
20611 return 0;
20612 + if (rom->rom_length * 512 > sizeof(*rom)) {
20613 + printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
20614 + return 0;
20615 + }
20616 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
20617 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
20618 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
20619 @@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(void)
20620 struct vrom_header *romstart;
20621 romstart = (struct vrom_header *)isa_bus_to_virt(base);
20622 if (check_vmi_rom(romstart)) {
20623 - vmi_rom = romstart;
20624 + vmi_rom = *romstart;
20625 return 1;
20626 }
20627 }
20628 @@ -836,6 +858,11 @@ static inline int __init activate_vmi(void)
20629
20630 para_fill(pv_irq_ops.safe_halt, Halt);
20631
20632 +#ifdef CONFIG_PAX_KERNEXEC
20633 + pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
20634 + pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
20635 +#endif
20636 +
20637 /*
20638 * Alternative instruction rewriting doesn't happen soon enough
20639 * to convert VMI_IRET to a call instead of a jump; so we have
20640 @@ -853,16 +880,16 @@ static inline int __init activate_vmi(void)
20641
20642 void __init vmi_init(void)
20643 {
20644 - if (!vmi_rom)
20645 + if (!vmi_rom.rom_signature)
20646 probe_vmi_rom();
20647 else
20648 - check_vmi_rom(vmi_rom);
20649 + check_vmi_rom(&vmi_rom);
20650
20651 /* In case probing for or validating the ROM failed, basil */
20652 - if (!vmi_rom)
20653 + if (!vmi_rom.rom_signature)
20654 return;
20655
20656 - reserve_top_address(-vmi_rom->virtual_top);
20657 + reserve_top_address(-vmi_rom.virtual_top);
20658
20659 #ifdef CONFIG_X86_IO_APIC
20660 /* This is virtual hardware; timer routing is wired correctly */
20661 @@ -874,7 +901,7 @@ void __init vmi_activate(void)
20662 {
20663 unsigned long flags;
20664
20665 - if (!vmi_rom)
20666 + if (!vmi_rom.rom_signature)
20667 return;
20668
20669 local_irq_save(flags);
20670 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
20671 index 3c68fe2..12c8280 100644
20672 --- a/arch/x86/kernel/vmlinux.lds.S
20673 +++ b/arch/x86/kernel/vmlinux.lds.S
20674 @@ -26,6 +26,13 @@
20675 #include <asm/page_types.h>
20676 #include <asm/cache.h>
20677 #include <asm/boot.h>
20678 +#include <asm/segment.h>
20679 +
20680 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20681 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
20682 +#else
20683 +#define __KERNEL_TEXT_OFFSET 0
20684 +#endif
20685
20686 #undef i386 /* in case the preprocessor is a 32bit one */
20687
20688 @@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
20689 #ifdef CONFIG_X86_32
20690 OUTPUT_ARCH(i386)
20691 ENTRY(phys_startup_32)
20692 -jiffies = jiffies_64;
20693 #else
20694 OUTPUT_ARCH(i386:x86-64)
20695 ENTRY(phys_startup_64)
20696 -jiffies_64 = jiffies;
20697 #endif
20698
20699 PHDRS {
20700 text PT_LOAD FLAGS(5); /* R_E */
20701 - data PT_LOAD FLAGS(7); /* RWE */
20702 +#ifdef CONFIG_X86_32
20703 + module PT_LOAD FLAGS(5); /* R_E */
20704 +#endif
20705 +#ifdef CONFIG_XEN
20706 + rodata PT_LOAD FLAGS(5); /* R_E */
20707 +#else
20708 + rodata PT_LOAD FLAGS(4); /* R__ */
20709 +#endif
20710 + data PT_LOAD FLAGS(6); /* RW_ */
20711 #ifdef CONFIG_X86_64
20712 user PT_LOAD FLAGS(5); /* R_E */
20713 +#endif
20714 + init.begin PT_LOAD FLAGS(6); /* RW_ */
20715 #ifdef CONFIG_SMP
20716 percpu PT_LOAD FLAGS(6); /* RW_ */
20717 #endif
20718 + text.init PT_LOAD FLAGS(5); /* R_E */
20719 + text.exit PT_LOAD FLAGS(5); /* R_E */
20720 init PT_LOAD FLAGS(7); /* RWE */
20721 -#endif
20722 note PT_NOTE FLAGS(0); /* ___ */
20723 }
20724
20725 SECTIONS
20726 {
20727 #ifdef CONFIG_X86_32
20728 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
20729 - phys_startup_32 = startup_32 - LOAD_OFFSET;
20730 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
20731 #else
20732 - . = __START_KERNEL;
20733 - phys_startup_64 = startup_64 - LOAD_OFFSET;
20734 + . = __START_KERNEL;
20735 #endif
20736
20737 /* Text and read-only data */
20738 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
20739 - _text = .;
20740 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20741 /* bootstrapping code */
20742 +#ifdef CONFIG_X86_32
20743 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20744 +#else
20745 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20746 +#endif
20747 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20748 + _text = .;
20749 HEAD_TEXT
20750 #ifdef CONFIG_X86_32
20751 . = ALIGN(PAGE_SIZE);
20752 @@ -82,28 +102,71 @@ SECTIONS
20753 IRQENTRY_TEXT
20754 *(.fixup)
20755 *(.gnu.warning)
20756 - /* End of text section */
20757 - _etext = .;
20758 } :text = 0x9090
20759
20760 - NOTES :text :note
20761 + . += __KERNEL_TEXT_OFFSET;
20762
20763 - EXCEPTION_TABLE(16) :text = 0x9090
20764 +#ifdef CONFIG_X86_32
20765 + . = ALIGN(PAGE_SIZE);
20766 + .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
20767 + *(.vmi.rom)
20768 + } :module
20769 +
20770 + . = ALIGN(PAGE_SIZE);
20771 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
20772 +
20773 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
20774 + MODULES_EXEC_VADDR = .;
20775 + BYTE(0)
20776 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
20777 + . = ALIGN(HPAGE_SIZE);
20778 + MODULES_EXEC_END = . - 1;
20779 +#endif
20780 +
20781 + } :module
20782 +#endif
20783 +
20784 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
20785 + /* End of text section */
20786 + _etext = . - __KERNEL_TEXT_OFFSET;
20787 + }
20788 +
20789 +#ifdef CONFIG_X86_32
20790 + . = ALIGN(PAGE_SIZE);
20791 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
20792 + *(.idt)
20793 + . = ALIGN(PAGE_SIZE);
20794 + *(.empty_zero_page)
20795 + *(.swapper_pg_fixmap)
20796 + *(.swapper_pg_pmd)
20797 + *(.swapper_pg_dir)
20798 + *(.trampoline_pg_dir)
20799 + } :rodata
20800 +#endif
20801 +
20802 + . = ALIGN(PAGE_SIZE);
20803 + NOTES :rodata :note
20804 +
20805 + EXCEPTION_TABLE(16) :rodata
20806
20807 RO_DATA(PAGE_SIZE)
20808
20809 /* Data */
20810 .data : AT(ADDR(.data) - LOAD_OFFSET) {
20811 +
20812 +#ifdef CONFIG_PAX_KERNEXEC
20813 + . = ALIGN(HPAGE_SIZE);
20814 +#else
20815 + . = ALIGN(PAGE_SIZE);
20816 +#endif
20817 +
20818 /* Start of data section */
20819 _sdata = .;
20820
20821 /* init_task */
20822 INIT_TASK_DATA(THREAD_SIZE)
20823
20824 -#ifdef CONFIG_X86_32
20825 - /* 32 bit has nosave before _edata */
20826 NOSAVE_DATA
20827 -#endif
20828
20829 PAGE_ALIGNED_DATA(PAGE_SIZE)
20830
20831 @@ -112,6 +175,8 @@ SECTIONS
20832 DATA_DATA
20833 CONSTRUCTORS
20834
20835 + jiffies = jiffies_64;
20836 +
20837 /* rarely changed data like cpu maps */
20838 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
20839
20840 @@ -166,12 +231,6 @@ SECTIONS
20841 }
20842 vgetcpu_mode = VVIRT(.vgetcpu_mode);
20843
20844 - . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
20845 - .jiffies : AT(VLOAD(.jiffies)) {
20846 - *(.jiffies)
20847 - }
20848 - jiffies = VVIRT(.jiffies);
20849 -
20850 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
20851 *(.vsyscall_3)
20852 }
20853 @@ -187,12 +246,19 @@ SECTIONS
20854 #endif /* CONFIG_X86_64 */
20855
20856 /* Init code and data - will be freed after init */
20857 - . = ALIGN(PAGE_SIZE);
20858 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
20859 + BYTE(0)
20860 +
20861 +#ifdef CONFIG_PAX_KERNEXEC
20862 + . = ALIGN(HPAGE_SIZE);
20863 +#else
20864 + . = ALIGN(PAGE_SIZE);
20865 +#endif
20866 +
20867 __init_begin = .; /* paired with __init_end */
20868 - }
20869 + } :init.begin
20870
20871 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
20872 +#ifdef CONFIG_SMP
20873 /*
20874 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
20875 * output PHDR, so the next output section - .init.text - should
20876 @@ -201,12 +267,27 @@ SECTIONS
20877 PERCPU_VADDR(0, :percpu)
20878 #endif
20879
20880 - INIT_TEXT_SECTION(PAGE_SIZE)
20881 -#ifdef CONFIG_X86_64
20882 - :init
20883 -#endif
20884 + . = ALIGN(PAGE_SIZE);
20885 + init_begin = .;
20886 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
20887 + VMLINUX_SYMBOL(_sinittext) = .;
20888 + INIT_TEXT
20889 + VMLINUX_SYMBOL(_einittext) = .;
20890 + . = ALIGN(PAGE_SIZE);
20891 + } :text.init
20892
20893 - INIT_DATA_SECTION(16)
20894 + /*
20895 + * .exit.text is discard at runtime, not link time, to deal with
20896 + * references from .altinstructions and .eh_frame
20897 + */
20898 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20899 + EXIT_TEXT
20900 + . = ALIGN(16);
20901 + } :text.exit
20902 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
20903 +
20904 + . = ALIGN(PAGE_SIZE);
20905 + INIT_DATA_SECTION(16) :init
20906
20907 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
20908 __x86_cpu_dev_start = .;
20909 @@ -232,19 +313,11 @@ SECTIONS
20910 *(.altinstr_replacement)
20911 }
20912
20913 - /*
20914 - * .exit.text is discard at runtime, not link time, to deal with
20915 - * references from .altinstructions and .eh_frame
20916 - */
20917 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
20918 - EXIT_TEXT
20919 - }
20920 -
20921 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
20922 EXIT_DATA
20923 }
20924
20925 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
20926 +#ifndef CONFIG_SMP
20927 PERCPU(PAGE_SIZE)
20928 #endif
20929
20930 @@ -267,12 +340,6 @@ SECTIONS
20931 . = ALIGN(PAGE_SIZE);
20932 }
20933
20934 -#ifdef CONFIG_X86_64
20935 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
20936 - NOSAVE_DATA
20937 - }
20938 -#endif
20939 -
20940 /* BSS */
20941 . = ALIGN(PAGE_SIZE);
20942 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
20943 @@ -288,6 +355,7 @@ SECTIONS
20944 __brk_base = .;
20945 . += 64 * 1024; /* 64k alignment slop space */
20946 *(.brk_reservation) /* areas brk users have reserved */
20947 + . = ALIGN(HPAGE_SIZE);
20948 __brk_limit = .;
20949 }
20950
20951 @@ -316,13 +384,12 @@ SECTIONS
20952 * for the boot processor.
20953 */
20954 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
20955 -INIT_PER_CPU(gdt_page);
20956 INIT_PER_CPU(irq_stack_union);
20957
20958 /*
20959 * Build-time check on the image size:
20960 */
20961 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
20962 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
20963 "kernel image bigger than KERNEL_IMAGE_SIZE");
20964
20965 #ifdef CONFIG_SMP
20966 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
20967 index 62f39d7..3bc46a1 100644
20968 --- a/arch/x86/kernel/vsyscall_64.c
20969 +++ b/arch/x86/kernel/vsyscall_64.c
20970 @@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
20971
20972 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
20973 /* copy vsyscall data */
20974 + strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
20975 vsyscall_gtod_data.clock.vread = clock->vread;
20976 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
20977 vsyscall_gtod_data.clock.mask = clock->mask;
20978 @@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
20979 We do this here because otherwise user space would do it on
20980 its own in a likely inferior way (no access to jiffies).
20981 If you don't like it pass NULL. */
20982 - if (tcache && tcache->blob[0] == (j = __jiffies)) {
20983 + if (tcache && tcache->blob[0] == (j = jiffies)) {
20984 p = tcache->blob[1];
20985 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
20986 /* Load per CPU data from RDTSCP */
20987 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
20988 index 3909e3b..5433a97 100644
20989 --- a/arch/x86/kernel/x8664_ksyms_64.c
20990 +++ b/arch/x86/kernel/x8664_ksyms_64.c
20991 @@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
20992
20993 EXPORT_SYMBOL(copy_user_generic);
20994 EXPORT_SYMBOL(__copy_user_nocache);
20995 -EXPORT_SYMBOL(copy_from_user);
20996 -EXPORT_SYMBOL(copy_to_user);
20997 EXPORT_SYMBOL(__copy_from_user_inatomic);
20998
20999 EXPORT_SYMBOL(copy_page);
21000 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
21001 index c5ee17e..d63218f 100644
21002 --- a/arch/x86/kernel/xsave.c
21003 +++ b/arch/x86/kernel/xsave.c
21004 @@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
21005 fx_sw_user->xstate_size > fx_sw_user->extended_size)
21006 return -1;
21007
21008 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
21009 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
21010 fx_sw_user->extended_size -
21011 FP_XSTATE_MAGIC2_SIZE));
21012 /*
21013 @@ -196,7 +196,7 @@ fx_only:
21014 * the other extended state.
21015 */
21016 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
21017 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
21018 + return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
21019 }
21020
21021 /*
21022 @@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf)
21023 if (task_thread_info(tsk)->status & TS_XSAVE)
21024 err = restore_user_xstate(buf);
21025 else
21026 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
21027 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
21028 buf);
21029 if (unlikely(err)) {
21030 /*
21031 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
21032 index 1350e43..a94b011 100644
21033 --- a/arch/x86/kvm/emulate.c
21034 +++ b/arch/x86/kvm/emulate.c
21035 @@ -81,8 +81,8 @@
21036 #define Src2CL (1<<29)
21037 #define Src2ImmByte (2<<29)
21038 #define Src2One (3<<29)
21039 -#define Src2Imm16 (4<<29)
21040 -#define Src2Mask (7<<29)
21041 +#define Src2Imm16 (4U<<29)
21042 +#define Src2Mask (7U<<29)
21043
21044 enum {
21045 Group1_80, Group1_81, Group1_82, Group1_83,
21046 @@ -411,6 +411,7 @@ static u32 group2_table[] = {
21047
21048 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
21049 do { \
21050 + unsigned long _tmp; \
21051 __asm__ __volatile__ ( \
21052 _PRE_EFLAGS("0", "4", "2") \
21053 _op _suffix " %"_x"3,%1; " \
21054 @@ -424,8 +425,6 @@ static u32 group2_table[] = {
21055 /* Raw emulation: instruction has two explicit operands. */
21056 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
21057 do { \
21058 - unsigned long _tmp; \
21059 - \
21060 switch ((_dst).bytes) { \
21061 case 2: \
21062 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
21063 @@ -441,7 +440,6 @@ static u32 group2_table[] = {
21064
21065 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
21066 do { \
21067 - unsigned long _tmp; \
21068 switch ((_dst).bytes) { \
21069 case 1: \
21070 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
21071 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
21072 index 8dfeaaa..4daa395 100644
21073 --- a/arch/x86/kvm/lapic.c
21074 +++ b/arch/x86/kvm/lapic.c
21075 @@ -52,7 +52,7 @@
21076 #define APIC_BUS_CYCLE_NS 1
21077
21078 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
21079 -#define apic_debug(fmt, arg...)
21080 +#define apic_debug(fmt, arg...) do {} while (0)
21081
21082 #define APIC_LVT_NUM 6
21083 /* 14 is the version for Xeon and Pentium 8.4.8*/
21084 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
21085 index 3bc2707..dd157e2 100644
21086 --- a/arch/x86/kvm/paging_tmpl.h
21087 +++ b/arch/x86/kvm/paging_tmpl.h
21088 @@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
21089 int level = PT_PAGE_TABLE_LEVEL;
21090 unsigned long mmu_seq;
21091
21092 + pax_track_stack();
21093 +
21094 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
21095 kvm_mmu_audit(vcpu, "pre page fault");
21096
21097 @@ -461,6 +463,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
21098 kvm_mmu_free_some_pages(vcpu);
21099 sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
21100 level, &write_pt, pfn);
21101 + (void)sptep;
21102 pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
21103 sptep, *sptep, write_pt);
21104
21105 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
21106 index 7c6e63e..c5d92c1 100644
21107 --- a/arch/x86/kvm/svm.c
21108 +++ b/arch/x86/kvm/svm.c
21109 @@ -2486,7 +2486,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
21110 int cpu = raw_smp_processor_id();
21111
21112 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
21113 +
21114 + pax_open_kernel();
21115 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
21116 + pax_close_kernel();
21117 +
21118 load_TR_desc();
21119 }
21120
21121 @@ -2947,7 +2951,7 @@ static bool svm_gb_page_enable(void)
21122 return true;
21123 }
21124
21125 -static struct kvm_x86_ops svm_x86_ops = {
21126 +static const struct kvm_x86_ops svm_x86_ops = {
21127 .cpu_has_kvm_support = has_svm,
21128 .disabled_by_bios = is_disabled,
21129 .hardware_setup = svm_hardware_setup,
21130 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
21131 index e6d925f..e7a4af8 100644
21132 --- a/arch/x86/kvm/vmx.c
21133 +++ b/arch/x86/kvm/vmx.c
21134 @@ -570,7 +570,11 @@ static void reload_tss(void)
21135
21136 kvm_get_gdt(&gdt);
21137 descs = (void *)gdt.base;
21138 +
21139 + pax_open_kernel();
21140 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
21141 + pax_close_kernel();
21142 +
21143 load_TR_desc();
21144 }
21145
21146 @@ -1410,8 +1414,11 @@ static __init int hardware_setup(void)
21147 if (!cpu_has_vmx_flexpriority())
21148 flexpriority_enabled = 0;
21149
21150 - if (!cpu_has_vmx_tpr_shadow())
21151 - kvm_x86_ops->update_cr8_intercept = NULL;
21152 + if (!cpu_has_vmx_tpr_shadow()) {
21153 + pax_open_kernel();
21154 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
21155 + pax_close_kernel();
21156 + }
21157
21158 if (enable_ept && !cpu_has_vmx_ept_2m_page())
21159 kvm_disable_largepages();
21160 @@ -2362,7 +2369,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
21161 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
21162
21163 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
21164 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
21165 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
21166 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
21167 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
21168 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
21169 @@ -3718,6 +3725,12 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21170 "jmp .Lkvm_vmx_return \n\t"
21171 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
21172 ".Lkvm_vmx_return: "
21173 +
21174 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21175 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
21176 + ".Lkvm_vmx_return2: "
21177 +#endif
21178 +
21179 /* Save guest registers, load host registers, keep flags */
21180 "xchg %0, (%%"R"sp) \n\t"
21181 "mov %%"R"ax, %c[rax](%0) \n\t"
21182 @@ -3764,8 +3777,13 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21183 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
21184 #endif
21185 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
21186 +
21187 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21188 + ,[cs]"i"(__KERNEL_CS)
21189 +#endif
21190 +
21191 : "cc", "memory"
21192 - , R"bx", R"di", R"si"
21193 + , R"ax", R"bx", R"di", R"si"
21194 #ifdef CONFIG_X86_64
21195 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
21196 #endif
21197 @@ -3782,7 +3800,16 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21198 if (vmx->rmode.irq.pending)
21199 fixup_rmode_irq(vmx);
21200
21201 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
21202 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
21203 +
21204 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21205 + loadsegment(fs, __KERNEL_PERCPU);
21206 +#endif
21207 +
21208 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21209 + __set_fs(current_thread_info()->addr_limit);
21210 +#endif
21211 +
21212 vmx->launched = 1;
21213
21214 vmx_complete_interrupts(vmx);
21215 @@ -3957,7 +3984,7 @@ static bool vmx_gb_page_enable(void)
21216 return false;
21217 }
21218
21219 -static struct kvm_x86_ops vmx_x86_ops = {
21220 +static const struct kvm_x86_ops vmx_x86_ops = {
21221 .cpu_has_kvm_support = cpu_has_kvm_support,
21222 .disabled_by_bios = vmx_disabled_by_bios,
21223 .hardware_setup = hardware_setup,
21224 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
21225 index df1cefb..5e882ad 100644
21226 --- a/arch/x86/kvm/x86.c
21227 +++ b/arch/x86/kvm/x86.c
21228 @@ -82,7 +82,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu);
21229 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
21230 struct kvm_cpuid_entry2 __user *entries);
21231
21232 -struct kvm_x86_ops *kvm_x86_ops;
21233 +const struct kvm_x86_ops *kvm_x86_ops;
21234 EXPORT_SYMBOL_GPL(kvm_x86_ops);
21235
21236 int ignore_msrs = 0;
21237 @@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
21238 struct kvm_cpuid2 *cpuid,
21239 struct kvm_cpuid_entry2 __user *entries)
21240 {
21241 - int r;
21242 + int r, i;
21243
21244 r = -E2BIG;
21245 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
21246 goto out;
21247 r = -EFAULT;
21248 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
21249 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21250 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21251 goto out;
21252 + for (i = 0; i < cpuid->nent; ++i) {
21253 + struct kvm_cpuid_entry2 cpuid_entry;
21254 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
21255 + goto out;
21256 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
21257 + }
21258 vcpu->arch.cpuid_nent = cpuid->nent;
21259 kvm_apic_set_version(vcpu);
21260 return 0;
21261 @@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
21262 struct kvm_cpuid2 *cpuid,
21263 struct kvm_cpuid_entry2 __user *entries)
21264 {
21265 - int r;
21266 + int r, i;
21267
21268 vcpu_load(vcpu);
21269 r = -E2BIG;
21270 if (cpuid->nent < vcpu->arch.cpuid_nent)
21271 goto out;
21272 r = -EFAULT;
21273 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
21274 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21275 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21276 goto out;
21277 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
21278 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
21279 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
21280 + goto out;
21281 + }
21282 return 0;
21283
21284 out:
21285 @@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
21286 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
21287 struct kvm_interrupt *irq)
21288 {
21289 - if (irq->irq < 0 || irq->irq >= 256)
21290 + if (irq->irq >= 256)
21291 return -EINVAL;
21292 if (irqchip_in_kernel(vcpu->kvm))
21293 return -ENXIO;
21294 @@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cpufreq_notifier_block = {
21295 .notifier_call = kvmclock_cpufreq_notifier
21296 };
21297
21298 -int kvm_arch_init(void *opaque)
21299 +int kvm_arch_init(const void *opaque)
21300 {
21301 int r, cpu;
21302 - struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
21303 + const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
21304
21305 if (kvm_x86_ops) {
21306 printk(KERN_ERR "kvm: already loaded the other module\n");
21307 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
21308 index 7e59dc1..b88c98f 100644
21309 --- a/arch/x86/lguest/boot.c
21310 +++ b/arch/x86/lguest/boot.c
21311 @@ -1172,9 +1172,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
21312 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
21313 * Launcher to reboot us.
21314 */
21315 -static void lguest_restart(char *reason)
21316 +static __noreturn void lguest_restart(char *reason)
21317 {
21318 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
21319 + BUG();
21320 }
21321
21322 /*G:050
21323 diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
21324 index 824fa0b..c619e96 100644
21325 --- a/arch/x86/lib/atomic64_32.c
21326 +++ b/arch/x86/lib/atomic64_32.c
21327 @@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val)
21328 }
21329 EXPORT_SYMBOL(atomic64_cmpxchg);
21330
21331 +u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
21332 +{
21333 + return cmpxchg8b(&ptr->counter, old_val, new_val);
21334 +}
21335 +EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
21336 +
21337 /**
21338 * atomic64_xchg - xchg atomic64 variable
21339 * @ptr: pointer to type atomic64_t
21340 @@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 new_val)
21341 EXPORT_SYMBOL(atomic64_xchg);
21342
21343 /**
21344 + * atomic64_xchg_unchecked - xchg atomic64 variable
21345 + * @ptr: pointer to type atomic64_unchecked_t
21346 + * @new_val: value to assign
21347 + *
21348 + * Atomically xchgs the value of @ptr to @new_val and returns
21349 + * the old value.
21350 + */
21351 +u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
21352 +{
21353 + /*
21354 + * Try first with a (possibly incorrect) assumption about
21355 + * what we have there. We'll do two loops most likely,
21356 + * but we'll get an ownership MESI transaction straight away
21357 + * instead of a read transaction followed by a
21358 + * flush-for-ownership transaction:
21359 + */
21360 + u64 old_val, real_val = 0;
21361 +
21362 + do {
21363 + old_val = real_val;
21364 +
21365 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
21366 +
21367 + } while (real_val != old_val);
21368 +
21369 + return old_val;
21370 +}
21371 +EXPORT_SYMBOL(atomic64_xchg_unchecked);
21372 +
21373 +/**
21374 * atomic64_set - set atomic64 variable
21375 * @ptr: pointer to type atomic64_t
21376 * @new_val: value to assign
21377 @@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 new_val)
21378 EXPORT_SYMBOL(atomic64_set);
21379
21380 /**
21381 -EXPORT_SYMBOL(atomic64_read);
21382 + * atomic64_unchecked_set - set atomic64 variable
21383 + * @ptr: pointer to type atomic64_unchecked_t
21384 + * @new_val: value to assign
21385 + *
21386 + * Atomically sets the value of @ptr to @new_val.
21387 + */
21388 +void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
21389 +{
21390 + atomic64_xchg_unchecked(ptr, new_val);
21391 +}
21392 +EXPORT_SYMBOL(atomic64_set_unchecked);
21393 +
21394 +/**
21395 * atomic64_add_return - add and return
21396 * @delta: integer value to add
21397 * @ptr: pointer to type atomic64_t
21398 @@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 delta, atomic64_t *ptr)
21399 }
21400 EXPORT_SYMBOL(atomic64_add_return);
21401
21402 +/**
21403 + * atomic64_add_return_unchecked - add and return
21404 + * @delta: integer value to add
21405 + * @ptr: pointer to type atomic64_unchecked_t
21406 + *
21407 + * Atomically adds @delta to @ptr and returns @delta + *@ptr
21408 + */
21409 +noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21410 +{
21411 + /*
21412 + * Try first with a (possibly incorrect) assumption about
21413 + * what we have there. We'll do two loops most likely,
21414 + * but we'll get an ownership MESI transaction straight away
21415 + * instead of a read transaction followed by a
21416 + * flush-for-ownership transaction:
21417 + */
21418 + u64 old_val, new_val, real_val = 0;
21419 +
21420 + do {
21421 + old_val = real_val;
21422 + new_val = old_val + delta;
21423 +
21424 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
21425 +
21426 + } while (real_val != old_val);
21427 +
21428 + return new_val;
21429 +}
21430 +EXPORT_SYMBOL(atomic64_add_return_unchecked);
21431 +
21432 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
21433 {
21434 return atomic64_add_return(-delta, ptr);
21435 }
21436 EXPORT_SYMBOL(atomic64_sub_return);
21437
21438 +u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21439 +{
21440 + return atomic64_add_return_unchecked(-delta, ptr);
21441 +}
21442 +EXPORT_SYMBOL(atomic64_sub_return_unchecked);
21443 +
21444 u64 atomic64_inc_return(atomic64_t *ptr)
21445 {
21446 return atomic64_add_return(1, ptr);
21447 }
21448 EXPORT_SYMBOL(atomic64_inc_return);
21449
21450 +u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
21451 +{
21452 + return atomic64_add_return_unchecked(1, ptr);
21453 +}
21454 +EXPORT_SYMBOL(atomic64_inc_return_unchecked);
21455 +
21456 u64 atomic64_dec_return(atomic64_t *ptr)
21457 {
21458 return atomic64_sub_return(1, ptr);
21459 }
21460 EXPORT_SYMBOL(atomic64_dec_return);
21461
21462 +u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
21463 +{
21464 + return atomic64_sub_return_unchecked(1, ptr);
21465 +}
21466 +EXPORT_SYMBOL(atomic64_dec_return_unchecked);
21467 +
21468 /**
21469 * atomic64_add - add integer to atomic64 variable
21470 * @delta: integer value to add
21471 @@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t *ptr)
21472 EXPORT_SYMBOL(atomic64_add);
21473
21474 /**
21475 + * atomic64_add_unchecked - add integer to atomic64 variable
21476 + * @delta: integer value to add
21477 + * @ptr: pointer to type atomic64_unchecked_t
21478 + *
21479 + * Atomically adds @delta to @ptr.
21480 + */
21481 +void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21482 +{
21483 + atomic64_add_return_unchecked(delta, ptr);
21484 +}
21485 +EXPORT_SYMBOL(atomic64_add_unchecked);
21486 +
21487 +/**
21488 * atomic64_sub - subtract the atomic64 variable
21489 * @delta: integer value to subtract
21490 * @ptr: pointer to type atomic64_t
21491 @@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t *ptr)
21492 EXPORT_SYMBOL(atomic64_sub);
21493
21494 /**
21495 + * atomic64_sub_unchecked - subtract the atomic64 variable
21496 + * @delta: integer value to subtract
21497 + * @ptr: pointer to type atomic64_unchecked_t
21498 + *
21499 + * Atomically subtracts @delta from @ptr.
21500 + */
21501 +void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21502 +{
21503 + atomic64_add_unchecked(-delta, ptr);
21504 +}
21505 +EXPORT_SYMBOL(atomic64_sub_unchecked);
21506 +
21507 +/**
21508 * atomic64_sub_and_test - subtract value from variable and test result
21509 * @delta: integer value to subtract
21510 * @ptr: pointer to type atomic64_t
21511 @@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
21512 EXPORT_SYMBOL(atomic64_inc);
21513
21514 /**
21515 + * atomic64_inc_unchecked - increment atomic64 variable
21516 + * @ptr: pointer to type atomic64_unchecked_t
21517 + *
21518 + * Atomically increments @ptr by 1.
21519 + */
21520 +void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
21521 +{
21522 + atomic64_add_unchecked(1, ptr);
21523 +}
21524 +EXPORT_SYMBOL(atomic64_inc_unchecked);
21525 +
21526 +/**
21527 * atomic64_dec - decrement atomic64 variable
21528 * @ptr: pointer to type atomic64_t
21529 *
21530 @@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
21531 EXPORT_SYMBOL(atomic64_dec);
21532
21533 /**
21534 + * atomic64_dec_unchecked - decrement atomic64 variable
21535 + * @ptr: pointer to type atomic64_unchecked_t
21536 + *
21537 + * Atomically decrements @ptr by 1.
21538 + */
21539 +void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
21540 +{
21541 + atomic64_sub_unchecked(1, ptr);
21542 +}
21543 +EXPORT_SYMBOL(atomic64_dec_unchecked);
21544 +
21545 +/**
21546 * atomic64_dec_and_test - decrement and test
21547 * @ptr: pointer to type atomic64_t
21548 *
21549 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
21550 index adbccd0..98f96c8 100644
21551 --- a/arch/x86/lib/checksum_32.S
21552 +++ b/arch/x86/lib/checksum_32.S
21553 @@ -28,7 +28,8 @@
21554 #include <linux/linkage.h>
21555 #include <asm/dwarf2.h>
21556 #include <asm/errno.h>
21557 -
21558 +#include <asm/segment.h>
21559 +
21560 /*
21561 * computes a partial checksum, e.g. for TCP/UDP fragments
21562 */
21563 @@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
21564
21565 #define ARGBASE 16
21566 #define FP 12
21567 -
21568 -ENTRY(csum_partial_copy_generic)
21569 +
21570 +ENTRY(csum_partial_copy_generic_to_user)
21571 CFI_STARTPROC
21572 +
21573 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21574 + pushl %gs
21575 + CFI_ADJUST_CFA_OFFSET 4
21576 + popl %es
21577 + CFI_ADJUST_CFA_OFFSET -4
21578 + jmp csum_partial_copy_generic
21579 +#endif
21580 +
21581 +ENTRY(csum_partial_copy_generic_from_user)
21582 +
21583 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21584 + pushl %gs
21585 + CFI_ADJUST_CFA_OFFSET 4
21586 + popl %ds
21587 + CFI_ADJUST_CFA_OFFSET -4
21588 +#endif
21589 +
21590 +ENTRY(csum_partial_copy_generic)
21591 subl $4,%esp
21592 CFI_ADJUST_CFA_OFFSET 4
21593 pushl %edi
21594 @@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
21595 jmp 4f
21596 SRC(1: movw (%esi), %bx )
21597 addl $2, %esi
21598 -DST( movw %bx, (%edi) )
21599 +DST( movw %bx, %es:(%edi) )
21600 addl $2, %edi
21601 addw %bx, %ax
21602 adcl $0, %eax
21603 @@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
21604 SRC(1: movl (%esi), %ebx )
21605 SRC( movl 4(%esi), %edx )
21606 adcl %ebx, %eax
21607 -DST( movl %ebx, (%edi) )
21608 +DST( movl %ebx, %es:(%edi) )
21609 adcl %edx, %eax
21610 -DST( movl %edx, 4(%edi) )
21611 +DST( movl %edx, %es:4(%edi) )
21612
21613 SRC( movl 8(%esi), %ebx )
21614 SRC( movl 12(%esi), %edx )
21615 adcl %ebx, %eax
21616 -DST( movl %ebx, 8(%edi) )
21617 +DST( movl %ebx, %es:8(%edi) )
21618 adcl %edx, %eax
21619 -DST( movl %edx, 12(%edi) )
21620 +DST( movl %edx, %es:12(%edi) )
21621
21622 SRC( movl 16(%esi), %ebx )
21623 SRC( movl 20(%esi), %edx )
21624 adcl %ebx, %eax
21625 -DST( movl %ebx, 16(%edi) )
21626 +DST( movl %ebx, %es:16(%edi) )
21627 adcl %edx, %eax
21628 -DST( movl %edx, 20(%edi) )
21629 +DST( movl %edx, %es:20(%edi) )
21630
21631 SRC( movl 24(%esi), %ebx )
21632 SRC( movl 28(%esi), %edx )
21633 adcl %ebx, %eax
21634 -DST( movl %ebx, 24(%edi) )
21635 +DST( movl %ebx, %es:24(%edi) )
21636 adcl %edx, %eax
21637 -DST( movl %edx, 28(%edi) )
21638 +DST( movl %edx, %es:28(%edi) )
21639
21640 lea 32(%esi), %esi
21641 lea 32(%edi), %edi
21642 @@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
21643 shrl $2, %edx # This clears CF
21644 SRC(3: movl (%esi), %ebx )
21645 adcl %ebx, %eax
21646 -DST( movl %ebx, (%edi) )
21647 +DST( movl %ebx, %es:(%edi) )
21648 lea 4(%esi), %esi
21649 lea 4(%edi), %edi
21650 dec %edx
21651 @@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
21652 jb 5f
21653 SRC( movw (%esi), %cx )
21654 leal 2(%esi), %esi
21655 -DST( movw %cx, (%edi) )
21656 +DST( movw %cx, %es:(%edi) )
21657 leal 2(%edi), %edi
21658 je 6f
21659 shll $16,%ecx
21660 SRC(5: movb (%esi), %cl )
21661 -DST( movb %cl, (%edi) )
21662 +DST( movb %cl, %es:(%edi) )
21663 6: addl %ecx, %eax
21664 adcl $0, %eax
21665 7:
21666 @@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
21667
21668 6001:
21669 movl ARGBASE+20(%esp), %ebx # src_err_ptr
21670 - movl $-EFAULT, (%ebx)
21671 + movl $-EFAULT, %ss:(%ebx)
21672
21673 # zero the complete destination - computing the rest
21674 # is too much work
21675 @@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
21676
21677 6002:
21678 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21679 - movl $-EFAULT,(%ebx)
21680 + movl $-EFAULT,%ss:(%ebx)
21681 jmp 5000b
21682
21683 .previous
21684
21685 + pushl %ss
21686 + CFI_ADJUST_CFA_OFFSET 4
21687 + popl %ds
21688 + CFI_ADJUST_CFA_OFFSET -4
21689 + pushl %ss
21690 + CFI_ADJUST_CFA_OFFSET 4
21691 + popl %es
21692 + CFI_ADJUST_CFA_OFFSET -4
21693 popl %ebx
21694 CFI_ADJUST_CFA_OFFSET -4
21695 CFI_RESTORE ebx
21696 @@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
21697 CFI_ADJUST_CFA_OFFSET -4
21698 ret
21699 CFI_ENDPROC
21700 -ENDPROC(csum_partial_copy_generic)
21701 +ENDPROC(csum_partial_copy_generic_to_user)
21702
21703 #else
21704
21705 /* Version for PentiumII/PPro */
21706
21707 #define ROUND1(x) \
21708 + nop; nop; nop; \
21709 SRC(movl x(%esi), %ebx ) ; \
21710 addl %ebx, %eax ; \
21711 - DST(movl %ebx, x(%edi) ) ;
21712 + DST(movl %ebx, %es:x(%edi)) ;
21713
21714 #define ROUND(x) \
21715 + nop; nop; nop; \
21716 SRC(movl x(%esi), %ebx ) ; \
21717 adcl %ebx, %eax ; \
21718 - DST(movl %ebx, x(%edi) ) ;
21719 + DST(movl %ebx, %es:x(%edi)) ;
21720
21721 #define ARGBASE 12
21722 -
21723 -ENTRY(csum_partial_copy_generic)
21724 +
21725 +ENTRY(csum_partial_copy_generic_to_user)
21726 CFI_STARTPROC
21727 +
21728 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21729 + pushl %gs
21730 + CFI_ADJUST_CFA_OFFSET 4
21731 + popl %es
21732 + CFI_ADJUST_CFA_OFFSET -4
21733 + jmp csum_partial_copy_generic
21734 +#endif
21735 +
21736 +ENTRY(csum_partial_copy_generic_from_user)
21737 +
21738 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21739 + pushl %gs
21740 + CFI_ADJUST_CFA_OFFSET 4
21741 + popl %ds
21742 + CFI_ADJUST_CFA_OFFSET -4
21743 +#endif
21744 +
21745 +ENTRY(csum_partial_copy_generic)
21746 pushl %ebx
21747 CFI_ADJUST_CFA_OFFSET 4
21748 CFI_REL_OFFSET ebx, 0
21749 @@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
21750 subl %ebx, %edi
21751 lea -1(%esi),%edx
21752 andl $-32,%edx
21753 - lea 3f(%ebx,%ebx), %ebx
21754 + lea 3f(%ebx,%ebx,2), %ebx
21755 testl %esi, %esi
21756 jmp *%ebx
21757 1: addl $64,%esi
21758 @@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
21759 jb 5f
21760 SRC( movw (%esi), %dx )
21761 leal 2(%esi), %esi
21762 -DST( movw %dx, (%edi) )
21763 +DST( movw %dx, %es:(%edi) )
21764 leal 2(%edi), %edi
21765 je 6f
21766 shll $16,%edx
21767 5:
21768 SRC( movb (%esi), %dl )
21769 -DST( movb %dl, (%edi) )
21770 +DST( movb %dl, %es:(%edi) )
21771 6: addl %edx, %eax
21772 adcl $0, %eax
21773 7:
21774 .section .fixup, "ax"
21775 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
21776 - movl $-EFAULT, (%ebx)
21777 + movl $-EFAULT, %ss:(%ebx)
21778 # zero the complete destination (computing the rest is too much work)
21779 movl ARGBASE+8(%esp),%edi # dst
21780 movl ARGBASE+12(%esp),%ecx # len
21781 @@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
21782 rep; stosb
21783 jmp 7b
21784 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21785 - movl $-EFAULT, (%ebx)
21786 + movl $-EFAULT, %ss:(%ebx)
21787 jmp 7b
21788 .previous
21789
21790 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21791 + pushl %ss
21792 + CFI_ADJUST_CFA_OFFSET 4
21793 + popl %ds
21794 + CFI_ADJUST_CFA_OFFSET -4
21795 + pushl %ss
21796 + CFI_ADJUST_CFA_OFFSET 4
21797 + popl %es
21798 + CFI_ADJUST_CFA_OFFSET -4
21799 +#endif
21800 +
21801 popl %esi
21802 CFI_ADJUST_CFA_OFFSET -4
21803 CFI_RESTORE esi
21804 @@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
21805 CFI_RESTORE ebx
21806 ret
21807 CFI_ENDPROC
21808 -ENDPROC(csum_partial_copy_generic)
21809 +ENDPROC(csum_partial_copy_generic_to_user)
21810
21811 #undef ROUND
21812 #undef ROUND1
21813 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
21814 index ebeafcc..1e3a402 100644
21815 --- a/arch/x86/lib/clear_page_64.S
21816 +++ b/arch/x86/lib/clear_page_64.S
21817 @@ -1,5 +1,6 @@
21818 #include <linux/linkage.h>
21819 #include <asm/dwarf2.h>
21820 +#include <asm/alternative-asm.h>
21821
21822 /*
21823 * Zero a page.
21824 @@ -10,6 +11,7 @@ ENTRY(clear_page_c)
21825 movl $4096/8,%ecx
21826 xorl %eax,%eax
21827 rep stosq
21828 + pax_force_retaddr
21829 ret
21830 CFI_ENDPROC
21831 ENDPROC(clear_page_c)
21832 @@ -33,6 +35,7 @@ ENTRY(clear_page)
21833 leaq 64(%rdi),%rdi
21834 jnz .Lloop
21835 nop
21836 + pax_force_retaddr
21837 ret
21838 CFI_ENDPROC
21839 .Lclear_page_end:
21840 @@ -43,7 +46,7 @@ ENDPROC(clear_page)
21841
21842 #include <asm/cpufeature.h>
21843
21844 - .section .altinstr_replacement,"ax"
21845 + .section .altinstr_replacement,"a"
21846 1: .byte 0xeb /* jmp <disp8> */
21847 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
21848 2:
21849 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
21850 index 727a5d4..333818a 100644
21851 --- a/arch/x86/lib/copy_page_64.S
21852 +++ b/arch/x86/lib/copy_page_64.S
21853 @@ -2,12 +2,14 @@
21854
21855 #include <linux/linkage.h>
21856 #include <asm/dwarf2.h>
21857 +#include <asm/alternative-asm.h>
21858
21859 ALIGN
21860 copy_page_c:
21861 CFI_STARTPROC
21862 movl $4096/8,%ecx
21863 rep movsq
21864 + pax_force_retaddr
21865 ret
21866 CFI_ENDPROC
21867 ENDPROC(copy_page_c)
21868 @@ -38,7 +40,7 @@ ENTRY(copy_page)
21869 movq 16 (%rsi), %rdx
21870 movq 24 (%rsi), %r8
21871 movq 32 (%rsi), %r9
21872 - movq 40 (%rsi), %r10
21873 + movq 40 (%rsi), %r13
21874 movq 48 (%rsi), %r11
21875 movq 56 (%rsi), %r12
21876
21877 @@ -49,7 +51,7 @@ ENTRY(copy_page)
21878 movq %rdx, 16 (%rdi)
21879 movq %r8, 24 (%rdi)
21880 movq %r9, 32 (%rdi)
21881 - movq %r10, 40 (%rdi)
21882 + movq %r13, 40 (%rdi)
21883 movq %r11, 48 (%rdi)
21884 movq %r12, 56 (%rdi)
21885
21886 @@ -68,7 +70,7 @@ ENTRY(copy_page)
21887 movq 16 (%rsi), %rdx
21888 movq 24 (%rsi), %r8
21889 movq 32 (%rsi), %r9
21890 - movq 40 (%rsi), %r10
21891 + movq 40 (%rsi), %r13
21892 movq 48 (%rsi), %r11
21893 movq 56 (%rsi), %r12
21894
21895 @@ -77,7 +79,7 @@ ENTRY(copy_page)
21896 movq %rdx, 16 (%rdi)
21897 movq %r8, 24 (%rdi)
21898 movq %r9, 32 (%rdi)
21899 - movq %r10, 40 (%rdi)
21900 + movq %r13, 40 (%rdi)
21901 movq %r11, 48 (%rdi)
21902 movq %r12, 56 (%rdi)
21903
21904 @@ -94,6 +96,7 @@ ENTRY(copy_page)
21905 CFI_RESTORE r13
21906 addq $3*8,%rsp
21907 CFI_ADJUST_CFA_OFFSET -3*8
21908 + pax_force_retaddr
21909 ret
21910 .Lcopy_page_end:
21911 CFI_ENDPROC
21912 @@ -104,7 +107,7 @@ ENDPROC(copy_page)
21913
21914 #include <asm/cpufeature.h>
21915
21916 - .section .altinstr_replacement,"ax"
21917 + .section .altinstr_replacement,"a"
21918 1: .byte 0xeb /* jmp <disp8> */
21919 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
21920 2:
21921 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
21922 index af8debd..40c75f3 100644
21923 --- a/arch/x86/lib/copy_user_64.S
21924 +++ b/arch/x86/lib/copy_user_64.S
21925 @@ -15,13 +15,15 @@
21926 #include <asm/asm-offsets.h>
21927 #include <asm/thread_info.h>
21928 #include <asm/cpufeature.h>
21929 +#include <asm/pgtable.h>
21930 +#include <asm/alternative-asm.h>
21931
21932 .macro ALTERNATIVE_JUMP feature,orig,alt
21933 0:
21934 .byte 0xe9 /* 32bit jump */
21935 .long \orig-1f /* by default jump to orig */
21936 1:
21937 - .section .altinstr_replacement,"ax"
21938 + .section .altinstr_replacement,"a"
21939 2: .byte 0xe9 /* near jump with 32bit immediate */
21940 .long \alt-1b /* offset */ /* or alternatively to alt */
21941 .previous
21942 @@ -64,55 +66,26 @@
21943 #endif
21944 .endm
21945
21946 -/* Standard copy_to_user with segment limit checking */
21947 -ENTRY(copy_to_user)
21948 - CFI_STARTPROC
21949 - GET_THREAD_INFO(%rax)
21950 - movq %rdi,%rcx
21951 - addq %rdx,%rcx
21952 - jc bad_to_user
21953 - cmpq TI_addr_limit(%rax),%rcx
21954 - ja bad_to_user
21955 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
21956 - CFI_ENDPROC
21957 -ENDPROC(copy_to_user)
21958 -
21959 -/* Standard copy_from_user with segment limit checking */
21960 -ENTRY(copy_from_user)
21961 - CFI_STARTPROC
21962 - GET_THREAD_INFO(%rax)
21963 - movq %rsi,%rcx
21964 - addq %rdx,%rcx
21965 - jc bad_from_user
21966 - cmpq TI_addr_limit(%rax),%rcx
21967 - ja bad_from_user
21968 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
21969 - CFI_ENDPROC
21970 -ENDPROC(copy_from_user)
21971 -
21972 ENTRY(copy_user_generic)
21973 CFI_STARTPROC
21974 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
21975 CFI_ENDPROC
21976 ENDPROC(copy_user_generic)
21977
21978 -ENTRY(__copy_from_user_inatomic)
21979 - CFI_STARTPROC
21980 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
21981 - CFI_ENDPROC
21982 -ENDPROC(__copy_from_user_inatomic)
21983 -
21984 .section .fixup,"ax"
21985 /* must zero dest */
21986 ENTRY(bad_from_user)
21987 bad_from_user:
21988 CFI_STARTPROC
21989 + testl %edx,%edx
21990 + js bad_to_user
21991 movl %edx,%ecx
21992 xorl %eax,%eax
21993 rep
21994 stosb
21995 bad_to_user:
21996 movl %edx,%eax
21997 + pax_force_retaddr
21998 ret
21999 CFI_ENDPROC
22000 ENDPROC(bad_from_user)
22001 @@ -142,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
22002 jz 17f
22003 1: movq (%rsi),%r8
22004 2: movq 1*8(%rsi),%r9
22005 -3: movq 2*8(%rsi),%r10
22006 +3: movq 2*8(%rsi),%rax
22007 4: movq 3*8(%rsi),%r11
22008 5: movq %r8,(%rdi)
22009 6: movq %r9,1*8(%rdi)
22010 -7: movq %r10,2*8(%rdi)
22011 +7: movq %rax,2*8(%rdi)
22012 8: movq %r11,3*8(%rdi)
22013 9: movq 4*8(%rsi),%r8
22014 10: movq 5*8(%rsi),%r9
22015 -11: movq 6*8(%rsi),%r10
22016 +11: movq 6*8(%rsi),%rax
22017 12: movq 7*8(%rsi),%r11
22018 13: movq %r8,4*8(%rdi)
22019 14: movq %r9,5*8(%rdi)
22020 -15: movq %r10,6*8(%rdi)
22021 +15: movq %rax,6*8(%rdi)
22022 16: movq %r11,7*8(%rdi)
22023 leaq 64(%rsi),%rsi
22024 leaq 64(%rdi),%rdi
22025 @@ -180,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
22026 decl %ecx
22027 jnz 21b
22028 23: xor %eax,%eax
22029 + pax_force_retaddr
22030 ret
22031
22032 .section .fixup,"ax"
22033 @@ -252,6 +226,7 @@ ENTRY(copy_user_generic_string)
22034 3: rep
22035 movsb
22036 4: xorl %eax,%eax
22037 + pax_force_retaddr
22038 ret
22039
22040 .section .fixup,"ax"
22041 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
22042 index cb0c112..e3a6895 100644
22043 --- a/arch/x86/lib/copy_user_nocache_64.S
22044 +++ b/arch/x86/lib/copy_user_nocache_64.S
22045 @@ -8,12 +8,14 @@
22046
22047 #include <linux/linkage.h>
22048 #include <asm/dwarf2.h>
22049 +#include <asm/alternative-asm.h>
22050
22051 #define FIX_ALIGNMENT 1
22052
22053 #include <asm/current.h>
22054 #include <asm/asm-offsets.h>
22055 #include <asm/thread_info.h>
22056 +#include <asm/pgtable.h>
22057
22058 .macro ALIGN_DESTINATION
22059 #ifdef FIX_ALIGNMENT
22060 @@ -50,6 +52,15 @@
22061 */
22062 ENTRY(__copy_user_nocache)
22063 CFI_STARTPROC
22064 +
22065 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22066 + mov $PAX_USER_SHADOW_BASE,%rcx
22067 + cmp %rcx,%rsi
22068 + jae 1f
22069 + add %rcx,%rsi
22070 +1:
22071 +#endif
22072 +
22073 cmpl $8,%edx
22074 jb 20f /* less then 8 bytes, go to byte copy loop */
22075 ALIGN_DESTINATION
22076 @@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
22077 jz 17f
22078 1: movq (%rsi),%r8
22079 2: movq 1*8(%rsi),%r9
22080 -3: movq 2*8(%rsi),%r10
22081 +3: movq 2*8(%rsi),%rax
22082 4: movq 3*8(%rsi),%r11
22083 5: movnti %r8,(%rdi)
22084 6: movnti %r9,1*8(%rdi)
22085 -7: movnti %r10,2*8(%rdi)
22086 +7: movnti %rax,2*8(%rdi)
22087 8: movnti %r11,3*8(%rdi)
22088 9: movq 4*8(%rsi),%r8
22089 10: movq 5*8(%rsi),%r9
22090 -11: movq 6*8(%rsi),%r10
22091 +11: movq 6*8(%rsi),%rax
22092 12: movq 7*8(%rsi),%r11
22093 13: movnti %r8,4*8(%rdi)
22094 14: movnti %r9,5*8(%rdi)
22095 -15: movnti %r10,6*8(%rdi)
22096 +15: movnti %rax,6*8(%rdi)
22097 16: movnti %r11,7*8(%rdi)
22098 leaq 64(%rsi),%rsi
22099 leaq 64(%rdi),%rdi
22100 @@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
22101 jnz 21b
22102 23: xorl %eax,%eax
22103 sfence
22104 + pax_force_retaddr
22105 ret
22106
22107 .section .fixup,"ax"
22108 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
22109 index f0dba36..48cb4d6 100644
22110 --- a/arch/x86/lib/csum-copy_64.S
22111 +++ b/arch/x86/lib/csum-copy_64.S
22112 @@ -8,6 +8,7 @@
22113 #include <linux/linkage.h>
22114 #include <asm/dwarf2.h>
22115 #include <asm/errno.h>
22116 +#include <asm/alternative-asm.h>
22117
22118 /*
22119 * Checksum copy with exception handling.
22120 @@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
22121 CFI_RESTORE rbp
22122 addq $7*8,%rsp
22123 CFI_ADJUST_CFA_OFFSET -7*8
22124 + pax_force_retaddr 0, 1
22125 ret
22126 CFI_RESTORE_STATE
22127
22128 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
22129 index 459b58a..9570bc7 100644
22130 --- a/arch/x86/lib/csum-wrappers_64.c
22131 +++ b/arch/x86/lib/csum-wrappers_64.c
22132 @@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
22133 len -= 2;
22134 }
22135 }
22136 - isum = csum_partial_copy_generic((__force const void *)src,
22137 +
22138 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22139 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
22140 + src += PAX_USER_SHADOW_BASE;
22141 +#endif
22142 +
22143 + isum = csum_partial_copy_generic((const void __force_kernel *)src,
22144 dst, len, isum, errp, NULL);
22145 if (unlikely(*errp))
22146 goto out_err;
22147 @@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
22148 }
22149
22150 *errp = 0;
22151 - return csum_partial_copy_generic(src, (void __force *)dst,
22152 +
22153 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22154 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
22155 + dst += PAX_USER_SHADOW_BASE;
22156 +#endif
22157 +
22158 + return csum_partial_copy_generic(src, (void __force_kernel *)dst,
22159 len, isum, NULL, errp);
22160 }
22161 EXPORT_SYMBOL(csum_partial_copy_to_user);
22162 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
22163 index 51f1504..ddac4c1 100644
22164 --- a/arch/x86/lib/getuser.S
22165 +++ b/arch/x86/lib/getuser.S
22166 @@ -33,15 +33,38 @@
22167 #include <asm/asm-offsets.h>
22168 #include <asm/thread_info.h>
22169 #include <asm/asm.h>
22170 +#include <asm/segment.h>
22171 +#include <asm/pgtable.h>
22172 +#include <asm/alternative-asm.h>
22173 +
22174 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22175 +#define __copyuser_seg gs;
22176 +#else
22177 +#define __copyuser_seg
22178 +#endif
22179
22180 .text
22181 ENTRY(__get_user_1)
22182 CFI_STARTPROC
22183 +
22184 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22185 GET_THREAD_INFO(%_ASM_DX)
22186 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22187 jae bad_get_user
22188 -1: movzb (%_ASM_AX),%edx
22189 +
22190 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22191 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22192 + cmp %_ASM_DX,%_ASM_AX
22193 + jae 1234f
22194 + add %_ASM_DX,%_ASM_AX
22195 +1234:
22196 +#endif
22197 +
22198 +#endif
22199 +
22200 +1: __copyuser_seg movzb (%_ASM_AX),%edx
22201 xor %eax,%eax
22202 + pax_force_retaddr
22203 ret
22204 CFI_ENDPROC
22205 ENDPROC(__get_user_1)
22206 @@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
22207 ENTRY(__get_user_2)
22208 CFI_STARTPROC
22209 add $1,%_ASM_AX
22210 +
22211 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22212 jc bad_get_user
22213 GET_THREAD_INFO(%_ASM_DX)
22214 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22215 jae bad_get_user
22216 -2: movzwl -1(%_ASM_AX),%edx
22217 +
22218 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22219 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22220 + cmp %_ASM_DX,%_ASM_AX
22221 + jae 1234f
22222 + add %_ASM_DX,%_ASM_AX
22223 +1234:
22224 +#endif
22225 +
22226 +#endif
22227 +
22228 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
22229 xor %eax,%eax
22230 + pax_force_retaddr
22231 ret
22232 CFI_ENDPROC
22233 ENDPROC(__get_user_2)
22234 @@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
22235 ENTRY(__get_user_4)
22236 CFI_STARTPROC
22237 add $3,%_ASM_AX
22238 +
22239 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22240 jc bad_get_user
22241 GET_THREAD_INFO(%_ASM_DX)
22242 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22243 jae bad_get_user
22244 -3: mov -3(%_ASM_AX),%edx
22245 +
22246 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22247 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22248 + cmp %_ASM_DX,%_ASM_AX
22249 + jae 1234f
22250 + add %_ASM_DX,%_ASM_AX
22251 +1234:
22252 +#endif
22253 +
22254 +#endif
22255 +
22256 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
22257 xor %eax,%eax
22258 + pax_force_retaddr
22259 ret
22260 CFI_ENDPROC
22261 ENDPROC(__get_user_4)
22262 @@ -80,8 +131,18 @@ ENTRY(__get_user_8)
22263 GET_THREAD_INFO(%_ASM_DX)
22264 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22265 jae bad_get_user
22266 +
22267 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22268 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22269 + cmp %_ASM_DX,%_ASM_AX
22270 + jae 1234f
22271 + add %_ASM_DX,%_ASM_AX
22272 +1234:
22273 +#endif
22274 +
22275 4: movq -7(%_ASM_AX),%_ASM_DX
22276 xor %eax,%eax
22277 + pax_force_retaddr
22278 ret
22279 CFI_ENDPROC
22280 ENDPROC(__get_user_8)
22281 @@ -91,6 +152,7 @@ bad_get_user:
22282 CFI_STARTPROC
22283 xor %edx,%edx
22284 mov $(-EFAULT),%_ASM_AX
22285 + pax_force_retaddr
22286 ret
22287 CFI_ENDPROC
22288 END(bad_get_user)
22289 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
22290 index 05a95e7..326f2fa 100644
22291 --- a/arch/x86/lib/iomap_copy_64.S
22292 +++ b/arch/x86/lib/iomap_copy_64.S
22293 @@ -17,6 +17,7 @@
22294
22295 #include <linux/linkage.h>
22296 #include <asm/dwarf2.h>
22297 +#include <asm/alternative-asm.h>
22298
22299 /*
22300 * override generic version in lib/iomap_copy.c
22301 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
22302 CFI_STARTPROC
22303 movl %edx,%ecx
22304 rep movsd
22305 + pax_force_retaddr
22306 ret
22307 CFI_ENDPROC
22308 ENDPROC(__iowrite32_copy)
22309 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
22310 index ad5441e..610e351 100644
22311 --- a/arch/x86/lib/memcpy_64.S
22312 +++ b/arch/x86/lib/memcpy_64.S
22313 @@ -4,6 +4,7 @@
22314
22315 #include <asm/cpufeature.h>
22316 #include <asm/dwarf2.h>
22317 +#include <asm/alternative-asm.h>
22318
22319 /*
22320 * memcpy - Copy a memory block.
22321 @@ -34,6 +35,7 @@ memcpy_c:
22322 rep movsq
22323 movl %edx, %ecx
22324 rep movsb
22325 + pax_force_retaddr
22326 ret
22327 CFI_ENDPROC
22328 ENDPROC(memcpy_c)
22329 @@ -118,6 +120,7 @@ ENTRY(memcpy)
22330 jnz .Lloop_1
22331
22332 .Lend:
22333 + pax_force_retaddr 0, 1
22334 ret
22335 CFI_ENDPROC
22336 ENDPROC(memcpy)
22337 @@ -128,7 +131,7 @@ ENDPROC(__memcpy)
22338 * It is also a lot simpler. Use this when possible:
22339 */
22340
22341 - .section .altinstr_replacement, "ax"
22342 + .section .altinstr_replacement, "a"
22343 1: .byte 0xeb /* jmp <disp8> */
22344 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
22345 2:
22346 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
22347 index 2c59481..7e9ba4e 100644
22348 --- a/arch/x86/lib/memset_64.S
22349 +++ b/arch/x86/lib/memset_64.S
22350 @@ -2,6 +2,7 @@
22351
22352 #include <linux/linkage.h>
22353 #include <asm/dwarf2.h>
22354 +#include <asm/alternative-asm.h>
22355
22356 /*
22357 * ISO C memset - set a memory block to a byte value.
22358 @@ -28,6 +29,7 @@ memset_c:
22359 movl %r8d,%ecx
22360 rep stosb
22361 movq %r9,%rax
22362 + pax_force_retaddr
22363 ret
22364 CFI_ENDPROC
22365 ENDPROC(memset_c)
22366 @@ -35,13 +37,13 @@ ENDPROC(memset_c)
22367 ENTRY(memset)
22368 ENTRY(__memset)
22369 CFI_STARTPROC
22370 - movq %rdi,%r10
22371 movq %rdx,%r11
22372
22373 /* expand byte value */
22374 movzbl %sil,%ecx
22375 movabs $0x0101010101010101,%rax
22376 mul %rcx /* with rax, clobbers rdx */
22377 + movq %rdi,%rdx
22378
22379 /* align dst */
22380 movl %edi,%r9d
22381 @@ -95,7 +97,8 @@ ENTRY(__memset)
22382 jnz .Lloop_1
22383
22384 .Lende:
22385 - movq %r10,%rax
22386 + movq %rdx,%rax
22387 + pax_force_retaddr
22388 ret
22389
22390 CFI_RESTORE_STATE
22391 @@ -118,7 +121,7 @@ ENDPROC(__memset)
22392
22393 #include <asm/cpufeature.h>
22394
22395 - .section .altinstr_replacement,"ax"
22396 + .section .altinstr_replacement,"a"
22397 1: .byte 0xeb /* jmp <disp8> */
22398 .byte (memset_c - memset) - (2f - 1b) /* offset */
22399 2:
22400 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
22401 index c9f2d9b..e7fd2c0 100644
22402 --- a/arch/x86/lib/mmx_32.c
22403 +++ b/arch/x86/lib/mmx_32.c
22404 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22405 {
22406 void *p;
22407 int i;
22408 + unsigned long cr0;
22409
22410 if (unlikely(in_interrupt()))
22411 return __memcpy(to, from, len);
22412 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22413 kernel_fpu_begin();
22414
22415 __asm__ __volatile__ (
22416 - "1: prefetch (%0)\n" /* This set is 28 bytes */
22417 - " prefetch 64(%0)\n"
22418 - " prefetch 128(%0)\n"
22419 - " prefetch 192(%0)\n"
22420 - " prefetch 256(%0)\n"
22421 + "1: prefetch (%1)\n" /* This set is 28 bytes */
22422 + " prefetch 64(%1)\n"
22423 + " prefetch 128(%1)\n"
22424 + " prefetch 192(%1)\n"
22425 + " prefetch 256(%1)\n"
22426 "2: \n"
22427 ".section .fixup, \"ax\"\n"
22428 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22429 + "3: \n"
22430 +
22431 +#ifdef CONFIG_PAX_KERNEXEC
22432 + " movl %%cr0, %0\n"
22433 + " movl %0, %%eax\n"
22434 + " andl $0xFFFEFFFF, %%eax\n"
22435 + " movl %%eax, %%cr0\n"
22436 +#endif
22437 +
22438 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22439 +
22440 +#ifdef CONFIG_PAX_KERNEXEC
22441 + " movl %0, %%cr0\n"
22442 +#endif
22443 +
22444 " jmp 2b\n"
22445 ".previous\n"
22446 _ASM_EXTABLE(1b, 3b)
22447 - : : "r" (from));
22448 + : "=&r" (cr0) : "r" (from) : "ax");
22449
22450 for ( ; i > 5; i--) {
22451 __asm__ __volatile__ (
22452 - "1: prefetch 320(%0)\n"
22453 - "2: movq (%0), %%mm0\n"
22454 - " movq 8(%0), %%mm1\n"
22455 - " movq 16(%0), %%mm2\n"
22456 - " movq 24(%0), %%mm3\n"
22457 - " movq %%mm0, (%1)\n"
22458 - " movq %%mm1, 8(%1)\n"
22459 - " movq %%mm2, 16(%1)\n"
22460 - " movq %%mm3, 24(%1)\n"
22461 - " movq 32(%0), %%mm0\n"
22462 - " movq 40(%0), %%mm1\n"
22463 - " movq 48(%0), %%mm2\n"
22464 - " movq 56(%0), %%mm3\n"
22465 - " movq %%mm0, 32(%1)\n"
22466 - " movq %%mm1, 40(%1)\n"
22467 - " movq %%mm2, 48(%1)\n"
22468 - " movq %%mm3, 56(%1)\n"
22469 + "1: prefetch 320(%1)\n"
22470 + "2: movq (%1), %%mm0\n"
22471 + " movq 8(%1), %%mm1\n"
22472 + " movq 16(%1), %%mm2\n"
22473 + " movq 24(%1), %%mm3\n"
22474 + " movq %%mm0, (%2)\n"
22475 + " movq %%mm1, 8(%2)\n"
22476 + " movq %%mm2, 16(%2)\n"
22477 + " movq %%mm3, 24(%2)\n"
22478 + " movq 32(%1), %%mm0\n"
22479 + " movq 40(%1), %%mm1\n"
22480 + " movq 48(%1), %%mm2\n"
22481 + " movq 56(%1), %%mm3\n"
22482 + " movq %%mm0, 32(%2)\n"
22483 + " movq %%mm1, 40(%2)\n"
22484 + " movq %%mm2, 48(%2)\n"
22485 + " movq %%mm3, 56(%2)\n"
22486 ".section .fixup, \"ax\"\n"
22487 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22488 + "3:\n"
22489 +
22490 +#ifdef CONFIG_PAX_KERNEXEC
22491 + " movl %%cr0, %0\n"
22492 + " movl %0, %%eax\n"
22493 + " andl $0xFFFEFFFF, %%eax\n"
22494 + " movl %%eax, %%cr0\n"
22495 +#endif
22496 +
22497 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22498 +
22499 +#ifdef CONFIG_PAX_KERNEXEC
22500 + " movl %0, %%cr0\n"
22501 +#endif
22502 +
22503 " jmp 2b\n"
22504 ".previous\n"
22505 _ASM_EXTABLE(1b, 3b)
22506 - : : "r" (from), "r" (to) : "memory");
22507 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22508
22509 from += 64;
22510 to += 64;
22511 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
22512 static void fast_copy_page(void *to, void *from)
22513 {
22514 int i;
22515 + unsigned long cr0;
22516
22517 kernel_fpu_begin();
22518
22519 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
22520 * but that is for later. -AV
22521 */
22522 __asm__ __volatile__(
22523 - "1: prefetch (%0)\n"
22524 - " prefetch 64(%0)\n"
22525 - " prefetch 128(%0)\n"
22526 - " prefetch 192(%0)\n"
22527 - " prefetch 256(%0)\n"
22528 + "1: prefetch (%1)\n"
22529 + " prefetch 64(%1)\n"
22530 + " prefetch 128(%1)\n"
22531 + " prefetch 192(%1)\n"
22532 + " prefetch 256(%1)\n"
22533 "2: \n"
22534 ".section .fixup, \"ax\"\n"
22535 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22536 + "3: \n"
22537 +
22538 +#ifdef CONFIG_PAX_KERNEXEC
22539 + " movl %%cr0, %0\n"
22540 + " movl %0, %%eax\n"
22541 + " andl $0xFFFEFFFF, %%eax\n"
22542 + " movl %%eax, %%cr0\n"
22543 +#endif
22544 +
22545 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22546 +
22547 +#ifdef CONFIG_PAX_KERNEXEC
22548 + " movl %0, %%cr0\n"
22549 +#endif
22550 +
22551 " jmp 2b\n"
22552 ".previous\n"
22553 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
22554 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22555
22556 for (i = 0; i < (4096-320)/64; i++) {
22557 __asm__ __volatile__ (
22558 - "1: prefetch 320(%0)\n"
22559 - "2: movq (%0), %%mm0\n"
22560 - " movntq %%mm0, (%1)\n"
22561 - " movq 8(%0), %%mm1\n"
22562 - " movntq %%mm1, 8(%1)\n"
22563 - " movq 16(%0), %%mm2\n"
22564 - " movntq %%mm2, 16(%1)\n"
22565 - " movq 24(%0), %%mm3\n"
22566 - " movntq %%mm3, 24(%1)\n"
22567 - " movq 32(%0), %%mm4\n"
22568 - " movntq %%mm4, 32(%1)\n"
22569 - " movq 40(%0), %%mm5\n"
22570 - " movntq %%mm5, 40(%1)\n"
22571 - " movq 48(%0), %%mm6\n"
22572 - " movntq %%mm6, 48(%1)\n"
22573 - " movq 56(%0), %%mm7\n"
22574 - " movntq %%mm7, 56(%1)\n"
22575 + "1: prefetch 320(%1)\n"
22576 + "2: movq (%1), %%mm0\n"
22577 + " movntq %%mm0, (%2)\n"
22578 + " movq 8(%1), %%mm1\n"
22579 + " movntq %%mm1, 8(%2)\n"
22580 + " movq 16(%1), %%mm2\n"
22581 + " movntq %%mm2, 16(%2)\n"
22582 + " movq 24(%1), %%mm3\n"
22583 + " movntq %%mm3, 24(%2)\n"
22584 + " movq 32(%1), %%mm4\n"
22585 + " movntq %%mm4, 32(%2)\n"
22586 + " movq 40(%1), %%mm5\n"
22587 + " movntq %%mm5, 40(%2)\n"
22588 + " movq 48(%1), %%mm6\n"
22589 + " movntq %%mm6, 48(%2)\n"
22590 + " movq 56(%1), %%mm7\n"
22591 + " movntq %%mm7, 56(%2)\n"
22592 ".section .fixup, \"ax\"\n"
22593 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22594 + "3:\n"
22595 +
22596 +#ifdef CONFIG_PAX_KERNEXEC
22597 + " movl %%cr0, %0\n"
22598 + " movl %0, %%eax\n"
22599 + " andl $0xFFFEFFFF, %%eax\n"
22600 + " movl %%eax, %%cr0\n"
22601 +#endif
22602 +
22603 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22604 +
22605 +#ifdef CONFIG_PAX_KERNEXEC
22606 + " movl %0, %%cr0\n"
22607 +#endif
22608 +
22609 " jmp 2b\n"
22610 ".previous\n"
22611 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
22612 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22613
22614 from += 64;
22615 to += 64;
22616 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
22617 static void fast_copy_page(void *to, void *from)
22618 {
22619 int i;
22620 + unsigned long cr0;
22621
22622 kernel_fpu_begin();
22623
22624 __asm__ __volatile__ (
22625 - "1: prefetch (%0)\n"
22626 - " prefetch 64(%0)\n"
22627 - " prefetch 128(%0)\n"
22628 - " prefetch 192(%0)\n"
22629 - " prefetch 256(%0)\n"
22630 + "1: prefetch (%1)\n"
22631 + " prefetch 64(%1)\n"
22632 + " prefetch 128(%1)\n"
22633 + " prefetch 192(%1)\n"
22634 + " prefetch 256(%1)\n"
22635 "2: \n"
22636 ".section .fixup, \"ax\"\n"
22637 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22638 + "3: \n"
22639 +
22640 +#ifdef CONFIG_PAX_KERNEXEC
22641 + " movl %%cr0, %0\n"
22642 + " movl %0, %%eax\n"
22643 + " andl $0xFFFEFFFF, %%eax\n"
22644 + " movl %%eax, %%cr0\n"
22645 +#endif
22646 +
22647 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22648 +
22649 +#ifdef CONFIG_PAX_KERNEXEC
22650 + " movl %0, %%cr0\n"
22651 +#endif
22652 +
22653 " jmp 2b\n"
22654 ".previous\n"
22655 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
22656 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22657
22658 for (i = 0; i < 4096/64; i++) {
22659 __asm__ __volatile__ (
22660 - "1: prefetch 320(%0)\n"
22661 - "2: movq (%0), %%mm0\n"
22662 - " movq 8(%0), %%mm1\n"
22663 - " movq 16(%0), %%mm2\n"
22664 - " movq 24(%0), %%mm3\n"
22665 - " movq %%mm0, (%1)\n"
22666 - " movq %%mm1, 8(%1)\n"
22667 - " movq %%mm2, 16(%1)\n"
22668 - " movq %%mm3, 24(%1)\n"
22669 - " movq 32(%0), %%mm0\n"
22670 - " movq 40(%0), %%mm1\n"
22671 - " movq 48(%0), %%mm2\n"
22672 - " movq 56(%0), %%mm3\n"
22673 - " movq %%mm0, 32(%1)\n"
22674 - " movq %%mm1, 40(%1)\n"
22675 - " movq %%mm2, 48(%1)\n"
22676 - " movq %%mm3, 56(%1)\n"
22677 + "1: prefetch 320(%1)\n"
22678 + "2: movq (%1), %%mm0\n"
22679 + " movq 8(%1), %%mm1\n"
22680 + " movq 16(%1), %%mm2\n"
22681 + " movq 24(%1), %%mm3\n"
22682 + " movq %%mm0, (%2)\n"
22683 + " movq %%mm1, 8(%2)\n"
22684 + " movq %%mm2, 16(%2)\n"
22685 + " movq %%mm3, 24(%2)\n"
22686 + " movq 32(%1), %%mm0\n"
22687 + " movq 40(%1), %%mm1\n"
22688 + " movq 48(%1), %%mm2\n"
22689 + " movq 56(%1), %%mm3\n"
22690 + " movq %%mm0, 32(%2)\n"
22691 + " movq %%mm1, 40(%2)\n"
22692 + " movq %%mm2, 48(%2)\n"
22693 + " movq %%mm3, 56(%2)\n"
22694 ".section .fixup, \"ax\"\n"
22695 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22696 + "3:\n"
22697 +
22698 +#ifdef CONFIG_PAX_KERNEXEC
22699 + " movl %%cr0, %0\n"
22700 + " movl %0, %%eax\n"
22701 + " andl $0xFFFEFFFF, %%eax\n"
22702 + " movl %%eax, %%cr0\n"
22703 +#endif
22704 +
22705 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22706 +
22707 +#ifdef CONFIG_PAX_KERNEXEC
22708 + " movl %0, %%cr0\n"
22709 +#endif
22710 +
22711 " jmp 2b\n"
22712 ".previous\n"
22713 _ASM_EXTABLE(1b, 3b)
22714 - : : "r" (from), "r" (to) : "memory");
22715 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22716
22717 from += 64;
22718 to += 64;
22719 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
22720 index 69fa106..adda88b 100644
22721 --- a/arch/x86/lib/msr-reg.S
22722 +++ b/arch/x86/lib/msr-reg.S
22723 @@ -3,6 +3,7 @@
22724 #include <asm/dwarf2.h>
22725 #include <asm/asm.h>
22726 #include <asm/msr.h>
22727 +#include <asm/alternative-asm.h>
22728
22729 #ifdef CONFIG_X86_64
22730 /*
22731 @@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
22732 CFI_STARTPROC
22733 pushq_cfi %rbx
22734 pushq_cfi %rbp
22735 - movq %rdi, %r10 /* Save pointer */
22736 + movq %rdi, %r9 /* Save pointer */
22737 xorl %r11d, %r11d /* Return value */
22738 movl (%rdi), %eax
22739 movl 4(%rdi), %ecx
22740 @@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
22741 movl 28(%rdi), %edi
22742 CFI_REMEMBER_STATE
22743 1: \op
22744 -2: movl %eax, (%r10)
22745 +2: movl %eax, (%r9)
22746 movl %r11d, %eax /* Return value */
22747 - movl %ecx, 4(%r10)
22748 - movl %edx, 8(%r10)
22749 - movl %ebx, 12(%r10)
22750 - movl %ebp, 20(%r10)
22751 - movl %esi, 24(%r10)
22752 - movl %edi, 28(%r10)
22753 + movl %ecx, 4(%r9)
22754 + movl %edx, 8(%r9)
22755 + movl %ebx, 12(%r9)
22756 + movl %ebp, 20(%r9)
22757 + movl %esi, 24(%r9)
22758 + movl %edi, 28(%r9)
22759 popq_cfi %rbp
22760 popq_cfi %rbx
22761 + pax_force_retaddr
22762 ret
22763 3:
22764 CFI_RESTORE_STATE
22765 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
22766 index 36b0d15..d381858 100644
22767 --- a/arch/x86/lib/putuser.S
22768 +++ b/arch/x86/lib/putuser.S
22769 @@ -15,7 +15,9 @@
22770 #include <asm/thread_info.h>
22771 #include <asm/errno.h>
22772 #include <asm/asm.h>
22773 -
22774 +#include <asm/segment.h>
22775 +#include <asm/pgtable.h>
22776 +#include <asm/alternative-asm.h>
22777
22778 /*
22779 * __put_user_X
22780 @@ -29,52 +31,119 @@
22781 * as they get called from within inline assembly.
22782 */
22783
22784 -#define ENTER CFI_STARTPROC ; \
22785 - GET_THREAD_INFO(%_ASM_BX)
22786 -#define EXIT ret ; \
22787 +#define ENTER CFI_STARTPROC
22788 +#define EXIT pax_force_retaddr; ret ; \
22789 CFI_ENDPROC
22790
22791 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22792 +#define _DEST %_ASM_CX,%_ASM_BX
22793 +#else
22794 +#define _DEST %_ASM_CX
22795 +#endif
22796 +
22797 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22798 +#define __copyuser_seg gs;
22799 +#else
22800 +#define __copyuser_seg
22801 +#endif
22802 +
22803 .text
22804 ENTRY(__put_user_1)
22805 ENTER
22806 +
22807 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22808 + GET_THREAD_INFO(%_ASM_BX)
22809 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
22810 jae bad_put_user
22811 -1: movb %al,(%_ASM_CX)
22812 +
22813 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22814 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22815 + cmp %_ASM_BX,%_ASM_CX
22816 + jb 1234f
22817 + xor %ebx,%ebx
22818 +1234:
22819 +#endif
22820 +
22821 +#endif
22822 +
22823 +1: __copyuser_seg movb %al,(_DEST)
22824 xor %eax,%eax
22825 EXIT
22826 ENDPROC(__put_user_1)
22827
22828 ENTRY(__put_user_2)
22829 ENTER
22830 +
22831 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22832 + GET_THREAD_INFO(%_ASM_BX)
22833 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22834 sub $1,%_ASM_BX
22835 cmp %_ASM_BX,%_ASM_CX
22836 jae bad_put_user
22837 -2: movw %ax,(%_ASM_CX)
22838 +
22839 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22840 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22841 + cmp %_ASM_BX,%_ASM_CX
22842 + jb 1234f
22843 + xor %ebx,%ebx
22844 +1234:
22845 +#endif
22846 +
22847 +#endif
22848 +
22849 +2: __copyuser_seg movw %ax,(_DEST)
22850 xor %eax,%eax
22851 EXIT
22852 ENDPROC(__put_user_2)
22853
22854 ENTRY(__put_user_4)
22855 ENTER
22856 +
22857 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22858 + GET_THREAD_INFO(%_ASM_BX)
22859 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22860 sub $3,%_ASM_BX
22861 cmp %_ASM_BX,%_ASM_CX
22862 jae bad_put_user
22863 -3: movl %eax,(%_ASM_CX)
22864 +
22865 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22866 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22867 + cmp %_ASM_BX,%_ASM_CX
22868 + jb 1234f
22869 + xor %ebx,%ebx
22870 +1234:
22871 +#endif
22872 +
22873 +#endif
22874 +
22875 +3: __copyuser_seg movl %eax,(_DEST)
22876 xor %eax,%eax
22877 EXIT
22878 ENDPROC(__put_user_4)
22879
22880 ENTRY(__put_user_8)
22881 ENTER
22882 +
22883 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22884 + GET_THREAD_INFO(%_ASM_BX)
22885 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22886 sub $7,%_ASM_BX
22887 cmp %_ASM_BX,%_ASM_CX
22888 jae bad_put_user
22889 -4: mov %_ASM_AX,(%_ASM_CX)
22890 +
22891 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22892 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22893 + cmp %_ASM_BX,%_ASM_CX
22894 + jb 1234f
22895 + xor %ebx,%ebx
22896 +1234:
22897 +#endif
22898 +
22899 +#endif
22900 +
22901 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
22902 #ifdef CONFIG_X86_32
22903 -5: movl %edx,4(%_ASM_CX)
22904 +5: __copyuser_seg movl %edx,4(_DEST)
22905 #endif
22906 xor %eax,%eax
22907 EXIT
22908 diff --git a/arch/x86/lib/rwlock_64.S b/arch/x86/lib/rwlock_64.S
22909 index 05ea55f..6345b9a 100644
22910 --- a/arch/x86/lib/rwlock_64.S
22911 +++ b/arch/x86/lib/rwlock_64.S
22912 @@ -2,6 +2,7 @@
22913
22914 #include <linux/linkage.h>
22915 #include <asm/rwlock.h>
22916 +#include <asm/asm.h>
22917 #include <asm/alternative-asm.h>
22918 #include <asm/dwarf2.h>
22919
22920 @@ -10,13 +11,34 @@ ENTRY(__write_lock_failed)
22921 CFI_STARTPROC
22922 LOCK_PREFIX
22923 addl $RW_LOCK_BIAS,(%rdi)
22924 +
22925 +#ifdef CONFIG_PAX_REFCOUNT
22926 + jno 1234f
22927 + LOCK_PREFIX
22928 + subl $RW_LOCK_BIAS,(%rdi)
22929 + int $4
22930 +1234:
22931 + _ASM_EXTABLE(1234b, 1234b)
22932 +#endif
22933 +
22934 1: rep
22935 nop
22936 cmpl $RW_LOCK_BIAS,(%rdi)
22937 jne 1b
22938 LOCK_PREFIX
22939 subl $RW_LOCK_BIAS,(%rdi)
22940 +
22941 +#ifdef CONFIG_PAX_REFCOUNT
22942 + jno 1234f
22943 + LOCK_PREFIX
22944 + addl $RW_LOCK_BIAS,(%rdi)
22945 + int $4
22946 +1234:
22947 + _ASM_EXTABLE(1234b, 1234b)
22948 +#endif
22949 +
22950 jnz __write_lock_failed
22951 + pax_force_retaddr
22952 ret
22953 CFI_ENDPROC
22954 END(__write_lock_failed)
22955 @@ -26,13 +48,34 @@ ENTRY(__read_lock_failed)
22956 CFI_STARTPROC
22957 LOCK_PREFIX
22958 incl (%rdi)
22959 +
22960 +#ifdef CONFIG_PAX_REFCOUNT
22961 + jno 1234f
22962 + LOCK_PREFIX
22963 + decl (%rdi)
22964 + int $4
22965 +1234:
22966 + _ASM_EXTABLE(1234b, 1234b)
22967 +#endif
22968 +
22969 1: rep
22970 nop
22971 cmpl $1,(%rdi)
22972 js 1b
22973 LOCK_PREFIX
22974 decl (%rdi)
22975 +
22976 +#ifdef CONFIG_PAX_REFCOUNT
22977 + jno 1234f
22978 + LOCK_PREFIX
22979 + incl (%rdi)
22980 + int $4
22981 +1234:
22982 + _ASM_EXTABLE(1234b, 1234b)
22983 +#endif
22984 +
22985 js __read_lock_failed
22986 + pax_force_retaddr
22987 ret
22988 CFI_ENDPROC
22989 END(__read_lock_failed)
22990 diff --git a/arch/x86/lib/rwsem_64.S b/arch/x86/lib/rwsem_64.S
22991 index 15acecf..f768b10 100644
22992 --- a/arch/x86/lib/rwsem_64.S
22993 +++ b/arch/x86/lib/rwsem_64.S
22994 @@ -48,6 +48,7 @@ ENTRY(call_rwsem_down_read_failed)
22995 call rwsem_down_read_failed
22996 popq %rdx
22997 restore_common_regs
22998 + pax_force_retaddr
22999 ret
23000 ENDPROC(call_rwsem_down_read_failed)
23001
23002 @@ -56,6 +57,7 @@ ENTRY(call_rwsem_down_write_failed)
23003 movq %rax,%rdi
23004 call rwsem_down_write_failed
23005 restore_common_regs
23006 + pax_force_retaddr
23007 ret
23008 ENDPROC(call_rwsem_down_write_failed)
23009
23010 @@ -66,7 +68,8 @@ ENTRY(call_rwsem_wake)
23011 movq %rax,%rdi
23012 call rwsem_wake
23013 restore_common_regs
23014 -1: ret
23015 +1: pax_force_retaddr
23016 + ret
23017 ENDPROC(call_rwsem_wake)
23018
23019 /* Fix up special calling conventions */
23020 @@ -77,5 +80,6 @@ ENTRY(call_rwsem_downgrade_wake)
23021 call rwsem_downgrade_wake
23022 popq %rdx
23023 restore_common_regs
23024 + pax_force_retaddr
23025 ret
23026 ENDPROC(call_rwsem_downgrade_wake)
23027 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
23028 index bf9a7d5..fb06ab5 100644
23029 --- a/arch/x86/lib/thunk_64.S
23030 +++ b/arch/x86/lib/thunk_64.S
23031 @@ -10,7 +10,8 @@
23032 #include <asm/dwarf2.h>
23033 #include <asm/calling.h>
23034 #include <asm/rwlock.h>
23035 -
23036 + #include <asm/alternative-asm.h>
23037 +
23038 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
23039 .macro thunk name,func
23040 .globl \name
23041 @@ -70,6 +71,7 @@
23042 SAVE_ARGS
23043 restore:
23044 RESTORE_ARGS
23045 + pax_force_retaddr
23046 ret
23047 CFI_ENDPROC
23048
23049 @@ -77,5 +79,6 @@ restore:
23050 SAVE_ARGS
23051 restore_norax:
23052 RESTORE_ARGS 1
23053 + pax_force_retaddr
23054 ret
23055 CFI_ENDPROC
23056 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
23057 index 1f118d4..ec4a953 100644
23058 --- a/arch/x86/lib/usercopy_32.c
23059 +++ b/arch/x86/lib/usercopy_32.c
23060 @@ -43,7 +43,7 @@ do { \
23061 __asm__ __volatile__( \
23062 " testl %1,%1\n" \
23063 " jz 2f\n" \
23064 - "0: lodsb\n" \
23065 + "0: "__copyuser_seg"lodsb\n" \
23066 " stosb\n" \
23067 " testb %%al,%%al\n" \
23068 " jz 1f\n" \
23069 @@ -128,10 +128,12 @@ do { \
23070 int __d0; \
23071 might_fault(); \
23072 __asm__ __volatile__( \
23073 + __COPYUSER_SET_ES \
23074 "0: rep; stosl\n" \
23075 " movl %2,%0\n" \
23076 "1: rep; stosb\n" \
23077 "2:\n" \
23078 + __COPYUSER_RESTORE_ES \
23079 ".section .fixup,\"ax\"\n" \
23080 "3: lea 0(%2,%0,4),%0\n" \
23081 " jmp 2b\n" \
23082 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
23083 might_fault();
23084
23085 __asm__ __volatile__(
23086 + __COPYUSER_SET_ES
23087 " testl %0, %0\n"
23088 " jz 3f\n"
23089 " andl %0,%%ecx\n"
23090 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
23091 " subl %%ecx,%0\n"
23092 " addl %0,%%eax\n"
23093 "1:\n"
23094 + __COPYUSER_RESTORE_ES
23095 ".section .fixup,\"ax\"\n"
23096 "2: xorl %%eax,%%eax\n"
23097 " jmp 1b\n"
23098 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
23099
23100 #ifdef CONFIG_X86_INTEL_USERCOPY
23101 static unsigned long
23102 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
23103 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
23104 {
23105 int d0, d1;
23106 __asm__ __volatile__(
23107 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23108 " .align 2,0x90\n"
23109 "3: movl 0(%4), %%eax\n"
23110 "4: movl 4(%4), %%edx\n"
23111 - "5: movl %%eax, 0(%3)\n"
23112 - "6: movl %%edx, 4(%3)\n"
23113 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
23114 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
23115 "7: movl 8(%4), %%eax\n"
23116 "8: movl 12(%4),%%edx\n"
23117 - "9: movl %%eax, 8(%3)\n"
23118 - "10: movl %%edx, 12(%3)\n"
23119 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
23120 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
23121 "11: movl 16(%4), %%eax\n"
23122 "12: movl 20(%4), %%edx\n"
23123 - "13: movl %%eax, 16(%3)\n"
23124 - "14: movl %%edx, 20(%3)\n"
23125 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
23126 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
23127 "15: movl 24(%4), %%eax\n"
23128 "16: movl 28(%4), %%edx\n"
23129 - "17: movl %%eax, 24(%3)\n"
23130 - "18: movl %%edx, 28(%3)\n"
23131 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
23132 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
23133 "19: movl 32(%4), %%eax\n"
23134 "20: movl 36(%4), %%edx\n"
23135 - "21: movl %%eax, 32(%3)\n"
23136 - "22: movl %%edx, 36(%3)\n"
23137 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
23138 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
23139 "23: movl 40(%4), %%eax\n"
23140 "24: movl 44(%4), %%edx\n"
23141 - "25: movl %%eax, 40(%3)\n"
23142 - "26: movl %%edx, 44(%3)\n"
23143 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
23144 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
23145 "27: movl 48(%4), %%eax\n"
23146 "28: movl 52(%4), %%edx\n"
23147 - "29: movl %%eax, 48(%3)\n"
23148 - "30: movl %%edx, 52(%3)\n"
23149 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
23150 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
23151 "31: movl 56(%4), %%eax\n"
23152 "32: movl 60(%4), %%edx\n"
23153 - "33: movl %%eax, 56(%3)\n"
23154 - "34: movl %%edx, 60(%3)\n"
23155 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
23156 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
23157 " addl $-64, %0\n"
23158 " addl $64, %4\n"
23159 " addl $64, %3\n"
23160 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23161 " shrl $2, %0\n"
23162 " andl $3, %%eax\n"
23163 " cld\n"
23164 + __COPYUSER_SET_ES
23165 "99: rep; movsl\n"
23166 "36: movl %%eax, %0\n"
23167 "37: rep; movsb\n"
23168 "100:\n"
23169 + __COPYUSER_RESTORE_ES
23170 + ".section .fixup,\"ax\"\n"
23171 + "101: lea 0(%%eax,%0,4),%0\n"
23172 + " jmp 100b\n"
23173 + ".previous\n"
23174 + ".section __ex_table,\"a\"\n"
23175 + " .align 4\n"
23176 + " .long 1b,100b\n"
23177 + " .long 2b,100b\n"
23178 + " .long 3b,100b\n"
23179 + " .long 4b,100b\n"
23180 + " .long 5b,100b\n"
23181 + " .long 6b,100b\n"
23182 + " .long 7b,100b\n"
23183 + " .long 8b,100b\n"
23184 + " .long 9b,100b\n"
23185 + " .long 10b,100b\n"
23186 + " .long 11b,100b\n"
23187 + " .long 12b,100b\n"
23188 + " .long 13b,100b\n"
23189 + " .long 14b,100b\n"
23190 + " .long 15b,100b\n"
23191 + " .long 16b,100b\n"
23192 + " .long 17b,100b\n"
23193 + " .long 18b,100b\n"
23194 + " .long 19b,100b\n"
23195 + " .long 20b,100b\n"
23196 + " .long 21b,100b\n"
23197 + " .long 22b,100b\n"
23198 + " .long 23b,100b\n"
23199 + " .long 24b,100b\n"
23200 + " .long 25b,100b\n"
23201 + " .long 26b,100b\n"
23202 + " .long 27b,100b\n"
23203 + " .long 28b,100b\n"
23204 + " .long 29b,100b\n"
23205 + " .long 30b,100b\n"
23206 + " .long 31b,100b\n"
23207 + " .long 32b,100b\n"
23208 + " .long 33b,100b\n"
23209 + " .long 34b,100b\n"
23210 + " .long 35b,100b\n"
23211 + " .long 36b,100b\n"
23212 + " .long 37b,100b\n"
23213 + " .long 99b,101b\n"
23214 + ".previous"
23215 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
23216 + : "1"(to), "2"(from), "0"(size)
23217 + : "eax", "edx", "memory");
23218 + return size;
23219 +}
23220 +
23221 +static unsigned long
23222 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
23223 +{
23224 + int d0, d1;
23225 + __asm__ __volatile__(
23226 + " .align 2,0x90\n"
23227 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
23228 + " cmpl $67, %0\n"
23229 + " jbe 3f\n"
23230 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
23231 + " .align 2,0x90\n"
23232 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
23233 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
23234 + "5: movl %%eax, 0(%3)\n"
23235 + "6: movl %%edx, 4(%3)\n"
23236 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
23237 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
23238 + "9: movl %%eax, 8(%3)\n"
23239 + "10: movl %%edx, 12(%3)\n"
23240 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
23241 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
23242 + "13: movl %%eax, 16(%3)\n"
23243 + "14: movl %%edx, 20(%3)\n"
23244 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
23245 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
23246 + "17: movl %%eax, 24(%3)\n"
23247 + "18: movl %%edx, 28(%3)\n"
23248 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
23249 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
23250 + "21: movl %%eax, 32(%3)\n"
23251 + "22: movl %%edx, 36(%3)\n"
23252 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
23253 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
23254 + "25: movl %%eax, 40(%3)\n"
23255 + "26: movl %%edx, 44(%3)\n"
23256 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
23257 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
23258 + "29: movl %%eax, 48(%3)\n"
23259 + "30: movl %%edx, 52(%3)\n"
23260 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
23261 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
23262 + "33: movl %%eax, 56(%3)\n"
23263 + "34: movl %%edx, 60(%3)\n"
23264 + " addl $-64, %0\n"
23265 + " addl $64, %4\n"
23266 + " addl $64, %3\n"
23267 + " cmpl $63, %0\n"
23268 + " ja 1b\n"
23269 + "35: movl %0, %%eax\n"
23270 + " shrl $2, %0\n"
23271 + " andl $3, %%eax\n"
23272 + " cld\n"
23273 + "99: rep; "__copyuser_seg" movsl\n"
23274 + "36: movl %%eax, %0\n"
23275 + "37: rep; "__copyuser_seg" movsb\n"
23276 + "100:\n"
23277 ".section .fixup,\"ax\"\n"
23278 "101: lea 0(%%eax,%0,4),%0\n"
23279 " jmp 100b\n"
23280 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23281 int d0, d1;
23282 __asm__ __volatile__(
23283 " .align 2,0x90\n"
23284 - "0: movl 32(%4), %%eax\n"
23285 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23286 " cmpl $67, %0\n"
23287 " jbe 2f\n"
23288 - "1: movl 64(%4), %%eax\n"
23289 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23290 " .align 2,0x90\n"
23291 - "2: movl 0(%4), %%eax\n"
23292 - "21: movl 4(%4), %%edx\n"
23293 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23294 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23295 " movl %%eax, 0(%3)\n"
23296 " movl %%edx, 4(%3)\n"
23297 - "3: movl 8(%4), %%eax\n"
23298 - "31: movl 12(%4),%%edx\n"
23299 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23300 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23301 " movl %%eax, 8(%3)\n"
23302 " movl %%edx, 12(%3)\n"
23303 - "4: movl 16(%4), %%eax\n"
23304 - "41: movl 20(%4), %%edx\n"
23305 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23306 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23307 " movl %%eax, 16(%3)\n"
23308 " movl %%edx, 20(%3)\n"
23309 - "10: movl 24(%4), %%eax\n"
23310 - "51: movl 28(%4), %%edx\n"
23311 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23312 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23313 " movl %%eax, 24(%3)\n"
23314 " movl %%edx, 28(%3)\n"
23315 - "11: movl 32(%4), %%eax\n"
23316 - "61: movl 36(%4), %%edx\n"
23317 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23318 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23319 " movl %%eax, 32(%3)\n"
23320 " movl %%edx, 36(%3)\n"
23321 - "12: movl 40(%4), %%eax\n"
23322 - "71: movl 44(%4), %%edx\n"
23323 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23324 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23325 " movl %%eax, 40(%3)\n"
23326 " movl %%edx, 44(%3)\n"
23327 - "13: movl 48(%4), %%eax\n"
23328 - "81: movl 52(%4), %%edx\n"
23329 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23330 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23331 " movl %%eax, 48(%3)\n"
23332 " movl %%edx, 52(%3)\n"
23333 - "14: movl 56(%4), %%eax\n"
23334 - "91: movl 60(%4), %%edx\n"
23335 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23336 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23337 " movl %%eax, 56(%3)\n"
23338 " movl %%edx, 60(%3)\n"
23339 " addl $-64, %0\n"
23340 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23341 " shrl $2, %0\n"
23342 " andl $3, %%eax\n"
23343 " cld\n"
23344 - "6: rep; movsl\n"
23345 + "6: rep; "__copyuser_seg" movsl\n"
23346 " movl %%eax,%0\n"
23347 - "7: rep; movsb\n"
23348 + "7: rep; "__copyuser_seg" movsb\n"
23349 "8:\n"
23350 ".section .fixup,\"ax\"\n"
23351 "9: lea 0(%%eax,%0,4),%0\n"
23352 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23353
23354 __asm__ __volatile__(
23355 " .align 2,0x90\n"
23356 - "0: movl 32(%4), %%eax\n"
23357 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23358 " cmpl $67, %0\n"
23359 " jbe 2f\n"
23360 - "1: movl 64(%4), %%eax\n"
23361 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23362 " .align 2,0x90\n"
23363 - "2: movl 0(%4), %%eax\n"
23364 - "21: movl 4(%4), %%edx\n"
23365 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23366 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23367 " movnti %%eax, 0(%3)\n"
23368 " movnti %%edx, 4(%3)\n"
23369 - "3: movl 8(%4), %%eax\n"
23370 - "31: movl 12(%4),%%edx\n"
23371 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23372 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23373 " movnti %%eax, 8(%3)\n"
23374 " movnti %%edx, 12(%3)\n"
23375 - "4: movl 16(%4), %%eax\n"
23376 - "41: movl 20(%4), %%edx\n"
23377 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23378 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23379 " movnti %%eax, 16(%3)\n"
23380 " movnti %%edx, 20(%3)\n"
23381 - "10: movl 24(%4), %%eax\n"
23382 - "51: movl 28(%4), %%edx\n"
23383 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23384 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23385 " movnti %%eax, 24(%3)\n"
23386 " movnti %%edx, 28(%3)\n"
23387 - "11: movl 32(%4), %%eax\n"
23388 - "61: movl 36(%4), %%edx\n"
23389 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23390 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23391 " movnti %%eax, 32(%3)\n"
23392 " movnti %%edx, 36(%3)\n"
23393 - "12: movl 40(%4), %%eax\n"
23394 - "71: movl 44(%4), %%edx\n"
23395 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23396 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23397 " movnti %%eax, 40(%3)\n"
23398 " movnti %%edx, 44(%3)\n"
23399 - "13: movl 48(%4), %%eax\n"
23400 - "81: movl 52(%4), %%edx\n"
23401 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23402 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23403 " movnti %%eax, 48(%3)\n"
23404 " movnti %%edx, 52(%3)\n"
23405 - "14: movl 56(%4), %%eax\n"
23406 - "91: movl 60(%4), %%edx\n"
23407 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23408 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23409 " movnti %%eax, 56(%3)\n"
23410 " movnti %%edx, 60(%3)\n"
23411 " addl $-64, %0\n"
23412 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23413 " shrl $2, %0\n"
23414 " andl $3, %%eax\n"
23415 " cld\n"
23416 - "6: rep; movsl\n"
23417 + "6: rep; "__copyuser_seg" movsl\n"
23418 " movl %%eax,%0\n"
23419 - "7: rep; movsb\n"
23420 + "7: rep; "__copyuser_seg" movsb\n"
23421 "8:\n"
23422 ".section .fixup,\"ax\"\n"
23423 "9: lea 0(%%eax,%0,4),%0\n"
23424 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
23425
23426 __asm__ __volatile__(
23427 " .align 2,0x90\n"
23428 - "0: movl 32(%4), %%eax\n"
23429 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23430 " cmpl $67, %0\n"
23431 " jbe 2f\n"
23432 - "1: movl 64(%4), %%eax\n"
23433 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23434 " .align 2,0x90\n"
23435 - "2: movl 0(%4), %%eax\n"
23436 - "21: movl 4(%4), %%edx\n"
23437 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23438 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23439 " movnti %%eax, 0(%3)\n"
23440 " movnti %%edx, 4(%3)\n"
23441 - "3: movl 8(%4), %%eax\n"
23442 - "31: movl 12(%4),%%edx\n"
23443 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23444 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23445 " movnti %%eax, 8(%3)\n"
23446 " movnti %%edx, 12(%3)\n"
23447 - "4: movl 16(%4), %%eax\n"
23448 - "41: movl 20(%4), %%edx\n"
23449 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23450 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23451 " movnti %%eax, 16(%3)\n"
23452 " movnti %%edx, 20(%3)\n"
23453 - "10: movl 24(%4), %%eax\n"
23454 - "51: movl 28(%4), %%edx\n"
23455 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23456 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23457 " movnti %%eax, 24(%3)\n"
23458 " movnti %%edx, 28(%3)\n"
23459 - "11: movl 32(%4), %%eax\n"
23460 - "61: movl 36(%4), %%edx\n"
23461 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23462 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23463 " movnti %%eax, 32(%3)\n"
23464 " movnti %%edx, 36(%3)\n"
23465 - "12: movl 40(%4), %%eax\n"
23466 - "71: movl 44(%4), %%edx\n"
23467 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23468 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23469 " movnti %%eax, 40(%3)\n"
23470 " movnti %%edx, 44(%3)\n"
23471 - "13: movl 48(%4), %%eax\n"
23472 - "81: movl 52(%4), %%edx\n"
23473 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23474 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23475 " movnti %%eax, 48(%3)\n"
23476 " movnti %%edx, 52(%3)\n"
23477 - "14: movl 56(%4), %%eax\n"
23478 - "91: movl 60(%4), %%edx\n"
23479 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23480 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23481 " movnti %%eax, 56(%3)\n"
23482 " movnti %%edx, 60(%3)\n"
23483 " addl $-64, %0\n"
23484 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
23485 " shrl $2, %0\n"
23486 " andl $3, %%eax\n"
23487 " cld\n"
23488 - "6: rep; movsl\n"
23489 + "6: rep; "__copyuser_seg" movsl\n"
23490 " movl %%eax,%0\n"
23491 - "7: rep; movsb\n"
23492 + "7: rep; "__copyuser_seg" movsb\n"
23493 "8:\n"
23494 ".section .fixup,\"ax\"\n"
23495 "9: lea 0(%%eax,%0,4),%0\n"
23496 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
23497 */
23498 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
23499 unsigned long size);
23500 -unsigned long __copy_user_intel(void __user *to, const void *from,
23501 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
23502 + unsigned long size);
23503 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
23504 unsigned long size);
23505 unsigned long __copy_user_zeroing_intel_nocache(void *to,
23506 const void __user *from, unsigned long size);
23507 #endif /* CONFIG_X86_INTEL_USERCOPY */
23508
23509 /* Generic arbitrary sized copy. */
23510 -#define __copy_user(to, from, size) \
23511 +#define __copy_user(to, from, size, prefix, set, restore) \
23512 do { \
23513 int __d0, __d1, __d2; \
23514 __asm__ __volatile__( \
23515 + set \
23516 " cmp $7,%0\n" \
23517 " jbe 1f\n" \
23518 " movl %1,%0\n" \
23519 " negl %0\n" \
23520 " andl $7,%0\n" \
23521 " subl %0,%3\n" \
23522 - "4: rep; movsb\n" \
23523 + "4: rep; "prefix"movsb\n" \
23524 " movl %3,%0\n" \
23525 " shrl $2,%0\n" \
23526 " andl $3,%3\n" \
23527 " .align 2,0x90\n" \
23528 - "0: rep; movsl\n" \
23529 + "0: rep; "prefix"movsl\n" \
23530 " movl %3,%0\n" \
23531 - "1: rep; movsb\n" \
23532 + "1: rep; "prefix"movsb\n" \
23533 "2:\n" \
23534 + restore \
23535 ".section .fixup,\"ax\"\n" \
23536 "5: addl %3,%0\n" \
23537 " jmp 2b\n" \
23538 @@ -682,14 +799,14 @@ do { \
23539 " negl %0\n" \
23540 " andl $7,%0\n" \
23541 " subl %0,%3\n" \
23542 - "4: rep; movsb\n" \
23543 + "4: rep; "__copyuser_seg"movsb\n" \
23544 " movl %3,%0\n" \
23545 " shrl $2,%0\n" \
23546 " andl $3,%3\n" \
23547 " .align 2,0x90\n" \
23548 - "0: rep; movsl\n" \
23549 + "0: rep; "__copyuser_seg"movsl\n" \
23550 " movl %3,%0\n" \
23551 - "1: rep; movsb\n" \
23552 + "1: rep; "__copyuser_seg"movsb\n" \
23553 "2:\n" \
23554 ".section .fixup,\"ax\"\n" \
23555 "5: addl %3,%0\n" \
23556 @@ -775,9 +892,9 @@ survive:
23557 }
23558 #endif
23559 if (movsl_is_ok(to, from, n))
23560 - __copy_user(to, from, n);
23561 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
23562 else
23563 - n = __copy_user_intel(to, from, n);
23564 + n = __generic_copy_to_user_intel(to, from, n);
23565 return n;
23566 }
23567 EXPORT_SYMBOL(__copy_to_user_ll);
23568 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
23569 unsigned long n)
23570 {
23571 if (movsl_is_ok(to, from, n))
23572 - __copy_user(to, from, n);
23573 + __copy_user(to, from, n, __copyuser_seg, "", "");
23574 else
23575 - n = __copy_user_intel((void __user *)to,
23576 - (const void *)from, n);
23577 + n = __generic_copy_from_user_intel(to, from, n);
23578 return n;
23579 }
23580 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
23581 @@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
23582 if (n > 64 && cpu_has_xmm2)
23583 n = __copy_user_intel_nocache(to, from, n);
23584 else
23585 - __copy_user(to, from, n);
23586 + __copy_user(to, from, n, __copyuser_seg, "", "");
23587 #else
23588 - __copy_user(to, from, n);
23589 + __copy_user(to, from, n, __copyuser_seg, "", "");
23590 #endif
23591 return n;
23592 }
23593 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
23594
23595 -/**
23596 - * copy_to_user: - Copy a block of data into user space.
23597 - * @to: Destination address, in user space.
23598 - * @from: Source address, in kernel space.
23599 - * @n: Number of bytes to copy.
23600 - *
23601 - * Context: User context only. This function may sleep.
23602 - *
23603 - * Copy data from kernel space to user space.
23604 - *
23605 - * Returns number of bytes that could not be copied.
23606 - * On success, this will be zero.
23607 - */
23608 -unsigned long
23609 -copy_to_user(void __user *to, const void *from, unsigned long n)
23610 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23611 +void __set_fs(mm_segment_t x)
23612 {
23613 - if (access_ok(VERIFY_WRITE, to, n))
23614 - n = __copy_to_user(to, from, n);
23615 - return n;
23616 + switch (x.seg) {
23617 + case 0:
23618 + loadsegment(gs, 0);
23619 + break;
23620 + case TASK_SIZE_MAX:
23621 + loadsegment(gs, __USER_DS);
23622 + break;
23623 + case -1UL:
23624 + loadsegment(gs, __KERNEL_DS);
23625 + break;
23626 + default:
23627 + BUG();
23628 + }
23629 + return;
23630 }
23631 -EXPORT_SYMBOL(copy_to_user);
23632 +EXPORT_SYMBOL(__set_fs);
23633
23634 -/**
23635 - * copy_from_user: - Copy a block of data from user space.
23636 - * @to: Destination address, in kernel space.
23637 - * @from: Source address, in user space.
23638 - * @n: Number of bytes to copy.
23639 - *
23640 - * Context: User context only. This function may sleep.
23641 - *
23642 - * Copy data from user space to kernel space.
23643 - *
23644 - * Returns number of bytes that could not be copied.
23645 - * On success, this will be zero.
23646 - *
23647 - * If some data could not be copied, this function will pad the copied
23648 - * data to the requested size using zero bytes.
23649 - */
23650 -unsigned long
23651 -copy_from_user(void *to, const void __user *from, unsigned long n)
23652 +void set_fs(mm_segment_t x)
23653 {
23654 - if (access_ok(VERIFY_READ, from, n))
23655 - n = __copy_from_user(to, from, n);
23656 - else
23657 - memset(to, 0, n);
23658 - return n;
23659 + current_thread_info()->addr_limit = x;
23660 + __set_fs(x);
23661 }
23662 -EXPORT_SYMBOL(copy_from_user);
23663 +EXPORT_SYMBOL(set_fs);
23664 +#endif
23665 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
23666 index b7c2849..8633ad8 100644
23667 --- a/arch/x86/lib/usercopy_64.c
23668 +++ b/arch/x86/lib/usercopy_64.c
23669 @@ -42,6 +42,12 @@ long
23670 __strncpy_from_user(char *dst, const char __user *src, long count)
23671 {
23672 long res;
23673 +
23674 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23675 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
23676 + src += PAX_USER_SHADOW_BASE;
23677 +#endif
23678 +
23679 __do_strncpy_from_user(dst, src, count, res);
23680 return res;
23681 }
23682 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
23683 {
23684 long __d0;
23685 might_fault();
23686 +
23687 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23688 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
23689 + addr += PAX_USER_SHADOW_BASE;
23690 +#endif
23691 +
23692 /* no memory constraint because it doesn't change any memory gcc knows
23693 about */
23694 asm volatile(
23695 @@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
23696 }
23697 EXPORT_SYMBOL(strlen_user);
23698
23699 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
23700 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
23701 {
23702 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23703 - return copy_user_generic((__force void *)to, (__force void *)from, len);
23704 - }
23705 - return len;
23706 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23707 +
23708 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23709 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
23710 + to += PAX_USER_SHADOW_BASE;
23711 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
23712 + from += PAX_USER_SHADOW_BASE;
23713 +#endif
23714 +
23715 + return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
23716 + }
23717 + return len;
23718 }
23719 EXPORT_SYMBOL(copy_in_user);
23720
23721 @@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
23722 * it is not necessary to optimize tail handling.
23723 */
23724 unsigned long
23725 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23726 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
23727 {
23728 char c;
23729 unsigned zero_len;
23730 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
23731 index 61b41ca..5fef66a 100644
23732 --- a/arch/x86/mm/extable.c
23733 +++ b/arch/x86/mm/extable.c
23734 @@ -1,14 +1,71 @@
23735 #include <linux/module.h>
23736 #include <linux/spinlock.h>
23737 +#include <linux/sort.h>
23738 #include <asm/uaccess.h>
23739 +#include <asm/pgtable.h>
23740
23741 +/*
23742 + * The exception table needs to be sorted so that the binary
23743 + * search that we use to find entries in it works properly.
23744 + * This is used both for the kernel exception table and for
23745 + * the exception tables of modules that get loaded.
23746 + */
23747 +static int cmp_ex(const void *a, const void *b)
23748 +{
23749 + const struct exception_table_entry *x = a, *y = b;
23750 +
23751 + /* avoid overflow */
23752 + if (x->insn > y->insn)
23753 + return 1;
23754 + if (x->insn < y->insn)
23755 + return -1;
23756 + return 0;
23757 +}
23758 +
23759 +static void swap_ex(void *a, void *b, int size)
23760 +{
23761 + struct exception_table_entry t, *x = a, *y = b;
23762 +
23763 + t = *x;
23764 +
23765 + pax_open_kernel();
23766 + *x = *y;
23767 + *y = t;
23768 + pax_close_kernel();
23769 +}
23770 +
23771 +void sort_extable(struct exception_table_entry *start,
23772 + struct exception_table_entry *finish)
23773 +{
23774 + sort(start, finish - start, sizeof(struct exception_table_entry),
23775 + cmp_ex, swap_ex);
23776 +}
23777 +
23778 +#ifdef CONFIG_MODULES
23779 +/*
23780 + * If the exception table is sorted, any referring to the module init
23781 + * will be at the beginning or the end.
23782 + */
23783 +void trim_init_extable(struct module *m)
23784 +{
23785 + /*trim the beginning*/
23786 + while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
23787 + m->extable++;
23788 + m->num_exentries--;
23789 + }
23790 + /*trim the end*/
23791 + while (m->num_exentries &&
23792 + within_module_init(m->extable[m->num_exentries-1].insn, m))
23793 + m->num_exentries--;
23794 +}
23795 +#endif /* CONFIG_MODULES */
23796
23797 int fixup_exception(struct pt_regs *regs)
23798 {
23799 const struct exception_table_entry *fixup;
23800
23801 #ifdef CONFIG_PNPBIOS
23802 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
23803 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
23804 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
23805 extern u32 pnp_bios_is_utter_crap;
23806 pnp_bios_is_utter_crap = 1;
23807 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
23808 index 8ac0d76..87899a4 100644
23809 --- a/arch/x86/mm/fault.c
23810 +++ b/arch/x86/mm/fault.c
23811 @@ -11,10 +11,19 @@
23812 #include <linux/kprobes.h> /* __kprobes, ... */
23813 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
23814 #include <linux/perf_event.h> /* perf_sw_event */
23815 +#include <linux/unistd.h>
23816 +#include <linux/compiler.h>
23817
23818 #include <asm/traps.h> /* dotraplinkage, ... */
23819 #include <asm/pgalloc.h> /* pgd_*(), ... */
23820 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
23821 +#include <asm/vsyscall.h>
23822 +#include <asm/tlbflush.h>
23823 +
23824 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23825 +#include <asm/stacktrace.h>
23826 +#include "../kernel/dumpstack.h"
23827 +#endif
23828
23829 /*
23830 * Page fault error code bits:
23831 @@ -51,7 +60,7 @@ static inline int notify_page_fault(struct pt_regs *regs)
23832 int ret = 0;
23833
23834 /* kprobe_running() needs smp_processor_id() */
23835 - if (kprobes_built_in() && !user_mode_vm(regs)) {
23836 + if (kprobes_built_in() && !user_mode(regs)) {
23837 preempt_disable();
23838 if (kprobe_running() && kprobe_fault_handler(regs, 14))
23839 ret = 1;
23840 @@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
23841 return !instr_lo || (instr_lo>>1) == 1;
23842 case 0x00:
23843 /* Prefetch instruction is 0x0F0D or 0x0F18 */
23844 - if (probe_kernel_address(instr, opcode))
23845 + if (user_mode(regs)) {
23846 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23847 + return 0;
23848 + } else if (probe_kernel_address(instr, opcode))
23849 return 0;
23850
23851 *prefetch = (instr_lo == 0xF) &&
23852 @@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
23853 while (instr < max_instr) {
23854 unsigned char opcode;
23855
23856 - if (probe_kernel_address(instr, opcode))
23857 + if (user_mode(regs)) {
23858 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23859 + break;
23860 + } else if (probe_kernel_address(instr, opcode))
23861 break;
23862
23863 instr++;
23864 @@ -172,6 +187,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
23865 force_sig_info(si_signo, &info, tsk);
23866 }
23867
23868 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23869 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
23870 +#endif
23871 +
23872 +#ifdef CONFIG_PAX_EMUTRAMP
23873 +static int pax_handle_fetch_fault(struct pt_regs *regs);
23874 +#endif
23875 +
23876 +#ifdef CONFIG_PAX_PAGEEXEC
23877 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
23878 +{
23879 + pgd_t *pgd;
23880 + pud_t *pud;
23881 + pmd_t *pmd;
23882 +
23883 + pgd = pgd_offset(mm, address);
23884 + if (!pgd_present(*pgd))
23885 + return NULL;
23886 + pud = pud_offset(pgd, address);
23887 + if (!pud_present(*pud))
23888 + return NULL;
23889 + pmd = pmd_offset(pud, address);
23890 + if (!pmd_present(*pmd))
23891 + return NULL;
23892 + return pmd;
23893 +}
23894 +#endif
23895 +
23896 DEFINE_SPINLOCK(pgd_lock);
23897 LIST_HEAD(pgd_list);
23898
23899 @@ -224,11 +267,24 @@ void vmalloc_sync_all(void)
23900 address += PMD_SIZE) {
23901
23902 unsigned long flags;
23903 +
23904 +#ifdef CONFIG_PAX_PER_CPU_PGD
23905 + unsigned long cpu;
23906 +#else
23907 struct page *page;
23908 +#endif
23909
23910 spin_lock_irqsave(&pgd_lock, flags);
23911 +
23912 +#ifdef CONFIG_PAX_PER_CPU_PGD
23913 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
23914 + pgd_t *pgd = get_cpu_pgd(cpu);
23915 +#else
23916 list_for_each_entry(page, &pgd_list, lru) {
23917 - if (!vmalloc_sync_one(page_address(page), address))
23918 + pgd_t *pgd = page_address(page);
23919 +#endif
23920 +
23921 + if (!vmalloc_sync_one(pgd, address))
23922 break;
23923 }
23924 spin_unlock_irqrestore(&pgd_lock, flags);
23925 @@ -258,6 +314,11 @@ static noinline int vmalloc_fault(unsigned long address)
23926 * an interrupt in the middle of a task switch..
23927 */
23928 pgd_paddr = read_cr3();
23929 +
23930 +#ifdef CONFIG_PAX_PER_CPU_PGD
23931 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
23932 +#endif
23933 +
23934 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
23935 if (!pmd_k)
23936 return -1;
23937 @@ -332,15 +393,27 @@ void vmalloc_sync_all(void)
23938
23939 const pgd_t *pgd_ref = pgd_offset_k(address);
23940 unsigned long flags;
23941 +
23942 +#ifdef CONFIG_PAX_PER_CPU_PGD
23943 + unsigned long cpu;
23944 +#else
23945 struct page *page;
23946 +#endif
23947
23948 if (pgd_none(*pgd_ref))
23949 continue;
23950
23951 spin_lock_irqsave(&pgd_lock, flags);
23952 +
23953 +#ifdef CONFIG_PAX_PER_CPU_PGD
23954 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
23955 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
23956 +#else
23957 list_for_each_entry(page, &pgd_list, lru) {
23958 pgd_t *pgd;
23959 pgd = (pgd_t *)page_address(page) + pgd_index(address);
23960 +#endif
23961 +
23962 if (pgd_none(*pgd))
23963 set_pgd(pgd, *pgd_ref);
23964 else
23965 @@ -373,7 +446,14 @@ static noinline int vmalloc_fault(unsigned long address)
23966 * happen within a race in page table update. In the later
23967 * case just flush:
23968 */
23969 +
23970 +#ifdef CONFIG_PAX_PER_CPU_PGD
23971 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
23972 + pgd = pgd_offset_cpu(smp_processor_id(), address);
23973 +#else
23974 pgd = pgd_offset(current->active_mm, address);
23975 +#endif
23976 +
23977 pgd_ref = pgd_offset_k(address);
23978 if (pgd_none(*pgd_ref))
23979 return -1;
23980 @@ -535,7 +615,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
23981 static int is_errata100(struct pt_regs *regs, unsigned long address)
23982 {
23983 #ifdef CONFIG_X86_64
23984 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
23985 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
23986 return 1;
23987 #endif
23988 return 0;
23989 @@ -562,7 +642,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
23990 }
23991
23992 static const char nx_warning[] = KERN_CRIT
23993 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
23994 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
23995
23996 static void
23997 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23998 @@ -571,15 +651,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23999 if (!oops_may_print())
24000 return;
24001
24002 - if (error_code & PF_INSTR) {
24003 + if (nx_enabled && (error_code & PF_INSTR)) {
24004 unsigned int level;
24005
24006 pte_t *pte = lookup_address(address, &level);
24007
24008 if (pte && pte_present(*pte) && !pte_exec(*pte))
24009 - printk(nx_warning, current_uid());
24010 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
24011 }
24012
24013 +#ifdef CONFIG_PAX_KERNEXEC
24014 + if (init_mm.start_code <= address && address < init_mm.end_code) {
24015 + if (current->signal->curr_ip)
24016 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
24017 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
24018 + else
24019 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
24020 + current->comm, task_pid_nr(current), current_uid(), current_euid());
24021 + }
24022 +#endif
24023 +
24024 printk(KERN_ALERT "BUG: unable to handle kernel ");
24025 if (address < PAGE_SIZE)
24026 printk(KERN_CONT "NULL pointer dereference");
24027 @@ -705,6 +796,23 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
24028 {
24029 struct task_struct *tsk = current;
24030
24031 +#ifdef CONFIG_X86_64
24032 + struct mm_struct *mm = tsk->mm;
24033 +
24034 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
24035 + if (regs->ip == (unsigned long)vgettimeofday) {
24036 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
24037 + return;
24038 + } else if (regs->ip == (unsigned long)vtime) {
24039 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
24040 + return;
24041 + } else if (regs->ip == (unsigned long)vgetcpu) {
24042 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
24043 + return;
24044 + }
24045 + }
24046 +#endif
24047 +
24048 /* User mode accesses just cause a SIGSEGV */
24049 if (error_code & PF_USER) {
24050 /*
24051 @@ -722,6 +830,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
24052 if (is_errata100(regs, address))
24053 return;
24054
24055 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24056 + if (pax_is_fetch_fault(regs, error_code, address)) {
24057 +
24058 +#ifdef CONFIG_PAX_EMUTRAMP
24059 + switch (pax_handle_fetch_fault(regs)) {
24060 + case 2:
24061 + return;
24062 + }
24063 +#endif
24064 +
24065 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
24066 + do_group_exit(SIGKILL);
24067 + }
24068 +#endif
24069 +
24070 if (unlikely(show_unhandled_signals))
24071 show_signal_msg(regs, error_code, address, tsk);
24072
24073 @@ -818,7 +941,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
24074 if (fault & VM_FAULT_HWPOISON) {
24075 printk(KERN_ERR
24076 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
24077 - tsk->comm, tsk->pid, address);
24078 + tsk->comm, task_pid_nr(tsk), address);
24079 code = BUS_MCEERR_AR;
24080 }
24081 #endif
24082 @@ -857,6 +980,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
24083 return 1;
24084 }
24085
24086 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24087 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
24088 +{
24089 + pte_t *pte;
24090 + pmd_t *pmd;
24091 + spinlock_t *ptl;
24092 + unsigned char pte_mask;
24093 +
24094 + if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
24095 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
24096 + return 0;
24097 +
24098 + /* PaX: it's our fault, let's handle it if we can */
24099 +
24100 + /* PaX: take a look at read faults before acquiring any locks */
24101 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
24102 + /* instruction fetch attempt from a protected page in user mode */
24103 + up_read(&mm->mmap_sem);
24104 +
24105 +#ifdef CONFIG_PAX_EMUTRAMP
24106 + switch (pax_handle_fetch_fault(regs)) {
24107 + case 2:
24108 + return 1;
24109 + }
24110 +#endif
24111 +
24112 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
24113 + do_group_exit(SIGKILL);
24114 + }
24115 +
24116 + pmd = pax_get_pmd(mm, address);
24117 + if (unlikely(!pmd))
24118 + return 0;
24119 +
24120 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
24121 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
24122 + pte_unmap_unlock(pte, ptl);
24123 + return 0;
24124 + }
24125 +
24126 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
24127 + /* write attempt to a protected page in user mode */
24128 + pte_unmap_unlock(pte, ptl);
24129 + return 0;
24130 + }
24131 +
24132 +#ifdef CONFIG_SMP
24133 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
24134 +#else
24135 + if (likely(address > get_limit(regs->cs)))
24136 +#endif
24137 + {
24138 + set_pte(pte, pte_mkread(*pte));
24139 + __flush_tlb_one(address);
24140 + pte_unmap_unlock(pte, ptl);
24141 + up_read(&mm->mmap_sem);
24142 + return 1;
24143 + }
24144 +
24145 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
24146 +
24147 + /*
24148 + * PaX: fill DTLB with user rights and retry
24149 + */
24150 + __asm__ __volatile__ (
24151 + "orb %2,(%1)\n"
24152 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
24153 +/*
24154 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
24155 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
24156 + * page fault when examined during a TLB load attempt. this is true not only
24157 + * for PTEs holding a non-present entry but also present entries that will
24158 + * raise a page fault (such as those set up by PaX, or the copy-on-write
24159 + * mechanism). in effect it means that we do *not* need to flush the TLBs
24160 + * for our target pages since their PTEs are simply not in the TLBs at all.
24161 +
24162 + * the best thing in omitting it is that we gain around 15-20% speed in the
24163 + * fast path of the page fault handler and can get rid of tracing since we
24164 + * can no longer flush unintended entries.
24165 + */
24166 + "invlpg (%0)\n"
24167 +#endif
24168 + __copyuser_seg"testb $0,(%0)\n"
24169 + "xorb %3,(%1)\n"
24170 + :
24171 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
24172 + : "memory", "cc");
24173 + pte_unmap_unlock(pte, ptl);
24174 + up_read(&mm->mmap_sem);
24175 + return 1;
24176 +}
24177 +#endif
24178 +
24179 /*
24180 * Handle a spurious fault caused by a stale TLB entry.
24181 *
24182 @@ -923,6 +1139,9 @@ int show_unhandled_signals = 1;
24183 static inline int
24184 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
24185 {
24186 + if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
24187 + return 1;
24188 +
24189 if (write) {
24190 /* write, present and write, not present: */
24191 if (unlikely(!(vma->vm_flags & VM_WRITE)))
24192 @@ -956,16 +1175,30 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24193 {
24194 struct vm_area_struct *vma;
24195 struct task_struct *tsk;
24196 - unsigned long address;
24197 struct mm_struct *mm;
24198 int write;
24199 int fault;
24200
24201 - tsk = current;
24202 - mm = tsk->mm;
24203 -
24204 /* Get the faulting address: */
24205 - address = read_cr2();
24206 + unsigned long address = read_cr2();
24207 +
24208 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24209 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
24210 + if (!search_exception_tables(regs->ip)) {
24211 + bad_area_nosemaphore(regs, error_code, address);
24212 + return;
24213 + }
24214 + if (address < PAX_USER_SHADOW_BASE) {
24215 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
24216 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
24217 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
24218 + } else
24219 + address -= PAX_USER_SHADOW_BASE;
24220 + }
24221 +#endif
24222 +
24223 + tsk = current;
24224 + mm = tsk->mm;
24225
24226 /*
24227 * Detect and handle instructions that would cause a page fault for
24228 @@ -1026,7 +1259,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24229 * User-mode registers count as a user access even for any
24230 * potential system fault or CPU buglet:
24231 */
24232 - if (user_mode_vm(regs)) {
24233 + if (user_mode(regs)) {
24234 local_irq_enable();
24235 error_code |= PF_USER;
24236 } else {
24237 @@ -1080,6 +1313,11 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24238 might_sleep();
24239 }
24240
24241 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24242 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
24243 + return;
24244 +#endif
24245 +
24246 vma = find_vma(mm, address);
24247 if (unlikely(!vma)) {
24248 bad_area(regs, error_code, address);
24249 @@ -1091,18 +1329,24 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24250 bad_area(regs, error_code, address);
24251 return;
24252 }
24253 - if (error_code & PF_USER) {
24254 - /*
24255 - * Accessing the stack below %sp is always a bug.
24256 - * The large cushion allows instructions like enter
24257 - * and pusha to work. ("enter $65535, $31" pushes
24258 - * 32 pointers and then decrements %sp by 65535.)
24259 - */
24260 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
24261 - bad_area(regs, error_code, address);
24262 - return;
24263 - }
24264 + /*
24265 + * Accessing the stack below %sp is always a bug.
24266 + * The large cushion allows instructions like enter
24267 + * and pusha to work. ("enter $65535, $31" pushes
24268 + * 32 pointers and then decrements %sp by 65535.)
24269 + */
24270 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
24271 + bad_area(regs, error_code, address);
24272 + return;
24273 }
24274 +
24275 +#ifdef CONFIG_PAX_SEGMEXEC
24276 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
24277 + bad_area(regs, error_code, address);
24278 + return;
24279 + }
24280 +#endif
24281 +
24282 if (unlikely(expand_stack(vma, address))) {
24283 bad_area(regs, error_code, address);
24284 return;
24285 @@ -1146,3 +1390,292 @@ good_area:
24286
24287 up_read(&mm->mmap_sem);
24288 }
24289 +
24290 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24291 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
24292 +{
24293 + struct mm_struct *mm = current->mm;
24294 + unsigned long ip = regs->ip;
24295 +
24296 + if (v8086_mode(regs))
24297 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
24298 +
24299 +#ifdef CONFIG_PAX_PAGEEXEC
24300 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
24301 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
24302 + return true;
24303 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
24304 + return true;
24305 + return false;
24306 + }
24307 +#endif
24308 +
24309 +#ifdef CONFIG_PAX_SEGMEXEC
24310 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
24311 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
24312 + return true;
24313 + return false;
24314 + }
24315 +#endif
24316 +
24317 + return false;
24318 +}
24319 +#endif
24320 +
24321 +#ifdef CONFIG_PAX_EMUTRAMP
24322 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
24323 +{
24324 + int err;
24325 +
24326 + do { /* PaX: libffi trampoline emulation */
24327 + unsigned char mov, jmp;
24328 + unsigned int addr1, addr2;
24329 +
24330 +#ifdef CONFIG_X86_64
24331 + if ((regs->ip + 9) >> 32)
24332 + break;
24333 +#endif
24334 +
24335 + err = get_user(mov, (unsigned char __user *)regs->ip);
24336 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24337 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24338 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24339 +
24340 + if (err)
24341 + break;
24342 +
24343 + if (mov == 0xB8 && jmp == 0xE9) {
24344 + regs->ax = addr1;
24345 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24346 + return 2;
24347 + }
24348 + } while (0);
24349 +
24350 + do { /* PaX: gcc trampoline emulation #1 */
24351 + unsigned char mov1, mov2;
24352 + unsigned short jmp;
24353 + unsigned int addr1, addr2;
24354 +
24355 +#ifdef CONFIG_X86_64
24356 + if ((regs->ip + 11) >> 32)
24357 + break;
24358 +#endif
24359 +
24360 + err = get_user(mov1, (unsigned char __user *)regs->ip);
24361 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24362 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
24363 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24364 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
24365 +
24366 + if (err)
24367 + break;
24368 +
24369 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
24370 + regs->cx = addr1;
24371 + regs->ax = addr2;
24372 + regs->ip = addr2;
24373 + return 2;
24374 + }
24375 + } while (0);
24376 +
24377 + do { /* PaX: gcc trampoline emulation #2 */
24378 + unsigned char mov, jmp;
24379 + unsigned int addr1, addr2;
24380 +
24381 +#ifdef CONFIG_X86_64
24382 + if ((regs->ip + 9) >> 32)
24383 + break;
24384 +#endif
24385 +
24386 + err = get_user(mov, (unsigned char __user *)regs->ip);
24387 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24388 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24389 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24390 +
24391 + if (err)
24392 + break;
24393 +
24394 + if (mov == 0xB9 && jmp == 0xE9) {
24395 + regs->cx = addr1;
24396 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24397 + return 2;
24398 + }
24399 + } while (0);
24400 +
24401 + return 1; /* PaX in action */
24402 +}
24403 +
24404 +#ifdef CONFIG_X86_64
24405 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
24406 +{
24407 + int err;
24408 +
24409 + do { /* PaX: libffi trampoline emulation */
24410 + unsigned short mov1, mov2, jmp1;
24411 + unsigned char stcclc, jmp2;
24412 + unsigned long addr1, addr2;
24413 +
24414 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24415 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24416 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24417 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24418 + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
24419 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
24420 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
24421 +
24422 + if (err)
24423 + break;
24424 +
24425 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24426 + regs->r11 = addr1;
24427 + regs->r10 = addr2;
24428 + if (stcclc == 0xF8)
24429 + regs->flags &= ~X86_EFLAGS_CF;
24430 + else
24431 + regs->flags |= X86_EFLAGS_CF;
24432 + regs->ip = addr1;
24433 + return 2;
24434 + }
24435 + } while (0);
24436 +
24437 + do { /* PaX: gcc trampoline emulation #1 */
24438 + unsigned short mov1, mov2, jmp1;
24439 + unsigned char jmp2;
24440 + unsigned int addr1;
24441 + unsigned long addr2;
24442 +
24443 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24444 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
24445 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
24446 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
24447 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
24448 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
24449 +
24450 + if (err)
24451 + break;
24452 +
24453 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24454 + regs->r11 = addr1;
24455 + regs->r10 = addr2;
24456 + regs->ip = addr1;
24457 + return 2;
24458 + }
24459 + } while (0);
24460 +
24461 + do { /* PaX: gcc trampoline emulation #2 */
24462 + unsigned short mov1, mov2, jmp1;
24463 + unsigned char jmp2;
24464 + unsigned long addr1, addr2;
24465 +
24466 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24467 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24468 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24469 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24470 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
24471 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
24472 +
24473 + if (err)
24474 + break;
24475 +
24476 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24477 + regs->r11 = addr1;
24478 + regs->r10 = addr2;
24479 + regs->ip = addr1;
24480 + return 2;
24481 + }
24482 + } while (0);
24483 +
24484 + return 1; /* PaX in action */
24485 +}
24486 +#endif
24487 +
24488 +/*
24489 + * PaX: decide what to do with offenders (regs->ip = fault address)
24490 + *
24491 + * returns 1 when task should be killed
24492 + * 2 when gcc trampoline was detected
24493 + */
24494 +static int pax_handle_fetch_fault(struct pt_regs *regs)
24495 +{
24496 + if (v8086_mode(regs))
24497 + return 1;
24498 +
24499 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
24500 + return 1;
24501 +
24502 +#ifdef CONFIG_X86_32
24503 + return pax_handle_fetch_fault_32(regs);
24504 +#else
24505 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
24506 + return pax_handle_fetch_fault_32(regs);
24507 + else
24508 + return pax_handle_fetch_fault_64(regs);
24509 +#endif
24510 +}
24511 +#endif
24512 +
24513 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24514 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
24515 +{
24516 + long i;
24517 +
24518 + printk(KERN_ERR "PAX: bytes at PC: ");
24519 + for (i = 0; i < 20; i++) {
24520 + unsigned char c;
24521 + if (get_user(c, (unsigned char __force_user *)pc+i))
24522 + printk(KERN_CONT "?? ");
24523 + else
24524 + printk(KERN_CONT "%02x ", c);
24525 + }
24526 + printk("\n");
24527 +
24528 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
24529 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
24530 + unsigned long c;
24531 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
24532 +#ifdef CONFIG_X86_32
24533 + printk(KERN_CONT "???????? ");
24534 +#else
24535 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
24536 + printk(KERN_CONT "???????? ???????? ");
24537 + else
24538 + printk(KERN_CONT "???????????????? ");
24539 +#endif
24540 + } else {
24541 +#ifdef CONFIG_X86_64
24542 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
24543 + printk(KERN_CONT "%08x ", (unsigned int)c);
24544 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
24545 + } else
24546 +#endif
24547 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
24548 + }
24549 + }
24550 + printk("\n");
24551 +}
24552 +#endif
24553 +
24554 +/**
24555 + * probe_kernel_write(): safely attempt to write to a location
24556 + * @dst: address to write to
24557 + * @src: pointer to the data that shall be written
24558 + * @size: size of the data chunk
24559 + *
24560 + * Safely write to address @dst from the buffer at @src. If a kernel fault
24561 + * happens, handle that and return -EFAULT.
24562 + */
24563 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
24564 +{
24565 + long ret;
24566 + mm_segment_t old_fs = get_fs();
24567 +
24568 + set_fs(KERNEL_DS);
24569 + pagefault_disable();
24570 + pax_open_kernel();
24571 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
24572 + pax_close_kernel();
24573 + pagefault_enable();
24574 + set_fs(old_fs);
24575 +
24576 + return ret ? -EFAULT : 0;
24577 +}
24578 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
24579 index 71da1bc..7a16bf4 100644
24580 --- a/arch/x86/mm/gup.c
24581 +++ b/arch/x86/mm/gup.c
24582 @@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
24583 addr = start;
24584 len = (unsigned long) nr_pages << PAGE_SHIFT;
24585 end = start + len;
24586 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24587 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24588 (void __user *)start, len)))
24589 return 0;
24590
24591 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
24592 index 63a6ba6..79abd7a 100644
24593 --- a/arch/x86/mm/highmem_32.c
24594 +++ b/arch/x86/mm/highmem_32.c
24595 @@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
24596 idx = type + KM_TYPE_NR*smp_processor_id();
24597 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24598 BUG_ON(!pte_none(*(kmap_pte-idx)));
24599 +
24600 + pax_open_kernel();
24601 set_pte(kmap_pte-idx, mk_pte(page, prot));
24602 + pax_close_kernel();
24603
24604 return (void *)vaddr;
24605 }
24606 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
24607 index f46c340..6ff9a26 100644
24608 --- a/arch/x86/mm/hugetlbpage.c
24609 +++ b/arch/x86/mm/hugetlbpage.c
24610 @@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
24611 struct hstate *h = hstate_file(file);
24612 struct mm_struct *mm = current->mm;
24613 struct vm_area_struct *vma;
24614 - unsigned long start_addr;
24615 + unsigned long start_addr, pax_task_size = TASK_SIZE;
24616 +
24617 +#ifdef CONFIG_PAX_SEGMEXEC
24618 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24619 + pax_task_size = SEGMEXEC_TASK_SIZE;
24620 +#endif
24621 +
24622 + pax_task_size -= PAGE_SIZE;
24623
24624 if (len > mm->cached_hole_size) {
24625 - start_addr = mm->free_area_cache;
24626 + start_addr = mm->free_area_cache;
24627 } else {
24628 - start_addr = TASK_UNMAPPED_BASE;
24629 - mm->cached_hole_size = 0;
24630 + start_addr = mm->mmap_base;
24631 + mm->cached_hole_size = 0;
24632 }
24633
24634 full_search:
24635 @@ -281,26 +288,27 @@ full_search:
24636
24637 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
24638 /* At this point: (!vma || addr < vma->vm_end). */
24639 - if (TASK_SIZE - len < addr) {
24640 + if (pax_task_size - len < addr) {
24641 /*
24642 * Start a new search - just in case we missed
24643 * some holes.
24644 */
24645 - if (start_addr != TASK_UNMAPPED_BASE) {
24646 - start_addr = TASK_UNMAPPED_BASE;
24647 + if (start_addr != mm->mmap_base) {
24648 + start_addr = mm->mmap_base;
24649 mm->cached_hole_size = 0;
24650 goto full_search;
24651 }
24652 return -ENOMEM;
24653 }
24654 - if (!vma || addr + len <= vma->vm_start) {
24655 - mm->free_area_cache = addr + len;
24656 - return addr;
24657 - }
24658 + if (check_heap_stack_gap(vma, addr, len))
24659 + break;
24660 if (addr + mm->cached_hole_size < vma->vm_start)
24661 mm->cached_hole_size = vma->vm_start - addr;
24662 addr = ALIGN(vma->vm_end, huge_page_size(h));
24663 }
24664 +
24665 + mm->free_area_cache = addr + len;
24666 + return addr;
24667 }
24668
24669 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24670 @@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24671 {
24672 struct hstate *h = hstate_file(file);
24673 struct mm_struct *mm = current->mm;
24674 - struct vm_area_struct *vma, *prev_vma;
24675 - unsigned long base = mm->mmap_base, addr = addr0;
24676 + struct vm_area_struct *vma;
24677 + unsigned long base = mm->mmap_base, addr;
24678 unsigned long largest_hole = mm->cached_hole_size;
24679 - int first_time = 1;
24680
24681 /* don't allow allocations above current base */
24682 if (mm->free_area_cache > base)
24683 @@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24684 largest_hole = 0;
24685 mm->free_area_cache = base;
24686 }
24687 -try_again:
24688 +
24689 /* make sure it can fit in the remaining address space */
24690 if (mm->free_area_cache < len)
24691 goto fail;
24692
24693 /* either no address requested or cant fit in requested address hole */
24694 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
24695 + addr = (mm->free_area_cache - len);
24696 do {
24697 + addr &= huge_page_mask(h);
24698 + vma = find_vma(mm, addr);
24699 /*
24700 * Lookup failure means no vma is above this address,
24701 * i.e. return with success:
24702 - */
24703 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
24704 - return addr;
24705 -
24706 - /*
24707 * new region fits between prev_vma->vm_end and
24708 * vma->vm_start, use it:
24709 */
24710 - if (addr + len <= vma->vm_start &&
24711 - (!prev_vma || (addr >= prev_vma->vm_end))) {
24712 + if (check_heap_stack_gap(vma, addr, len)) {
24713 /* remember the address as a hint for next time */
24714 - mm->cached_hole_size = largest_hole;
24715 - return (mm->free_area_cache = addr);
24716 - } else {
24717 - /* pull free_area_cache down to the first hole */
24718 - if (mm->free_area_cache == vma->vm_end) {
24719 - mm->free_area_cache = vma->vm_start;
24720 - mm->cached_hole_size = largest_hole;
24721 - }
24722 + mm->cached_hole_size = largest_hole;
24723 + return (mm->free_area_cache = addr);
24724 + }
24725 + /* pull free_area_cache down to the first hole */
24726 + if (mm->free_area_cache == vma->vm_end) {
24727 + mm->free_area_cache = vma->vm_start;
24728 + mm->cached_hole_size = largest_hole;
24729 }
24730
24731 /* remember the largest hole we saw so far */
24732 if (addr + largest_hole < vma->vm_start)
24733 - largest_hole = vma->vm_start - addr;
24734 + largest_hole = vma->vm_start - addr;
24735
24736 /* try just below the current vma->vm_start */
24737 - addr = (vma->vm_start - len) & huge_page_mask(h);
24738 - } while (len <= vma->vm_start);
24739 + addr = skip_heap_stack_gap(vma, len);
24740 + } while (!IS_ERR_VALUE(addr));
24741
24742 fail:
24743 /*
24744 - * if hint left us with no space for the requested
24745 - * mapping then try again:
24746 - */
24747 - if (first_time) {
24748 - mm->free_area_cache = base;
24749 - largest_hole = 0;
24750 - first_time = 0;
24751 - goto try_again;
24752 - }
24753 - /*
24754 * A failed mmap() very likely causes application failure,
24755 * so fall back to the bottom-up function here. This scenario
24756 * can happen with large stack limits and large mmap()
24757 * allocations.
24758 */
24759 - mm->free_area_cache = TASK_UNMAPPED_BASE;
24760 +
24761 +#ifdef CONFIG_PAX_SEGMEXEC
24762 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24763 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
24764 + else
24765 +#endif
24766 +
24767 + mm->mmap_base = TASK_UNMAPPED_BASE;
24768 +
24769 +#ifdef CONFIG_PAX_RANDMMAP
24770 + if (mm->pax_flags & MF_PAX_RANDMMAP)
24771 + mm->mmap_base += mm->delta_mmap;
24772 +#endif
24773 +
24774 + mm->free_area_cache = mm->mmap_base;
24775 mm->cached_hole_size = ~0UL;
24776 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
24777 len, pgoff, flags);
24778 @@ -387,6 +393,7 @@ fail:
24779 /*
24780 * Restore the topdown base:
24781 */
24782 + mm->mmap_base = base;
24783 mm->free_area_cache = base;
24784 mm->cached_hole_size = ~0UL;
24785
24786 @@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24787 struct hstate *h = hstate_file(file);
24788 struct mm_struct *mm = current->mm;
24789 struct vm_area_struct *vma;
24790 + unsigned long pax_task_size = TASK_SIZE;
24791
24792 if (len & ~huge_page_mask(h))
24793 return -EINVAL;
24794 - if (len > TASK_SIZE)
24795 +
24796 +#ifdef CONFIG_PAX_SEGMEXEC
24797 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24798 + pax_task_size = SEGMEXEC_TASK_SIZE;
24799 +#endif
24800 +
24801 + pax_task_size -= PAGE_SIZE;
24802 +
24803 + if (len > pax_task_size)
24804 return -ENOMEM;
24805
24806 if (flags & MAP_FIXED) {
24807 @@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24808 if (addr) {
24809 addr = ALIGN(addr, huge_page_size(h));
24810 vma = find_vma(mm, addr);
24811 - if (TASK_SIZE - len >= addr &&
24812 - (!vma || addr + len <= vma->vm_start))
24813 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
24814 return addr;
24815 }
24816 if (mm->get_unmapped_area == arch_get_unmapped_area)
24817 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
24818 index 73ffd55..ad78676 100644
24819 --- a/arch/x86/mm/init.c
24820 +++ b/arch/x86/mm/init.c
24821 @@ -69,11 +69,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
24822 * cause a hotspot and fill up ZONE_DMA. The page tables
24823 * need roughly 0.5KB per GB.
24824 */
24825 -#ifdef CONFIG_X86_32
24826 - start = 0x7000;
24827 -#else
24828 - start = 0x8000;
24829 -#endif
24830 + start = 0x100000;
24831 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
24832 tables, PAGE_SIZE);
24833 if (e820_table_start == -1UL)
24834 @@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
24835 #endif
24836
24837 set_nx();
24838 - if (nx_enabled)
24839 + if (nx_enabled && cpu_has_nx)
24840 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
24841
24842 /* Enable PSE if available */
24843 @@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
24844 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
24845 * mmio resources as well as potential bios/acpi data regions.
24846 */
24847 +
24848 int devmem_is_allowed(unsigned long pagenr)
24849 {
24850 +#ifdef CONFIG_GRKERNSEC_KMEM
24851 + /* allow BDA */
24852 + if (!pagenr)
24853 + return 1;
24854 + /* allow EBDA */
24855 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
24856 + return 1;
24857 + /* allow ISA/video mem */
24858 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
24859 + return 1;
24860 + /* throw out everything else below 1MB */
24861 + if (pagenr <= 256)
24862 + return 0;
24863 +#else
24864 if (pagenr <= 256)
24865 return 1;
24866 +#endif
24867 +
24868 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
24869 return 0;
24870 if (!page_is_ram(pagenr))
24871 @@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
24872
24873 void free_initmem(void)
24874 {
24875 +
24876 +#ifdef CONFIG_PAX_KERNEXEC
24877 +#ifdef CONFIG_X86_32
24878 + /* PaX: limit KERNEL_CS to actual size */
24879 + unsigned long addr, limit;
24880 + struct desc_struct d;
24881 + int cpu;
24882 +
24883 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
24884 + limit = (limit - 1UL) >> PAGE_SHIFT;
24885 +
24886 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
24887 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
24888 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
24889 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
24890 + }
24891 +
24892 + /* PaX: make KERNEL_CS read-only */
24893 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
24894 + if (!paravirt_enabled())
24895 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
24896 +/*
24897 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
24898 + pgd = pgd_offset_k(addr);
24899 + pud = pud_offset(pgd, addr);
24900 + pmd = pmd_offset(pud, addr);
24901 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24902 + }
24903 +*/
24904 +#ifdef CONFIG_X86_PAE
24905 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
24906 +/*
24907 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
24908 + pgd = pgd_offset_k(addr);
24909 + pud = pud_offset(pgd, addr);
24910 + pmd = pmd_offset(pud, addr);
24911 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24912 + }
24913 +*/
24914 +#endif
24915 +
24916 +#ifdef CONFIG_MODULES
24917 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
24918 +#endif
24919 +
24920 +#else
24921 + pgd_t *pgd;
24922 + pud_t *pud;
24923 + pmd_t *pmd;
24924 + unsigned long addr, end;
24925 +
24926 + /* PaX: make kernel code/rodata read-only, rest non-executable */
24927 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
24928 + pgd = pgd_offset_k(addr);
24929 + pud = pud_offset(pgd, addr);
24930 + pmd = pmd_offset(pud, addr);
24931 + if (!pmd_present(*pmd))
24932 + continue;
24933 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
24934 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24935 + else
24936 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24937 + }
24938 +
24939 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
24940 + end = addr + KERNEL_IMAGE_SIZE;
24941 + for (; addr < end; addr += PMD_SIZE) {
24942 + pgd = pgd_offset_k(addr);
24943 + pud = pud_offset(pgd, addr);
24944 + pmd = pmd_offset(pud, addr);
24945 + if (!pmd_present(*pmd))
24946 + continue;
24947 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
24948 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24949 + }
24950 +#endif
24951 +
24952 + flush_tlb_all();
24953 +#endif
24954 +
24955 free_init_pages("unused kernel memory",
24956 (unsigned long)(&__init_begin),
24957 (unsigned long)(&__init_end));
24958 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
24959 index 30938c1..bda3d5d 100644
24960 --- a/arch/x86/mm/init_32.c
24961 +++ b/arch/x86/mm/init_32.c
24962 @@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
24963 }
24964
24965 /*
24966 - * Creates a middle page table and puts a pointer to it in the
24967 - * given global directory entry. This only returns the gd entry
24968 - * in non-PAE compilation mode, since the middle layer is folded.
24969 - */
24970 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
24971 -{
24972 - pud_t *pud;
24973 - pmd_t *pmd_table;
24974 -
24975 -#ifdef CONFIG_X86_PAE
24976 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
24977 - if (after_bootmem)
24978 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
24979 - else
24980 - pmd_table = (pmd_t *)alloc_low_page();
24981 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
24982 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
24983 - pud = pud_offset(pgd, 0);
24984 - BUG_ON(pmd_table != pmd_offset(pud, 0));
24985 -
24986 - return pmd_table;
24987 - }
24988 -#endif
24989 - pud = pud_offset(pgd, 0);
24990 - pmd_table = pmd_offset(pud, 0);
24991 -
24992 - return pmd_table;
24993 -}
24994 -
24995 -/*
24996 * Create a page table and place a pointer to it in a middle page
24997 * directory entry:
24998 */
24999 @@ -121,13 +91,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
25000 page_table = (pte_t *)alloc_low_page();
25001
25002 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
25003 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25004 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
25005 +#else
25006 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
25007 +#endif
25008 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
25009 }
25010
25011 return pte_offset_kernel(pmd, 0);
25012 }
25013
25014 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
25015 +{
25016 + pud_t *pud;
25017 + pmd_t *pmd_table;
25018 +
25019 + pud = pud_offset(pgd, 0);
25020 + pmd_table = pmd_offset(pud, 0);
25021 +
25022 + return pmd_table;
25023 +}
25024 +
25025 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
25026 {
25027 int pgd_idx = pgd_index(vaddr);
25028 @@ -201,6 +186,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25029 int pgd_idx, pmd_idx;
25030 unsigned long vaddr;
25031 pgd_t *pgd;
25032 + pud_t *pud;
25033 pmd_t *pmd;
25034 pte_t *pte = NULL;
25035
25036 @@ -210,8 +196,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25037 pgd = pgd_base + pgd_idx;
25038
25039 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
25040 - pmd = one_md_table_init(pgd);
25041 - pmd = pmd + pmd_index(vaddr);
25042 + pud = pud_offset(pgd, vaddr);
25043 + pmd = pmd_offset(pud, vaddr);
25044 +
25045 +#ifdef CONFIG_X86_PAE
25046 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
25047 +#endif
25048 +
25049 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
25050 pmd++, pmd_idx++) {
25051 pte = page_table_kmap_check(one_page_table_init(pmd),
25052 @@ -223,11 +214,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25053 }
25054 }
25055
25056 -static inline int is_kernel_text(unsigned long addr)
25057 +static inline int is_kernel_text(unsigned long start, unsigned long end)
25058 {
25059 - if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
25060 - return 1;
25061 - return 0;
25062 + if ((start > ktla_ktva((unsigned long)_etext) ||
25063 + end <= ktla_ktva((unsigned long)_stext)) &&
25064 + (start > ktla_ktva((unsigned long)_einittext) ||
25065 + end <= ktla_ktva((unsigned long)_sinittext)) &&
25066 +
25067 +#ifdef CONFIG_ACPI_SLEEP
25068 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
25069 +#endif
25070 +
25071 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
25072 + return 0;
25073 + return 1;
25074 }
25075
25076 /*
25077 @@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned long start,
25078 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
25079 unsigned long start_pfn, end_pfn;
25080 pgd_t *pgd_base = swapper_pg_dir;
25081 - int pgd_idx, pmd_idx, pte_ofs;
25082 + unsigned int pgd_idx, pmd_idx, pte_ofs;
25083 unsigned long pfn;
25084 pgd_t *pgd;
25085 + pud_t *pud;
25086 pmd_t *pmd;
25087 pte_t *pte;
25088 unsigned pages_2m, pages_4k;
25089 @@ -278,8 +279,13 @@ repeat:
25090 pfn = start_pfn;
25091 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
25092 pgd = pgd_base + pgd_idx;
25093 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
25094 - pmd = one_md_table_init(pgd);
25095 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
25096 + pud = pud_offset(pgd, 0);
25097 + pmd = pmd_offset(pud, 0);
25098 +
25099 +#ifdef CONFIG_X86_PAE
25100 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
25101 +#endif
25102
25103 if (pfn >= end_pfn)
25104 continue;
25105 @@ -291,14 +297,13 @@ repeat:
25106 #endif
25107 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
25108 pmd++, pmd_idx++) {
25109 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
25110 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
25111
25112 /*
25113 * Map with big pages if possible, otherwise
25114 * create normal page tables:
25115 */
25116 if (use_pse) {
25117 - unsigned int addr2;
25118 pgprot_t prot = PAGE_KERNEL_LARGE;
25119 /*
25120 * first pass will use the same initial
25121 @@ -308,11 +313,7 @@ repeat:
25122 __pgprot(PTE_IDENT_ATTR |
25123 _PAGE_PSE);
25124
25125 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
25126 - PAGE_OFFSET + PAGE_SIZE-1;
25127 -
25128 - if (is_kernel_text(addr) ||
25129 - is_kernel_text(addr2))
25130 + if (is_kernel_text(address, address + PMD_SIZE))
25131 prot = PAGE_KERNEL_LARGE_EXEC;
25132
25133 pages_2m++;
25134 @@ -329,7 +330,7 @@ repeat:
25135 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
25136 pte += pte_ofs;
25137 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
25138 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
25139 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
25140 pgprot_t prot = PAGE_KERNEL;
25141 /*
25142 * first pass will use the same initial
25143 @@ -337,7 +338,7 @@ repeat:
25144 */
25145 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
25146
25147 - if (is_kernel_text(addr))
25148 + if (is_kernel_text(address, address + PAGE_SIZE))
25149 prot = PAGE_KERNEL_EXEC;
25150
25151 pages_4k++;
25152 @@ -489,7 +490,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
25153
25154 pud = pud_offset(pgd, va);
25155 pmd = pmd_offset(pud, va);
25156 - if (!pmd_present(*pmd))
25157 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
25158 break;
25159
25160 pte = pte_offset_kernel(pmd, va);
25161 @@ -541,9 +542,7 @@ void __init early_ioremap_page_table_range_init(void)
25162
25163 static void __init pagetable_init(void)
25164 {
25165 - pgd_t *pgd_base = swapper_pg_dir;
25166 -
25167 - permanent_kmaps_init(pgd_base);
25168 + permanent_kmaps_init(swapper_pg_dir);
25169 }
25170
25171 #ifdef CONFIG_ACPI_SLEEP
25172 @@ -551,12 +550,12 @@ static void __init pagetable_init(void)
25173 * ACPI suspend needs this for resume, because things like the intel-agp
25174 * driver might have split up a kernel 4MB mapping.
25175 */
25176 -char swsusp_pg_dir[PAGE_SIZE]
25177 +pgd_t swsusp_pg_dir[PTRS_PER_PGD]
25178 __attribute__ ((aligned(PAGE_SIZE)));
25179
25180 static inline void save_pg_dir(void)
25181 {
25182 - memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
25183 + clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
25184 }
25185 #else /* !CONFIG_ACPI_SLEEP */
25186 static inline void save_pg_dir(void)
25187 @@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
25188 flush_tlb_all();
25189 }
25190
25191 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25192 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25193 EXPORT_SYMBOL_GPL(__supported_pte_mask);
25194
25195 /* user-defined highmem size */
25196 @@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void)
25197 * Initialize the boot-time allocator (with low memory only):
25198 */
25199 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
25200 - bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
25201 + bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
25202 PAGE_SIZE);
25203 if (bootmap == -1L)
25204 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
25205 @@ -864,6 +863,12 @@ void __init mem_init(void)
25206
25207 pci_iommu_alloc();
25208
25209 +#ifdef CONFIG_PAX_PER_CPU_PGD
25210 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25211 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25212 + KERNEL_PGD_PTRS);
25213 +#endif
25214 +
25215 #ifdef CONFIG_FLATMEM
25216 BUG_ON(!mem_map);
25217 #endif
25218 @@ -881,7 +886,7 @@ void __init mem_init(void)
25219 set_highmem_pages_init();
25220
25221 codesize = (unsigned long) &_etext - (unsigned long) &_text;
25222 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
25223 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
25224 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
25225
25226 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
25227 @@ -923,10 +928,10 @@ void __init mem_init(void)
25228 ((unsigned long)&__init_end -
25229 (unsigned long)&__init_begin) >> 10,
25230
25231 - (unsigned long)&_etext, (unsigned long)&_edata,
25232 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
25233 + (unsigned long)&_sdata, (unsigned long)&_edata,
25234 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
25235
25236 - (unsigned long)&_text, (unsigned long)&_etext,
25237 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
25238 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
25239
25240 /*
25241 @@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
25242 if (!kernel_set_to_readonly)
25243 return;
25244
25245 + start = ktla_ktva(start);
25246 pr_debug("Set kernel text: %lx - %lx for read write\n",
25247 start, start+size);
25248
25249 @@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
25250 if (!kernel_set_to_readonly)
25251 return;
25252
25253 + start = ktla_ktva(start);
25254 pr_debug("Set kernel text: %lx - %lx for read only\n",
25255 start, start+size);
25256
25257 @@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
25258 unsigned long start = PFN_ALIGN(_text);
25259 unsigned long size = PFN_ALIGN(_etext) - start;
25260
25261 + start = ktla_ktva(start);
25262 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
25263 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
25264 size >> 10);
25265 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
25266 index 7d095ad..25d2549 100644
25267 --- a/arch/x86/mm/init_64.c
25268 +++ b/arch/x86/mm/init_64.c
25269 @@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
25270 pmd = fill_pmd(pud, vaddr);
25271 pte = fill_pte(pmd, vaddr);
25272
25273 + pax_open_kernel();
25274 set_pte(pte, new_pte);
25275 + pax_close_kernel();
25276
25277 /*
25278 * It's enough to flush this one mapping.
25279 @@ -223,14 +225,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
25280 pgd = pgd_offset_k((unsigned long)__va(phys));
25281 if (pgd_none(*pgd)) {
25282 pud = (pud_t *) spp_getpage();
25283 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
25284 - _PAGE_USER));
25285 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
25286 }
25287 pud = pud_offset(pgd, (unsigned long)__va(phys));
25288 if (pud_none(*pud)) {
25289 pmd = (pmd_t *) spp_getpage();
25290 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
25291 - _PAGE_USER));
25292 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
25293 }
25294 pmd = pmd_offset(pud, phys);
25295 BUG_ON(!pmd_none(*pmd));
25296 @@ -675,6 +675,12 @@ void __init mem_init(void)
25297
25298 pci_iommu_alloc();
25299
25300 +#ifdef CONFIG_PAX_PER_CPU_PGD
25301 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25302 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25303 + KERNEL_PGD_PTRS);
25304 +#endif
25305 +
25306 /* clear_bss() already clear the empty_zero_page */
25307
25308 reservedpages = 0;
25309 @@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
25310 static struct vm_area_struct gate_vma = {
25311 .vm_start = VSYSCALL_START,
25312 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
25313 - .vm_page_prot = PAGE_READONLY_EXEC,
25314 - .vm_flags = VM_READ | VM_EXEC
25315 + .vm_page_prot = PAGE_READONLY,
25316 + .vm_flags = VM_READ
25317 };
25318
25319 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
25320 @@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long addr)
25321
25322 const char *arch_vma_name(struct vm_area_struct *vma)
25323 {
25324 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
25325 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
25326 return "[vdso]";
25327 if (vma == &gate_vma)
25328 return "[vsyscall]";
25329 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
25330 index 84e236c..69bd3f6 100644
25331 --- a/arch/x86/mm/iomap_32.c
25332 +++ b/arch/x86/mm/iomap_32.c
25333 @@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
25334 debug_kmap_atomic(type);
25335 idx = type + KM_TYPE_NR * smp_processor_id();
25336 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25337 +
25338 + pax_open_kernel();
25339 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
25340 + pax_close_kernel();
25341 +
25342 arch_flush_lazy_mmu_mode();
25343
25344 return (void *)vaddr;
25345 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
25346 index 2feb9bd..3646202 100644
25347 --- a/arch/x86/mm/ioremap.c
25348 +++ b/arch/x86/mm/ioremap.c
25349 @@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
25350 * Second special case: Some BIOSen report the PC BIOS
25351 * area (640->1Mb) as ram even though it is not.
25352 */
25353 - if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
25354 - pagenr < (BIOS_END >> PAGE_SHIFT))
25355 + if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
25356 + pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
25357 return 0;
25358
25359 for (i = 0; i < e820.nr_map; i++) {
25360 @@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
25361 /*
25362 * Don't allow anybody to remap normal RAM that we're using..
25363 */
25364 - for (pfn = phys_addr >> PAGE_SHIFT;
25365 - (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
25366 - pfn++) {
25367 -
25368 + for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
25369 int is_ram = page_is_ram(pfn);
25370
25371 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
25372 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
25373 return NULL;
25374 WARN_ON_ONCE(is_ram);
25375 }
25376 @@ -407,7 +404,7 @@ static int __init early_ioremap_debug_setup(char *str)
25377 early_param("early_ioremap_debug", early_ioremap_debug_setup);
25378
25379 static __initdata int after_paging_init;
25380 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
25381 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
25382
25383 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
25384 {
25385 @@ -439,8 +436,7 @@ void __init early_ioremap_init(void)
25386 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
25387
25388 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
25389 - memset(bm_pte, 0, sizeof(bm_pte));
25390 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
25391 + pmd_populate_user(&init_mm, pmd, bm_pte);
25392
25393 /*
25394 * The boot-ioremap range spans multiple pmds, for which
25395 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
25396 index 8cc1833..1abbc5b 100644
25397 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
25398 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
25399 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
25400 * memory (e.g. tracked pages)? For now, we need this to avoid
25401 * invoking kmemcheck for PnP BIOS calls.
25402 */
25403 - if (regs->flags & X86_VM_MASK)
25404 + if (v8086_mode(regs))
25405 return false;
25406 - if (regs->cs != __KERNEL_CS)
25407 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
25408 return false;
25409
25410 pte = kmemcheck_pte_lookup(address);
25411 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
25412 index c9e57af..07a321b 100644
25413 --- a/arch/x86/mm/mmap.c
25414 +++ b/arch/x86/mm/mmap.c
25415 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size(void)
25416 * Leave an at least ~128 MB hole with possible stack randomization.
25417 */
25418 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
25419 -#define MAX_GAP (TASK_SIZE/6*5)
25420 +#define MAX_GAP (pax_task_size/6*5)
25421
25422 /*
25423 * True on X86_32 or when emulating IA32 on X86_64
25424 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
25425 return rnd << PAGE_SHIFT;
25426 }
25427
25428 -static unsigned long mmap_base(void)
25429 +static unsigned long mmap_base(struct mm_struct *mm)
25430 {
25431 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
25432 + unsigned long pax_task_size = TASK_SIZE;
25433 +
25434 +#ifdef CONFIG_PAX_SEGMEXEC
25435 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25436 + pax_task_size = SEGMEXEC_TASK_SIZE;
25437 +#endif
25438
25439 if (gap < MIN_GAP)
25440 gap = MIN_GAP;
25441 else if (gap > MAX_GAP)
25442 gap = MAX_GAP;
25443
25444 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
25445 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
25446 }
25447
25448 /*
25449 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
25450 * does, but not when emulating X86_32
25451 */
25452 -static unsigned long mmap_legacy_base(void)
25453 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
25454 {
25455 - if (mmap_is_ia32())
25456 + if (mmap_is_ia32()) {
25457 +
25458 +#ifdef CONFIG_PAX_SEGMEXEC
25459 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25460 + return SEGMEXEC_TASK_UNMAPPED_BASE;
25461 + else
25462 +#endif
25463 +
25464 return TASK_UNMAPPED_BASE;
25465 - else
25466 + } else
25467 return TASK_UNMAPPED_BASE + mmap_rnd();
25468 }
25469
25470 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(void)
25471 void arch_pick_mmap_layout(struct mm_struct *mm)
25472 {
25473 if (mmap_is_legacy()) {
25474 - mm->mmap_base = mmap_legacy_base();
25475 + mm->mmap_base = mmap_legacy_base(mm);
25476 +
25477 +#ifdef CONFIG_PAX_RANDMMAP
25478 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25479 + mm->mmap_base += mm->delta_mmap;
25480 +#endif
25481 +
25482 mm->get_unmapped_area = arch_get_unmapped_area;
25483 mm->unmap_area = arch_unmap_area;
25484 } else {
25485 - mm->mmap_base = mmap_base();
25486 + mm->mmap_base = mmap_base(mm);
25487 +
25488 +#ifdef CONFIG_PAX_RANDMMAP
25489 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25490 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
25491 +#endif
25492 +
25493 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
25494 mm->unmap_area = arch_unmap_area_topdown;
25495 }
25496 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
25497 index 132772a..b961f11 100644
25498 --- a/arch/x86/mm/mmio-mod.c
25499 +++ b/arch/x86/mm/mmio-mod.c
25500 @@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
25501 break;
25502 default:
25503 {
25504 - unsigned char *ip = (unsigned char *)instptr;
25505 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
25506 my_trace->opcode = MMIO_UNKNOWN_OP;
25507 my_trace->width = 0;
25508 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
25509 @@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
25510 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25511 void __iomem *addr)
25512 {
25513 - static atomic_t next_id;
25514 + static atomic_unchecked_t next_id;
25515 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
25516 /* These are page-unaligned. */
25517 struct mmiotrace_map map = {
25518 @@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25519 .private = trace
25520 },
25521 .phys = offset,
25522 - .id = atomic_inc_return(&next_id)
25523 + .id = atomic_inc_return_unchecked(&next_id)
25524 };
25525 map.map_id = trace->id;
25526
25527 diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
25528 index d253006..e56dd6a 100644
25529 --- a/arch/x86/mm/numa_32.c
25530 +++ b/arch/x86/mm/numa_32.c
25531 @@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
25532 }
25533 #endif
25534
25535 -extern unsigned long find_max_low_pfn(void);
25536 extern unsigned long highend_pfn, highstart_pfn;
25537
25538 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
25539 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
25540 index e1d1069..2251ff3 100644
25541 --- a/arch/x86/mm/pageattr-test.c
25542 +++ b/arch/x86/mm/pageattr-test.c
25543 @@ -36,7 +36,7 @@ enum {
25544
25545 static int pte_testbit(pte_t pte)
25546 {
25547 - return pte_flags(pte) & _PAGE_UNUSED1;
25548 + return pte_flags(pte) & _PAGE_CPA_TEST;
25549 }
25550
25551 struct split_state {
25552 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
25553 index dd38bfb..8c12306 100644
25554 --- a/arch/x86/mm/pageattr.c
25555 +++ b/arch/x86/mm/pageattr.c
25556 @@ -261,16 +261,17 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25557 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
25558 */
25559 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
25560 - pgprot_val(forbidden) |= _PAGE_NX;
25561 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25562
25563 /*
25564 * The kernel text needs to be executable for obvious reasons
25565 * Does not cover __inittext since that is gone later on. On
25566 * 64bit we do not enforce !NX on the low mapping
25567 */
25568 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
25569 - pgprot_val(forbidden) |= _PAGE_NX;
25570 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
25571 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25572
25573 +#ifdef CONFIG_DEBUG_RODATA
25574 /*
25575 * The .rodata section needs to be read-only. Using the pfn
25576 * catches all aliases.
25577 @@ -278,6 +279,14 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25578 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
25579 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
25580 pgprot_val(forbidden) |= _PAGE_RW;
25581 +#endif
25582 +
25583 +#ifdef CONFIG_PAX_KERNEXEC
25584 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
25585 + pgprot_val(forbidden) |= _PAGE_RW;
25586 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25587 + }
25588 +#endif
25589
25590 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
25591
25592 @@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
25593 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
25594 {
25595 /* change init_mm */
25596 + pax_open_kernel();
25597 set_pte_atomic(kpte, pte);
25598 +
25599 #ifdef CONFIG_X86_32
25600 if (!SHARED_KERNEL_PMD) {
25601 +
25602 +#ifdef CONFIG_PAX_PER_CPU_PGD
25603 + unsigned long cpu;
25604 +#else
25605 struct page *page;
25606 +#endif
25607
25608 +#ifdef CONFIG_PAX_PER_CPU_PGD
25609 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
25610 + pgd_t *pgd = get_cpu_pgd(cpu);
25611 +#else
25612 list_for_each_entry(page, &pgd_list, lru) {
25613 - pgd_t *pgd;
25614 + pgd_t *pgd = (pgd_t *)page_address(page);
25615 +#endif
25616 +
25617 pud_t *pud;
25618 pmd_t *pmd;
25619
25620 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
25621 + pgd += pgd_index(address);
25622 pud = pud_offset(pgd, address);
25623 pmd = pmd_offset(pud, address);
25624 set_pte_atomic((pte_t *)pmd, pte);
25625 }
25626 }
25627 #endif
25628 + pax_close_kernel();
25629 }
25630
25631 static int
25632 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
25633 index e78cd0e..de0a817 100644
25634 --- a/arch/x86/mm/pat.c
25635 +++ b/arch/x86/mm/pat.c
25636 @@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
25637
25638 conflict:
25639 printk(KERN_INFO "%s:%d conflicting memory types "
25640 - "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
25641 + "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
25642 new->end, cattr_name(new->type), cattr_name(entry->type));
25643 return -EBUSY;
25644 }
25645 @@ -559,7 +559,7 @@ unlock_ret:
25646
25647 if (err) {
25648 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
25649 - current->comm, current->pid, start, end);
25650 + current->comm, task_pid_nr(current), start, end);
25651 }
25652
25653 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
25654 @@ -689,8 +689,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25655 while (cursor < to) {
25656 if (!devmem_is_allowed(pfn)) {
25657 printk(KERN_INFO
25658 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25659 - current->comm, from, to);
25660 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
25661 + current->comm, from, to, cursor);
25662 return 0;
25663 }
25664 cursor += PAGE_SIZE;
25665 @@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
25666 printk(KERN_INFO
25667 "%s:%d ioremap_change_attr failed %s "
25668 "for %Lx-%Lx\n",
25669 - current->comm, current->pid,
25670 + current->comm, task_pid_nr(current),
25671 cattr_name(flags),
25672 base, (unsigned long long)(base + size));
25673 return -EINVAL;
25674 @@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25675 free_memtype(paddr, paddr + size);
25676 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
25677 " for %Lx-%Lx, got %s\n",
25678 - current->comm, current->pid,
25679 + current->comm, task_pid_nr(current),
25680 cattr_name(want_flags),
25681 (unsigned long long)paddr,
25682 (unsigned long long)(paddr + size),
25683 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
25684 index df3d5c8..c2223e1 100644
25685 --- a/arch/x86/mm/pf_in.c
25686 +++ b/arch/x86/mm/pf_in.c
25687 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
25688 int i;
25689 enum reason_type rv = OTHERS;
25690
25691 - p = (unsigned char *)ins_addr;
25692 + p = (unsigned char *)ktla_ktva(ins_addr);
25693 p += skip_prefix(p, &prf);
25694 p += get_opcode(p, &opcode);
25695
25696 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
25697 struct prefix_bits prf;
25698 int i;
25699
25700 - p = (unsigned char *)ins_addr;
25701 + p = (unsigned char *)ktla_ktva(ins_addr);
25702 p += skip_prefix(p, &prf);
25703 p += get_opcode(p, &opcode);
25704
25705 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
25706 struct prefix_bits prf;
25707 int i;
25708
25709 - p = (unsigned char *)ins_addr;
25710 + p = (unsigned char *)ktla_ktva(ins_addr);
25711 p += skip_prefix(p, &prf);
25712 p += get_opcode(p, &opcode);
25713
25714 @@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
25715 int i;
25716 unsigned long rv;
25717
25718 - p = (unsigned char *)ins_addr;
25719 + p = (unsigned char *)ktla_ktva(ins_addr);
25720 p += skip_prefix(p, &prf);
25721 p += get_opcode(p, &opcode);
25722 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
25723 @@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
25724 int i;
25725 unsigned long rv;
25726
25727 - p = (unsigned char *)ins_addr;
25728 + p = (unsigned char *)ktla_ktva(ins_addr);
25729 p += skip_prefix(p, &prf);
25730 p += get_opcode(p, &opcode);
25731 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
25732 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
25733 index e0e6fad..6b90017 100644
25734 --- a/arch/x86/mm/pgtable.c
25735 +++ b/arch/x86/mm/pgtable.c
25736 @@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *pgd)
25737 list_del(&page->lru);
25738 }
25739
25740 -#define UNSHARED_PTRS_PER_PGD \
25741 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25742 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25743 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
25744
25745 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
25746 +{
25747 + while (count--)
25748 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
25749 +}
25750 +#endif
25751 +
25752 +#ifdef CONFIG_PAX_PER_CPU_PGD
25753 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
25754 +{
25755 + while (count--)
25756 +
25757 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25758 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
25759 +#else
25760 + *dst++ = *src++;
25761 +#endif
25762 +
25763 +}
25764 +#endif
25765 +
25766 +#ifdef CONFIG_X86_64
25767 +#define pxd_t pud_t
25768 +#define pyd_t pgd_t
25769 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
25770 +#define pxd_free(mm, pud) pud_free((mm), (pud))
25771 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
25772 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
25773 +#define PYD_SIZE PGDIR_SIZE
25774 +#else
25775 +#define pxd_t pmd_t
25776 +#define pyd_t pud_t
25777 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
25778 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
25779 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
25780 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
25781 +#define PYD_SIZE PUD_SIZE
25782 +#endif
25783 +
25784 +#ifdef CONFIG_PAX_PER_CPU_PGD
25785 +static inline void pgd_ctor(pgd_t *pgd) {}
25786 +static inline void pgd_dtor(pgd_t *pgd) {}
25787 +#else
25788 static void pgd_ctor(pgd_t *pgd)
25789 {
25790 /* If the pgd points to a shared pagetable level (either the
25791 @@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
25792 pgd_list_del(pgd);
25793 spin_unlock_irqrestore(&pgd_lock, flags);
25794 }
25795 +#endif
25796
25797 /*
25798 * List of all pgd's needed for non-PAE so it can invalidate entries
25799 @@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
25800 * -- wli
25801 */
25802
25803 -#ifdef CONFIG_X86_PAE
25804 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25805 /*
25806 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
25807 * updating the top-level pagetable entries to guarantee the
25808 @@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
25809 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
25810 * and initialize the kernel pmds here.
25811 */
25812 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
25813 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25814
25815 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25816 {
25817 @@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25818 */
25819 flush_tlb_mm(mm);
25820 }
25821 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
25822 +#define PREALLOCATED_PXDS USER_PGD_PTRS
25823 #else /* !CONFIG_X86_PAE */
25824
25825 /* No need to prepopulate any pagetable entries in non-PAE modes. */
25826 -#define PREALLOCATED_PMDS 0
25827 +#define PREALLOCATED_PXDS 0
25828
25829 #endif /* CONFIG_X86_PAE */
25830
25831 -static void free_pmds(pmd_t *pmds[])
25832 +static void free_pxds(pxd_t *pxds[])
25833 {
25834 int i;
25835
25836 - for(i = 0; i < PREALLOCATED_PMDS; i++)
25837 - if (pmds[i])
25838 - free_page((unsigned long)pmds[i]);
25839 + for(i = 0; i < PREALLOCATED_PXDS; i++)
25840 + if (pxds[i])
25841 + free_page((unsigned long)pxds[i]);
25842 }
25843
25844 -static int preallocate_pmds(pmd_t *pmds[])
25845 +static int preallocate_pxds(pxd_t *pxds[])
25846 {
25847 int i;
25848 bool failed = false;
25849
25850 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
25851 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
25852 - if (pmd == NULL)
25853 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
25854 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
25855 + if (pxd == NULL)
25856 failed = true;
25857 - pmds[i] = pmd;
25858 + pxds[i] = pxd;
25859 }
25860
25861 if (failed) {
25862 - free_pmds(pmds);
25863 + free_pxds(pxds);
25864 return -ENOMEM;
25865 }
25866
25867 @@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[])
25868 * preallocate which never got a corresponding vma will need to be
25869 * freed manually.
25870 */
25871 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
25872 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
25873 {
25874 int i;
25875
25876 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
25877 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
25878 pgd_t pgd = pgdp[i];
25879
25880 if (pgd_val(pgd) != 0) {
25881 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
25882 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
25883
25884 - pgdp[i] = native_make_pgd(0);
25885 + set_pgd(pgdp + i, native_make_pgd(0));
25886
25887 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
25888 - pmd_free(mm, pmd);
25889 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
25890 + pxd_free(mm, pxd);
25891 }
25892 }
25893 }
25894
25895 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
25896 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
25897 {
25898 - pud_t *pud;
25899 + pyd_t *pyd;
25900 unsigned long addr;
25901 int i;
25902
25903 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
25904 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
25905 return;
25906
25907 - pud = pud_offset(pgd, 0);
25908 +#ifdef CONFIG_X86_64
25909 + pyd = pyd_offset(mm, 0L);
25910 +#else
25911 + pyd = pyd_offset(pgd, 0L);
25912 +#endif
25913
25914 - for (addr = i = 0; i < PREALLOCATED_PMDS;
25915 - i++, pud++, addr += PUD_SIZE) {
25916 - pmd_t *pmd = pmds[i];
25917 + for (addr = i = 0; i < PREALLOCATED_PXDS;
25918 + i++, pyd++, addr += PYD_SIZE) {
25919 + pxd_t *pxd = pxds[i];
25920
25921 if (i >= KERNEL_PGD_BOUNDARY)
25922 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25923 - sizeof(pmd_t) * PTRS_PER_PMD);
25924 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25925 + sizeof(pxd_t) * PTRS_PER_PMD);
25926
25927 - pud_populate(mm, pud, pmd);
25928 + pyd_populate(mm, pyd, pxd);
25929 }
25930 }
25931
25932 pgd_t *pgd_alloc(struct mm_struct *mm)
25933 {
25934 pgd_t *pgd;
25935 - pmd_t *pmds[PREALLOCATED_PMDS];
25936 + pxd_t *pxds[PREALLOCATED_PXDS];
25937 +
25938 unsigned long flags;
25939
25940 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
25941 @@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25942
25943 mm->pgd = pgd;
25944
25945 - if (preallocate_pmds(pmds) != 0)
25946 + if (preallocate_pxds(pxds) != 0)
25947 goto out_free_pgd;
25948
25949 if (paravirt_pgd_alloc(mm) != 0)
25950 - goto out_free_pmds;
25951 + goto out_free_pxds;
25952
25953 /*
25954 * Make sure that pre-populating the pmds is atomic with
25955 @@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25956 spin_lock_irqsave(&pgd_lock, flags);
25957
25958 pgd_ctor(pgd);
25959 - pgd_prepopulate_pmd(mm, pgd, pmds);
25960 + pgd_prepopulate_pxd(mm, pgd, pxds);
25961
25962 spin_unlock_irqrestore(&pgd_lock, flags);
25963
25964 return pgd;
25965
25966 -out_free_pmds:
25967 - free_pmds(pmds);
25968 +out_free_pxds:
25969 + free_pxds(pxds);
25970 out_free_pgd:
25971 free_page((unsigned long)pgd);
25972 out:
25973 @@ -287,7 +338,7 @@ out:
25974
25975 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
25976 {
25977 - pgd_mop_up_pmds(mm, pgd);
25978 + pgd_mop_up_pxds(mm, pgd);
25979 pgd_dtor(pgd);
25980 paravirt_pgd_free(mm, pgd);
25981 free_page((unsigned long)pgd);
25982 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
25983 index 46c8834..fcab43d 100644
25984 --- a/arch/x86/mm/pgtable_32.c
25985 +++ b/arch/x86/mm/pgtable_32.c
25986 @@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
25987 return;
25988 }
25989 pte = pte_offset_kernel(pmd, vaddr);
25990 +
25991 + pax_open_kernel();
25992 if (pte_val(pteval))
25993 set_pte_at(&init_mm, vaddr, pte, pteval);
25994 else
25995 pte_clear(&init_mm, vaddr, pte);
25996 + pax_close_kernel();
25997
25998 /*
25999 * It's enough to flush this one mapping.
26000 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
26001 index 513d8ed..978c161 100644
26002 --- a/arch/x86/mm/setup_nx.c
26003 +++ b/arch/x86/mm/setup_nx.c
26004 @@ -4,11 +4,10 @@
26005
26006 #include <asm/pgtable.h>
26007
26008 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26009 int nx_enabled;
26010
26011 -#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26012 -static int disable_nx __cpuinitdata;
26013 -
26014 +#ifndef CONFIG_PAX_PAGEEXEC
26015 /*
26016 * noexec = on|off
26017 *
26018 @@ -22,32 +21,26 @@ static int __init noexec_setup(char *str)
26019 if (!str)
26020 return -EINVAL;
26021 if (!strncmp(str, "on", 2)) {
26022 - __supported_pte_mask |= _PAGE_NX;
26023 - disable_nx = 0;
26024 + nx_enabled = 1;
26025 } else if (!strncmp(str, "off", 3)) {
26026 - disable_nx = 1;
26027 - __supported_pte_mask &= ~_PAGE_NX;
26028 + nx_enabled = 0;
26029 }
26030 return 0;
26031 }
26032 early_param("noexec", noexec_setup);
26033 #endif
26034 +#endif
26035
26036 #ifdef CONFIG_X86_PAE
26037 void __init set_nx(void)
26038 {
26039 - unsigned int v[4], l, h;
26040 + if (!nx_enabled && cpu_has_nx) {
26041 + unsigned l, h;
26042
26043 - if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
26044 - cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
26045 -
26046 - if ((v[3] & (1 << 20)) && !disable_nx) {
26047 - rdmsr(MSR_EFER, l, h);
26048 - l |= EFER_NX;
26049 - wrmsr(MSR_EFER, l, h);
26050 - nx_enabled = 1;
26051 - __supported_pte_mask |= _PAGE_NX;
26052 - }
26053 + __supported_pte_mask &= ~_PAGE_NX;
26054 + rdmsr(MSR_EFER, l, h);
26055 + l &= ~EFER_NX;
26056 + wrmsr(MSR_EFER, l, h);
26057 }
26058 }
26059 #else
26060 @@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
26061 unsigned long efer;
26062
26063 rdmsrl(MSR_EFER, efer);
26064 - if (!(efer & EFER_NX) || disable_nx)
26065 + if (!(efer & EFER_NX) || !nx_enabled)
26066 __supported_pte_mask &= ~_PAGE_NX;
26067 }
26068 #endif
26069 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
26070 index 36fe08e..b123d3a 100644
26071 --- a/arch/x86/mm/tlb.c
26072 +++ b/arch/x86/mm/tlb.c
26073 @@ -61,7 +61,11 @@ void leave_mm(int cpu)
26074 BUG();
26075 cpumask_clear_cpu(cpu,
26076 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
26077 +
26078 +#ifndef CONFIG_PAX_PER_CPU_PGD
26079 load_cr3(swapper_pg_dir);
26080 +#endif
26081 +
26082 }
26083 EXPORT_SYMBOL_GPL(leave_mm);
26084
26085 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
26086 index 829edf0..672adb3 100644
26087 --- a/arch/x86/oprofile/backtrace.c
26088 +++ b/arch/x86/oprofile/backtrace.c
26089 @@ -115,7 +115,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
26090 {
26091 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
26092
26093 - if (!user_mode_vm(regs)) {
26094 + if (!user_mode(regs)) {
26095 unsigned long stack = kernel_stack_pointer(regs);
26096 if (depth)
26097 dump_trace(NULL, regs, (unsigned long *)stack, 0,
26098 diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
26099 index e6a160a..36deff6 100644
26100 --- a/arch/x86/oprofile/op_model_p4.c
26101 +++ b/arch/x86/oprofile/op_model_p4.c
26102 @@ -50,7 +50,7 @@ static inline void setup_num_counters(void)
26103 #endif
26104 }
26105
26106 -static int inline addr_increment(void)
26107 +static inline int addr_increment(void)
26108 {
26109 #ifdef CONFIG_SMP
26110 return smp_num_siblings == 2 ? 2 : 1;
26111 diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
26112 index 1331fcf..03901b2 100644
26113 --- a/arch/x86/pci/common.c
26114 +++ b/arch/x86/pci/common.c
26115 @@ -31,8 +31,8 @@ int noioapicreroute = 1;
26116 int pcibios_last_bus = -1;
26117 unsigned long pirq_table_addr;
26118 struct pci_bus *pci_root_bus;
26119 -struct pci_raw_ops *raw_pci_ops;
26120 -struct pci_raw_ops *raw_pci_ext_ops;
26121 +const struct pci_raw_ops *raw_pci_ops;
26122 +const struct pci_raw_ops *raw_pci_ext_ops;
26123
26124 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
26125 int reg, int len, u32 *val)
26126 diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c
26127 index 347d882..4baf6b6 100644
26128 --- a/arch/x86/pci/direct.c
26129 +++ b/arch/x86/pci/direct.c
26130 @@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int seg, unsigned int bus,
26131
26132 #undef PCI_CONF1_ADDRESS
26133
26134 -struct pci_raw_ops pci_direct_conf1 = {
26135 +const struct pci_raw_ops pci_direct_conf1 = {
26136 .read = pci_conf1_read,
26137 .write = pci_conf1_write,
26138 };
26139 @@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int seg, unsigned int bus,
26140
26141 #undef PCI_CONF2_ADDRESS
26142
26143 -struct pci_raw_ops pci_direct_conf2 = {
26144 +const struct pci_raw_ops pci_direct_conf2 = {
26145 .read = pci_conf2_read,
26146 .write = pci_conf2_write,
26147 };
26148 @@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
26149 * This should be close to trivial, but it isn't, because there are buggy
26150 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
26151 */
26152 -static int __init pci_sanity_check(struct pci_raw_ops *o)
26153 +static int __init pci_sanity_check(const struct pci_raw_ops *o)
26154 {
26155 u32 x = 0;
26156 int year, devfn;
26157 diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c
26158 index f10a7e9..0425342 100644
26159 --- a/arch/x86/pci/mmconfig_32.c
26160 +++ b/arch/x86/pci/mmconfig_32.c
26161 @@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
26162 return 0;
26163 }
26164
26165 -static struct pci_raw_ops pci_mmcfg = {
26166 +static const struct pci_raw_ops pci_mmcfg = {
26167 .read = pci_mmcfg_read,
26168 .write = pci_mmcfg_write,
26169 };
26170 diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c
26171 index 94349f8..41600a7 100644
26172 --- a/arch/x86/pci/mmconfig_64.c
26173 +++ b/arch/x86/pci/mmconfig_64.c
26174 @@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
26175 return 0;
26176 }
26177
26178 -static struct pci_raw_ops pci_mmcfg = {
26179 +static const struct pci_raw_ops pci_mmcfg = {
26180 .read = pci_mmcfg_read,
26181 .write = pci_mmcfg_write,
26182 };
26183 diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c
26184 index 8eb295e..86bd657 100644
26185 --- a/arch/x86/pci/numaq_32.c
26186 +++ b/arch/x86/pci/numaq_32.c
26187 @@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned int seg, unsigned int bus,
26188
26189 #undef PCI_CONF1_MQ_ADDRESS
26190
26191 -static struct pci_raw_ops pci_direct_conf1_mq = {
26192 +static const struct pci_raw_ops pci_direct_conf1_mq = {
26193 .read = pci_conf1_mq_read,
26194 .write = pci_conf1_mq_write
26195 };
26196 diff --git a/arch/x86/pci/olpc.c b/arch/x86/pci/olpc.c
26197 index b889d82..5a58a0a 100644
26198 --- a/arch/x86/pci/olpc.c
26199 +++ b/arch/x86/pci/olpc.c
26200 @@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int seg, unsigned int bus,
26201 return 0;
26202 }
26203
26204 -static struct pci_raw_ops pci_olpc_conf = {
26205 +static const struct pci_raw_ops pci_olpc_conf = {
26206 .read = pci_olpc_read,
26207 .write = pci_olpc_write,
26208 };
26209 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
26210 index 1c975cc..ffd0536 100644
26211 --- a/arch/x86/pci/pcbios.c
26212 +++ b/arch/x86/pci/pcbios.c
26213 @@ -56,50 +56,93 @@ union bios32 {
26214 static struct {
26215 unsigned long address;
26216 unsigned short segment;
26217 -} bios32_indirect = { 0, __KERNEL_CS };
26218 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
26219
26220 /*
26221 * Returns the entry point for the given service, NULL on error
26222 */
26223
26224 -static unsigned long bios32_service(unsigned long service)
26225 +static unsigned long __devinit bios32_service(unsigned long service)
26226 {
26227 unsigned char return_code; /* %al */
26228 unsigned long address; /* %ebx */
26229 unsigned long length; /* %ecx */
26230 unsigned long entry; /* %edx */
26231 unsigned long flags;
26232 + struct desc_struct d, *gdt;
26233
26234 local_irq_save(flags);
26235 - __asm__("lcall *(%%edi); cld"
26236 +
26237 + gdt = get_cpu_gdt_table(smp_processor_id());
26238 +
26239 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
26240 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26241 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
26242 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26243 +
26244 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
26245 : "=a" (return_code),
26246 "=b" (address),
26247 "=c" (length),
26248 "=d" (entry)
26249 : "0" (service),
26250 "1" (0),
26251 - "D" (&bios32_indirect));
26252 + "D" (&bios32_indirect),
26253 + "r"(__PCIBIOS_DS)
26254 + : "memory");
26255 +
26256 + pax_open_kernel();
26257 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
26258 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
26259 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
26260 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
26261 + pax_close_kernel();
26262 +
26263 local_irq_restore(flags);
26264
26265 switch (return_code) {
26266 - case 0:
26267 - return address + entry;
26268 - case 0x80: /* Not present */
26269 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26270 - return 0;
26271 - default: /* Shouldn't happen */
26272 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26273 - service, return_code);
26274 + case 0: {
26275 + int cpu;
26276 + unsigned char flags;
26277 +
26278 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
26279 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
26280 + printk(KERN_WARNING "bios32_service: not valid\n");
26281 return 0;
26282 + }
26283 + address = address + PAGE_OFFSET;
26284 + length += 16UL; /* some BIOSs underreport this... */
26285 + flags = 4;
26286 + if (length >= 64*1024*1024) {
26287 + length >>= PAGE_SHIFT;
26288 + flags |= 8;
26289 + }
26290 +
26291 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
26292 + gdt = get_cpu_gdt_table(cpu);
26293 + pack_descriptor(&d, address, length, 0x9b, flags);
26294 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26295 + pack_descriptor(&d, address, length, 0x93, flags);
26296 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26297 + }
26298 + return entry;
26299 + }
26300 + case 0x80: /* Not present */
26301 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26302 + return 0;
26303 + default: /* Shouldn't happen */
26304 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26305 + service, return_code);
26306 + return 0;
26307 }
26308 }
26309
26310 static struct {
26311 unsigned long address;
26312 unsigned short segment;
26313 -} pci_indirect = { 0, __KERNEL_CS };
26314 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
26315
26316 -static int pci_bios_present;
26317 +static int pci_bios_present __read_only;
26318
26319 static int __devinit check_pcibios(void)
26320 {
26321 @@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
26322 unsigned long flags, pcibios_entry;
26323
26324 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
26325 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
26326 + pci_indirect.address = pcibios_entry;
26327
26328 local_irq_save(flags);
26329 - __asm__(
26330 - "lcall *(%%edi); cld\n\t"
26331 + __asm__("movw %w6, %%ds\n\t"
26332 + "lcall *%%ss:(%%edi); cld\n\t"
26333 + "push %%ss\n\t"
26334 + "pop %%ds\n\t"
26335 "jc 1f\n\t"
26336 "xor %%ah, %%ah\n"
26337 "1:"
26338 @@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
26339 "=b" (ebx),
26340 "=c" (ecx)
26341 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
26342 - "D" (&pci_indirect)
26343 + "D" (&pci_indirect),
26344 + "r" (__PCIBIOS_DS)
26345 : "memory");
26346 local_irq_restore(flags);
26347
26348 @@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26349
26350 switch (len) {
26351 case 1:
26352 - __asm__("lcall *(%%esi); cld\n\t"
26353 + __asm__("movw %w6, %%ds\n\t"
26354 + "lcall *%%ss:(%%esi); cld\n\t"
26355 + "push %%ss\n\t"
26356 + "pop %%ds\n\t"
26357 "jc 1f\n\t"
26358 "xor %%ah, %%ah\n"
26359 "1:"
26360 @@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26361 : "1" (PCIBIOS_READ_CONFIG_BYTE),
26362 "b" (bx),
26363 "D" ((long)reg),
26364 - "S" (&pci_indirect));
26365 + "S" (&pci_indirect),
26366 + "r" (__PCIBIOS_DS));
26367 /*
26368 * Zero-extend the result beyond 8 bits, do not trust the
26369 * BIOS having done it:
26370 @@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26371 *value &= 0xff;
26372 break;
26373 case 2:
26374 - __asm__("lcall *(%%esi); cld\n\t"
26375 + __asm__("movw %w6, %%ds\n\t"
26376 + "lcall *%%ss:(%%esi); cld\n\t"
26377 + "push %%ss\n\t"
26378 + "pop %%ds\n\t"
26379 "jc 1f\n\t"
26380 "xor %%ah, %%ah\n"
26381 "1:"
26382 @@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26383 : "1" (PCIBIOS_READ_CONFIG_WORD),
26384 "b" (bx),
26385 "D" ((long)reg),
26386 - "S" (&pci_indirect));
26387 + "S" (&pci_indirect),
26388 + "r" (__PCIBIOS_DS));
26389 /*
26390 * Zero-extend the result beyond 16 bits, do not trust the
26391 * BIOS having done it:
26392 @@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26393 *value &= 0xffff;
26394 break;
26395 case 4:
26396 - __asm__("lcall *(%%esi); cld\n\t"
26397 + __asm__("movw %w6, %%ds\n\t"
26398 + "lcall *%%ss:(%%esi); cld\n\t"
26399 + "push %%ss\n\t"
26400 + "pop %%ds\n\t"
26401 "jc 1f\n\t"
26402 "xor %%ah, %%ah\n"
26403 "1:"
26404 @@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26405 : "1" (PCIBIOS_READ_CONFIG_DWORD),
26406 "b" (bx),
26407 "D" ((long)reg),
26408 - "S" (&pci_indirect));
26409 + "S" (&pci_indirect),
26410 + "r" (__PCIBIOS_DS));
26411 break;
26412 }
26413
26414 @@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26415
26416 switch (len) {
26417 case 1:
26418 - __asm__("lcall *(%%esi); cld\n\t"
26419 + __asm__("movw %w6, %%ds\n\t"
26420 + "lcall *%%ss:(%%esi); cld\n\t"
26421 + "push %%ss\n\t"
26422 + "pop %%ds\n\t"
26423 "jc 1f\n\t"
26424 "xor %%ah, %%ah\n"
26425 "1:"
26426 @@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26427 "c" (value),
26428 "b" (bx),
26429 "D" ((long)reg),
26430 - "S" (&pci_indirect));
26431 + "S" (&pci_indirect),
26432 + "r" (__PCIBIOS_DS));
26433 break;
26434 case 2:
26435 - __asm__("lcall *(%%esi); cld\n\t"
26436 + __asm__("movw %w6, %%ds\n\t"
26437 + "lcall *%%ss:(%%esi); cld\n\t"
26438 + "push %%ss\n\t"
26439 + "pop %%ds\n\t"
26440 "jc 1f\n\t"
26441 "xor %%ah, %%ah\n"
26442 "1:"
26443 @@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26444 "c" (value),
26445 "b" (bx),
26446 "D" ((long)reg),
26447 - "S" (&pci_indirect));
26448 + "S" (&pci_indirect),
26449 + "r" (__PCIBIOS_DS));
26450 break;
26451 case 4:
26452 - __asm__("lcall *(%%esi); cld\n\t"
26453 + __asm__("movw %w6, %%ds\n\t"
26454 + "lcall *%%ss:(%%esi); cld\n\t"
26455 + "push %%ss\n\t"
26456 + "pop %%ds\n\t"
26457 "jc 1f\n\t"
26458 "xor %%ah, %%ah\n"
26459 "1:"
26460 @@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26461 "c" (value),
26462 "b" (bx),
26463 "D" ((long)reg),
26464 - "S" (&pci_indirect));
26465 + "S" (&pci_indirect),
26466 + "r" (__PCIBIOS_DS));
26467 break;
26468 }
26469
26470 @@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26471 * Function table for BIOS32 access
26472 */
26473
26474 -static struct pci_raw_ops pci_bios_access = {
26475 +static const struct pci_raw_ops pci_bios_access = {
26476 .read = pci_bios_read,
26477 .write = pci_bios_write
26478 };
26479 @@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_access = {
26480 * Try to find PCI BIOS.
26481 */
26482
26483 -static struct pci_raw_ops * __devinit pci_find_bios(void)
26484 +static const struct pci_raw_ops * __devinit pci_find_bios(void)
26485 {
26486 union bios32 *check;
26487 unsigned char sum;
26488 @@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26489
26490 DBG("PCI: Fetching IRQ routing table... ");
26491 __asm__("push %%es\n\t"
26492 + "movw %w8, %%ds\n\t"
26493 "push %%ds\n\t"
26494 "pop %%es\n\t"
26495 - "lcall *(%%esi); cld\n\t"
26496 + "lcall *%%ss:(%%esi); cld\n\t"
26497 "pop %%es\n\t"
26498 + "push %%ss\n\t"
26499 + "pop %%ds\n"
26500 "jc 1f\n\t"
26501 "xor %%ah, %%ah\n"
26502 "1:"
26503 @@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26504 "1" (0),
26505 "D" ((long) &opt),
26506 "S" (&pci_indirect),
26507 - "m" (opt)
26508 + "m" (opt),
26509 + "r" (__PCIBIOS_DS)
26510 : "memory");
26511 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
26512 if (ret & 0xff00)
26513 @@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26514 {
26515 int ret;
26516
26517 - __asm__("lcall *(%%esi); cld\n\t"
26518 + __asm__("movw %w5, %%ds\n\t"
26519 + "lcall *%%ss:(%%esi); cld\n\t"
26520 + "push %%ss\n\t"
26521 + "pop %%ds\n"
26522 "jc 1f\n\t"
26523 "xor %%ah, %%ah\n"
26524 "1:"
26525 @@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26526 : "0" (PCIBIOS_SET_PCI_HW_INT),
26527 "b" ((dev->bus->number << 8) | dev->devfn),
26528 "c" ((irq << 8) | (pin + 10)),
26529 - "S" (&pci_indirect));
26530 + "S" (&pci_indirect),
26531 + "r" (__PCIBIOS_DS));
26532 return !(ret & 0xff00);
26533 }
26534 EXPORT_SYMBOL(pcibios_set_irq_routing);
26535 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
26536 index fa0f651..9d8f3d9 100644
26537 --- a/arch/x86/power/cpu.c
26538 +++ b/arch/x86/power/cpu.c
26539 @@ -129,7 +129,7 @@ static void do_fpu_end(void)
26540 static void fix_processor_context(void)
26541 {
26542 int cpu = smp_processor_id();
26543 - struct tss_struct *t = &per_cpu(init_tss, cpu);
26544 + struct tss_struct *t = init_tss + cpu;
26545
26546 set_tss_desc(cpu, t); /*
26547 * This just modifies memory; should not be
26548 @@ -139,7 +139,9 @@ static void fix_processor_context(void)
26549 */
26550
26551 #ifdef CONFIG_X86_64
26552 + pax_open_kernel();
26553 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
26554 + pax_close_kernel();
26555
26556 syscall_init(); /* This sets MSR_*STAR and related */
26557 #endif
26558 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
26559 index dd78ef6..f9d928d 100644
26560 --- a/arch/x86/vdso/Makefile
26561 +++ b/arch/x86/vdso/Makefile
26562 @@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
26563 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
26564 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
26565
26566 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26567 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26568 GCOV_PROFILE := n
26569
26570 #
26571 diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
26572 index ee55754..0013b2e 100644
26573 --- a/arch/x86/vdso/vclock_gettime.c
26574 +++ b/arch/x86/vdso/vclock_gettime.c
26575 @@ -22,24 +22,48 @@
26576 #include <asm/hpet.h>
26577 #include <asm/unistd.h>
26578 #include <asm/io.h>
26579 +#include <asm/fixmap.h>
26580 #include "vextern.h"
26581
26582 #define gtod vdso_vsyscall_gtod_data
26583
26584 +notrace noinline long __vdso_fallback_time(long *t)
26585 +{
26586 + long secs;
26587 + asm volatile("syscall"
26588 + : "=a" (secs)
26589 + : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
26590 + return secs;
26591 +}
26592 +
26593 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
26594 {
26595 long ret;
26596 asm("syscall" : "=a" (ret) :
26597 - "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
26598 + "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
26599 return ret;
26600 }
26601
26602 +notrace static inline cycle_t __vdso_vread_hpet(void)
26603 +{
26604 + return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
26605 +}
26606 +
26607 +notrace static inline cycle_t __vdso_vread_tsc(void)
26608 +{
26609 + cycle_t ret = (cycle_t)vget_cycles();
26610 +
26611 + return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
26612 +}
26613 +
26614 notrace static inline long vgetns(void)
26615 {
26616 long v;
26617 - cycles_t (*vread)(void);
26618 - vread = gtod->clock.vread;
26619 - v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
26620 + if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
26621 + v = __vdso_vread_tsc();
26622 + else
26623 + v = __vdso_vread_hpet();
26624 + v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
26625 return (v * gtod->clock.mult) >> gtod->clock.shift;
26626 }
26627
26628 @@ -113,7 +137,9 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts)
26629
26630 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
26631 {
26632 - if (likely(gtod->sysctl_enabled))
26633 + if (likely(gtod->sysctl_enabled &&
26634 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
26635 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
26636 switch (clock) {
26637 case CLOCK_REALTIME:
26638 if (likely(gtod->clock.vread))
26639 @@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
26640 int clock_gettime(clockid_t, struct timespec *)
26641 __attribute__((weak, alias("__vdso_clock_gettime")));
26642
26643 +notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
26644 +{
26645 + long ret;
26646 + asm("syscall" : "=a" (ret) :
26647 + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
26648 + return ret;
26649 +}
26650 +
26651 notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
26652 {
26653 - long ret;
26654 - if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
26655 + if (likely(gtod->sysctl_enabled &&
26656 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
26657 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
26658 + {
26659 if (likely(tv != NULL)) {
26660 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
26661 offsetof(struct timespec, tv_nsec) ||
26662 @@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
26663 }
26664 return 0;
26665 }
26666 - asm("syscall" : "=a" (ret) :
26667 - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
26668 - return ret;
26669 + return __vdso_fallback_gettimeofday(tv, tz);
26670 }
26671 int gettimeofday(struct timeval *, struct timezone *)
26672 __attribute__((weak, alias("__vdso_gettimeofday")));
26673 diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/vdso/vdso.lds.S
26674 index 4e5dd3b..00ba15e 100644
26675 --- a/arch/x86/vdso/vdso.lds.S
26676 +++ b/arch/x86/vdso/vdso.lds.S
26677 @@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
26678 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
26679 #include "vextern.h"
26680 #undef VEXTERN
26681 +
26682 +#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
26683 +VEXTERN(fallback_gettimeofday)
26684 +VEXTERN(fallback_time)
26685 +VEXTERN(getcpu)
26686 +#undef VEXTERN
26687 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
26688 index 58bc00f..d53fb48 100644
26689 --- a/arch/x86/vdso/vdso32-setup.c
26690 +++ b/arch/x86/vdso/vdso32-setup.c
26691 @@ -25,6 +25,7 @@
26692 #include <asm/tlbflush.h>
26693 #include <asm/vdso.h>
26694 #include <asm/proto.h>
26695 +#include <asm/mman.h>
26696
26697 enum {
26698 VDSO_DISABLED = 0,
26699 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
26700 void enable_sep_cpu(void)
26701 {
26702 int cpu = get_cpu();
26703 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
26704 + struct tss_struct *tss = init_tss + cpu;
26705
26706 if (!boot_cpu_has(X86_FEATURE_SEP)) {
26707 put_cpu();
26708 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
26709 gate_vma.vm_start = FIXADDR_USER_START;
26710 gate_vma.vm_end = FIXADDR_USER_END;
26711 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
26712 - gate_vma.vm_page_prot = __P101;
26713 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
26714 /*
26715 * Make sure the vDSO gets into every core dump.
26716 * Dumping its contents makes post-mortem fully interpretable later
26717 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26718 if (compat)
26719 addr = VDSO_HIGH_BASE;
26720 else {
26721 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
26722 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
26723 if (IS_ERR_VALUE(addr)) {
26724 ret = addr;
26725 goto up_fail;
26726 }
26727 }
26728
26729 - current->mm->context.vdso = (void *)addr;
26730 + current->mm->context.vdso = addr;
26731
26732 if (compat_uses_vma || !compat) {
26733 /*
26734 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26735 }
26736
26737 current_thread_info()->sysenter_return =
26738 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26739 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26740
26741 up_fail:
26742 if (ret)
26743 - current->mm->context.vdso = NULL;
26744 + current->mm->context.vdso = 0;
26745
26746 up_write(&mm->mmap_sem);
26747
26748 @@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
26749
26750 const char *arch_vma_name(struct vm_area_struct *vma)
26751 {
26752 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
26753 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
26754 return "[vdso]";
26755 +
26756 +#ifdef CONFIG_PAX_SEGMEXEC
26757 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
26758 + return "[vdso]";
26759 +#endif
26760 +
26761 return NULL;
26762 }
26763
26764 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
26765 struct mm_struct *mm = tsk->mm;
26766
26767 /* Check to see if this task was created in compat vdso mode */
26768 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
26769 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
26770 return &gate_vma;
26771 return NULL;
26772 }
26773 diff --git a/arch/x86/vdso/vextern.h b/arch/x86/vdso/vextern.h
26774 index 1683ba2..48d07f3 100644
26775 --- a/arch/x86/vdso/vextern.h
26776 +++ b/arch/x86/vdso/vextern.h
26777 @@ -11,6 +11,5 @@
26778 put into vextern.h and be referenced as a pointer with vdso prefix.
26779 The main kernel later fills in the values. */
26780
26781 -VEXTERN(jiffies)
26782 VEXTERN(vgetcpu_mode)
26783 VEXTERN(vsyscall_gtod_data)
26784 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
26785 index 21e1aeb..2c0b3c4 100644
26786 --- a/arch/x86/vdso/vma.c
26787 +++ b/arch/x86/vdso/vma.c
26788 @@ -17,8 +17,6 @@
26789 #include "vextern.h" /* Just for VMAGIC. */
26790 #undef VEXTERN
26791
26792 -unsigned int __read_mostly vdso_enabled = 1;
26793 -
26794 extern char vdso_start[], vdso_end[];
26795 extern unsigned short vdso_sync_cpuid;
26796
26797 @@ -27,10 +25,8 @@ static unsigned vdso_size;
26798
26799 static inline void *var_ref(void *p, char *name)
26800 {
26801 - if (*(void **)p != (void *)VMAGIC) {
26802 - printk("VDSO: variable %s broken\n", name);
26803 - vdso_enabled = 0;
26804 - }
26805 + if (*(void **)p != (void *)VMAGIC)
26806 + panic("VDSO: variable %s broken\n", name);
26807 return p;
26808 }
26809
26810 @@ -57,21 +53,18 @@ static int __init init_vdso_vars(void)
26811 if (!vbase)
26812 goto oom;
26813
26814 - if (memcmp(vbase, "\177ELF", 4)) {
26815 - printk("VDSO: I'm broken; not ELF\n");
26816 - vdso_enabled = 0;
26817 - }
26818 + if (memcmp(vbase, ELFMAG, SELFMAG))
26819 + panic("VDSO: I'm broken; not ELF\n");
26820
26821 #define VEXTERN(x) \
26822 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
26823 #include "vextern.h"
26824 #undef VEXTERN
26825 + vunmap(vbase);
26826 return 0;
26827
26828 oom:
26829 - printk("Cannot allocate vdso\n");
26830 - vdso_enabled = 0;
26831 - return -ENOMEM;
26832 + panic("Cannot allocate vdso\n");
26833 }
26834 __initcall(init_vdso_vars);
26835
26836 @@ -102,13 +95,15 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
26837 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26838 {
26839 struct mm_struct *mm = current->mm;
26840 - unsigned long addr;
26841 + unsigned long addr = 0;
26842 int ret;
26843
26844 - if (!vdso_enabled)
26845 - return 0;
26846 -
26847 down_write(&mm->mmap_sem);
26848 +
26849 +#ifdef CONFIG_PAX_RANDMMAP
26850 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26851 +#endif
26852 +
26853 addr = vdso_addr(mm->start_stack, vdso_size);
26854 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
26855 if (IS_ERR_VALUE(addr)) {
26856 @@ -116,7 +111,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26857 goto up_fail;
26858 }
26859
26860 - current->mm->context.vdso = (void *)addr;
26861 + current->mm->context.vdso = addr;
26862
26863 ret = install_special_mapping(mm, addr, vdso_size,
26864 VM_READ|VM_EXEC|
26865 @@ -124,7 +119,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26866 VM_ALWAYSDUMP,
26867 vdso_pages);
26868 if (ret) {
26869 - current->mm->context.vdso = NULL;
26870 + current->mm->context.vdso = 0;
26871 goto up_fail;
26872 }
26873
26874 @@ -132,10 +127,3 @@ up_fail:
26875 up_write(&mm->mmap_sem);
26876 return ret;
26877 }
26878 -
26879 -static __init int vdso_setup(char *s)
26880 -{
26881 - vdso_enabled = simple_strtoul(s, NULL, 0);
26882 - return 0;
26883 -}
26884 -__setup("vdso=", vdso_setup);
26885 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
26886 index 0087b00..eecb34f 100644
26887 --- a/arch/x86/xen/enlighten.c
26888 +++ b/arch/x86/xen/enlighten.c
26889 @@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
26890
26891 struct shared_info xen_dummy_shared_info;
26892
26893 -void *xen_initial_gdt;
26894 -
26895 /*
26896 * Point at some empty memory to start with. We map the real shared_info
26897 * page as soon as fixmap is up and running.
26898 @@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
26899
26900 preempt_disable();
26901
26902 - start = __get_cpu_var(idt_desc).address;
26903 + start = (unsigned long)__get_cpu_var(idt_desc).address;
26904 end = start + __get_cpu_var(idt_desc).size + 1;
26905
26906 xen_mc_flush();
26907 @@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic_ops __initdata = {
26908 #endif
26909 };
26910
26911 -static void xen_reboot(int reason)
26912 +static __noreturn void xen_reboot(int reason)
26913 {
26914 struct sched_shutdown r = { .reason = reason };
26915
26916 @@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
26917 BUG();
26918 }
26919
26920 -static void xen_restart(char *msg)
26921 +static __noreturn void xen_restart(char *msg)
26922 {
26923 xen_reboot(SHUTDOWN_reboot);
26924 }
26925
26926 -static void xen_emergency_restart(void)
26927 +static __noreturn void xen_emergency_restart(void)
26928 {
26929 xen_reboot(SHUTDOWN_reboot);
26930 }
26931
26932 -static void xen_machine_halt(void)
26933 +static __noreturn void xen_machine_halt(void)
26934 {
26935 xen_reboot(SHUTDOWN_poweroff);
26936 }
26937 @@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(void)
26938 */
26939 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
26940
26941 -#ifdef CONFIG_X86_64
26942 /* Work out if we support NX */
26943 - check_efer();
26944 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26945 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
26946 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
26947 + unsigned l, h;
26948 +
26949 +#ifdef CONFIG_X86_PAE
26950 + nx_enabled = 1;
26951 +#endif
26952 + __supported_pte_mask |= _PAGE_NX;
26953 + rdmsr(MSR_EFER, l, h);
26954 + l |= EFER_NX;
26955 + wrmsr(MSR_EFER, l, h);
26956 + }
26957 #endif
26958
26959 xen_setup_features();
26960 @@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(void)
26961
26962 machine_ops = xen_machine_ops;
26963
26964 - /*
26965 - * The only reliable way to retain the initial address of the
26966 - * percpu gdt_page is to remember it here, so we can go and
26967 - * mark it RW later, when the initial percpu area is freed.
26968 - */
26969 - xen_initial_gdt = &per_cpu(gdt_page, 0);
26970 -
26971 xen_smp_init();
26972
26973 pgd = (pgd_t *)xen_start_info->pt_base;
26974 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
26975 index 3f90a2c..2c2ad84 100644
26976 --- a/arch/x86/xen/mmu.c
26977 +++ b/arch/x86/xen/mmu.c
26978 @@ -1719,6 +1719,9 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
26979 convert_pfn_mfn(init_level4_pgt);
26980 convert_pfn_mfn(level3_ident_pgt);
26981 convert_pfn_mfn(level3_kernel_pgt);
26982 + convert_pfn_mfn(level3_vmalloc_start_pgt);
26983 + convert_pfn_mfn(level3_vmalloc_end_pgt);
26984 + convert_pfn_mfn(level3_vmemmap_pgt);
26985
26986 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
26987 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
26988 @@ -1737,7 +1740,11 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
26989 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
26990 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
26991 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
26992 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
26993 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
26994 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
26995 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
26996 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
26997 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
26998 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
26999
27000 @@ -1860,6 +1867,7 @@ static __init void xen_post_allocator_init(void)
27001 pv_mmu_ops.set_pud = xen_set_pud;
27002 #if PAGETABLE_LEVELS == 4
27003 pv_mmu_ops.set_pgd = xen_set_pgd;
27004 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
27005 #endif
27006
27007 /* This will work as long as patching hasn't happened yet
27008 @@ -1946,6 +1954,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
27009 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
27010 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
27011 .set_pgd = xen_set_pgd_hyper,
27012 + .set_pgd_batched = xen_set_pgd_hyper,
27013
27014 .alloc_pud = xen_alloc_pmd_init,
27015 .release_pud = xen_release_pmd_init,
27016 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
27017 index a96204a..fca9b8e 100644
27018 --- a/arch/x86/xen/smp.c
27019 +++ b/arch/x86/xen/smp.c
27020 @@ -168,11 +168,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
27021 {
27022 BUG_ON(smp_processor_id() != 0);
27023 native_smp_prepare_boot_cpu();
27024 -
27025 - /* We've switched to the "real" per-cpu gdt, so make sure the
27026 - old memory can be recycled */
27027 - make_lowmem_page_readwrite(xen_initial_gdt);
27028 -
27029 xen_setup_vcpu_info_placement();
27030 }
27031
27032 @@ -241,12 +236,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
27033 gdt = get_cpu_gdt_table(cpu);
27034
27035 ctxt->flags = VGCF_IN_KERNEL;
27036 - ctxt->user_regs.ds = __USER_DS;
27037 - ctxt->user_regs.es = __USER_DS;
27038 + ctxt->user_regs.ds = __KERNEL_DS;
27039 + ctxt->user_regs.es = __KERNEL_DS;
27040 ctxt->user_regs.ss = __KERNEL_DS;
27041 #ifdef CONFIG_X86_32
27042 ctxt->user_regs.fs = __KERNEL_PERCPU;
27043 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
27044 + savesegment(gs, ctxt->user_regs.gs);
27045 #else
27046 ctxt->gs_base_kernel = per_cpu_offset(cpu);
27047 #endif
27048 @@ -297,13 +292,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
27049 int rc;
27050
27051 per_cpu(current_task, cpu) = idle;
27052 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
27053 #ifdef CONFIG_X86_32
27054 irq_ctx_init(cpu);
27055 #else
27056 clear_tsk_thread_flag(idle, TIF_FORK);
27057 - per_cpu(kernel_stack, cpu) =
27058 - (unsigned long)task_stack_page(idle) -
27059 - KERNEL_STACK_OFFSET + THREAD_SIZE;
27060 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27061 #endif
27062 xen_setup_runstate_info(cpu);
27063 xen_setup_timer(cpu);
27064 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
27065 index 9a95a9c..4f39e774 100644
27066 --- a/arch/x86/xen/xen-asm_32.S
27067 +++ b/arch/x86/xen/xen-asm_32.S
27068 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
27069 ESP_OFFSET=4 # bytes pushed onto stack
27070
27071 /*
27072 - * Store vcpu_info pointer for easy access. Do it this way to
27073 - * avoid having to reload %fs
27074 + * Store vcpu_info pointer for easy access.
27075 */
27076 #ifdef CONFIG_SMP
27077 - GET_THREAD_INFO(%eax)
27078 - movl TI_cpu(%eax), %eax
27079 - movl __per_cpu_offset(,%eax,4), %eax
27080 - mov per_cpu__xen_vcpu(%eax), %eax
27081 + push %fs
27082 + mov $(__KERNEL_PERCPU), %eax
27083 + mov %eax, %fs
27084 + mov PER_CPU_VAR(xen_vcpu), %eax
27085 + pop %fs
27086 #else
27087 movl per_cpu__xen_vcpu, %eax
27088 #endif
27089 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
27090 index 1a5ff24..a187d40 100644
27091 --- a/arch/x86/xen/xen-head.S
27092 +++ b/arch/x86/xen/xen-head.S
27093 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
27094 #ifdef CONFIG_X86_32
27095 mov %esi,xen_start_info
27096 mov $init_thread_union+THREAD_SIZE,%esp
27097 +#ifdef CONFIG_SMP
27098 + movl $cpu_gdt_table,%edi
27099 + movl $__per_cpu_load,%eax
27100 + movw %ax,__KERNEL_PERCPU + 2(%edi)
27101 + rorl $16,%eax
27102 + movb %al,__KERNEL_PERCPU + 4(%edi)
27103 + movb %ah,__KERNEL_PERCPU + 7(%edi)
27104 + movl $__per_cpu_end - 1,%eax
27105 + subl $__per_cpu_start,%eax
27106 + movw %ax,__KERNEL_PERCPU + 0(%edi)
27107 +#endif
27108 #else
27109 mov %rsi,xen_start_info
27110 mov $init_thread_union+THREAD_SIZE,%rsp
27111 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
27112 index f9153a3..51eab3d 100644
27113 --- a/arch/x86/xen/xen-ops.h
27114 +++ b/arch/x86/xen/xen-ops.h
27115 @@ -10,8 +10,6 @@
27116 extern const char xen_hypervisor_callback[];
27117 extern const char xen_failsafe_callback[];
27118
27119 -extern void *xen_initial_gdt;
27120 -
27121 struct trap_info;
27122 void xen_copy_trap_info(struct trap_info *traps);
27123
27124 diff --git a/block/blk-integrity.c b/block/blk-integrity.c
27125 index 15c6308..96e83c2 100644
27126 --- a/block/blk-integrity.c
27127 +++ b/block/blk-integrity.c
27128 @@ -278,7 +278,7 @@ static struct attribute *integrity_attrs[] = {
27129 NULL,
27130 };
27131
27132 -static struct sysfs_ops integrity_ops = {
27133 +static const struct sysfs_ops integrity_ops = {
27134 .show = &integrity_attr_show,
27135 .store = &integrity_attr_store,
27136 };
27137 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
27138 index ca56420..f2fc409 100644
27139 --- a/block/blk-iopoll.c
27140 +++ b/block/blk-iopoll.c
27141 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
27142 }
27143 EXPORT_SYMBOL(blk_iopoll_complete);
27144
27145 -static void blk_iopoll_softirq(struct softirq_action *h)
27146 +static void blk_iopoll_softirq(void)
27147 {
27148 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
27149 int rearm = 0, budget = blk_iopoll_budget;
27150 diff --git a/block/blk-map.c b/block/blk-map.c
27151 index 30a7e51..0aeec6a 100644
27152 --- a/block/blk-map.c
27153 +++ b/block/blk-map.c
27154 @@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
27155 * direct dma. else, set up kernel bounce buffers
27156 */
27157 uaddr = (unsigned long) ubuf;
27158 - if (blk_rq_aligned(q, ubuf, len) && !map_data)
27159 + if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
27160 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
27161 else
27162 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
27163 @@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
27164 for (i = 0; i < iov_count; i++) {
27165 unsigned long uaddr = (unsigned long)iov[i].iov_base;
27166
27167 + if (!iov[i].iov_len)
27168 + return -EINVAL;
27169 +
27170 if (uaddr & queue_dma_alignment(q)) {
27171 unaligned = 1;
27172 break;
27173 }
27174 - if (!iov[i].iov_len)
27175 - return -EINVAL;
27176 }
27177
27178 if (unaligned || (q->dma_pad_mask & len) || map_data)
27179 @@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
27180 if (!len || !kbuf)
27181 return -EINVAL;
27182
27183 - do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
27184 + do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
27185 if (do_copy)
27186 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
27187 else
27188 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
27189 index ee9c216..58d410a 100644
27190 --- a/block/blk-softirq.c
27191 +++ b/block/blk-softirq.c
27192 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
27193 * Softirq action handler - move entries to local list and loop over them
27194 * while passing them to the queue registered handler.
27195 */
27196 -static void blk_done_softirq(struct softirq_action *h)
27197 +static void blk_done_softirq(void)
27198 {
27199 struct list_head *cpu_list, local_list;
27200
27201 diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
27202 index bb9c5ea..5330d48 100644
27203 --- a/block/blk-sysfs.c
27204 +++ b/block/blk-sysfs.c
27205 @@ -414,7 +414,7 @@ static void blk_release_queue(struct kobject *kobj)
27206 kmem_cache_free(blk_requestq_cachep, q);
27207 }
27208
27209 -static struct sysfs_ops queue_sysfs_ops = {
27210 +static const struct sysfs_ops queue_sysfs_ops = {
27211 .show = queue_attr_show,
27212 .store = queue_attr_store,
27213 };
27214 diff --git a/block/bsg.c b/block/bsg.c
27215 index 7154a7a..08ac2f0 100644
27216 --- a/block/bsg.c
27217 +++ b/block/bsg.c
27218 @@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
27219 struct sg_io_v4 *hdr, struct bsg_device *bd,
27220 fmode_t has_write_perm)
27221 {
27222 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27223 + unsigned char *cmdptr;
27224 +
27225 if (hdr->request_len > BLK_MAX_CDB) {
27226 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
27227 if (!rq->cmd)
27228 return -ENOMEM;
27229 - }
27230 + cmdptr = rq->cmd;
27231 + } else
27232 + cmdptr = tmpcmd;
27233
27234 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
27235 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
27236 hdr->request_len))
27237 return -EFAULT;
27238
27239 + if (cmdptr != rq->cmd)
27240 + memcpy(rq->cmd, cmdptr, hdr->request_len);
27241 +
27242 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
27243 if (blk_verify_command(rq->cmd, has_write_perm))
27244 return -EPERM;
27245 @@ -282,7 +290,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
27246 rq->next_rq = next_rq;
27247 next_rq->cmd_type = rq->cmd_type;
27248
27249 - dxferp = (void*)(unsigned long)hdr->din_xferp;
27250 + dxferp = (void __user *)(unsigned long)hdr->din_xferp;
27251 ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
27252 hdr->din_xfer_len, GFP_KERNEL);
27253 if (ret)
27254 @@ -291,10 +299,10 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
27255
27256 if (hdr->dout_xfer_len) {
27257 dxfer_len = hdr->dout_xfer_len;
27258 - dxferp = (void*)(unsigned long)hdr->dout_xferp;
27259 + dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
27260 } else if (hdr->din_xfer_len) {
27261 dxfer_len = hdr->din_xfer_len;
27262 - dxferp = (void*)(unsigned long)hdr->din_xferp;
27263 + dxferp = (void __user *)(unsigned long)hdr->din_xferp;
27264 } else
27265 dxfer_len = 0;
27266
27267 @@ -436,7 +444,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
27268 int len = min_t(unsigned int, hdr->max_response_len,
27269 rq->sense_len);
27270
27271 - ret = copy_to_user((void*)(unsigned long)hdr->response,
27272 + ret = copy_to_user((void __user *)(unsigned long)hdr->response,
27273 rq->sense, len);
27274 if (!ret)
27275 hdr->response_len = len;
27276 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
27277 index 9bd086c..ca1fc22 100644
27278 --- a/block/compat_ioctl.c
27279 +++ b/block/compat_ioctl.c
27280 @@ -354,7 +354,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
27281 err |= __get_user(f->spec1, &uf->spec1);
27282 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
27283 err |= __get_user(name, &uf->name);
27284 - f->name = compat_ptr(name);
27285 + f->name = (void __force_kernel *)compat_ptr(name);
27286 if (err) {
27287 err = -EFAULT;
27288 goto out;
27289 diff --git a/block/elevator.c b/block/elevator.c
27290 index a847046..75a1746 100644
27291 --- a/block/elevator.c
27292 +++ b/block/elevator.c
27293 @@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, struct attribute *attr,
27294 return error;
27295 }
27296
27297 -static struct sysfs_ops elv_sysfs_ops = {
27298 +static const struct sysfs_ops elv_sysfs_ops = {
27299 .show = elv_attr_show,
27300 .store = elv_attr_store,
27301 };
27302 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
27303 index 114ee29..d0efa50 100644
27304 --- a/block/scsi_ioctl.c
27305 +++ b/block/scsi_ioctl.c
27306 @@ -24,6 +24,7 @@
27307 #include <linux/capability.h>
27308 #include <linux/completion.h>
27309 #include <linux/cdrom.h>
27310 +#include <linux/ratelimit.h>
27311 #include <linux/slab.h>
27312 #include <linux/times.h>
27313 #include <asm/uaccess.h>
27314 @@ -220,8 +221,20 @@ EXPORT_SYMBOL(blk_verify_command);
27315 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
27316 struct sg_io_hdr *hdr, fmode_t mode)
27317 {
27318 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
27319 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27320 + unsigned char *cmdptr;
27321 +
27322 + if (rq->cmd != rq->__cmd)
27323 + cmdptr = rq->cmd;
27324 + else
27325 + cmdptr = tmpcmd;
27326 +
27327 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
27328 return -EFAULT;
27329 +
27330 + if (cmdptr != rq->cmd)
27331 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
27332 +
27333 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
27334 return -EPERM;
27335
27336 @@ -430,6 +443,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27337 int err;
27338 unsigned int in_len, out_len, bytes, opcode, cmdlen;
27339 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
27340 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27341 + unsigned char *cmdptr;
27342
27343 if (!sic)
27344 return -EINVAL;
27345 @@ -463,9 +478,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27346 */
27347 err = -EFAULT;
27348 rq->cmd_len = cmdlen;
27349 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
27350 +
27351 + if (rq->cmd != rq->__cmd)
27352 + cmdptr = rq->cmd;
27353 + else
27354 + cmdptr = tmpcmd;
27355 +
27356 + if (copy_from_user(cmdptr, sic->data, cmdlen))
27357 goto error;
27358
27359 + if (rq->cmd != cmdptr)
27360 + memcpy(rq->cmd, cmdptr, cmdlen);
27361 +
27362 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
27363 goto error;
27364
27365 @@ -689,9 +713,54 @@ int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mod
27366 }
27367 EXPORT_SYMBOL(scsi_cmd_ioctl);
27368
27369 +int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
27370 +{
27371 + if (bd && bd == bd->bd_contains)
27372 + return 0;
27373 +
27374 + /* Actually none of these is particularly useful on a partition,
27375 + * but they are safe.
27376 + */
27377 + switch (cmd) {
27378 + case SCSI_IOCTL_GET_IDLUN:
27379 + case SCSI_IOCTL_GET_BUS_NUMBER:
27380 + case SCSI_IOCTL_GET_PCI:
27381 + case SCSI_IOCTL_PROBE_HOST:
27382 + case SG_GET_VERSION_NUM:
27383 + case SG_SET_TIMEOUT:
27384 + case SG_GET_TIMEOUT:
27385 + case SG_GET_RESERVED_SIZE:
27386 + case SG_SET_RESERVED_SIZE:
27387 + case SG_EMULATED_HOST:
27388 + return 0;
27389 + case CDROM_GET_CAPABILITY:
27390 + /* Keep this until we remove the printk below. udev sends it
27391 + * and we do not want to spam dmesg about it. CD-ROMs do
27392 + * not have partitions, so we get here only for disks.
27393 + */
27394 + return -ENOIOCTLCMD;
27395 + default:
27396 + break;
27397 + }
27398 +
27399 + /* In particular, rule out all resets and host-specific ioctls. */
27400 + if (printk_ratelimit())
27401 + printk(KERN_WARNING "%s: sending ioctl %x to a partition!\n",
27402 + current->comm, cmd);
27403 +
27404 + return capable(CAP_SYS_RAWIO) ? 0 : -ENOIOCTLCMD;
27405 +}
27406 +EXPORT_SYMBOL(scsi_verify_blk_ioctl);
27407 +
27408 int scsi_cmd_blk_ioctl(struct block_device *bd, fmode_t mode,
27409 unsigned int cmd, void __user *arg)
27410 {
27411 + int ret;
27412 +
27413 + ret = scsi_verify_blk_ioctl(bd, cmd);
27414 + if (ret < 0)
27415 + return ret;
27416 +
27417 return scsi_cmd_ioctl(bd->bd_disk->queue, bd->bd_disk, mode, cmd, arg);
27418 }
27419 EXPORT_SYMBOL(scsi_cmd_blk_ioctl);
27420 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
27421 index 3533582..f143117 100644
27422 --- a/crypto/cryptd.c
27423 +++ b/crypto/cryptd.c
27424 @@ -50,7 +50,7 @@ struct cryptd_blkcipher_ctx {
27425
27426 struct cryptd_blkcipher_request_ctx {
27427 crypto_completion_t complete;
27428 -};
27429 +} __no_const;
27430
27431 struct cryptd_hash_ctx {
27432 struct crypto_shash *child;
27433 diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
27434 index a90d260..7a9765e 100644
27435 --- a/crypto/gf128mul.c
27436 +++ b/crypto/gf128mul.c
27437 @@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128 *b)
27438 for (i = 0; i < 7; ++i)
27439 gf128mul_x_lle(&p[i + 1], &p[i]);
27440
27441 - memset(r, 0, sizeof(r));
27442 + memset(r, 0, sizeof(*r));
27443 for (i = 0;;) {
27444 u8 ch = ((u8 *)b)[15 - i];
27445
27446 @@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128 *b)
27447 for (i = 0; i < 7; ++i)
27448 gf128mul_x_bbe(&p[i + 1], &p[i]);
27449
27450 - memset(r, 0, sizeof(r));
27451 + memset(r, 0, sizeof(*r));
27452 for (i = 0;;) {
27453 u8 ch = ((u8 *)b)[i];
27454
27455 diff --git a/crypto/serpent.c b/crypto/serpent.c
27456 index b651a55..023297d 100644
27457 --- a/crypto/serpent.c
27458 +++ b/crypto/serpent.c
27459 @@ -21,6 +21,7 @@
27460 #include <asm/byteorder.h>
27461 #include <linux/crypto.h>
27462 #include <linux/types.h>
27463 +#include <linux/sched.h>
27464
27465 /* Key is padded to the maximum of 256 bits before round key generation.
27466 * Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
27467 @@ -224,6 +225,8 @@ static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
27468 u32 r0,r1,r2,r3,r4;
27469 int i;
27470
27471 + pax_track_stack();
27472 +
27473 /* Copy key, add padding */
27474
27475 for (i = 0; i < keylen; ++i)
27476 diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
27477 index 9ed9f60..88f160b 100644
27478 --- a/crypto/sha512_generic.c
27479 +++ b/crypto/sha512_generic.c
27480 @@ -21,8 +21,6 @@
27481 #include <linux/percpu.h>
27482 #include <asm/byteorder.h>
27483
27484 -static DEFINE_PER_CPU(u64[80], msg_schedule);
27485 -
27486 static inline u64 Ch(u64 x, u64 y, u64 z)
27487 {
27488 return z ^ (x & (y ^ z));
27489 @@ -80,7 +78,7 @@ static inline void LOAD_OP(int I, u64 *W, const u8 *input)
27490
27491 static inline void BLEND_OP(int I, u64 *W)
27492 {
27493 - W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16];
27494 + W[I % 16] += s1(W[(I-2) % 16]) + W[(I-7) % 16] + s0(W[(I-15) % 16]);
27495 }
27496
27497 static void
27498 @@ -89,38 +87,48 @@ sha512_transform(u64 *state, const u8 *input)
27499 u64 a, b, c, d, e, f, g, h, t1, t2;
27500
27501 int i;
27502 - u64 *W = get_cpu_var(msg_schedule);
27503 + u64 W[16];
27504
27505 /* load the input */
27506 for (i = 0; i < 16; i++)
27507 LOAD_OP(i, W, input);
27508
27509 - for (i = 16; i < 80; i++) {
27510 - BLEND_OP(i, W);
27511 - }
27512 -
27513 /* load the state into our registers */
27514 a=state[0]; b=state[1]; c=state[2]; d=state[3];
27515 e=state[4]; f=state[5]; g=state[6]; h=state[7];
27516
27517 - /* now iterate */
27518 - for (i=0; i<80; i+=8) {
27519 - t1 = h + e1(e) + Ch(e,f,g) + sha512_K[i ] + W[i ];
27520 - t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
27521 - t1 = g + e1(d) + Ch(d,e,f) + sha512_K[i+1] + W[i+1];
27522 - t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
27523 - t1 = f + e1(c) + Ch(c,d,e) + sha512_K[i+2] + W[i+2];
27524 - t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
27525 - t1 = e + e1(b) + Ch(b,c,d) + sha512_K[i+3] + W[i+3];
27526 - t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
27527 - t1 = d + e1(a) + Ch(a,b,c) + sha512_K[i+4] + W[i+4];
27528 - t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
27529 - t1 = c + e1(h) + Ch(h,a,b) + sha512_K[i+5] + W[i+5];
27530 - t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
27531 - t1 = b + e1(g) + Ch(g,h,a) + sha512_K[i+6] + W[i+6];
27532 - t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
27533 - t1 = a + e1(f) + Ch(f,g,h) + sha512_K[i+7] + W[i+7];
27534 - t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
27535 +#define SHA512_0_15(i, a, b, c, d, e, f, g, h) \
27536 + t1 = h + e1(e) + Ch(e, f, g) + sha512_K[i] + W[i]; \
27537 + t2 = e0(a) + Maj(a, b, c); \
27538 + d += t1; \
27539 + h = t1 + t2
27540 +
27541 +#define SHA512_16_79(i, a, b, c, d, e, f, g, h) \
27542 + BLEND_OP(i, W); \
27543 + t1 = h + e1(e) + Ch(e, f, g) + sha512_K[i] + W[(i)%16]; \
27544 + t2 = e0(a) + Maj(a, b, c); \
27545 + d += t1; \
27546 + h = t1 + t2
27547 +
27548 + for (i = 0; i < 16; i += 8) {
27549 + SHA512_0_15(i, a, b, c, d, e, f, g, h);
27550 + SHA512_0_15(i + 1, h, a, b, c, d, e, f, g);
27551 + SHA512_0_15(i + 2, g, h, a, b, c, d, e, f);
27552 + SHA512_0_15(i + 3, f, g, h, a, b, c, d, e);
27553 + SHA512_0_15(i + 4, e, f, g, h, a, b, c, d);
27554 + SHA512_0_15(i + 5, d, e, f, g, h, a, b, c);
27555 + SHA512_0_15(i + 6, c, d, e, f, g, h, a, b);
27556 + SHA512_0_15(i + 7, b, c, d, e, f, g, h, a);
27557 + }
27558 + for (i = 16; i < 80; i += 8) {
27559 + SHA512_16_79(i, a, b, c, d, e, f, g, h);
27560 + SHA512_16_79(i + 1, h, a, b, c, d, e, f, g);
27561 + SHA512_16_79(i + 2, g, h, a, b, c, d, e, f);
27562 + SHA512_16_79(i + 3, f, g, h, a, b, c, d, e);
27563 + SHA512_16_79(i + 4, e, f, g, h, a, b, c, d);
27564 + SHA512_16_79(i + 5, d, e, f, g, h, a, b, c);
27565 + SHA512_16_79(i + 6, c, d, e, f, g, h, a, b);
27566 + SHA512_16_79(i + 7, b, c, d, e, f, g, h, a);
27567 }
27568
27569 state[0] += a; state[1] += b; state[2] += c; state[3] += d;
27570 @@ -128,8 +136,6 @@ sha512_transform(u64 *state, const u8 *input)
27571
27572 /* erase our data */
27573 a = b = c = d = e = f = g = h = t1 = t2 = 0;
27574 - memset(W, 0, sizeof(__get_cpu_var(msg_schedule)));
27575 - put_cpu_var(msg_schedule);
27576 }
27577
27578 static int
27579 diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
27580 index 0d2cdb8..d8de48d 100644
27581 --- a/drivers/acpi/acpi_pad.c
27582 +++ b/drivers/acpi/acpi_pad.c
27583 @@ -30,7 +30,7 @@
27584 #include <acpi/acpi_bus.h>
27585 #include <acpi/acpi_drivers.h>
27586
27587 -#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
27588 +#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
27589 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
27590 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
27591 static DEFINE_MUTEX(isolated_cpus_lock);
27592 diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
27593 index 3f4602b..2e41d36 100644
27594 --- a/drivers/acpi/battery.c
27595 +++ b/drivers/acpi/battery.c
27596 @@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
27597 }
27598
27599 static struct battery_file {
27600 - struct file_operations ops;
27601 + const struct file_operations ops;
27602 mode_t mode;
27603 const char *name;
27604 } acpi_battery_file[] = {
27605 diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
27606 index 7338b6a..82f0257 100644
27607 --- a/drivers/acpi/dock.c
27608 +++ b/drivers/acpi/dock.c
27609 @@ -77,7 +77,7 @@ struct dock_dependent_device {
27610 struct list_head list;
27611 struct list_head hotplug_list;
27612 acpi_handle handle;
27613 - struct acpi_dock_ops *ops;
27614 + const struct acpi_dock_ops *ops;
27615 void *context;
27616 };
27617
27618 @@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier);
27619 * the dock driver after _DCK is executed.
27620 */
27621 int
27622 -register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
27623 +register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
27624 void *context)
27625 {
27626 struct dock_dependent_device *dd;
27627 diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
27628 index 7c1c59e..2993595 100644
27629 --- a/drivers/acpi/osl.c
27630 +++ b/drivers/acpi/osl.c
27631 @@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
27632 void __iomem *virt_addr;
27633
27634 virt_addr = ioremap(phys_addr, width);
27635 + if (!virt_addr)
27636 + return AE_NO_MEMORY;
27637 if (!value)
27638 value = &dummy;
27639
27640 @@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
27641 void __iomem *virt_addr;
27642
27643 virt_addr = ioremap(phys_addr, width);
27644 + if (!virt_addr)
27645 + return AE_NO_MEMORY;
27646
27647 switch (width) {
27648 case 8:
27649 diff --git a/drivers/acpi/power_meter.c b/drivers/acpi/power_meter.c
27650 index c216062..eec10d2 100644
27651 --- a/drivers/acpi/power_meter.c
27652 +++ b/drivers/acpi/power_meter.c
27653 @@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
27654 return res;
27655
27656 temp /= 1000;
27657 - if (temp < 0)
27658 - return -EINVAL;
27659
27660 mutex_lock(&resource->lock);
27661 resource->trip[attr->index - 7] = temp;
27662 diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
27663 index d0d25e2..961643d 100644
27664 --- a/drivers/acpi/proc.c
27665 +++ b/drivers/acpi/proc.c
27666 @@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct file *file,
27667 size_t count, loff_t * ppos)
27668 {
27669 struct list_head *node, *next;
27670 - char strbuf[5];
27671 - char str[5] = "";
27672 - unsigned int len = count;
27673 + char strbuf[5] = {0};
27674 struct acpi_device *found_dev = NULL;
27675
27676 - if (len > 4)
27677 - len = 4;
27678 - if (len < 0)
27679 - return -EFAULT;
27680 + if (count > 4)
27681 + count = 4;
27682
27683 - if (copy_from_user(strbuf, buffer, len))
27684 + if (copy_from_user(strbuf, buffer, count))
27685 return -EFAULT;
27686 - strbuf[len] = '\0';
27687 - sscanf(strbuf, "%s", str);
27688 + strbuf[count] = '\0';
27689
27690 mutex_lock(&acpi_device_lock);
27691 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
27692 @@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct file *file,
27693 if (!dev->wakeup.flags.valid)
27694 continue;
27695
27696 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
27697 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
27698 dev->wakeup.state.enabled =
27699 dev->wakeup.state.enabled ? 0 : 1;
27700 found_dev = dev;
27701 diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
27702 index 7102474..de8ad22 100644
27703 --- a/drivers/acpi/processor_core.c
27704 +++ b/drivers/acpi/processor_core.c
27705 @@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
27706 return 0;
27707 }
27708
27709 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
27710 + BUG_ON(pr->id >= nr_cpu_ids);
27711
27712 /*
27713 * Buggy BIOS check
27714 diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
27715 index d933980..5761f13 100644
27716 --- a/drivers/acpi/sbshc.c
27717 +++ b/drivers/acpi/sbshc.c
27718 @@ -17,7 +17,7 @@
27719
27720 #define PREFIX "ACPI: "
27721
27722 -#define ACPI_SMB_HC_CLASS "smbus_host_controller"
27723 +#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
27724 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
27725
27726 struct acpi_smb_hc {
27727 diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
27728 index 0458094..6978e7b 100644
27729 --- a/drivers/acpi/sleep.c
27730 +++ b/drivers/acpi/sleep.c
27731 @@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(suspend_state_t pm_state)
27732 }
27733 }
27734
27735 -static struct platform_suspend_ops acpi_suspend_ops = {
27736 +static const struct platform_suspend_ops acpi_suspend_ops = {
27737 .valid = acpi_suspend_state_valid,
27738 .begin = acpi_suspend_begin,
27739 .prepare_late = acpi_pm_prepare,
27740 @@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspend_state_t pm_state)
27741 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
27742 * been requested.
27743 */
27744 -static struct platform_suspend_ops acpi_suspend_ops_old = {
27745 +static const struct platform_suspend_ops acpi_suspend_ops_old = {
27746 .valid = acpi_suspend_state_valid,
27747 .begin = acpi_suspend_begin_old,
27748 .prepare_late = acpi_pm_disable_gpes,
27749 @@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
27750 acpi_enable_all_runtime_gpes();
27751 }
27752
27753 -static struct platform_hibernation_ops acpi_hibernation_ops = {
27754 +static const struct platform_hibernation_ops acpi_hibernation_ops = {
27755 .begin = acpi_hibernation_begin,
27756 .end = acpi_pm_end,
27757 .pre_snapshot = acpi_hibernation_pre_snapshot,
27758 @@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot_old(void)
27759 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
27760 * been requested.
27761 */
27762 -static struct platform_hibernation_ops acpi_hibernation_ops_old = {
27763 +static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
27764 .begin = acpi_hibernation_begin_old,
27765 .end = acpi_pm_end,
27766 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
27767 diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
27768 index 05dff63..b662ab7 100644
27769 --- a/drivers/acpi/video.c
27770 +++ b/drivers/acpi/video.c
27771 @@ -359,7 +359,7 @@ static int acpi_video_set_brightness(struct backlight_device *bd)
27772 vd->brightness->levels[request_level]);
27773 }
27774
27775 -static struct backlight_ops acpi_backlight_ops = {
27776 +static const struct backlight_ops acpi_backlight_ops = {
27777 .get_brightness = acpi_video_get_brightness,
27778 .update_status = acpi_video_set_brightness,
27779 };
27780 diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
27781 index 6787aab..23ffb0e 100644
27782 --- a/drivers/ata/ahci.c
27783 +++ b/drivers/ata/ahci.c
27784 @@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sht = {
27785 .sdev_attrs = ahci_sdev_attrs,
27786 };
27787
27788 -static struct ata_port_operations ahci_ops = {
27789 +static const struct ata_port_operations ahci_ops = {
27790 .inherits = &sata_pmp_port_ops,
27791
27792 .qc_defer = sata_pmp_qc_defer_cmd_switch,
27793 @@ -424,17 +424,17 @@ static struct ata_port_operations ahci_ops = {
27794 .port_stop = ahci_port_stop,
27795 };
27796
27797 -static struct ata_port_operations ahci_vt8251_ops = {
27798 +static const struct ata_port_operations ahci_vt8251_ops = {
27799 .inherits = &ahci_ops,
27800 .hardreset = ahci_vt8251_hardreset,
27801 };
27802
27803 -static struct ata_port_operations ahci_p5wdh_ops = {
27804 +static const struct ata_port_operations ahci_p5wdh_ops = {
27805 .inherits = &ahci_ops,
27806 .hardreset = ahci_p5wdh_hardreset,
27807 };
27808
27809 -static struct ata_port_operations ahci_sb600_ops = {
27810 +static const struct ata_port_operations ahci_sb600_ops = {
27811 .inherits = &ahci_ops,
27812 .softreset = ahci_sb600_softreset,
27813 .pmp_softreset = ahci_sb600_softreset,
27814 diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
27815 index 99e7196..4968c77 100644
27816 --- a/drivers/ata/ata_generic.c
27817 +++ b/drivers/ata/ata_generic.c
27818 @@ -104,7 +104,7 @@ static struct scsi_host_template generic_sht = {
27819 ATA_BMDMA_SHT(DRV_NAME),
27820 };
27821
27822 -static struct ata_port_operations generic_port_ops = {
27823 +static const struct ata_port_operations generic_port_ops = {
27824 .inherits = &ata_bmdma_port_ops,
27825 .cable_detect = ata_cable_unknown,
27826 .set_mode = generic_set_mode,
27827 diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
27828 index c33591d..000c121 100644
27829 --- a/drivers/ata/ata_piix.c
27830 +++ b/drivers/ata/ata_piix.c
27831 @@ -318,7 +318,7 @@ static struct scsi_host_template piix_sht = {
27832 ATA_BMDMA_SHT(DRV_NAME),
27833 };
27834
27835 -static struct ata_port_operations piix_pata_ops = {
27836 +static const struct ata_port_operations piix_pata_ops = {
27837 .inherits = &ata_bmdma32_port_ops,
27838 .cable_detect = ata_cable_40wire,
27839 .set_piomode = piix_set_piomode,
27840 @@ -326,22 +326,22 @@ static struct ata_port_operations piix_pata_ops = {
27841 .prereset = piix_pata_prereset,
27842 };
27843
27844 -static struct ata_port_operations piix_vmw_ops = {
27845 +static const struct ata_port_operations piix_vmw_ops = {
27846 .inherits = &piix_pata_ops,
27847 .bmdma_status = piix_vmw_bmdma_status,
27848 };
27849
27850 -static struct ata_port_operations ich_pata_ops = {
27851 +static const struct ata_port_operations ich_pata_ops = {
27852 .inherits = &piix_pata_ops,
27853 .cable_detect = ich_pata_cable_detect,
27854 .set_dmamode = ich_set_dmamode,
27855 };
27856
27857 -static struct ata_port_operations piix_sata_ops = {
27858 +static const struct ata_port_operations piix_sata_ops = {
27859 .inherits = &ata_bmdma_port_ops,
27860 };
27861
27862 -static struct ata_port_operations piix_sidpr_sata_ops = {
27863 +static const struct ata_port_operations piix_sidpr_sata_ops = {
27864 .inherits = &piix_sata_ops,
27865 .hardreset = sata_std_hardreset,
27866 .scr_read = piix_sidpr_scr_read,
27867 diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
27868 index b0882cd..c295d65 100644
27869 --- a/drivers/ata/libata-acpi.c
27870 +++ b/drivers/ata/libata-acpi.c
27871 @@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data)
27872 ata_acpi_uevent(dev->link->ap, dev, event);
27873 }
27874
27875 -static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
27876 +static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
27877 .handler = ata_acpi_dev_notify_dock,
27878 .uevent = ata_acpi_dev_uevent,
27879 };
27880
27881 -static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
27882 +static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
27883 .handler = ata_acpi_ap_notify_dock,
27884 .uevent = ata_acpi_ap_uevent,
27885 };
27886 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
27887 index d4f7f99..94f603e 100644
27888 --- a/drivers/ata/libata-core.c
27889 +++ b/drivers/ata/libata-core.c
27890 @@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
27891 struct ata_port *ap;
27892 unsigned int tag;
27893
27894 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27895 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27896 ap = qc->ap;
27897
27898 qc->flags = 0;
27899 @@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
27900 struct ata_port *ap;
27901 struct ata_link *link;
27902
27903 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27904 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27905 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
27906 ap = qc->ap;
27907 link = qc->dev->link;
27908 @@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device *gendev, void *res)
27909 * LOCKING:
27910 * None.
27911 */
27912 -static void ata_finalize_port_ops(struct ata_port_operations *ops)
27913 +static void ata_finalize_port_ops(const struct ata_port_operations *ops)
27914 {
27915 static DEFINE_SPINLOCK(lock);
27916 const struct ata_port_operations *cur;
27917 @@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27918 return;
27919
27920 spin_lock(&lock);
27921 + pax_open_kernel();
27922
27923 for (cur = ops->inherits; cur; cur = cur->inherits) {
27924 void **inherit = (void **)cur;
27925 @@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27926 if (IS_ERR(*pp))
27927 *pp = NULL;
27928
27929 - ops->inherits = NULL;
27930 + *(struct ata_port_operations **)&ops->inherits = NULL;
27931
27932 + pax_close_kernel();
27933 spin_unlock(&lock);
27934 }
27935
27936 @@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host)
27937 */
27938 /* KILLME - the only user left is ipr */
27939 void ata_host_init(struct ata_host *host, struct device *dev,
27940 - unsigned long flags, struct ata_port_operations *ops)
27941 + unsigned long flags, const struct ata_port_operations *ops)
27942 {
27943 spin_lock_init(&host->lock);
27944 host->dev = dev;
27945 @@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(struct ata_port *ap)
27946 /* truly dummy */
27947 }
27948
27949 -struct ata_port_operations ata_dummy_port_ops = {
27950 +const struct ata_port_operations ata_dummy_port_ops = {
27951 .qc_prep = ata_noop_qc_prep,
27952 .qc_issue = ata_dummy_qc_issue,
27953 .error_handler = ata_dummy_error_handler,
27954 diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
27955 index e5bdb9b..45a8e72 100644
27956 --- a/drivers/ata/libata-eh.c
27957 +++ b/drivers/ata/libata-eh.c
27958 @@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
27959 {
27960 struct ata_link *link;
27961
27962 + pax_track_stack();
27963 +
27964 ata_for_each_link(link, ap, HOST_FIRST)
27965 ata_eh_link_report(link);
27966 }
27967 @@ -3594,7 +3596,7 @@ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
27968 */
27969 void ata_std_error_handler(struct ata_port *ap)
27970 {
27971 - struct ata_port_operations *ops = ap->ops;
27972 + const struct ata_port_operations *ops = ap->ops;
27973 ata_reset_fn_t hardreset = ops->hardreset;
27974
27975 /* ignore built-in hardreset if SCR access is not available */
27976 diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
27977 index 51f0ffb..19ce3e3 100644
27978 --- a/drivers/ata/libata-pmp.c
27979 +++ b/drivers/ata/libata-pmp.c
27980 @@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(struct ata_link *link, int *link_tries)
27981 */
27982 static int sata_pmp_eh_recover(struct ata_port *ap)
27983 {
27984 - struct ata_port_operations *ops = ap->ops;
27985 + const struct ata_port_operations *ops = ap->ops;
27986 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
27987 struct ata_link *pmp_link = &ap->link;
27988 struct ata_device *pmp_dev = pmp_link->device;
27989 diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
27990 index d8f35fe..288180a 100644
27991 --- a/drivers/ata/pata_acpi.c
27992 +++ b/drivers/ata/pata_acpi.c
27993 @@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_sht = {
27994 ATA_BMDMA_SHT(DRV_NAME),
27995 };
27996
27997 -static struct ata_port_operations pacpi_ops = {
27998 +static const struct ata_port_operations pacpi_ops = {
27999 .inherits = &ata_bmdma_port_ops,
28000 .qc_issue = pacpi_qc_issue,
28001 .cable_detect = pacpi_cable_detect,
28002 diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
28003 index 9434114..1f2f364 100644
28004 --- a/drivers/ata/pata_ali.c
28005 +++ b/drivers/ata/pata_ali.c
28006 @@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht = {
28007 * Port operations for PIO only ALi
28008 */
28009
28010 -static struct ata_port_operations ali_early_port_ops = {
28011 +static const struct ata_port_operations ali_early_port_ops = {
28012 .inherits = &ata_sff_port_ops,
28013 .cable_detect = ata_cable_40wire,
28014 .set_piomode = ali_set_piomode,
28015 @@ -382,7 +382,7 @@ static const struct ata_port_operations ali_dma_base_ops = {
28016 * Port operations for DMA capable ALi without cable
28017 * detect
28018 */
28019 -static struct ata_port_operations ali_20_port_ops = {
28020 +static const struct ata_port_operations ali_20_port_ops = {
28021 .inherits = &ali_dma_base_ops,
28022 .cable_detect = ata_cable_40wire,
28023 .mode_filter = ali_20_filter,
28024 @@ -393,7 +393,7 @@ static struct ata_port_operations ali_20_port_ops = {
28025 /*
28026 * Port operations for DMA capable ALi with cable detect
28027 */
28028 -static struct ata_port_operations ali_c2_port_ops = {
28029 +static const struct ata_port_operations ali_c2_port_ops = {
28030 .inherits = &ali_dma_base_ops,
28031 .check_atapi_dma = ali_check_atapi_dma,
28032 .cable_detect = ali_c2_cable_detect,
28033 @@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2_port_ops = {
28034 /*
28035 * Port operations for DMA capable ALi with cable detect
28036 */
28037 -static struct ata_port_operations ali_c4_port_ops = {
28038 +static const struct ata_port_operations ali_c4_port_ops = {
28039 .inherits = &ali_dma_base_ops,
28040 .check_atapi_dma = ali_check_atapi_dma,
28041 .cable_detect = ali_c2_cable_detect,
28042 @@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4_port_ops = {
28043 /*
28044 * Port operations for DMA capable ALi with cable detect and LBA48
28045 */
28046 -static struct ata_port_operations ali_c5_port_ops = {
28047 +static const struct ata_port_operations ali_c5_port_ops = {
28048 .inherits = &ali_dma_base_ops,
28049 .check_atapi_dma = ali_check_atapi_dma,
28050 .dev_config = ali_warn_atapi_dma,
28051 diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
28052 index 567f3f7..c8ee0da 100644
28053 --- a/drivers/ata/pata_amd.c
28054 +++ b/drivers/ata/pata_amd.c
28055 @@ -397,28 +397,28 @@ static const struct ata_port_operations amd_base_port_ops = {
28056 .prereset = amd_pre_reset,
28057 };
28058
28059 -static struct ata_port_operations amd33_port_ops = {
28060 +static const struct ata_port_operations amd33_port_ops = {
28061 .inherits = &amd_base_port_ops,
28062 .cable_detect = ata_cable_40wire,
28063 .set_piomode = amd33_set_piomode,
28064 .set_dmamode = amd33_set_dmamode,
28065 };
28066
28067 -static struct ata_port_operations amd66_port_ops = {
28068 +static const struct ata_port_operations amd66_port_ops = {
28069 .inherits = &amd_base_port_ops,
28070 .cable_detect = ata_cable_unknown,
28071 .set_piomode = amd66_set_piomode,
28072 .set_dmamode = amd66_set_dmamode,
28073 };
28074
28075 -static struct ata_port_operations amd100_port_ops = {
28076 +static const struct ata_port_operations amd100_port_ops = {
28077 .inherits = &amd_base_port_ops,
28078 .cable_detect = ata_cable_unknown,
28079 .set_piomode = amd100_set_piomode,
28080 .set_dmamode = amd100_set_dmamode,
28081 };
28082
28083 -static struct ata_port_operations amd133_port_ops = {
28084 +static const struct ata_port_operations amd133_port_ops = {
28085 .inherits = &amd_base_port_ops,
28086 .cable_detect = amd_cable_detect,
28087 .set_piomode = amd133_set_piomode,
28088 @@ -433,13 +433,13 @@ static const struct ata_port_operations nv_base_port_ops = {
28089 .host_stop = nv_host_stop,
28090 };
28091
28092 -static struct ata_port_operations nv100_port_ops = {
28093 +static const struct ata_port_operations nv100_port_ops = {
28094 .inherits = &nv_base_port_ops,
28095 .set_piomode = nv100_set_piomode,
28096 .set_dmamode = nv100_set_dmamode,
28097 };
28098
28099 -static struct ata_port_operations nv133_port_ops = {
28100 +static const struct ata_port_operations nv133_port_ops = {
28101 .inherits = &nv_base_port_ops,
28102 .set_piomode = nv133_set_piomode,
28103 .set_dmamode = nv133_set_dmamode,
28104 diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
28105 index d332cfd..4b7eaae 100644
28106 --- a/drivers/ata/pata_artop.c
28107 +++ b/drivers/ata/pata_artop.c
28108 @@ -311,7 +311,7 @@ static struct scsi_host_template artop_sht = {
28109 ATA_BMDMA_SHT(DRV_NAME),
28110 };
28111
28112 -static struct ata_port_operations artop6210_ops = {
28113 +static const struct ata_port_operations artop6210_ops = {
28114 .inherits = &ata_bmdma_port_ops,
28115 .cable_detect = ata_cable_40wire,
28116 .set_piomode = artop6210_set_piomode,
28117 @@ -320,7 +320,7 @@ static struct ata_port_operations artop6210_ops = {
28118 .qc_defer = artop6210_qc_defer,
28119 };
28120
28121 -static struct ata_port_operations artop6260_ops = {
28122 +static const struct ata_port_operations artop6260_ops = {
28123 .inherits = &ata_bmdma_port_ops,
28124 .cable_detect = artop6260_cable_detect,
28125 .set_piomode = artop6260_set_piomode,
28126 diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
28127 index 5c129f9..7bb7ccb 100644
28128 --- a/drivers/ata/pata_at32.c
28129 +++ b/drivers/ata/pata_at32.c
28130 @@ -172,7 +172,7 @@ static struct scsi_host_template at32_sht = {
28131 ATA_PIO_SHT(DRV_NAME),
28132 };
28133
28134 -static struct ata_port_operations at32_port_ops = {
28135 +static const struct ata_port_operations at32_port_ops = {
28136 .inherits = &ata_sff_port_ops,
28137 .cable_detect = ata_cable_40wire,
28138 .set_piomode = pata_at32_set_piomode,
28139 diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
28140 index 41c94b1..829006d 100644
28141 --- a/drivers/ata/pata_at91.c
28142 +++ b/drivers/ata/pata_at91.c
28143 @@ -195,7 +195,7 @@ static struct scsi_host_template pata_at91_sht = {
28144 ATA_PIO_SHT(DRV_NAME),
28145 };
28146
28147 -static struct ata_port_operations pata_at91_port_ops = {
28148 +static const struct ata_port_operations pata_at91_port_ops = {
28149 .inherits = &ata_sff_port_ops,
28150
28151 .sff_data_xfer = pata_at91_data_xfer_noirq,
28152 diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
28153 index ae4454d..d391eb4 100644
28154 --- a/drivers/ata/pata_atiixp.c
28155 +++ b/drivers/ata/pata_atiixp.c
28156 @@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_sht = {
28157 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28158 };
28159
28160 -static struct ata_port_operations atiixp_port_ops = {
28161 +static const struct ata_port_operations atiixp_port_ops = {
28162 .inherits = &ata_bmdma_port_ops,
28163
28164 .qc_prep = ata_sff_dumb_qc_prep,
28165 diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
28166 index 6fe7ded..2a425dc 100644
28167 --- a/drivers/ata/pata_atp867x.c
28168 +++ b/drivers/ata/pata_atp867x.c
28169 @@ -274,7 +274,7 @@ static struct scsi_host_template atp867x_sht = {
28170 ATA_BMDMA_SHT(DRV_NAME),
28171 };
28172
28173 -static struct ata_port_operations atp867x_ops = {
28174 +static const struct ata_port_operations atp867x_ops = {
28175 .inherits = &ata_bmdma_port_ops,
28176 .cable_detect = atp867x_cable_detect,
28177 .set_piomode = atp867x_set_piomode,
28178 diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
28179 index c4b47a3..b27a367 100644
28180 --- a/drivers/ata/pata_bf54x.c
28181 +++ b/drivers/ata/pata_bf54x.c
28182 @@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sht = {
28183 .dma_boundary = ATA_DMA_BOUNDARY,
28184 };
28185
28186 -static struct ata_port_operations bfin_pata_ops = {
28187 +static const struct ata_port_operations bfin_pata_ops = {
28188 .inherits = &ata_sff_port_ops,
28189
28190 .set_piomode = bfin_set_piomode,
28191 diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
28192 index 5acf9fa..84248be 100644
28193 --- a/drivers/ata/pata_cmd640.c
28194 +++ b/drivers/ata/pata_cmd640.c
28195 @@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_sht = {
28196 ATA_BMDMA_SHT(DRV_NAME),
28197 };
28198
28199 -static struct ata_port_operations cmd640_port_ops = {
28200 +static const struct ata_port_operations cmd640_port_ops = {
28201 .inherits = &ata_bmdma_port_ops,
28202 /* In theory xfer_noirq is not needed once we kill the prefetcher */
28203 .sff_data_xfer = ata_sff_data_xfer_noirq,
28204 diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
28205 index ccd2694..c869c3d 100644
28206 --- a/drivers/ata/pata_cmd64x.c
28207 +++ b/drivers/ata/pata_cmd64x.c
28208 @@ -271,18 +271,18 @@ static const struct ata_port_operations cmd64x_base_ops = {
28209 .set_dmamode = cmd64x_set_dmamode,
28210 };
28211
28212 -static struct ata_port_operations cmd64x_port_ops = {
28213 +static const struct ata_port_operations cmd64x_port_ops = {
28214 .inherits = &cmd64x_base_ops,
28215 .cable_detect = ata_cable_40wire,
28216 };
28217
28218 -static struct ata_port_operations cmd646r1_port_ops = {
28219 +static const struct ata_port_operations cmd646r1_port_ops = {
28220 .inherits = &cmd64x_base_ops,
28221 .bmdma_stop = cmd646r1_bmdma_stop,
28222 .cable_detect = ata_cable_40wire,
28223 };
28224
28225 -static struct ata_port_operations cmd648_port_ops = {
28226 +static const struct ata_port_operations cmd648_port_ops = {
28227 .inherits = &cmd64x_base_ops,
28228 .bmdma_stop = cmd648_bmdma_stop,
28229 .cable_detect = cmd648_cable_detect,
28230 diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
28231 index 0df83cf..d7595b0 100644
28232 --- a/drivers/ata/pata_cs5520.c
28233 +++ b/drivers/ata/pata_cs5520.c
28234 @@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_sht = {
28235 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28236 };
28237
28238 -static struct ata_port_operations cs5520_port_ops = {
28239 +static const struct ata_port_operations cs5520_port_ops = {
28240 .inherits = &ata_bmdma_port_ops,
28241 .qc_prep = ata_sff_dumb_qc_prep,
28242 .cable_detect = ata_cable_40wire,
28243 diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
28244 index c974b05..6d26b11 100644
28245 --- a/drivers/ata/pata_cs5530.c
28246 +++ b/drivers/ata/pata_cs5530.c
28247 @@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_sht = {
28248 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28249 };
28250
28251 -static struct ata_port_operations cs5530_port_ops = {
28252 +static const struct ata_port_operations cs5530_port_ops = {
28253 .inherits = &ata_bmdma_port_ops,
28254
28255 .qc_prep = ata_sff_dumb_qc_prep,
28256 diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
28257 index 403f561..aacd26b 100644
28258 --- a/drivers/ata/pata_cs5535.c
28259 +++ b/drivers/ata/pata_cs5535.c
28260 @@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_sht = {
28261 ATA_BMDMA_SHT(DRV_NAME),
28262 };
28263
28264 -static struct ata_port_operations cs5535_port_ops = {
28265 +static const struct ata_port_operations cs5535_port_ops = {
28266 .inherits = &ata_bmdma_port_ops,
28267 .cable_detect = cs5535_cable_detect,
28268 .set_piomode = cs5535_set_piomode,
28269 diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
28270 index 6da4cb4..de24a25 100644
28271 --- a/drivers/ata/pata_cs5536.c
28272 +++ b/drivers/ata/pata_cs5536.c
28273 @@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_sht = {
28274 ATA_BMDMA_SHT(DRV_NAME),
28275 };
28276
28277 -static struct ata_port_operations cs5536_port_ops = {
28278 +static const struct ata_port_operations cs5536_port_ops = {
28279 .inherits = &ata_bmdma_port_ops,
28280 .cable_detect = cs5536_cable_detect,
28281 .set_piomode = cs5536_set_piomode,
28282 diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
28283 index 8fb040b..b16a9c9 100644
28284 --- a/drivers/ata/pata_cypress.c
28285 +++ b/drivers/ata/pata_cypress.c
28286 @@ -113,7 +113,7 @@ static struct scsi_host_template cy82c693_sht = {
28287 ATA_BMDMA_SHT(DRV_NAME),
28288 };
28289
28290 -static struct ata_port_operations cy82c693_port_ops = {
28291 +static const struct ata_port_operations cy82c693_port_ops = {
28292 .inherits = &ata_bmdma_port_ops,
28293 .cable_detect = ata_cable_40wire,
28294 .set_piomode = cy82c693_set_piomode,
28295 diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
28296 index 2a6412f..555ee11 100644
28297 --- a/drivers/ata/pata_efar.c
28298 +++ b/drivers/ata/pata_efar.c
28299 @@ -222,7 +222,7 @@ static struct scsi_host_template efar_sht = {
28300 ATA_BMDMA_SHT(DRV_NAME),
28301 };
28302
28303 -static struct ata_port_operations efar_ops = {
28304 +static const struct ata_port_operations efar_ops = {
28305 .inherits = &ata_bmdma_port_ops,
28306 .cable_detect = efar_cable_detect,
28307 .set_piomode = efar_set_piomode,
28308 diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
28309 index b9d8836..0b92030 100644
28310 --- a/drivers/ata/pata_hpt366.c
28311 +++ b/drivers/ata/pata_hpt366.c
28312 @@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_sht = {
28313 * Configuration for HPT366/68
28314 */
28315
28316 -static struct ata_port_operations hpt366_port_ops = {
28317 +static const struct ata_port_operations hpt366_port_ops = {
28318 .inherits = &ata_bmdma_port_ops,
28319 .cable_detect = hpt36x_cable_detect,
28320 .mode_filter = hpt366_filter,
28321 diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
28322 index 5af7f19..00c4980 100644
28323 --- a/drivers/ata/pata_hpt37x.c
28324 +++ b/drivers/ata/pata_hpt37x.c
28325 @@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_sht = {
28326 * Configuration for HPT370
28327 */
28328
28329 -static struct ata_port_operations hpt370_port_ops = {
28330 +static const struct ata_port_operations hpt370_port_ops = {
28331 .inherits = &ata_bmdma_port_ops,
28332
28333 .bmdma_stop = hpt370_bmdma_stop,
28334 @@ -591,7 +591,7 @@ static struct ata_port_operations hpt370_port_ops = {
28335 * Configuration for HPT370A. Close to 370 but less filters
28336 */
28337
28338 -static struct ata_port_operations hpt370a_port_ops = {
28339 +static const struct ata_port_operations hpt370a_port_ops = {
28340 .inherits = &hpt370_port_ops,
28341 .mode_filter = hpt370a_filter,
28342 };
28343 @@ -601,7 +601,7 @@ static struct ata_port_operations hpt370a_port_ops = {
28344 * and DMA mode setting functionality.
28345 */
28346
28347 -static struct ata_port_operations hpt372_port_ops = {
28348 +static const struct ata_port_operations hpt372_port_ops = {
28349 .inherits = &ata_bmdma_port_ops,
28350
28351 .bmdma_stop = hpt37x_bmdma_stop,
28352 @@ -616,7 +616,7 @@ static struct ata_port_operations hpt372_port_ops = {
28353 * but we have a different cable detection procedure for function 1.
28354 */
28355
28356 -static struct ata_port_operations hpt374_fn1_port_ops = {
28357 +static const struct ata_port_operations hpt374_fn1_port_ops = {
28358 .inherits = &hpt372_port_ops,
28359 .prereset = hpt374_fn1_pre_reset,
28360 };
28361 diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
28362 index 100f227..2e39382 100644
28363 --- a/drivers/ata/pata_hpt3x2n.c
28364 +++ b/drivers/ata/pata_hpt3x2n.c
28365 @@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n_sht = {
28366 * Configuration for HPT3x2n.
28367 */
28368
28369 -static struct ata_port_operations hpt3x2n_port_ops = {
28370 +static const struct ata_port_operations hpt3x2n_port_ops = {
28371 .inherits = &ata_bmdma_port_ops,
28372
28373 .bmdma_stop = hpt3x2n_bmdma_stop,
28374 diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
28375 index 7e31025..6fca8f4 100644
28376 --- a/drivers/ata/pata_hpt3x3.c
28377 +++ b/drivers/ata/pata_hpt3x3.c
28378 @@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_sht = {
28379 ATA_BMDMA_SHT(DRV_NAME),
28380 };
28381
28382 -static struct ata_port_operations hpt3x3_port_ops = {
28383 +static const struct ata_port_operations hpt3x3_port_ops = {
28384 .inherits = &ata_bmdma_port_ops,
28385 .cable_detect = ata_cable_40wire,
28386 .set_piomode = hpt3x3_set_piomode,
28387 diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
28388 index b663b7f..9a26c2a 100644
28389 --- a/drivers/ata/pata_icside.c
28390 +++ b/drivers/ata/pata_icside.c
28391 @@ -319,7 +319,7 @@ static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
28392 }
28393 }
28394
28395 -static struct ata_port_operations pata_icside_port_ops = {
28396 +static const struct ata_port_operations pata_icside_port_ops = {
28397 .inherits = &ata_sff_port_ops,
28398 /* no need to build any PRD tables for DMA */
28399 .qc_prep = ata_noop_qc_prep,
28400 diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
28401 index 4bceb88..457dfb6 100644
28402 --- a/drivers/ata/pata_isapnp.c
28403 +++ b/drivers/ata/pata_isapnp.c
28404 @@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_sht = {
28405 ATA_PIO_SHT(DRV_NAME),
28406 };
28407
28408 -static struct ata_port_operations isapnp_port_ops = {
28409 +static const struct ata_port_operations isapnp_port_ops = {
28410 .inherits = &ata_sff_port_ops,
28411 .cable_detect = ata_cable_40wire,
28412 };
28413
28414 -static struct ata_port_operations isapnp_noalt_port_ops = {
28415 +static const struct ata_port_operations isapnp_noalt_port_ops = {
28416 .inherits = &ata_sff_port_ops,
28417 .cable_detect = ata_cable_40wire,
28418 /* No altstatus so we don't want to use the lost interrupt poll */
28419 diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
28420 index f156da8..24976e2 100644
28421 --- a/drivers/ata/pata_it8213.c
28422 +++ b/drivers/ata/pata_it8213.c
28423 @@ -234,7 +234,7 @@ static struct scsi_host_template it8213_sht = {
28424 };
28425
28426
28427 -static struct ata_port_operations it8213_ops = {
28428 +static const struct ata_port_operations it8213_ops = {
28429 .inherits = &ata_bmdma_port_ops,
28430 .cable_detect = it8213_cable_detect,
28431 .set_piomode = it8213_set_piomode,
28432 diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
28433 index 188bc2f..ca9e785 100644
28434 --- a/drivers/ata/pata_it821x.c
28435 +++ b/drivers/ata/pata_it821x.c
28436 @@ -800,7 +800,7 @@ static struct scsi_host_template it821x_sht = {
28437 ATA_BMDMA_SHT(DRV_NAME),
28438 };
28439
28440 -static struct ata_port_operations it821x_smart_port_ops = {
28441 +static const struct ata_port_operations it821x_smart_port_ops = {
28442 .inherits = &ata_bmdma_port_ops,
28443
28444 .check_atapi_dma= it821x_check_atapi_dma,
28445 @@ -814,7 +814,7 @@ static struct ata_port_operations it821x_smart_port_ops = {
28446 .port_start = it821x_port_start,
28447 };
28448
28449 -static struct ata_port_operations it821x_passthru_port_ops = {
28450 +static const struct ata_port_operations it821x_passthru_port_ops = {
28451 .inherits = &ata_bmdma_port_ops,
28452
28453 .check_atapi_dma= it821x_check_atapi_dma,
28454 @@ -830,7 +830,7 @@ static struct ata_port_operations it821x_passthru_port_ops = {
28455 .port_start = it821x_port_start,
28456 };
28457
28458 -static struct ata_port_operations it821x_rdc_port_ops = {
28459 +static const struct ata_port_operations it821x_rdc_port_ops = {
28460 .inherits = &ata_bmdma_port_ops,
28461
28462 .check_atapi_dma= it821x_check_atapi_dma,
28463 diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
28464 index ba54b08..4b952b7 100644
28465 --- a/drivers/ata/pata_ixp4xx_cf.c
28466 +++ b/drivers/ata/pata_ixp4xx_cf.c
28467 @@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_sht = {
28468 ATA_PIO_SHT(DRV_NAME),
28469 };
28470
28471 -static struct ata_port_operations ixp4xx_port_ops = {
28472 +static const struct ata_port_operations ixp4xx_port_ops = {
28473 .inherits = &ata_sff_port_ops,
28474 .sff_data_xfer = ixp4xx_mmio_data_xfer,
28475 .cable_detect = ata_cable_40wire,
28476 diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
28477 index 3a1474a..434b0ff 100644
28478 --- a/drivers/ata/pata_jmicron.c
28479 +++ b/drivers/ata/pata_jmicron.c
28480 @@ -111,7 +111,7 @@ static struct scsi_host_template jmicron_sht = {
28481 ATA_BMDMA_SHT(DRV_NAME),
28482 };
28483
28484 -static struct ata_port_operations jmicron_ops = {
28485 +static const struct ata_port_operations jmicron_ops = {
28486 .inherits = &ata_bmdma_port_ops,
28487 .prereset = jmicron_pre_reset,
28488 };
28489 diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
28490 index 6932e56..220e71d 100644
28491 --- a/drivers/ata/pata_legacy.c
28492 +++ b/drivers/ata/pata_legacy.c
28493 @@ -106,7 +106,7 @@ struct legacy_probe {
28494
28495 struct legacy_controller {
28496 const char *name;
28497 - struct ata_port_operations *ops;
28498 + const struct ata_port_operations *ops;
28499 unsigned int pio_mask;
28500 unsigned int flags;
28501 unsigned int pflags;
28502 @@ -223,12 +223,12 @@ static const struct ata_port_operations legacy_base_port_ops = {
28503 * pio_mask as well.
28504 */
28505
28506 -static struct ata_port_operations simple_port_ops = {
28507 +static const struct ata_port_operations simple_port_ops = {
28508 .inherits = &legacy_base_port_ops,
28509 .sff_data_xfer = ata_sff_data_xfer_noirq,
28510 };
28511
28512 -static struct ata_port_operations legacy_port_ops = {
28513 +static const struct ata_port_operations legacy_port_ops = {
28514 .inherits = &legacy_base_port_ops,
28515 .sff_data_xfer = ata_sff_data_xfer_noirq,
28516 .set_mode = legacy_set_mode,
28517 @@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(struct ata_device *dev,
28518 return buflen;
28519 }
28520
28521 -static struct ata_port_operations pdc20230_port_ops = {
28522 +static const struct ata_port_operations pdc20230_port_ops = {
28523 .inherits = &legacy_base_port_ops,
28524 .set_piomode = pdc20230_set_piomode,
28525 .sff_data_xfer = pdc_data_xfer_vlb,
28526 @@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
28527 ioread8(ap->ioaddr.status_addr);
28528 }
28529
28530 -static struct ata_port_operations ht6560a_port_ops = {
28531 +static const struct ata_port_operations ht6560a_port_ops = {
28532 .inherits = &legacy_base_port_ops,
28533 .set_piomode = ht6560a_set_piomode,
28534 };
28535 @@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
28536 ioread8(ap->ioaddr.status_addr);
28537 }
28538
28539 -static struct ata_port_operations ht6560b_port_ops = {
28540 +static const struct ata_port_operations ht6560b_port_ops = {
28541 .inherits = &legacy_base_port_ops,
28542 .set_piomode = ht6560b_set_piomode,
28543 };
28544 @@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(struct ata_port *ap,
28545 }
28546
28547
28548 -static struct ata_port_operations opti82c611a_port_ops = {
28549 +static const struct ata_port_operations opti82c611a_port_ops = {
28550 .inherits = &legacy_base_port_ops,
28551 .set_piomode = opti82c611a_set_piomode,
28552 };
28553 @@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(struct ata_queued_cmd *qc)
28554 return ata_sff_qc_issue(qc);
28555 }
28556
28557 -static struct ata_port_operations opti82c46x_port_ops = {
28558 +static const struct ata_port_operations opti82c46x_port_ops = {
28559 .inherits = &legacy_base_port_ops,
28560 .set_piomode = opti82c46x_set_piomode,
28561 .qc_issue = opti82c46x_qc_issue,
28562 @@ -771,20 +771,20 @@ static int qdi_port(struct platform_device *dev,
28563 return 0;
28564 }
28565
28566 -static struct ata_port_operations qdi6500_port_ops = {
28567 +static const struct ata_port_operations qdi6500_port_ops = {
28568 .inherits = &legacy_base_port_ops,
28569 .set_piomode = qdi6500_set_piomode,
28570 .qc_issue = qdi_qc_issue,
28571 .sff_data_xfer = vlb32_data_xfer,
28572 };
28573
28574 -static struct ata_port_operations qdi6580_port_ops = {
28575 +static const struct ata_port_operations qdi6580_port_ops = {
28576 .inherits = &legacy_base_port_ops,
28577 .set_piomode = qdi6580_set_piomode,
28578 .sff_data_xfer = vlb32_data_xfer,
28579 };
28580
28581 -static struct ata_port_operations qdi6580dp_port_ops = {
28582 +static const struct ata_port_operations qdi6580dp_port_ops = {
28583 .inherits = &legacy_base_port_ops,
28584 .set_piomode = qdi6580dp_set_piomode,
28585 .sff_data_xfer = vlb32_data_xfer,
28586 @@ -855,7 +855,7 @@ static int winbond_port(struct platform_device *dev,
28587 return 0;
28588 }
28589
28590 -static struct ata_port_operations winbond_port_ops = {
28591 +static const struct ata_port_operations winbond_port_ops = {
28592 .inherits = &legacy_base_port_ops,
28593 .set_piomode = winbond_set_piomode,
28594 .sff_data_xfer = vlb32_data_xfer,
28595 @@ -978,7 +978,7 @@ static __init int legacy_init_one(struct legacy_probe *probe)
28596 int pio_modes = controller->pio_mask;
28597 unsigned long io = probe->port;
28598 u32 mask = (1 << probe->slot);
28599 - struct ata_port_operations *ops = controller->ops;
28600 + const struct ata_port_operations *ops = controller->ops;
28601 struct legacy_data *ld = &legacy_data[probe->slot];
28602 struct ata_host *host = NULL;
28603 struct ata_port *ap;
28604 diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
28605 index 2096fb7..4d090fc 100644
28606 --- a/drivers/ata/pata_marvell.c
28607 +++ b/drivers/ata/pata_marvell.c
28608 @@ -100,7 +100,7 @@ static struct scsi_host_template marvell_sht = {
28609 ATA_BMDMA_SHT(DRV_NAME),
28610 };
28611
28612 -static struct ata_port_operations marvell_ops = {
28613 +static const struct ata_port_operations marvell_ops = {
28614 .inherits = &ata_bmdma_port_ops,
28615 .cable_detect = marvell_cable_detect,
28616 .prereset = marvell_pre_reset,
28617 diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
28618 index 99d41be..7d56aa8 100644
28619 --- a/drivers/ata/pata_mpc52xx.c
28620 +++ b/drivers/ata/pata_mpc52xx.c
28621 @@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx_ata_sht = {
28622 ATA_PIO_SHT(DRV_NAME),
28623 };
28624
28625 -static struct ata_port_operations mpc52xx_ata_port_ops = {
28626 +static const struct ata_port_operations mpc52xx_ata_port_ops = {
28627 .inherits = &ata_bmdma_port_ops,
28628 .sff_dev_select = mpc52xx_ata_dev_select,
28629 .set_piomode = mpc52xx_ata_set_piomode,
28630 diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
28631 index b21f002..0a27e7f 100644
28632 --- a/drivers/ata/pata_mpiix.c
28633 +++ b/drivers/ata/pata_mpiix.c
28634 @@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_sht = {
28635 ATA_PIO_SHT(DRV_NAME),
28636 };
28637
28638 -static struct ata_port_operations mpiix_port_ops = {
28639 +static const struct ata_port_operations mpiix_port_ops = {
28640 .inherits = &ata_sff_port_ops,
28641 .qc_issue = mpiix_qc_issue,
28642 .cable_detect = ata_cable_40wire,
28643 diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
28644 index f0d52f7..89c3be3 100644
28645 --- a/drivers/ata/pata_netcell.c
28646 +++ b/drivers/ata/pata_netcell.c
28647 @@ -34,7 +34,7 @@ static struct scsi_host_template netcell_sht = {
28648 ATA_BMDMA_SHT(DRV_NAME),
28649 };
28650
28651 -static struct ata_port_operations netcell_ops = {
28652 +static const struct ata_port_operations netcell_ops = {
28653 .inherits = &ata_bmdma_port_ops,
28654 .cable_detect = ata_cable_80wire,
28655 .read_id = netcell_read_id,
28656 diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
28657 index dd53a66..a3f4317 100644
28658 --- a/drivers/ata/pata_ninja32.c
28659 +++ b/drivers/ata/pata_ninja32.c
28660 @@ -81,7 +81,7 @@ static struct scsi_host_template ninja32_sht = {
28661 ATA_BMDMA_SHT(DRV_NAME),
28662 };
28663
28664 -static struct ata_port_operations ninja32_port_ops = {
28665 +static const struct ata_port_operations ninja32_port_ops = {
28666 .inherits = &ata_bmdma_port_ops,
28667 .sff_dev_select = ninja32_dev_select,
28668 .cable_detect = ata_cable_40wire,
28669 diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
28670 index ca53fac..9aa93ef 100644
28671 --- a/drivers/ata/pata_ns87410.c
28672 +++ b/drivers/ata/pata_ns87410.c
28673 @@ -132,7 +132,7 @@ static struct scsi_host_template ns87410_sht = {
28674 ATA_PIO_SHT(DRV_NAME),
28675 };
28676
28677 -static struct ata_port_operations ns87410_port_ops = {
28678 +static const struct ata_port_operations ns87410_port_ops = {
28679 .inherits = &ata_sff_port_ops,
28680 .qc_issue = ns87410_qc_issue,
28681 .cable_detect = ata_cable_40wire,
28682 diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
28683 index 773b159..55f454e 100644
28684 --- a/drivers/ata/pata_ns87415.c
28685 +++ b/drivers/ata/pata_ns87415.c
28686 @@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct ata_port *ap)
28687 }
28688 #endif /* 87560 SuperIO Support */
28689
28690 -static struct ata_port_operations ns87415_pata_ops = {
28691 +static const struct ata_port_operations ns87415_pata_ops = {
28692 .inherits = &ata_bmdma_port_ops,
28693
28694 .check_atapi_dma = ns87415_check_atapi_dma,
28695 @@ -313,7 +313,7 @@ static struct ata_port_operations ns87415_pata_ops = {
28696 };
28697
28698 #if defined(CONFIG_SUPERIO)
28699 -static struct ata_port_operations ns87560_pata_ops = {
28700 +static const struct ata_port_operations ns87560_pata_ops = {
28701 .inherits = &ns87415_pata_ops,
28702 .sff_tf_read = ns87560_tf_read,
28703 .sff_check_status = ns87560_check_status,
28704 diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
28705 index d6f6956..639295b 100644
28706 --- a/drivers/ata/pata_octeon_cf.c
28707 +++ b/drivers/ata/pata_octeon_cf.c
28708 @@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
28709 return 0;
28710 }
28711
28712 +/* cannot be const */
28713 static struct ata_port_operations octeon_cf_ops = {
28714 .inherits = &ata_sff_port_ops,
28715 .check_atapi_dma = octeon_cf_check_atapi_dma,
28716 diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
28717 index 84ac503..adee1cd 100644
28718 --- a/drivers/ata/pata_oldpiix.c
28719 +++ b/drivers/ata/pata_oldpiix.c
28720 @@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix_sht = {
28721 ATA_BMDMA_SHT(DRV_NAME),
28722 };
28723
28724 -static struct ata_port_operations oldpiix_pata_ops = {
28725 +static const struct ata_port_operations oldpiix_pata_ops = {
28726 .inherits = &ata_bmdma_port_ops,
28727 .qc_issue = oldpiix_qc_issue,
28728 .cable_detect = ata_cable_40wire,
28729 diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
28730 index 99eddda..3a4c0aa 100644
28731 --- a/drivers/ata/pata_opti.c
28732 +++ b/drivers/ata/pata_opti.c
28733 @@ -152,7 +152,7 @@ static struct scsi_host_template opti_sht = {
28734 ATA_PIO_SHT(DRV_NAME),
28735 };
28736
28737 -static struct ata_port_operations opti_port_ops = {
28738 +static const struct ata_port_operations opti_port_ops = {
28739 .inherits = &ata_sff_port_ops,
28740 .cable_detect = ata_cable_40wire,
28741 .set_piomode = opti_set_piomode,
28742 diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
28743 index 86885a4..8e9968d 100644
28744 --- a/drivers/ata/pata_optidma.c
28745 +++ b/drivers/ata/pata_optidma.c
28746 @@ -337,7 +337,7 @@ static struct scsi_host_template optidma_sht = {
28747 ATA_BMDMA_SHT(DRV_NAME),
28748 };
28749
28750 -static struct ata_port_operations optidma_port_ops = {
28751 +static const struct ata_port_operations optidma_port_ops = {
28752 .inherits = &ata_bmdma_port_ops,
28753 .cable_detect = ata_cable_40wire,
28754 .set_piomode = optidma_set_pio_mode,
28755 @@ -346,7 +346,7 @@ static struct ata_port_operations optidma_port_ops = {
28756 .prereset = optidma_pre_reset,
28757 };
28758
28759 -static struct ata_port_operations optiplus_port_ops = {
28760 +static const struct ata_port_operations optiplus_port_ops = {
28761 .inherits = &optidma_port_ops,
28762 .set_piomode = optiplus_set_pio_mode,
28763 .set_dmamode = optiplus_set_dma_mode,
28764 diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
28765 index 11fb4cc..1a14022 100644
28766 --- a/drivers/ata/pata_palmld.c
28767 +++ b/drivers/ata/pata_palmld.c
28768 @@ -37,7 +37,7 @@ static struct scsi_host_template palmld_sht = {
28769 ATA_PIO_SHT(DRV_NAME),
28770 };
28771
28772 -static struct ata_port_operations palmld_port_ops = {
28773 +static const struct ata_port_operations palmld_port_ops = {
28774 .inherits = &ata_sff_port_ops,
28775 .sff_data_xfer = ata_sff_data_xfer_noirq,
28776 .cable_detect = ata_cable_40wire,
28777 diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
28778 index dc99e26..7f4b1e4 100644
28779 --- a/drivers/ata/pata_pcmcia.c
28780 +++ b/drivers/ata/pata_pcmcia.c
28781 @@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_sht = {
28782 ATA_PIO_SHT(DRV_NAME),
28783 };
28784
28785 -static struct ata_port_operations pcmcia_port_ops = {
28786 +static const struct ata_port_operations pcmcia_port_ops = {
28787 .inherits = &ata_sff_port_ops,
28788 .sff_data_xfer = ata_sff_data_xfer_noirq,
28789 .cable_detect = ata_cable_40wire,
28790 .set_mode = pcmcia_set_mode,
28791 };
28792
28793 -static struct ata_port_operations pcmcia_8bit_port_ops = {
28794 +static const struct ata_port_operations pcmcia_8bit_port_ops = {
28795 .inherits = &ata_sff_port_ops,
28796 .sff_data_xfer = ata_data_xfer_8bit,
28797 .cable_detect = ata_cable_40wire,
28798 @@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
28799 unsigned long io_base, ctl_base;
28800 void __iomem *io_addr, *ctl_addr;
28801 int n_ports = 1;
28802 - struct ata_port_operations *ops = &pcmcia_port_ops;
28803 + const struct ata_port_operations *ops = &pcmcia_port_ops;
28804
28805 info = kzalloc(sizeof(*info), GFP_KERNEL);
28806 if (info == NULL)
28807 diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
28808 index ca5cad0..3a1f125 100644
28809 --- a/drivers/ata/pata_pdc2027x.c
28810 +++ b/drivers/ata/pata_pdc2027x.c
28811 @@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027x_sht = {
28812 ATA_BMDMA_SHT(DRV_NAME),
28813 };
28814
28815 -static struct ata_port_operations pdc2027x_pata100_ops = {
28816 +static const struct ata_port_operations pdc2027x_pata100_ops = {
28817 .inherits = &ata_bmdma_port_ops,
28818 .check_atapi_dma = pdc2027x_check_atapi_dma,
28819 .cable_detect = pdc2027x_cable_detect,
28820 .prereset = pdc2027x_prereset,
28821 };
28822
28823 -static struct ata_port_operations pdc2027x_pata133_ops = {
28824 +static const struct ata_port_operations pdc2027x_pata133_ops = {
28825 .inherits = &pdc2027x_pata100_ops,
28826 .mode_filter = pdc2027x_mode_filter,
28827 .set_piomode = pdc2027x_set_piomode,
28828 diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
28829 index 2911120..4bf62aa 100644
28830 --- a/drivers/ata/pata_pdc202xx_old.c
28831 +++ b/drivers/ata/pata_pdc202xx_old.c
28832 @@ -274,7 +274,7 @@ static struct scsi_host_template pdc202xx_sht = {
28833 ATA_BMDMA_SHT(DRV_NAME),
28834 };
28835
28836 -static struct ata_port_operations pdc2024x_port_ops = {
28837 +static const struct ata_port_operations pdc2024x_port_ops = {
28838 .inherits = &ata_bmdma_port_ops,
28839
28840 .cable_detect = ata_cable_40wire,
28841 @@ -284,7 +284,7 @@ static struct ata_port_operations pdc2024x_port_ops = {
28842 .sff_exec_command = pdc202xx_exec_command,
28843 };
28844
28845 -static struct ata_port_operations pdc2026x_port_ops = {
28846 +static const struct ata_port_operations pdc2026x_port_ops = {
28847 .inherits = &pdc2024x_port_ops,
28848
28849 .check_atapi_dma = pdc2026x_check_atapi_dma,
28850 diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
28851 index 3f6ebc6..a18c358 100644
28852 --- a/drivers/ata/pata_platform.c
28853 +++ b/drivers/ata/pata_platform.c
28854 @@ -48,7 +48,7 @@ static struct scsi_host_template pata_platform_sht = {
28855 ATA_PIO_SHT(DRV_NAME),
28856 };
28857
28858 -static struct ata_port_operations pata_platform_port_ops = {
28859 +static const struct ata_port_operations pata_platform_port_ops = {
28860 .inherits = &ata_sff_port_ops,
28861 .sff_data_xfer = ata_sff_data_xfer_noirq,
28862 .cable_detect = ata_cable_unknown,
28863 diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
28864 index 45879dc..165a9f9 100644
28865 --- a/drivers/ata/pata_qdi.c
28866 +++ b/drivers/ata/pata_qdi.c
28867 @@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht = {
28868 ATA_PIO_SHT(DRV_NAME),
28869 };
28870
28871 -static struct ata_port_operations qdi6500_port_ops = {
28872 +static const struct ata_port_operations qdi6500_port_ops = {
28873 .inherits = &ata_sff_port_ops,
28874 .qc_issue = qdi_qc_issue,
28875 .sff_data_xfer = qdi_data_xfer,
28876 @@ -165,7 +165,7 @@ static struct ata_port_operations qdi6500_port_ops = {
28877 .set_piomode = qdi6500_set_piomode,
28878 };
28879
28880 -static struct ata_port_operations qdi6580_port_ops = {
28881 +static const struct ata_port_operations qdi6580_port_ops = {
28882 .inherits = &qdi6500_port_ops,
28883 .set_piomode = qdi6580_set_piomode,
28884 };
28885 diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
28886 index 4401b33..716c5cc 100644
28887 --- a/drivers/ata/pata_radisys.c
28888 +++ b/drivers/ata/pata_radisys.c
28889 @@ -187,7 +187,7 @@ static struct scsi_host_template radisys_sht = {
28890 ATA_BMDMA_SHT(DRV_NAME),
28891 };
28892
28893 -static struct ata_port_operations radisys_pata_ops = {
28894 +static const struct ata_port_operations radisys_pata_ops = {
28895 .inherits = &ata_bmdma_port_ops,
28896 .qc_issue = radisys_qc_issue,
28897 .cable_detect = ata_cable_unknown,
28898 diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
28899 index 45f1e10..fab6bca 100644
28900 --- a/drivers/ata/pata_rb532_cf.c
28901 +++ b/drivers/ata/pata_rb532_cf.c
28902 @@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handler(int irq, void *dev_instance)
28903 return IRQ_HANDLED;
28904 }
28905
28906 -static struct ata_port_operations rb532_pata_port_ops = {
28907 +static const struct ata_port_operations rb532_pata_port_ops = {
28908 .inherits = &ata_sff_port_ops,
28909 .sff_data_xfer = ata_sff_data_xfer32,
28910 };
28911 diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
28912 index c843a1e..b5853c3 100644
28913 --- a/drivers/ata/pata_rdc.c
28914 +++ b/drivers/ata/pata_rdc.c
28915 @@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
28916 pci_write_config_byte(dev, 0x48, udma_enable);
28917 }
28918
28919 -static struct ata_port_operations rdc_pata_ops = {
28920 +static const struct ata_port_operations rdc_pata_ops = {
28921 .inherits = &ata_bmdma32_port_ops,
28922 .cable_detect = rdc_pata_cable_detect,
28923 .set_piomode = rdc_set_piomode,
28924 diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
28925 index a5e4dfe..080c8c9 100644
28926 --- a/drivers/ata/pata_rz1000.c
28927 +++ b/drivers/ata/pata_rz1000.c
28928 @@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_sht = {
28929 ATA_PIO_SHT(DRV_NAME),
28930 };
28931
28932 -static struct ata_port_operations rz1000_port_ops = {
28933 +static const struct ata_port_operations rz1000_port_ops = {
28934 .inherits = &ata_sff_port_ops,
28935 .cable_detect = ata_cable_40wire,
28936 .set_mode = rz1000_set_mode,
28937 diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
28938 index 3bbed83..e309daf 100644
28939 --- a/drivers/ata/pata_sc1200.c
28940 +++ b/drivers/ata/pata_sc1200.c
28941 @@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_sht = {
28942 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28943 };
28944
28945 -static struct ata_port_operations sc1200_port_ops = {
28946 +static const struct ata_port_operations sc1200_port_ops = {
28947 .inherits = &ata_bmdma_port_ops,
28948 .qc_prep = ata_sff_dumb_qc_prep,
28949 .qc_issue = sc1200_qc_issue,
28950 diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
28951 index 4257d6b..4c1d9d5 100644
28952 --- a/drivers/ata/pata_scc.c
28953 +++ b/drivers/ata/pata_scc.c
28954 @@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht = {
28955 ATA_BMDMA_SHT(DRV_NAME),
28956 };
28957
28958 -static struct ata_port_operations scc_pata_ops = {
28959 +static const struct ata_port_operations scc_pata_ops = {
28960 .inherits = &ata_bmdma_port_ops,
28961
28962 .set_piomode = scc_set_piomode,
28963 diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
28964 index 99cceb4..e2e0a87 100644
28965 --- a/drivers/ata/pata_sch.c
28966 +++ b/drivers/ata/pata_sch.c
28967 @@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht = {
28968 ATA_BMDMA_SHT(DRV_NAME),
28969 };
28970
28971 -static struct ata_port_operations sch_pata_ops = {
28972 +static const struct ata_port_operations sch_pata_ops = {
28973 .inherits = &ata_bmdma_port_ops,
28974 .cable_detect = ata_cable_unknown,
28975 .set_piomode = sch_set_piomode,
28976 diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
28977 index beaed12..39969f1 100644
28978 --- a/drivers/ata/pata_serverworks.c
28979 +++ b/drivers/ata/pata_serverworks.c
28980 @@ -299,7 +299,7 @@ static struct scsi_host_template serverworks_sht = {
28981 ATA_BMDMA_SHT(DRV_NAME),
28982 };
28983
28984 -static struct ata_port_operations serverworks_osb4_port_ops = {
28985 +static const struct ata_port_operations serverworks_osb4_port_ops = {
28986 .inherits = &ata_bmdma_port_ops,
28987 .cable_detect = serverworks_cable_detect,
28988 .mode_filter = serverworks_osb4_filter,
28989 @@ -307,7 +307,7 @@ static struct ata_port_operations serverworks_osb4_port_ops = {
28990 .set_dmamode = serverworks_set_dmamode,
28991 };
28992
28993 -static struct ata_port_operations serverworks_csb_port_ops = {
28994 +static const struct ata_port_operations serverworks_csb_port_ops = {
28995 .inherits = &serverworks_osb4_port_ops,
28996 .mode_filter = serverworks_csb_filter,
28997 };
28998 diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
28999 index a2ace48..0463b44 100644
29000 --- a/drivers/ata/pata_sil680.c
29001 +++ b/drivers/ata/pata_sil680.c
29002 @@ -194,7 +194,7 @@ static struct scsi_host_template sil680_sht = {
29003 ATA_BMDMA_SHT(DRV_NAME),
29004 };
29005
29006 -static struct ata_port_operations sil680_port_ops = {
29007 +static const struct ata_port_operations sil680_port_ops = {
29008 .inherits = &ata_bmdma32_port_ops,
29009 .cable_detect = sil680_cable_detect,
29010 .set_piomode = sil680_set_piomode,
29011 diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
29012 index 488e77b..b3724d5 100644
29013 --- a/drivers/ata/pata_sis.c
29014 +++ b/drivers/ata/pata_sis.c
29015 @@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht = {
29016 ATA_BMDMA_SHT(DRV_NAME),
29017 };
29018
29019 -static struct ata_port_operations sis_133_for_sata_ops = {
29020 +static const struct ata_port_operations sis_133_for_sata_ops = {
29021 .inherits = &ata_bmdma_port_ops,
29022 .set_piomode = sis_133_set_piomode,
29023 .set_dmamode = sis_133_set_dmamode,
29024 .cable_detect = sis_133_cable_detect,
29025 };
29026
29027 -static struct ata_port_operations sis_base_ops = {
29028 +static const struct ata_port_operations sis_base_ops = {
29029 .inherits = &ata_bmdma_port_ops,
29030 .prereset = sis_pre_reset,
29031 };
29032
29033 -static struct ata_port_operations sis_133_ops = {
29034 +static const struct ata_port_operations sis_133_ops = {
29035 .inherits = &sis_base_ops,
29036 .set_piomode = sis_133_set_piomode,
29037 .set_dmamode = sis_133_set_dmamode,
29038 .cable_detect = sis_133_cable_detect,
29039 };
29040
29041 -static struct ata_port_operations sis_133_early_ops = {
29042 +static const struct ata_port_operations sis_133_early_ops = {
29043 .inherits = &sis_base_ops,
29044 .set_piomode = sis_100_set_piomode,
29045 .set_dmamode = sis_133_early_set_dmamode,
29046 .cable_detect = sis_66_cable_detect,
29047 };
29048
29049 -static struct ata_port_operations sis_100_ops = {
29050 +static const struct ata_port_operations sis_100_ops = {
29051 .inherits = &sis_base_ops,
29052 .set_piomode = sis_100_set_piomode,
29053 .set_dmamode = sis_100_set_dmamode,
29054 .cable_detect = sis_66_cable_detect,
29055 };
29056
29057 -static struct ata_port_operations sis_66_ops = {
29058 +static const struct ata_port_operations sis_66_ops = {
29059 .inherits = &sis_base_ops,
29060 .set_piomode = sis_old_set_piomode,
29061 .set_dmamode = sis_66_set_dmamode,
29062 .cable_detect = sis_66_cable_detect,
29063 };
29064
29065 -static struct ata_port_operations sis_old_ops = {
29066 +static const struct ata_port_operations sis_old_ops = {
29067 .inherits = &sis_base_ops,
29068 .set_piomode = sis_old_set_piomode,
29069 .set_dmamode = sis_old_set_dmamode,
29070 diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
29071 index 29f733c..43e9ca0 100644
29072 --- a/drivers/ata/pata_sl82c105.c
29073 +++ b/drivers/ata/pata_sl82c105.c
29074 @@ -231,7 +231,7 @@ static struct scsi_host_template sl82c105_sht = {
29075 ATA_BMDMA_SHT(DRV_NAME),
29076 };
29077
29078 -static struct ata_port_operations sl82c105_port_ops = {
29079 +static const struct ata_port_operations sl82c105_port_ops = {
29080 .inherits = &ata_bmdma_port_ops,
29081 .qc_defer = sl82c105_qc_defer,
29082 .bmdma_start = sl82c105_bmdma_start,
29083 diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
29084 index f1f13ff..df39e99 100644
29085 --- a/drivers/ata/pata_triflex.c
29086 +++ b/drivers/ata/pata_triflex.c
29087 @@ -178,7 +178,7 @@ static struct scsi_host_template triflex_sht = {
29088 ATA_BMDMA_SHT(DRV_NAME),
29089 };
29090
29091 -static struct ata_port_operations triflex_port_ops = {
29092 +static const struct ata_port_operations triflex_port_ops = {
29093 .inherits = &ata_bmdma_port_ops,
29094 .bmdma_start = triflex_bmdma_start,
29095 .bmdma_stop = triflex_bmdma_stop,
29096 diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
29097 index 1d73b8d..98a4b29 100644
29098 --- a/drivers/ata/pata_via.c
29099 +++ b/drivers/ata/pata_via.c
29100 @@ -419,7 +419,7 @@ static struct scsi_host_template via_sht = {
29101 ATA_BMDMA_SHT(DRV_NAME),
29102 };
29103
29104 -static struct ata_port_operations via_port_ops = {
29105 +static const struct ata_port_operations via_port_ops = {
29106 .inherits = &ata_bmdma_port_ops,
29107 .cable_detect = via_cable_detect,
29108 .set_piomode = via_set_piomode,
29109 @@ -429,7 +429,7 @@ static struct ata_port_operations via_port_ops = {
29110 .port_start = via_port_start,
29111 };
29112
29113 -static struct ata_port_operations via_port_ops_noirq = {
29114 +static const struct ata_port_operations via_port_ops_noirq = {
29115 .inherits = &via_port_ops,
29116 .sff_data_xfer = ata_sff_data_xfer_noirq,
29117 };
29118 diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
29119 index 6d8619b..ad511c4 100644
29120 --- a/drivers/ata/pata_winbond.c
29121 +++ b/drivers/ata/pata_winbond.c
29122 @@ -125,7 +125,7 @@ static struct scsi_host_template winbond_sht = {
29123 ATA_PIO_SHT(DRV_NAME),
29124 };
29125
29126 -static struct ata_port_operations winbond_port_ops = {
29127 +static const struct ata_port_operations winbond_port_ops = {
29128 .inherits = &ata_sff_port_ops,
29129 .sff_data_xfer = winbond_data_xfer,
29130 .cable_detect = ata_cable_40wire,
29131 diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
29132 index 6c65b07..f996ec7 100644
29133 --- a/drivers/ata/pdc_adma.c
29134 +++ b/drivers/ata/pdc_adma.c
29135 @@ -145,7 +145,7 @@ static struct scsi_host_template adma_ata_sht = {
29136 .dma_boundary = ADMA_DMA_BOUNDARY,
29137 };
29138
29139 -static struct ata_port_operations adma_ata_ops = {
29140 +static const struct ata_port_operations adma_ata_ops = {
29141 .inherits = &ata_sff_port_ops,
29142
29143 .lost_interrupt = ATA_OP_NULL,
29144 diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
29145 index 172b57e..c49bc1e 100644
29146 --- a/drivers/ata/sata_fsl.c
29147 +++ b/drivers/ata/sata_fsl.c
29148 @@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fsl_sht = {
29149 .dma_boundary = ATA_DMA_BOUNDARY,
29150 };
29151
29152 -static struct ata_port_operations sata_fsl_ops = {
29153 +static const struct ata_port_operations sata_fsl_ops = {
29154 .inherits = &sata_pmp_port_ops,
29155
29156 .qc_defer = ata_std_qc_defer,
29157 diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
29158 index 4406902..60603ef 100644
29159 --- a/drivers/ata/sata_inic162x.c
29160 +++ b/drivers/ata/sata_inic162x.c
29161 @@ -721,7 +721,7 @@ static int inic_port_start(struct ata_port *ap)
29162 return 0;
29163 }
29164
29165 -static struct ata_port_operations inic_port_ops = {
29166 +static const struct ata_port_operations inic_port_ops = {
29167 .inherits = &sata_port_ops,
29168
29169 .check_atapi_dma = inic_check_atapi_dma,
29170 diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
29171 index cf41126..8107be6 100644
29172 --- a/drivers/ata/sata_mv.c
29173 +++ b/drivers/ata/sata_mv.c
29174 @@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht = {
29175 .dma_boundary = MV_DMA_BOUNDARY,
29176 };
29177
29178 -static struct ata_port_operations mv5_ops = {
29179 +static const struct ata_port_operations mv5_ops = {
29180 .inherits = &ata_sff_port_ops,
29181
29182 .lost_interrupt = ATA_OP_NULL,
29183 @@ -678,7 +678,7 @@ static struct ata_port_operations mv5_ops = {
29184 .port_stop = mv_port_stop,
29185 };
29186
29187 -static struct ata_port_operations mv6_ops = {
29188 +static const struct ata_port_operations mv6_ops = {
29189 .inherits = &mv5_ops,
29190 .dev_config = mv6_dev_config,
29191 .scr_read = mv_scr_read,
29192 @@ -698,7 +698,7 @@ static struct ata_port_operations mv6_ops = {
29193 .bmdma_status = mv_bmdma_status,
29194 };
29195
29196 -static struct ata_port_operations mv_iie_ops = {
29197 +static const struct ata_port_operations mv_iie_ops = {
29198 .inherits = &mv6_ops,
29199 .dev_config = ATA_OP_NULL,
29200 .qc_prep = mv_qc_prep_iie,
29201 diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
29202 index ae2297c..d5c9c33 100644
29203 --- a/drivers/ata/sata_nv.c
29204 +++ b/drivers/ata/sata_nv.c
29205 @@ -464,7 +464,7 @@ static struct scsi_host_template nv_swncq_sht = {
29206 * cases. Define nv_hardreset() which only kicks in for post-boot
29207 * probing and use it for all variants.
29208 */
29209 -static struct ata_port_operations nv_generic_ops = {
29210 +static const struct ata_port_operations nv_generic_ops = {
29211 .inherits = &ata_bmdma_port_ops,
29212 .lost_interrupt = ATA_OP_NULL,
29213 .scr_read = nv_scr_read,
29214 @@ -472,20 +472,20 @@ static struct ata_port_operations nv_generic_ops = {
29215 .hardreset = nv_hardreset,
29216 };
29217
29218 -static struct ata_port_operations nv_nf2_ops = {
29219 +static const struct ata_port_operations nv_nf2_ops = {
29220 .inherits = &nv_generic_ops,
29221 .freeze = nv_nf2_freeze,
29222 .thaw = nv_nf2_thaw,
29223 };
29224
29225 -static struct ata_port_operations nv_ck804_ops = {
29226 +static const struct ata_port_operations nv_ck804_ops = {
29227 .inherits = &nv_generic_ops,
29228 .freeze = nv_ck804_freeze,
29229 .thaw = nv_ck804_thaw,
29230 .host_stop = nv_ck804_host_stop,
29231 };
29232
29233 -static struct ata_port_operations nv_adma_ops = {
29234 +static const struct ata_port_operations nv_adma_ops = {
29235 .inherits = &nv_ck804_ops,
29236
29237 .check_atapi_dma = nv_adma_check_atapi_dma,
29238 @@ -509,7 +509,7 @@ static struct ata_port_operations nv_adma_ops = {
29239 .host_stop = nv_adma_host_stop,
29240 };
29241
29242 -static struct ata_port_operations nv_swncq_ops = {
29243 +static const struct ata_port_operations nv_swncq_ops = {
29244 .inherits = &nv_generic_ops,
29245
29246 .qc_defer = ata_std_qc_defer,
29247 diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
29248 index 07d8d00..6cc70bb 100644
29249 --- a/drivers/ata/sata_promise.c
29250 +++ b/drivers/ata/sata_promise.c
29251 @@ -195,7 +195,7 @@ static const struct ata_port_operations pdc_common_ops = {
29252 .error_handler = pdc_error_handler,
29253 };
29254
29255 -static struct ata_port_operations pdc_sata_ops = {
29256 +static const struct ata_port_operations pdc_sata_ops = {
29257 .inherits = &pdc_common_ops,
29258 .cable_detect = pdc_sata_cable_detect,
29259 .freeze = pdc_sata_freeze,
29260 @@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sata_ops = {
29261
29262 /* First-generation chips need a more restrictive ->check_atapi_dma op,
29263 and ->freeze/thaw that ignore the hotplug controls. */
29264 -static struct ata_port_operations pdc_old_sata_ops = {
29265 +static const struct ata_port_operations pdc_old_sata_ops = {
29266 .inherits = &pdc_sata_ops,
29267 .freeze = pdc_freeze,
29268 .thaw = pdc_thaw,
29269 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
29270 };
29271
29272 -static struct ata_port_operations pdc_pata_ops = {
29273 +static const struct ata_port_operations pdc_pata_ops = {
29274 .inherits = &pdc_common_ops,
29275 .cable_detect = pdc_pata_cable_detect,
29276 .freeze = pdc_freeze,
29277 diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
29278 index 326c0cf..36ecebe 100644
29279 --- a/drivers/ata/sata_qstor.c
29280 +++ b/drivers/ata/sata_qstor.c
29281 @@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_sht = {
29282 .dma_boundary = QS_DMA_BOUNDARY,
29283 };
29284
29285 -static struct ata_port_operations qs_ata_ops = {
29286 +static const struct ata_port_operations qs_ata_ops = {
29287 .inherits = &ata_sff_port_ops,
29288
29289 .check_atapi_dma = qs_check_atapi_dma,
29290 diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
29291 index 3cb69d5..0871d3c 100644
29292 --- a/drivers/ata/sata_sil.c
29293 +++ b/drivers/ata/sata_sil.c
29294 @@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht = {
29295 .sg_tablesize = ATA_MAX_PRD
29296 };
29297
29298 -static struct ata_port_operations sil_ops = {
29299 +static const struct ata_port_operations sil_ops = {
29300 .inherits = &ata_bmdma32_port_ops,
29301 .dev_config = sil_dev_config,
29302 .set_mode = sil_set_mode,
29303 diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
29304 index e6946fc..eddb794 100644
29305 --- a/drivers/ata/sata_sil24.c
29306 +++ b/drivers/ata/sata_sil24.c
29307 @@ -388,7 +388,7 @@ static struct scsi_host_template sil24_sht = {
29308 .dma_boundary = ATA_DMA_BOUNDARY,
29309 };
29310
29311 -static struct ata_port_operations sil24_ops = {
29312 +static const struct ata_port_operations sil24_ops = {
29313 .inherits = &sata_pmp_port_ops,
29314
29315 .qc_defer = sil24_qc_defer,
29316 diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
29317 index f8a91bf..9cb06b6 100644
29318 --- a/drivers/ata/sata_sis.c
29319 +++ b/drivers/ata/sata_sis.c
29320 @@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht = {
29321 ATA_BMDMA_SHT(DRV_NAME),
29322 };
29323
29324 -static struct ata_port_operations sis_ops = {
29325 +static const struct ata_port_operations sis_ops = {
29326 .inherits = &ata_bmdma_port_ops,
29327 .scr_read = sis_scr_read,
29328 .scr_write = sis_scr_write,
29329 diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
29330 index 7257f2d..d04c6f5 100644
29331 --- a/drivers/ata/sata_svw.c
29332 +++ b/drivers/ata/sata_svw.c
29333 @@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata_sht = {
29334 };
29335
29336
29337 -static struct ata_port_operations k2_sata_ops = {
29338 +static const struct ata_port_operations k2_sata_ops = {
29339 .inherits = &ata_bmdma_port_ops,
29340 .sff_tf_load = k2_sata_tf_load,
29341 .sff_tf_read = k2_sata_tf_read,
29342 diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
29343 index bbcf970..cd0df0d 100644
29344 --- a/drivers/ata/sata_sx4.c
29345 +++ b/drivers/ata/sata_sx4.c
29346 @@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sata_sht = {
29347 };
29348
29349 /* TODO: inherit from base port_ops after converting to new EH */
29350 -static struct ata_port_operations pdc_20621_ops = {
29351 +static const struct ata_port_operations pdc_20621_ops = {
29352 .inherits = &ata_sff_port_ops,
29353
29354 .check_atapi_dma = pdc_check_atapi_dma,
29355 diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
29356 index e5bff47..089d859 100644
29357 --- a/drivers/ata/sata_uli.c
29358 +++ b/drivers/ata/sata_uli.c
29359 @@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht = {
29360 ATA_BMDMA_SHT(DRV_NAME),
29361 };
29362
29363 -static struct ata_port_operations uli_ops = {
29364 +static const struct ata_port_operations uli_ops = {
29365 .inherits = &ata_bmdma_port_ops,
29366 .scr_read = uli_scr_read,
29367 .scr_write = uli_scr_write,
29368 diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
29369 index f5dcca7..77b94eb 100644
29370 --- a/drivers/ata/sata_via.c
29371 +++ b/drivers/ata/sata_via.c
29372 @@ -115,32 +115,32 @@ static struct scsi_host_template svia_sht = {
29373 ATA_BMDMA_SHT(DRV_NAME),
29374 };
29375
29376 -static struct ata_port_operations svia_base_ops = {
29377 +static const struct ata_port_operations svia_base_ops = {
29378 .inherits = &ata_bmdma_port_ops,
29379 .sff_tf_load = svia_tf_load,
29380 };
29381
29382 -static struct ata_port_operations vt6420_sata_ops = {
29383 +static const struct ata_port_operations vt6420_sata_ops = {
29384 .inherits = &svia_base_ops,
29385 .freeze = svia_noop_freeze,
29386 .prereset = vt6420_prereset,
29387 .bmdma_start = vt6420_bmdma_start,
29388 };
29389
29390 -static struct ata_port_operations vt6421_pata_ops = {
29391 +static const struct ata_port_operations vt6421_pata_ops = {
29392 .inherits = &svia_base_ops,
29393 .cable_detect = vt6421_pata_cable_detect,
29394 .set_piomode = vt6421_set_pio_mode,
29395 .set_dmamode = vt6421_set_dma_mode,
29396 };
29397
29398 -static struct ata_port_operations vt6421_sata_ops = {
29399 +static const struct ata_port_operations vt6421_sata_ops = {
29400 .inherits = &svia_base_ops,
29401 .scr_read = svia_scr_read,
29402 .scr_write = svia_scr_write,
29403 };
29404
29405 -static struct ata_port_operations vt8251_ops = {
29406 +static const struct ata_port_operations vt8251_ops = {
29407 .inherits = &svia_base_ops,
29408 .hardreset = sata_std_hardreset,
29409 .scr_read = vt8251_scr_read,
29410 diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
29411 index 8b2a278..51e65d3 100644
29412 --- a/drivers/ata/sata_vsc.c
29413 +++ b/drivers/ata/sata_vsc.c
29414 @@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sata_sht = {
29415 };
29416
29417
29418 -static struct ata_port_operations vsc_sata_ops = {
29419 +static const struct ata_port_operations vsc_sata_ops = {
29420 .inherits = &ata_bmdma_port_ops,
29421 /* The IRQ handling is not quite standard SFF behaviour so we
29422 cannot use the default lost interrupt handler */
29423 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
29424 index 5effec6..7e4019a 100644
29425 --- a/drivers/atm/adummy.c
29426 +++ b/drivers/atm/adummy.c
29427 @@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
29428 vcc->pop(vcc, skb);
29429 else
29430 dev_kfree_skb_any(skb);
29431 - atomic_inc(&vcc->stats->tx);
29432 + atomic_inc_unchecked(&vcc->stats->tx);
29433
29434 return 0;
29435 }
29436 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
29437 index 66e1813..26a27c6 100644
29438 --- a/drivers/atm/ambassador.c
29439 +++ b/drivers/atm/ambassador.c
29440 @@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
29441 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
29442
29443 // VC layer stats
29444 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29445 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29446
29447 // free the descriptor
29448 kfree (tx_descr);
29449 @@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
29450 dump_skb ("<<<", vc, skb);
29451
29452 // VC layer stats
29453 - atomic_inc(&atm_vcc->stats->rx);
29454 + atomic_inc_unchecked(&atm_vcc->stats->rx);
29455 __net_timestamp(skb);
29456 // end of our responsability
29457 atm_vcc->push (atm_vcc, skb);
29458 @@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
29459 } else {
29460 PRINTK (KERN_INFO, "dropped over-size frame");
29461 // should we count this?
29462 - atomic_inc(&atm_vcc->stats->rx_drop);
29463 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29464 }
29465
29466 } else {
29467 @@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
29468 }
29469
29470 if (check_area (skb->data, skb->len)) {
29471 - atomic_inc(&atm_vcc->stats->tx_err);
29472 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
29473 return -ENOMEM; // ?
29474 }
29475
29476 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
29477 index 02ad83d..6daffeb 100644
29478 --- a/drivers/atm/atmtcp.c
29479 +++ b/drivers/atm/atmtcp.c
29480 @@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29481 if (vcc->pop) vcc->pop(vcc,skb);
29482 else dev_kfree_skb(skb);
29483 if (dev_data) return 0;
29484 - atomic_inc(&vcc->stats->tx_err);
29485 + atomic_inc_unchecked(&vcc->stats->tx_err);
29486 return -ENOLINK;
29487 }
29488 size = skb->len+sizeof(struct atmtcp_hdr);
29489 @@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29490 if (!new_skb) {
29491 if (vcc->pop) vcc->pop(vcc,skb);
29492 else dev_kfree_skb(skb);
29493 - atomic_inc(&vcc->stats->tx_err);
29494 + atomic_inc_unchecked(&vcc->stats->tx_err);
29495 return -ENOBUFS;
29496 }
29497 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
29498 @@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29499 if (vcc->pop) vcc->pop(vcc,skb);
29500 else dev_kfree_skb(skb);
29501 out_vcc->push(out_vcc,new_skb);
29502 - atomic_inc(&vcc->stats->tx);
29503 - atomic_inc(&out_vcc->stats->rx);
29504 + atomic_inc_unchecked(&vcc->stats->tx);
29505 + atomic_inc_unchecked(&out_vcc->stats->rx);
29506 return 0;
29507 }
29508
29509 @@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
29510 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
29511 read_unlock(&vcc_sklist_lock);
29512 if (!out_vcc) {
29513 - atomic_inc(&vcc->stats->tx_err);
29514 + atomic_inc_unchecked(&vcc->stats->tx_err);
29515 goto done;
29516 }
29517 skb_pull(skb,sizeof(struct atmtcp_hdr));
29518 @@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
29519 __net_timestamp(new_skb);
29520 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
29521 out_vcc->push(out_vcc,new_skb);
29522 - atomic_inc(&vcc->stats->tx);
29523 - atomic_inc(&out_vcc->stats->rx);
29524 + atomic_inc_unchecked(&vcc->stats->tx);
29525 + atomic_inc_unchecked(&out_vcc->stats->rx);
29526 done:
29527 if (vcc->pop) vcc->pop(vcc,skb);
29528 else dev_kfree_skb(skb);
29529 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
29530 index 0c30261..3da356e 100644
29531 --- a/drivers/atm/eni.c
29532 +++ b/drivers/atm/eni.c
29533 @@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
29534 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
29535 vcc->dev->number);
29536 length = 0;
29537 - atomic_inc(&vcc->stats->rx_err);
29538 + atomic_inc_unchecked(&vcc->stats->rx_err);
29539 }
29540 else {
29541 length = ATM_CELL_SIZE-1; /* no HEC */
29542 @@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
29543 size);
29544 }
29545 eff = length = 0;
29546 - atomic_inc(&vcc->stats->rx_err);
29547 + atomic_inc_unchecked(&vcc->stats->rx_err);
29548 }
29549 else {
29550 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
29551 @@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
29552 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
29553 vcc->dev->number,vcc->vci,length,size << 2,descr);
29554 length = eff = 0;
29555 - atomic_inc(&vcc->stats->rx_err);
29556 + atomic_inc_unchecked(&vcc->stats->rx_err);
29557 }
29558 }
29559 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
29560 @@ -770,7 +770,7 @@ rx_dequeued++;
29561 vcc->push(vcc,skb);
29562 pushed++;
29563 }
29564 - atomic_inc(&vcc->stats->rx);
29565 + atomic_inc_unchecked(&vcc->stats->rx);
29566 }
29567 wake_up(&eni_dev->rx_wait);
29568 }
29569 @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
29570 PCI_DMA_TODEVICE);
29571 if (vcc->pop) vcc->pop(vcc,skb);
29572 else dev_kfree_skb_irq(skb);
29573 - atomic_inc(&vcc->stats->tx);
29574 + atomic_inc_unchecked(&vcc->stats->tx);
29575 wake_up(&eni_dev->tx_wait);
29576 dma_complete++;
29577 }
29578 @@ -1570,7 +1570,7 @@ tx_complete++;
29579 /*--------------------------------- entries ---------------------------------*/
29580
29581
29582 -static const char *media_name[] __devinitdata = {
29583 +static const char *media_name[] __devinitconst = {
29584 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
29585 "UTP", "05?", "06?", "07?", /* 4- 7 */
29586 "TAXI","09?", "10?", "11?", /* 8-11 */
29587 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
29588 index cd5049a..a51209f 100644
29589 --- a/drivers/atm/firestream.c
29590 +++ b/drivers/atm/firestream.c
29591 @@ -748,7 +748,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
29592 }
29593 }
29594
29595 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29596 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29597
29598 fs_dprintk (FS_DEBUG_TXMEM, "i");
29599 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
29600 @@ -815,7 +815,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
29601 #endif
29602 skb_put (skb, qe->p1 & 0xffff);
29603 ATM_SKB(skb)->vcc = atm_vcc;
29604 - atomic_inc(&atm_vcc->stats->rx);
29605 + atomic_inc_unchecked(&atm_vcc->stats->rx);
29606 __net_timestamp(skb);
29607 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
29608 atm_vcc->push (atm_vcc, skb);
29609 @@ -836,12 +836,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
29610 kfree (pe);
29611 }
29612 if (atm_vcc)
29613 - atomic_inc(&atm_vcc->stats->rx_drop);
29614 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29615 break;
29616 case 0x1f: /* Reassembly abort: no buffers. */
29617 /* Silently increment error counter. */
29618 if (atm_vcc)
29619 - atomic_inc(&atm_vcc->stats->rx_drop);
29620 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29621 break;
29622 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
29623 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
29624 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
29625 index f766cc4..a34002e 100644
29626 --- a/drivers/atm/fore200e.c
29627 +++ b/drivers/atm/fore200e.c
29628 @@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
29629 #endif
29630 /* check error condition */
29631 if (*entry->status & STATUS_ERROR)
29632 - atomic_inc(&vcc->stats->tx_err);
29633 + atomic_inc_unchecked(&vcc->stats->tx_err);
29634 else
29635 - atomic_inc(&vcc->stats->tx);
29636 + atomic_inc_unchecked(&vcc->stats->tx);
29637 }
29638 }
29639
29640 @@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
29641 if (skb == NULL) {
29642 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
29643
29644 - atomic_inc(&vcc->stats->rx_drop);
29645 + atomic_inc_unchecked(&vcc->stats->rx_drop);
29646 return -ENOMEM;
29647 }
29648
29649 @@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
29650
29651 dev_kfree_skb_any(skb);
29652
29653 - atomic_inc(&vcc->stats->rx_drop);
29654 + atomic_inc_unchecked(&vcc->stats->rx_drop);
29655 return -ENOMEM;
29656 }
29657
29658 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
29659
29660 vcc->push(vcc, skb);
29661 - atomic_inc(&vcc->stats->rx);
29662 + atomic_inc_unchecked(&vcc->stats->rx);
29663
29664 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
29665
29666 @@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
29667 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
29668 fore200e->atm_dev->number,
29669 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
29670 - atomic_inc(&vcc->stats->rx_err);
29671 + atomic_inc_unchecked(&vcc->stats->rx_err);
29672 }
29673 }
29674
29675 @@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
29676 goto retry_here;
29677 }
29678
29679 - atomic_inc(&vcc->stats->tx_err);
29680 + atomic_inc_unchecked(&vcc->stats->tx_err);
29681
29682 fore200e->tx_sat++;
29683 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
29684 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
29685 index 7066703..2b130de 100644
29686 --- a/drivers/atm/he.c
29687 +++ b/drivers/atm/he.c
29688 @@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29689
29690 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
29691 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
29692 - atomic_inc(&vcc->stats->rx_drop);
29693 + atomic_inc_unchecked(&vcc->stats->rx_drop);
29694 goto return_host_buffers;
29695 }
29696
29697 @@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29698 RBRQ_LEN_ERR(he_dev->rbrq_head)
29699 ? "LEN_ERR" : "",
29700 vcc->vpi, vcc->vci);
29701 - atomic_inc(&vcc->stats->rx_err);
29702 + atomic_inc_unchecked(&vcc->stats->rx_err);
29703 goto return_host_buffers;
29704 }
29705
29706 @@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29707 vcc->push(vcc, skb);
29708 spin_lock(&he_dev->global_lock);
29709
29710 - atomic_inc(&vcc->stats->rx);
29711 + atomic_inc_unchecked(&vcc->stats->rx);
29712
29713 return_host_buffers:
29714 ++pdus_assembled;
29715 @@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
29716 tpd->vcc->pop(tpd->vcc, tpd->skb);
29717 else
29718 dev_kfree_skb_any(tpd->skb);
29719 - atomic_inc(&tpd->vcc->stats->tx_err);
29720 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
29721 }
29722 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
29723 return;
29724 @@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29725 vcc->pop(vcc, skb);
29726 else
29727 dev_kfree_skb_any(skb);
29728 - atomic_inc(&vcc->stats->tx_err);
29729 + atomic_inc_unchecked(&vcc->stats->tx_err);
29730 return -EINVAL;
29731 }
29732
29733 @@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29734 vcc->pop(vcc, skb);
29735 else
29736 dev_kfree_skb_any(skb);
29737 - atomic_inc(&vcc->stats->tx_err);
29738 + atomic_inc_unchecked(&vcc->stats->tx_err);
29739 return -EINVAL;
29740 }
29741 #endif
29742 @@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29743 vcc->pop(vcc, skb);
29744 else
29745 dev_kfree_skb_any(skb);
29746 - atomic_inc(&vcc->stats->tx_err);
29747 + atomic_inc_unchecked(&vcc->stats->tx_err);
29748 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29749 return -ENOMEM;
29750 }
29751 @@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29752 vcc->pop(vcc, skb);
29753 else
29754 dev_kfree_skb_any(skb);
29755 - atomic_inc(&vcc->stats->tx_err);
29756 + atomic_inc_unchecked(&vcc->stats->tx_err);
29757 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29758 return -ENOMEM;
29759 }
29760 @@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29761 __enqueue_tpd(he_dev, tpd, cid);
29762 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29763
29764 - atomic_inc(&vcc->stats->tx);
29765 + atomic_inc_unchecked(&vcc->stats->tx);
29766
29767 return 0;
29768 }
29769 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
29770 index 4e49021..01b1512 100644
29771 --- a/drivers/atm/horizon.c
29772 +++ b/drivers/atm/horizon.c
29773 @@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
29774 {
29775 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
29776 // VC layer stats
29777 - atomic_inc(&vcc->stats->rx);
29778 + atomic_inc_unchecked(&vcc->stats->rx);
29779 __net_timestamp(skb);
29780 // end of our responsability
29781 vcc->push (vcc, skb);
29782 @@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
29783 dev->tx_iovec = NULL;
29784
29785 // VC layer stats
29786 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29787 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29788
29789 // free the skb
29790 hrz_kfree_skb (skb);
29791 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
29792 index e33ae00..9deb4ab 100644
29793 --- a/drivers/atm/idt77252.c
29794 +++ b/drivers/atm/idt77252.c
29795 @@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
29796 else
29797 dev_kfree_skb(skb);
29798
29799 - atomic_inc(&vcc->stats->tx);
29800 + atomic_inc_unchecked(&vcc->stats->tx);
29801 }
29802
29803 atomic_dec(&scq->used);
29804 @@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29805 if ((sb = dev_alloc_skb(64)) == NULL) {
29806 printk("%s: Can't allocate buffers for aal0.\n",
29807 card->name);
29808 - atomic_add(i, &vcc->stats->rx_drop);
29809 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
29810 break;
29811 }
29812 if (!atm_charge(vcc, sb->truesize)) {
29813 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
29814 card->name);
29815 - atomic_add(i - 1, &vcc->stats->rx_drop);
29816 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
29817 dev_kfree_skb(sb);
29818 break;
29819 }
29820 @@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29821 ATM_SKB(sb)->vcc = vcc;
29822 __net_timestamp(sb);
29823 vcc->push(vcc, sb);
29824 - atomic_inc(&vcc->stats->rx);
29825 + atomic_inc_unchecked(&vcc->stats->rx);
29826
29827 cell += ATM_CELL_PAYLOAD;
29828 }
29829 @@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29830 "(CDC: %08x)\n",
29831 card->name, len, rpp->len, readl(SAR_REG_CDC));
29832 recycle_rx_pool_skb(card, rpp);
29833 - atomic_inc(&vcc->stats->rx_err);
29834 + atomic_inc_unchecked(&vcc->stats->rx_err);
29835 return;
29836 }
29837 if (stat & SAR_RSQE_CRC) {
29838 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
29839 recycle_rx_pool_skb(card, rpp);
29840 - atomic_inc(&vcc->stats->rx_err);
29841 + atomic_inc_unchecked(&vcc->stats->rx_err);
29842 return;
29843 }
29844 if (skb_queue_len(&rpp->queue) > 1) {
29845 @@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29846 RXPRINTK("%s: Can't alloc RX skb.\n",
29847 card->name);
29848 recycle_rx_pool_skb(card, rpp);
29849 - atomic_inc(&vcc->stats->rx_err);
29850 + atomic_inc_unchecked(&vcc->stats->rx_err);
29851 return;
29852 }
29853 if (!atm_charge(vcc, skb->truesize)) {
29854 @@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29855 __net_timestamp(skb);
29856
29857 vcc->push(vcc, skb);
29858 - atomic_inc(&vcc->stats->rx);
29859 + atomic_inc_unchecked(&vcc->stats->rx);
29860
29861 return;
29862 }
29863 @@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29864 __net_timestamp(skb);
29865
29866 vcc->push(vcc, skb);
29867 - atomic_inc(&vcc->stats->rx);
29868 + atomic_inc_unchecked(&vcc->stats->rx);
29869
29870 if (skb->truesize > SAR_FB_SIZE_3)
29871 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
29872 @@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
29873 if (vcc->qos.aal != ATM_AAL0) {
29874 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
29875 card->name, vpi, vci);
29876 - atomic_inc(&vcc->stats->rx_drop);
29877 + atomic_inc_unchecked(&vcc->stats->rx_drop);
29878 goto drop;
29879 }
29880
29881 if ((sb = dev_alloc_skb(64)) == NULL) {
29882 printk("%s: Can't allocate buffers for AAL0.\n",
29883 card->name);
29884 - atomic_inc(&vcc->stats->rx_err);
29885 + atomic_inc_unchecked(&vcc->stats->rx_err);
29886 goto drop;
29887 }
29888
29889 @@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
29890 ATM_SKB(sb)->vcc = vcc;
29891 __net_timestamp(sb);
29892 vcc->push(vcc, sb);
29893 - atomic_inc(&vcc->stats->rx);
29894 + atomic_inc_unchecked(&vcc->stats->rx);
29895
29896 drop:
29897 skb_pull(queue, 64);
29898 @@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
29899
29900 if (vc == NULL) {
29901 printk("%s: NULL connection in send().\n", card->name);
29902 - atomic_inc(&vcc->stats->tx_err);
29903 + atomic_inc_unchecked(&vcc->stats->tx_err);
29904 dev_kfree_skb(skb);
29905 return -EINVAL;
29906 }
29907 if (!test_bit(VCF_TX, &vc->flags)) {
29908 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
29909 - atomic_inc(&vcc->stats->tx_err);
29910 + atomic_inc_unchecked(&vcc->stats->tx_err);
29911 dev_kfree_skb(skb);
29912 return -EINVAL;
29913 }
29914 @@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
29915 break;
29916 default:
29917 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
29918 - atomic_inc(&vcc->stats->tx_err);
29919 + atomic_inc_unchecked(&vcc->stats->tx_err);
29920 dev_kfree_skb(skb);
29921 return -EINVAL;
29922 }
29923
29924 if (skb_shinfo(skb)->nr_frags != 0) {
29925 printk("%s: No scatter-gather yet.\n", card->name);
29926 - atomic_inc(&vcc->stats->tx_err);
29927 + atomic_inc_unchecked(&vcc->stats->tx_err);
29928 dev_kfree_skb(skb);
29929 return -EINVAL;
29930 }
29931 @@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
29932
29933 err = queue_skb(card, vc, skb, oam);
29934 if (err) {
29935 - atomic_inc(&vcc->stats->tx_err);
29936 + atomic_inc_unchecked(&vcc->stats->tx_err);
29937 dev_kfree_skb(skb);
29938 return err;
29939 }
29940 @@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
29941 skb = dev_alloc_skb(64);
29942 if (!skb) {
29943 printk("%s: Out of memory in send_oam().\n", card->name);
29944 - atomic_inc(&vcc->stats->tx_err);
29945 + atomic_inc_unchecked(&vcc->stats->tx_err);
29946 return -ENOMEM;
29947 }
29948 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
29949 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
29950 index b2c1b37..faa672b 100644
29951 --- a/drivers/atm/iphase.c
29952 +++ b/drivers/atm/iphase.c
29953 @@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
29954 status = (u_short) (buf_desc_ptr->desc_mode);
29955 if (status & (RX_CER | RX_PTE | RX_OFL))
29956 {
29957 - atomic_inc(&vcc->stats->rx_err);
29958 + atomic_inc_unchecked(&vcc->stats->rx_err);
29959 IF_ERR(printk("IA: bad packet, dropping it");)
29960 if (status & RX_CER) {
29961 IF_ERR(printk(" cause: packet CRC error\n");)
29962 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
29963 len = dma_addr - buf_addr;
29964 if (len > iadev->rx_buf_sz) {
29965 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
29966 - atomic_inc(&vcc->stats->rx_err);
29967 + atomic_inc_unchecked(&vcc->stats->rx_err);
29968 goto out_free_desc;
29969 }
29970
29971 @@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *dev)
29972 ia_vcc = INPH_IA_VCC(vcc);
29973 if (ia_vcc == NULL)
29974 {
29975 - atomic_inc(&vcc->stats->rx_err);
29976 + atomic_inc_unchecked(&vcc->stats->rx_err);
29977 dev_kfree_skb_any(skb);
29978 atm_return(vcc, atm_guess_pdu2truesize(len));
29979 goto INCR_DLE;
29980 @@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *dev)
29981 if ((length > iadev->rx_buf_sz) || (length >
29982 (skb->len - sizeof(struct cpcs_trailer))))
29983 {
29984 - atomic_inc(&vcc->stats->rx_err);
29985 + atomic_inc_unchecked(&vcc->stats->rx_err);
29986 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
29987 length, skb->len);)
29988 dev_kfree_skb_any(skb);
29989 @@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *dev)
29990
29991 IF_RX(printk("rx_dle_intr: skb push");)
29992 vcc->push(vcc,skb);
29993 - atomic_inc(&vcc->stats->rx);
29994 + atomic_inc_unchecked(&vcc->stats->rx);
29995 iadev->rx_pkt_cnt++;
29996 }
29997 INCR_DLE:
29998 @@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
29999 {
30000 struct k_sonet_stats *stats;
30001 stats = &PRIV(_ia_dev[board])->sonet_stats;
30002 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
30003 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
30004 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
30005 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
30006 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
30007 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
30008 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
30009 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
30010 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
30011 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
30012 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
30013 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
30014 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
30015 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
30016 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
30017 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
30018 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
30019 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
30020 }
30021 ia_cmds.status = 0;
30022 break;
30023 @@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
30024 if ((desc == 0) || (desc > iadev->num_tx_desc))
30025 {
30026 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
30027 - atomic_inc(&vcc->stats->tx);
30028 + atomic_inc_unchecked(&vcc->stats->tx);
30029 if (vcc->pop)
30030 vcc->pop(vcc, skb);
30031 else
30032 @@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
30033 ATM_DESC(skb) = vcc->vci;
30034 skb_queue_tail(&iadev->tx_dma_q, skb);
30035
30036 - atomic_inc(&vcc->stats->tx);
30037 + atomic_inc_unchecked(&vcc->stats->tx);
30038 iadev->tx_pkt_cnt++;
30039 /* Increment transaction counter */
30040 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
30041
30042 #if 0
30043 /* add flow control logic */
30044 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
30045 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
30046 if (iavcc->vc_desc_cnt > 10) {
30047 vcc->tx_quota = vcc->tx_quota * 3 / 4;
30048 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
30049 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
30050 index cf97c34..8d30655 100644
30051 --- a/drivers/atm/lanai.c
30052 +++ b/drivers/atm/lanai.c
30053 @@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
30054 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
30055 lanai_endtx(lanai, lvcc);
30056 lanai_free_skb(lvcc->tx.atmvcc, skb);
30057 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
30058 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
30059 }
30060
30061 /* Try to fill the buffer - don't call unless there is backlog */
30062 @@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
30063 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
30064 __net_timestamp(skb);
30065 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
30066 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
30067 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
30068 out:
30069 lvcc->rx.buf.ptr = end;
30070 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
30071 @@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30072 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
30073 "vcc %d\n", lanai->number, (unsigned int) s, vci);
30074 lanai->stats.service_rxnotaal5++;
30075 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30076 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30077 return 0;
30078 }
30079 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
30080 @@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30081 int bytes;
30082 read_unlock(&vcc_sklist_lock);
30083 DPRINTK("got trashed rx pdu on vci %d\n", vci);
30084 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30085 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30086 lvcc->stats.x.aal5.service_trash++;
30087 bytes = (SERVICE_GET_END(s) * 16) -
30088 (((unsigned long) lvcc->rx.buf.ptr) -
30089 @@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30090 }
30091 if (s & SERVICE_STREAM) {
30092 read_unlock(&vcc_sklist_lock);
30093 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30094 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30095 lvcc->stats.x.aal5.service_stream++;
30096 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
30097 "PDU on VCI %d!\n", lanai->number, vci);
30098 @@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30099 return 0;
30100 }
30101 DPRINTK("got rx crc error on vci %d\n", vci);
30102 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30103 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30104 lvcc->stats.x.aal5.service_rxcrc++;
30105 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
30106 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
30107 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
30108 index 3da804b..d3b0eed 100644
30109 --- a/drivers/atm/nicstar.c
30110 +++ b/drivers/atm/nicstar.c
30111 @@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30112 if ((vc = (vc_map *) vcc->dev_data) == NULL)
30113 {
30114 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
30115 - atomic_inc(&vcc->stats->tx_err);
30116 + atomic_inc_unchecked(&vcc->stats->tx_err);
30117 dev_kfree_skb_any(skb);
30118 return -EINVAL;
30119 }
30120 @@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30121 if (!vc->tx)
30122 {
30123 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
30124 - atomic_inc(&vcc->stats->tx_err);
30125 + atomic_inc_unchecked(&vcc->stats->tx_err);
30126 dev_kfree_skb_any(skb);
30127 return -EINVAL;
30128 }
30129 @@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30130 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
30131 {
30132 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
30133 - atomic_inc(&vcc->stats->tx_err);
30134 + atomic_inc_unchecked(&vcc->stats->tx_err);
30135 dev_kfree_skb_any(skb);
30136 return -EINVAL;
30137 }
30138 @@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30139 if (skb_shinfo(skb)->nr_frags != 0)
30140 {
30141 printk("nicstar%d: No scatter-gather yet.\n", card->index);
30142 - atomic_inc(&vcc->stats->tx_err);
30143 + atomic_inc_unchecked(&vcc->stats->tx_err);
30144 dev_kfree_skb_any(skb);
30145 return -EINVAL;
30146 }
30147 @@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30148
30149 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
30150 {
30151 - atomic_inc(&vcc->stats->tx_err);
30152 + atomic_inc_unchecked(&vcc->stats->tx_err);
30153 dev_kfree_skb_any(skb);
30154 return -EIO;
30155 }
30156 - atomic_inc(&vcc->stats->tx);
30157 + atomic_inc_unchecked(&vcc->stats->tx);
30158
30159 return 0;
30160 }
30161 @@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30162 {
30163 printk("nicstar%d: Can't allocate buffers for aal0.\n",
30164 card->index);
30165 - atomic_add(i,&vcc->stats->rx_drop);
30166 + atomic_add_unchecked(i,&vcc->stats->rx_drop);
30167 break;
30168 }
30169 if (!atm_charge(vcc, sb->truesize))
30170 {
30171 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
30172 card->index);
30173 - atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
30174 + atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
30175 dev_kfree_skb_any(sb);
30176 break;
30177 }
30178 @@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30179 ATM_SKB(sb)->vcc = vcc;
30180 __net_timestamp(sb);
30181 vcc->push(vcc, sb);
30182 - atomic_inc(&vcc->stats->rx);
30183 + atomic_inc_unchecked(&vcc->stats->rx);
30184 cell += ATM_CELL_PAYLOAD;
30185 }
30186
30187 @@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30188 if (iovb == NULL)
30189 {
30190 printk("nicstar%d: Out of iovec buffers.\n", card->index);
30191 - atomic_inc(&vcc->stats->rx_drop);
30192 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30193 recycle_rx_buf(card, skb);
30194 return;
30195 }
30196 @@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30197 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
30198 {
30199 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
30200 - atomic_inc(&vcc->stats->rx_err);
30201 + atomic_inc_unchecked(&vcc->stats->rx_err);
30202 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
30203 NS_SKB(iovb)->iovcnt = 0;
30204 iovb->len = 0;
30205 @@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30206 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
30207 card->index);
30208 which_list(card, skb);
30209 - atomic_inc(&vcc->stats->rx_err);
30210 + atomic_inc_unchecked(&vcc->stats->rx_err);
30211 recycle_rx_buf(card, skb);
30212 vc->rx_iov = NULL;
30213 recycle_iov_buf(card, iovb);
30214 @@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30215 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
30216 card->index);
30217 which_list(card, skb);
30218 - atomic_inc(&vcc->stats->rx_err);
30219 + atomic_inc_unchecked(&vcc->stats->rx_err);
30220 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30221 NS_SKB(iovb)->iovcnt);
30222 vc->rx_iov = NULL;
30223 @@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30224 printk(" - PDU size mismatch.\n");
30225 else
30226 printk(".\n");
30227 - atomic_inc(&vcc->stats->rx_err);
30228 + atomic_inc_unchecked(&vcc->stats->rx_err);
30229 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30230 NS_SKB(iovb)->iovcnt);
30231 vc->rx_iov = NULL;
30232 @@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30233 if (!atm_charge(vcc, skb->truesize))
30234 {
30235 push_rxbufs(card, skb);
30236 - atomic_inc(&vcc->stats->rx_drop);
30237 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30238 }
30239 else
30240 {
30241 @@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30242 ATM_SKB(skb)->vcc = vcc;
30243 __net_timestamp(skb);
30244 vcc->push(vcc, skb);
30245 - atomic_inc(&vcc->stats->rx);
30246 + atomic_inc_unchecked(&vcc->stats->rx);
30247 }
30248 }
30249 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
30250 @@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30251 if (!atm_charge(vcc, sb->truesize))
30252 {
30253 push_rxbufs(card, sb);
30254 - atomic_inc(&vcc->stats->rx_drop);
30255 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30256 }
30257 else
30258 {
30259 @@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30260 ATM_SKB(sb)->vcc = vcc;
30261 __net_timestamp(sb);
30262 vcc->push(vcc, sb);
30263 - atomic_inc(&vcc->stats->rx);
30264 + atomic_inc_unchecked(&vcc->stats->rx);
30265 }
30266
30267 push_rxbufs(card, skb);
30268 @@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30269 if (!atm_charge(vcc, skb->truesize))
30270 {
30271 push_rxbufs(card, skb);
30272 - atomic_inc(&vcc->stats->rx_drop);
30273 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30274 }
30275 else
30276 {
30277 @@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30278 ATM_SKB(skb)->vcc = vcc;
30279 __net_timestamp(skb);
30280 vcc->push(vcc, skb);
30281 - atomic_inc(&vcc->stats->rx);
30282 + atomic_inc_unchecked(&vcc->stats->rx);
30283 }
30284
30285 push_rxbufs(card, sb);
30286 @@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30287 if (hb == NULL)
30288 {
30289 printk("nicstar%d: Out of huge buffers.\n", card->index);
30290 - atomic_inc(&vcc->stats->rx_drop);
30291 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30292 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30293 NS_SKB(iovb)->iovcnt);
30294 vc->rx_iov = NULL;
30295 @@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30296 }
30297 else
30298 dev_kfree_skb_any(hb);
30299 - atomic_inc(&vcc->stats->rx_drop);
30300 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30301 }
30302 else
30303 {
30304 @@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30305 #endif /* NS_USE_DESTRUCTORS */
30306 __net_timestamp(hb);
30307 vcc->push(vcc, hb);
30308 - atomic_inc(&vcc->stats->rx);
30309 + atomic_inc_unchecked(&vcc->stats->rx);
30310 }
30311 }
30312
30313 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
30314 index 84c93ff..e6ed269 100644
30315 --- a/drivers/atm/solos-pci.c
30316 +++ b/drivers/atm/solos-pci.c
30317 @@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
30318 }
30319 atm_charge(vcc, skb->truesize);
30320 vcc->push(vcc, skb);
30321 - atomic_inc(&vcc->stats->rx);
30322 + atomic_inc_unchecked(&vcc->stats->rx);
30323 break;
30324
30325 case PKT_STATUS:
30326 @@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *buf)
30327 char msg[500];
30328 char item[10];
30329
30330 + pax_track_stack();
30331 +
30332 len = buf->len;
30333 for (i = 0; i < len; i++){
30334 if(i % 8 == 0)
30335 @@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_card *card)
30336 vcc = SKB_CB(oldskb)->vcc;
30337
30338 if (vcc) {
30339 - atomic_inc(&vcc->stats->tx);
30340 + atomic_inc_unchecked(&vcc->stats->tx);
30341 solos_pop(vcc, oldskb);
30342 } else
30343 dev_kfree_skb_irq(oldskb);
30344 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
30345 index 6dd3f59..ee377f3 100644
30346 --- a/drivers/atm/suni.c
30347 +++ b/drivers/atm/suni.c
30348 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
30349
30350
30351 #define ADD_LIMITED(s,v) \
30352 - atomic_add((v),&stats->s); \
30353 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
30354 + atomic_add_unchecked((v),&stats->s); \
30355 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
30356
30357
30358 static void suni_hz(unsigned long from_timer)
30359 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
30360 index fc8cb07..4a80e53 100644
30361 --- a/drivers/atm/uPD98402.c
30362 +++ b/drivers/atm/uPD98402.c
30363 @@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
30364 struct sonet_stats tmp;
30365 int error = 0;
30366
30367 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30368 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30369 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
30370 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
30371 if (zero && !error) {
30372 @@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
30373
30374
30375 #define ADD_LIMITED(s,v) \
30376 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
30377 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
30378 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30379 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
30380 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
30381 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30382
30383
30384 static void stat_event(struct atm_dev *dev)
30385 @@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev *dev)
30386 if (reason & uPD98402_INT_PFM) stat_event(dev);
30387 if (reason & uPD98402_INT_PCO) {
30388 (void) GET(PCOCR); /* clear interrupt cause */
30389 - atomic_add(GET(HECCT),
30390 + atomic_add_unchecked(GET(HECCT),
30391 &PRIV(dev)->sonet_stats.uncorr_hcs);
30392 }
30393 if ((reason & uPD98402_INT_RFO) &&
30394 @@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev *dev)
30395 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
30396 uPD98402_INT_LOS),PIMR); /* enable them */
30397 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
30398 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30399 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
30400 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
30401 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30402 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
30403 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
30404 return 0;
30405 }
30406
30407 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
30408 index 2e9635b..32927b4 100644
30409 --- a/drivers/atm/zatm.c
30410 +++ b/drivers/atm/zatm.c
30411 @@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30412 }
30413 if (!size) {
30414 dev_kfree_skb_irq(skb);
30415 - if (vcc) atomic_inc(&vcc->stats->rx_err);
30416 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
30417 continue;
30418 }
30419 if (!atm_charge(vcc,skb->truesize)) {
30420 @@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30421 skb->len = size;
30422 ATM_SKB(skb)->vcc = vcc;
30423 vcc->push(vcc,skb);
30424 - atomic_inc(&vcc->stats->rx);
30425 + atomic_inc_unchecked(&vcc->stats->rx);
30426 }
30427 zout(pos & 0xffff,MTA(mbx));
30428 #if 0 /* probably a stupid idea */
30429 @@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
30430 skb_queue_head(&zatm_vcc->backlog,skb);
30431 break;
30432 }
30433 - atomic_inc(&vcc->stats->tx);
30434 + atomic_inc_unchecked(&vcc->stats->tx);
30435 wake_up(&zatm_vcc->tx_wait);
30436 }
30437
30438 diff --git a/drivers/base/bus.c b/drivers/base/bus.c
30439 index 63c143e..fece183 100644
30440 --- a/drivers/base/bus.c
30441 +++ b/drivers/base/bus.c
30442 @@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kobject *kobj, struct attribute *attr,
30443 return ret;
30444 }
30445
30446 -static struct sysfs_ops driver_sysfs_ops = {
30447 +static const struct sysfs_ops driver_sysfs_ops = {
30448 .show = drv_attr_show,
30449 .store = drv_attr_store,
30450 };
30451 @@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
30452 return ret;
30453 }
30454
30455 -static struct sysfs_ops bus_sysfs_ops = {
30456 +static const struct sysfs_ops bus_sysfs_ops = {
30457 .show = bus_attr_show,
30458 .store = bus_attr_store,
30459 };
30460 @@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset *kset, struct kobject *kobj)
30461 return 0;
30462 }
30463
30464 -static struct kset_uevent_ops bus_uevent_ops = {
30465 +static const struct kset_uevent_ops bus_uevent_ops = {
30466 .filter = bus_uevent_filter,
30467 };
30468
30469 diff --git a/drivers/base/class.c b/drivers/base/class.c
30470 index 6e2c3b0..cb61871 100644
30471 --- a/drivers/base/class.c
30472 +++ b/drivers/base/class.c
30473 @@ -63,7 +63,7 @@ static void class_release(struct kobject *kobj)
30474 kfree(cp);
30475 }
30476
30477 -static struct sysfs_ops class_sysfs_ops = {
30478 +static const struct sysfs_ops class_sysfs_ops = {
30479 .show = class_attr_show,
30480 .store = class_attr_store,
30481 };
30482 diff --git a/drivers/base/core.c b/drivers/base/core.c
30483 index f33d768..a9358d0 100644
30484 --- a/drivers/base/core.c
30485 +++ b/drivers/base/core.c
30486 @@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
30487 return ret;
30488 }
30489
30490 -static struct sysfs_ops dev_sysfs_ops = {
30491 +static const struct sysfs_ops dev_sysfs_ops = {
30492 .show = dev_attr_show,
30493 .store = dev_attr_store,
30494 };
30495 @@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
30496 return retval;
30497 }
30498
30499 -static struct kset_uevent_ops device_uevent_ops = {
30500 +static const struct kset_uevent_ops device_uevent_ops = {
30501 .filter = dev_uevent_filter,
30502 .name = dev_uevent_name,
30503 .uevent = dev_uevent,
30504 diff --git a/drivers/base/memory.c b/drivers/base/memory.c
30505 index 989429c..2272b00 100644
30506 --- a/drivers/base/memory.c
30507 +++ b/drivers/base/memory.c
30508 @@ -44,7 +44,7 @@ static int memory_uevent(struct kset *kset, struct kobject *obj, struct kobj_uev
30509 return retval;
30510 }
30511
30512 -static struct kset_uevent_ops memory_uevent_ops = {
30513 +static const struct kset_uevent_ops memory_uevent_ops = {
30514 .name = memory_uevent_name,
30515 .uevent = memory_uevent,
30516 };
30517 diff --git a/drivers/base/sys.c b/drivers/base/sys.c
30518 index 3f202f7..61c4a6f 100644
30519 --- a/drivers/base/sys.c
30520 +++ b/drivers/base/sys.c
30521 @@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struct attribute *attr,
30522 return -EIO;
30523 }
30524
30525 -static struct sysfs_ops sysfs_ops = {
30526 +static const struct sysfs_ops sysfs_ops = {
30527 .show = sysdev_show,
30528 .store = sysdev_store,
30529 };
30530 @@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct kobject *kobj, struct attribute *attr,
30531 return -EIO;
30532 }
30533
30534 -static struct sysfs_ops sysfs_class_ops = {
30535 +static const struct sysfs_ops sysfs_class_ops = {
30536 .show = sysdev_class_show,
30537 .store = sysdev_class_store,
30538 };
30539 diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
30540 index eb4fa19..1954777 100644
30541 --- a/drivers/block/DAC960.c
30542 +++ b/drivers/block/DAC960.c
30543 @@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfiguration(DAC960_Controller_T
30544 unsigned long flags;
30545 int Channel, TargetID;
30546
30547 + pax_track_stack();
30548 +
30549 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
30550 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
30551 sizeof(DAC960_SCSI_Inquiry_T) +
30552 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
30553 index 68b90d9..7e2e3f3 100644
30554 --- a/drivers/block/cciss.c
30555 +++ b/drivers/block/cciss.c
30556 @@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
30557 int err;
30558 u32 cp;
30559
30560 + memset(&arg64, 0, sizeof(arg64));
30561 +
30562 err = 0;
30563 err |=
30564 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
30565 @@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ctlr)
30566 /* Wait (up to 20 seconds) for a command to complete */
30567
30568 for (i = 20 * HZ; i > 0; i--) {
30569 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
30570 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
30571 if (done == FIFO_EMPTY)
30572 schedule_timeout_uninterruptible(1);
30573 else
30574 @@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h, CommandList_struct *c)
30575 resend_cmd1:
30576
30577 /* Disable interrupt on the board. */
30578 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
30579 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
30580
30581 /* Make sure there is room in the command FIFO */
30582 /* Actually it should be completely empty at this time */
30583 @@ -2884,13 +2886,13 @@ resend_cmd1:
30584 /* tape side of the driver. */
30585 for (i = 200000; i > 0; i--) {
30586 /* if fifo isn't full go */
30587 - if (!(h->access.fifo_full(h)))
30588 + if (!(h->access->fifo_full(h)))
30589 break;
30590 udelay(10);
30591 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
30592 " waiting!\n", h->ctlr);
30593 }
30594 - h->access.submit_command(h, c); /* Send the cmd */
30595 + h->access->submit_command(h, c); /* Send the cmd */
30596 do {
30597 complete = pollcomplete(h->ctlr);
30598
30599 @@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
30600 while (!hlist_empty(&h->reqQ)) {
30601 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
30602 /* can't do anything if fifo is full */
30603 - if ((h->access.fifo_full(h))) {
30604 + if ((h->access->fifo_full(h))) {
30605 printk(KERN_WARNING "cciss: fifo full\n");
30606 break;
30607 }
30608 @@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
30609 h->Qdepth--;
30610
30611 /* Tell the controller execute command */
30612 - h->access.submit_command(h, c);
30613 + h->access->submit_command(h, c);
30614
30615 /* Put job onto the completed Q */
30616 addQ(&h->cmpQ, c);
30617 @@ -3393,17 +3395,17 @@ startio:
30618
30619 static inline unsigned long get_next_completion(ctlr_info_t *h)
30620 {
30621 - return h->access.command_completed(h);
30622 + return h->access->command_completed(h);
30623 }
30624
30625 static inline int interrupt_pending(ctlr_info_t *h)
30626 {
30627 - return h->access.intr_pending(h);
30628 + return h->access->intr_pending(h);
30629 }
30630
30631 static inline long interrupt_not_for_us(ctlr_info_t *h)
30632 {
30633 - return (((h->access.intr_pending(h) == 0) ||
30634 + return (((h->access->intr_pending(h) == 0) ||
30635 (h->interrupts_enabled == 0)));
30636 }
30637
30638 @@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
30639 */
30640 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
30641 c->product_name = products[prod_index].product_name;
30642 - c->access = *(products[prod_index].access);
30643 + c->access = products[prod_index].access;
30644 c->nr_cmds = c->max_commands - 4;
30645 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
30646 (readb(&c->cfgtable->Signature[1]) != 'I') ||
30647 @@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
30648 }
30649
30650 /* make sure the board interrupts are off */
30651 - hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
30652 + hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
30653 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
30654 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
30655 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
30656 @@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
30657 cciss_scsi_setup(i);
30658
30659 /* Turn the interrupts on so we can service requests */
30660 - hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
30661 + hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
30662
30663 /* Get the firmware version */
30664 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
30665 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
30666 index 04d6bf8..36e712d 100644
30667 --- a/drivers/block/cciss.h
30668 +++ b/drivers/block/cciss.h
30669 @@ -90,7 +90,7 @@ struct ctlr_info
30670 // information about each logical volume
30671 drive_info_struct *drv[CISS_MAX_LUN];
30672
30673 - struct access_method access;
30674 + struct access_method *access;
30675
30676 /* queue and queue Info */
30677 struct hlist_head reqQ;
30678 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
30679 index 6422651..bb1bdef 100644
30680 --- a/drivers/block/cpqarray.c
30681 +++ b/drivers/block/cpqarray.c
30682 @@ -402,7 +402,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
30683 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
30684 goto Enomem4;
30685 }
30686 - hba[i]->access.set_intr_mask(hba[i], 0);
30687 + hba[i]->access->set_intr_mask(hba[i], 0);
30688 if (request_irq(hba[i]->intr, do_ida_intr,
30689 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
30690 {
30691 @@ -460,7 +460,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
30692 add_timer(&hba[i]->timer);
30693
30694 /* Enable IRQ now that spinlock and rate limit timer are set up */
30695 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
30696 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
30697
30698 for(j=0; j<NWD; j++) {
30699 struct gendisk *disk = ida_gendisk[i][j];
30700 @@ -695,7 +695,7 @@ DBGINFO(
30701 for(i=0; i<NR_PRODUCTS; i++) {
30702 if (board_id == products[i].board_id) {
30703 c->product_name = products[i].product_name;
30704 - c->access = *(products[i].access);
30705 + c->access = products[i].access;
30706 break;
30707 }
30708 }
30709 @@ -793,7 +793,7 @@ static int __init cpqarray_eisa_detect(void)
30710 hba[ctlr]->intr = intr;
30711 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
30712 hba[ctlr]->product_name = products[j].product_name;
30713 - hba[ctlr]->access = *(products[j].access);
30714 + hba[ctlr]->access = products[j].access;
30715 hba[ctlr]->ctlr = ctlr;
30716 hba[ctlr]->board_id = board_id;
30717 hba[ctlr]->pci_dev = NULL; /* not PCI */
30718 @@ -896,6 +896,8 @@ static void do_ida_request(struct request_queue *q)
30719 struct scatterlist tmp_sg[SG_MAX];
30720 int i, dir, seg;
30721
30722 + pax_track_stack();
30723 +
30724 if (blk_queue_plugged(q))
30725 goto startio;
30726
30727 @@ -968,7 +970,7 @@ static void start_io(ctlr_info_t *h)
30728
30729 while((c = h->reqQ) != NULL) {
30730 /* Can't do anything if we're busy */
30731 - if (h->access.fifo_full(h) == 0)
30732 + if (h->access->fifo_full(h) == 0)
30733 return;
30734
30735 /* Get the first entry from the request Q */
30736 @@ -976,7 +978,7 @@ static void start_io(ctlr_info_t *h)
30737 h->Qdepth--;
30738
30739 /* Tell the controller to do our bidding */
30740 - h->access.submit_command(h, c);
30741 + h->access->submit_command(h, c);
30742
30743 /* Get onto the completion Q */
30744 addQ(&h->cmpQ, c);
30745 @@ -1038,7 +1040,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
30746 unsigned long flags;
30747 __u32 a,a1;
30748
30749 - istat = h->access.intr_pending(h);
30750 + istat = h->access->intr_pending(h);
30751 /* Is this interrupt for us? */
30752 if (istat == 0)
30753 return IRQ_NONE;
30754 @@ -1049,7 +1051,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
30755 */
30756 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
30757 if (istat & FIFO_NOT_EMPTY) {
30758 - while((a = h->access.command_completed(h))) {
30759 + while((a = h->access->command_completed(h))) {
30760 a1 = a; a &= ~3;
30761 if ((c = h->cmpQ) == NULL)
30762 {
30763 @@ -1434,11 +1436,11 @@ static int sendcmd(
30764 /*
30765 * Disable interrupt
30766 */
30767 - info_p->access.set_intr_mask(info_p, 0);
30768 + info_p->access->set_intr_mask(info_p, 0);
30769 /* Make sure there is room in the command FIFO */
30770 /* Actually it should be completely empty at this time. */
30771 for (i = 200000; i > 0; i--) {
30772 - temp = info_p->access.fifo_full(info_p);
30773 + temp = info_p->access->fifo_full(info_p);
30774 if (temp != 0) {
30775 break;
30776 }
30777 @@ -1451,7 +1453,7 @@ DBG(
30778 /*
30779 * Send the cmd
30780 */
30781 - info_p->access.submit_command(info_p, c);
30782 + info_p->access->submit_command(info_p, c);
30783 complete = pollcomplete(ctlr);
30784
30785 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
30786 @@ -1534,9 +1536,9 @@ static int revalidate_allvol(ctlr_info_t *host)
30787 * we check the new geometry. Then turn interrupts back on when
30788 * we're done.
30789 */
30790 - host->access.set_intr_mask(host, 0);
30791 + host->access->set_intr_mask(host, 0);
30792 getgeometry(ctlr);
30793 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
30794 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
30795
30796 for(i=0; i<NWD; i++) {
30797 struct gendisk *disk = ida_gendisk[ctlr][i];
30798 @@ -1576,7 +1578,7 @@ static int pollcomplete(int ctlr)
30799 /* Wait (up to 2 seconds) for a command to complete */
30800
30801 for (i = 200000; i > 0; i--) {
30802 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
30803 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
30804 if (done == 0) {
30805 udelay(10); /* a short fixed delay */
30806 } else
30807 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
30808 index be73e9d..7fbf140 100644
30809 --- a/drivers/block/cpqarray.h
30810 +++ b/drivers/block/cpqarray.h
30811 @@ -99,7 +99,7 @@ struct ctlr_info {
30812 drv_info_t drv[NWD];
30813 struct proc_dir_entry *proc;
30814
30815 - struct access_method access;
30816 + struct access_method *access;
30817
30818 cmdlist_t *reqQ;
30819 cmdlist_t *cmpQ;
30820 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
30821 index 8ec2d70..2804b30 100644
30822 --- a/drivers/block/loop.c
30823 +++ b/drivers/block/loop.c
30824 @@ -282,7 +282,7 @@ static int __do_lo_send_write(struct file *file,
30825 mm_segment_t old_fs = get_fs();
30826
30827 set_fs(get_ds());
30828 - bw = file->f_op->write(file, buf, len, &pos);
30829 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
30830 set_fs(old_fs);
30831 if (likely(bw == len))
30832 return 0;
30833 diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
30834 index 26ada47..083c480 100644
30835 --- a/drivers/block/nbd.c
30836 +++ b/drivers/block/nbd.c
30837 @@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
30838 struct kvec iov;
30839 sigset_t blocked, oldset;
30840
30841 + pax_track_stack();
30842 +
30843 if (unlikely(!sock)) {
30844 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
30845 lo->disk->disk_name, (send ? "send" : "recv"));
30846 @@ -569,6 +571,8 @@ static void do_nbd_request(struct request_queue *q)
30847 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
30848 unsigned int cmd, unsigned long arg)
30849 {
30850 + pax_track_stack();
30851 +
30852 switch (cmd) {
30853 case NBD_DISCONNECT: {
30854 struct request sreq;
30855 diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
30856 index a5d585d..d087be3 100644
30857 --- a/drivers/block/pktcdvd.c
30858 +++ b/drivers/block/pktcdvd.c
30859 @@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kobject *kobj,
30860 return len;
30861 }
30862
30863 -static struct sysfs_ops kobj_pkt_ops = {
30864 +static const struct sysfs_ops kobj_pkt_ops = {
30865 .show = kobj_pkt_show,
30866 .store = kobj_pkt_store
30867 };
30868 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
30869 index 6aad99e..89cd142 100644
30870 --- a/drivers/char/Kconfig
30871 +++ b/drivers/char/Kconfig
30872 @@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
30873
30874 config DEVKMEM
30875 bool "/dev/kmem virtual device support"
30876 - default y
30877 + default n
30878 + depends on !GRKERNSEC_KMEM
30879 help
30880 Say Y here if you want to support the /dev/kmem device. The
30881 /dev/kmem device is rarely used, but can be used for certain
30882 @@ -1114,6 +1115,7 @@ config DEVPORT
30883 bool
30884 depends on !M68K
30885 depends on ISA || PCI
30886 + depends on !GRKERNSEC_KMEM
30887 default y
30888
30889 source "drivers/s390/char/Kconfig"
30890 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
30891 index a96f319..a778a5b 100644
30892 --- a/drivers/char/agp/frontend.c
30893 +++ b/drivers/char/agp/frontend.c
30894 @@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
30895 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
30896 return -EFAULT;
30897
30898 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
30899 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
30900 return -EFAULT;
30901
30902 client = agp_find_client_by_pid(reserve.pid);
30903 diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
30904 index d8cff90..9628e70 100644
30905 --- a/drivers/char/briq_panel.c
30906 +++ b/drivers/char/briq_panel.c
30907 @@ -10,6 +10,7 @@
30908 #include <linux/types.h>
30909 #include <linux/errno.h>
30910 #include <linux/tty.h>
30911 +#include <linux/mutex.h>
30912 #include <linux/timer.h>
30913 #include <linux/kernel.h>
30914 #include <linux/wait.h>
30915 @@ -36,6 +37,7 @@ static int vfd_is_open;
30916 static unsigned char vfd[40];
30917 static int vfd_cursor;
30918 static unsigned char ledpb, led;
30919 +static DEFINE_MUTEX(vfd_mutex);
30920
30921 static void update_vfd(void)
30922 {
30923 @@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
30924 if (!vfd_is_open)
30925 return -EBUSY;
30926
30927 + mutex_lock(&vfd_mutex);
30928 for (;;) {
30929 char c;
30930 if (!indx)
30931 break;
30932 - if (get_user(c, buf))
30933 + if (get_user(c, buf)) {
30934 + mutex_unlock(&vfd_mutex);
30935 return -EFAULT;
30936 + }
30937 if (esc) {
30938 set_led(c);
30939 esc = 0;
30940 @@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
30941 buf++;
30942 }
30943 update_vfd();
30944 + mutex_unlock(&vfd_mutex);
30945
30946 return len;
30947 }
30948 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
30949 index 31e7c91..161afc0 100644
30950 --- a/drivers/char/genrtc.c
30951 +++ b/drivers/char/genrtc.c
30952 @@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *inode, struct file *file,
30953 switch (cmd) {
30954
30955 case RTC_PLL_GET:
30956 + memset(&pll, 0, sizeof(pll));
30957 if (get_rtc_pll(&pll))
30958 return -EINVAL;
30959 else
30960 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
30961 index 006466d..a2bb21c 100644
30962 --- a/drivers/char/hpet.c
30963 +++ b/drivers/char/hpet.c
30964 @@ -430,7 +430,7 @@ static int hpet_release(struct inode *inode, struct file *file)
30965 return 0;
30966 }
30967
30968 -static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
30969 +static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
30970
30971 static int
30972 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
30973 @@ -565,7 +565,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
30974 }
30975
30976 static int
30977 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
30978 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
30979 {
30980 struct hpet_timer __iomem *timer;
30981 struct hpet __iomem *hpet;
30982 @@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
30983 {
30984 struct hpet_info info;
30985
30986 + memset(&info, 0, sizeof(info));
30987 +
30988 if (devp->hd_ireqfreq)
30989 info.hi_ireqfreq =
30990 hpet_time_div(hpetp, devp->hd_ireqfreq);
30991 - else
30992 - info.hi_ireqfreq = 0;
30993 info.hi_flags =
30994 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
30995 info.hi_hpet = hpetp->hp_which;
30996 diff --git a/drivers/char/hvc_beat.c b/drivers/char/hvc_beat.c
30997 index 0afc8b8..6913fc3 100644
30998 --- a/drivers/char/hvc_beat.c
30999 +++ b/drivers/char/hvc_beat.c
31000 @@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t vtermno, const char *buf, int cnt)
31001 return cnt;
31002 }
31003
31004 -static struct hv_ops hvc_beat_get_put_ops = {
31005 +static const struct hv_ops hvc_beat_get_put_ops = {
31006 .get_chars = hvc_beat_get_chars,
31007 .put_chars = hvc_beat_put_chars,
31008 };
31009 diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
31010 index 98097f2..407dddc 100644
31011 --- a/drivers/char/hvc_console.c
31012 +++ b/drivers/char/hvc_console.c
31013 @@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_index(int index)
31014 * console interfaces but can still be used as a tty device. This has to be
31015 * static because kmalloc will not work during early console init.
31016 */
31017 -static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
31018 +static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
31019 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
31020 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
31021
31022 @@ -249,7 +249,7 @@ static void destroy_hvc_struct(struct kref *kref)
31023 * vty adapters do NOT get an hvc_instantiate() callback since they
31024 * appear after early console init.
31025 */
31026 -int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
31027 +int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
31028 {
31029 struct hvc_struct *hp;
31030
31031 @@ -758,7 +758,7 @@ static const struct tty_operations hvc_ops = {
31032 };
31033
31034 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
31035 - struct hv_ops *ops, int outbuf_size)
31036 + const struct hv_ops *ops, int outbuf_size)
31037 {
31038 struct hvc_struct *hp;
31039 int i;
31040 diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
31041 index 10950ca..ed176c3 100644
31042 --- a/drivers/char/hvc_console.h
31043 +++ b/drivers/char/hvc_console.h
31044 @@ -55,7 +55,7 @@ struct hvc_struct {
31045 int outbuf_size;
31046 int n_outbuf;
31047 uint32_t vtermno;
31048 - struct hv_ops *ops;
31049 + const struct hv_ops *ops;
31050 int irq_requested;
31051 int data;
31052 struct winsize ws;
31053 @@ -76,11 +76,11 @@ struct hv_ops {
31054 };
31055
31056 /* Register a vterm and a slot index for use as a console (console_init) */
31057 -extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
31058 +extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
31059
31060 /* register a vterm for hvc tty operation (module_init or hotplug add) */
31061 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
31062 - struct hv_ops *ops, int outbuf_size);
31063 + const struct hv_ops *ops, int outbuf_size);
31064 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
31065 extern int hvc_remove(struct hvc_struct *hp);
31066
31067 diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c
31068 index 936d05b..fd02426 100644
31069 --- a/drivers/char/hvc_iseries.c
31070 +++ b/drivers/char/hvc_iseries.c
31071 @@ -197,7 +197,7 @@ done:
31072 return sent;
31073 }
31074
31075 -static struct hv_ops hvc_get_put_ops = {
31076 +static const struct hv_ops hvc_get_put_ops = {
31077 .get_chars = get_chars,
31078 .put_chars = put_chars,
31079 .notifier_add = notifier_add_irq,
31080 diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
31081 index b0e168f..69cda2a 100644
31082 --- a/drivers/char/hvc_iucv.c
31083 +++ b/drivers/char/hvc_iucv.c
31084 @@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(struct device *dev)
31085
31086
31087 /* HVC operations */
31088 -static struct hv_ops hvc_iucv_ops = {
31089 +static const struct hv_ops hvc_iucv_ops = {
31090 .get_chars = hvc_iucv_get_chars,
31091 .put_chars = hvc_iucv_put_chars,
31092 .notifier_add = hvc_iucv_notifier_add,
31093 diff --git a/drivers/char/hvc_rtas.c b/drivers/char/hvc_rtas.c
31094 index 88590d0..61c4a61 100644
31095 --- a/drivers/char/hvc_rtas.c
31096 +++ b/drivers/char/hvc_rtas.c
31097 @@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_t vtermno, char *buf, int count)
31098 return i;
31099 }
31100
31101 -static struct hv_ops hvc_rtas_get_put_ops = {
31102 +static const struct hv_ops hvc_rtas_get_put_ops = {
31103 .get_chars = hvc_rtas_read_console,
31104 .put_chars = hvc_rtas_write_console,
31105 };
31106 diff --git a/drivers/char/hvc_udbg.c b/drivers/char/hvc_udbg.c
31107 index bd63ba8..b0957e6 100644
31108 --- a/drivers/char/hvc_udbg.c
31109 +++ b/drivers/char/hvc_udbg.c
31110 @@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno, char *buf, int count)
31111 return i;
31112 }
31113
31114 -static struct hv_ops hvc_udbg_ops = {
31115 +static const struct hv_ops hvc_udbg_ops = {
31116 .get_chars = hvc_udbg_get,
31117 .put_chars = hvc_udbg_put,
31118 };
31119 diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
31120 index 10be343..27370e9 100644
31121 --- a/drivers/char/hvc_vio.c
31122 +++ b/drivers/char/hvc_vio.c
31123 @@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t vtermno, char *buf, int count)
31124 return got;
31125 }
31126
31127 -static struct hv_ops hvc_get_put_ops = {
31128 +static const struct hv_ops hvc_get_put_ops = {
31129 .get_chars = filtered_get_chars,
31130 .put_chars = hvc_put_chars,
31131 .notifier_add = notifier_add_irq,
31132 diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
31133 index a6ee32b..94f8c26 100644
31134 --- a/drivers/char/hvc_xen.c
31135 +++ b/drivers/char/hvc_xen.c
31136 @@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno, char *buf, int len)
31137 return recv;
31138 }
31139
31140 -static struct hv_ops hvc_ops = {
31141 +static const struct hv_ops hvc_ops = {
31142 .get_chars = read_console,
31143 .put_chars = write_console,
31144 .notifier_add = notifier_add_irq,
31145 diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
31146 index 266b858..f3ee0bb 100644
31147 --- a/drivers/char/hvcs.c
31148 +++ b/drivers/char/hvcs.c
31149 @@ -82,6 +82,7 @@
31150 #include <asm/hvcserver.h>
31151 #include <asm/uaccess.h>
31152 #include <asm/vio.h>
31153 +#include <asm/local.h>
31154
31155 /*
31156 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
31157 @@ -269,7 +270,7 @@ struct hvcs_struct {
31158 unsigned int index;
31159
31160 struct tty_struct *tty;
31161 - int open_count;
31162 + local_t open_count;
31163
31164 /*
31165 * Used to tell the driver kernel_thread what operations need to take
31166 @@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
31167
31168 spin_lock_irqsave(&hvcsd->lock, flags);
31169
31170 - if (hvcsd->open_count > 0) {
31171 + if (local_read(&hvcsd->open_count) > 0) {
31172 spin_unlock_irqrestore(&hvcsd->lock, flags);
31173 printk(KERN_INFO "HVCS: vterm state unchanged. "
31174 "The hvcs device node is still in use.\n");
31175 @@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
31176 if ((retval = hvcs_partner_connect(hvcsd)))
31177 goto error_release;
31178
31179 - hvcsd->open_count = 1;
31180 + local_set(&hvcsd->open_count, 1);
31181 hvcsd->tty = tty;
31182 tty->driver_data = hvcsd;
31183
31184 @@ -1169,7 +1170,7 @@ fast_open:
31185
31186 spin_lock_irqsave(&hvcsd->lock, flags);
31187 kref_get(&hvcsd->kref);
31188 - hvcsd->open_count++;
31189 + local_inc(&hvcsd->open_count);
31190 hvcsd->todo_mask |= HVCS_SCHED_READ;
31191 spin_unlock_irqrestore(&hvcsd->lock, flags);
31192
31193 @@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
31194 hvcsd = tty->driver_data;
31195
31196 spin_lock_irqsave(&hvcsd->lock, flags);
31197 - if (--hvcsd->open_count == 0) {
31198 + if (local_dec_and_test(&hvcsd->open_count)) {
31199
31200 vio_disable_interrupts(hvcsd->vdev);
31201
31202 @@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
31203 free_irq(irq, hvcsd);
31204 kref_put(&hvcsd->kref, destroy_hvcs_struct);
31205 return;
31206 - } else if (hvcsd->open_count < 0) {
31207 + } else if (local_read(&hvcsd->open_count) < 0) {
31208 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
31209 " is missmanaged.\n",
31210 - hvcsd->vdev->unit_address, hvcsd->open_count);
31211 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
31212 }
31213
31214 spin_unlock_irqrestore(&hvcsd->lock, flags);
31215 @@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struct * tty)
31216
31217 spin_lock_irqsave(&hvcsd->lock, flags);
31218 /* Preserve this so that we know how many kref refs to put */
31219 - temp_open_count = hvcsd->open_count;
31220 + temp_open_count = local_read(&hvcsd->open_count);
31221
31222 /*
31223 * Don't kref put inside the spinlock because the destruction
31224 @@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struct * tty)
31225 hvcsd->tty->driver_data = NULL;
31226 hvcsd->tty = NULL;
31227
31228 - hvcsd->open_count = 0;
31229 + local_set(&hvcsd->open_count, 0);
31230
31231 /* This will drop any buffered data on the floor which is OK in a hangup
31232 * scenario. */
31233 @@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct *tty,
31234 * the middle of a write operation? This is a crummy place to do this
31235 * but we want to keep it all in the spinlock.
31236 */
31237 - if (hvcsd->open_count <= 0) {
31238 + if (local_read(&hvcsd->open_count) <= 0) {
31239 spin_unlock_irqrestore(&hvcsd->lock, flags);
31240 return -ENODEV;
31241 }
31242 @@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_struct *tty)
31243 {
31244 struct hvcs_struct *hvcsd = tty->driver_data;
31245
31246 - if (!hvcsd || hvcsd->open_count <= 0)
31247 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
31248 return 0;
31249
31250 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
31251 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
31252 index ec5e3f8..02455ba 100644
31253 --- a/drivers/char/ipmi/ipmi_msghandler.c
31254 +++ b/drivers/char/ipmi/ipmi_msghandler.c
31255 @@ -414,7 +414,7 @@ struct ipmi_smi {
31256 struct proc_dir_entry *proc_dir;
31257 char proc_dir_name[10];
31258
31259 - atomic_t stats[IPMI_NUM_STATS];
31260 + atomic_unchecked_t stats[IPMI_NUM_STATS];
31261
31262 /*
31263 * run_to_completion duplicate of smb_info, smi_info
31264 @@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
31265
31266
31267 #define ipmi_inc_stat(intf, stat) \
31268 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
31269 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
31270 #define ipmi_get_stat(intf, stat) \
31271 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
31272 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
31273
31274 static int is_lan_addr(struct ipmi_addr *addr)
31275 {
31276 @@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
31277 INIT_LIST_HEAD(&intf->cmd_rcvrs);
31278 init_waitqueue_head(&intf->waitq);
31279 for (i = 0; i < IPMI_NUM_STATS; i++)
31280 - atomic_set(&intf->stats[i], 0);
31281 + atomic_set_unchecked(&intf->stats[i], 0);
31282
31283 intf->proc_dir = NULL;
31284
31285 @@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
31286 struct ipmi_smi_msg smi_msg;
31287 struct ipmi_recv_msg recv_msg;
31288
31289 + pax_track_stack();
31290 +
31291 si = (struct ipmi_system_interface_addr *) &addr;
31292 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
31293 si->channel = IPMI_BMC_CHANNEL;
31294 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
31295 index abae8c9..8021979 100644
31296 --- a/drivers/char/ipmi/ipmi_si_intf.c
31297 +++ b/drivers/char/ipmi/ipmi_si_intf.c
31298 @@ -277,7 +277,7 @@ struct smi_info {
31299 unsigned char slave_addr;
31300
31301 /* Counters and things for the proc filesystem. */
31302 - atomic_t stats[SI_NUM_STATS];
31303 + atomic_unchecked_t stats[SI_NUM_STATS];
31304
31305 struct task_struct *thread;
31306
31307 @@ -285,9 +285,9 @@ struct smi_info {
31308 };
31309
31310 #define smi_inc_stat(smi, stat) \
31311 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
31312 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
31313 #define smi_get_stat(smi, stat) \
31314 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
31315 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
31316
31317 #define SI_MAX_PARMS 4
31318
31319 @@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info *new_smi)
31320 atomic_set(&new_smi->req_events, 0);
31321 new_smi->run_to_completion = 0;
31322 for (i = 0; i < SI_NUM_STATS; i++)
31323 - atomic_set(&new_smi->stats[i], 0);
31324 + atomic_set_unchecked(&new_smi->stats[i], 0);
31325
31326 new_smi->interrupt_disabled = 0;
31327 atomic_set(&new_smi->stop_operation, 0);
31328 diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
31329 index 402838f..55e2200 100644
31330 --- a/drivers/char/istallion.c
31331 +++ b/drivers/char/istallion.c
31332 @@ -187,7 +187,6 @@ static struct ktermios stli_deftermios = {
31333 * re-used for each stats call.
31334 */
31335 static comstats_t stli_comstats;
31336 -static combrd_t stli_brdstats;
31337 static struct asystats stli_cdkstats;
31338
31339 /*****************************************************************************/
31340 @@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __user *bp)
31341 {
31342 struct stlibrd *brdp;
31343 unsigned int i;
31344 + combrd_t stli_brdstats;
31345
31346 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
31347 return -EFAULT;
31348 @@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stliport __user *arg)
31349 struct stliport stli_dummyport;
31350 struct stliport *portp;
31351
31352 + pax_track_stack();
31353 +
31354 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
31355 return -EFAULT;
31356 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
31357 @@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stlibrd __user *arg)
31358 struct stlibrd stli_dummybrd;
31359 struct stlibrd *brdp;
31360
31361 + pax_track_stack();
31362 +
31363 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
31364 return -EFAULT;
31365 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
31366 diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
31367 index 950837c..e55a288 100644
31368 --- a/drivers/char/keyboard.c
31369 +++ b/drivers/char/keyboard.c
31370 @@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
31371 kbd->kbdmode == VC_MEDIUMRAW) &&
31372 value != KVAL(K_SAK))
31373 return; /* SAK is allowed even in raw mode */
31374 +
31375 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
31376 + {
31377 + void *func = fn_handler[value];
31378 + if (func == fn_show_state || func == fn_show_ptregs ||
31379 + func == fn_show_mem)
31380 + return;
31381 + }
31382 +#endif
31383 +
31384 fn_handler[value](vc);
31385 }
31386
31387 @@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_ids[] = {
31388 .evbit = { BIT_MASK(EV_SND) },
31389 },
31390
31391 - { }, /* Terminating entry */
31392 + { 0 }, /* Terminating entry */
31393 };
31394
31395 MODULE_DEVICE_TABLE(input, kbd_ids);
31396 diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
31397 index 87c67b4..230527a 100644
31398 --- a/drivers/char/mbcs.c
31399 +++ b/drivers/char/mbcs.c
31400 @@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
31401 return 0;
31402 }
31403
31404 -static const struct cx_device_id __devinitdata mbcs_id_table[] = {
31405 +static const struct cx_device_id __devinitconst mbcs_id_table[] = {
31406 {
31407 .part_num = MBCS_PART_NUM,
31408 .mfg_num = MBCS_MFG_NUM,
31409 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
31410 index 1270f64..8495f49 100644
31411 --- a/drivers/char/mem.c
31412 +++ b/drivers/char/mem.c
31413 @@ -18,6 +18,7 @@
31414 #include <linux/raw.h>
31415 #include <linux/tty.h>
31416 #include <linux/capability.h>
31417 +#include <linux/security.h>
31418 #include <linux/ptrace.h>
31419 #include <linux/device.h>
31420 #include <linux/highmem.h>
31421 @@ -35,6 +36,10 @@
31422 # include <linux/efi.h>
31423 #endif
31424
31425 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31426 +extern struct file_operations grsec_fops;
31427 +#endif
31428 +
31429 static inline unsigned long size_inside_page(unsigned long start,
31430 unsigned long size)
31431 {
31432 @@ -102,9 +107,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31433
31434 while (cursor < to) {
31435 if (!devmem_is_allowed(pfn)) {
31436 +#ifdef CONFIG_GRKERNSEC_KMEM
31437 + gr_handle_mem_readwrite(from, to);
31438 +#else
31439 printk(KERN_INFO
31440 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
31441 current->comm, from, to);
31442 +#endif
31443 return 0;
31444 }
31445 cursor += PAGE_SIZE;
31446 @@ -112,6 +121,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31447 }
31448 return 1;
31449 }
31450 +#elif defined(CONFIG_GRKERNSEC_KMEM)
31451 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31452 +{
31453 + return 0;
31454 +}
31455 #else
31456 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31457 {
31458 @@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * file, char __user * buf,
31459 #endif
31460
31461 while (count > 0) {
31462 + char *temp;
31463 +
31464 /*
31465 * Handle first page in case it's not aligned
31466 */
31467 @@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * file, char __user * buf,
31468 if (!ptr)
31469 return -EFAULT;
31470
31471 - if (copy_to_user(buf, ptr, sz)) {
31472 +#ifdef CONFIG_PAX_USERCOPY
31473 + temp = kmalloc(sz, GFP_KERNEL);
31474 + if (!temp) {
31475 + unxlate_dev_mem_ptr(p, ptr);
31476 + return -ENOMEM;
31477 + }
31478 + memcpy(temp, ptr, sz);
31479 +#else
31480 + temp = ptr;
31481 +#endif
31482 +
31483 + if (copy_to_user(buf, temp, sz)) {
31484 +
31485 +#ifdef CONFIG_PAX_USERCOPY
31486 + kfree(temp);
31487 +#endif
31488 +
31489 unxlate_dev_mem_ptr(p, ptr);
31490 return -EFAULT;
31491 }
31492
31493 +#ifdef CONFIG_PAX_USERCOPY
31494 + kfree(temp);
31495 +#endif
31496 +
31497 unxlate_dev_mem_ptr(p, ptr);
31498
31499 buf += sz;
31500 @@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31501 size_t count, loff_t *ppos)
31502 {
31503 unsigned long p = *ppos;
31504 - ssize_t low_count, read, sz;
31505 + ssize_t low_count, read, sz, err = 0;
31506 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
31507 - int err = 0;
31508
31509 read = 0;
31510 if (p < (unsigned long) high_memory) {
31511 @@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31512 }
31513 #endif
31514 while (low_count > 0) {
31515 + char *temp;
31516 +
31517 sz = size_inside_page(p, low_count);
31518
31519 /*
31520 @@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31521 */
31522 kbuf = xlate_dev_kmem_ptr((char *)p);
31523
31524 - if (copy_to_user(buf, kbuf, sz))
31525 +#ifdef CONFIG_PAX_USERCOPY
31526 + temp = kmalloc(sz, GFP_KERNEL);
31527 + if (!temp)
31528 + return -ENOMEM;
31529 + memcpy(temp, kbuf, sz);
31530 +#else
31531 + temp = kbuf;
31532 +#endif
31533 +
31534 + err = copy_to_user(buf, temp, sz);
31535 +
31536 +#ifdef CONFIG_PAX_USERCOPY
31537 + kfree(temp);
31538 +#endif
31539 +
31540 + if (err)
31541 return -EFAULT;
31542 buf += sz;
31543 p += sz;
31544 @@ -889,6 +941,9 @@ static const struct memdev {
31545 #ifdef CONFIG_CRASH_DUMP
31546 [12] = { "oldmem", 0, &oldmem_fops, NULL },
31547 #endif
31548 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31549 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
31550 +#endif
31551 };
31552
31553 static int memory_open(struct inode *inode, struct file *filp)
31554 diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c
31555 index 674b3ab..a8d1970 100644
31556 --- a/drivers/char/pcmcia/ipwireless/tty.c
31557 +++ b/drivers/char/pcmcia/ipwireless/tty.c
31558 @@ -29,6 +29,7 @@
31559 #include <linux/tty_driver.h>
31560 #include <linux/tty_flip.h>
31561 #include <linux/uaccess.h>
31562 +#include <asm/local.h>
31563
31564 #include "tty.h"
31565 #include "network.h"
31566 @@ -51,7 +52,7 @@ struct ipw_tty {
31567 int tty_type;
31568 struct ipw_network *network;
31569 struct tty_struct *linux_tty;
31570 - int open_count;
31571 + local_t open_count;
31572 unsigned int control_lines;
31573 struct mutex ipw_tty_mutex;
31574 int tx_bytes_queued;
31575 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
31576 mutex_unlock(&tty->ipw_tty_mutex);
31577 return -ENODEV;
31578 }
31579 - if (tty->open_count == 0)
31580 + if (local_read(&tty->open_count) == 0)
31581 tty->tx_bytes_queued = 0;
31582
31583 - tty->open_count++;
31584 + local_inc(&tty->open_count);
31585
31586 tty->linux_tty = linux_tty;
31587 linux_tty->driver_data = tty;
31588 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
31589
31590 static void do_ipw_close(struct ipw_tty *tty)
31591 {
31592 - tty->open_count--;
31593 -
31594 - if (tty->open_count == 0) {
31595 + if (local_dec_return(&tty->open_count) == 0) {
31596 struct tty_struct *linux_tty = tty->linux_tty;
31597
31598 if (linux_tty != NULL) {
31599 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
31600 return;
31601
31602 mutex_lock(&tty->ipw_tty_mutex);
31603 - if (tty->open_count == 0) {
31604 + if (local_read(&tty->open_count) == 0) {
31605 mutex_unlock(&tty->ipw_tty_mutex);
31606 return;
31607 }
31608 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
31609 return;
31610 }
31611
31612 - if (!tty->open_count) {
31613 + if (!local_read(&tty->open_count)) {
31614 mutex_unlock(&tty->ipw_tty_mutex);
31615 return;
31616 }
31617 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
31618 return -ENODEV;
31619
31620 mutex_lock(&tty->ipw_tty_mutex);
31621 - if (!tty->open_count) {
31622 + if (!local_read(&tty->open_count)) {
31623 mutex_unlock(&tty->ipw_tty_mutex);
31624 return -EINVAL;
31625 }
31626 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
31627 if (!tty)
31628 return -ENODEV;
31629
31630 - if (!tty->open_count)
31631 + if (!local_read(&tty->open_count))
31632 return -EINVAL;
31633
31634 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
31635 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
31636 if (!tty)
31637 return 0;
31638
31639 - if (!tty->open_count)
31640 + if (!local_read(&tty->open_count))
31641 return 0;
31642
31643 return tty->tx_bytes_queued;
31644 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty, struct file *file)
31645 if (!tty)
31646 return -ENODEV;
31647
31648 - if (!tty->open_count)
31649 + if (!local_read(&tty->open_count))
31650 return -EINVAL;
31651
31652 return get_control_lines(tty);
31653 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty, struct file *file,
31654 if (!tty)
31655 return -ENODEV;
31656
31657 - if (!tty->open_count)
31658 + if (!local_read(&tty->open_count))
31659 return -EINVAL;
31660
31661 return set_control_lines(tty, set, clear);
31662 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty, struct file *file,
31663 if (!tty)
31664 return -ENODEV;
31665
31666 - if (!tty->open_count)
31667 + if (!local_read(&tty->open_count))
31668 return -EINVAL;
31669
31670 /* FIXME: Exactly how is the tty object locked here .. */
31671 @@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
31672 against a parallel ioctl etc */
31673 mutex_lock(&ttyj->ipw_tty_mutex);
31674 }
31675 - while (ttyj->open_count)
31676 + while (local_read(&ttyj->open_count))
31677 do_ipw_close(ttyj);
31678 ipwireless_disassociate_network_ttys(network,
31679 ttyj->channel_idx);
31680 diff --git a/drivers/char/pty.c b/drivers/char/pty.c
31681 index 62f282e..e45c45c 100644
31682 --- a/drivers/char/pty.c
31683 +++ b/drivers/char/pty.c
31684 @@ -736,8 +736,10 @@ static void __init unix98_pty_init(void)
31685 register_sysctl_table(pty_root_table);
31686
31687 /* Now create the /dev/ptmx special device */
31688 + pax_open_kernel();
31689 tty_default_fops(&ptmx_fops);
31690 - ptmx_fops.open = ptmx_open;
31691 + *(void **)&ptmx_fops.open = ptmx_open;
31692 + pax_close_kernel();
31693
31694 cdev_init(&ptmx_cdev, &ptmx_fops);
31695 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
31696 diff --git a/drivers/char/random.c b/drivers/char/random.c
31697 index 3a19e2d..6ed09d3 100644
31698 --- a/drivers/char/random.c
31699 +++ b/drivers/char/random.c
31700 @@ -254,8 +254,13 @@
31701 /*
31702 * Configuration information
31703 */
31704 +#ifdef CONFIG_GRKERNSEC_RANDNET
31705 +#define INPUT_POOL_WORDS 512
31706 +#define OUTPUT_POOL_WORDS 128
31707 +#else
31708 #define INPUT_POOL_WORDS 128
31709 #define OUTPUT_POOL_WORDS 32
31710 +#endif
31711 #define SEC_XFER_SIZE 512
31712
31713 /*
31714 @@ -292,10 +297,17 @@ static struct poolinfo {
31715 int poolwords;
31716 int tap1, tap2, tap3, tap4, tap5;
31717 } poolinfo_table[] = {
31718 +#ifdef CONFIG_GRKERNSEC_RANDNET
31719 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
31720 + { 512, 411, 308, 208, 104, 1 },
31721 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
31722 + { 128, 103, 76, 51, 25, 1 },
31723 +#else
31724 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
31725 { 128, 103, 76, 51, 25, 1 },
31726 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
31727 { 32, 26, 20, 14, 7, 1 },
31728 +#endif
31729 #if 0
31730 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
31731 { 2048, 1638, 1231, 819, 411, 1 },
31732 @@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
31733 #include <linux/sysctl.h>
31734
31735 static int min_read_thresh = 8, min_write_thresh;
31736 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
31737 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
31738 static int max_write_thresh = INPUT_POOL_WORDS * 32;
31739 static char sysctl_bootid[16];
31740
31741 diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
31742 index 0e29a23..0efc2c2 100644
31743 --- a/drivers/char/rocket.c
31744 +++ b/drivers/char/rocket.c
31745 @@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info, struct rocket_ports __user *retports)
31746 struct rocket_ports tmp;
31747 int board;
31748
31749 + pax_track_stack();
31750 +
31751 if (!retports)
31752 return -EFAULT;
31753 memset(&tmp, 0, sizeof (tmp));
31754 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
31755 index 8c262aa..4d3b058 100644
31756 --- a/drivers/char/sonypi.c
31757 +++ b/drivers/char/sonypi.c
31758 @@ -55,6 +55,7 @@
31759 #include <asm/uaccess.h>
31760 #include <asm/io.h>
31761 #include <asm/system.h>
31762 +#include <asm/local.h>
31763
31764 #include <linux/sonypi.h>
31765
31766 @@ -491,7 +492,7 @@ static struct sonypi_device {
31767 spinlock_t fifo_lock;
31768 wait_queue_head_t fifo_proc_list;
31769 struct fasync_struct *fifo_async;
31770 - int open_count;
31771 + local_t open_count;
31772 int model;
31773 struct input_dev *input_jog_dev;
31774 struct input_dev *input_key_dev;
31775 @@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
31776 static int sonypi_misc_release(struct inode *inode, struct file *file)
31777 {
31778 mutex_lock(&sonypi_device.lock);
31779 - sonypi_device.open_count--;
31780 + local_dec(&sonypi_device.open_count);
31781 mutex_unlock(&sonypi_device.lock);
31782 return 0;
31783 }
31784 @@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
31785 lock_kernel();
31786 mutex_lock(&sonypi_device.lock);
31787 /* Flush input queue on first open */
31788 - if (!sonypi_device.open_count)
31789 + if (!local_read(&sonypi_device.open_count))
31790 kfifo_reset(sonypi_device.fifo);
31791 - sonypi_device.open_count++;
31792 + local_inc(&sonypi_device.open_count);
31793 mutex_unlock(&sonypi_device.lock);
31794 unlock_kernel();
31795 return 0;
31796 diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
31797 index db6dcfa..13834cb 100644
31798 --- a/drivers/char/stallion.c
31799 +++ b/drivers/char/stallion.c
31800 @@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlport __user *arg)
31801 struct stlport stl_dummyport;
31802 struct stlport *portp;
31803
31804 + pax_track_stack();
31805 +
31806 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
31807 return -EFAULT;
31808 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
31809 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
31810 index a0789f6..cea3902 100644
31811 --- a/drivers/char/tpm/tpm.c
31812 +++ b/drivers/char/tpm/tpm.c
31813 @@ -405,7 +405,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
31814 chip->vendor.req_complete_val)
31815 goto out_recv;
31816
31817 - if ((status == chip->vendor.req_canceled)) {
31818 + if (status == chip->vendor.req_canceled) {
31819 dev_err(chip->dev, "Operation Canceled\n");
31820 rc = -ECANCELED;
31821 goto out;
31822 @@ -824,6 +824,8 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
31823
31824 struct tpm_chip *chip = dev_get_drvdata(dev);
31825
31826 + pax_track_stack();
31827 +
31828 tpm_cmd.header.in = tpm_readpubek_header;
31829 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
31830 "attempting to read the PUBEK");
31831 diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
31832 index bf2170f..ce8cab9 100644
31833 --- a/drivers/char/tpm/tpm_bios.c
31834 +++ b/drivers/char/tpm/tpm_bios.c
31835 @@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
31836 event = addr;
31837
31838 if ((event->event_type == 0 && event->event_size == 0) ||
31839 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
31840 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
31841 return NULL;
31842
31843 return addr;
31844 @@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
31845 return NULL;
31846
31847 if ((event->event_type == 0 && event->event_size == 0) ||
31848 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
31849 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
31850 return NULL;
31851
31852 (*pos)++;
31853 @@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
31854 int i;
31855
31856 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
31857 - seq_putc(m, data[i]);
31858 + if (!seq_putc(m, data[i]))
31859 + return -EFAULT;
31860
31861 return 0;
31862 }
31863 @@ -409,8 +410,13 @@ static int read_log(struct tpm_bios_log *log)
31864 log->bios_event_log_end = log->bios_event_log + len;
31865
31866 virt = acpi_os_map_memory(start, len);
31867 + if (!virt) {
31868 + kfree(log->bios_event_log);
31869 + log->bios_event_log = NULL;
31870 + return -EFAULT;
31871 + }
31872
31873 - memcpy(log->bios_event_log, virt, len);
31874 + memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
31875
31876 acpi_os_unmap_memory(virt, len);
31877 return 0;
31878 diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
31879 index 123cedf..137edef 100644
31880 --- a/drivers/char/tty_io.c
31881 +++ b/drivers/char/tty_io.c
31882 @@ -1774,6 +1774,7 @@ got_driver:
31883
31884 if (IS_ERR(tty)) {
31885 mutex_unlock(&tty_mutex);
31886 + tty_driver_kref_put(driver);
31887 return PTR_ERR(tty);
31888 }
31889 }
31890 @@ -2603,8 +2604,10 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
31891 return retval;
31892 }
31893
31894 +EXPORT_SYMBOL(tty_ioctl);
31895 +
31896 #ifdef CONFIG_COMPAT
31897 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
31898 +long tty_compat_ioctl(struct file *file, unsigned int cmd,
31899 unsigned long arg)
31900 {
31901 struct inode *inode = file->f_dentry->d_inode;
31902 @@ -2628,6 +2631,8 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
31903
31904 return retval;
31905 }
31906 +
31907 +EXPORT_SYMBOL(tty_compat_ioctl);
31908 #endif
31909
31910 /*
31911 @@ -3073,7 +3078,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
31912
31913 void tty_default_fops(struct file_operations *fops)
31914 {
31915 - *fops = tty_fops;
31916 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
31917 }
31918
31919 /*
31920 diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
31921 index d814a3d..b55b9c9 100644
31922 --- a/drivers/char/tty_ldisc.c
31923 +++ b/drivers/char/tty_ldisc.c
31924 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *ld)
31925 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
31926 struct tty_ldisc_ops *ldo = ld->ops;
31927
31928 - ldo->refcount--;
31929 + atomic_dec(&ldo->refcount);
31930 module_put(ldo->owner);
31931 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
31932
31933 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
31934 spin_lock_irqsave(&tty_ldisc_lock, flags);
31935 tty_ldiscs[disc] = new_ldisc;
31936 new_ldisc->num = disc;
31937 - new_ldisc->refcount = 0;
31938 + atomic_set(&new_ldisc->refcount, 0);
31939 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
31940
31941 return ret;
31942 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
31943 return -EINVAL;
31944
31945 spin_lock_irqsave(&tty_ldisc_lock, flags);
31946 - if (tty_ldiscs[disc]->refcount)
31947 + if (atomic_read(&tty_ldiscs[disc]->refcount))
31948 ret = -EBUSY;
31949 else
31950 tty_ldiscs[disc] = NULL;
31951 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
31952 if (ldops) {
31953 ret = ERR_PTR(-EAGAIN);
31954 if (try_module_get(ldops->owner)) {
31955 - ldops->refcount++;
31956 + atomic_inc(&ldops->refcount);
31957 ret = ldops;
31958 }
31959 }
31960 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
31961 unsigned long flags;
31962
31963 spin_lock_irqsave(&tty_ldisc_lock, flags);
31964 - ldops->refcount--;
31965 + atomic_dec(&ldops->refcount);
31966 module_put(ldops->owner);
31967 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
31968 }
31969 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
31970 index a035ae3..c27fe2c 100644
31971 --- a/drivers/char/virtio_console.c
31972 +++ b/drivers/char/virtio_console.c
31973 @@ -133,7 +133,9 @@ static int get_chars(u32 vtermno, char *buf, int count)
31974 * virtqueue, so we let the drivers do some boutique early-output thing. */
31975 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
31976 {
31977 - virtio_cons.put_chars = put_chars;
31978 + pax_open_kernel();
31979 + *(void **)&virtio_cons.put_chars = put_chars;
31980 + pax_close_kernel();
31981 return hvc_instantiate(0, 0, &virtio_cons);
31982 }
31983
31984 @@ -213,11 +215,13 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
31985 out_vq = vqs[1];
31986
31987 /* Start using the new console output. */
31988 - virtio_cons.get_chars = get_chars;
31989 - virtio_cons.put_chars = put_chars;
31990 - virtio_cons.notifier_add = notifier_add_vio;
31991 - virtio_cons.notifier_del = notifier_del_vio;
31992 - virtio_cons.notifier_hangup = notifier_del_vio;
31993 + pax_open_kernel();
31994 + *(void **)&virtio_cons.get_chars = get_chars;
31995 + *(void **)&virtio_cons.put_chars = put_chars;
31996 + *(void **)&virtio_cons.notifier_add = notifier_add_vio;
31997 + *(void **)&virtio_cons.notifier_del = notifier_del_vio;
31998 + *(void **)&virtio_cons.notifier_hangup = notifier_del_vio;
31999 + pax_close_kernel();
32000
32001 /* The first argument of hvc_alloc() is the virtual console number, so
32002 * we use zero. The second argument is the parameter for the
32003 diff --git a/drivers/char/vt.c b/drivers/char/vt.c
32004 index 0c80c68..53d59c1 100644
32005 --- a/drivers/char/vt.c
32006 +++ b/drivers/char/vt.c
32007 @@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier);
32008
32009 static void notify_write(struct vc_data *vc, unsigned int unicode)
32010 {
32011 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
32012 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
32013 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
32014 }
32015
32016 diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
32017 index 6351a26..999af95 100644
32018 --- a/drivers/char/vt_ioctl.c
32019 +++ b/drivers/char/vt_ioctl.c
32020 @@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
32021 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
32022 return -EFAULT;
32023
32024 - if (!capable(CAP_SYS_TTY_CONFIG))
32025 - perm = 0;
32026 -
32027 switch (cmd) {
32028 case KDGKBENT:
32029 key_map = key_maps[s];
32030 @@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
32031 val = (i ? K_HOLE : K_NOSUCHMAP);
32032 return put_user(val, &user_kbe->kb_value);
32033 case KDSKBENT:
32034 + if (!capable(CAP_SYS_TTY_CONFIG))
32035 + perm = 0;
32036 +
32037 if (!perm)
32038 return -EPERM;
32039 +
32040 if (!i && v == K_NOSUCHMAP) {
32041 /* deallocate map */
32042 key_map = key_maps[s];
32043 @@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
32044 int i, j, k;
32045 int ret;
32046
32047 - if (!capable(CAP_SYS_TTY_CONFIG))
32048 - perm = 0;
32049 -
32050 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
32051 if (!kbs) {
32052 ret = -ENOMEM;
32053 @@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
32054 kfree(kbs);
32055 return ((p && *p) ? -EOVERFLOW : 0);
32056 case KDSKBSENT:
32057 + if (!capable(CAP_SYS_TTY_CONFIG))
32058 + perm = 0;
32059 +
32060 if (!perm) {
32061 ret = -EPERM;
32062 goto reterr;
32063 diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
32064 index c7ae026..1769c1d 100644
32065 --- a/drivers/cpufreq/cpufreq.c
32066 +++ b/drivers/cpufreq/cpufreq.c
32067 @@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct kobject *kobj)
32068 complete(&policy->kobj_unregister);
32069 }
32070
32071 -static struct sysfs_ops sysfs_ops = {
32072 +static const struct sysfs_ops sysfs_ops = {
32073 .show = show,
32074 .store = store,
32075 };
32076 diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
32077 index 97b0038..2056670 100644
32078 --- a/drivers/cpuidle/sysfs.c
32079 +++ b/drivers/cpuidle/sysfs.c
32080 @@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobject * kobj, struct attribute * attr,
32081 return ret;
32082 }
32083
32084 -static struct sysfs_ops cpuidle_sysfs_ops = {
32085 +static const struct sysfs_ops cpuidle_sysfs_ops = {
32086 .show = cpuidle_show,
32087 .store = cpuidle_store,
32088 };
32089 @@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct kobject * kobj,
32090 return ret;
32091 }
32092
32093 -static struct sysfs_ops cpuidle_state_sysfs_ops = {
32094 +static const struct sysfs_ops cpuidle_state_sysfs_ops = {
32095 .show = cpuidle_state_show,
32096 };
32097
32098 @@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpuidle = {
32099 .release = cpuidle_state_sysfs_release,
32100 };
32101
32102 -static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
32103 +static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
32104 {
32105 kobject_put(&device->kobjs[i]->kobj);
32106 wait_for_completion(&device->kobjs[i]->kobj_unregister);
32107 diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
32108 index 5f753fc..0377ae9 100644
32109 --- a/drivers/crypto/hifn_795x.c
32110 +++ b/drivers/crypto/hifn_795x.c
32111 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
32112 0xCA, 0x34, 0x2B, 0x2E};
32113 struct scatterlist sg;
32114
32115 + pax_track_stack();
32116 +
32117 memset(src, 0, sizeof(src));
32118 memset(ctx.key, 0, sizeof(ctx.key));
32119
32120 diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
32121 index 71e6482..de8d96c 100644
32122 --- a/drivers/crypto/padlock-aes.c
32123 +++ b/drivers/crypto/padlock-aes.c
32124 @@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
32125 struct crypto_aes_ctx gen_aes;
32126 int cpu;
32127
32128 + pax_track_stack();
32129 +
32130 if (key_len % 8) {
32131 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
32132 return -EINVAL;
32133 diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
32134 index dcc4ab7..cc834bb 100644
32135 --- a/drivers/dma/ioat/dma.c
32136 +++ b/drivers/dma/ioat/dma.c
32137 @@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
32138 return entry->show(&chan->common, page);
32139 }
32140
32141 -struct sysfs_ops ioat_sysfs_ops = {
32142 +const struct sysfs_ops ioat_sysfs_ops = {
32143 .show = ioat_attr_show,
32144 };
32145
32146 diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
32147 index bbc3e78..f2db62c 100644
32148 --- a/drivers/dma/ioat/dma.h
32149 +++ b/drivers/dma/ioat/dma.h
32150 @@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
32151 unsigned long *phys_complete);
32152 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
32153 void ioat_kobject_del(struct ioatdma_device *device);
32154 -extern struct sysfs_ops ioat_sysfs_ops;
32155 +extern const struct sysfs_ops ioat_sysfs_ops;
32156 extern struct ioat_sysfs_entry ioat_version_attr;
32157 extern struct ioat_sysfs_entry ioat_cap_attr;
32158 #endif /* IOATDMA_H */
32159 diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
32160 index 9908c9e..3ceb0e5 100644
32161 --- a/drivers/dma/ioat/dma_v3.c
32162 +++ b/drivers/dma/ioat/dma_v3.c
32163 @@ -71,10 +71,10 @@
32164 /* provide a lookup table for setting the source address in the base or
32165 * extended descriptor of an xor or pq descriptor
32166 */
32167 -static const u8 xor_idx_to_desc __read_mostly = 0xd0;
32168 -static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 };
32169 -static const u8 pq_idx_to_desc __read_mostly = 0xf8;
32170 -static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 };
32171 +static const u8 xor_idx_to_desc = 0xd0;
32172 +static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
32173 +static const u8 pq_idx_to_desc = 0xf8;
32174 +static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
32175
32176 static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
32177 {
32178 diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
32179 index 85c464a..afd1e73 100644
32180 --- a/drivers/edac/amd64_edac.c
32181 +++ b/drivers/edac/amd64_edac.c
32182 @@ -3099,7 +3099,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
32183 * PCI core identifies what devices are on a system during boot, and then
32184 * inquiry this table to see if this driver is for a given device found.
32185 */
32186 -static const struct pci_device_id amd64_pci_table[] __devinitdata = {
32187 +static const struct pci_device_id amd64_pci_table[] __devinitconst = {
32188 {
32189 .vendor = PCI_VENDOR_ID_AMD,
32190 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
32191 diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
32192 index 2b95f1a..4f52793 100644
32193 --- a/drivers/edac/amd76x_edac.c
32194 +++ b/drivers/edac/amd76x_edac.c
32195 @@ -322,7 +322,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
32196 edac_mc_free(mci);
32197 }
32198
32199 -static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
32200 +static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
32201 {
32202 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32203 AMD762},
32204 diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
32205 index d205d49..74c9672 100644
32206 --- a/drivers/edac/e752x_edac.c
32207 +++ b/drivers/edac/e752x_edac.c
32208 @@ -1282,7 +1282,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
32209 edac_mc_free(mci);
32210 }
32211
32212 -static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
32213 +static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
32214 {
32215 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32216 E7520},
32217 diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
32218 index c7d11cc..c59c1ca 100644
32219 --- a/drivers/edac/e7xxx_edac.c
32220 +++ b/drivers/edac/e7xxx_edac.c
32221 @@ -526,7 +526,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
32222 edac_mc_free(mci);
32223 }
32224
32225 -static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
32226 +static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
32227 {
32228 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32229 E7205},
32230 diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
32231 index 5376457..5fdedbc 100644
32232 --- a/drivers/edac/edac_device_sysfs.c
32233 +++ b/drivers/edac/edac_device_sysfs.c
32234 @@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(struct kobject *kobj,
32235 }
32236
32237 /* edac_dev file operations for an 'ctl_info' */
32238 -static struct sysfs_ops device_ctl_info_ops = {
32239 +static const struct sysfs_ops device_ctl_info_ops = {
32240 .show = edac_dev_ctl_info_show,
32241 .store = edac_dev_ctl_info_store
32242 };
32243 @@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(struct kobject *kobj,
32244 }
32245
32246 /* edac_dev file operations for an 'instance' */
32247 -static struct sysfs_ops device_instance_ops = {
32248 +static const struct sysfs_ops device_instance_ops = {
32249 .show = edac_dev_instance_show,
32250 .store = edac_dev_instance_store
32251 };
32252 @@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(struct kobject *kobj,
32253 }
32254
32255 /* edac_dev file operations for a 'block' */
32256 -static struct sysfs_ops device_block_ops = {
32257 +static const struct sysfs_ops device_block_ops = {
32258 .show = edac_dev_block_show,
32259 .store = edac_dev_block_store
32260 };
32261 diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
32262 index e1d4ce0..88840e9 100644
32263 --- a/drivers/edac/edac_mc_sysfs.c
32264 +++ b/drivers/edac/edac_mc_sysfs.c
32265 @@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
32266 return -EIO;
32267 }
32268
32269 -static struct sysfs_ops csrowfs_ops = {
32270 +static const struct sysfs_ops csrowfs_ops = {
32271 .show = csrowdev_show,
32272 .store = csrowdev_store
32273 };
32274 @@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
32275 }
32276
32277 /* Intermediate show/store table */
32278 -static struct sysfs_ops mci_ops = {
32279 +static const struct sysfs_ops mci_ops = {
32280 .show = mcidev_show,
32281 .store = mcidev_store
32282 };
32283 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
32284 index 422728c..d8d9c88 100644
32285 --- a/drivers/edac/edac_pci_sysfs.c
32286 +++ b/drivers/edac/edac_pci_sysfs.c
32287 @@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
32288 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
32289 static int edac_pci_poll_msec = 1000; /* one second workq period */
32290
32291 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
32292 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
32293 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
32294 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
32295
32296 static struct kobject *edac_pci_top_main_kobj;
32297 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
32298 @@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(struct kobject *kobj,
32299 }
32300
32301 /* fs_ops table */
32302 -static struct sysfs_ops pci_instance_ops = {
32303 +static const struct sysfs_ops pci_instance_ops = {
32304 .show = edac_pci_instance_show,
32305 .store = edac_pci_instance_store
32306 };
32307 @@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj,
32308 return -EIO;
32309 }
32310
32311 -static struct sysfs_ops edac_pci_sysfs_ops = {
32312 +static const struct sysfs_ops edac_pci_sysfs_ops = {
32313 .show = edac_pci_dev_show,
32314 .store = edac_pci_dev_store
32315 };
32316 @@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32317 edac_printk(KERN_CRIT, EDAC_PCI,
32318 "Signaled System Error on %s\n",
32319 pci_name(dev));
32320 - atomic_inc(&pci_nonparity_count);
32321 + atomic_inc_unchecked(&pci_nonparity_count);
32322 }
32323
32324 if (status & (PCI_STATUS_PARITY)) {
32325 @@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32326 "Master Data Parity Error on %s\n",
32327 pci_name(dev));
32328
32329 - atomic_inc(&pci_parity_count);
32330 + atomic_inc_unchecked(&pci_parity_count);
32331 }
32332
32333 if (status & (PCI_STATUS_DETECTED_PARITY)) {
32334 @@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32335 "Detected Parity Error on %s\n",
32336 pci_name(dev));
32337
32338 - atomic_inc(&pci_parity_count);
32339 + atomic_inc_unchecked(&pci_parity_count);
32340 }
32341 }
32342
32343 @@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32344 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
32345 "Signaled System Error on %s\n",
32346 pci_name(dev));
32347 - atomic_inc(&pci_nonparity_count);
32348 + atomic_inc_unchecked(&pci_nonparity_count);
32349 }
32350
32351 if (status & (PCI_STATUS_PARITY)) {
32352 @@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32353 "Master Data Parity Error on "
32354 "%s\n", pci_name(dev));
32355
32356 - atomic_inc(&pci_parity_count);
32357 + atomic_inc_unchecked(&pci_parity_count);
32358 }
32359
32360 if (status & (PCI_STATUS_DETECTED_PARITY)) {
32361 @@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32362 "Detected Parity Error on %s\n",
32363 pci_name(dev));
32364
32365 - atomic_inc(&pci_parity_count);
32366 + atomic_inc_unchecked(&pci_parity_count);
32367 }
32368 }
32369 }
32370 @@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
32371 if (!check_pci_errors)
32372 return;
32373
32374 - before_count = atomic_read(&pci_parity_count);
32375 + before_count = atomic_read_unchecked(&pci_parity_count);
32376
32377 /* scan all PCI devices looking for a Parity Error on devices and
32378 * bridges.
32379 @@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
32380 /* Only if operator has selected panic on PCI Error */
32381 if (edac_pci_get_panic_on_pe()) {
32382 /* If the count is different 'after' from 'before' */
32383 - if (before_count != atomic_read(&pci_parity_count))
32384 + if (before_count != atomic_read_unchecked(&pci_parity_count))
32385 panic("EDAC: PCI Parity Error");
32386 }
32387 }
32388 diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
32389 index 6c9a0f2..9c1cf7e 100644
32390 --- a/drivers/edac/i3000_edac.c
32391 +++ b/drivers/edac/i3000_edac.c
32392 @@ -471,7 +471,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
32393 edac_mc_free(mci);
32394 }
32395
32396 -static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
32397 +static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
32398 {
32399 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32400 I3000},
32401 diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
32402 index fde4db9..fe108f9 100644
32403 --- a/drivers/edac/i3200_edac.c
32404 +++ b/drivers/edac/i3200_edac.c
32405 @@ -444,7 +444,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
32406 edac_mc_free(mci);
32407 }
32408
32409 -static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
32410 +static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
32411 {
32412 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32413 I3200},
32414 diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
32415 index adc10a2..57d4ccf 100644
32416 --- a/drivers/edac/i5000_edac.c
32417 +++ b/drivers/edac/i5000_edac.c
32418 @@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
32419 *
32420 * The "E500P" device is the first device supported.
32421 */
32422 -static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
32423 +static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
32424 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
32425 .driver_data = I5000P},
32426
32427 diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
32428 index 22db05a..b2b5503 100644
32429 --- a/drivers/edac/i5100_edac.c
32430 +++ b/drivers/edac/i5100_edac.c
32431 @@ -944,7 +944,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
32432 edac_mc_free(mci);
32433 }
32434
32435 -static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
32436 +static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
32437 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
32438 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
32439 { 0, }
32440 diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
32441 index f99d106..f050710 100644
32442 --- a/drivers/edac/i5400_edac.c
32443 +++ b/drivers/edac/i5400_edac.c
32444 @@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
32445 *
32446 * The "E500P" device is the first device supported.
32447 */
32448 -static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
32449 +static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
32450 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
32451 {0,} /* 0 terminated list. */
32452 };
32453 diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
32454 index 577760a..9ce16ce 100644
32455 --- a/drivers/edac/i82443bxgx_edac.c
32456 +++ b/drivers/edac/i82443bxgx_edac.c
32457 @@ -381,7 +381,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
32458
32459 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
32460
32461 -static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
32462 +static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
32463 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
32464 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
32465 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
32466 diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
32467 index c0088ba..64a7b98 100644
32468 --- a/drivers/edac/i82860_edac.c
32469 +++ b/drivers/edac/i82860_edac.c
32470 @@ -271,7 +271,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
32471 edac_mc_free(mci);
32472 }
32473
32474 -static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
32475 +static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
32476 {
32477 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32478 I82860},
32479 diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
32480 index b2d83b9..a34357b 100644
32481 --- a/drivers/edac/i82875p_edac.c
32482 +++ b/drivers/edac/i82875p_edac.c
32483 @@ -512,7 +512,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
32484 edac_mc_free(mci);
32485 }
32486
32487 -static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
32488 +static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
32489 {
32490 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32491 I82875P},
32492 diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
32493 index 2eed3ea..87bbbd1 100644
32494 --- a/drivers/edac/i82975x_edac.c
32495 +++ b/drivers/edac/i82975x_edac.c
32496 @@ -586,7 +586,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
32497 edac_mc_free(mci);
32498 }
32499
32500 -static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
32501 +static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
32502 {
32503 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32504 I82975X
32505 diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
32506 index 9900675..78ac2b6 100644
32507 --- a/drivers/edac/r82600_edac.c
32508 +++ b/drivers/edac/r82600_edac.c
32509 @@ -374,7 +374,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
32510 edac_mc_free(mci);
32511 }
32512
32513 -static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
32514 +static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
32515 {
32516 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
32517 },
32518 diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
32519 index d4ec605..4cfec4e 100644
32520 --- a/drivers/edac/x38_edac.c
32521 +++ b/drivers/edac/x38_edac.c
32522 @@ -441,7 +441,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
32523 edac_mc_free(mci);
32524 }
32525
32526 -static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
32527 +static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
32528 {
32529 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32530 X38},
32531 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
32532 index 3fc2ceb..daf098f 100644
32533 --- a/drivers/firewire/core-card.c
32534 +++ b/drivers/firewire/core-card.c
32535 @@ -558,7 +558,7 @@ void fw_card_release(struct kref *kref)
32536
32537 void fw_core_remove_card(struct fw_card *card)
32538 {
32539 - struct fw_card_driver dummy_driver = dummy_driver_template;
32540 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
32541
32542 card->driver->update_phy_reg(card, 4,
32543 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
32544 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
32545 index 4560d8f..36db24a 100644
32546 --- a/drivers/firewire/core-cdev.c
32547 +++ b/drivers/firewire/core-cdev.c
32548 @@ -1141,8 +1141,7 @@ static int init_iso_resource(struct client *client,
32549 int ret;
32550
32551 if ((request->channels == 0 && request->bandwidth == 0) ||
32552 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
32553 - request->bandwidth < 0)
32554 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
32555 return -EINVAL;
32556
32557 r = kmalloc(sizeof(*r), GFP_KERNEL);
32558 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
32559 index da628c7..cf54a2c 100644
32560 --- a/drivers/firewire/core-transaction.c
32561 +++ b/drivers/firewire/core-transaction.c
32562 @@ -36,6 +36,7 @@
32563 #include <linux/string.h>
32564 #include <linux/timer.h>
32565 #include <linux/types.h>
32566 +#include <linux/sched.h>
32567
32568 #include <asm/byteorder.h>
32569
32570 @@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
32571 struct transaction_callback_data d;
32572 struct fw_transaction t;
32573
32574 + pax_track_stack();
32575 +
32576 init_completion(&d.done);
32577 d.payload = payload;
32578 fw_send_request(card, &t, tcode, destination_id, generation, speed,
32579 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
32580 index 7ff6e75..a2965d9 100644
32581 --- a/drivers/firewire/core.h
32582 +++ b/drivers/firewire/core.h
32583 @@ -86,6 +86,7 @@ struct fw_card_driver {
32584
32585 int (*stop_iso)(struct fw_iso_context *ctx);
32586 };
32587 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
32588
32589 void fw_card_initialize(struct fw_card *card,
32590 const struct fw_card_driver *driver, struct device *device);
32591 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
32592 index 3a2ccb0..82fd7c4 100644
32593 --- a/drivers/firmware/dmi_scan.c
32594 +++ b/drivers/firmware/dmi_scan.c
32595 @@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
32596 }
32597 }
32598 else {
32599 - /*
32600 - * no iounmap() for that ioremap(); it would be a no-op, but
32601 - * it's so early in setup that sucker gets confused into doing
32602 - * what it shouldn't if we actually call it.
32603 - */
32604 p = dmi_ioremap(0xF0000, 0x10000);
32605 if (p == NULL)
32606 goto error;
32607 @@ -667,7 +662,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
32608 if (buf == NULL)
32609 return -1;
32610
32611 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
32612 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
32613
32614 iounmap(buf);
32615 return 0;
32616 diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
32617 index 9e4f59d..110e24e 100644
32618 --- a/drivers/firmware/edd.c
32619 +++ b/drivers/firmware/edd.c
32620 @@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, struct attribute *attr, char *buf)
32621 return ret;
32622 }
32623
32624 -static struct sysfs_ops edd_attr_ops = {
32625 +static const struct sysfs_ops edd_attr_ops = {
32626 .show = edd_attr_show,
32627 };
32628
32629 diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
32630 index f4f709d..082f06e 100644
32631 --- a/drivers/firmware/efivars.c
32632 +++ b/drivers/firmware/efivars.c
32633 @@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct kobject *kobj, struct attribute *attr,
32634 return ret;
32635 }
32636
32637 -static struct sysfs_ops efivar_attr_ops = {
32638 +static const struct sysfs_ops efivar_attr_ops = {
32639 .show = efivar_attr_show,
32640 .store = efivar_attr_store,
32641 };
32642 diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
32643 index 051d1eb..0a5d4e7 100644
32644 --- a/drivers/firmware/iscsi_ibft.c
32645 +++ b/drivers/firmware/iscsi_ibft.c
32646 @@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struct kobject *kobj,
32647 return ret;
32648 }
32649
32650 -static struct sysfs_ops ibft_attr_ops = {
32651 +static const struct sysfs_ops ibft_attr_ops = {
32652 .show = ibft_show_attribute,
32653 };
32654
32655 diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
32656 index 56f9234..8c58c7b 100644
32657 --- a/drivers/firmware/memmap.c
32658 +++ b/drivers/firmware/memmap.c
32659 @@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
32660 NULL
32661 };
32662
32663 -static struct sysfs_ops memmap_attr_ops = {
32664 +static const struct sysfs_ops memmap_attr_ops = {
32665 .show = memmap_attr_show,
32666 };
32667
32668 diff --git a/drivers/gpio/vr41xx_giu.c b/drivers/gpio/vr41xx_giu.c
32669 index b16c9a8..2af7d3f 100644
32670 --- a/drivers/gpio/vr41xx_giu.c
32671 +++ b/drivers/gpio/vr41xx_giu.c
32672 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
32673 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
32674 maskl, pendl, maskh, pendh);
32675
32676 - atomic_inc(&irq_err_count);
32677 + atomic_inc_unchecked(&irq_err_count);
32678
32679 return -EINVAL;
32680 }
32681 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
32682 index bea6efc..3dc0f42 100644
32683 --- a/drivers/gpu/drm/drm_crtc.c
32684 +++ b/drivers/gpu/drm/drm_crtc.c
32685 @@ -1323,7 +1323,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32686 */
32687 if ((out_resp->count_modes >= mode_count) && mode_count) {
32688 copied = 0;
32689 - mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
32690 + mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
32691 list_for_each_entry(mode, &connector->modes, head) {
32692 drm_crtc_convert_to_umode(&u_mode, mode);
32693 if (copy_to_user(mode_ptr + copied,
32694 @@ -1338,8 +1338,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32695
32696 if ((out_resp->count_props >= props_count) && props_count) {
32697 copied = 0;
32698 - prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
32699 - prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
32700 + prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
32701 + prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
32702 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
32703 if (connector->property_ids[i] != 0) {
32704 if (put_user(connector->property_ids[i],
32705 @@ -1361,7 +1361,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32706
32707 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
32708 copied = 0;
32709 - encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
32710 + encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
32711 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
32712 if (connector->encoder_ids[i] != 0) {
32713 if (put_user(connector->encoder_ids[i],
32714 @@ -1513,7 +1513,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
32715 }
32716
32717 for (i = 0; i < crtc_req->count_connectors; i++) {
32718 - set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
32719 + set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
32720 if (get_user(out_id, &set_connectors_ptr[i])) {
32721 ret = -EFAULT;
32722 goto out;
32723 @@ -2118,7 +2118,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32724 out_resp->flags = property->flags;
32725
32726 if ((out_resp->count_values >= value_count) && value_count) {
32727 - values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
32728 + values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
32729 for (i = 0; i < value_count; i++) {
32730 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
32731 ret = -EFAULT;
32732 @@ -2131,7 +2131,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32733 if (property->flags & DRM_MODE_PROP_ENUM) {
32734 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
32735 copied = 0;
32736 - enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
32737 + enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
32738 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
32739
32740 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
32741 @@ -2154,7 +2154,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32742 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
32743 copied = 0;
32744 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
32745 - blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
32746 + blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
32747
32748 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
32749 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
32750 @@ -2226,7 +2226,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
32751 blob = obj_to_blob(obj);
32752
32753 if (out_resp->length == blob->length) {
32754 - blob_ptr = (void *)(unsigned long)out_resp->data;
32755 + blob_ptr = (void __user *)(unsigned long)out_resp->data;
32756 if (copy_to_user(blob_ptr, blob->data, blob->length)){
32757 ret = -EFAULT;
32758 goto done;
32759 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
32760 index 1b8745d..92fdbf6 100644
32761 --- a/drivers/gpu/drm/drm_crtc_helper.c
32762 +++ b/drivers/gpu/drm/drm_crtc_helper.c
32763 @@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
32764 struct drm_crtc *tmp;
32765 int crtc_mask = 1;
32766
32767 - WARN(!crtc, "checking null crtc?");
32768 + BUG_ON(!crtc);
32769
32770 dev = crtc->dev;
32771
32772 @@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
32773
32774 adjusted_mode = drm_mode_duplicate(dev, mode);
32775
32776 + pax_track_stack();
32777 +
32778 crtc->enabled = drm_helper_crtc_in_use(crtc);
32779
32780 if (!crtc->enabled)
32781 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
32782 index 0e27d98..dec8768 100644
32783 --- a/drivers/gpu/drm/drm_drv.c
32784 +++ b/drivers/gpu/drm/drm_drv.c
32785 @@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
32786 char *kdata = NULL;
32787
32788 atomic_inc(&dev->ioctl_count);
32789 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
32790 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
32791 ++file_priv->ioctl_count;
32792
32793 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
32794 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
32795 index ba14553..182d0bb 100644
32796 --- a/drivers/gpu/drm/drm_fops.c
32797 +++ b/drivers/gpu/drm/drm_fops.c
32798 @@ -66,7 +66,7 @@ static int drm_setup(struct drm_device * dev)
32799 }
32800
32801 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
32802 - atomic_set(&dev->counts[i], 0);
32803 + atomic_set_unchecked(&dev->counts[i], 0);
32804
32805 dev->sigdata.lock = NULL;
32806
32807 @@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct file *filp)
32808
32809 retcode = drm_open_helper(inode, filp, dev);
32810 if (!retcode) {
32811 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
32812 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
32813 spin_lock(&dev->count_lock);
32814 - if (!dev->open_count++) {
32815 + if (local_inc_return(&dev->open_count) == 1) {
32816 spin_unlock(&dev->count_lock);
32817 retcode = drm_setup(dev);
32818 goto out;
32819 @@ -435,7 +435,7 @@ int drm_release(struct inode *inode, struct file *filp)
32820
32821 lock_kernel();
32822
32823 - DRM_DEBUG("open_count = %d\n", dev->open_count);
32824 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
32825
32826 if (dev->driver->preclose)
32827 dev->driver->preclose(dev, file_priv);
32828 @@ -447,7 +447,7 @@ int drm_release(struct inode *inode, struct file *filp)
32829 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
32830 task_pid_nr(current),
32831 (long)old_encode_dev(file_priv->minor->device),
32832 - dev->open_count);
32833 + local_read(&dev->open_count));
32834
32835 /* if the master has gone away we can't do anything with the lock */
32836 if (file_priv->minor->master)
32837 @@ -524,9 +524,9 @@ int drm_release(struct inode *inode, struct file *filp)
32838 * End inline drm_release
32839 */
32840
32841 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
32842 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
32843 spin_lock(&dev->count_lock);
32844 - if (!--dev->open_count) {
32845 + if (local_dec_and_test(&dev->open_count)) {
32846 if (atomic_read(&dev->ioctl_count)) {
32847 DRM_ERROR("Device busy: %d\n",
32848 atomic_read(&dev->ioctl_count));
32849 diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
32850 index 8bf3770..79422805 100644
32851 --- a/drivers/gpu/drm/drm_gem.c
32852 +++ b/drivers/gpu/drm/drm_gem.c
32853 @@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
32854 spin_lock_init(&dev->object_name_lock);
32855 idr_init(&dev->object_name_idr);
32856 atomic_set(&dev->object_count, 0);
32857 - atomic_set(&dev->object_memory, 0);
32858 + atomic_set_unchecked(&dev->object_memory, 0);
32859 atomic_set(&dev->pin_count, 0);
32860 - atomic_set(&dev->pin_memory, 0);
32861 + atomic_set_unchecked(&dev->pin_memory, 0);
32862 atomic_set(&dev->gtt_count, 0);
32863 - atomic_set(&dev->gtt_memory, 0);
32864 + atomic_set_unchecked(&dev->gtt_memory, 0);
32865
32866 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
32867 if (!mm) {
32868 @@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
32869 goto fput;
32870 }
32871 atomic_inc(&dev->object_count);
32872 - atomic_add(obj->size, &dev->object_memory);
32873 + atomic_add_unchecked(obj->size, &dev->object_memory);
32874 return obj;
32875 fput:
32876 fput(obj->filp);
32877 @@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
32878
32879 fput(obj->filp);
32880 atomic_dec(&dev->object_count);
32881 - atomic_sub(obj->size, &dev->object_memory);
32882 + atomic_sub_unchecked(obj->size, &dev->object_memory);
32883 kfree(obj);
32884 }
32885 EXPORT_SYMBOL(drm_gem_object_free);
32886 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
32887 index f0f6c6b..34af322 100644
32888 --- a/drivers/gpu/drm/drm_info.c
32889 +++ b/drivers/gpu/drm/drm_info.c
32890 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
32891 struct drm_local_map *map;
32892 struct drm_map_list *r_list;
32893
32894 - /* Hardcoded from _DRM_FRAME_BUFFER,
32895 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
32896 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
32897 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
32898 + static const char * const types[] = {
32899 + [_DRM_FRAME_BUFFER] = "FB",
32900 + [_DRM_REGISTERS] = "REG",
32901 + [_DRM_SHM] = "SHM",
32902 + [_DRM_AGP] = "AGP",
32903 + [_DRM_SCATTER_GATHER] = "SG",
32904 + [_DRM_CONSISTENT] = "PCI",
32905 + [_DRM_GEM] = "GEM" };
32906 const char *type;
32907 int i;
32908
32909 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
32910 map = r_list->map;
32911 if (!map)
32912 continue;
32913 - if (map->type < 0 || map->type > 5)
32914 + if (map->type >= ARRAY_SIZE(types))
32915 type = "??";
32916 else
32917 type = types[map->type];
32918 @@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file *m, void* data)
32919 struct drm_device *dev = node->minor->dev;
32920
32921 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
32922 - seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
32923 + seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
32924 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
32925 - seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
32926 - seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
32927 + seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
32928 + seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
32929 seq_printf(m, "%d gtt total\n", dev->gtt_total);
32930 return 0;
32931 }
32932 @@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, void *data)
32933 mutex_lock(&dev->struct_mutex);
32934 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
32935 atomic_read(&dev->vma_count),
32936 +#ifdef CONFIG_GRKERNSEC_HIDESYM
32937 + NULL, 0);
32938 +#else
32939 high_memory, (u64)virt_to_phys(high_memory));
32940 +#endif
32941
32942 list_for_each_entry(pt, &dev->vmalist, head) {
32943 vma = pt->vma;
32944 @@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, void *data)
32945 continue;
32946 seq_printf(m,
32947 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
32948 - pt->pid, vma->vm_start, vma->vm_end,
32949 + pt->pid,
32950 +#ifdef CONFIG_GRKERNSEC_HIDESYM
32951 + 0, 0,
32952 +#else
32953 + vma->vm_start, vma->vm_end,
32954 +#endif
32955 vma->vm_flags & VM_READ ? 'r' : '-',
32956 vma->vm_flags & VM_WRITE ? 'w' : '-',
32957 vma->vm_flags & VM_EXEC ? 'x' : '-',
32958 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
32959 vma->vm_flags & VM_LOCKED ? 'l' : '-',
32960 vma->vm_flags & VM_IO ? 'i' : '-',
32961 +#ifdef CONFIG_GRKERNSEC_HIDESYM
32962 + 0);
32963 +#else
32964 vma->vm_pgoff);
32965 +#endif
32966
32967 #if defined(__i386__)
32968 pgprot = pgprot_val(vma->vm_page_prot);
32969 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
32970 index 282d9fd..71e5f11 100644
32971 --- a/drivers/gpu/drm/drm_ioc32.c
32972 +++ b/drivers/gpu/drm/drm_ioc32.c
32973 @@ -463,7 +463,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
32974 request = compat_alloc_user_space(nbytes);
32975 if (!access_ok(VERIFY_WRITE, request, nbytes))
32976 return -EFAULT;
32977 - list = (struct drm_buf_desc *) (request + 1);
32978 + list = (struct drm_buf_desc __user *) (request + 1);
32979
32980 if (__put_user(count, &request->count)
32981 || __put_user(list, &request->list))
32982 @@ -525,7 +525,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
32983 request = compat_alloc_user_space(nbytes);
32984 if (!access_ok(VERIFY_WRITE, request, nbytes))
32985 return -EFAULT;
32986 - list = (struct drm_buf_pub *) (request + 1);
32987 + list = (struct drm_buf_pub __user *) (request + 1);
32988
32989 if (__put_user(count, &request->count)
32990 || __put_user(list, &request->list))
32991 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
32992 index 9b9ff46..4ea724c 100644
32993 --- a/drivers/gpu/drm/drm_ioctl.c
32994 +++ b/drivers/gpu/drm/drm_ioctl.c
32995 @@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev, void *data,
32996 stats->data[i].value =
32997 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
32998 else
32999 - stats->data[i].value = atomic_read(&dev->counts[i]);
33000 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
33001 stats->data[i].type = dev->types[i];
33002 }
33003
33004 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
33005 index e2f70a5..c703e86 100644
33006 --- a/drivers/gpu/drm/drm_lock.c
33007 +++ b/drivers/gpu/drm/drm_lock.c
33008 @@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
33009 if (drm_lock_take(&master->lock, lock->context)) {
33010 master->lock.file_priv = file_priv;
33011 master->lock.lock_time = jiffies;
33012 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
33013 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
33014 break; /* Got lock */
33015 }
33016
33017 @@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
33018 return -EINVAL;
33019 }
33020
33021 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
33022 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
33023
33024 /* kernel_context_switch isn't used by any of the x86 drm
33025 * modules but is required by the Sparc driver.
33026 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
33027 index 7d1d88c..b9131b2 100644
33028 --- a/drivers/gpu/drm/i810/i810_dma.c
33029 +++ b/drivers/gpu/drm/i810/i810_dma.c
33030 @@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
33031 dma->buflist[vertex->idx],
33032 vertex->discard, vertex->used);
33033
33034 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
33035 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
33036 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
33037 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
33038 sarea_priv->last_enqueue = dev_priv->counter - 1;
33039 sarea_priv->last_dispatch = (int)hw_status[5];
33040
33041 @@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
33042 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
33043 mc->last_render);
33044
33045 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
33046 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
33047 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
33048 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
33049 sarea_priv->last_enqueue = dev_priv->counter - 1;
33050 sarea_priv->last_dispatch = (int)hw_status[5];
33051
33052 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
33053 index 21e2691..7321edd 100644
33054 --- a/drivers/gpu/drm/i810/i810_drv.h
33055 +++ b/drivers/gpu/drm/i810/i810_drv.h
33056 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
33057 int page_flipping;
33058
33059 wait_queue_head_t irq_queue;
33060 - atomic_t irq_received;
33061 - atomic_t irq_emitted;
33062 + atomic_unchecked_t irq_received;
33063 + atomic_unchecked_t irq_emitted;
33064
33065 int front_offset;
33066 } drm_i810_private_t;
33067 diff --git a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h
33068 index da82afe..48a45de 100644
33069 --- a/drivers/gpu/drm/i830/i830_drv.h
33070 +++ b/drivers/gpu/drm/i830/i830_drv.h
33071 @@ -115,8 +115,8 @@ typedef struct drm_i830_private {
33072 int page_flipping;
33073
33074 wait_queue_head_t irq_queue;
33075 - atomic_t irq_received;
33076 - atomic_t irq_emitted;
33077 + atomic_unchecked_t irq_received;
33078 + atomic_unchecked_t irq_emitted;
33079
33080 int use_mi_batchbuffer_start;
33081
33082 diff --git a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c
33083 index 91ec2bb..6f21fab 100644
33084 --- a/drivers/gpu/drm/i830/i830_irq.c
33085 +++ b/drivers/gpu/drm/i830/i830_irq.c
33086 @@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
33087
33088 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
33089
33090 - atomic_inc(&dev_priv->irq_received);
33091 + atomic_inc_unchecked(&dev_priv->irq_received);
33092 wake_up_interruptible(&dev_priv->irq_queue);
33093
33094 return IRQ_HANDLED;
33095 @@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_device * dev)
33096
33097 DRM_DEBUG("%s\n", __func__);
33098
33099 - atomic_inc(&dev_priv->irq_emitted);
33100 + atomic_inc_unchecked(&dev_priv->irq_emitted);
33101
33102 BEGIN_LP_RING(2);
33103 OUT_RING(0);
33104 OUT_RING(GFX_OP_USER_INTERRUPT);
33105 ADVANCE_LP_RING();
33106
33107 - return atomic_read(&dev_priv->irq_emitted);
33108 + return atomic_read_unchecked(&dev_priv->irq_emitted);
33109 }
33110
33111 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
33112 @@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
33113
33114 DRM_DEBUG("%s\n", __func__);
33115
33116 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
33117 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
33118 return 0;
33119
33120 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
33121 @@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
33122
33123 for (;;) {
33124 __set_current_state(TASK_INTERRUPTIBLE);
33125 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
33126 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
33127 break;
33128 if ((signed)(end - jiffies) <= 0) {
33129 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
33130 @@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct drm_device * dev)
33131 I830_WRITE16(I830REG_HWSTAM, 0xffff);
33132 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
33133 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
33134 - atomic_set(&dev_priv->irq_received, 0);
33135 - atomic_set(&dev_priv->irq_emitted, 0);
33136 + atomic_set_unchecked(&dev_priv->irq_received, 0);
33137 + atomic_set_unchecked(&dev_priv->irq_emitted, 0);
33138 init_waitqueue_head(&dev_priv->irq_queue);
33139 }
33140
33141 diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
33142 index 288fc50..c6092055 100644
33143 --- a/drivers/gpu/drm/i915/dvo.h
33144 +++ b/drivers/gpu/drm/i915/dvo.h
33145 @@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
33146 *
33147 * \return singly-linked list of modes or NULL if no modes found.
33148 */
33149 - struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
33150 + struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
33151
33152 /**
33153 * Clean up driver-specific bits of the output
33154 */
33155 - void (*destroy) (struct intel_dvo_device *dvo);
33156 + void (* const destroy) (struct intel_dvo_device *dvo);
33157
33158 /**
33159 * Debugging hook to dump device registers to log file
33160 */
33161 - void (*dump_regs)(struct intel_dvo_device *dvo);
33162 + void (* const dump_regs)(struct intel_dvo_device *dvo);
33163 };
33164
33165 -extern struct intel_dvo_dev_ops sil164_ops;
33166 -extern struct intel_dvo_dev_ops ch7xxx_ops;
33167 -extern struct intel_dvo_dev_ops ivch_ops;
33168 -extern struct intel_dvo_dev_ops tfp410_ops;
33169 -extern struct intel_dvo_dev_ops ch7017_ops;
33170 +extern const struct intel_dvo_dev_ops sil164_ops;
33171 +extern const struct intel_dvo_dev_ops ch7xxx_ops;
33172 +extern const struct intel_dvo_dev_ops ivch_ops;
33173 +extern const struct intel_dvo_dev_ops tfp410_ops;
33174 +extern const struct intel_dvo_dev_ops ch7017_ops;
33175
33176 #endif /* _INTEL_DVO_H */
33177 diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
33178 index 621815b..499d82e 100644
33179 --- a/drivers/gpu/drm/i915/dvo_ch7017.c
33180 +++ b/drivers/gpu/drm/i915/dvo_ch7017.c
33181 @@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_dvo_device *dvo)
33182 }
33183 }
33184
33185 -struct intel_dvo_dev_ops ch7017_ops = {
33186 +const struct intel_dvo_dev_ops ch7017_ops = {
33187 .init = ch7017_init,
33188 .detect = ch7017_detect,
33189 .mode_valid = ch7017_mode_valid,
33190 diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
33191 index a9b8962..ac769ba 100644
33192 --- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
33193 +++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
33194 @@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_dvo_device *dvo)
33195 }
33196 }
33197
33198 -struct intel_dvo_dev_ops ch7xxx_ops = {
33199 +const struct intel_dvo_dev_ops ch7xxx_ops = {
33200 .init = ch7xxx_init,
33201 .detect = ch7xxx_detect,
33202 .mode_valid = ch7xxx_mode_valid,
33203 diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
33204 index aa176f9..ed2930c 100644
33205 --- a/drivers/gpu/drm/i915/dvo_ivch.c
33206 +++ b/drivers/gpu/drm/i915/dvo_ivch.c
33207 @@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
33208 }
33209 }
33210
33211 -struct intel_dvo_dev_ops ivch_ops= {
33212 +const struct intel_dvo_dev_ops ivch_ops= {
33213 .init = ivch_init,
33214 .dpms = ivch_dpms,
33215 .save = ivch_save,
33216 diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
33217 index e1c1f73..7dbebcf 100644
33218 --- a/drivers/gpu/drm/i915/dvo_sil164.c
33219 +++ b/drivers/gpu/drm/i915/dvo_sil164.c
33220 @@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_dvo_device *dvo)
33221 }
33222 }
33223
33224 -struct intel_dvo_dev_ops sil164_ops = {
33225 +const struct intel_dvo_dev_ops sil164_ops = {
33226 .init = sil164_init,
33227 .detect = sil164_detect,
33228 .mode_valid = sil164_mode_valid,
33229 diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
33230 index 16dce84..7e1b6f8 100644
33231 --- a/drivers/gpu/drm/i915/dvo_tfp410.c
33232 +++ b/drivers/gpu/drm/i915/dvo_tfp410.c
33233 @@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_dvo_device *dvo)
33234 }
33235 }
33236
33237 -struct intel_dvo_dev_ops tfp410_ops = {
33238 +const struct intel_dvo_dev_ops tfp410_ops = {
33239 .init = tfp410_init,
33240 .detect = tfp410_detect,
33241 .mode_valid = tfp410_mode_valid,
33242 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
33243 index 7e859d6..7d1cf2b 100644
33244 --- a/drivers/gpu/drm/i915/i915_debugfs.c
33245 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
33246 @@ -192,7 +192,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
33247 I915_READ(GTIMR));
33248 }
33249 seq_printf(m, "Interrupts received: %d\n",
33250 - atomic_read(&dev_priv->irq_received));
33251 + atomic_read_unchecked(&dev_priv->irq_received));
33252 if (dev_priv->hw_status_page != NULL) {
33253 seq_printf(m, "Current sequence: %d\n",
33254 i915_get_gem_seqno(dev));
33255 diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
33256 index 5449239..7e4f68d 100644
33257 --- a/drivers/gpu/drm/i915/i915_drv.c
33258 +++ b/drivers/gpu/drm/i915/i915_drv.c
33259 @@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
33260 return i915_resume(dev);
33261 }
33262
33263 -static struct vm_operations_struct i915_gem_vm_ops = {
33264 +static const struct vm_operations_struct i915_gem_vm_ops = {
33265 .fault = i915_gem_fault,
33266 .open = drm_gem_vm_open,
33267 .close = drm_gem_vm_close,
33268 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
33269 index 97163f7..c24c7c7 100644
33270 --- a/drivers/gpu/drm/i915/i915_drv.h
33271 +++ b/drivers/gpu/drm/i915/i915_drv.h
33272 @@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
33273 /* display clock increase/decrease */
33274 /* pll clock increase/decrease */
33275 /* clock gating init */
33276 -};
33277 +} __no_const;
33278
33279 typedef struct drm_i915_private {
33280 struct drm_device *dev;
33281 @@ -197,7 +197,7 @@ typedef struct drm_i915_private {
33282 int page_flipping;
33283
33284 wait_queue_head_t irq_queue;
33285 - atomic_t irq_received;
33286 + atomic_unchecked_t irq_received;
33287 /** Protects user_irq_refcount and irq_mask_reg */
33288 spinlock_t user_irq_lock;
33289 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
33290 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
33291 index 27a3074..eb3f959 100644
33292 --- a/drivers/gpu/drm/i915/i915_gem.c
33293 +++ b/drivers/gpu/drm/i915/i915_gem.c
33294 @@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
33295
33296 args->aper_size = dev->gtt_total;
33297 args->aper_available_size = (args->aper_size -
33298 - atomic_read(&dev->pin_memory));
33299 + atomic_read_unchecked(&dev->pin_memory));
33300
33301 return 0;
33302 }
33303 @@ -2058,7 +2058,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
33304
33305 if (obj_priv->gtt_space) {
33306 atomic_dec(&dev->gtt_count);
33307 - atomic_sub(obj->size, &dev->gtt_memory);
33308 + atomic_sub_unchecked(obj->size, &dev->gtt_memory);
33309
33310 drm_mm_put_block(obj_priv->gtt_space);
33311 obj_priv->gtt_space = NULL;
33312 @@ -2701,7 +2701,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
33313 goto search_free;
33314 }
33315 atomic_inc(&dev->gtt_count);
33316 - atomic_add(obj->size, &dev->gtt_memory);
33317 + atomic_add_unchecked(obj->size, &dev->gtt_memory);
33318
33319 /* Assert that the object is not currently in any GPU domain. As it
33320 * wasn't in the GTT, there shouldn't be any way it could have been in
33321 @@ -3755,9 +3755,9 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
33322 "%d/%d gtt bytes\n",
33323 atomic_read(&dev->object_count),
33324 atomic_read(&dev->pin_count),
33325 - atomic_read(&dev->object_memory),
33326 - atomic_read(&dev->pin_memory),
33327 - atomic_read(&dev->gtt_memory),
33328 + atomic_read_unchecked(&dev->object_memory),
33329 + atomic_read_unchecked(&dev->pin_memory),
33330 + atomic_read_unchecked(&dev->gtt_memory),
33331 dev->gtt_total);
33332 }
33333 goto err;
33334 @@ -3989,7 +3989,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
33335 */
33336 if (obj_priv->pin_count == 1) {
33337 atomic_inc(&dev->pin_count);
33338 - atomic_add(obj->size, &dev->pin_memory);
33339 + atomic_add_unchecked(obj->size, &dev->pin_memory);
33340 if (!obj_priv->active &&
33341 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
33342 !list_empty(&obj_priv->list))
33343 @@ -4022,7 +4022,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
33344 list_move_tail(&obj_priv->list,
33345 &dev_priv->mm.inactive_list);
33346 atomic_dec(&dev->pin_count);
33347 - atomic_sub(obj->size, &dev->pin_memory);
33348 + atomic_sub_unchecked(obj->size, &dev->pin_memory);
33349 }
33350 i915_verify_inactive(dev, __FILE__, __LINE__);
33351 }
33352 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
33353 index 63f28ad..f5469da 100644
33354 --- a/drivers/gpu/drm/i915/i915_irq.c
33355 +++ b/drivers/gpu/drm/i915/i915_irq.c
33356 @@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
33357 int irq_received;
33358 int ret = IRQ_NONE;
33359
33360 - atomic_inc(&dev_priv->irq_received);
33361 + atomic_inc_unchecked(&dev_priv->irq_received);
33362
33363 if (IS_IGDNG(dev))
33364 return igdng_irq_handler(dev);
33365 @@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
33366 {
33367 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
33368
33369 - atomic_set(&dev_priv->irq_received, 0);
33370 + atomic_set_unchecked(&dev_priv->irq_received, 0);
33371
33372 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
33373 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
33374 diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
33375 index 5d9c6a7..d1b0e29 100644
33376 --- a/drivers/gpu/drm/i915/intel_sdvo.c
33377 +++ b/drivers/gpu/drm/i915/intel_sdvo.c
33378 @@ -2795,7 +2795,9 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
33379 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
33380
33381 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
33382 - intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
33383 + pax_open_kernel();
33384 + *(void **)&intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
33385 + pax_close_kernel();
33386
33387 /* Read the regs to test if we can talk to the device */
33388 for (i = 0; i < 0x40; i++) {
33389 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
33390 index be6c6b9..8615d9c 100644
33391 --- a/drivers/gpu/drm/mga/mga_drv.h
33392 +++ b/drivers/gpu/drm/mga/mga_drv.h
33393 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
33394 u32 clear_cmd;
33395 u32 maccess;
33396
33397 - atomic_t vbl_received; /**< Number of vblanks received. */
33398 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
33399 wait_queue_head_t fence_queue;
33400 - atomic_t last_fence_retired;
33401 + atomic_unchecked_t last_fence_retired;
33402 u32 next_fence_to_post;
33403
33404 unsigned int fb_cpp;
33405 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
33406 index daa6041..a28a5da 100644
33407 --- a/drivers/gpu/drm/mga/mga_irq.c
33408 +++ b/drivers/gpu/drm/mga/mga_irq.c
33409 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
33410 if (crtc != 0)
33411 return 0;
33412
33413 - return atomic_read(&dev_priv->vbl_received);
33414 + return atomic_read_unchecked(&dev_priv->vbl_received);
33415 }
33416
33417
33418 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
33419 /* VBLANK interrupt */
33420 if (status & MGA_VLINEPEN) {
33421 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
33422 - atomic_inc(&dev_priv->vbl_received);
33423 + atomic_inc_unchecked(&dev_priv->vbl_received);
33424 drm_handle_vblank(dev, 0);
33425 handled = 1;
33426 }
33427 @@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
33428 MGA_WRITE(MGA_PRIMEND, prim_end);
33429 }
33430
33431 - atomic_inc(&dev_priv->last_fence_retired);
33432 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
33433 DRM_WAKEUP(&dev_priv->fence_queue);
33434 handled = 1;
33435 }
33436 @@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
33437 * using fences.
33438 */
33439 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
33440 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
33441 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
33442 - *sequence) <= (1 << 23)));
33443
33444 *sequence = cur_fence;
33445 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
33446 index 4c39a40..b22a9ea 100644
33447 --- a/drivers/gpu/drm/r128/r128_cce.c
33448 +++ b/drivers/gpu/drm/r128/r128_cce.c
33449 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
33450
33451 /* GH: Simple idle check.
33452 */
33453 - atomic_set(&dev_priv->idle_count, 0);
33454 + atomic_set_unchecked(&dev_priv->idle_count, 0);
33455
33456 /* We don't support anything other than bus-mastering ring mode,
33457 * but the ring can be in either AGP or PCI space for the ring
33458 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
33459 index 3c60829..4faf484 100644
33460 --- a/drivers/gpu/drm/r128/r128_drv.h
33461 +++ b/drivers/gpu/drm/r128/r128_drv.h
33462 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
33463 int is_pci;
33464 unsigned long cce_buffers_offset;
33465
33466 - atomic_t idle_count;
33467 + atomic_unchecked_t idle_count;
33468
33469 int page_flipping;
33470 int current_page;
33471 u32 crtc_offset;
33472 u32 crtc_offset_cntl;
33473
33474 - atomic_t vbl_received;
33475 + atomic_unchecked_t vbl_received;
33476
33477 u32 color_fmt;
33478 unsigned int front_offset;
33479 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
33480 index 69810fb..97bf17a 100644
33481 --- a/drivers/gpu/drm/r128/r128_irq.c
33482 +++ b/drivers/gpu/drm/r128/r128_irq.c
33483 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
33484 if (crtc != 0)
33485 return 0;
33486
33487 - return atomic_read(&dev_priv->vbl_received);
33488 + return atomic_read_unchecked(&dev_priv->vbl_received);
33489 }
33490
33491 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33492 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33493 /* VBLANK interrupt */
33494 if (status & R128_CRTC_VBLANK_INT) {
33495 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
33496 - atomic_inc(&dev_priv->vbl_received);
33497 + atomic_inc_unchecked(&dev_priv->vbl_received);
33498 drm_handle_vblank(dev, 0);
33499 return IRQ_HANDLED;
33500 }
33501 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
33502 index af2665c..51922d2 100644
33503 --- a/drivers/gpu/drm/r128/r128_state.c
33504 +++ b/drivers/gpu/drm/r128/r128_state.c
33505 @@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_private_t * dev_priv,
33506
33507 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
33508 {
33509 - if (atomic_read(&dev_priv->idle_count) == 0) {
33510 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
33511 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
33512 } else {
33513 - atomic_set(&dev_priv->idle_count, 0);
33514 + atomic_set_unchecked(&dev_priv->idle_count, 0);
33515 }
33516 }
33517
33518 diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
33519 index dd72b91..8644b3c 100644
33520 --- a/drivers/gpu/drm/radeon/atom.c
33521 +++ b/drivers/gpu/drm/radeon/atom.c
33522 @@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
33523 char name[512];
33524 int i;
33525
33526 + pax_track_stack();
33527 +
33528 ctx->card = card;
33529 ctx->bios = bios;
33530
33531 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
33532 index 0d79577..efaa7a5 100644
33533 --- a/drivers/gpu/drm/radeon/mkregtable.c
33534 +++ b/drivers/gpu/drm/radeon/mkregtable.c
33535 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
33536 regex_t mask_rex;
33537 regmatch_t match[4];
33538 char buf[1024];
33539 - size_t end;
33540 + long end;
33541 int len;
33542 int done = 0;
33543 int r;
33544 unsigned o;
33545 struct offset *offset;
33546 char last_reg_s[10];
33547 - int last_reg;
33548 + unsigned long last_reg;
33549
33550 if (regcomp
33551 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
33552 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
33553 index 6735213..38c2c67 100644
33554 --- a/drivers/gpu/drm/radeon/radeon.h
33555 +++ b/drivers/gpu/drm/radeon/radeon.h
33556 @@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device *rdev);
33557 */
33558 struct radeon_fence_driver {
33559 uint32_t scratch_reg;
33560 - atomic_t seq;
33561 + atomic_unchecked_t seq;
33562 uint32_t last_seq;
33563 unsigned long count_timeout;
33564 wait_queue_head_t queue;
33565 @@ -640,7 +640,7 @@ struct radeon_asic {
33566 uint32_t offset, uint32_t obj_size);
33567 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
33568 void (*bandwidth_update)(struct radeon_device *rdev);
33569 -};
33570 +} __no_const;
33571
33572 /*
33573 * Asic structures
33574 diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
33575 index 4e928b9..d8b6008 100644
33576 --- a/drivers/gpu/drm/radeon/radeon_atombios.c
33577 +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
33578 @@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
33579 bool linkb;
33580 struct radeon_i2c_bus_rec ddc_bus;
33581
33582 + pax_track_stack();
33583 +
33584 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
33585
33586 if (data_offset == 0)
33587 @@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_object_id(struct drm_device *dev,
33588 }
33589 }
33590
33591 -struct bios_connector {
33592 +static struct bios_connector {
33593 bool valid;
33594 uint16_t line_mux;
33595 uint16_t devices;
33596 int connector_type;
33597 struct radeon_i2c_bus_rec ddc_bus;
33598 -};
33599 +} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
33600
33601 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
33602 drm_device
33603 @@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
33604 uint8_t dac;
33605 union atom_supported_devices *supported_devices;
33606 int i, j;
33607 - struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
33608
33609 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
33610
33611 diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
33612 index 083a181..ccccae0 100644
33613 --- a/drivers/gpu/drm/radeon/radeon_display.c
33614 +++ b/drivers/gpu/drm/radeon/radeon_display.c
33615 @@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
33616
33617 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
33618 error = freq - current_freq;
33619 - error = error < 0 ? 0xffffffff : error;
33620 + error = (int32_t)error < 0 ? 0xffffffff : error;
33621 } else
33622 error = abs(current_freq - freq);
33623 vco_diff = abs(vco - best_vco);
33624 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
33625 index 76e4070..193fa7f 100644
33626 --- a/drivers/gpu/drm/radeon/radeon_drv.h
33627 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
33628 @@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
33629
33630 /* SW interrupt */
33631 wait_queue_head_t swi_queue;
33632 - atomic_t swi_emitted;
33633 + atomic_unchecked_t swi_emitted;
33634 int vblank_crtc;
33635 uint32_t irq_enable_reg;
33636 uint32_t r500_disp_irq_reg;
33637 diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
33638 index 3beb26d..6ce9c4a 100644
33639 --- a/drivers/gpu/drm/radeon/radeon_fence.c
33640 +++ b/drivers/gpu/drm/radeon/radeon_fence.c
33641 @@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
33642 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
33643 return 0;
33644 }
33645 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
33646 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
33647 if (!rdev->cp.ready) {
33648 /* FIXME: cp is not running assume everythings is done right
33649 * away
33650 @@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
33651 return r;
33652 }
33653 WREG32(rdev->fence_drv.scratch_reg, 0);
33654 - atomic_set(&rdev->fence_drv.seq, 0);
33655 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
33656 INIT_LIST_HEAD(&rdev->fence_drv.created);
33657 INIT_LIST_HEAD(&rdev->fence_drv.emited);
33658 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
33659 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
33660 index a1bf11d..4a123c0 100644
33661 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
33662 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
33663 @@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
33664 request = compat_alloc_user_space(sizeof(*request));
33665 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
33666 || __put_user(req32.param, &request->param)
33667 - || __put_user((void __user *)(unsigned long)req32.value,
33668 + || __put_user((unsigned long)req32.value,
33669 &request->value))
33670 return -EFAULT;
33671
33672 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
33673 index b79ecc4..8dab92d 100644
33674 --- a/drivers/gpu/drm/radeon/radeon_irq.c
33675 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
33676 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
33677 unsigned int ret;
33678 RING_LOCALS;
33679
33680 - atomic_inc(&dev_priv->swi_emitted);
33681 - ret = atomic_read(&dev_priv->swi_emitted);
33682 + atomic_inc_unchecked(&dev_priv->swi_emitted);
33683 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
33684
33685 BEGIN_RING(4);
33686 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
33687 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
33688 drm_radeon_private_t *dev_priv =
33689 (drm_radeon_private_t *) dev->dev_private;
33690
33691 - atomic_set(&dev_priv->swi_emitted, 0);
33692 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
33693 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
33694
33695 dev->max_vblank_count = 0x001fffff;
33696 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
33697 index 4747910..48ca4b3 100644
33698 --- a/drivers/gpu/drm/radeon/radeon_state.c
33699 +++ b/drivers/gpu/drm/radeon/radeon_state.c
33700 @@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
33701 {
33702 drm_radeon_private_t *dev_priv = dev->dev_private;
33703 drm_radeon_getparam_t *param = data;
33704 - int value;
33705 + int value = 0;
33706
33707 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
33708
33709 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
33710 index 1381e06..0e53b17 100644
33711 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
33712 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
33713 @@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_device *rdev)
33714 DRM_INFO("radeon: ttm finalized\n");
33715 }
33716
33717 -static struct vm_operations_struct radeon_ttm_vm_ops;
33718 -static const struct vm_operations_struct *ttm_vm_ops = NULL;
33719 -
33720 -static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33721 -{
33722 - struct ttm_buffer_object *bo;
33723 - int r;
33724 -
33725 - bo = (struct ttm_buffer_object *)vma->vm_private_data;
33726 - if (bo == NULL) {
33727 - return VM_FAULT_NOPAGE;
33728 - }
33729 - r = ttm_vm_ops->fault(vma, vmf);
33730 - return r;
33731 -}
33732 -
33733 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
33734 {
33735 struct drm_file *file_priv;
33736 struct radeon_device *rdev;
33737 - int r;
33738
33739 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
33740 return drm_mmap(filp, vma);
33741 @@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
33742
33743 file_priv = (struct drm_file *)filp->private_data;
33744 rdev = file_priv->minor->dev->dev_private;
33745 - if (rdev == NULL) {
33746 + if (!rdev)
33747 return -EINVAL;
33748 - }
33749 - r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
33750 - if (unlikely(r != 0)) {
33751 - return r;
33752 - }
33753 - if (unlikely(ttm_vm_ops == NULL)) {
33754 - ttm_vm_ops = vma->vm_ops;
33755 - radeon_ttm_vm_ops = *ttm_vm_ops;
33756 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
33757 - }
33758 - vma->vm_ops = &radeon_ttm_vm_ops;
33759 - return 0;
33760 + return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
33761 }
33762
33763
33764 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
33765 index b12ff76..0bd0c6e 100644
33766 --- a/drivers/gpu/drm/radeon/rs690.c
33767 +++ b/drivers/gpu/drm/radeon/rs690.c
33768 @@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
33769 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
33770 rdev->pm.sideport_bandwidth.full)
33771 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
33772 - read_delay_latency.full = rfixed_const(370 * 800 * 1000);
33773 + read_delay_latency.full = rfixed_const(800 * 1000);
33774 read_delay_latency.full = rfixed_div(read_delay_latency,
33775 rdev->pm.igp_sideport_mclk);
33776 + a.full = rfixed_const(370);
33777 + read_delay_latency.full = rfixed_mul(read_delay_latency, a);
33778 } else {
33779 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
33780 rdev->pm.k8_bandwidth.full)
33781 diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
33782 index 0ed436e..e6e7ce3 100644
33783 --- a/drivers/gpu/drm/ttm/ttm_bo.c
33784 +++ b/drivers/gpu/drm/ttm/ttm_bo.c
33785 @@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_attrs[] = {
33786 NULL
33787 };
33788
33789 -static struct sysfs_ops ttm_bo_global_ops = {
33790 +static const struct sysfs_ops ttm_bo_global_ops = {
33791 .show = &ttm_bo_global_show
33792 };
33793
33794 diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
33795 index 1c040d0..f9e4af8 100644
33796 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
33797 +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
33798 @@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33799 {
33800 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
33801 vma->vm_private_data;
33802 - struct ttm_bo_device *bdev = bo->bdev;
33803 + struct ttm_bo_device *bdev;
33804 unsigned long bus_base;
33805 unsigned long bus_offset;
33806 unsigned long bus_size;
33807 @@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33808 unsigned long address = (unsigned long)vmf->virtual_address;
33809 int retval = VM_FAULT_NOPAGE;
33810
33811 + if (!bo)
33812 + return VM_FAULT_NOPAGE;
33813 + bdev = bo->bdev;
33814 +
33815 /*
33816 * Work around locking order reversal in fault / nopfn
33817 * between mmap_sem and bo_reserve: Perform a trylock operation
33818 diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c
33819 index b170071..28ae90e 100644
33820 --- a/drivers/gpu/drm/ttm/ttm_global.c
33821 +++ b/drivers/gpu/drm/ttm/ttm_global.c
33822 @@ -36,7 +36,7 @@
33823 struct ttm_global_item {
33824 struct mutex mutex;
33825 void *object;
33826 - int refcount;
33827 + atomic_t refcount;
33828 };
33829
33830 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
33831 @@ -49,7 +49,7 @@ void ttm_global_init(void)
33832 struct ttm_global_item *item = &glob[i];
33833 mutex_init(&item->mutex);
33834 item->object = NULL;
33835 - item->refcount = 0;
33836 + atomic_set(&item->refcount, 0);
33837 }
33838 }
33839
33840 @@ -59,7 +59,7 @@ void ttm_global_release(void)
33841 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
33842 struct ttm_global_item *item = &glob[i];
33843 BUG_ON(item->object != NULL);
33844 - BUG_ON(item->refcount != 0);
33845 + BUG_ON(atomic_read(&item->refcount) != 0);
33846 }
33847 }
33848
33849 @@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
33850 void *object;
33851
33852 mutex_lock(&item->mutex);
33853 - if (item->refcount == 0) {
33854 + if (atomic_read(&item->refcount) == 0) {
33855 item->object = kzalloc(ref->size, GFP_KERNEL);
33856 if (unlikely(item->object == NULL)) {
33857 ret = -ENOMEM;
33858 @@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
33859 goto out_err;
33860
33861 }
33862 - ++item->refcount;
33863 + atomic_inc(&item->refcount);
33864 ref->object = item->object;
33865 object = item->object;
33866 mutex_unlock(&item->mutex);
33867 @@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_global_reference *ref)
33868 struct ttm_global_item *item = &glob[ref->global_type];
33869
33870 mutex_lock(&item->mutex);
33871 - BUG_ON(item->refcount == 0);
33872 + BUG_ON(atomic_read(&item->refcount) == 0);
33873 BUG_ON(ref->object != item->object);
33874 - if (--item->refcount == 0) {
33875 + if (atomic_dec_and_test(&item->refcount)) {
33876 ref->release(ref);
33877 item->object = NULL;
33878 }
33879 diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
33880 index 072c281..d8ef483 100644
33881 --- a/drivers/gpu/drm/ttm/ttm_memory.c
33882 +++ b/drivers/gpu/drm/ttm/ttm_memory.c
33883 @@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_attrs[] = {
33884 NULL
33885 };
33886
33887 -static struct sysfs_ops ttm_mem_zone_ops = {
33888 +static const struct sysfs_ops ttm_mem_zone_ops = {
33889 .show = &ttm_mem_zone_show,
33890 .store = &ttm_mem_zone_store
33891 };
33892 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
33893 index cafcb84..b8e66cc 100644
33894 --- a/drivers/gpu/drm/via/via_drv.h
33895 +++ b/drivers/gpu/drm/via/via_drv.h
33896 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
33897 typedef uint32_t maskarray_t[5];
33898
33899 typedef struct drm_via_irq {
33900 - atomic_t irq_received;
33901 + atomic_unchecked_t irq_received;
33902 uint32_t pending_mask;
33903 uint32_t enable_mask;
33904 wait_queue_head_t irq_queue;
33905 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
33906 struct timeval last_vblank;
33907 int last_vblank_valid;
33908 unsigned usec_per_vblank;
33909 - atomic_t vbl_received;
33910 + atomic_unchecked_t vbl_received;
33911 drm_via_state_t hc_state;
33912 char pci_buf[VIA_PCI_BUF_SIZE];
33913 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
33914 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
33915 index 5935b88..127a8a6 100644
33916 --- a/drivers/gpu/drm/via/via_irq.c
33917 +++ b/drivers/gpu/drm/via/via_irq.c
33918 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
33919 if (crtc != 0)
33920 return 0;
33921
33922 - return atomic_read(&dev_priv->vbl_received);
33923 + return atomic_read_unchecked(&dev_priv->vbl_received);
33924 }
33925
33926 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33927 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33928
33929 status = VIA_READ(VIA_REG_INTERRUPT);
33930 if (status & VIA_IRQ_VBLANK_PENDING) {
33931 - atomic_inc(&dev_priv->vbl_received);
33932 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
33933 + atomic_inc_unchecked(&dev_priv->vbl_received);
33934 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
33935 do_gettimeofday(&cur_vblank);
33936 if (dev_priv->last_vblank_valid) {
33937 dev_priv->usec_per_vblank =
33938 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33939 dev_priv->last_vblank = cur_vblank;
33940 dev_priv->last_vblank_valid = 1;
33941 }
33942 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
33943 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
33944 DRM_DEBUG("US per vblank is: %u\n",
33945 dev_priv->usec_per_vblank);
33946 }
33947 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33948
33949 for (i = 0; i < dev_priv->num_irqs; ++i) {
33950 if (status & cur_irq->pending_mask) {
33951 - atomic_inc(&cur_irq->irq_received);
33952 + atomic_inc_unchecked(&cur_irq->irq_received);
33953 DRM_WAKEUP(&cur_irq->irq_queue);
33954 handled = 1;
33955 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
33956 @@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
33957 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
33958 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
33959 masks[irq][4]));
33960 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
33961 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
33962 } else {
33963 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
33964 (((cur_irq_sequence =
33965 - atomic_read(&cur_irq->irq_received)) -
33966 + atomic_read_unchecked(&cur_irq->irq_received)) -
33967 *sequence) <= (1 << 23)));
33968 }
33969 *sequence = cur_irq_sequence;
33970 @@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct drm_device * dev)
33971 }
33972
33973 for (i = 0; i < dev_priv->num_irqs; ++i) {
33974 - atomic_set(&cur_irq->irq_received, 0);
33975 + atomic_set_unchecked(&cur_irq->irq_received, 0);
33976 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
33977 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
33978 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
33979 @@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
33980 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
33981 case VIA_IRQ_RELATIVE:
33982 irqwait->request.sequence +=
33983 - atomic_read(&cur_irq->irq_received);
33984 + atomic_read_unchecked(&cur_irq->irq_received);
33985 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
33986 case VIA_IRQ_ABSOLUTE:
33987 break;
33988 diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
33989 index aa8688d..6a0140c 100644
33990 --- a/drivers/gpu/vga/vgaarb.c
33991 +++ b/drivers/gpu/vga/vgaarb.c
33992 @@ -894,14 +894,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
33993 uc = &priv->cards[i];
33994 }
33995
33996 - if (!uc)
33997 - return -EINVAL;
33998 + if (!uc) {
33999 + ret_val = -EINVAL;
34000 + goto done;
34001 + }
34002
34003 - if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0)
34004 - return -EINVAL;
34005 + if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
34006 + ret_val = -EINVAL;
34007 + goto done;
34008 + }
34009
34010 - if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0)
34011 - return -EINVAL;
34012 + if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
34013 + ret_val = -EINVAL;
34014 + goto done;
34015 + }
34016
34017 vga_put(pdev, io_state);
34018
34019 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
34020 index 11f8069..4783396 100644
34021 --- a/drivers/hid/hid-core.c
34022 +++ b/drivers/hid/hid-core.c
34023 @@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device *hdev)
34024
34025 int hid_add_device(struct hid_device *hdev)
34026 {
34027 - static atomic_t id = ATOMIC_INIT(0);
34028 + static atomic_unchecked_t id = ATOMIC_INIT(0);
34029 int ret;
34030
34031 if (WARN_ON(hdev->status & HID_STAT_ADDED))
34032 @@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hdev)
34033 /* XXX hack, any other cleaner solution after the driver core
34034 * is converted to allow more than 20 bytes as the device name? */
34035 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
34036 - hdev->vendor, hdev->product, atomic_inc_return(&id));
34037 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
34038
34039 ret = device_add(&hdev->dev);
34040 if (!ret)
34041 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
34042 index 8b6ee24..70f657d 100644
34043 --- a/drivers/hid/usbhid/hiddev.c
34044 +++ b/drivers/hid/usbhid/hiddev.c
34045 @@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
34046 return put_user(HID_VERSION, (int __user *)arg);
34047
34048 case HIDIOCAPPLICATION:
34049 - if (arg < 0 || arg >= hid->maxapplication)
34050 + if (arg >= hid->maxapplication)
34051 return -EINVAL;
34052
34053 for (i = 0; i < hid->maxcollection; i++)
34054 diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
34055 index 5d5ed69..f40533e 100644
34056 --- a/drivers/hwmon/lis3lv02d.c
34057 +++ b/drivers/hwmon/lis3lv02d.c
34058 @@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
34059 * the lid is closed. This leads to interrupts as soon as a little move
34060 * is done.
34061 */
34062 - atomic_inc(&lis3_dev.count);
34063 + atomic_inc_unchecked(&lis3_dev.count);
34064
34065 wake_up_interruptible(&lis3_dev.misc_wait);
34066 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
34067 @@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
34068 if (test_and_set_bit(0, &lis3_dev.misc_opened))
34069 return -EBUSY; /* already open */
34070
34071 - atomic_set(&lis3_dev.count, 0);
34072 + atomic_set_unchecked(&lis3_dev.count, 0);
34073
34074 /*
34075 * The sensor can generate interrupts for free-fall and direction
34076 @@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
34077 add_wait_queue(&lis3_dev.misc_wait, &wait);
34078 while (true) {
34079 set_current_state(TASK_INTERRUPTIBLE);
34080 - data = atomic_xchg(&lis3_dev.count, 0);
34081 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
34082 if (data)
34083 break;
34084
34085 @@ -244,7 +244,7 @@ out:
34086 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
34087 {
34088 poll_wait(file, &lis3_dev.misc_wait, wait);
34089 - if (atomic_read(&lis3_dev.count))
34090 + if (atomic_read_unchecked(&lis3_dev.count))
34091 return POLLIN | POLLRDNORM;
34092 return 0;
34093 }
34094 diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
34095 index 7cdd76f..fe0efdf 100644
34096 --- a/drivers/hwmon/lis3lv02d.h
34097 +++ b/drivers/hwmon/lis3lv02d.h
34098 @@ -201,7 +201,7 @@ struct lis3lv02d {
34099
34100 struct input_polled_dev *idev; /* input device */
34101 struct platform_device *pdev; /* platform device */
34102 - atomic_t count; /* interrupt count after last read */
34103 + atomic_unchecked_t count; /* interrupt count after last read */
34104 int xcalib; /* calibrated null value for x */
34105 int ycalib; /* calibrated null value for y */
34106 int zcalib; /* calibrated null value for z */
34107 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
34108 index 2040507..706ec1e 100644
34109 --- a/drivers/hwmon/sht15.c
34110 +++ b/drivers/hwmon/sht15.c
34111 @@ -112,7 +112,7 @@ struct sht15_data {
34112 int supply_uV;
34113 int supply_uV_valid;
34114 struct work_struct update_supply_work;
34115 - atomic_t interrupt_handled;
34116 + atomic_unchecked_t interrupt_handled;
34117 };
34118
34119 /**
34120 @@ -245,13 +245,13 @@ static inline int sht15_update_single_val(struct sht15_data *data,
34121 return ret;
34122
34123 gpio_direction_input(data->pdata->gpio_data);
34124 - atomic_set(&data->interrupt_handled, 0);
34125 + atomic_set_unchecked(&data->interrupt_handled, 0);
34126
34127 enable_irq(gpio_to_irq(data->pdata->gpio_data));
34128 if (gpio_get_value(data->pdata->gpio_data) == 0) {
34129 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
34130 /* Only relevant if the interrupt hasn't occured. */
34131 - if (!atomic_read(&data->interrupt_handled))
34132 + if (!atomic_read_unchecked(&data->interrupt_handled))
34133 schedule_work(&data->read_work);
34134 }
34135 ret = wait_event_timeout(data->wait_queue,
34136 @@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
34137 struct sht15_data *data = d;
34138 /* First disable the interrupt */
34139 disable_irq_nosync(irq);
34140 - atomic_inc(&data->interrupt_handled);
34141 + atomic_inc_unchecked(&data->interrupt_handled);
34142 /* Then schedule a reading work struct */
34143 if (data->flag != SHT15_READING_NOTHING)
34144 schedule_work(&data->read_work);
34145 @@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
34146 here as could have gone low in meantime so verify
34147 it hasn't!
34148 */
34149 - atomic_set(&data->interrupt_handled, 0);
34150 + atomic_set_unchecked(&data->interrupt_handled, 0);
34151 enable_irq(gpio_to_irq(data->pdata->gpio_data));
34152 /* If still not occured or another handler has been scheduled */
34153 if (gpio_get_value(data->pdata->gpio_data)
34154 - || atomic_read(&data->interrupt_handled))
34155 + || atomic_read_unchecked(&data->interrupt_handled))
34156 return;
34157 }
34158 /* Read the data back from the device */
34159 diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
34160 index 97851c5..cb40626 100644
34161 --- a/drivers/hwmon/w83791d.c
34162 +++ b/drivers/hwmon/w83791d.c
34163 @@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_client *client, int kind,
34164 struct i2c_board_info *info);
34165 static int w83791d_remove(struct i2c_client *client);
34166
34167 -static int w83791d_read(struct i2c_client *client, u8 register);
34168 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
34169 +static int w83791d_read(struct i2c_client *client, u8 reg);
34170 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
34171 static struct w83791d_data *w83791d_update_device(struct device *dev);
34172
34173 #ifdef DEBUG
34174 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
34175 index 378fcb5..5e91fa8 100644
34176 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
34177 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
34178 @@ -43,7 +43,7 @@
34179 extern struct i2c_adapter amd756_smbus;
34180
34181 static struct i2c_adapter *s4882_adapter;
34182 -static struct i2c_algorithm *s4882_algo;
34183 +static i2c_algorithm_no_const *s4882_algo;
34184
34185 /* Wrapper access functions for multiplexed SMBus */
34186 static DEFINE_MUTEX(amd756_lock);
34187 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
34188 index 29015eb..af2d8e9 100644
34189 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
34190 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
34191 @@ -41,7 +41,7 @@
34192 extern struct i2c_adapter *nforce2_smbus;
34193
34194 static struct i2c_adapter *s4985_adapter;
34195 -static struct i2c_algorithm *s4985_algo;
34196 +static i2c_algorithm_no_const *s4985_algo;
34197
34198 /* Wrapper access functions for multiplexed SMBus */
34199 static DEFINE_MUTEX(nforce2_lock);
34200 diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
34201 index 878f8ec..12376fc 100644
34202 --- a/drivers/ide/aec62xx.c
34203 +++ b/drivers/ide/aec62xx.c
34204 @@ -180,7 +180,7 @@ static const struct ide_port_ops atp86x_port_ops = {
34205 .cable_detect = atp86x_cable_detect,
34206 };
34207
34208 -static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
34209 +static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
34210 { /* 0: AEC6210 */
34211 .name = DRV_NAME,
34212 .init_chipset = init_chipset_aec62xx,
34213 diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
34214 index e59b6de..4b4fc65 100644
34215 --- a/drivers/ide/alim15x3.c
34216 +++ b/drivers/ide/alim15x3.c
34217 @@ -509,7 +509,7 @@ static const struct ide_dma_ops ali_dma_ops = {
34218 .dma_sff_read_status = ide_dma_sff_read_status,
34219 };
34220
34221 -static const struct ide_port_info ali15x3_chipset __devinitdata = {
34222 +static const struct ide_port_info ali15x3_chipset __devinitconst = {
34223 .name = DRV_NAME,
34224 .init_chipset = init_chipset_ali15x3,
34225 .init_hwif = init_hwif_ali15x3,
34226 diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
34227 index 628cd2e..087a414 100644
34228 --- a/drivers/ide/amd74xx.c
34229 +++ b/drivers/ide/amd74xx.c
34230 @@ -221,7 +221,7 @@ static const struct ide_port_ops amd_port_ops = {
34231 .udma_mask = udma, \
34232 }
34233
34234 -static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
34235 +static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
34236 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
34237 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
34238 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
34239 diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
34240 index 837322b..837fd71 100644
34241 --- a/drivers/ide/atiixp.c
34242 +++ b/drivers/ide/atiixp.c
34243 @@ -137,7 +137,7 @@ static const struct ide_port_ops atiixp_port_ops = {
34244 .cable_detect = atiixp_cable_detect,
34245 };
34246
34247 -static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
34248 +static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
34249 { /* 0: IXP200/300/400/700 */
34250 .name = DRV_NAME,
34251 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
34252 diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
34253 index ca0c46f..d55318a 100644
34254 --- a/drivers/ide/cmd64x.c
34255 +++ b/drivers/ide/cmd64x.c
34256 @@ -372,7 +372,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
34257 .dma_sff_read_status = ide_dma_sff_read_status,
34258 };
34259
34260 -static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
34261 +static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
34262 { /* 0: CMD643 */
34263 .name = DRV_NAME,
34264 .init_chipset = init_chipset_cmd64x,
34265 diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
34266 index 09f98ed..cebc5bc 100644
34267 --- a/drivers/ide/cs5520.c
34268 +++ b/drivers/ide/cs5520.c
34269 @@ -93,7 +93,7 @@ static const struct ide_port_ops cs5520_port_ops = {
34270 .set_dma_mode = cs5520_set_dma_mode,
34271 };
34272
34273 -static const struct ide_port_info cyrix_chipset __devinitdata = {
34274 +static const struct ide_port_info cyrix_chipset __devinitconst = {
34275 .name = DRV_NAME,
34276 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
34277 .port_ops = &cs5520_port_ops,
34278 diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
34279 index 40bf05e..7d58ca0 100644
34280 --- a/drivers/ide/cs5530.c
34281 +++ b/drivers/ide/cs5530.c
34282 @@ -244,7 +244,7 @@ static const struct ide_port_ops cs5530_port_ops = {
34283 .udma_filter = cs5530_udma_filter,
34284 };
34285
34286 -static const struct ide_port_info cs5530_chipset __devinitdata = {
34287 +static const struct ide_port_info cs5530_chipset __devinitconst = {
34288 .name = DRV_NAME,
34289 .init_chipset = init_chipset_cs5530,
34290 .init_hwif = init_hwif_cs5530,
34291 diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
34292 index 983d957..53e6172 100644
34293 --- a/drivers/ide/cs5535.c
34294 +++ b/drivers/ide/cs5535.c
34295 @@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
34296 .cable_detect = cs5535_cable_detect,
34297 };
34298
34299 -static const struct ide_port_info cs5535_chipset __devinitdata = {
34300 +static const struct ide_port_info cs5535_chipset __devinitconst = {
34301 .name = DRV_NAME,
34302 .port_ops = &cs5535_port_ops,
34303 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
34304 diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
34305 index 74fc540..8e933d8 100644
34306 --- a/drivers/ide/cy82c693.c
34307 +++ b/drivers/ide/cy82c693.c
34308 @@ -288,7 +288,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
34309 .set_dma_mode = cy82c693_set_dma_mode,
34310 };
34311
34312 -static const struct ide_port_info cy82c693_chipset __devinitdata = {
34313 +static const struct ide_port_info cy82c693_chipset __devinitconst = {
34314 .name = DRV_NAME,
34315 .init_iops = init_iops_cy82c693,
34316 .port_ops = &cy82c693_port_ops,
34317 diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
34318 index 7ce68ef..e78197d 100644
34319 --- a/drivers/ide/hpt366.c
34320 +++ b/drivers/ide/hpt366.c
34321 @@ -507,7 +507,7 @@ static struct hpt_timings hpt37x_timings = {
34322 }
34323 };
34324
34325 -static const struct hpt_info hpt36x __devinitdata = {
34326 +static const struct hpt_info hpt36x __devinitconst = {
34327 .chip_name = "HPT36x",
34328 .chip_type = HPT36x,
34329 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
34330 @@ -515,7 +515,7 @@ static const struct hpt_info hpt36x __devinitdata = {
34331 .timings = &hpt36x_timings
34332 };
34333
34334 -static const struct hpt_info hpt370 __devinitdata = {
34335 +static const struct hpt_info hpt370 __devinitconst = {
34336 .chip_name = "HPT370",
34337 .chip_type = HPT370,
34338 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
34339 @@ -523,7 +523,7 @@ static const struct hpt_info hpt370 __devinitdata = {
34340 .timings = &hpt37x_timings
34341 };
34342
34343 -static const struct hpt_info hpt370a __devinitdata = {
34344 +static const struct hpt_info hpt370a __devinitconst = {
34345 .chip_name = "HPT370A",
34346 .chip_type = HPT370A,
34347 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
34348 @@ -531,7 +531,7 @@ static const struct hpt_info hpt370a __devinitdata = {
34349 .timings = &hpt37x_timings
34350 };
34351
34352 -static const struct hpt_info hpt374 __devinitdata = {
34353 +static const struct hpt_info hpt374 __devinitconst = {
34354 .chip_name = "HPT374",
34355 .chip_type = HPT374,
34356 .udma_mask = ATA_UDMA5,
34357 @@ -539,7 +539,7 @@ static const struct hpt_info hpt374 __devinitdata = {
34358 .timings = &hpt37x_timings
34359 };
34360
34361 -static const struct hpt_info hpt372 __devinitdata = {
34362 +static const struct hpt_info hpt372 __devinitconst = {
34363 .chip_name = "HPT372",
34364 .chip_type = HPT372,
34365 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34366 @@ -547,7 +547,7 @@ static const struct hpt_info hpt372 __devinitdata = {
34367 .timings = &hpt37x_timings
34368 };
34369
34370 -static const struct hpt_info hpt372a __devinitdata = {
34371 +static const struct hpt_info hpt372a __devinitconst = {
34372 .chip_name = "HPT372A",
34373 .chip_type = HPT372A,
34374 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34375 @@ -555,7 +555,7 @@ static const struct hpt_info hpt372a __devinitdata = {
34376 .timings = &hpt37x_timings
34377 };
34378
34379 -static const struct hpt_info hpt302 __devinitdata = {
34380 +static const struct hpt_info hpt302 __devinitconst = {
34381 .chip_name = "HPT302",
34382 .chip_type = HPT302,
34383 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34384 @@ -563,7 +563,7 @@ static const struct hpt_info hpt302 __devinitdata = {
34385 .timings = &hpt37x_timings
34386 };
34387
34388 -static const struct hpt_info hpt371 __devinitdata = {
34389 +static const struct hpt_info hpt371 __devinitconst = {
34390 .chip_name = "HPT371",
34391 .chip_type = HPT371,
34392 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34393 @@ -571,7 +571,7 @@ static const struct hpt_info hpt371 __devinitdata = {
34394 .timings = &hpt37x_timings
34395 };
34396
34397 -static const struct hpt_info hpt372n __devinitdata = {
34398 +static const struct hpt_info hpt372n __devinitconst = {
34399 .chip_name = "HPT372N",
34400 .chip_type = HPT372N,
34401 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34402 @@ -579,7 +579,7 @@ static const struct hpt_info hpt372n __devinitdata = {
34403 .timings = &hpt37x_timings
34404 };
34405
34406 -static const struct hpt_info hpt302n __devinitdata = {
34407 +static const struct hpt_info hpt302n __devinitconst = {
34408 .chip_name = "HPT302N",
34409 .chip_type = HPT302N,
34410 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34411 @@ -587,7 +587,7 @@ static const struct hpt_info hpt302n __devinitdata = {
34412 .timings = &hpt37x_timings
34413 };
34414
34415 -static const struct hpt_info hpt371n __devinitdata = {
34416 +static const struct hpt_info hpt371n __devinitconst = {
34417 .chip_name = "HPT371N",
34418 .chip_type = HPT371N,
34419 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34420 @@ -1422,7 +1422,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
34421 .dma_sff_read_status = ide_dma_sff_read_status,
34422 };
34423
34424 -static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
34425 +static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
34426 { /* 0: HPT36x */
34427 .name = DRV_NAME,
34428 .init_chipset = init_chipset_hpt366,
34429 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
34430 index 2de76cc..74186a1 100644
34431 --- a/drivers/ide/ide-cd.c
34432 +++ b/drivers/ide/ide-cd.c
34433 @@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
34434 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
34435 if ((unsigned long)buf & alignment
34436 || blk_rq_bytes(rq) & q->dma_pad_mask
34437 - || object_is_on_stack(buf))
34438 + || object_starts_on_stack(buf))
34439 drive->dma = 0;
34440 }
34441 }
34442 diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
34443 index fefbdfc..62ff465 100644
34444 --- a/drivers/ide/ide-floppy.c
34445 +++ b/drivers/ide/ide-floppy.c
34446 @@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
34447 u8 pc_buf[256], header_len, desc_cnt;
34448 int i, rc = 1, blocks, length;
34449
34450 + pax_track_stack();
34451 +
34452 ide_debug_log(IDE_DBG_FUNC, "enter");
34453
34454 drive->bios_cyl = 0;
34455 diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
34456 index 39d4e01..11538ce 100644
34457 --- a/drivers/ide/ide-pci-generic.c
34458 +++ b/drivers/ide/ide-pci-generic.c
34459 @@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
34460 .udma_mask = ATA_UDMA6, \
34461 }
34462
34463 -static const struct ide_port_info generic_chipsets[] __devinitdata = {
34464 +static const struct ide_port_info generic_chipsets[] __devinitconst = {
34465 /* 0: Unknown */
34466 DECLARE_GENERIC_PCI_DEV(0),
34467
34468 diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
34469 index 0d266a5..aaca790 100644
34470 --- a/drivers/ide/it8172.c
34471 +++ b/drivers/ide/it8172.c
34472 @@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
34473 .set_dma_mode = it8172_set_dma_mode,
34474 };
34475
34476 -static const struct ide_port_info it8172_port_info __devinitdata = {
34477 +static const struct ide_port_info it8172_port_info __devinitconst = {
34478 .name = DRV_NAME,
34479 .port_ops = &it8172_port_ops,
34480 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
34481 diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
34482 index 4797616..4be488a 100644
34483 --- a/drivers/ide/it8213.c
34484 +++ b/drivers/ide/it8213.c
34485 @@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
34486 .cable_detect = it8213_cable_detect,
34487 };
34488
34489 -static const struct ide_port_info it8213_chipset __devinitdata = {
34490 +static const struct ide_port_info it8213_chipset __devinitconst = {
34491 .name = DRV_NAME,
34492 .enablebits = { {0x41, 0x80, 0x80} },
34493 .port_ops = &it8213_port_ops,
34494 diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
34495 index 51aa745..146ee60 100644
34496 --- a/drivers/ide/it821x.c
34497 +++ b/drivers/ide/it821x.c
34498 @@ -627,7 +627,7 @@ static const struct ide_port_ops it821x_port_ops = {
34499 .cable_detect = it821x_cable_detect,
34500 };
34501
34502 -static const struct ide_port_info it821x_chipset __devinitdata = {
34503 +static const struct ide_port_info it821x_chipset __devinitconst = {
34504 .name = DRV_NAME,
34505 .init_chipset = init_chipset_it821x,
34506 .init_hwif = init_hwif_it821x,
34507 diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
34508 index bf2be64..9270098 100644
34509 --- a/drivers/ide/jmicron.c
34510 +++ b/drivers/ide/jmicron.c
34511 @@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
34512 .cable_detect = jmicron_cable_detect,
34513 };
34514
34515 -static const struct ide_port_info jmicron_chipset __devinitdata = {
34516 +static const struct ide_port_info jmicron_chipset __devinitconst = {
34517 .name = DRV_NAME,
34518 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
34519 .port_ops = &jmicron_port_ops,
34520 diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
34521 index 95327a2..73f78d8 100644
34522 --- a/drivers/ide/ns87415.c
34523 +++ b/drivers/ide/ns87415.c
34524 @@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
34525 .dma_sff_read_status = superio_dma_sff_read_status,
34526 };
34527
34528 -static const struct ide_port_info ns87415_chipset __devinitdata = {
34529 +static const struct ide_port_info ns87415_chipset __devinitconst = {
34530 .name = DRV_NAME,
34531 .init_hwif = init_hwif_ns87415,
34532 .tp_ops = &ns87415_tp_ops,
34533 diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
34534 index f1d70d6..e1de05b 100644
34535 --- a/drivers/ide/opti621.c
34536 +++ b/drivers/ide/opti621.c
34537 @@ -202,7 +202,7 @@ static const struct ide_port_ops opti621_port_ops = {
34538 .set_pio_mode = opti621_set_pio_mode,
34539 };
34540
34541 -static const struct ide_port_info opti621_chipset __devinitdata = {
34542 +static const struct ide_port_info opti621_chipset __devinitconst = {
34543 .name = DRV_NAME,
34544 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
34545 .port_ops = &opti621_port_ops,
34546 diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
34547 index 65ba823..7311f4d 100644
34548 --- a/drivers/ide/pdc202xx_new.c
34549 +++ b/drivers/ide/pdc202xx_new.c
34550 @@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
34551 .udma_mask = udma, \
34552 }
34553
34554 -static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
34555 +static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
34556 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
34557 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
34558 };
34559 diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
34560 index cb812f3..af816ef 100644
34561 --- a/drivers/ide/pdc202xx_old.c
34562 +++ b/drivers/ide/pdc202xx_old.c
34563 @@ -285,7 +285,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
34564 .max_sectors = sectors, \
34565 }
34566
34567 -static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
34568 +static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
34569 { /* 0: PDC20246 */
34570 .name = DRV_NAME,
34571 .init_chipset = init_chipset_pdc202xx,
34572 diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
34573 index bf14f39..15c4b98 100644
34574 --- a/drivers/ide/piix.c
34575 +++ b/drivers/ide/piix.c
34576 @@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
34577 .udma_mask = udma, \
34578 }
34579
34580 -static const struct ide_port_info piix_pci_info[] __devinitdata = {
34581 +static const struct ide_port_info piix_pci_info[] __devinitconst = {
34582 /* 0: MPIIX */
34583 { /*
34584 * MPIIX actually has only a single IDE channel mapped to
34585 diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
34586 index a6414a8..c04173e 100644
34587 --- a/drivers/ide/rz1000.c
34588 +++ b/drivers/ide/rz1000.c
34589 @@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
34590 }
34591 }
34592
34593 -static const struct ide_port_info rz1000_chipset __devinitdata = {
34594 +static const struct ide_port_info rz1000_chipset __devinitconst = {
34595 .name = DRV_NAME,
34596 .host_flags = IDE_HFLAG_NO_DMA,
34597 };
34598 diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
34599 index d467478..9203942 100644
34600 --- a/drivers/ide/sc1200.c
34601 +++ b/drivers/ide/sc1200.c
34602 @@ -290,7 +290,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
34603 .dma_sff_read_status = ide_dma_sff_read_status,
34604 };
34605
34606 -static const struct ide_port_info sc1200_chipset __devinitdata = {
34607 +static const struct ide_port_info sc1200_chipset __devinitconst = {
34608 .name = DRV_NAME,
34609 .port_ops = &sc1200_port_ops,
34610 .dma_ops = &sc1200_dma_ops,
34611 diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
34612 index 1104bb3..59c5194 100644
34613 --- a/drivers/ide/scc_pata.c
34614 +++ b/drivers/ide/scc_pata.c
34615 @@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
34616 .dma_sff_read_status = scc_dma_sff_read_status,
34617 };
34618
34619 -static const struct ide_port_info scc_chipset __devinitdata = {
34620 +static const struct ide_port_info scc_chipset __devinitconst = {
34621 .name = "sccIDE",
34622 .init_iops = init_iops_scc,
34623 .init_dma = scc_init_dma,
34624 diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
34625 index b6554ef..6cc2cc3 100644
34626 --- a/drivers/ide/serverworks.c
34627 +++ b/drivers/ide/serverworks.c
34628 @@ -353,7 +353,7 @@ static const struct ide_port_ops svwks_port_ops = {
34629 .cable_detect = svwks_cable_detect,
34630 };
34631
34632 -static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
34633 +static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
34634 { /* 0: OSB4 */
34635 .name = DRV_NAME,
34636 .init_chipset = init_chipset_svwks,
34637 diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
34638 index ab3db61..afed580 100644
34639 --- a/drivers/ide/setup-pci.c
34640 +++ b/drivers/ide/setup-pci.c
34641 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
34642 int ret, i, n_ports = dev2 ? 4 : 2;
34643 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
34644
34645 + pax_track_stack();
34646 +
34647 for (i = 0; i < n_ports / 2; i++) {
34648 ret = ide_setup_pci_controller(pdev[i], d, !i);
34649 if (ret < 0)
34650 diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
34651 index d95df52..0b03a39 100644
34652 --- a/drivers/ide/siimage.c
34653 +++ b/drivers/ide/siimage.c
34654 @@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
34655 .udma_mask = ATA_UDMA6, \
34656 }
34657
34658 -static const struct ide_port_info siimage_chipsets[] __devinitdata = {
34659 +static const struct ide_port_info siimage_chipsets[] __devinitconst = {
34660 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
34661 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
34662 };
34663 diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
34664 index 3b88eba..ca8699d 100644
34665 --- a/drivers/ide/sis5513.c
34666 +++ b/drivers/ide/sis5513.c
34667 @@ -561,7 +561,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
34668 .cable_detect = sis_cable_detect,
34669 };
34670
34671 -static const struct ide_port_info sis5513_chipset __devinitdata = {
34672 +static const struct ide_port_info sis5513_chipset __devinitconst = {
34673 .name = DRV_NAME,
34674 .init_chipset = init_chipset_sis5513,
34675 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
34676 diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
34677 index d698da4..fca42a4 100644
34678 --- a/drivers/ide/sl82c105.c
34679 +++ b/drivers/ide/sl82c105.c
34680 @@ -319,7 +319,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
34681 .dma_sff_read_status = ide_dma_sff_read_status,
34682 };
34683
34684 -static const struct ide_port_info sl82c105_chipset __devinitdata = {
34685 +static const struct ide_port_info sl82c105_chipset __devinitconst = {
34686 .name = DRV_NAME,
34687 .init_chipset = init_chipset_sl82c105,
34688 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
34689 diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
34690 index 1ccfb40..83d5779 100644
34691 --- a/drivers/ide/slc90e66.c
34692 +++ b/drivers/ide/slc90e66.c
34693 @@ -131,7 +131,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
34694 .cable_detect = slc90e66_cable_detect,
34695 };
34696
34697 -static const struct ide_port_info slc90e66_chipset __devinitdata = {
34698 +static const struct ide_port_info slc90e66_chipset __devinitconst = {
34699 .name = DRV_NAME,
34700 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
34701 .port_ops = &slc90e66_port_ops,
34702 diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
34703 index 05a93d6..5f9e325 100644
34704 --- a/drivers/ide/tc86c001.c
34705 +++ b/drivers/ide/tc86c001.c
34706 @@ -190,7 +190,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
34707 .dma_sff_read_status = ide_dma_sff_read_status,
34708 };
34709
34710 -static const struct ide_port_info tc86c001_chipset __devinitdata = {
34711 +static const struct ide_port_info tc86c001_chipset __devinitconst = {
34712 .name = DRV_NAME,
34713 .init_hwif = init_hwif_tc86c001,
34714 .port_ops = &tc86c001_port_ops,
34715 diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
34716 index 8773c3b..7907d6c 100644
34717 --- a/drivers/ide/triflex.c
34718 +++ b/drivers/ide/triflex.c
34719 @@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
34720 .set_dma_mode = triflex_set_mode,
34721 };
34722
34723 -static const struct ide_port_info triflex_device __devinitdata = {
34724 +static const struct ide_port_info triflex_device __devinitconst = {
34725 .name = DRV_NAME,
34726 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
34727 .port_ops = &triflex_port_ops,
34728 diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
34729 index 4b42ca0..e494a98 100644
34730 --- a/drivers/ide/trm290.c
34731 +++ b/drivers/ide/trm290.c
34732 @@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
34733 .dma_check = trm290_dma_check,
34734 };
34735
34736 -static const struct ide_port_info trm290_chipset __devinitdata = {
34737 +static const struct ide_port_info trm290_chipset __devinitconst = {
34738 .name = DRV_NAME,
34739 .init_hwif = init_hwif_trm290,
34740 .tp_ops = &trm290_tp_ops,
34741 diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
34742 index 028de26..520d5d5 100644
34743 --- a/drivers/ide/via82cxxx.c
34744 +++ b/drivers/ide/via82cxxx.c
34745 @@ -374,7 +374,7 @@ static const struct ide_port_ops via_port_ops = {
34746 .cable_detect = via82cxxx_cable_detect,
34747 };
34748
34749 -static const struct ide_port_info via82cxxx_chipset __devinitdata = {
34750 +static const struct ide_port_info via82cxxx_chipset __devinitconst = {
34751 .name = DRV_NAME,
34752 .init_chipset = init_chipset_via82cxxx,
34753 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
34754 diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
34755 index 2cd00b5..14de699 100644
34756 --- a/drivers/ieee1394/dv1394.c
34757 +++ b/drivers/ieee1394/dv1394.c
34758 @@ -739,7 +739,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
34759 based upon DIF section and sequence
34760 */
34761
34762 -static void inline
34763 +static inline void
34764 frame_put_packet (struct frame *f, struct packet *p)
34765 {
34766 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
34767 diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
34768 index e947d8f..6a966b9 100644
34769 --- a/drivers/ieee1394/hosts.c
34770 +++ b/drivers/ieee1394/hosts.c
34771 @@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command,
34772 }
34773
34774 static struct hpsb_host_driver dummy_driver = {
34775 + .name = "dummy",
34776 .transmit_packet = dummy_transmit_packet,
34777 .devctl = dummy_devctl,
34778 .isoctl = dummy_isoctl
34779 diff --git a/drivers/ieee1394/init_ohci1394_dma.c b/drivers/ieee1394/init_ohci1394_dma.c
34780 index ddaab6e..8d37435 100644
34781 --- a/drivers/ieee1394/init_ohci1394_dma.c
34782 +++ b/drivers/ieee1394/init_ohci1394_dma.c
34783 @@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_controllers(void)
34784 for (func = 0; func < 8; func++) {
34785 u32 class = read_pci_config(num,slot,func,
34786 PCI_CLASS_REVISION);
34787 - if ((class == 0xffffffff))
34788 + if (class == 0xffffffff)
34789 continue; /* No device at this func */
34790
34791 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
34792 diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
34793 index 65c1429..5d8c11f 100644
34794 --- a/drivers/ieee1394/ohci1394.c
34795 +++ b/drivers/ieee1394/ohci1394.c
34796 @@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
34797 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
34798
34799 /* Module Parameters */
34800 -static int phys_dma = 1;
34801 +static int phys_dma;
34802 module_param(phys_dma, int, 0444);
34803 -MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
34804 +MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
34805
34806 static void dma_trm_tasklet(unsigned long data);
34807 static void dma_trm_reset(struct dma_trm_ctx *d);
34808 diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
34809 index f199896..78c9fc8 100644
34810 --- a/drivers/ieee1394/sbp2.c
34811 +++ b/drivers/ieee1394/sbp2.c
34812 @@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 protocol driver");
34813 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
34814 MODULE_LICENSE("GPL");
34815
34816 -static int sbp2_module_init(void)
34817 +static int __init sbp2_module_init(void)
34818 {
34819 int ret;
34820
34821 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
34822 index a5dea6b..0cefe8f 100644
34823 --- a/drivers/infiniband/core/cm.c
34824 +++ b/drivers/infiniband/core/cm.c
34825 @@ -112,7 +112,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
34826
34827 struct cm_counter_group {
34828 struct kobject obj;
34829 - atomic_long_t counter[CM_ATTR_COUNT];
34830 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
34831 };
34832
34833 struct cm_counter_attribute {
34834 @@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm_work *work,
34835 struct ib_mad_send_buf *msg = NULL;
34836 int ret;
34837
34838 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34839 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34840 counter[CM_REQ_COUNTER]);
34841
34842 /* Quick state check to discard duplicate REQs. */
34843 @@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
34844 if (!cm_id_priv)
34845 return;
34846
34847 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34848 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34849 counter[CM_REP_COUNTER]);
34850 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
34851 if (ret)
34852 @@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work *work)
34853 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
34854 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
34855 spin_unlock_irq(&cm_id_priv->lock);
34856 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34857 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34858 counter[CM_RTU_COUNTER]);
34859 goto out;
34860 }
34861 @@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_work *work)
34862 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
34863 dreq_msg->local_comm_id);
34864 if (!cm_id_priv) {
34865 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34866 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34867 counter[CM_DREQ_COUNTER]);
34868 cm_issue_drep(work->port, work->mad_recv_wc);
34869 return -EINVAL;
34870 @@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_work *work)
34871 case IB_CM_MRA_REP_RCVD:
34872 break;
34873 case IB_CM_TIMEWAIT:
34874 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34875 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34876 counter[CM_DREQ_COUNTER]);
34877 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
34878 goto unlock;
34879 @@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_work *work)
34880 cm_free_msg(msg);
34881 goto deref;
34882 case IB_CM_DREQ_RCVD:
34883 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34884 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34885 counter[CM_DREQ_COUNTER]);
34886 goto unlock;
34887 default:
34888 @@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work *work)
34889 ib_modify_mad(cm_id_priv->av.port->mad_agent,
34890 cm_id_priv->msg, timeout)) {
34891 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
34892 - atomic_long_inc(&work->port->
34893 + atomic_long_inc_unchecked(&work->port->
34894 counter_group[CM_RECV_DUPLICATES].
34895 counter[CM_MRA_COUNTER]);
34896 goto out;
34897 @@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work *work)
34898 break;
34899 case IB_CM_MRA_REQ_RCVD:
34900 case IB_CM_MRA_REP_RCVD:
34901 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34902 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34903 counter[CM_MRA_COUNTER]);
34904 /* fall through */
34905 default:
34906 @@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work *work)
34907 case IB_CM_LAP_IDLE:
34908 break;
34909 case IB_CM_MRA_LAP_SENT:
34910 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34911 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34912 counter[CM_LAP_COUNTER]);
34913 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
34914 goto unlock;
34915 @@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work *work)
34916 cm_free_msg(msg);
34917 goto deref;
34918 case IB_CM_LAP_RCVD:
34919 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34920 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34921 counter[CM_LAP_COUNTER]);
34922 goto unlock;
34923 default:
34924 @@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
34925 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
34926 if (cur_cm_id_priv) {
34927 spin_unlock_irq(&cm.lock);
34928 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34929 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34930 counter[CM_SIDR_REQ_COUNTER]);
34931 goto out; /* Duplicate message. */
34932 }
34933 @@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
34934 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
34935 msg->retries = 1;
34936
34937 - atomic_long_add(1 + msg->retries,
34938 + atomic_long_add_unchecked(1 + msg->retries,
34939 &port->counter_group[CM_XMIT].counter[attr_index]);
34940 if (msg->retries)
34941 - atomic_long_add(msg->retries,
34942 + atomic_long_add_unchecked(msg->retries,
34943 &port->counter_group[CM_XMIT_RETRIES].
34944 counter[attr_index]);
34945
34946 @@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
34947 }
34948
34949 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
34950 - atomic_long_inc(&port->counter_group[CM_RECV].
34951 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
34952 counter[attr_id - CM_ATTR_ID_OFFSET]);
34953
34954 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
34955 @@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
34956 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
34957
34958 return sprintf(buf, "%ld\n",
34959 - atomic_long_read(&group->counter[cm_attr->index]));
34960 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
34961 }
34962
34963 -static struct sysfs_ops cm_counter_ops = {
34964 +static const struct sysfs_ops cm_counter_ops = {
34965 .show = cm_show_counter
34966 };
34967
34968 diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
34969 index 8fd3a6f..61d8075 100644
34970 --- a/drivers/infiniband/core/cma.c
34971 +++ b/drivers/infiniband/core/cma.c
34972 @@ -2267,6 +2267,9 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
34973
34974 req.private_data_len = sizeof(struct cma_hdr) +
34975 conn_param->private_data_len;
34976 + if (req.private_data_len < conn_param->private_data_len)
34977 + return -EINVAL;
34978 +
34979 req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
34980 if (!req.private_data)
34981 return -ENOMEM;
34982 @@ -2314,6 +2317,9 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
34983 memset(&req, 0, sizeof req);
34984 offset = cma_user_data_offset(id_priv->id.ps);
34985 req.private_data_len = offset + conn_param->private_data_len;
34986 + if (req.private_data_len < conn_param->private_data_len)
34987 + return -EINVAL;
34988 +
34989 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
34990 if (!private_data)
34991 return -ENOMEM;
34992 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
34993 index 4507043..14ad522 100644
34994 --- a/drivers/infiniband/core/fmr_pool.c
34995 +++ b/drivers/infiniband/core/fmr_pool.c
34996 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
34997
34998 struct task_struct *thread;
34999
35000 - atomic_t req_ser;
35001 - atomic_t flush_ser;
35002 + atomic_unchecked_t req_ser;
35003 + atomic_unchecked_t flush_ser;
35004
35005 wait_queue_head_t force_wait;
35006 };
35007 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
35008 struct ib_fmr_pool *pool = pool_ptr;
35009
35010 do {
35011 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
35012 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
35013 ib_fmr_batch_release(pool);
35014
35015 - atomic_inc(&pool->flush_ser);
35016 + atomic_inc_unchecked(&pool->flush_ser);
35017 wake_up_interruptible(&pool->force_wait);
35018
35019 if (pool->flush_function)
35020 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
35021 }
35022
35023 set_current_state(TASK_INTERRUPTIBLE);
35024 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
35025 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
35026 !kthread_should_stop())
35027 schedule();
35028 __set_current_state(TASK_RUNNING);
35029 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
35030 pool->dirty_watermark = params->dirty_watermark;
35031 pool->dirty_len = 0;
35032 spin_lock_init(&pool->pool_lock);
35033 - atomic_set(&pool->req_ser, 0);
35034 - atomic_set(&pool->flush_ser, 0);
35035 + atomic_set_unchecked(&pool->req_ser, 0);
35036 + atomic_set_unchecked(&pool->flush_ser, 0);
35037 init_waitqueue_head(&pool->force_wait);
35038
35039 pool->thread = kthread_run(ib_fmr_cleanup_thread,
35040 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
35041 }
35042 spin_unlock_irq(&pool->pool_lock);
35043
35044 - serial = atomic_inc_return(&pool->req_ser);
35045 + serial = atomic_inc_return_unchecked(&pool->req_ser);
35046 wake_up_process(pool->thread);
35047
35048 if (wait_event_interruptible(pool->force_wait,
35049 - atomic_read(&pool->flush_ser) - serial >= 0))
35050 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
35051 return -EINTR;
35052
35053 return 0;
35054 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
35055 } else {
35056 list_add_tail(&fmr->list, &pool->dirty_list);
35057 if (++pool->dirty_len >= pool->dirty_watermark) {
35058 - atomic_inc(&pool->req_ser);
35059 + atomic_inc_unchecked(&pool->req_ser);
35060 wake_up_process(pool->thread);
35061 }
35062 }
35063 diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
35064 index 158a214..1558bb7 100644
35065 --- a/drivers/infiniband/core/sysfs.c
35066 +++ b/drivers/infiniband/core/sysfs.c
35067 @@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kobject *kobj,
35068 return port_attr->show(p, port_attr, buf);
35069 }
35070
35071 -static struct sysfs_ops port_sysfs_ops = {
35072 +static const struct sysfs_ops port_sysfs_ops = {
35073 .show = port_attr_show
35074 };
35075
35076 diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
35077 index 5440da0..1194ecb 100644
35078 --- a/drivers/infiniband/core/uverbs_marshall.c
35079 +++ b/drivers/infiniband/core/uverbs_marshall.c
35080 @@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
35081 dst->grh.sgid_index = src->grh.sgid_index;
35082 dst->grh.hop_limit = src->grh.hop_limit;
35083 dst->grh.traffic_class = src->grh.traffic_class;
35084 + memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
35085 dst->dlid = src->dlid;
35086 dst->sl = src->sl;
35087 dst->src_path_bits = src->src_path_bits;
35088 dst->static_rate = src->static_rate;
35089 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
35090 dst->port_num = src->port_num;
35091 + dst->reserved = 0;
35092 }
35093 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
35094
35095 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
35096 struct ib_qp_attr *src)
35097 {
35098 + dst->qp_state = src->qp_state;
35099 dst->cur_qp_state = src->cur_qp_state;
35100 dst->path_mtu = src->path_mtu;
35101 dst->path_mig_state = src->path_mig_state;
35102 @@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
35103 dst->rnr_retry = src->rnr_retry;
35104 dst->alt_port_num = src->alt_port_num;
35105 dst->alt_timeout = src->alt_timeout;
35106 + memset(dst->reserved, 0, sizeof(dst->reserved));
35107 }
35108 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
35109
35110 diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
35111 index 100da85..62e6b88 100644
35112 --- a/drivers/infiniband/hw/ipath/ipath_fs.c
35113 +++ b/drivers/infiniband/hw/ipath/ipath_fs.c
35114 @@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf,
35115 struct infinipath_counters counters;
35116 struct ipath_devdata *dd;
35117
35118 + pax_track_stack();
35119 +
35120 dd = file->f_path.dentry->d_inode->i_private;
35121 dd->ipath_f_read_counters(dd, &counters);
35122
35123 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
35124 index cbde0cf..afaf55c 100644
35125 --- a/drivers/infiniband/hw/nes/nes.c
35126 +++ b/drivers/infiniband/hw/nes/nes.c
35127 @@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
35128 LIST_HEAD(nes_adapter_list);
35129 static LIST_HEAD(nes_dev_list);
35130
35131 -atomic_t qps_destroyed;
35132 +atomic_unchecked_t qps_destroyed;
35133
35134 static unsigned int ee_flsh_adapter;
35135 static unsigned int sysfs_nonidx_addr;
35136 @@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
35137 struct nes_adapter *nesadapter = nesdev->nesadapter;
35138 u32 qp_id;
35139
35140 - atomic_inc(&qps_destroyed);
35141 + atomic_inc_unchecked(&qps_destroyed);
35142
35143 /* Free the control structures */
35144
35145 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
35146 index bcc6abc..9c76b2f 100644
35147 --- a/drivers/infiniband/hw/nes/nes.h
35148 +++ b/drivers/infiniband/hw/nes/nes.h
35149 @@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
35150 extern unsigned int wqm_quanta;
35151 extern struct list_head nes_adapter_list;
35152
35153 -extern atomic_t cm_connects;
35154 -extern atomic_t cm_accepts;
35155 -extern atomic_t cm_disconnects;
35156 -extern atomic_t cm_closes;
35157 -extern atomic_t cm_connecteds;
35158 -extern atomic_t cm_connect_reqs;
35159 -extern atomic_t cm_rejects;
35160 -extern atomic_t mod_qp_timouts;
35161 -extern atomic_t qps_created;
35162 -extern atomic_t qps_destroyed;
35163 -extern atomic_t sw_qps_destroyed;
35164 +extern atomic_unchecked_t cm_connects;
35165 +extern atomic_unchecked_t cm_accepts;
35166 +extern atomic_unchecked_t cm_disconnects;
35167 +extern atomic_unchecked_t cm_closes;
35168 +extern atomic_unchecked_t cm_connecteds;
35169 +extern atomic_unchecked_t cm_connect_reqs;
35170 +extern atomic_unchecked_t cm_rejects;
35171 +extern atomic_unchecked_t mod_qp_timouts;
35172 +extern atomic_unchecked_t qps_created;
35173 +extern atomic_unchecked_t qps_destroyed;
35174 +extern atomic_unchecked_t sw_qps_destroyed;
35175 extern u32 mh_detected;
35176 extern u32 mh_pauses_sent;
35177 extern u32 cm_packets_sent;
35178 @@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
35179 extern u32 cm_listens_created;
35180 extern u32 cm_listens_destroyed;
35181 extern u32 cm_backlog_drops;
35182 -extern atomic_t cm_loopbacks;
35183 -extern atomic_t cm_nodes_created;
35184 -extern atomic_t cm_nodes_destroyed;
35185 -extern atomic_t cm_accel_dropped_pkts;
35186 -extern atomic_t cm_resets_recvd;
35187 +extern atomic_unchecked_t cm_loopbacks;
35188 +extern atomic_unchecked_t cm_nodes_created;
35189 +extern atomic_unchecked_t cm_nodes_destroyed;
35190 +extern atomic_unchecked_t cm_accel_dropped_pkts;
35191 +extern atomic_unchecked_t cm_resets_recvd;
35192
35193 extern u32 int_mod_timer_init;
35194 extern u32 int_mod_cq_depth_256;
35195 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
35196 index 73473db..5ed06e8 100644
35197 --- a/drivers/infiniband/hw/nes/nes_cm.c
35198 +++ b/drivers/infiniband/hw/nes/nes_cm.c
35199 @@ -69,11 +69,11 @@ u32 cm_packets_received;
35200 u32 cm_listens_created;
35201 u32 cm_listens_destroyed;
35202 u32 cm_backlog_drops;
35203 -atomic_t cm_loopbacks;
35204 -atomic_t cm_nodes_created;
35205 -atomic_t cm_nodes_destroyed;
35206 -atomic_t cm_accel_dropped_pkts;
35207 -atomic_t cm_resets_recvd;
35208 +atomic_unchecked_t cm_loopbacks;
35209 +atomic_unchecked_t cm_nodes_created;
35210 +atomic_unchecked_t cm_nodes_destroyed;
35211 +atomic_unchecked_t cm_accel_dropped_pkts;
35212 +atomic_unchecked_t cm_resets_recvd;
35213
35214 static inline int mini_cm_accelerated(struct nes_cm_core *,
35215 struct nes_cm_node *);
35216 @@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
35217
35218 static struct nes_cm_core *g_cm_core;
35219
35220 -atomic_t cm_connects;
35221 -atomic_t cm_accepts;
35222 -atomic_t cm_disconnects;
35223 -atomic_t cm_closes;
35224 -atomic_t cm_connecteds;
35225 -atomic_t cm_connect_reqs;
35226 -atomic_t cm_rejects;
35227 +atomic_unchecked_t cm_connects;
35228 +atomic_unchecked_t cm_accepts;
35229 +atomic_unchecked_t cm_disconnects;
35230 +atomic_unchecked_t cm_closes;
35231 +atomic_unchecked_t cm_connecteds;
35232 +atomic_unchecked_t cm_connect_reqs;
35233 +atomic_unchecked_t cm_rejects;
35234
35235
35236 /**
35237 @@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
35238 cm_node->rem_mac);
35239
35240 add_hte_node(cm_core, cm_node);
35241 - atomic_inc(&cm_nodes_created);
35242 + atomic_inc_unchecked(&cm_nodes_created);
35243
35244 return cm_node;
35245 }
35246 @@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
35247 }
35248
35249 atomic_dec(&cm_core->node_cnt);
35250 - atomic_inc(&cm_nodes_destroyed);
35251 + atomic_inc_unchecked(&cm_nodes_destroyed);
35252 nesqp = cm_node->nesqp;
35253 if (nesqp) {
35254 nesqp->cm_node = NULL;
35255 @@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
35256
35257 static void drop_packet(struct sk_buff *skb)
35258 {
35259 - atomic_inc(&cm_accel_dropped_pkts);
35260 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
35261 dev_kfree_skb_any(skb);
35262 }
35263
35264 @@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
35265
35266 int reset = 0; /* whether to send reset in case of err.. */
35267 int passive_state;
35268 - atomic_inc(&cm_resets_recvd);
35269 + atomic_inc_unchecked(&cm_resets_recvd);
35270 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
35271 " refcnt=%d\n", cm_node, cm_node->state,
35272 atomic_read(&cm_node->ref_count));
35273 @@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
35274 rem_ref_cm_node(cm_node->cm_core, cm_node);
35275 return NULL;
35276 }
35277 - atomic_inc(&cm_loopbacks);
35278 + atomic_inc_unchecked(&cm_loopbacks);
35279 loopbackremotenode->loopbackpartner = cm_node;
35280 loopbackremotenode->tcp_cntxt.rcv_wscale =
35281 NES_CM_DEFAULT_RCV_WND_SCALE;
35282 @@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
35283 add_ref_cm_node(cm_node);
35284 } else if (cm_node->state == NES_CM_STATE_TSA) {
35285 rem_ref_cm_node(cm_core, cm_node);
35286 - atomic_inc(&cm_accel_dropped_pkts);
35287 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
35288 dev_kfree_skb_any(skb);
35289 break;
35290 }
35291 @@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
35292
35293 if ((cm_id) && (cm_id->event_handler)) {
35294 if (issue_disconn) {
35295 - atomic_inc(&cm_disconnects);
35296 + atomic_inc_unchecked(&cm_disconnects);
35297 cm_event.event = IW_CM_EVENT_DISCONNECT;
35298 cm_event.status = disconn_status;
35299 cm_event.local_addr = cm_id->local_addr;
35300 @@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
35301 }
35302
35303 if (issue_close) {
35304 - atomic_inc(&cm_closes);
35305 + atomic_inc_unchecked(&cm_closes);
35306 nes_disconnect(nesqp, 1);
35307
35308 cm_id->provider_data = nesqp;
35309 @@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
35310
35311 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
35312 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
35313 - atomic_inc(&cm_accepts);
35314 + atomic_inc_unchecked(&cm_accepts);
35315
35316 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
35317 atomic_read(&nesvnic->netdev->refcnt));
35318 @@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
35319
35320 struct nes_cm_core *cm_core;
35321
35322 - atomic_inc(&cm_rejects);
35323 + atomic_inc_unchecked(&cm_rejects);
35324 cm_node = (struct nes_cm_node *) cm_id->provider_data;
35325 loopback = cm_node->loopbackpartner;
35326 cm_core = cm_node->cm_core;
35327 @@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
35328 ntohl(cm_id->local_addr.sin_addr.s_addr),
35329 ntohs(cm_id->local_addr.sin_port));
35330
35331 - atomic_inc(&cm_connects);
35332 + atomic_inc_unchecked(&cm_connects);
35333 nesqp->active_conn = 1;
35334
35335 /* cache the cm_id in the qp */
35336 @@ -3195,7 +3195,7 @@ static void cm_event_connected(struct nes_cm_event *event)
35337 if (nesqp->destroyed) {
35338 return;
35339 }
35340 - atomic_inc(&cm_connecteds);
35341 + atomic_inc_unchecked(&cm_connecteds);
35342 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
35343 " local port 0x%04X. jiffies = %lu.\n",
35344 nesqp->hwqp.qp_id,
35345 @@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm_event *event)
35346
35347 ret = cm_id->event_handler(cm_id, &cm_event);
35348 cm_id->add_ref(cm_id);
35349 - atomic_inc(&cm_closes);
35350 + atomic_inc_unchecked(&cm_closes);
35351 cm_event.event = IW_CM_EVENT_CLOSE;
35352 cm_event.status = IW_CM_EVENT_STATUS_OK;
35353 cm_event.provider_data = cm_id->provider_data;
35354 @@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
35355 return;
35356 cm_id = cm_node->cm_id;
35357
35358 - atomic_inc(&cm_connect_reqs);
35359 + atomic_inc_unchecked(&cm_connect_reqs);
35360 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
35361 cm_node, cm_id, jiffies);
35362
35363 @@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
35364 return;
35365 cm_id = cm_node->cm_id;
35366
35367 - atomic_inc(&cm_connect_reqs);
35368 + atomic_inc_unchecked(&cm_connect_reqs);
35369 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
35370 cm_node, cm_id, jiffies);
35371
35372 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
35373 index e593af3..870694a 100644
35374 --- a/drivers/infiniband/hw/nes/nes_nic.c
35375 +++ b/drivers/infiniband/hw/nes/nes_nic.c
35376 @@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
35377 target_stat_values[++index] = mh_detected;
35378 target_stat_values[++index] = mh_pauses_sent;
35379 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
35380 - target_stat_values[++index] = atomic_read(&cm_connects);
35381 - target_stat_values[++index] = atomic_read(&cm_accepts);
35382 - target_stat_values[++index] = atomic_read(&cm_disconnects);
35383 - target_stat_values[++index] = atomic_read(&cm_connecteds);
35384 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
35385 - target_stat_values[++index] = atomic_read(&cm_rejects);
35386 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
35387 - target_stat_values[++index] = atomic_read(&qps_created);
35388 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
35389 - target_stat_values[++index] = atomic_read(&qps_destroyed);
35390 - target_stat_values[++index] = atomic_read(&cm_closes);
35391 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
35392 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
35393 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
35394 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
35395 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
35396 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
35397 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
35398 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
35399 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
35400 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
35401 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
35402 target_stat_values[++index] = cm_packets_sent;
35403 target_stat_values[++index] = cm_packets_bounced;
35404 target_stat_values[++index] = cm_packets_created;
35405 @@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
35406 target_stat_values[++index] = cm_listens_created;
35407 target_stat_values[++index] = cm_listens_destroyed;
35408 target_stat_values[++index] = cm_backlog_drops;
35409 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
35410 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
35411 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
35412 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
35413 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
35414 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
35415 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
35416 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
35417 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
35418 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
35419 target_stat_values[++index] = int_mod_timer_init;
35420 target_stat_values[++index] = int_mod_cq_depth_1;
35421 target_stat_values[++index] = int_mod_cq_depth_4;
35422 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
35423 index a680c42..f914deb 100644
35424 --- a/drivers/infiniband/hw/nes/nes_verbs.c
35425 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
35426 @@ -45,9 +45,9 @@
35427
35428 #include <rdma/ib_umem.h>
35429
35430 -atomic_t mod_qp_timouts;
35431 -atomic_t qps_created;
35432 -atomic_t sw_qps_destroyed;
35433 +atomic_unchecked_t mod_qp_timouts;
35434 +atomic_unchecked_t qps_created;
35435 +atomic_unchecked_t sw_qps_destroyed;
35436
35437 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
35438
35439 @@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
35440 if (init_attr->create_flags)
35441 return ERR_PTR(-EINVAL);
35442
35443 - atomic_inc(&qps_created);
35444 + atomic_inc_unchecked(&qps_created);
35445 switch (init_attr->qp_type) {
35446 case IB_QPT_RC:
35447 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
35448 @@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
35449 struct iw_cm_event cm_event;
35450 int ret;
35451
35452 - atomic_inc(&sw_qps_destroyed);
35453 + atomic_inc_unchecked(&sw_qps_destroyed);
35454 nesqp->destroyed = 1;
35455
35456 /* Blow away the connection if it exists. */
35457 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
35458 index ac11be0..3883c04 100644
35459 --- a/drivers/input/gameport/gameport.c
35460 +++ b/drivers/input/gameport/gameport.c
35461 @@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
35462 */
35463 static void gameport_init_port(struct gameport *gameport)
35464 {
35465 - static atomic_t gameport_no = ATOMIC_INIT(0);
35466 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
35467
35468 __module_get(THIS_MODULE);
35469
35470 mutex_init(&gameport->drv_mutex);
35471 device_initialize(&gameport->dev);
35472 - dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
35473 + dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
35474 gameport->dev.bus = &gameport_bus;
35475 gameport->dev.release = gameport_release_port;
35476 if (gameport->parent)
35477 diff --git a/drivers/input/input.c b/drivers/input/input.c
35478 index c82ae82..8cfb9cb 100644
35479 --- a/drivers/input/input.c
35480 +++ b/drivers/input/input.c
35481 @@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
35482 */
35483 int input_register_device(struct input_dev *dev)
35484 {
35485 - static atomic_t input_no = ATOMIC_INIT(0);
35486 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
35487 struct input_handler *handler;
35488 const char *path;
35489 int error;
35490 @@ -1585,7 +1585,7 @@ int input_register_device(struct input_dev *dev)
35491 dev->setkeycode = input_default_setkeycode;
35492
35493 dev_set_name(&dev->dev, "input%ld",
35494 - (unsigned long) atomic_inc_return(&input_no) - 1);
35495 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
35496
35497 error = device_add(&dev->dev);
35498 if (error)
35499 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
35500 index ca13a6b..b032b0c 100644
35501 --- a/drivers/input/joystick/sidewinder.c
35502 +++ b/drivers/input/joystick/sidewinder.c
35503 @@ -30,6 +30,7 @@
35504 #include <linux/kernel.h>
35505 #include <linux/module.h>
35506 #include <linux/slab.h>
35507 +#include <linux/sched.h>
35508 #include <linux/init.h>
35509 #include <linux/input.h>
35510 #include <linux/gameport.h>
35511 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
35512 unsigned char buf[SW_LENGTH];
35513 int i;
35514
35515 + pax_track_stack();
35516 +
35517 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
35518
35519 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
35520 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
35521 index 79e3edc..01412b9 100644
35522 --- a/drivers/input/joystick/xpad.c
35523 +++ b/drivers/input/joystick/xpad.c
35524 @@ -621,7 +621,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
35525
35526 static int xpad_led_probe(struct usb_xpad *xpad)
35527 {
35528 - static atomic_t led_seq = ATOMIC_INIT(0);
35529 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
35530 long led_no;
35531 struct xpad_led *led;
35532 struct led_classdev *led_cdev;
35533 @@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
35534 if (!led)
35535 return -ENOMEM;
35536
35537 - led_no = (long)atomic_inc_return(&led_seq) - 1;
35538 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
35539
35540 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
35541 led->xpad = xpad;
35542 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
35543 index 0236f0d..c7327f1 100644
35544 --- a/drivers/input/serio/serio.c
35545 +++ b/drivers/input/serio/serio.c
35546 @@ -527,7 +527,7 @@ static void serio_release_port(struct device *dev)
35547 */
35548 static void serio_init_port(struct serio *serio)
35549 {
35550 - static atomic_t serio_no = ATOMIC_INIT(0);
35551 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
35552
35553 __module_get(THIS_MODULE);
35554
35555 @@ -536,7 +536,7 @@ static void serio_init_port(struct serio *serio)
35556 mutex_init(&serio->drv_mutex);
35557 device_initialize(&serio->dev);
35558 dev_set_name(&serio->dev, "serio%ld",
35559 - (long)atomic_inc_return(&serio_no) - 1);
35560 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
35561 serio->dev.bus = &serio_bus;
35562 serio->dev.release = serio_release_port;
35563 if (serio->parent) {
35564 diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
35565 index 33dcd8d..2783d25 100644
35566 --- a/drivers/isdn/gigaset/common.c
35567 +++ b/drivers/isdn/gigaset/common.c
35568 @@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
35569 cs->commands_pending = 0;
35570 cs->cur_at_seq = 0;
35571 cs->gotfwver = -1;
35572 - cs->open_count = 0;
35573 + local_set(&cs->open_count, 0);
35574 cs->dev = NULL;
35575 cs->tty = NULL;
35576 cs->tty_dev = NULL;
35577 diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
35578 index a2f6125..6a70677 100644
35579 --- a/drivers/isdn/gigaset/gigaset.h
35580 +++ b/drivers/isdn/gigaset/gigaset.h
35581 @@ -34,6 +34,7 @@
35582 #include <linux/tty_driver.h>
35583 #include <linux/list.h>
35584 #include <asm/atomic.h>
35585 +#include <asm/local.h>
35586
35587 #define GIG_VERSION {0,5,0,0}
35588 #define GIG_COMPAT {0,4,0,0}
35589 @@ -446,7 +447,7 @@ struct cardstate {
35590 spinlock_t cmdlock;
35591 unsigned curlen, cmdbytes;
35592
35593 - unsigned open_count;
35594 + local_t open_count;
35595 struct tty_struct *tty;
35596 struct tasklet_struct if_wake_tasklet;
35597 unsigned control_state;
35598 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
35599 index b3065b8..c7e8cc9 100644
35600 --- a/drivers/isdn/gigaset/interface.c
35601 +++ b/drivers/isdn/gigaset/interface.c
35602 @@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
35603 return -ERESTARTSYS; // FIXME -EINTR?
35604 tty->driver_data = cs;
35605
35606 - ++cs->open_count;
35607 -
35608 - if (cs->open_count == 1) {
35609 + if (local_inc_return(&cs->open_count) == 1) {
35610 spin_lock_irqsave(&cs->lock, flags);
35611 cs->tty = tty;
35612 spin_unlock_irqrestore(&cs->lock, flags);
35613 @@ -195,10 +193,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
35614
35615 if (!cs->connected)
35616 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35617 - else if (!cs->open_count)
35618 + else if (!local_read(&cs->open_count))
35619 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35620 else {
35621 - if (!--cs->open_count) {
35622 + if (!local_dec_return(&cs->open_count)) {
35623 spin_lock_irqsave(&cs->lock, flags);
35624 cs->tty = NULL;
35625 spin_unlock_irqrestore(&cs->lock, flags);
35626 @@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *tty, struct file *file,
35627 if (!cs->connected) {
35628 gig_dbg(DEBUG_IF, "not connected");
35629 retval = -ENODEV;
35630 - } else if (!cs->open_count)
35631 + } else if (!local_read(&cs->open_count))
35632 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35633 else {
35634 retval = 0;
35635 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
35636 if (!cs->connected) {
35637 gig_dbg(DEBUG_IF, "not connected");
35638 retval = -ENODEV;
35639 - } else if (!cs->open_count)
35640 + } else if (!local_read(&cs->open_count))
35641 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35642 else if (cs->mstate != MS_LOCKED) {
35643 dev_warn(cs->dev, "can't write to unlocked device\n");
35644 @@ -395,7 +393,7 @@ static int if_write_room(struct tty_struct *tty)
35645 if (!cs->connected) {
35646 gig_dbg(DEBUG_IF, "not connected");
35647 retval = -ENODEV;
35648 - } else if (!cs->open_count)
35649 + } else if (!local_read(&cs->open_count))
35650 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35651 else if (cs->mstate != MS_LOCKED) {
35652 dev_warn(cs->dev, "can't write to unlocked device\n");
35653 @@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
35654
35655 if (!cs->connected)
35656 gig_dbg(DEBUG_IF, "not connected");
35657 - else if (!cs->open_count)
35658 + else if (!local_read(&cs->open_count))
35659 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35660 else if (cs->mstate != MS_LOCKED)
35661 dev_warn(cs->dev, "can't write to unlocked device\n");
35662 @@ -453,7 +451,7 @@ static void if_throttle(struct tty_struct *tty)
35663
35664 if (!cs->connected)
35665 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35666 - else if (!cs->open_count)
35667 + else if (!local_read(&cs->open_count))
35668 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35669 else {
35670 //FIXME
35671 @@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_struct *tty)
35672
35673 if (!cs->connected)
35674 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35675 - else if (!cs->open_count)
35676 + else if (!local_read(&cs->open_count))
35677 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35678 else {
35679 //FIXME
35680 @@ -510,7 +508,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
35681 goto out;
35682 }
35683
35684 - if (!cs->open_count) {
35685 + if (!local_read(&cs->open_count)) {
35686 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35687 goto out;
35688 }
35689 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
35690 index a7c0083..62a7cb6 100644
35691 --- a/drivers/isdn/hardware/avm/b1.c
35692 +++ b/drivers/isdn/hardware/avm/b1.c
35693 @@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
35694 }
35695 if (left) {
35696 if (t4file->user) {
35697 - if (copy_from_user(buf, dp, left))
35698 + if (left > sizeof buf || copy_from_user(buf, dp, left))
35699 return -EFAULT;
35700 } else {
35701 memcpy(buf, dp, left);
35702 @@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
35703 }
35704 if (left) {
35705 if (config->user) {
35706 - if (copy_from_user(buf, dp, left))
35707 + if (left > sizeof buf || copy_from_user(buf, dp, left))
35708 return -EFAULT;
35709 } else {
35710 memcpy(buf, dp, left);
35711 diff --git a/drivers/isdn/hardware/eicon/capidtmf.c b/drivers/isdn/hardware/eicon/capidtmf.c
35712 index f130724..c373c68 100644
35713 --- a/drivers/isdn/hardware/eicon/capidtmf.c
35714 +++ b/drivers/isdn/hardware/eicon/capidtmf.c
35715 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_state *p_state, byte *buffer, word leng
35716 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
35717 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
35718
35719 + pax_track_stack();
35720
35721 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
35722 {
35723 diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c
35724 index 4d425c6..a9be6c4 100644
35725 --- a/drivers/isdn/hardware/eicon/capifunc.c
35726 +++ b/drivers/isdn/hardware/eicon/capifunc.c
35727 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
35728 IDI_SYNC_REQ req;
35729 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35730
35731 + pax_track_stack();
35732 +
35733 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35734
35735 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35736 diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c
35737 index 3029234..ef0d9e2 100644
35738 --- a/drivers/isdn/hardware/eicon/diddfunc.c
35739 +++ b/drivers/isdn/hardware/eicon/diddfunc.c
35740 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35741 IDI_SYNC_REQ req;
35742 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35743
35744 + pax_track_stack();
35745 +
35746 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35747
35748 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35749 diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c
35750 index d36a4c0..11e7d1a 100644
35751 --- a/drivers/isdn/hardware/eicon/divasfunc.c
35752 +++ b/drivers/isdn/hardware/eicon/divasfunc.c
35753 @@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35754 IDI_SYNC_REQ req;
35755 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35756
35757 + pax_track_stack();
35758 +
35759 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35760
35761 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35762 diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
35763 index 85784a7..a19ca98 100644
35764 --- a/drivers/isdn/hardware/eicon/divasync.h
35765 +++ b/drivers/isdn/hardware/eicon/divasync.h
35766 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
35767 } diva_didd_add_adapter_t;
35768 typedef struct _diva_didd_remove_adapter {
35769 IDI_CALL p_request;
35770 -} diva_didd_remove_adapter_t;
35771 +} __no_const diva_didd_remove_adapter_t;
35772 typedef struct _diva_didd_read_adapter_array {
35773 void * buffer;
35774 dword length;
35775 diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
35776 index db87d51..7d09acf 100644
35777 --- a/drivers/isdn/hardware/eicon/idifunc.c
35778 +++ b/drivers/isdn/hardware/eicon/idifunc.c
35779 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35780 IDI_SYNC_REQ req;
35781 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35782
35783 + pax_track_stack();
35784 +
35785 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35786
35787 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35788 diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
35789 index ae89fb8..0fab299 100644
35790 --- a/drivers/isdn/hardware/eicon/message.c
35791 +++ b/drivers/isdn/hardware/eicon/message.c
35792 @@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
35793 dword d;
35794 word w;
35795
35796 + pax_track_stack();
35797 +
35798 a = plci->adapter;
35799 Id = ((word)plci->Id<<8)|a->Id;
35800 PUT_WORD(&SS_Ind[4],0x0000);
35801 @@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE *bp, word b_channel_info,
35802 word j, n, w;
35803 dword d;
35804
35805 + pax_track_stack();
35806 +
35807
35808 for(i=0;i<8;i++) bp_parms[i].length = 0;
35809 for(i=0;i<2;i++) global_config[i].length = 0;
35810 @@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARSE *bp)
35811 const byte llc3[] = {4,3,2,2,6,6,0};
35812 const byte header[] = {0,2,3,3,0,0,0};
35813
35814 + pax_track_stack();
35815 +
35816 for(i=0;i<8;i++) bp_parms[i].length = 0;
35817 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
35818 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
35819 @@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER * a, PLCI * plci)
35820 word appl_number_group_type[MAX_APPL];
35821 PLCI *auxplci;
35822
35823 + pax_track_stack();
35824 +
35825 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
35826
35827 if(!a->group_optimization_enabled)
35828 diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c
35829 index a564b75..f3cf8b5 100644
35830 --- a/drivers/isdn/hardware/eicon/mntfunc.c
35831 +++ b/drivers/isdn/hardware/eicon/mntfunc.c
35832 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35833 IDI_SYNC_REQ req;
35834 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35835
35836 + pax_track_stack();
35837 +
35838 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35839
35840 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35841 diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
35842 index a3bd163..8956575 100644
35843 --- a/drivers/isdn/hardware/eicon/xdi_adapter.h
35844 +++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
35845 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
35846 typedef struct _diva_os_idi_adapter_interface {
35847 diva_init_card_proc_t cleanup_adapter_proc;
35848 diva_cmd_card_proc_t cmd_proc;
35849 -} diva_os_idi_adapter_interface_t;
35850 +} __no_const diva_os_idi_adapter_interface_t;
35851
35852 typedef struct _diva_os_xdi_adapter {
35853 struct list_head link;
35854 diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
35855 index adb1e8c..21b590b 100644
35856 --- a/drivers/isdn/i4l/isdn_common.c
35857 +++ b/drivers/isdn/i4l/isdn_common.c
35858 @@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
35859 } iocpar;
35860 void __user *argp = (void __user *)arg;
35861
35862 + pax_track_stack();
35863 +
35864 #define name iocpar.name
35865 #define bname iocpar.bname
35866 #define iocts iocpar.iocts
35867 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
35868 index bf7997a..cf091db 100644
35869 --- a/drivers/isdn/icn/icn.c
35870 +++ b/drivers/isdn/icn/icn.c
35871 @@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
35872 if (count > len)
35873 count = len;
35874 if (user) {
35875 - if (copy_from_user(msg, buf, count))
35876 + if (count > sizeof msg || copy_from_user(msg, buf, count))
35877 return -EFAULT;
35878 } else
35879 memcpy(msg, buf, count);
35880 diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
35881 index feb0fa4..f76f830 100644
35882 --- a/drivers/isdn/mISDN/socket.c
35883 +++ b/drivers/isdn/mISDN/socket.c
35884 @@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
35885 if (dev) {
35886 struct mISDN_devinfo di;
35887
35888 + memset(&di, 0, sizeof(di));
35889 di.id = dev->id;
35890 di.Dprotocols = dev->Dprotocols;
35891 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
35892 @@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
35893 if (dev) {
35894 struct mISDN_devinfo di;
35895
35896 + memset(&di, 0, sizeof(di));
35897 di.id = dev->id;
35898 di.Dprotocols = dev->Dprotocols;
35899 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
35900 diff --git a/drivers/isdn/sc/interrupt.c b/drivers/isdn/sc/interrupt.c
35901 index 485be8b..f0225bc 100644
35902 --- a/drivers/isdn/sc/interrupt.c
35903 +++ b/drivers/isdn/sc/interrupt.c
35904 @@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
35905 }
35906 else if(callid>=0x0000 && callid<=0x7FFF)
35907 {
35908 + int len;
35909 +
35910 pr_debug("%s: Got Incoming Call\n",
35911 sc_adapter[card]->devicename);
35912 - strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
35913 - strcpy(setup.eazmsn,
35914 - sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
35915 + len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
35916 + sizeof(setup.phone));
35917 + if (len >= sizeof(setup.phone))
35918 + continue;
35919 + len = strlcpy(setup.eazmsn,
35920 + sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
35921 + sizeof(setup.eazmsn));
35922 + if (len >= sizeof(setup.eazmsn))
35923 + continue;
35924 setup.si1 = 7;
35925 setup.si2 = 0;
35926 setup.plan = 0;
35927 @@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
35928 * Handle a GetMyNumber Rsp
35929 */
35930 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
35931 - strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
35932 + strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
35933 + rcvmsg.msg_data.byte_array,
35934 + sizeof(rcvmsg.msg_data.byte_array));
35935 continue;
35936 }
35937
35938 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
35939 index 8744d24..d1f9a9a 100644
35940 --- a/drivers/lguest/core.c
35941 +++ b/drivers/lguest/core.c
35942 @@ -91,9 +91,17 @@ static __init int map_switcher(void)
35943 * it's worked so far. The end address needs +1 because __get_vm_area
35944 * allocates an extra guard page, so we need space for that.
35945 */
35946 +
35947 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
35948 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
35949 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
35950 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
35951 +#else
35952 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
35953 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
35954 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
35955 +#endif
35956 +
35957 if (!switcher_vma) {
35958 err = -ENOMEM;
35959 printk("lguest: could not map switcher pages high\n");
35960 @@ -118,7 +126,7 @@ static __init int map_switcher(void)
35961 * Now the Switcher is mapped at the right address, we can't fail!
35962 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
35963 */
35964 - memcpy(switcher_vma->addr, start_switcher_text,
35965 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
35966 end_switcher_text - start_switcher_text);
35967
35968 printk(KERN_INFO "lguest: mapped switcher at %p\n",
35969 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
35970 index 6ae3888..8b38145 100644
35971 --- a/drivers/lguest/x86/core.c
35972 +++ b/drivers/lguest/x86/core.c
35973 @@ -59,7 +59,7 @@ static struct {
35974 /* Offset from where switcher.S was compiled to where we've copied it */
35975 static unsigned long switcher_offset(void)
35976 {
35977 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
35978 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
35979 }
35980
35981 /* This cpu's struct lguest_pages. */
35982 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
35983 * These copies are pretty cheap, so we do them unconditionally: */
35984 /* Save the current Host top-level page directory.
35985 */
35986 +
35987 +#ifdef CONFIG_PAX_PER_CPU_PGD
35988 + pages->state.host_cr3 = read_cr3();
35989 +#else
35990 pages->state.host_cr3 = __pa(current->mm->pgd);
35991 +#endif
35992 +
35993 /*
35994 * Set up the Guest's page tables to see this CPU's pages (and no
35995 * other CPU's pages).
35996 @@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
35997 * compiled-in switcher code and the high-mapped copy we just made.
35998 */
35999 for (i = 0; i < IDT_ENTRIES; i++)
36000 - default_idt_entries[i] += switcher_offset();
36001 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
36002
36003 /*
36004 * Set up the Switcher's per-cpu areas.
36005 @@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
36006 * it will be undisturbed when we switch. To change %cs and jump we
36007 * need this structure to feed to Intel's "lcall" instruction.
36008 */
36009 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
36010 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
36011 lguest_entry.segment = LGUEST_CS;
36012
36013 /*
36014 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
36015 index 40634b0..4f5855e 100644
36016 --- a/drivers/lguest/x86/switcher_32.S
36017 +++ b/drivers/lguest/x86/switcher_32.S
36018 @@ -87,6 +87,7 @@
36019 #include <asm/page.h>
36020 #include <asm/segment.h>
36021 #include <asm/lguest.h>
36022 +#include <asm/processor-flags.h>
36023
36024 // We mark the start of the code to copy
36025 // It's placed in .text tho it's never run here
36026 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
36027 // Changes type when we load it: damn Intel!
36028 // For after we switch over our page tables
36029 // That entry will be read-only: we'd crash.
36030 +
36031 +#ifdef CONFIG_PAX_KERNEXEC
36032 + mov %cr0, %edx
36033 + xor $X86_CR0_WP, %edx
36034 + mov %edx, %cr0
36035 +#endif
36036 +
36037 movl $(GDT_ENTRY_TSS*8), %edx
36038 ltr %dx
36039
36040 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
36041 // Let's clear it again for our return.
36042 // The GDT descriptor of the Host
36043 // Points to the table after two "size" bytes
36044 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
36045 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
36046 // Clear "used" from type field (byte 5, bit 2)
36047 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
36048 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
36049 +
36050 +#ifdef CONFIG_PAX_KERNEXEC
36051 + mov %cr0, %eax
36052 + xor $X86_CR0_WP, %eax
36053 + mov %eax, %cr0
36054 +#endif
36055
36056 // Once our page table's switched, the Guest is live!
36057 // The Host fades as we run this final step.
36058 @@ -295,13 +309,12 @@ deliver_to_host:
36059 // I consulted gcc, and it gave
36060 // These instructions, which I gladly credit:
36061 leal (%edx,%ebx,8), %eax
36062 - movzwl (%eax),%edx
36063 - movl 4(%eax), %eax
36064 - xorw %ax, %ax
36065 - orl %eax, %edx
36066 + movl 4(%eax), %edx
36067 + movw (%eax), %dx
36068 // Now the address of the handler's in %edx
36069 // We call it now: its "iret" drops us home.
36070 - jmp *%edx
36071 + ljmp $__KERNEL_CS, $1f
36072 +1: jmp *%edx
36073
36074 // Every interrupt can come to us here
36075 // But we must truly tell each apart.
36076 diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
36077 index 588a5b0..b71db89 100644
36078 --- a/drivers/macintosh/macio_asic.c
36079 +++ b/drivers/macintosh/macio_asic.c
36080 @@ -701,7 +701,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
36081 * MacIO is matched against any Apple ID, it's probe() function
36082 * will then decide wether it applies or not
36083 */
36084 -static const struct pci_device_id __devinitdata pci_ids [] = { {
36085 +static const struct pci_device_id __devinitconst pci_ids [] = { {
36086 .vendor = PCI_VENDOR_ID_APPLE,
36087 .device = PCI_ANY_ID,
36088 .subvendor = PCI_ANY_ID,
36089 diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
36090 index a348bb0..ecd9b3f 100644
36091 --- a/drivers/macintosh/via-pmu-backlight.c
36092 +++ b/drivers/macintosh/via-pmu-backlight.c
36093 @@ -15,7 +15,7 @@
36094
36095 #define MAX_PMU_LEVEL 0xFF
36096
36097 -static struct backlight_ops pmu_backlight_data;
36098 +static const struct backlight_ops pmu_backlight_data;
36099 static DEFINE_SPINLOCK(pmu_backlight_lock);
36100 static int sleeping, uses_pmu_bl;
36101 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
36102 @@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(struct backlight_device *bd)
36103 return bd->props.brightness;
36104 }
36105
36106 -static struct backlight_ops pmu_backlight_data = {
36107 +static const struct backlight_ops pmu_backlight_data = {
36108 .get_brightness = pmu_backlight_get_brightness,
36109 .update_status = pmu_backlight_update_status,
36110
36111 diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
36112 index 6f308a4..b5f7ff7 100644
36113 --- a/drivers/macintosh/via-pmu.c
36114 +++ b/drivers/macintosh/via-pmu.c
36115 @@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state_t state)
36116 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
36117 }
36118
36119 -static struct platform_suspend_ops pmu_pm_ops = {
36120 +static const struct platform_suspend_ops pmu_pm_ops = {
36121 .enter = powerbook_sleep,
36122 .valid = pmu_sleep_valid,
36123 };
36124 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
36125 index 818b617..4656e38 100644
36126 --- a/drivers/md/dm-ioctl.c
36127 +++ b/drivers/md/dm-ioctl.c
36128 @@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
36129 cmd == DM_LIST_VERSIONS_CMD)
36130 return 0;
36131
36132 - if ((cmd == DM_DEV_CREATE_CMD)) {
36133 + if (cmd == DM_DEV_CREATE_CMD) {
36134 if (!*param->name) {
36135 DMWARN("name not supplied when creating device");
36136 return -EINVAL;
36137 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
36138 index 6021d0a..a878643 100644
36139 --- a/drivers/md/dm-raid1.c
36140 +++ b/drivers/md/dm-raid1.c
36141 @@ -41,7 +41,7 @@ enum dm_raid1_error {
36142
36143 struct mirror {
36144 struct mirror_set *ms;
36145 - atomic_t error_count;
36146 + atomic_unchecked_t error_count;
36147 unsigned long error_type;
36148 struct dm_dev *dev;
36149 sector_t offset;
36150 @@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
36151 * simple way to tell if a device has encountered
36152 * errors.
36153 */
36154 - atomic_inc(&m->error_count);
36155 + atomic_inc_unchecked(&m->error_count);
36156
36157 if (test_and_set_bit(error_type, &m->error_type))
36158 return;
36159 @@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
36160 }
36161
36162 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
36163 - if (!atomic_read(&new->error_count)) {
36164 + if (!atomic_read_unchecked(&new->error_count)) {
36165 set_default_mirror(new);
36166 break;
36167 }
36168 @@ -363,7 +363,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
36169 struct mirror *m = get_default_mirror(ms);
36170
36171 do {
36172 - if (likely(!atomic_read(&m->error_count)))
36173 + if (likely(!atomic_read_unchecked(&m->error_count)))
36174 return m;
36175
36176 if (m-- == ms->mirror)
36177 @@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
36178 {
36179 struct mirror *default_mirror = get_default_mirror(m->ms);
36180
36181 - return !atomic_read(&default_mirror->error_count);
36182 + return !atomic_read_unchecked(&default_mirror->error_count);
36183 }
36184
36185 static int mirror_available(struct mirror_set *ms, struct bio *bio)
36186 @@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
36187 */
36188 if (likely(region_in_sync(ms, region, 1)))
36189 m = choose_mirror(ms, bio->bi_sector);
36190 - else if (m && atomic_read(&m->error_count))
36191 + else if (m && atomic_read_unchecked(&m->error_count))
36192 m = NULL;
36193
36194 if (likely(m))
36195 @@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
36196 }
36197
36198 ms->mirror[mirror].ms = ms;
36199 - atomic_set(&(ms->mirror[mirror].error_count), 0);
36200 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
36201 ms->mirror[mirror].error_type = 0;
36202 ms->mirror[mirror].offset = offset;
36203
36204 @@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_target *ti)
36205 */
36206 static char device_status_char(struct mirror *m)
36207 {
36208 - if (!atomic_read(&(m->error_count)))
36209 + if (!atomic_read_unchecked(&(m->error_count)))
36210 return 'A';
36211
36212 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
36213 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
36214 index bd58703..9f26571 100644
36215 --- a/drivers/md/dm-stripe.c
36216 +++ b/drivers/md/dm-stripe.c
36217 @@ -20,7 +20,7 @@ struct stripe {
36218 struct dm_dev *dev;
36219 sector_t physical_start;
36220
36221 - atomic_t error_count;
36222 + atomic_unchecked_t error_count;
36223 };
36224
36225 struct stripe_c {
36226 @@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
36227 kfree(sc);
36228 return r;
36229 }
36230 - atomic_set(&(sc->stripe[i].error_count), 0);
36231 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
36232 }
36233
36234 ti->private = sc;
36235 @@ -257,7 +257,7 @@ static int stripe_status(struct dm_target *ti,
36236 DMEMIT("%d ", sc->stripes);
36237 for (i = 0; i < sc->stripes; i++) {
36238 DMEMIT("%s ", sc->stripe[i].dev->name);
36239 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
36240 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
36241 'D' : 'A';
36242 }
36243 buffer[i] = '\0';
36244 @@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
36245 */
36246 for (i = 0; i < sc->stripes; i++)
36247 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
36248 - atomic_inc(&(sc->stripe[i].error_count));
36249 - if (atomic_read(&(sc->stripe[i].error_count)) <
36250 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
36251 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
36252 DM_IO_ERROR_THRESHOLD)
36253 queue_work(kstriped, &sc->kstriped_ws);
36254 }
36255 diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
36256 index 4b04590..13a77b2 100644
36257 --- a/drivers/md/dm-sysfs.c
36258 +++ b/drivers/md/dm-sysfs.c
36259 @@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
36260 NULL,
36261 };
36262
36263 -static struct sysfs_ops dm_sysfs_ops = {
36264 +static const struct sysfs_ops dm_sysfs_ops = {
36265 .show = dm_attr_show,
36266 };
36267
36268 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
36269 index 03345bb..332250d 100644
36270 --- a/drivers/md/dm-table.c
36271 +++ b/drivers/md/dm-table.c
36272 @@ -376,7 +376,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
36273 if (!dev_size)
36274 return 0;
36275
36276 - if ((start >= dev_size) || (start + len > dev_size)) {
36277 + if ((start >= dev_size) || (len > dev_size - start)) {
36278 DMWARN("%s: %s too small for target: "
36279 "start=%llu, len=%llu, dev_size=%llu",
36280 dm_device_name(ti->table->md), bdevname(bdev, b),
36281 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
36282 index c988ac2..c418141 100644
36283 --- a/drivers/md/dm.c
36284 +++ b/drivers/md/dm.c
36285 @@ -165,9 +165,9 @@ struct mapped_device {
36286 /*
36287 * Event handling.
36288 */
36289 - atomic_t event_nr;
36290 + atomic_unchecked_t event_nr;
36291 wait_queue_head_t eventq;
36292 - atomic_t uevent_seq;
36293 + atomic_unchecked_t uevent_seq;
36294 struct list_head uevent_list;
36295 spinlock_t uevent_lock; /* Protect access to uevent_list */
36296
36297 @@ -1776,8 +1776,8 @@ static struct mapped_device *alloc_dev(int minor)
36298 rwlock_init(&md->map_lock);
36299 atomic_set(&md->holders, 1);
36300 atomic_set(&md->open_count, 0);
36301 - atomic_set(&md->event_nr, 0);
36302 - atomic_set(&md->uevent_seq, 0);
36303 + atomic_set_unchecked(&md->event_nr, 0);
36304 + atomic_set_unchecked(&md->uevent_seq, 0);
36305 INIT_LIST_HEAD(&md->uevent_list);
36306 spin_lock_init(&md->uevent_lock);
36307
36308 @@ -1927,7 +1927,7 @@ static void event_callback(void *context)
36309
36310 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
36311
36312 - atomic_inc(&md->event_nr);
36313 + atomic_inc_unchecked(&md->event_nr);
36314 wake_up(&md->eventq);
36315 }
36316
36317 @@ -2562,18 +2562,18 @@ void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
36318
36319 uint32_t dm_next_uevent_seq(struct mapped_device *md)
36320 {
36321 - return atomic_add_return(1, &md->uevent_seq);
36322 + return atomic_add_return_unchecked(1, &md->uevent_seq);
36323 }
36324
36325 uint32_t dm_get_event_nr(struct mapped_device *md)
36326 {
36327 - return atomic_read(&md->event_nr);
36328 + return atomic_read_unchecked(&md->event_nr);
36329 }
36330
36331 int dm_wait_event(struct mapped_device *md, int event_nr)
36332 {
36333 return wait_event_interruptible(md->eventq,
36334 - (event_nr != atomic_read(&md->event_nr)));
36335 + (event_nr != atomic_read_unchecked(&md->event_nr)));
36336 }
36337
36338 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
36339 diff --git a/drivers/md/md.c b/drivers/md/md.c
36340 index 4ce6e2f..7a9530a 100644
36341 --- a/drivers/md/md.c
36342 +++ b/drivers/md/md.c
36343 @@ -153,10 +153,10 @@ static int start_readonly;
36344 * start build, activate spare
36345 */
36346 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
36347 -static atomic_t md_event_count;
36348 +static atomic_unchecked_t md_event_count;
36349 void md_new_event(mddev_t *mddev)
36350 {
36351 - atomic_inc(&md_event_count);
36352 + atomic_inc_unchecked(&md_event_count);
36353 wake_up(&md_event_waiters);
36354 }
36355 EXPORT_SYMBOL_GPL(md_new_event);
36356 @@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
36357 */
36358 static void md_new_event_inintr(mddev_t *mddev)
36359 {
36360 - atomic_inc(&md_event_count);
36361 + atomic_inc_unchecked(&md_event_count);
36362 wake_up(&md_event_waiters);
36363 }
36364
36365 @@ -1226,7 +1226,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
36366
36367 rdev->preferred_minor = 0xffff;
36368 rdev->data_offset = le64_to_cpu(sb->data_offset);
36369 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
36370 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
36371
36372 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
36373 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
36374 @@ -1400,7 +1400,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
36375 else
36376 sb->resync_offset = cpu_to_le64(0);
36377
36378 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
36379 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
36380
36381 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
36382 sb->size = cpu_to_le64(mddev->dev_sectors);
36383 @@ -2222,7 +2222,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
36384 static ssize_t
36385 errors_show(mdk_rdev_t *rdev, char *page)
36386 {
36387 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
36388 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
36389 }
36390
36391 static ssize_t
36392 @@ -2231,7 +2231,7 @@ errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
36393 char *e;
36394 unsigned long n = simple_strtoul(buf, &e, 10);
36395 if (*buf && (*e == 0 || *e == '\n')) {
36396 - atomic_set(&rdev->corrected_errors, n);
36397 + atomic_set_unchecked(&rdev->corrected_errors, n);
36398 return len;
36399 }
36400 return -EINVAL;
36401 @@ -2525,7 +2525,7 @@ static void rdev_free(struct kobject *ko)
36402 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
36403 kfree(rdev);
36404 }
36405 -static struct sysfs_ops rdev_sysfs_ops = {
36406 +static const struct sysfs_ops rdev_sysfs_ops = {
36407 .show = rdev_attr_show,
36408 .store = rdev_attr_store,
36409 };
36410 @@ -2574,8 +2574,8 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
36411 rdev->data_offset = 0;
36412 rdev->sb_events = 0;
36413 atomic_set(&rdev->nr_pending, 0);
36414 - atomic_set(&rdev->read_errors, 0);
36415 - atomic_set(&rdev->corrected_errors, 0);
36416 + atomic_set_unchecked(&rdev->read_errors, 0);
36417 + atomic_set_unchecked(&rdev->corrected_errors, 0);
36418
36419 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
36420 if (!size) {
36421 @@ -3895,7 +3895,7 @@ static void md_free(struct kobject *ko)
36422 kfree(mddev);
36423 }
36424
36425 -static struct sysfs_ops md_sysfs_ops = {
36426 +static const struct sysfs_ops md_sysfs_ops = {
36427 .show = md_attr_show,
36428 .store = md_attr_store,
36429 };
36430 @@ -4482,7 +4482,8 @@ out:
36431 err = 0;
36432 blk_integrity_unregister(disk);
36433 md_new_event(mddev);
36434 - sysfs_notify_dirent(mddev->sysfs_state);
36435 + if (mddev->sysfs_state)
36436 + sysfs_notify_dirent(mddev->sysfs_state);
36437 return err;
36438 }
36439
36440 @@ -5962,7 +5963,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
36441
36442 spin_unlock(&pers_lock);
36443 seq_printf(seq, "\n");
36444 - mi->event = atomic_read(&md_event_count);
36445 + mi->event = atomic_read_unchecked(&md_event_count);
36446 return 0;
36447 }
36448 if (v == (void*)2) {
36449 @@ -6051,7 +6052,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
36450 chunk_kb ? "KB" : "B");
36451 if (bitmap->file) {
36452 seq_printf(seq, ", file: ");
36453 - seq_path(seq, &bitmap->file->f_path, " \t\n");
36454 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
36455 }
36456
36457 seq_printf(seq, "\n");
36458 @@ -6085,7 +6086,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
36459 else {
36460 struct seq_file *p = file->private_data;
36461 p->private = mi;
36462 - mi->event = atomic_read(&md_event_count);
36463 + mi->event = atomic_read_unchecked(&md_event_count);
36464 }
36465 return error;
36466 }
36467 @@ -6101,7 +6102,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
36468 /* always allow read */
36469 mask = POLLIN | POLLRDNORM;
36470
36471 - if (mi->event != atomic_read(&md_event_count))
36472 + if (mi->event != atomic_read_unchecked(&md_event_count))
36473 mask |= POLLERR | POLLPRI;
36474 return mask;
36475 }
36476 @@ -6145,7 +6146,7 @@ static int is_mddev_idle(mddev_t *mddev, int init)
36477 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
36478 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
36479 (int)part_stat_read(&disk->part0, sectors[1]) -
36480 - atomic_read(&disk->sync_io);
36481 + atomic_read_unchecked(&disk->sync_io);
36482 /* sync IO will cause sync_io to increase before the disk_stats
36483 * as sync_io is counted when a request starts, and
36484 * disk_stats is counted when it completes.
36485 diff --git a/drivers/md/md.h b/drivers/md/md.h
36486 index 87430fe..0024a4c 100644
36487 --- a/drivers/md/md.h
36488 +++ b/drivers/md/md.h
36489 @@ -94,10 +94,10 @@ struct mdk_rdev_s
36490 * only maintained for arrays that
36491 * support hot removal
36492 */
36493 - atomic_t read_errors; /* number of consecutive read errors that
36494 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
36495 * we have tried to ignore.
36496 */
36497 - atomic_t corrected_errors; /* number of corrected read errors,
36498 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
36499 * for reporting to userspace and storing
36500 * in superblock.
36501 */
36502 @@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
36503
36504 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
36505 {
36506 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
36507 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
36508 }
36509
36510 struct mdk_personality
36511 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
36512 index 968cb14..f0ad2e4 100644
36513 --- a/drivers/md/raid1.c
36514 +++ b/drivers/md/raid1.c
36515 @@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
36516 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
36517 continue;
36518 rdev = conf->mirrors[d].rdev;
36519 - atomic_add(s, &rdev->corrected_errors);
36520 + atomic_add_unchecked(s, &rdev->corrected_errors);
36521 if (sync_page_io(rdev->bdev,
36522 sect + rdev->data_offset,
36523 s<<9,
36524 @@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
36525 /* Well, this device is dead */
36526 md_error(mddev, rdev);
36527 else {
36528 - atomic_add(s, &rdev->corrected_errors);
36529 + atomic_add_unchecked(s, &rdev->corrected_errors);
36530 printk(KERN_INFO
36531 "raid1:%s: read error corrected "
36532 "(%d sectors at %llu on %s)\n",
36533 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
36534 index 1b4e232..cf0f534 100644
36535 --- a/drivers/md/raid10.c
36536 +++ b/drivers/md/raid10.c
36537 @@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bio, int error)
36538 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
36539 set_bit(R10BIO_Uptodate, &r10_bio->state);
36540 else {
36541 - atomic_add(r10_bio->sectors,
36542 + atomic_add_unchecked(r10_bio->sectors,
36543 &conf->mirrors[d].rdev->corrected_errors);
36544 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
36545 md_error(r10_bio->mddev,
36546 @@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
36547 test_bit(In_sync, &rdev->flags)) {
36548 atomic_inc(&rdev->nr_pending);
36549 rcu_read_unlock();
36550 - atomic_add(s, &rdev->corrected_errors);
36551 + atomic_add_unchecked(s, &rdev->corrected_errors);
36552 if (sync_page_io(rdev->bdev,
36553 r10_bio->devs[sl].addr +
36554 sect + rdev->data_offset,
36555 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
36556 index 883215d..675bf47 100644
36557 --- a/drivers/md/raid5.c
36558 +++ b/drivers/md/raid5.c
36559 @@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
36560 bi->bi_next = NULL;
36561 if ((rw & WRITE) &&
36562 test_bit(R5_ReWrite, &sh->dev[i].flags))
36563 - atomic_add(STRIPE_SECTORS,
36564 + atomic_add_unchecked(STRIPE_SECTORS,
36565 &rdev->corrected_errors);
36566 generic_make_request(bi);
36567 } else {
36568 @@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struct bio * bi, int error)
36569 clear_bit(R5_ReadError, &sh->dev[i].flags);
36570 clear_bit(R5_ReWrite, &sh->dev[i].flags);
36571 }
36572 - if (atomic_read(&conf->disks[i].rdev->read_errors))
36573 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
36574 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
36575 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
36576 } else {
36577 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
36578 int retry = 0;
36579 rdev = conf->disks[i].rdev;
36580
36581 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
36582 - atomic_inc(&rdev->read_errors);
36583 + atomic_inc_unchecked(&rdev->read_errors);
36584 if (conf->mddev->degraded >= conf->max_degraded)
36585 printk_rl(KERN_WARNING
36586 "raid5:%s: read error not correctable "
36587 @@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
36588 (unsigned long long)(sh->sector
36589 + rdev->data_offset),
36590 bdn);
36591 - else if (atomic_read(&rdev->read_errors)
36592 + else if (atomic_read_unchecked(&rdev->read_errors)
36593 > conf->max_nr_stripes)
36594 printk(KERN_WARNING
36595 "raid5:%s: Too many read errors, failing device %s.\n",
36596 @@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
36597 sector_t r_sector;
36598 struct stripe_head sh2;
36599
36600 + pax_track_stack();
36601
36602 chunk_offset = sector_div(new_sector, sectors_per_chunk);
36603 stripe = new_sector;
36604 diff --git a/drivers/media/common/saa7146_hlp.c b/drivers/media/common/saa7146_hlp.c
36605 index 05bde9c..2f31d40 100644
36606 --- a/drivers/media/common/saa7146_hlp.c
36607 +++ b/drivers/media/common/saa7146_hlp.c
36608 @@ -353,6 +353,8 @@ static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct sa
36609
36610 int x[32], y[32], w[32], h[32];
36611
36612 + pax_track_stack();
36613 +
36614 /* clear out memory */
36615 memset(&line_list[0], 0x00, sizeof(u32)*32);
36616 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
36617 diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36618 index cb22da5..82b686e 100644
36619 --- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36620 +++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36621 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * eb
36622 u8 buf[HOST_LINK_BUF_SIZE];
36623 int i;
36624
36625 + pax_track_stack();
36626 +
36627 dprintk("%s\n", __func__);
36628
36629 /* check if we have space for a link buf in the rx_buffer */
36630 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
36631 unsigned long timeout;
36632 int written;
36633
36634 + pax_track_stack();
36635 +
36636 dprintk("%s\n", __func__);
36637
36638 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
36639 diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
36640 index 2fe05d0..a3289c4 100644
36641 --- a/drivers/media/dvb/dvb-core/dvb_demux.h
36642 +++ b/drivers/media/dvb/dvb-core/dvb_demux.h
36643 @@ -71,7 +71,7 @@ struct dvb_demux_feed {
36644 union {
36645 dmx_ts_cb ts;
36646 dmx_section_cb sec;
36647 - } cb;
36648 + } __no_const cb;
36649
36650 struct dvb_demux *demux;
36651 void *priv;
36652 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
36653 index 94159b9..376bd8e 100644
36654 --- a/drivers/media/dvb/dvb-core/dvbdev.c
36655 +++ b/drivers/media/dvb/dvb-core/dvbdev.c
36656 @@ -191,7 +191,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
36657 const struct dvb_device *template, void *priv, int type)
36658 {
36659 struct dvb_device *dvbdev;
36660 - struct file_operations *dvbdevfops;
36661 + file_operations_no_const *dvbdevfops;
36662 struct device *clsdev;
36663 int minor;
36664 int id;
36665 diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
36666 index 2a53dd0..db8c07a 100644
36667 --- a/drivers/media/dvb/dvb-usb/cxusb.c
36668 +++ b/drivers/media/dvb/dvb-usb/cxusb.c
36669 @@ -1040,7 +1040,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
36670 struct dib0700_adapter_state {
36671 int (*set_param_save) (struct dvb_frontend *,
36672 struct dvb_frontend_parameters *);
36673 -};
36674 +} __no_const;
36675
36676 static int dib7070_set_param_override(struct dvb_frontend *fe,
36677 struct dvb_frontend_parameters *fep)
36678 diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
36679 index db7f7f7..f55e96f 100644
36680 --- a/drivers/media/dvb/dvb-usb/dib0700_core.c
36681 +++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
36682 @@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw
36683
36684 u8 buf[260];
36685
36686 + pax_track_stack();
36687 +
36688 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
36689 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
36690
36691 diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
36692 index 524acf5..5ffc403 100644
36693 --- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
36694 +++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
36695 @@ -28,7 +28,7 @@ MODULE_PARM_DESC(force_lna_activation, "force the activation of Low-Noise-Amplif
36696
36697 struct dib0700_adapter_state {
36698 int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
36699 -};
36700 +} __no_const;
36701
36702 /* Hauppauge Nova-T 500 (aka Bristol)
36703 * has a LNA on GPIO0 which is enabled by setting 1 */
36704 diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
36705 index ba91735..4261d84 100644
36706 --- a/drivers/media/dvb/frontends/dib3000.h
36707 +++ b/drivers/media/dvb/frontends/dib3000.h
36708 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
36709 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
36710 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
36711 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
36712 -};
36713 +} __no_const;
36714
36715 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
36716 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
36717 diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c
36718 index c709ce6..b3fe620 100644
36719 --- a/drivers/media/dvb/frontends/or51211.c
36720 +++ b/drivers/media/dvb/frontends/or51211.c
36721 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct dvb_frontend* fe,
36722 u8 tudata[585];
36723 int i;
36724
36725 + pax_track_stack();
36726 +
36727 dprintk("Firmware is %zd bytes\n",fw->size);
36728
36729 /* Get eprom data */
36730 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
36731 index 482d0f3..ee1e202 100644
36732 --- a/drivers/media/radio/radio-cadet.c
36733 +++ b/drivers/media/radio/radio-cadet.c
36734 @@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
36735 while (i < count && dev->rdsin != dev->rdsout)
36736 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
36737
36738 - if (copy_to_user(data, readbuf, i))
36739 + if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
36740 return -EFAULT;
36741 return i;
36742 }
36743 diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
36744 index 6dd51e2..0359b92 100644
36745 --- a/drivers/media/video/cx18/cx18-driver.c
36746 +++ b/drivers/media/video/cx18/cx18-driver.c
36747 @@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl[] __devinitdata = {
36748
36749 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
36750
36751 -static atomic_t cx18_instance = ATOMIC_INIT(0);
36752 +static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
36753
36754 /* Parameter declarations */
36755 static int cardtype[CX18_MAX_CARDS];
36756 @@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
36757 struct i2c_client c;
36758 u8 eedata[256];
36759
36760 + pax_track_stack();
36761 +
36762 memset(&c, 0, sizeof(c));
36763 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
36764 c.adapter = &cx->i2c_adap[0];
36765 @@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
36766 struct cx18 *cx;
36767
36768 /* FIXME - module parameter arrays constrain max instances */
36769 - i = atomic_inc_return(&cx18_instance) - 1;
36770 + i = atomic_inc_return_unchecked(&cx18_instance) - 1;
36771 if (i >= CX18_MAX_CARDS) {
36772 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
36773 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
36774 diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
36775 index 463ec34..2f4625a 100644
36776 --- a/drivers/media/video/ivtv/ivtv-driver.c
36777 +++ b/drivers/media/video/ivtv/ivtv-driver.c
36778 @@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl[] __devinitdata = {
36779 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
36780
36781 /* ivtv instance counter */
36782 -static atomic_t ivtv_instance = ATOMIC_INIT(0);
36783 +static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
36784
36785 /* Parameter declarations */
36786 static int cardtype[IVTV_MAX_CARDS];
36787 diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
36788 index 5fc4ac0..652a54a 100644
36789 --- a/drivers/media/video/omap24xxcam.c
36790 +++ b/drivers/media/video/omap24xxcam.c
36791 @@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(struct omap24xxcam_sgdma *sgdma,
36792 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
36793
36794 do_gettimeofday(&vb->ts);
36795 - vb->field_count = atomic_add_return(2, &fh->field_count);
36796 + vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
36797 if (csr & csr_error) {
36798 vb->state = VIDEOBUF_ERROR;
36799 if (!atomic_read(&fh->cam->in_reset)) {
36800 diff --git a/drivers/media/video/omap24xxcam.h b/drivers/media/video/omap24xxcam.h
36801 index 2ce67f5..cf26a5b 100644
36802 --- a/drivers/media/video/omap24xxcam.h
36803 +++ b/drivers/media/video/omap24xxcam.h
36804 @@ -533,7 +533,7 @@ struct omap24xxcam_fh {
36805 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
36806 struct videobuf_queue vbq;
36807 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
36808 - atomic_t field_count; /* field counter for videobuf_buffer */
36809 + atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
36810 /* accessing cam here doesn't need serialisation: it's constant */
36811 struct omap24xxcam_device *cam;
36812 };
36813 diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36814 index 299afa4..eb47459 100644
36815 --- a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36816 +++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36817 @@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
36818 u8 *eeprom;
36819 struct tveeprom tvdata;
36820
36821 + pax_track_stack();
36822 +
36823 memset(&tvdata,0,sizeof(tvdata));
36824
36825 eeprom = pvr2_eeprom_fetch(hdw);
36826 diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36827 index 5b152ff..3320638 100644
36828 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36829 +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36830 @@ -195,7 +195,7 @@ struct pvr2_hdw {
36831
36832 /* I2C stuff */
36833 struct i2c_adapter i2c_adap;
36834 - struct i2c_algorithm i2c_algo;
36835 + i2c_algorithm_no_const i2c_algo;
36836 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
36837 int i2c_cx25840_hack_state;
36838 int i2c_linked;
36839 diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
36840 index 1eabff6..8e2313a 100644
36841 --- a/drivers/media/video/saa7134/saa6752hs.c
36842 +++ b/drivers/media/video/saa7134/saa6752hs.c
36843 @@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_subdev *sd, u32 leading_null_bytes)
36844 unsigned char localPAT[256];
36845 unsigned char localPMT[256];
36846
36847 + pax_track_stack();
36848 +
36849 /* Set video format - must be done first as it resets other settings */
36850 set_reg8(client, 0x41, h->video_format);
36851
36852 diff --git a/drivers/media/video/saa7164/saa7164-cmd.c b/drivers/media/video/saa7164/saa7164-cmd.c
36853 index 9c1d3ac..b1b49e9 100644
36854 --- a/drivers/media/video/saa7164/saa7164-cmd.c
36855 +++ b/drivers/media/video/saa7164/saa7164-cmd.c
36856 @@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_dev *dev)
36857 wait_queue_head_t *q = 0;
36858 dprintk(DBGLVL_CMD, "%s()\n", __func__);
36859
36860 + pax_track_stack();
36861 +
36862 /* While any outstand message on the bus exists... */
36863 do {
36864
36865 @@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_dev *dev)
36866 u8 tmp[512];
36867 dprintk(DBGLVL_CMD, "%s()\n", __func__);
36868
36869 + pax_track_stack();
36870 +
36871 while (loop) {
36872
36873 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
36874 diff --git a/drivers/media/video/usbvideo/ibmcam.c b/drivers/media/video/usbvideo/ibmcam.c
36875 index b085496..cde0270 100644
36876 --- a/drivers/media/video/usbvideo/ibmcam.c
36877 +++ b/drivers/media/video/usbvideo/ibmcam.c
36878 @@ -3947,15 +3947,15 @@ static struct usb_device_id id_table[] = {
36879 static int __init ibmcam_init(void)
36880 {
36881 struct usbvideo_cb cbTbl;
36882 - memset(&cbTbl, 0, sizeof(cbTbl));
36883 - cbTbl.probe = ibmcam_probe;
36884 - cbTbl.setupOnOpen = ibmcam_setup_on_open;
36885 - cbTbl.videoStart = ibmcam_video_start;
36886 - cbTbl.videoStop = ibmcam_video_stop;
36887 - cbTbl.processData = ibmcam_ProcessIsocData;
36888 - cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36889 - cbTbl.adjustPicture = ibmcam_adjust_picture;
36890 - cbTbl.getFPS = ibmcam_calculate_fps;
36891 + memset((void *)&cbTbl, 0, sizeof(cbTbl));
36892 + *(void **)&cbTbl.probe = ibmcam_probe;
36893 + *(void **)&cbTbl.setupOnOpen = ibmcam_setup_on_open;
36894 + *(void **)&cbTbl.videoStart = ibmcam_video_start;
36895 + *(void **)&cbTbl.videoStop = ibmcam_video_stop;
36896 + *(void **)&cbTbl.processData = ibmcam_ProcessIsocData;
36897 + *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36898 + *(void **)&cbTbl.adjustPicture = ibmcam_adjust_picture;
36899 + *(void **)&cbTbl.getFPS = ibmcam_calculate_fps;
36900 return usbvideo_register(
36901 &cams,
36902 MAX_IBMCAM,
36903 diff --git a/drivers/media/video/usbvideo/konicawc.c b/drivers/media/video/usbvideo/konicawc.c
36904 index 31d57f2..600b735 100644
36905 --- a/drivers/media/video/usbvideo/konicawc.c
36906 +++ b/drivers/media/video/usbvideo/konicawc.c
36907 @@ -225,7 +225,7 @@ static void konicawc_register_input(struct konicawc *cam, struct usb_device *dev
36908 int error;
36909
36910 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
36911 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36912 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36913
36914 cam->input = input_dev = input_allocate_device();
36915 if (!input_dev) {
36916 @@ -935,16 +935,16 @@ static int __init konicawc_init(void)
36917 struct usbvideo_cb cbTbl;
36918 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
36919 DRIVER_DESC "\n");
36920 - memset(&cbTbl, 0, sizeof(cbTbl));
36921 - cbTbl.probe = konicawc_probe;
36922 - cbTbl.setupOnOpen = konicawc_setup_on_open;
36923 - cbTbl.processData = konicawc_process_isoc;
36924 - cbTbl.getFPS = konicawc_calculate_fps;
36925 - cbTbl.setVideoMode = konicawc_set_video_mode;
36926 - cbTbl.startDataPump = konicawc_start_data;
36927 - cbTbl.stopDataPump = konicawc_stop_data;
36928 - cbTbl.adjustPicture = konicawc_adjust_picture;
36929 - cbTbl.userFree = konicawc_free_uvd;
36930 + memset((void * )&cbTbl, 0, sizeof(cbTbl));
36931 + *(void **)&cbTbl.probe = konicawc_probe;
36932 + *(void **)&cbTbl.setupOnOpen = konicawc_setup_on_open;
36933 + *(void **)&cbTbl.processData = konicawc_process_isoc;
36934 + *(void **)&cbTbl.getFPS = konicawc_calculate_fps;
36935 + *(void **)&cbTbl.setVideoMode = konicawc_set_video_mode;
36936 + *(void **)&cbTbl.startDataPump = konicawc_start_data;
36937 + *(void **)&cbTbl.stopDataPump = konicawc_stop_data;
36938 + *(void **)&cbTbl.adjustPicture = konicawc_adjust_picture;
36939 + *(void **)&cbTbl.userFree = konicawc_free_uvd;
36940 return usbvideo_register(
36941 &cams,
36942 MAX_CAMERAS,
36943 diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
36944 index 803d3e4..c4d1b96 100644
36945 --- a/drivers/media/video/usbvideo/quickcam_messenger.c
36946 +++ b/drivers/media/video/usbvideo/quickcam_messenger.c
36947 @@ -89,7 +89,7 @@ static void qcm_register_input(struct qcm *cam, struct usb_device *dev)
36948 int error;
36949
36950 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
36951 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36952 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36953
36954 cam->input = input_dev = input_allocate_device();
36955 if (!input_dev) {
36956 diff --git a/drivers/media/video/usbvideo/ultracam.c b/drivers/media/video/usbvideo/ultracam.c
36957 index fbd1b63..292f9f0 100644
36958 --- a/drivers/media/video/usbvideo/ultracam.c
36959 +++ b/drivers/media/video/usbvideo/ultracam.c
36960 @@ -655,14 +655,14 @@ static int __init ultracam_init(void)
36961 {
36962 struct usbvideo_cb cbTbl;
36963 memset(&cbTbl, 0, sizeof(cbTbl));
36964 - cbTbl.probe = ultracam_probe;
36965 - cbTbl.setupOnOpen = ultracam_setup_on_open;
36966 - cbTbl.videoStart = ultracam_video_start;
36967 - cbTbl.videoStop = ultracam_video_stop;
36968 - cbTbl.processData = ultracam_ProcessIsocData;
36969 - cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36970 - cbTbl.adjustPicture = ultracam_adjust_picture;
36971 - cbTbl.getFPS = ultracam_calculate_fps;
36972 + *(void **)&cbTbl.probe = ultracam_probe;
36973 + *(void **)&cbTbl.setupOnOpen = ultracam_setup_on_open;
36974 + *(void **)&cbTbl.videoStart = ultracam_video_start;
36975 + *(void **)&cbTbl.videoStop = ultracam_video_stop;
36976 + *(void **)&cbTbl.processData = ultracam_ProcessIsocData;
36977 + *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36978 + *(void **)&cbTbl.adjustPicture = ultracam_adjust_picture;
36979 + *(void **)&cbTbl.getFPS = ultracam_calculate_fps;
36980 return usbvideo_register(
36981 &cams,
36982 MAX_CAMERAS,
36983 diff --git a/drivers/media/video/usbvideo/usbvideo.c b/drivers/media/video/usbvideo/usbvideo.c
36984 index dea8b32..34f6878 100644
36985 --- a/drivers/media/video/usbvideo/usbvideo.c
36986 +++ b/drivers/media/video/usbvideo/usbvideo.c
36987 @@ -697,15 +697,15 @@ int usbvideo_register(
36988 __func__, cams, base_size, num_cams);
36989
36990 /* Copy callbacks, apply defaults for those that are not set */
36991 - memmove(&cams->cb, cbTbl, sizeof(cams->cb));
36992 + memmove((void *)&cams->cb, cbTbl, sizeof(cams->cb));
36993 if (cams->cb.getFrame == NULL)
36994 - cams->cb.getFrame = usbvideo_GetFrame;
36995 + *(void **)&cams->cb.getFrame = usbvideo_GetFrame;
36996 if (cams->cb.disconnect == NULL)
36997 - cams->cb.disconnect = usbvideo_Disconnect;
36998 + *(void **)&cams->cb.disconnect = usbvideo_Disconnect;
36999 if (cams->cb.startDataPump == NULL)
37000 - cams->cb.startDataPump = usbvideo_StartDataPump;
37001 + *(void **)&cams->cb.startDataPump = usbvideo_StartDataPump;
37002 if (cams->cb.stopDataPump == NULL)
37003 - cams->cb.stopDataPump = usbvideo_StopDataPump;
37004 + *(void **)&cams->cb.stopDataPump = usbvideo_StopDataPump;
37005
37006 cams->num_cameras = num_cams;
37007 cams->cam = (struct uvd *) &cams[1];
37008 diff --git a/drivers/media/video/usbvideo/usbvideo.h b/drivers/media/video/usbvideo/usbvideo.h
37009 index c66985b..7fa143a 100644
37010 --- a/drivers/media/video/usbvideo/usbvideo.h
37011 +++ b/drivers/media/video/usbvideo/usbvideo.h
37012 @@ -268,7 +268,7 @@ struct usbvideo_cb {
37013 int (*startDataPump)(struct uvd *uvd);
37014 void (*stopDataPump)(struct uvd *uvd);
37015 int (*setVideoMode)(struct uvd *uvd, struct video_window *vw);
37016 -};
37017 +} __no_const;
37018
37019 struct usbvideo {
37020 int num_cameras; /* As allocated */
37021 diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
37022 index e0f91e4..37554ea 100644
37023 --- a/drivers/media/video/usbvision/usbvision-core.c
37024 +++ b/drivers/media/video/usbvision/usbvision-core.c
37025 @@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_compress(struct usb_usbvision *usbvision,
37026 unsigned char rv, gv, bv;
37027 static unsigned char *Y, *U, *V;
37028
37029 + pax_track_stack();
37030 +
37031 frame = usbvision->curFrame;
37032 imageSize = frame->frmwidth * frame->frmheight;
37033 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
37034 diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
37035 index 0d06e7c..3d17d24 100644
37036 --- a/drivers/media/video/v4l2-device.c
37037 +++ b/drivers/media/video/v4l2-device.c
37038 @@ -50,9 +50,9 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
37039 EXPORT_SYMBOL_GPL(v4l2_device_register);
37040
37041 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
37042 - atomic_t *instance)
37043 + atomic_unchecked_t *instance)
37044 {
37045 - int num = atomic_inc_return(instance) - 1;
37046 + int num = atomic_inc_return_unchecked(instance) - 1;
37047 int len = strlen(basename);
37048
37049 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
37050 diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
37051 index 032ebae..6a3532c 100644
37052 --- a/drivers/media/video/videobuf-dma-sg.c
37053 +++ b/drivers/media/video/videobuf-dma-sg.c
37054 @@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
37055 {
37056 struct videobuf_queue q;
37057
37058 + pax_track_stack();
37059 +
37060 /* Required to make generic handler to call __videobuf_alloc */
37061 q.int_ops = &sg_ops;
37062
37063 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
37064 index b6992b7..9fa7547 100644
37065 --- a/drivers/message/fusion/mptbase.c
37066 +++ b/drivers/message/fusion/mptbase.c
37067 @@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **start, off_t offset, int request, int *eo
37068 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
37069 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
37070
37071 +#ifdef CONFIG_GRKERNSEC_HIDESYM
37072 + len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
37073 + NULL, NULL);
37074 +#else
37075 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
37076 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
37077 +#endif
37078 +
37079 /*
37080 * Rounding UP to nearest 4-kB boundary here...
37081 */
37082 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
37083 index 83873e3..e360e9a 100644
37084 --- a/drivers/message/fusion/mptsas.c
37085 +++ b/drivers/message/fusion/mptsas.c
37086 @@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
37087 return 0;
37088 }
37089
37090 +static inline void
37091 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
37092 +{
37093 + if (phy_info->port_details) {
37094 + phy_info->port_details->rphy = rphy;
37095 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
37096 + ioc->name, rphy));
37097 + }
37098 +
37099 + if (rphy) {
37100 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
37101 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
37102 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
37103 + ioc->name, rphy, rphy->dev.release));
37104 + }
37105 +}
37106 +
37107 /* no mutex */
37108 static void
37109 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
37110 @@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
37111 return NULL;
37112 }
37113
37114 -static inline void
37115 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
37116 -{
37117 - if (phy_info->port_details) {
37118 - phy_info->port_details->rphy = rphy;
37119 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
37120 - ioc->name, rphy));
37121 - }
37122 -
37123 - if (rphy) {
37124 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
37125 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
37126 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
37127 - ioc->name, rphy, rphy->dev.release));
37128 - }
37129 -}
37130 -
37131 static inline struct sas_port *
37132 mptsas_get_port(struct mptsas_phyinfo *phy_info)
37133 {
37134 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
37135 index bd096ca..332cf76 100644
37136 --- a/drivers/message/fusion/mptscsih.c
37137 +++ b/drivers/message/fusion/mptscsih.c
37138 @@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
37139
37140 h = shost_priv(SChost);
37141
37142 - if (h) {
37143 - if (h->info_kbuf == NULL)
37144 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
37145 - return h->info_kbuf;
37146 - h->info_kbuf[0] = '\0';
37147 + if (!h)
37148 + return NULL;
37149
37150 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
37151 - h->info_kbuf[size-1] = '\0';
37152 - }
37153 + if (h->info_kbuf == NULL)
37154 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
37155 + return h->info_kbuf;
37156 + h->info_kbuf[0] = '\0';
37157 +
37158 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
37159 + h->info_kbuf[size-1] = '\0';
37160
37161 return h->info_kbuf;
37162 }
37163 diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
37164 index efba702..59b2c0f 100644
37165 --- a/drivers/message/i2o/i2o_config.c
37166 +++ b/drivers/message/i2o/i2o_config.c
37167 @@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned long arg)
37168 struct i2o_message *msg;
37169 unsigned int iop;
37170
37171 + pax_track_stack();
37172 +
37173 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
37174 return -EFAULT;
37175
37176 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
37177 index 7045c45..c07b170 100644
37178 --- a/drivers/message/i2o/i2o_proc.c
37179 +++ b/drivers/message/i2o/i2o_proc.c
37180 @@ -259,13 +259,6 @@ static char *scsi_devices[] = {
37181 "Array Controller Device"
37182 };
37183
37184 -static char *chtostr(u8 * chars, int n)
37185 -{
37186 - char tmp[256];
37187 - tmp[0] = 0;
37188 - return strncat(tmp, (char *)chars, n);
37189 -}
37190 -
37191 static int i2o_report_query_status(struct seq_file *seq, int block_status,
37192 char *group)
37193 {
37194 @@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
37195
37196 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
37197 seq_printf(seq, "%-#8x", ddm_table.module_id);
37198 - seq_printf(seq, "%-29s",
37199 - chtostr(ddm_table.module_name_version, 28));
37200 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
37201 seq_printf(seq, "%9d ", ddm_table.data_size);
37202 seq_printf(seq, "%8d", ddm_table.code_size);
37203
37204 @@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
37205
37206 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
37207 seq_printf(seq, "%-#8x", dst->module_id);
37208 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
37209 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
37210 + seq_printf(seq, "%-.28s", dst->module_name_version);
37211 + seq_printf(seq, "%-.8s", dst->date);
37212 seq_printf(seq, "%8d ", dst->module_size);
37213 seq_printf(seq, "%8d ", dst->mpb_size);
37214 seq_printf(seq, "0x%04x", dst->module_flags);
37215 @@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
37216 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
37217 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
37218 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
37219 - seq_printf(seq, "Vendor info : %s\n",
37220 - chtostr((u8 *) (work32 + 2), 16));
37221 - seq_printf(seq, "Product info : %s\n",
37222 - chtostr((u8 *) (work32 + 6), 16));
37223 - seq_printf(seq, "Description : %s\n",
37224 - chtostr((u8 *) (work32 + 10), 16));
37225 - seq_printf(seq, "Product rev. : %s\n",
37226 - chtostr((u8 *) (work32 + 14), 8));
37227 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
37228 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
37229 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
37230 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
37231
37232 seq_printf(seq, "Serial number : ");
37233 print_serial_number(seq, (u8 *) (work32 + 16),
37234 @@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
37235 }
37236
37237 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
37238 - seq_printf(seq, "Module name : %s\n",
37239 - chtostr(result.module_name, 24));
37240 - seq_printf(seq, "Module revision : %s\n",
37241 - chtostr(result.module_rev, 8));
37242 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
37243 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
37244
37245 seq_printf(seq, "Serial number : ");
37246 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
37247 @@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
37248 return 0;
37249 }
37250
37251 - seq_printf(seq, "Device name : %s\n",
37252 - chtostr(result.device_name, 64));
37253 - seq_printf(seq, "Service name : %s\n",
37254 - chtostr(result.service_name, 64));
37255 - seq_printf(seq, "Physical name : %s\n",
37256 - chtostr(result.physical_location, 64));
37257 - seq_printf(seq, "Instance number : %s\n",
37258 - chtostr(result.instance_number, 4));
37259 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
37260 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
37261 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
37262 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
37263
37264 return 0;
37265 }
37266 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
37267 index 27cf4af..b1205b8 100644
37268 --- a/drivers/message/i2o/iop.c
37269 +++ b/drivers/message/i2o/iop.c
37270 @@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
37271
37272 spin_lock_irqsave(&c->context_list_lock, flags);
37273
37274 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
37275 - atomic_inc(&c->context_list_counter);
37276 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
37277 + atomic_inc_unchecked(&c->context_list_counter);
37278
37279 - entry->context = atomic_read(&c->context_list_counter);
37280 + entry->context = atomic_read_unchecked(&c->context_list_counter);
37281
37282 list_add(&entry->list, &c->context_list);
37283
37284 @@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
37285
37286 #if BITS_PER_LONG == 64
37287 spin_lock_init(&c->context_list_lock);
37288 - atomic_set(&c->context_list_counter, 0);
37289 + atomic_set_unchecked(&c->context_list_counter, 0);
37290 INIT_LIST_HEAD(&c->context_list);
37291 #endif
37292
37293 diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
37294 index 78e3e85..66c9a0d 100644
37295 --- a/drivers/mfd/ab3100-core.c
37296 +++ b/drivers/mfd/ab3100-core.c
37297 @@ -777,7 +777,7 @@ struct ab_family_id {
37298 char *name;
37299 };
37300
37301 -static const struct ab_family_id ids[] __initdata = {
37302 +static const struct ab_family_id ids[] __initconst = {
37303 /* AB3100 */
37304 {
37305 .id = 0xc0,
37306 diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
37307 index 8d8c932..8104515 100644
37308 --- a/drivers/mfd/wm8350-i2c.c
37309 +++ b/drivers/mfd/wm8350-i2c.c
37310 @@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg,
37311 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
37312 int ret;
37313
37314 + pax_track_stack();
37315 +
37316 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
37317 return -EINVAL;
37318
37319 diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
37320 index e4ff50b..4cc3f04 100644
37321 --- a/drivers/misc/kgdbts.c
37322 +++ b/drivers/misc/kgdbts.c
37323 @@ -118,7 +118,7 @@
37324 } while (0)
37325 #define MAX_CONFIG_LEN 40
37326
37327 -static struct kgdb_io kgdbts_io_ops;
37328 +static const struct kgdb_io kgdbts_io_ops;
37329 static char get_buf[BUFMAX];
37330 static int get_buf_cnt;
37331 static char put_buf[BUFMAX];
37332 @@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void)
37333 module_put(THIS_MODULE);
37334 }
37335
37336 -static struct kgdb_io kgdbts_io_ops = {
37337 +static const struct kgdb_io kgdbts_io_ops = {
37338 .name = "kgdbts",
37339 .read_char = kgdbts_get_char,
37340 .write_char = kgdbts_put_char,
37341 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
37342 index 37e7cfc..67cfb76 100644
37343 --- a/drivers/misc/sgi-gru/gruhandles.c
37344 +++ b/drivers/misc/sgi-gru/gruhandles.c
37345 @@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistics[mcsop_last];
37346
37347 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
37348 {
37349 - atomic_long_inc(&mcs_op_statistics[op].count);
37350 - atomic_long_add(clks, &mcs_op_statistics[op].total);
37351 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
37352 + atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
37353 if (mcs_op_statistics[op].max < clks)
37354 mcs_op_statistics[op].max = clks;
37355 }
37356 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
37357 index 3f2375c..467c6e6 100644
37358 --- a/drivers/misc/sgi-gru/gruprocfs.c
37359 +++ b/drivers/misc/sgi-gru/gruprocfs.c
37360 @@ -32,9 +32,9 @@
37361
37362 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
37363
37364 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
37365 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
37366 {
37367 - unsigned long val = atomic_long_read(v);
37368 + unsigned long val = atomic_long_read_unchecked(v);
37369
37370 if (val)
37371 seq_printf(s, "%16lu %s\n", val, id);
37372 @@ -136,8 +136,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
37373 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
37374
37375 for (op = 0; op < mcsop_last; op++) {
37376 - count = atomic_long_read(&mcs_op_statistics[op].count);
37377 - total = atomic_long_read(&mcs_op_statistics[op].total);
37378 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
37379 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
37380 max = mcs_op_statistics[op].max;
37381 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
37382 count ? total / count : 0, max);
37383 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
37384 index 46990bc..4a251b5 100644
37385 --- a/drivers/misc/sgi-gru/grutables.h
37386 +++ b/drivers/misc/sgi-gru/grutables.h
37387 @@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
37388 * GRU statistics.
37389 */
37390 struct gru_stats_s {
37391 - atomic_long_t vdata_alloc;
37392 - atomic_long_t vdata_free;
37393 - atomic_long_t gts_alloc;
37394 - atomic_long_t gts_free;
37395 - atomic_long_t vdata_double_alloc;
37396 - atomic_long_t gts_double_allocate;
37397 - atomic_long_t assign_context;
37398 - atomic_long_t assign_context_failed;
37399 - atomic_long_t free_context;
37400 - atomic_long_t load_user_context;
37401 - atomic_long_t load_kernel_context;
37402 - atomic_long_t lock_kernel_context;
37403 - atomic_long_t unlock_kernel_context;
37404 - atomic_long_t steal_user_context;
37405 - atomic_long_t steal_kernel_context;
37406 - atomic_long_t steal_context_failed;
37407 - atomic_long_t nopfn;
37408 - atomic_long_t break_cow;
37409 - atomic_long_t asid_new;
37410 - atomic_long_t asid_next;
37411 - atomic_long_t asid_wrap;
37412 - atomic_long_t asid_reuse;
37413 - atomic_long_t intr;
37414 - atomic_long_t intr_mm_lock_failed;
37415 - atomic_long_t call_os;
37416 - atomic_long_t call_os_offnode_reference;
37417 - atomic_long_t call_os_check_for_bug;
37418 - atomic_long_t call_os_wait_queue;
37419 - atomic_long_t user_flush_tlb;
37420 - atomic_long_t user_unload_context;
37421 - atomic_long_t user_exception;
37422 - atomic_long_t set_context_option;
37423 - atomic_long_t migrate_check;
37424 - atomic_long_t migrated_retarget;
37425 - atomic_long_t migrated_unload;
37426 - atomic_long_t migrated_unload_delay;
37427 - atomic_long_t migrated_nopfn_retarget;
37428 - atomic_long_t migrated_nopfn_unload;
37429 - atomic_long_t tlb_dropin;
37430 - atomic_long_t tlb_dropin_fail_no_asid;
37431 - atomic_long_t tlb_dropin_fail_upm;
37432 - atomic_long_t tlb_dropin_fail_invalid;
37433 - atomic_long_t tlb_dropin_fail_range_active;
37434 - atomic_long_t tlb_dropin_fail_idle;
37435 - atomic_long_t tlb_dropin_fail_fmm;
37436 - atomic_long_t tlb_dropin_fail_no_exception;
37437 - atomic_long_t tlb_dropin_fail_no_exception_war;
37438 - atomic_long_t tfh_stale_on_fault;
37439 - atomic_long_t mmu_invalidate_range;
37440 - atomic_long_t mmu_invalidate_page;
37441 - atomic_long_t mmu_clear_flush_young;
37442 - atomic_long_t flush_tlb;
37443 - atomic_long_t flush_tlb_gru;
37444 - atomic_long_t flush_tlb_gru_tgh;
37445 - atomic_long_t flush_tlb_gru_zero_asid;
37446 + atomic_long_unchecked_t vdata_alloc;
37447 + atomic_long_unchecked_t vdata_free;
37448 + atomic_long_unchecked_t gts_alloc;
37449 + atomic_long_unchecked_t gts_free;
37450 + atomic_long_unchecked_t vdata_double_alloc;
37451 + atomic_long_unchecked_t gts_double_allocate;
37452 + atomic_long_unchecked_t assign_context;
37453 + atomic_long_unchecked_t assign_context_failed;
37454 + atomic_long_unchecked_t free_context;
37455 + atomic_long_unchecked_t load_user_context;
37456 + atomic_long_unchecked_t load_kernel_context;
37457 + atomic_long_unchecked_t lock_kernel_context;
37458 + atomic_long_unchecked_t unlock_kernel_context;
37459 + atomic_long_unchecked_t steal_user_context;
37460 + atomic_long_unchecked_t steal_kernel_context;
37461 + atomic_long_unchecked_t steal_context_failed;
37462 + atomic_long_unchecked_t nopfn;
37463 + atomic_long_unchecked_t break_cow;
37464 + atomic_long_unchecked_t asid_new;
37465 + atomic_long_unchecked_t asid_next;
37466 + atomic_long_unchecked_t asid_wrap;
37467 + atomic_long_unchecked_t asid_reuse;
37468 + atomic_long_unchecked_t intr;
37469 + atomic_long_unchecked_t intr_mm_lock_failed;
37470 + atomic_long_unchecked_t call_os;
37471 + atomic_long_unchecked_t call_os_offnode_reference;
37472 + atomic_long_unchecked_t call_os_check_for_bug;
37473 + atomic_long_unchecked_t call_os_wait_queue;
37474 + atomic_long_unchecked_t user_flush_tlb;
37475 + atomic_long_unchecked_t user_unload_context;
37476 + atomic_long_unchecked_t user_exception;
37477 + atomic_long_unchecked_t set_context_option;
37478 + atomic_long_unchecked_t migrate_check;
37479 + atomic_long_unchecked_t migrated_retarget;
37480 + atomic_long_unchecked_t migrated_unload;
37481 + atomic_long_unchecked_t migrated_unload_delay;
37482 + atomic_long_unchecked_t migrated_nopfn_retarget;
37483 + atomic_long_unchecked_t migrated_nopfn_unload;
37484 + atomic_long_unchecked_t tlb_dropin;
37485 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
37486 + atomic_long_unchecked_t tlb_dropin_fail_upm;
37487 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
37488 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
37489 + atomic_long_unchecked_t tlb_dropin_fail_idle;
37490 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
37491 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
37492 + atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
37493 + atomic_long_unchecked_t tfh_stale_on_fault;
37494 + atomic_long_unchecked_t mmu_invalidate_range;
37495 + atomic_long_unchecked_t mmu_invalidate_page;
37496 + atomic_long_unchecked_t mmu_clear_flush_young;
37497 + atomic_long_unchecked_t flush_tlb;
37498 + atomic_long_unchecked_t flush_tlb_gru;
37499 + atomic_long_unchecked_t flush_tlb_gru_tgh;
37500 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
37501
37502 - atomic_long_t copy_gpa;
37503 + atomic_long_unchecked_t copy_gpa;
37504
37505 - atomic_long_t mesq_receive;
37506 - atomic_long_t mesq_receive_none;
37507 - atomic_long_t mesq_send;
37508 - atomic_long_t mesq_send_failed;
37509 - atomic_long_t mesq_noop;
37510 - atomic_long_t mesq_send_unexpected_error;
37511 - atomic_long_t mesq_send_lb_overflow;
37512 - atomic_long_t mesq_send_qlimit_reached;
37513 - atomic_long_t mesq_send_amo_nacked;
37514 - atomic_long_t mesq_send_put_nacked;
37515 - atomic_long_t mesq_qf_not_full;
37516 - atomic_long_t mesq_qf_locked;
37517 - atomic_long_t mesq_qf_noop_not_full;
37518 - atomic_long_t mesq_qf_switch_head_failed;
37519 - atomic_long_t mesq_qf_unexpected_error;
37520 - atomic_long_t mesq_noop_unexpected_error;
37521 - atomic_long_t mesq_noop_lb_overflow;
37522 - atomic_long_t mesq_noop_qlimit_reached;
37523 - atomic_long_t mesq_noop_amo_nacked;
37524 - atomic_long_t mesq_noop_put_nacked;
37525 + atomic_long_unchecked_t mesq_receive;
37526 + atomic_long_unchecked_t mesq_receive_none;
37527 + atomic_long_unchecked_t mesq_send;
37528 + atomic_long_unchecked_t mesq_send_failed;
37529 + atomic_long_unchecked_t mesq_noop;
37530 + atomic_long_unchecked_t mesq_send_unexpected_error;
37531 + atomic_long_unchecked_t mesq_send_lb_overflow;
37532 + atomic_long_unchecked_t mesq_send_qlimit_reached;
37533 + atomic_long_unchecked_t mesq_send_amo_nacked;
37534 + atomic_long_unchecked_t mesq_send_put_nacked;
37535 + atomic_long_unchecked_t mesq_qf_not_full;
37536 + atomic_long_unchecked_t mesq_qf_locked;
37537 + atomic_long_unchecked_t mesq_qf_noop_not_full;
37538 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
37539 + atomic_long_unchecked_t mesq_qf_unexpected_error;
37540 + atomic_long_unchecked_t mesq_noop_unexpected_error;
37541 + atomic_long_unchecked_t mesq_noop_lb_overflow;
37542 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
37543 + atomic_long_unchecked_t mesq_noop_amo_nacked;
37544 + atomic_long_unchecked_t mesq_noop_put_nacked;
37545
37546 };
37547
37548 @@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
37549 cchop_deallocate, tghop_invalidate, mcsop_last};
37550
37551 struct mcs_op_statistic {
37552 - atomic_long_t count;
37553 - atomic_long_t total;
37554 + atomic_long_unchecked_t count;
37555 + atomic_long_unchecked_t total;
37556 unsigned long max;
37557 };
37558
37559 @@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
37560
37561 #define STAT(id) do { \
37562 if (gru_options & OPT_STATS) \
37563 - atomic_long_inc(&gru_stats.id); \
37564 + atomic_long_inc_unchecked(&gru_stats.id); \
37565 } while (0)
37566
37567 #ifdef CONFIG_SGI_GRU_DEBUG
37568 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
37569 index 2275126..12a9dbfb 100644
37570 --- a/drivers/misc/sgi-xp/xp.h
37571 +++ b/drivers/misc/sgi-xp/xp.h
37572 @@ -289,7 +289,7 @@ struct xpc_interface {
37573 xpc_notify_func, void *);
37574 void (*received) (short, int, void *);
37575 enum xp_retval (*partid_to_nasids) (short, void *);
37576 -};
37577 +} __no_const;
37578
37579 extern struct xpc_interface xpc_interface;
37580
37581 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
37582 index b94d5f7..7f494c5 100644
37583 --- a/drivers/misc/sgi-xp/xpc.h
37584 +++ b/drivers/misc/sgi-xp/xpc.h
37585 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
37586 void (*received_payload) (struct xpc_channel *, void *);
37587 void (*notify_senders_of_disconnect) (struct xpc_channel *);
37588 };
37589 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
37590
37591 /* struct xpc_partition act_state values (for XPC HB) */
37592
37593 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
37594 /* found in xpc_main.c */
37595 extern struct device *xpc_part;
37596 extern struct device *xpc_chan;
37597 -extern struct xpc_arch_operations xpc_arch_ops;
37598 +extern xpc_arch_operations_no_const xpc_arch_ops;
37599 extern int xpc_disengage_timelimit;
37600 extern int xpc_disengage_timedout;
37601 extern int xpc_activate_IRQ_rcvd;
37602 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
37603 index fd3688a..7e211a4 100644
37604 --- a/drivers/misc/sgi-xp/xpc_main.c
37605 +++ b/drivers/misc/sgi-xp/xpc_main.c
37606 @@ -169,7 +169,7 @@ static struct notifier_block xpc_die_notifier = {
37607 .notifier_call = xpc_system_die,
37608 };
37609
37610 -struct xpc_arch_operations xpc_arch_ops;
37611 +xpc_arch_operations_no_const xpc_arch_ops;
37612
37613 /*
37614 * Timer function to enforce the timelimit on the partition disengage.
37615 diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
37616 index 8b70e03..700bda6 100644
37617 --- a/drivers/misc/sgi-xp/xpc_sn2.c
37618 +++ b/drivers/misc/sgi-xp/xpc_sn2.c
37619 @@ -2350,7 +2350,7 @@ xpc_received_payload_sn2(struct xpc_channel *ch, void *payload)
37620 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
37621 }
37622
37623 -static struct xpc_arch_operations xpc_arch_ops_sn2 = {
37624 +static const struct xpc_arch_operations xpc_arch_ops_sn2 = {
37625 .setup_partitions = xpc_setup_partitions_sn2,
37626 .teardown_partitions = xpc_teardown_partitions_sn2,
37627 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
37628 @@ -2413,7 +2413,9 @@ xpc_init_sn2(void)
37629 int ret;
37630 size_t buf_size;
37631
37632 - xpc_arch_ops = xpc_arch_ops_sn2;
37633 + pax_open_kernel();
37634 + memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_sn2, sizeof(xpc_arch_ops_sn2));
37635 + pax_close_kernel();
37636
37637 if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
37638 dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
37639 diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
37640 index 8e08d71..7cb8c9b 100644
37641 --- a/drivers/misc/sgi-xp/xpc_uv.c
37642 +++ b/drivers/misc/sgi-xp/xpc_uv.c
37643 @@ -1669,7 +1669,7 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
37644 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
37645 }
37646
37647 -static struct xpc_arch_operations xpc_arch_ops_uv = {
37648 +static const struct xpc_arch_operations xpc_arch_ops_uv = {
37649 .setup_partitions = xpc_setup_partitions_uv,
37650 .teardown_partitions = xpc_teardown_partitions_uv,
37651 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
37652 @@ -1729,7 +1729,9 @@ static struct xpc_arch_operations xpc_arch_ops_uv = {
37653 int
37654 xpc_init_uv(void)
37655 {
37656 - xpc_arch_ops = xpc_arch_ops_uv;
37657 + pax_open_kernel();
37658 + memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_uv, sizeof(xpc_arch_ops_uv));
37659 + pax_close_kernel();
37660
37661 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
37662 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
37663 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
37664 index 6fd20b42..650efe3 100644
37665 --- a/drivers/mmc/host/sdhci-pci.c
37666 +++ b/drivers/mmc/host/sdhci-pci.c
37667 @@ -297,7 +297,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
37668 .probe = via_probe,
37669 };
37670
37671 -static const struct pci_device_id pci_ids[] __devinitdata = {
37672 +static const struct pci_device_id pci_ids[] __devinitconst = {
37673 {
37674 .vendor = PCI_VENDOR_ID_RICOH,
37675 .device = PCI_DEVICE_ID_RICOH_R5C822,
37676 diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
37677 index e7563a9..5f90ce5 100644
37678 --- a/drivers/mtd/chips/cfi_cmdset_0001.c
37679 +++ b/drivers/mtd/chips/cfi_cmdset_0001.c
37680 @@ -743,6 +743,8 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
37681 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
37682 unsigned long timeo = jiffies + HZ;
37683
37684 + pax_track_stack();
37685 +
37686 /* Prevent setting state FL_SYNCING for chip in suspended state. */
37687 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
37688 goto sleep;
37689 @@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
37690 unsigned long initial_adr;
37691 int initial_len = len;
37692
37693 + pax_track_stack();
37694 +
37695 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
37696 adr += chip->start;
37697 initial_adr = adr;
37698 @@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
37699 int retries = 3;
37700 int ret;
37701
37702 + pax_track_stack();
37703 +
37704 adr += chip->start;
37705
37706 retry:
37707 diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
37708 index 0667a67..3ab97ed 100644
37709 --- a/drivers/mtd/chips/cfi_cmdset_0020.c
37710 +++ b/drivers/mtd/chips/cfi_cmdset_0020.c
37711 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
37712 unsigned long cmd_addr;
37713 struct cfi_private *cfi = map->fldrv_priv;
37714
37715 + pax_track_stack();
37716 +
37717 adr += chip->start;
37718
37719 /* Ensure cmd read/writes are aligned. */
37720 @@ -428,6 +430,8 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
37721 DECLARE_WAITQUEUE(wait, current);
37722 int wbufsize, z;
37723
37724 + pax_track_stack();
37725 +
37726 /* M58LW064A requires bus alignment for buffer wriets -- saw */
37727 if (adr & (map_bankwidth(map)-1))
37728 return -EINVAL;
37729 @@ -742,6 +746,8 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
37730 DECLARE_WAITQUEUE(wait, current);
37731 int ret = 0;
37732
37733 + pax_track_stack();
37734 +
37735 adr += chip->start;
37736
37737 /* Let's determine this according to the interleave only once */
37738 @@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un
37739 unsigned long timeo = jiffies + HZ;
37740 DECLARE_WAITQUEUE(wait, current);
37741
37742 + pax_track_stack();
37743 +
37744 adr += chip->start;
37745
37746 /* Let's determine this according to the interleave only once */
37747 @@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip,
37748 unsigned long timeo = jiffies + HZ;
37749 DECLARE_WAITQUEUE(wait, current);
37750
37751 + pax_track_stack();
37752 +
37753 adr += chip->start;
37754
37755 /* Let's determine this according to the interleave only once */
37756 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
37757 index 5bf5f46..c5de373 100644
37758 --- a/drivers/mtd/devices/doc2000.c
37759 +++ b/drivers/mtd/devices/doc2000.c
37760 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
37761
37762 /* The ECC will not be calculated correctly if less than 512 is written */
37763 /* DBB-
37764 - if (len != 0x200 && eccbuf)
37765 + if (len != 0x200)
37766 printk(KERN_WARNING
37767 "ECC needs a full sector write (adr: %lx size %lx)\n",
37768 (long) to, (long) len);
37769 diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
37770 index 0990f78..bb4e8a4 100644
37771 --- a/drivers/mtd/devices/doc2001.c
37772 +++ b/drivers/mtd/devices/doc2001.c
37773 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
37774 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
37775
37776 /* Don't allow read past end of device */
37777 - if (from >= this->totlen)
37778 + if (from >= this->totlen || !len)
37779 return -EINVAL;
37780
37781 /* Don't allow a single read to cross a 512-byte block boundary */
37782 diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
37783 index e56d6b4..f07e6cf 100644
37784 --- a/drivers/mtd/ftl.c
37785 +++ b/drivers/mtd/ftl.c
37786 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
37787 loff_t offset;
37788 uint16_t srcunitswap = cpu_to_le16(srcunit);
37789
37790 + pax_track_stack();
37791 +
37792 eun = &part->EUNInfo[srcunit];
37793 xfer = &part->XferInfo[xferunit];
37794 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
37795 diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
37796 index 8aca552..146446e 100755
37797 --- a/drivers/mtd/inftlcore.c
37798 +++ b/drivers/mtd/inftlcore.c
37799 @@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
37800 struct inftl_oob oob;
37801 size_t retlen;
37802
37803 + pax_track_stack();
37804 +
37805 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
37806 "pending=%d)\n", inftl, thisVUC, pendingblock);
37807
37808 diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
37809 index 32e82ae..ed50953 100644
37810 --- a/drivers/mtd/inftlmount.c
37811 +++ b/drivers/mtd/inftlmount.c
37812 @@ -54,6 +54,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
37813 struct INFTLPartition *ip;
37814 size_t retlen;
37815
37816 + pax_track_stack();
37817 +
37818 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
37819
37820 /*
37821 diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
37822 index 79bf40f..fe5f8fd 100644
37823 --- a/drivers/mtd/lpddr/qinfo_probe.c
37824 +++ b/drivers/mtd/lpddr/qinfo_probe.c
37825 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
37826 {
37827 map_word pfow_val[4];
37828
37829 + pax_track_stack();
37830 +
37831 /* Check identification string */
37832 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
37833 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
37834 diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
37835 index 726a1b8..f46b460 100644
37836 --- a/drivers/mtd/mtdchar.c
37837 +++ b/drivers/mtd/mtdchar.c
37838 @@ -461,6 +461,8 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
37839 u_long size;
37840 struct mtd_info_user info;
37841
37842 + pax_track_stack();
37843 +
37844 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
37845
37846 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
37847 diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
37848 index 1002e18..26d82d5 100644
37849 --- a/drivers/mtd/nftlcore.c
37850 +++ b/drivers/mtd/nftlcore.c
37851 @@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
37852 int inplace = 1;
37853 size_t retlen;
37854
37855 + pax_track_stack();
37856 +
37857 memset(BlockMap, 0xff, sizeof(BlockMap));
37858 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
37859
37860 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
37861 index 8b22b18..6fada85 100644
37862 --- a/drivers/mtd/nftlmount.c
37863 +++ b/drivers/mtd/nftlmount.c
37864 @@ -23,6 +23,7 @@
37865 #include <asm/errno.h>
37866 #include <linux/delay.h>
37867 #include <linux/slab.h>
37868 +#include <linux/sched.h>
37869 #include <linux/mtd/mtd.h>
37870 #include <linux/mtd/nand.h>
37871 #include <linux/mtd/nftl.h>
37872 @@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLrecord *nftl)
37873 struct mtd_info *mtd = nftl->mbd.mtd;
37874 unsigned int i;
37875
37876 + pax_track_stack();
37877 +
37878 /* Assume logical EraseSize == physical erasesize for starting the scan.
37879 We'll sort it out later if we find a MediaHeader which says otherwise */
37880 /* Actually, we won't. The new DiskOnChip driver has already scanned
37881 diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
37882 index 14cec04..d775b87 100644
37883 --- a/drivers/mtd/ubi/build.c
37884 +++ b/drivers/mtd/ubi/build.c
37885 @@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
37886 static int __init bytes_str_to_int(const char *str)
37887 {
37888 char *endp;
37889 - unsigned long result;
37890 + unsigned long result, scale = 1;
37891
37892 result = simple_strtoul(str, &endp, 0);
37893 if (str == endp || result >= INT_MAX) {
37894 @@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const char *str)
37895
37896 switch (*endp) {
37897 case 'G':
37898 - result *= 1024;
37899 + scale *= 1024;
37900 case 'M':
37901 - result *= 1024;
37902 + scale *= 1024;
37903 case 'K':
37904 - result *= 1024;
37905 + scale *= 1024;
37906 if (endp[1] == 'i' && endp[2] == 'B')
37907 endp += 2;
37908 case '\0':
37909 @@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const char *str)
37910 return -EINVAL;
37911 }
37912
37913 - return result;
37914 + if ((intoverflow_t)result*scale >= INT_MAX) {
37915 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
37916 + str);
37917 + return -EINVAL;
37918 + }
37919 +
37920 + return result*scale;
37921 }
37922
37923 /**
37924 diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
37925 index ab68886..ca405e8 100644
37926 --- a/drivers/net/atlx/atl2.c
37927 +++ b/drivers/net/atlx/atl2.c
37928 @@ -2845,7 +2845,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
37929 */
37930
37931 #define ATL2_PARAM(X, desc) \
37932 - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
37933 + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
37934 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
37935 MODULE_PARM_DESC(X, desc);
37936 #else
37937 diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
37938 index 4874b2b..67f8526 100644
37939 --- a/drivers/net/bnx2.c
37940 +++ b/drivers/net/bnx2.c
37941 @@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
37942 int rc = 0;
37943 u32 magic, csum;
37944
37945 + pax_track_stack();
37946 +
37947 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
37948 goto test_nvram_done;
37949
37950 diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
37951 index fd3eb07..8a6978d 100644
37952 --- a/drivers/net/cxgb3/l2t.h
37953 +++ b/drivers/net/cxgb3/l2t.h
37954 @@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
37955 */
37956 struct l2t_skb_cb {
37957 arp_failure_handler_func arp_failure_handler;
37958 -};
37959 +} __no_const;
37960
37961 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
37962
37963 diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
37964 index 032cfe0..411af379 100644
37965 --- a/drivers/net/cxgb3/t3_hw.c
37966 +++ b/drivers/net/cxgb3/t3_hw.c
37967 @@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
37968 int i, addr, ret;
37969 struct t3_vpd vpd;
37970
37971 + pax_track_stack();
37972 +
37973 /*
37974 * Card information is normally at VPD_BASE but some early cards had
37975 * it at 0.
37976 diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
37977 index d1e0563..b9e129c 100644
37978 --- a/drivers/net/e1000e/82571.c
37979 +++ b/drivers/net/e1000e/82571.c
37980 @@ -212,7 +212,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
37981 {
37982 struct e1000_hw *hw = &adapter->hw;
37983 struct e1000_mac_info *mac = &hw->mac;
37984 - struct e1000_mac_operations *func = &mac->ops;
37985 + e1000_mac_operations_no_const *func = &mac->ops;
37986 u32 swsm = 0;
37987 u32 swsm2 = 0;
37988 bool force_clear_smbi = false;
37989 @@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
37990 temp = er32(ICRXDMTC);
37991 }
37992
37993 -static struct e1000_mac_operations e82571_mac_ops = {
37994 +static const struct e1000_mac_operations e82571_mac_ops = {
37995 /* .check_mng_mode: mac type dependent */
37996 /* .check_for_link: media type dependent */
37997 .id_led_init = e1000e_id_led_init,
37998 @@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
37999 .setup_led = e1000e_setup_led_generic,
38000 };
38001
38002 -static struct e1000_phy_operations e82_phy_ops_igp = {
38003 +static const struct e1000_phy_operations e82_phy_ops_igp = {
38004 .acquire_phy = e1000_get_hw_semaphore_82571,
38005 .check_reset_block = e1000e_check_reset_block_generic,
38006 .commit_phy = NULL,
38007 @@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_phy_ops_igp = {
38008 .cfg_on_link_up = NULL,
38009 };
38010
38011 -static struct e1000_phy_operations e82_phy_ops_m88 = {
38012 +static const struct e1000_phy_operations e82_phy_ops_m88 = {
38013 .acquire_phy = e1000_get_hw_semaphore_82571,
38014 .check_reset_block = e1000e_check_reset_block_generic,
38015 .commit_phy = e1000e_phy_sw_reset,
38016 @@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_phy_ops_m88 = {
38017 .cfg_on_link_up = NULL,
38018 };
38019
38020 -static struct e1000_phy_operations e82_phy_ops_bm = {
38021 +static const struct e1000_phy_operations e82_phy_ops_bm = {
38022 .acquire_phy = e1000_get_hw_semaphore_82571,
38023 .check_reset_block = e1000e_check_reset_block_generic,
38024 .commit_phy = e1000e_phy_sw_reset,
38025 @@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_phy_ops_bm = {
38026 .cfg_on_link_up = NULL,
38027 };
38028
38029 -static struct e1000_nvm_operations e82571_nvm_ops = {
38030 +static const struct e1000_nvm_operations e82571_nvm_ops = {
38031 .acquire_nvm = e1000_acquire_nvm_82571,
38032 .read_nvm = e1000e_read_nvm_eerd,
38033 .release_nvm = e1000_release_nvm_82571,
38034 diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
38035 index 47db9bd..fa58ccd 100644
38036 --- a/drivers/net/e1000e/e1000.h
38037 +++ b/drivers/net/e1000e/e1000.h
38038 @@ -375,9 +375,9 @@ struct e1000_info {
38039 u32 pba;
38040 u32 max_hw_frame_size;
38041 s32 (*get_variants)(struct e1000_adapter *);
38042 - struct e1000_mac_operations *mac_ops;
38043 - struct e1000_phy_operations *phy_ops;
38044 - struct e1000_nvm_operations *nvm_ops;
38045 + const struct e1000_mac_operations *mac_ops;
38046 + const struct e1000_phy_operations *phy_ops;
38047 + const struct e1000_nvm_operations *nvm_ops;
38048 };
38049
38050 /* hardware capability, feature, and workaround flags */
38051 diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
38052 index ae5d736..e9a93a1 100644
38053 --- a/drivers/net/e1000e/es2lan.c
38054 +++ b/drivers/net/e1000e/es2lan.c
38055 @@ -207,7 +207,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
38056 {
38057 struct e1000_hw *hw = &adapter->hw;
38058 struct e1000_mac_info *mac = &hw->mac;
38059 - struct e1000_mac_operations *func = &mac->ops;
38060 + e1000_mac_operations_no_const *func = &mac->ops;
38061
38062 /* Set media type */
38063 switch (adapter->pdev->device) {
38064 @@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
38065 temp = er32(ICRXDMTC);
38066 }
38067
38068 -static struct e1000_mac_operations es2_mac_ops = {
38069 +static const struct e1000_mac_operations es2_mac_ops = {
38070 .id_led_init = e1000e_id_led_init,
38071 .check_mng_mode = e1000e_check_mng_mode_generic,
38072 /* check_for_link dependent on media type */
38073 @@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_mac_ops = {
38074 .setup_led = e1000e_setup_led_generic,
38075 };
38076
38077 -static struct e1000_phy_operations es2_phy_ops = {
38078 +static const struct e1000_phy_operations es2_phy_ops = {
38079 .acquire_phy = e1000_acquire_phy_80003es2lan,
38080 .check_reset_block = e1000e_check_reset_block_generic,
38081 .commit_phy = e1000e_phy_sw_reset,
38082 @@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_phy_ops = {
38083 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
38084 };
38085
38086 -static struct e1000_nvm_operations es2_nvm_ops = {
38087 +static const struct e1000_nvm_operations es2_nvm_ops = {
38088 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
38089 .read_nvm = e1000e_read_nvm_eerd,
38090 .release_nvm = e1000_release_nvm_80003es2lan,
38091 diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
38092 index 11f3b7c..6381887 100644
38093 --- a/drivers/net/e1000e/hw.h
38094 +++ b/drivers/net/e1000e/hw.h
38095 @@ -753,6 +753,7 @@ struct e1000_mac_operations {
38096 s32 (*setup_physical_interface)(struct e1000_hw *);
38097 s32 (*setup_led)(struct e1000_hw *);
38098 };
38099 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
38100
38101 /* Function pointers for the PHY. */
38102 struct e1000_phy_operations {
38103 @@ -774,6 +775,7 @@ struct e1000_phy_operations {
38104 s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
38105 s32 (*cfg_on_link_up)(struct e1000_hw *);
38106 };
38107 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
38108
38109 /* Function pointers for the NVM. */
38110 struct e1000_nvm_operations {
38111 @@ -785,9 +787,10 @@ struct e1000_nvm_operations {
38112 s32 (*validate_nvm)(struct e1000_hw *);
38113 s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
38114 };
38115 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
38116
38117 struct e1000_mac_info {
38118 - struct e1000_mac_operations ops;
38119 + e1000_mac_operations_no_const ops;
38120
38121 u8 addr[6];
38122 u8 perm_addr[6];
38123 @@ -823,7 +826,7 @@ struct e1000_mac_info {
38124 };
38125
38126 struct e1000_phy_info {
38127 - struct e1000_phy_operations ops;
38128 + e1000_phy_operations_no_const ops;
38129
38130 enum e1000_phy_type type;
38131
38132 @@ -857,7 +860,7 @@ struct e1000_phy_info {
38133 };
38134
38135 struct e1000_nvm_info {
38136 - struct e1000_nvm_operations ops;
38137 + e1000_nvm_operations_no_const ops;
38138
38139 enum e1000_nvm_type type;
38140 enum e1000_nvm_override override;
38141 diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
38142 index de39f9a..e28d3e0 100644
38143 --- a/drivers/net/e1000e/ich8lan.c
38144 +++ b/drivers/net/e1000e/ich8lan.c
38145 @@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
38146 }
38147 }
38148
38149 -static struct e1000_mac_operations ich8_mac_ops = {
38150 +static const struct e1000_mac_operations ich8_mac_ops = {
38151 .id_led_init = e1000e_id_led_init,
38152 .check_mng_mode = e1000_check_mng_mode_ich8lan,
38153 .check_for_link = e1000_check_for_copper_link_ich8lan,
38154 @@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_mac_ops = {
38155 /* id_led_init dependent on mac type */
38156 };
38157
38158 -static struct e1000_phy_operations ich8_phy_ops = {
38159 +static const struct e1000_phy_operations ich8_phy_ops = {
38160 .acquire_phy = e1000_acquire_swflag_ich8lan,
38161 .check_reset_block = e1000_check_reset_block_ich8lan,
38162 .commit_phy = NULL,
38163 @@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_phy_ops = {
38164 .write_phy_reg = e1000e_write_phy_reg_igp,
38165 };
38166
38167 -static struct e1000_nvm_operations ich8_nvm_ops = {
38168 +static const struct e1000_nvm_operations ich8_nvm_ops = {
38169 .acquire_nvm = e1000_acquire_nvm_ich8lan,
38170 .read_nvm = e1000_read_nvm_ich8lan,
38171 .release_nvm = e1000_release_nvm_ich8lan,
38172 diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
38173 index 18d5fbb..542d96d 100644
38174 --- a/drivers/net/fealnx.c
38175 +++ b/drivers/net/fealnx.c
38176 @@ -151,7 +151,7 @@ struct chip_info {
38177 int flags;
38178 };
38179
38180 -static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
38181 +static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
38182 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
38183 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
38184 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
38185 diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
38186 index 0e5b54b..b503f82 100644
38187 --- a/drivers/net/hamradio/6pack.c
38188 +++ b/drivers/net/hamradio/6pack.c
38189 @@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct tty_struct *tty,
38190 unsigned char buf[512];
38191 int count1;
38192
38193 + pax_track_stack();
38194 +
38195 if (!count)
38196 return;
38197
38198 diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
38199 index 5862282..7cce8cb 100644
38200 --- a/drivers/net/ibmveth.c
38201 +++ b/drivers/net/ibmveth.c
38202 @@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attrs[] = {
38203 NULL,
38204 };
38205
38206 -static struct sysfs_ops veth_pool_ops = {
38207 +static const struct sysfs_ops veth_pool_ops = {
38208 .show = veth_pool_show,
38209 .store = veth_pool_store,
38210 };
38211 diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
38212 index d617f2d..57b5309 100644
38213 --- a/drivers/net/igb/e1000_82575.c
38214 +++ b/drivers/net/igb/e1000_82575.c
38215 @@ -1411,7 +1411,7 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
38216 wr32(E1000_VT_CTL, vt_ctl);
38217 }
38218
38219 -static struct e1000_mac_operations e1000_mac_ops_82575 = {
38220 +static const struct e1000_mac_operations e1000_mac_ops_82575 = {
38221 .reset_hw = igb_reset_hw_82575,
38222 .init_hw = igb_init_hw_82575,
38223 .check_for_link = igb_check_for_link_82575,
38224 @@ -1420,13 +1420,13 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = {
38225 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
38226 };
38227
38228 -static struct e1000_phy_operations e1000_phy_ops_82575 = {
38229 +static const struct e1000_phy_operations e1000_phy_ops_82575 = {
38230 .acquire = igb_acquire_phy_82575,
38231 .get_cfg_done = igb_get_cfg_done_82575,
38232 .release = igb_release_phy_82575,
38233 };
38234
38235 -static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
38236 +static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
38237 .acquire = igb_acquire_nvm_82575,
38238 .read = igb_read_nvm_eerd,
38239 .release = igb_release_nvm_82575,
38240 diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
38241 index 72081df..d855cf5 100644
38242 --- a/drivers/net/igb/e1000_hw.h
38243 +++ b/drivers/net/igb/e1000_hw.h
38244 @@ -288,6 +288,7 @@ struct e1000_mac_operations {
38245 s32 (*read_mac_addr)(struct e1000_hw *);
38246 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
38247 };
38248 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
38249
38250 struct e1000_phy_operations {
38251 s32 (*acquire)(struct e1000_hw *);
38252 @@ -303,6 +304,7 @@ struct e1000_phy_operations {
38253 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
38254 s32 (*write_reg)(struct e1000_hw *, u32, u16);
38255 };
38256 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
38257
38258 struct e1000_nvm_operations {
38259 s32 (*acquire)(struct e1000_hw *);
38260 @@ -310,6 +312,7 @@ struct e1000_nvm_operations {
38261 void (*release)(struct e1000_hw *);
38262 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
38263 };
38264 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
38265
38266 struct e1000_info {
38267 s32 (*get_invariants)(struct e1000_hw *);
38268 @@ -321,7 +324,7 @@ struct e1000_info {
38269 extern const struct e1000_info e1000_82575_info;
38270
38271 struct e1000_mac_info {
38272 - struct e1000_mac_operations ops;
38273 + e1000_mac_operations_no_const ops;
38274
38275 u8 addr[6];
38276 u8 perm_addr[6];
38277 @@ -365,7 +368,7 @@ struct e1000_mac_info {
38278 };
38279
38280 struct e1000_phy_info {
38281 - struct e1000_phy_operations ops;
38282 + e1000_phy_operations_no_const ops;
38283
38284 enum e1000_phy_type type;
38285
38286 @@ -400,7 +403,7 @@ struct e1000_phy_info {
38287 };
38288
38289 struct e1000_nvm_info {
38290 - struct e1000_nvm_operations ops;
38291 + e1000_nvm_operations_no_const ops;
38292
38293 enum e1000_nvm_type type;
38294 enum e1000_nvm_override override;
38295 @@ -446,6 +449,7 @@ struct e1000_mbx_operations {
38296 s32 (*check_for_ack)(struct e1000_hw *, u16);
38297 s32 (*check_for_rst)(struct e1000_hw *, u16);
38298 };
38299 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
38300
38301 struct e1000_mbx_stats {
38302 u32 msgs_tx;
38303 @@ -457,7 +461,7 @@ struct e1000_mbx_stats {
38304 };
38305
38306 struct e1000_mbx_info {
38307 - struct e1000_mbx_operations ops;
38308 + e1000_mbx_operations_no_const ops;
38309 struct e1000_mbx_stats stats;
38310 u32 timeout;
38311 u32 usec_delay;
38312 diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
38313 index 1e8ce37..549c453 100644
38314 --- a/drivers/net/igbvf/vf.h
38315 +++ b/drivers/net/igbvf/vf.h
38316 @@ -187,9 +187,10 @@ struct e1000_mac_operations {
38317 s32 (*read_mac_addr)(struct e1000_hw *);
38318 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
38319 };
38320 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
38321
38322 struct e1000_mac_info {
38323 - struct e1000_mac_operations ops;
38324 + e1000_mac_operations_no_const ops;
38325 u8 addr[6];
38326 u8 perm_addr[6];
38327
38328 @@ -211,6 +212,7 @@ struct e1000_mbx_operations {
38329 s32 (*check_for_ack)(struct e1000_hw *);
38330 s32 (*check_for_rst)(struct e1000_hw *);
38331 };
38332 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
38333
38334 struct e1000_mbx_stats {
38335 u32 msgs_tx;
38336 @@ -222,7 +224,7 @@ struct e1000_mbx_stats {
38337 };
38338
38339 struct e1000_mbx_info {
38340 - struct e1000_mbx_operations ops;
38341 + e1000_mbx_operations_no_const ops;
38342 struct e1000_mbx_stats stats;
38343 u32 timeout;
38344 u32 usec_delay;
38345 diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
38346 index aa7286b..a61394f 100644
38347 --- a/drivers/net/iseries_veth.c
38348 +++ b/drivers/net/iseries_veth.c
38349 @@ -384,7 +384,7 @@ static struct attribute *veth_cnx_default_attrs[] = {
38350 NULL
38351 };
38352
38353 -static struct sysfs_ops veth_cnx_sysfs_ops = {
38354 +static const struct sysfs_ops veth_cnx_sysfs_ops = {
38355 .show = veth_cnx_attribute_show
38356 };
38357
38358 @@ -441,7 +441,7 @@ static struct attribute *veth_port_default_attrs[] = {
38359 NULL
38360 };
38361
38362 -static struct sysfs_ops veth_port_sysfs_ops = {
38363 +static const struct sysfs_ops veth_port_sysfs_ops = {
38364 .show = veth_port_attribute_show
38365 };
38366
38367 diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
38368 index 8aa44dc..fa1e797 100644
38369 --- a/drivers/net/ixgb/ixgb_main.c
38370 +++ b/drivers/net/ixgb/ixgb_main.c
38371 @@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev)
38372 u32 rctl;
38373 int i;
38374
38375 + pax_track_stack();
38376 +
38377 /* Check for Promiscuous and All Multicast modes */
38378
38379 rctl = IXGB_READ_REG(hw, RCTL);
38380 diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
38381 index af35e1d..8781785 100644
38382 --- a/drivers/net/ixgb/ixgb_param.c
38383 +++ b/drivers/net/ixgb/ixgb_param.c
38384 @@ -260,6 +260,9 @@ void __devinit
38385 ixgb_check_options(struct ixgb_adapter *adapter)
38386 {
38387 int bd = adapter->bd_number;
38388 +
38389 + pax_track_stack();
38390 +
38391 if (bd >= IXGB_MAX_NIC) {
38392 printk(KERN_NOTICE
38393 "Warning: no configuration for board #%i\n", bd);
38394 diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
38395 index b17aa73..ed74540 100644
38396 --- a/drivers/net/ixgbe/ixgbe_type.h
38397 +++ b/drivers/net/ixgbe/ixgbe_type.h
38398 @@ -2327,6 +2327,7 @@ struct ixgbe_eeprom_operations {
38399 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
38400 s32 (*update_checksum)(struct ixgbe_hw *);
38401 };
38402 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
38403
38404 struct ixgbe_mac_operations {
38405 s32 (*init_hw)(struct ixgbe_hw *);
38406 @@ -2376,6 +2377,7 @@ struct ixgbe_mac_operations {
38407 /* Flow Control */
38408 s32 (*fc_enable)(struct ixgbe_hw *, s32);
38409 };
38410 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
38411
38412 struct ixgbe_phy_operations {
38413 s32 (*identify)(struct ixgbe_hw *);
38414 @@ -2394,9 +2396,10 @@ struct ixgbe_phy_operations {
38415 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
38416 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
38417 };
38418 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
38419
38420 struct ixgbe_eeprom_info {
38421 - struct ixgbe_eeprom_operations ops;
38422 + ixgbe_eeprom_operations_no_const ops;
38423 enum ixgbe_eeprom_type type;
38424 u32 semaphore_delay;
38425 u16 word_size;
38426 @@ -2404,7 +2407,7 @@ struct ixgbe_eeprom_info {
38427 };
38428
38429 struct ixgbe_mac_info {
38430 - struct ixgbe_mac_operations ops;
38431 + ixgbe_mac_operations_no_const ops;
38432 enum ixgbe_mac_type type;
38433 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
38434 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
38435 @@ -2423,7 +2426,7 @@ struct ixgbe_mac_info {
38436 };
38437
38438 struct ixgbe_phy_info {
38439 - struct ixgbe_phy_operations ops;
38440 + ixgbe_phy_operations_no_const ops;
38441 struct mdio_if_info mdio;
38442 enum ixgbe_phy_type type;
38443 u32 id;
38444 diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
38445 index 291a505..2543756 100644
38446 --- a/drivers/net/mlx4/main.c
38447 +++ b/drivers/net/mlx4/main.c
38448 @@ -38,6 +38,7 @@
38449 #include <linux/errno.h>
38450 #include <linux/pci.h>
38451 #include <linux/dma-mapping.h>
38452 +#include <linux/sched.h>
38453
38454 #include <linux/mlx4/device.h>
38455 #include <linux/mlx4/doorbell.h>
38456 @@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
38457 u64 icm_size;
38458 int err;
38459
38460 + pax_track_stack();
38461 +
38462 err = mlx4_QUERY_FW(dev);
38463 if (err) {
38464 if (err == -EACCES)
38465 diff --git a/drivers/net/niu.c b/drivers/net/niu.c
38466 index 2dce134..fa5ce75 100644
38467 --- a/drivers/net/niu.c
38468 +++ b/drivers/net/niu.c
38469 @@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
38470 int i, num_irqs, err;
38471 u8 first_ldg;
38472
38473 + pax_track_stack();
38474 +
38475 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
38476 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
38477 ldg_num_map[i] = first_ldg + i;
38478 diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
38479 index c1b3f09..97cd8c4 100644
38480 --- a/drivers/net/pcnet32.c
38481 +++ b/drivers/net/pcnet32.c
38482 @@ -79,7 +79,7 @@ static int cards_found;
38483 /*
38484 * VLB I/O addresses
38485 */
38486 -static unsigned int pcnet32_portlist[] __initdata =
38487 +static unsigned int pcnet32_portlist[] __devinitdata =
38488 { 0x300, 0x320, 0x340, 0x360, 0 };
38489
38490 static int pcnet32_debug = 0;
38491 @@ -267,7 +267,7 @@ struct pcnet32_private {
38492 struct sk_buff **rx_skbuff;
38493 dma_addr_t *tx_dma_addr;
38494 dma_addr_t *rx_dma_addr;
38495 - struct pcnet32_access a;
38496 + struct pcnet32_access *a;
38497 spinlock_t lock; /* Guard lock */
38498 unsigned int cur_rx, cur_tx; /* The next free ring entry */
38499 unsigned int rx_ring_size; /* current rx ring size */
38500 @@ -457,9 +457,9 @@ static void pcnet32_netif_start(struct net_device *dev)
38501 u16 val;
38502
38503 netif_wake_queue(dev);
38504 - val = lp->a.read_csr(ioaddr, CSR3);
38505 + val = lp->a->read_csr(ioaddr, CSR3);
38506 val &= 0x00ff;
38507 - lp->a.write_csr(ioaddr, CSR3, val);
38508 + lp->a->write_csr(ioaddr, CSR3, val);
38509 napi_enable(&lp->napi);
38510 }
38511
38512 @@ -744,7 +744,7 @@ static u32 pcnet32_get_link(struct net_device *dev)
38513 r = mii_link_ok(&lp->mii_if);
38514 } else if (lp->chip_version >= PCNET32_79C970A) {
38515 ulong ioaddr = dev->base_addr; /* card base I/O address */
38516 - r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
38517 + r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
38518 } else { /* can not detect link on really old chips */
38519 r = 1;
38520 }
38521 @@ -806,7 +806,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
38522 pcnet32_netif_stop(dev);
38523
38524 spin_lock_irqsave(&lp->lock, flags);
38525 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38526 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38527
38528 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
38529
38530 @@ -886,7 +886,7 @@ static void pcnet32_ethtool_test(struct net_device *dev,
38531 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38532 {
38533 struct pcnet32_private *lp = netdev_priv(dev);
38534 - struct pcnet32_access *a = &lp->a; /* access to registers */
38535 + struct pcnet32_access *a = lp->a; /* access to registers */
38536 ulong ioaddr = dev->base_addr; /* card base I/O address */
38537 struct sk_buff *skb; /* sk buff */
38538 int x, i; /* counters */
38539 @@ -906,21 +906,21 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38540 pcnet32_netif_stop(dev);
38541
38542 spin_lock_irqsave(&lp->lock, flags);
38543 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38544 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38545
38546 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
38547
38548 /* Reset the PCNET32 */
38549 - lp->a.reset(ioaddr);
38550 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38551 + lp->a->reset(ioaddr);
38552 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38553
38554 /* switch pcnet32 to 32bit mode */
38555 - lp->a.write_bcr(ioaddr, 20, 2);
38556 + lp->a->write_bcr(ioaddr, 20, 2);
38557
38558 /* purge & init rings but don't actually restart */
38559 pcnet32_restart(dev, 0x0000);
38560
38561 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38562 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38563
38564 /* Initialize Transmit buffers. */
38565 size = data_len + 15;
38566 @@ -966,10 +966,10 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38567
38568 /* set int loopback in CSR15 */
38569 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
38570 - lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
38571 + lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
38572
38573 teststatus = cpu_to_le16(0x8000);
38574 - lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
38575 + lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
38576
38577 /* Check status of descriptors */
38578 for (x = 0; x < numbuffs; x++) {
38579 @@ -990,7 +990,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38580 }
38581 }
38582
38583 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38584 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38585 wmb();
38586 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
38587 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
38588 @@ -1039,7 +1039,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38589 pcnet32_restart(dev, CSR0_NORMAL);
38590 } else {
38591 pcnet32_purge_rx_ring(dev);
38592 - lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
38593 + lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
38594 }
38595 spin_unlock_irqrestore(&lp->lock, flags);
38596
38597 @@ -1049,7 +1049,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38598 static void pcnet32_led_blink_callback(struct net_device *dev)
38599 {
38600 struct pcnet32_private *lp = netdev_priv(dev);
38601 - struct pcnet32_access *a = &lp->a;
38602 + struct pcnet32_access *a = lp->a;
38603 ulong ioaddr = dev->base_addr;
38604 unsigned long flags;
38605 int i;
38606 @@ -1066,7 +1066,7 @@ static void pcnet32_led_blink_callback(struct net_device *dev)
38607 static int pcnet32_phys_id(struct net_device *dev, u32 data)
38608 {
38609 struct pcnet32_private *lp = netdev_priv(dev);
38610 - struct pcnet32_access *a = &lp->a;
38611 + struct pcnet32_access *a = lp->a;
38612 ulong ioaddr = dev->base_addr;
38613 unsigned long flags;
38614 int i, regs[4];
38615 @@ -1112,7 +1112,7 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
38616 {
38617 int csr5;
38618 struct pcnet32_private *lp = netdev_priv(dev);
38619 - struct pcnet32_access *a = &lp->a;
38620 + struct pcnet32_access *a = lp->a;
38621 ulong ioaddr = dev->base_addr;
38622 int ticks;
38623
38624 @@ -1388,8 +1388,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
38625 spin_lock_irqsave(&lp->lock, flags);
38626 if (pcnet32_tx(dev)) {
38627 /* reset the chip to clear the error condition, then restart */
38628 - lp->a.reset(ioaddr);
38629 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38630 + lp->a->reset(ioaddr);
38631 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38632 pcnet32_restart(dev, CSR0_START);
38633 netif_wake_queue(dev);
38634 }
38635 @@ -1401,12 +1401,12 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
38636 __napi_complete(napi);
38637
38638 /* clear interrupt masks */
38639 - val = lp->a.read_csr(ioaddr, CSR3);
38640 + val = lp->a->read_csr(ioaddr, CSR3);
38641 val &= 0x00ff;
38642 - lp->a.write_csr(ioaddr, CSR3, val);
38643 + lp->a->write_csr(ioaddr, CSR3, val);
38644
38645 /* Set interrupt enable. */
38646 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
38647 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
38648
38649 spin_unlock_irqrestore(&lp->lock, flags);
38650 }
38651 @@ -1429,7 +1429,7 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
38652 int i, csr0;
38653 u16 *buff = ptr;
38654 struct pcnet32_private *lp = netdev_priv(dev);
38655 - struct pcnet32_access *a = &lp->a;
38656 + struct pcnet32_access *a = lp->a;
38657 ulong ioaddr = dev->base_addr;
38658 unsigned long flags;
38659
38660 @@ -1466,9 +1466,9 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
38661 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
38662 if (lp->phymask & (1 << j)) {
38663 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
38664 - lp->a.write_bcr(ioaddr, 33,
38665 + lp->a->write_bcr(ioaddr, 33,
38666 (j << 5) | i);
38667 - *buff++ = lp->a.read_bcr(ioaddr, 34);
38668 + *buff++ = lp->a->read_bcr(ioaddr, 34);
38669 }
38670 }
38671 }
38672 @@ -1858,7 +1858,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38673 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
38674 lp->options |= PCNET32_PORT_FD;
38675
38676 - lp->a = *a;
38677 + lp->a = a;
38678
38679 /* prior to register_netdev, dev->name is not yet correct */
38680 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
38681 @@ -1917,7 +1917,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38682 if (lp->mii) {
38683 /* lp->phycount and lp->phymask are set to 0 by memset above */
38684
38685 - lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
38686 + lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
38687 /* scan for PHYs */
38688 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
38689 unsigned short id1, id2;
38690 @@ -1938,7 +1938,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38691 "Found PHY %04x:%04x at address %d.\n",
38692 id1, id2, i);
38693 }
38694 - lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
38695 + lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
38696 if (lp->phycount > 1) {
38697 lp->options |= PCNET32_PORT_MII;
38698 }
38699 @@ -2109,10 +2109,10 @@ static int pcnet32_open(struct net_device *dev)
38700 }
38701
38702 /* Reset the PCNET32 */
38703 - lp->a.reset(ioaddr);
38704 + lp->a->reset(ioaddr);
38705
38706 /* switch pcnet32 to 32bit mode */
38707 - lp->a.write_bcr(ioaddr, 20, 2);
38708 + lp->a->write_bcr(ioaddr, 20, 2);
38709
38710 if (netif_msg_ifup(lp))
38711 printk(KERN_DEBUG
38712 @@ -2122,14 +2122,14 @@ static int pcnet32_open(struct net_device *dev)
38713 (u32) (lp->init_dma_addr));
38714
38715 /* set/reset autoselect bit */
38716 - val = lp->a.read_bcr(ioaddr, 2) & ~2;
38717 + val = lp->a->read_bcr(ioaddr, 2) & ~2;
38718 if (lp->options & PCNET32_PORT_ASEL)
38719 val |= 2;
38720 - lp->a.write_bcr(ioaddr, 2, val);
38721 + lp->a->write_bcr(ioaddr, 2, val);
38722
38723 /* handle full duplex setting */
38724 if (lp->mii_if.full_duplex) {
38725 - val = lp->a.read_bcr(ioaddr, 9) & ~3;
38726 + val = lp->a->read_bcr(ioaddr, 9) & ~3;
38727 if (lp->options & PCNET32_PORT_FD) {
38728 val |= 1;
38729 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
38730 @@ -2139,14 +2139,14 @@ static int pcnet32_open(struct net_device *dev)
38731 if (lp->chip_version == 0x2627)
38732 val |= 3;
38733 }
38734 - lp->a.write_bcr(ioaddr, 9, val);
38735 + lp->a->write_bcr(ioaddr, 9, val);
38736 }
38737
38738 /* set/reset GPSI bit in test register */
38739 - val = lp->a.read_csr(ioaddr, 124) & ~0x10;
38740 + val = lp->a->read_csr(ioaddr, 124) & ~0x10;
38741 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
38742 val |= 0x10;
38743 - lp->a.write_csr(ioaddr, 124, val);
38744 + lp->a->write_csr(ioaddr, 124, val);
38745
38746 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
38747 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
38748 @@ -2167,24 +2167,24 @@ static int pcnet32_open(struct net_device *dev)
38749 * duplex, and/or enable auto negotiation, and clear DANAS
38750 */
38751 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
38752 - lp->a.write_bcr(ioaddr, 32,
38753 - lp->a.read_bcr(ioaddr, 32) | 0x0080);
38754 + lp->a->write_bcr(ioaddr, 32,
38755 + lp->a->read_bcr(ioaddr, 32) | 0x0080);
38756 /* disable Auto Negotiation, set 10Mpbs, HD */
38757 - val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
38758 + val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
38759 if (lp->options & PCNET32_PORT_FD)
38760 val |= 0x10;
38761 if (lp->options & PCNET32_PORT_100)
38762 val |= 0x08;
38763 - lp->a.write_bcr(ioaddr, 32, val);
38764 + lp->a->write_bcr(ioaddr, 32, val);
38765 } else {
38766 if (lp->options & PCNET32_PORT_ASEL) {
38767 - lp->a.write_bcr(ioaddr, 32,
38768 - lp->a.read_bcr(ioaddr,
38769 + lp->a->write_bcr(ioaddr, 32,
38770 + lp->a->read_bcr(ioaddr,
38771 32) | 0x0080);
38772 /* enable auto negotiate, setup, disable fd */
38773 - val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
38774 + val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
38775 val |= 0x20;
38776 - lp->a.write_bcr(ioaddr, 32, val);
38777 + lp->a->write_bcr(ioaddr, 32, val);
38778 }
38779 }
38780 } else {
38781 @@ -2197,10 +2197,10 @@ static int pcnet32_open(struct net_device *dev)
38782 * There is really no good other way to handle multiple PHYs
38783 * other than turning off all automatics
38784 */
38785 - val = lp->a.read_bcr(ioaddr, 2);
38786 - lp->a.write_bcr(ioaddr, 2, val & ~2);
38787 - val = lp->a.read_bcr(ioaddr, 32);
38788 - lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
38789 + val = lp->a->read_bcr(ioaddr, 2);
38790 + lp->a->write_bcr(ioaddr, 2, val & ~2);
38791 + val = lp->a->read_bcr(ioaddr, 32);
38792 + lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
38793
38794 if (!(lp->options & PCNET32_PORT_ASEL)) {
38795 /* setup ecmd */
38796 @@ -2210,7 +2210,7 @@ static int pcnet32_open(struct net_device *dev)
38797 ecmd.speed =
38798 lp->
38799 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
38800 - bcr9 = lp->a.read_bcr(ioaddr, 9);
38801 + bcr9 = lp->a->read_bcr(ioaddr, 9);
38802
38803 if (lp->options & PCNET32_PORT_FD) {
38804 ecmd.duplex = DUPLEX_FULL;
38805 @@ -2219,7 +2219,7 @@ static int pcnet32_open(struct net_device *dev)
38806 ecmd.duplex = DUPLEX_HALF;
38807 bcr9 |= ~(1 << 0);
38808 }
38809 - lp->a.write_bcr(ioaddr, 9, bcr9);
38810 + lp->a->write_bcr(ioaddr, 9, bcr9);
38811 }
38812
38813 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
38814 @@ -2252,9 +2252,9 @@ static int pcnet32_open(struct net_device *dev)
38815
38816 #ifdef DO_DXSUFLO
38817 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
38818 - val = lp->a.read_csr(ioaddr, CSR3);
38819 + val = lp->a->read_csr(ioaddr, CSR3);
38820 val |= 0x40;
38821 - lp->a.write_csr(ioaddr, CSR3, val);
38822 + lp->a->write_csr(ioaddr, CSR3, val);
38823 }
38824 #endif
38825
38826 @@ -2270,11 +2270,11 @@ static int pcnet32_open(struct net_device *dev)
38827 napi_enable(&lp->napi);
38828
38829 /* Re-initialize the PCNET32, and start it when done. */
38830 - lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
38831 - lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
38832 + lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
38833 + lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
38834
38835 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38836 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
38837 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38838 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
38839
38840 netif_start_queue(dev);
38841
38842 @@ -2286,20 +2286,20 @@ static int pcnet32_open(struct net_device *dev)
38843
38844 i = 0;
38845 while (i++ < 100)
38846 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
38847 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
38848 break;
38849 /*
38850 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
38851 * reports that doing so triggers a bug in the '974.
38852 */
38853 - lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
38854 + lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
38855
38856 if (netif_msg_ifup(lp))
38857 printk(KERN_DEBUG
38858 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
38859 dev->name, i,
38860 (u32) (lp->init_dma_addr),
38861 - lp->a.read_csr(ioaddr, CSR0));
38862 + lp->a->read_csr(ioaddr, CSR0));
38863
38864 spin_unlock_irqrestore(&lp->lock, flags);
38865
38866 @@ -2313,7 +2313,7 @@ static int pcnet32_open(struct net_device *dev)
38867 * Switch back to 16bit mode to avoid problems with dumb
38868 * DOS packet driver after a warm reboot
38869 */
38870 - lp->a.write_bcr(ioaddr, 20, 4);
38871 + lp->a->write_bcr(ioaddr, 20, 4);
38872
38873 err_free_irq:
38874 spin_unlock_irqrestore(&lp->lock, flags);
38875 @@ -2420,7 +2420,7 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
38876
38877 /* wait for stop */
38878 for (i = 0; i < 100; i++)
38879 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
38880 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
38881 break;
38882
38883 if (i >= 100 && netif_msg_drv(lp))
38884 @@ -2433,13 +2433,13 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
38885 return;
38886
38887 /* ReInit Ring */
38888 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
38889 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
38890 i = 0;
38891 while (i++ < 1000)
38892 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
38893 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
38894 break;
38895
38896 - lp->a.write_csr(ioaddr, CSR0, csr0_bits);
38897 + lp->a->write_csr(ioaddr, CSR0, csr0_bits);
38898 }
38899
38900 static void pcnet32_tx_timeout(struct net_device *dev)
38901 @@ -2452,8 +2452,8 @@ static void pcnet32_tx_timeout(struct net_device *dev)
38902 if (pcnet32_debug & NETIF_MSG_DRV)
38903 printk(KERN_ERR
38904 "%s: transmit timed out, status %4.4x, resetting.\n",
38905 - dev->name, lp->a.read_csr(ioaddr, CSR0));
38906 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
38907 + dev->name, lp->a->read_csr(ioaddr, CSR0));
38908 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
38909 dev->stats.tx_errors++;
38910 if (netif_msg_tx_err(lp)) {
38911 int i;
38912 @@ -2497,7 +2497,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
38913 if (netif_msg_tx_queued(lp)) {
38914 printk(KERN_DEBUG
38915 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
38916 - dev->name, lp->a.read_csr(ioaddr, CSR0));
38917 + dev->name, lp->a->read_csr(ioaddr, CSR0));
38918 }
38919
38920 /* Default status -- will not enable Successful-TxDone
38921 @@ -2528,7 +2528,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
38922 dev->stats.tx_bytes += skb->len;
38923
38924 /* Trigger an immediate send poll. */
38925 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
38926 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
38927
38928 dev->trans_start = jiffies;
38929
38930 @@ -2555,18 +2555,18 @@ pcnet32_interrupt(int irq, void *dev_id)
38931
38932 spin_lock(&lp->lock);
38933
38934 - csr0 = lp->a.read_csr(ioaddr, CSR0);
38935 + csr0 = lp->a->read_csr(ioaddr, CSR0);
38936 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
38937 if (csr0 == 0xffff) {
38938 break; /* PCMCIA remove happened */
38939 }
38940 /* Acknowledge all of the current interrupt sources ASAP. */
38941 - lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
38942 + lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
38943
38944 if (netif_msg_intr(lp))
38945 printk(KERN_DEBUG
38946 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
38947 - dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
38948 + dev->name, csr0, lp->a->read_csr(ioaddr, CSR0));
38949
38950 /* Log misc errors. */
38951 if (csr0 & 0x4000)
38952 @@ -2595,19 +2595,19 @@ pcnet32_interrupt(int irq, void *dev_id)
38953 if (napi_schedule_prep(&lp->napi)) {
38954 u16 val;
38955 /* set interrupt masks */
38956 - val = lp->a.read_csr(ioaddr, CSR3);
38957 + val = lp->a->read_csr(ioaddr, CSR3);
38958 val |= 0x5f00;
38959 - lp->a.write_csr(ioaddr, CSR3, val);
38960 + lp->a->write_csr(ioaddr, CSR3, val);
38961
38962 __napi_schedule(&lp->napi);
38963 break;
38964 }
38965 - csr0 = lp->a.read_csr(ioaddr, CSR0);
38966 + csr0 = lp->a->read_csr(ioaddr, CSR0);
38967 }
38968
38969 if (netif_msg_intr(lp))
38970 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
38971 - dev->name, lp->a.read_csr(ioaddr, CSR0));
38972 + dev->name, lp->a->read_csr(ioaddr, CSR0));
38973
38974 spin_unlock(&lp->lock);
38975
38976 @@ -2627,21 +2627,21 @@ static int pcnet32_close(struct net_device *dev)
38977
38978 spin_lock_irqsave(&lp->lock, flags);
38979
38980 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
38981 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
38982
38983 if (netif_msg_ifdown(lp))
38984 printk(KERN_DEBUG
38985 "%s: Shutting down ethercard, status was %2.2x.\n",
38986 - dev->name, lp->a.read_csr(ioaddr, CSR0));
38987 + dev->name, lp->a->read_csr(ioaddr, CSR0));
38988
38989 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
38990 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
38991 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
38992
38993 /*
38994 * Switch back to 16bit mode to avoid problems with dumb
38995 * DOS packet driver after a warm reboot
38996 */
38997 - lp->a.write_bcr(ioaddr, 20, 4);
38998 + lp->a->write_bcr(ioaddr, 20, 4);
38999
39000 spin_unlock_irqrestore(&lp->lock, flags);
39001
39002 @@ -2664,7 +2664,7 @@ static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
39003 unsigned long flags;
39004
39005 spin_lock_irqsave(&lp->lock, flags);
39006 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
39007 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
39008 spin_unlock_irqrestore(&lp->lock, flags);
39009
39010 return &dev->stats;
39011 @@ -2686,10 +2686,10 @@ static void pcnet32_load_multicast(struct net_device *dev)
39012 if (dev->flags & IFF_ALLMULTI) {
39013 ib->filter[0] = cpu_to_le32(~0U);
39014 ib->filter[1] = cpu_to_le32(~0U);
39015 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
39016 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
39017 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
39018 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
39019 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
39020 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
39021 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
39022 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
39023 return;
39024 }
39025 /* clear the multicast filter */
39026 @@ -2710,7 +2710,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
39027 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
39028 }
39029 for (i = 0; i < 4; i++)
39030 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
39031 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
39032 le16_to_cpu(mcast_table[i]));
39033 return;
39034 }
39035 @@ -2726,7 +2726,7 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
39036
39037 spin_lock_irqsave(&lp->lock, flags);
39038 suspended = pcnet32_suspend(dev, &flags, 0);
39039 - csr15 = lp->a.read_csr(ioaddr, CSR15);
39040 + csr15 = lp->a->read_csr(ioaddr, CSR15);
39041 if (dev->flags & IFF_PROMISC) {
39042 /* Log any net taps. */
39043 if (netif_msg_hw(lp))
39044 @@ -2735,21 +2735,21 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
39045 lp->init_block->mode =
39046 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
39047 7);
39048 - lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
39049 + lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
39050 } else {
39051 lp->init_block->mode =
39052 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
39053 - lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
39054 + lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
39055 pcnet32_load_multicast(dev);
39056 }
39057
39058 if (suspended) {
39059 int csr5;
39060 /* clear SUSPEND (SPND) - CSR5 bit 0 */
39061 - csr5 = lp->a.read_csr(ioaddr, CSR5);
39062 - lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
39063 + csr5 = lp->a->read_csr(ioaddr, CSR5);
39064 + lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
39065 } else {
39066 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
39067 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
39068 pcnet32_restart(dev, CSR0_NORMAL);
39069 netif_wake_queue(dev);
39070 }
39071 @@ -2767,8 +2767,8 @@ static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
39072 if (!lp->mii)
39073 return 0;
39074
39075 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39076 - val_out = lp->a.read_bcr(ioaddr, 34);
39077 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39078 + val_out = lp->a->read_bcr(ioaddr, 34);
39079
39080 return val_out;
39081 }
39082 @@ -2782,8 +2782,8 @@ static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
39083 if (!lp->mii)
39084 return;
39085
39086 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39087 - lp->a.write_bcr(ioaddr, 34, val);
39088 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39089 + lp->a->write_bcr(ioaddr, 34, val);
39090 }
39091
39092 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39093 @@ -2862,7 +2862,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
39094 curr_link = mii_link_ok(&lp->mii_if);
39095 } else {
39096 ulong ioaddr = dev->base_addr; /* card base I/O address */
39097 - curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
39098 + curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
39099 }
39100 if (!curr_link) {
39101 if (prev_link || verbose) {
39102 @@ -2887,13 +2887,13 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
39103 (ecmd.duplex ==
39104 DUPLEX_FULL) ? "full" : "half");
39105 }
39106 - bcr9 = lp->a.read_bcr(dev->base_addr, 9);
39107 + bcr9 = lp->a->read_bcr(dev->base_addr, 9);
39108 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
39109 if (lp->mii_if.full_duplex)
39110 bcr9 |= (1 << 0);
39111 else
39112 bcr9 &= ~(1 << 0);
39113 - lp->a.write_bcr(dev->base_addr, 9, bcr9);
39114 + lp->a->write_bcr(dev->base_addr, 9, bcr9);
39115 }
39116 } else {
39117 if (netif_msg_link(lp))
39118 diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
39119 index 7cc9898..6eb50d3 100644
39120 --- a/drivers/net/sis190.c
39121 +++ b/drivers/net/sis190.c
39122 @@ -1598,7 +1598,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
39123 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
39124 struct net_device *dev)
39125 {
39126 - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
39127 + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
39128 struct sis190_private *tp = netdev_priv(dev);
39129 struct pci_dev *isa_bridge;
39130 u8 reg, tmp8;
39131 diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
39132 index e13685a..60c948c 100644
39133 --- a/drivers/net/sundance.c
39134 +++ b/drivers/net/sundance.c
39135 @@ -225,7 +225,7 @@ enum {
39136 struct pci_id_info {
39137 const char *name;
39138 };
39139 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
39140 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
39141 {"D-Link DFE-550TX FAST Ethernet Adapter"},
39142 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
39143 {"D-Link DFE-580TX 4 port Server Adapter"},
39144 diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
39145 index 529f55a..cccaa18 100644
39146 --- a/drivers/net/tg3.h
39147 +++ b/drivers/net/tg3.h
39148 @@ -95,6 +95,7 @@
39149 #define CHIPREV_ID_5750_A0 0x4000
39150 #define CHIPREV_ID_5750_A1 0x4001
39151 #define CHIPREV_ID_5750_A3 0x4003
39152 +#define CHIPREV_ID_5750_C1 0x4201
39153 #define CHIPREV_ID_5750_C2 0x4202
39154 #define CHIPREV_ID_5752_A0_HW 0x5000
39155 #define CHIPREV_ID_5752_A0 0x6000
39156 diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
39157 index b9db1b5..720f9ce 100644
39158 --- a/drivers/net/tokenring/abyss.c
39159 +++ b/drivers/net/tokenring/abyss.c
39160 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
39161
39162 static int __init abyss_init (void)
39163 {
39164 - abyss_netdev_ops = tms380tr_netdev_ops;
39165 + pax_open_kernel();
39166 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39167
39168 - abyss_netdev_ops.ndo_open = abyss_open;
39169 - abyss_netdev_ops.ndo_stop = abyss_close;
39170 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
39171 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
39172 + pax_close_kernel();
39173
39174 return pci_register_driver(&abyss_driver);
39175 }
39176 diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
39177 index 456f8bf..373e56d 100644
39178 --- a/drivers/net/tokenring/madgemc.c
39179 +++ b/drivers/net/tokenring/madgemc.c
39180 @@ -755,9 +755,11 @@ static struct mca_driver madgemc_driver = {
39181
39182 static int __init madgemc_init (void)
39183 {
39184 - madgemc_netdev_ops = tms380tr_netdev_ops;
39185 - madgemc_netdev_ops.ndo_open = madgemc_open;
39186 - madgemc_netdev_ops.ndo_stop = madgemc_close;
39187 + pax_open_kernel();
39188 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39189 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
39190 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
39191 + pax_close_kernel();
39192
39193 return mca_register_driver (&madgemc_driver);
39194 }
39195 diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
39196 index 16e8783..925bd49 100644
39197 --- a/drivers/net/tokenring/proteon.c
39198 +++ b/drivers/net/tokenring/proteon.c
39199 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
39200 struct platform_device *pdev;
39201 int i, num = 0, err = 0;
39202
39203 - proteon_netdev_ops = tms380tr_netdev_ops;
39204 - proteon_netdev_ops.ndo_open = proteon_open;
39205 - proteon_netdev_ops.ndo_stop = tms380tr_close;
39206 + pax_open_kernel();
39207 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39208 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
39209 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
39210 + pax_close_kernel();
39211
39212 err = platform_driver_register(&proteon_driver);
39213 if (err)
39214 diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
39215 index 46db5c5..37c1536 100644
39216 --- a/drivers/net/tokenring/skisa.c
39217 +++ b/drivers/net/tokenring/skisa.c
39218 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
39219 struct platform_device *pdev;
39220 int i, num = 0, err = 0;
39221
39222 - sk_isa_netdev_ops = tms380tr_netdev_ops;
39223 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
39224 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
39225 + pax_open_kernel();
39226 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39227 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
39228 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
39229 + pax_close_kernel();
39230
39231 err = platform_driver_register(&sk_isa_driver);
39232 if (err)
39233 diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
39234 index 74e5ba4..5cf6bc9 100644
39235 --- a/drivers/net/tulip/de2104x.c
39236 +++ b/drivers/net/tulip/de2104x.c
39237 @@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
39238 struct de_srom_info_leaf *il;
39239 void *bufp;
39240
39241 + pax_track_stack();
39242 +
39243 /* download entire eeprom */
39244 for (i = 0; i < DE_EEPROM_WORDS; i++)
39245 ((__le16 *)ee_data)[i] =
39246 diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
39247 index a8349b7..90f9dfe 100644
39248 --- a/drivers/net/tulip/de4x5.c
39249 +++ b/drivers/net/tulip/de4x5.c
39250 @@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39251 for (i=0; i<ETH_ALEN; i++) {
39252 tmp.addr[i] = dev->dev_addr[i];
39253 }
39254 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
39255 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
39256 break;
39257
39258 case DE4X5_SET_HWADDR: /* Set the hardware address */
39259 @@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39260 spin_lock_irqsave(&lp->lock, flags);
39261 memcpy(&statbuf, &lp->pktStats, ioc->len);
39262 spin_unlock_irqrestore(&lp->lock, flags);
39263 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
39264 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
39265 return -EFAULT;
39266 break;
39267 }
39268 diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
39269 index 391acd3..56d11cd 100644
39270 --- a/drivers/net/tulip/eeprom.c
39271 +++ b/drivers/net/tulip/eeprom.c
39272 @@ -80,7 +80,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
39273 {NULL}};
39274
39275
39276 -static const char *block_name[] __devinitdata = {
39277 +static const char *block_name[] __devinitconst = {
39278 "21140 non-MII",
39279 "21140 MII PHY",
39280 "21142 Serial PHY",
39281 diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
39282 index b38d3b7..b1cff23 100644
39283 --- a/drivers/net/tulip/winbond-840.c
39284 +++ b/drivers/net/tulip/winbond-840.c
39285 @@ -235,7 +235,7 @@ struct pci_id_info {
39286 int drv_flags; /* Driver use, intended as capability flags. */
39287 };
39288
39289 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
39290 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
39291 { /* Sometime a Level-One switch card. */
39292 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
39293 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
39294 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
39295 index f450bc9..2b747c8 100644
39296 --- a/drivers/net/usb/hso.c
39297 +++ b/drivers/net/usb/hso.c
39298 @@ -71,7 +71,7 @@
39299 #include <asm/byteorder.h>
39300 #include <linux/serial_core.h>
39301 #include <linux/serial.h>
39302 -
39303 +#include <asm/local.h>
39304
39305 #define DRIVER_VERSION "1.2"
39306 #define MOD_AUTHOR "Option Wireless"
39307 @@ -258,7 +258,7 @@ struct hso_serial {
39308
39309 /* from usb_serial_port */
39310 struct tty_struct *tty;
39311 - int open_count;
39312 + local_t open_count;
39313 spinlock_t serial_lock;
39314
39315 int (*write_data) (struct hso_serial *serial);
39316 @@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
39317 struct urb *urb;
39318
39319 urb = serial->rx_urb[0];
39320 - if (serial->open_count > 0) {
39321 + if (local_read(&serial->open_count) > 0) {
39322 count = put_rxbuf_data(urb, serial);
39323 if (count == -1)
39324 return;
39325 @@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
39326 DUMP1(urb->transfer_buffer, urb->actual_length);
39327
39328 /* Anyone listening? */
39329 - if (serial->open_count == 0)
39330 + if (local_read(&serial->open_count) == 0)
39331 return;
39332
39333 if (status == 0) {
39334 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
39335 spin_unlock_irq(&serial->serial_lock);
39336
39337 /* check for port already opened, if not set the termios */
39338 - serial->open_count++;
39339 - if (serial->open_count == 1) {
39340 + if (local_inc_return(&serial->open_count) == 1) {
39341 tty->low_latency = 1;
39342 serial->rx_state = RX_IDLE;
39343 /* Force default termio settings */
39344 @@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
39345 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
39346 if (result) {
39347 hso_stop_serial_device(serial->parent);
39348 - serial->open_count--;
39349 + local_dec(&serial->open_count);
39350 kref_put(&serial->parent->ref, hso_serial_ref_free);
39351 }
39352 } else {
39353 @@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
39354
39355 /* reset the rts and dtr */
39356 /* do the actual close */
39357 - serial->open_count--;
39358 + local_dec(&serial->open_count);
39359
39360 - if (serial->open_count <= 0) {
39361 - serial->open_count = 0;
39362 + if (local_read(&serial->open_count) <= 0) {
39363 + local_set(&serial->open_count, 0);
39364 spin_lock_irq(&serial->serial_lock);
39365 if (serial->tty == tty) {
39366 serial->tty->driver_data = NULL;
39367 @@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
39368
39369 /* the actual setup */
39370 spin_lock_irqsave(&serial->serial_lock, flags);
39371 - if (serial->open_count)
39372 + if (local_read(&serial->open_count))
39373 _hso_serial_set_termios(tty, old);
39374 else
39375 tty->termios = old;
39376 @@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interface *iface)
39377 /* Start all serial ports */
39378 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
39379 if (serial_table[i] && (serial_table[i]->interface == iface)) {
39380 - if (dev2ser(serial_table[i])->open_count) {
39381 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
39382 result =
39383 hso_start_serial_device(serial_table[i], GFP_NOIO);
39384 hso_kick_transmit(dev2ser(serial_table[i]));
39385 diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
39386 index 3e94f0c..ffdd926 100644
39387 --- a/drivers/net/vxge/vxge-config.h
39388 +++ b/drivers/net/vxge/vxge-config.h
39389 @@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs {
39390 void (*link_down)(struct __vxge_hw_device *devh);
39391 void (*crit_err)(struct __vxge_hw_device *devh,
39392 enum vxge_hw_event type, u64 ext_data);
39393 -};
39394 +} __no_const;
39395
39396 /*
39397 * struct __vxge_hw_blockpool_entry - Block private data structure
39398 diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
39399 index 068d7a9..35293de 100644
39400 --- a/drivers/net/vxge/vxge-main.c
39401 +++ b/drivers/net/vxge/vxge-main.c
39402 @@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
39403 struct sk_buff *completed[NR_SKB_COMPLETED];
39404 int more;
39405
39406 + pax_track_stack();
39407 +
39408 do {
39409 more = 0;
39410 skb_ptr = completed;
39411 @@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
39412 u8 mtable[256] = {0}; /* CPU to vpath mapping */
39413 int index;
39414
39415 + pax_track_stack();
39416 +
39417 /*
39418 * Filling
39419 * - itable with bucket numbers
39420 diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
39421 index 461742b..81be42e 100644
39422 --- a/drivers/net/vxge/vxge-traffic.h
39423 +++ b/drivers/net/vxge/vxge-traffic.h
39424 @@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs {
39425 struct vxge_hw_mempool_dma *dma_object,
39426 u32 index,
39427 u32 is_last);
39428 -};
39429 +} __no_const;
39430
39431 void
39432 __vxge_hw_mempool_destroy(
39433 diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
39434 index cd8cb95..4153b79 100644
39435 --- a/drivers/net/wan/cycx_x25.c
39436 +++ b/drivers/net/wan/cycx_x25.c
39437 @@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned char *p, int len)
39438 unsigned char hex[1024],
39439 * phex = hex;
39440
39441 + pax_track_stack();
39442 +
39443 if (len >= (sizeof(hex) / 2))
39444 len = (sizeof(hex) / 2) - 1;
39445
39446 diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
39447 index aa9248f..a4e3c3b 100644
39448 --- a/drivers/net/wan/hdlc_x25.c
39449 +++ b/drivers/net/wan/hdlc_x25.c
39450 @@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
39451
39452 static int x25_open(struct net_device *dev)
39453 {
39454 - struct lapb_register_struct cb;
39455 + static struct lapb_register_struct cb = {
39456 + .connect_confirmation = x25_connected,
39457 + .connect_indication = x25_connected,
39458 + .disconnect_confirmation = x25_disconnected,
39459 + .disconnect_indication = x25_disconnected,
39460 + .data_indication = x25_data_indication,
39461 + .data_transmit = x25_data_transmit
39462 + };
39463 int result;
39464
39465 - cb.connect_confirmation = x25_connected;
39466 - cb.connect_indication = x25_connected;
39467 - cb.disconnect_confirmation = x25_disconnected;
39468 - cb.disconnect_indication = x25_disconnected;
39469 - cb.data_indication = x25_data_indication;
39470 - cb.data_transmit = x25_data_transmit;
39471 -
39472 result = lapb_register(dev, &cb);
39473 if (result != LAPB_OK)
39474 return result;
39475 diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
39476 index 5ad287c..783b020 100644
39477 --- a/drivers/net/wimax/i2400m/usb-fw.c
39478 +++ b/drivers/net/wimax/i2400m/usb-fw.c
39479 @@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *i2400m,
39480 int do_autopm = 1;
39481 DECLARE_COMPLETION_ONSTACK(notif_completion);
39482
39483 + pax_track_stack();
39484 +
39485 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
39486 i2400m, ack, ack_size);
39487 BUG_ON(_ack == i2400m->bm_ack_buf);
39488 diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
39489 index 6c26840..62c97c3 100644
39490 --- a/drivers/net/wireless/airo.c
39491 +++ b/drivers/net/wireless/airo.c
39492 @@ -3003,6 +3003,8 @@ static void airo_process_scan_results (struct airo_info *ai) {
39493 BSSListElement * loop_net;
39494 BSSListElement * tmp_net;
39495
39496 + pax_track_stack();
39497 +
39498 /* Blow away current list of scan results */
39499 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
39500 list_move_tail (&loop_net->list, &ai->network_free_list);
39501 @@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
39502 WepKeyRid wkr;
39503 int rc;
39504
39505 + pax_track_stack();
39506 +
39507 memset( &mySsid, 0, sizeof( mySsid ) );
39508 kfree (ai->flash);
39509 ai->flash = NULL;
39510 @@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct inode *inode,
39511 __le32 *vals = stats.vals;
39512 int len;
39513
39514 + pax_track_stack();
39515 +
39516 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
39517 return -ENOMEM;
39518 data = (struct proc_data *)file->private_data;
39519 @@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
39520 /* If doLoseSync is not 1, we won't do a Lose Sync */
39521 int doLoseSync = -1;
39522
39523 + pax_track_stack();
39524 +
39525 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
39526 return -ENOMEM;
39527 data = (struct proc_data *)file->private_data;
39528 @@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_device *dev,
39529 int i;
39530 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
39531
39532 + pax_track_stack();
39533 +
39534 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
39535 if (!qual)
39536 return -ENOMEM;
39537 @@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(struct airo_info *local)
39538 CapabilityRid cap_rid;
39539 __le32 *vals = stats_rid.vals;
39540
39541 + pax_track_stack();
39542 +
39543 /* Get stats out of the card */
39544 clear_bit(JOB_WSTATS, &local->jobs);
39545 if (local->power.event) {
39546 diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
39547 index 747508c..82e965d 100644
39548 --- a/drivers/net/wireless/ath/ath5k/debug.c
39549 +++ b/drivers/net/wireless/ath/ath5k/debug.c
39550 @@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
39551 unsigned int v;
39552 u64 tsf;
39553
39554 + pax_track_stack();
39555 +
39556 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
39557 len += snprintf(buf+len, sizeof(buf)-len,
39558 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
39559 @@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
39560 unsigned int len = 0;
39561 unsigned int i;
39562
39563 + pax_track_stack();
39564 +
39565 len += snprintf(buf+len, sizeof(buf)-len,
39566 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
39567
39568 diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
39569 index 2be4c22..593b1eb 100644
39570 --- a/drivers/net/wireless/ath/ath9k/debug.c
39571 +++ b/drivers/net/wireless/ath/ath9k/debug.c
39572 @@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
39573 char buf[512];
39574 unsigned int len = 0;
39575
39576 + pax_track_stack();
39577 +
39578 len += snprintf(buf + len, sizeof(buf) - len,
39579 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
39580 len += snprintf(buf + len, sizeof(buf) - len,
39581 @@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
39582 int i;
39583 u8 addr[ETH_ALEN];
39584
39585 + pax_track_stack();
39586 +
39587 len += snprintf(buf + len, sizeof(buf) - len,
39588 "primary: %s (%s chan=%d ht=%d)\n",
39589 wiphy_name(sc->pri_wiphy->hw->wiphy),
39590 diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c
39591 index 80b19a4..dab3a45 100644
39592 --- a/drivers/net/wireless/b43/debugfs.c
39593 +++ b/drivers/net/wireless/b43/debugfs.c
39594 @@ -43,7 +43,7 @@ static struct dentry *rootdir;
39595 struct b43_debugfs_fops {
39596 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
39597 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
39598 - struct file_operations fops;
39599 + const struct file_operations fops;
39600 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
39601 size_t file_struct_offset;
39602 };
39603 diff --git a/drivers/net/wireless/b43legacy/debugfs.c b/drivers/net/wireless/b43legacy/debugfs.c
39604 index 1f85ac5..c99b4b4 100644
39605 --- a/drivers/net/wireless/b43legacy/debugfs.c
39606 +++ b/drivers/net/wireless/b43legacy/debugfs.c
39607 @@ -44,7 +44,7 @@ static struct dentry *rootdir;
39608 struct b43legacy_debugfs_fops {
39609 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
39610 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
39611 - struct file_operations fops;
39612 + const struct file_operations fops;
39613 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
39614 size_t file_struct_offset;
39615 /* Take wl->irq_lock before calling read/write? */
39616 diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
39617 index 43102bf..3b569c3 100644
39618 --- a/drivers/net/wireless/ipw2x00/ipw2100.c
39619 +++ b/drivers/net/wireless/ipw2x00/ipw2100.c
39620 @@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
39621 int err;
39622 DECLARE_SSID_BUF(ssid);
39623
39624 + pax_track_stack();
39625 +
39626 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
39627
39628 if (ssid_len)
39629 @@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw2100_priv *priv,
39630 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
39631 int err;
39632
39633 + pax_track_stack();
39634 +
39635 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
39636 idx, keylen, len);
39637
39638 diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
39639 index 282b1f7..169f0cf 100644
39640 --- a/drivers/net/wireless/ipw2x00/libipw_rx.c
39641 +++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
39642 @@ -1566,6 +1566,8 @@ static void libipw_process_probe_response(struct libipw_device
39643 unsigned long flags;
39644 DECLARE_SSID_BUF(ssid);
39645
39646 + pax_track_stack();
39647 +
39648 LIBIPW_DEBUG_SCAN("'%s' (%pM"
39649 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
39650 print_ssid(ssid, info_element->data, info_element->len),
39651 diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
39652 index 950267a..80d5fd2 100644
39653 --- a/drivers/net/wireless/iwlwifi/iwl-1000.c
39654 +++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
39655 @@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib = {
39656 },
39657 };
39658
39659 -static struct iwl_ops iwl1000_ops = {
39660 +static const struct iwl_ops iwl1000_ops = {
39661 .ucode = &iwl5000_ucode,
39662 .lib = &iwl1000_lib,
39663 .hcmd = &iwl5000_hcmd,
39664 diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
39665 index 56bfcc3..b348020 100644
39666 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c
39667 +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
39668 @@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
39669 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
39670 };
39671
39672 -static struct iwl_ops iwl3945_ops = {
39673 +static const struct iwl_ops iwl3945_ops = {
39674 .ucode = &iwl3945_ucode,
39675 .lib = &iwl3945_lib,
39676 .hcmd = &iwl3945_hcmd,
39677 diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
39678 index 585b8d4..e142963 100644
39679 --- a/drivers/net/wireless/iwlwifi/iwl-4965.c
39680 +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
39681 @@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib = {
39682 },
39683 };
39684
39685 -static struct iwl_ops iwl4965_ops = {
39686 +static const struct iwl_ops iwl4965_ops = {
39687 .ucode = &iwl4965_ucode,
39688 .lib = &iwl4965_lib,
39689 .hcmd = &iwl4965_hcmd,
39690 diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
39691 index 1f423f2..e37c192 100644
39692 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c
39693 +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
39694 @@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib = {
39695 },
39696 };
39697
39698 -struct iwl_ops iwl5000_ops = {
39699 +const struct iwl_ops iwl5000_ops = {
39700 .ucode = &iwl5000_ucode,
39701 .lib = &iwl5000_lib,
39702 .hcmd = &iwl5000_hcmd,
39703 .utils = &iwl5000_hcmd_utils,
39704 };
39705
39706 -static struct iwl_ops iwl5150_ops = {
39707 +static const struct iwl_ops iwl5150_ops = {
39708 .ucode = &iwl5000_ucode,
39709 .lib = &iwl5150_lib,
39710 .hcmd = &iwl5000_hcmd,
39711 diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
39712 index 1473452..f07d5e1 100644
39713 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c
39714 +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
39715 @@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000_hcmd_utils = {
39716 .calc_rssi = iwl5000_calc_rssi,
39717 };
39718
39719 -static struct iwl_ops iwl6000_ops = {
39720 +static const struct iwl_ops iwl6000_ops = {
39721 .ucode = &iwl5000_ucode,
39722 .lib = &iwl6000_lib,
39723 .hcmd = &iwl5000_hcmd,
39724 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39725 index 1a3dfa2..b3e0a61 100644
39726 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39727 +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39728 @@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
39729 u8 active_index = 0;
39730 s32 tpt = 0;
39731
39732 + pax_track_stack();
39733 +
39734 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
39735
39736 if (!ieee80211_is_data(hdr->frame_control) ||
39737 @@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
39738 u8 valid_tx_ant = 0;
39739 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
39740
39741 + pax_track_stack();
39742 +
39743 /* Override starting rate (index 0) if needed for debug purposes */
39744 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
39745
39746 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
39747 index 0e56d78..6a3c107 100644
39748 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c
39749 +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
39750 @@ -2911,7 +2911,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
39751 if (iwl_debug_level & IWL_DL_INFO)
39752 dev_printk(KERN_DEBUG, &(pdev->dev),
39753 "Disabling hw_scan\n");
39754 - iwl_hw_ops.hw_scan = NULL;
39755 + pax_open_kernel();
39756 + *(void **)&iwl_hw_ops.hw_scan = NULL;
39757 + pax_close_kernel();
39758 }
39759
39760 hw = iwl_alloc_all(cfg, &iwl_hw_ops);
39761 diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
39762 index cbc6290..eb323d7 100644
39763 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h
39764 +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
39765 @@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv);
39766 #endif
39767
39768 #else
39769 -#define IWL_DEBUG(__priv, level, fmt, args...)
39770 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
39771 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
39772 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
39773 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
39774 void *p, u32 len)
39775 {}
39776 diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39777 index a198bcf..8e68233 100644
39778 --- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39779 +++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39780 @@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
39781 int pos = 0;
39782 const size_t bufsz = sizeof(buf);
39783
39784 + pax_track_stack();
39785 +
39786 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
39787 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
39788 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
39789 @@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
39790 const size_t bufsz = sizeof(buf);
39791 ssize_t ret;
39792
39793 + pax_track_stack();
39794 +
39795 for (i = 0; i < AC_NUM; i++) {
39796 pos += scnprintf(buf + pos, bufsz - pos,
39797 "\tcw_min\tcw_max\taifsn\ttxop\n");
39798 diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
39799 index 3539ea4..b174bfa 100644
39800 --- a/drivers/net/wireless/iwlwifi/iwl-dev.h
39801 +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
39802 @@ -68,7 +68,7 @@ struct iwl_tx_queue;
39803
39804 /* shared structures from iwl-5000.c */
39805 extern struct iwl_mod_params iwl50_mod_params;
39806 -extern struct iwl_ops iwl5000_ops;
39807 +extern const struct iwl_ops iwl5000_ops;
39808 extern struct iwl_ucode_ops iwl5000_ucode;
39809 extern struct iwl_lib_ops iwl5000_lib;
39810 extern struct iwl_hcmd_ops iwl5000_hcmd;
39811 diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
39812 index 619590d..69235ee 100644
39813 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
39814 +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
39815 @@ -3927,7 +3927,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
39816 */
39817 if (iwl3945_mod_params.disable_hw_scan) {
39818 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
39819 - iwl3945_hw_ops.hw_scan = NULL;
39820 + pax_open_kernel();
39821 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
39822 + pax_close_kernel();
39823 }
39824
39825
39826 diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
39827 index 1465379..fe4d78b 100644
39828 --- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
39829 +++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
39830 @@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(struct file *filp,
39831 int buf_len = 512;
39832 size_t len = 0;
39833
39834 + pax_track_stack();
39835 +
39836 if (*ppos != 0)
39837 return 0;
39838 if (count < sizeof(buf))
39839 diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
39840 index 893a55c..7f66a50 100644
39841 --- a/drivers/net/wireless/libertas/debugfs.c
39842 +++ b/drivers/net/wireless/libertas/debugfs.c
39843 @@ -708,7 +708,7 @@ out_unlock:
39844 struct lbs_debugfs_files {
39845 const char *name;
39846 int perm;
39847 - struct file_operations fops;
39848 + const struct file_operations fops;
39849 };
39850
39851 static const struct lbs_debugfs_files debugfs_files[] = {
39852 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
39853 index 2ecbedb..42704f0 100644
39854 --- a/drivers/net/wireless/rndis_wlan.c
39855 +++ b/drivers/net/wireless/rndis_wlan.c
39856 @@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
39857
39858 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
39859
39860 - if (rts_threshold < 0 || rts_threshold > 2347)
39861 + if (rts_threshold > 2347)
39862 rts_threshold = 2347;
39863
39864 tmp = cpu_to_le32(rts_threshold);
39865 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
39866 index 334ccd6..47f8944 100644
39867 --- a/drivers/oprofile/buffer_sync.c
39868 +++ b/drivers/oprofile/buffer_sync.c
39869 @@ -342,7 +342,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
39870 if (cookie == NO_COOKIE)
39871 offset = pc;
39872 if (cookie == INVALID_COOKIE) {
39873 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
39874 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
39875 offset = pc;
39876 }
39877 if (cookie != last_cookie) {
39878 @@ -386,14 +386,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
39879 /* add userspace sample */
39880
39881 if (!mm) {
39882 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
39883 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
39884 return 0;
39885 }
39886
39887 cookie = lookup_dcookie(mm, s->eip, &offset);
39888
39889 if (cookie == INVALID_COOKIE) {
39890 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
39891 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
39892 return 0;
39893 }
39894
39895 @@ -562,7 +562,7 @@ void sync_buffer(int cpu)
39896 /* ignore backtraces if failed to add a sample */
39897 if (state == sb_bt_start) {
39898 state = sb_bt_ignore;
39899 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
39900 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
39901 }
39902 }
39903 release_mm(mm);
39904 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
39905 index 5df60a6..72f5c1c 100644
39906 --- a/drivers/oprofile/event_buffer.c
39907 +++ b/drivers/oprofile/event_buffer.c
39908 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
39909 }
39910
39911 if (buffer_pos == buffer_size) {
39912 - atomic_inc(&oprofile_stats.event_lost_overflow);
39913 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
39914 return;
39915 }
39916
39917 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
39918 index dc8a042..fe5f315 100644
39919 --- a/drivers/oprofile/oprof.c
39920 +++ b/drivers/oprofile/oprof.c
39921 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
39922 if (oprofile_ops.switch_events())
39923 return;
39924
39925 - atomic_inc(&oprofile_stats.multiplex_counter);
39926 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
39927 start_switch_worker();
39928 }
39929
39930 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
39931 index 61689e8..387f7f8 100644
39932 --- a/drivers/oprofile/oprofile_stats.c
39933 +++ b/drivers/oprofile/oprofile_stats.c
39934 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
39935 cpu_buf->sample_invalid_eip = 0;
39936 }
39937
39938 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
39939 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
39940 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
39941 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
39942 - atomic_set(&oprofile_stats.multiplex_counter, 0);
39943 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
39944 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
39945 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
39946 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
39947 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
39948 }
39949
39950
39951 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
39952 index 0b54e46..a37c527 100644
39953 --- a/drivers/oprofile/oprofile_stats.h
39954 +++ b/drivers/oprofile/oprofile_stats.h
39955 @@ -13,11 +13,11 @@
39956 #include <asm/atomic.h>
39957
39958 struct oprofile_stat_struct {
39959 - atomic_t sample_lost_no_mm;
39960 - atomic_t sample_lost_no_mapping;
39961 - atomic_t bt_lost_no_mapping;
39962 - atomic_t event_lost_overflow;
39963 - atomic_t multiplex_counter;
39964 + atomic_unchecked_t sample_lost_no_mm;
39965 + atomic_unchecked_t sample_lost_no_mapping;
39966 + atomic_unchecked_t bt_lost_no_mapping;
39967 + atomic_unchecked_t event_lost_overflow;
39968 + atomic_unchecked_t multiplex_counter;
39969 };
39970
39971 extern struct oprofile_stat_struct oprofile_stats;
39972 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
39973 index 2766a6d..80c77e2 100644
39974 --- a/drivers/oprofile/oprofilefs.c
39975 +++ b/drivers/oprofile/oprofilefs.c
39976 @@ -187,7 +187,7 @@ static const struct file_operations atomic_ro_fops = {
39977
39978
39979 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
39980 - char const *name, atomic_t *val)
39981 + char const *name, atomic_unchecked_t *val)
39982 {
39983 struct dentry *d = __oprofilefs_create_file(sb, root, name,
39984 &atomic_ro_fops, 0444);
39985 diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
39986 index 13a64bc..ad62835 100644
39987 --- a/drivers/parisc/pdc_stable.c
39988 +++ b/drivers/parisc/pdc_stable.c
39989 @@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj, struct attribute *attr,
39990 return ret;
39991 }
39992
39993 -static struct sysfs_ops pdcspath_attr_ops = {
39994 +static const struct sysfs_ops pdcspath_attr_ops = {
39995 .show = pdcspath_attr_show,
39996 .store = pdcspath_attr_store,
39997 };
39998 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
39999 index 8eefe56..40751a7 100644
40000 --- a/drivers/parport/procfs.c
40001 +++ b/drivers/parport/procfs.c
40002 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
40003
40004 *ppos += len;
40005
40006 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
40007 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
40008 }
40009
40010 #ifdef CONFIG_PARPORT_1284
40011 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
40012
40013 *ppos += len;
40014
40015 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
40016 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
40017 }
40018 #endif /* IEEE1284.3 support. */
40019
40020 diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
40021 index 73e7d8e..c80f3d2 100644
40022 --- a/drivers/pci/hotplug/acpiphp_glue.c
40023 +++ b/drivers/pci/hotplug/acpiphp_glue.c
40024 @@ -111,7 +111,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
40025 }
40026
40027
40028 -static struct acpi_dock_ops acpiphp_dock_ops = {
40029 +static const struct acpi_dock_ops acpiphp_dock_ops = {
40030 .handler = handle_hotplug_event_func,
40031 };
40032
40033 diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
40034 index 9fff878..ad0ad53 100644
40035 --- a/drivers/pci/hotplug/cpci_hotplug.h
40036 +++ b/drivers/pci/hotplug/cpci_hotplug.h
40037 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
40038 int (*hardware_test) (struct slot* slot, u32 value);
40039 u8 (*get_power) (struct slot* slot);
40040 int (*set_power) (struct slot* slot, int value);
40041 -};
40042 +} __no_const;
40043
40044 struct cpci_hp_controller {
40045 unsigned int irq;
40046 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
40047 index 76ba8a1..20ca857 100644
40048 --- a/drivers/pci/hotplug/cpqphp_nvram.c
40049 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
40050 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
40051
40052 void compaq_nvram_init (void __iomem *rom_start)
40053 {
40054 +
40055 +#ifndef CONFIG_PAX_KERNEXEC
40056 if (rom_start) {
40057 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
40058 }
40059 +#endif
40060 +
40061 dbg("int15 entry = %p\n", compaq_int15_entry_point);
40062
40063 /* initialize our int15 lock */
40064 diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
40065 index 6151389..0a894ef 100644
40066 --- a/drivers/pci/hotplug/fakephp.c
40067 +++ b/drivers/pci/hotplug/fakephp.c
40068 @@ -73,7 +73,7 @@ static void legacy_release(struct kobject *kobj)
40069 }
40070
40071 static struct kobj_type legacy_ktype = {
40072 - .sysfs_ops = &(struct sysfs_ops){
40073 + .sysfs_ops = &(const struct sysfs_ops){
40074 .store = legacy_store, .show = legacy_show
40075 },
40076 .release = &legacy_release,
40077 diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
40078 index 5b680df..fe05b7e 100644
40079 --- a/drivers/pci/intel-iommu.c
40080 +++ b/drivers/pci/intel-iommu.c
40081 @@ -2643,7 +2643,7 @@ error:
40082 return 0;
40083 }
40084
40085 -static dma_addr_t intel_map_page(struct device *dev, struct page *page,
40086 +dma_addr_t intel_map_page(struct device *dev, struct page *page,
40087 unsigned long offset, size_t size,
40088 enum dma_data_direction dir,
40089 struct dma_attrs *attrs)
40090 @@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
40091 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
40092 }
40093
40094 -static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
40095 +void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
40096 size_t size, enum dma_data_direction dir,
40097 struct dma_attrs *attrs)
40098 {
40099 @@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
40100 }
40101 }
40102
40103 -static void *intel_alloc_coherent(struct device *hwdev, size_t size,
40104 +void *intel_alloc_coherent(struct device *hwdev, size_t size,
40105 dma_addr_t *dma_handle, gfp_t flags)
40106 {
40107 void *vaddr;
40108 @@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size,
40109 return NULL;
40110 }
40111
40112 -static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
40113 +void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
40114 dma_addr_t dma_handle)
40115 {
40116 int order;
40117 @@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
40118 free_pages((unsigned long)vaddr, order);
40119 }
40120
40121 -static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
40122 +void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
40123 int nelems, enum dma_data_direction dir,
40124 struct dma_attrs *attrs)
40125 {
40126 @@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
40127 return nelems;
40128 }
40129
40130 -static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
40131 +int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
40132 enum dma_data_direction dir, struct dma_attrs *attrs)
40133 {
40134 int i;
40135 @@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
40136 return nelems;
40137 }
40138
40139 -static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
40140 +int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
40141 {
40142 return !dma_addr;
40143 }
40144
40145 -struct dma_map_ops intel_dma_ops = {
40146 +const struct dma_map_ops intel_dma_ops = {
40147 .alloc_coherent = intel_alloc_coherent,
40148 .free_coherent = intel_free_coherent,
40149 .map_sg = intel_map_sg,
40150 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
40151 index 5b7056c..607bc94 100644
40152 --- a/drivers/pci/pcie/aspm.c
40153 +++ b/drivers/pci/pcie/aspm.c
40154 @@ -27,9 +27,9 @@
40155 #define MODULE_PARAM_PREFIX "pcie_aspm."
40156
40157 /* Note: those are not register definitions */
40158 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
40159 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
40160 -#define ASPM_STATE_L1 (4) /* L1 state */
40161 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
40162 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
40163 +#define ASPM_STATE_L1 (4U) /* L1 state */
40164 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
40165 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
40166
40167 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
40168 index 8105e32..ca10419 100644
40169 --- a/drivers/pci/probe.c
40170 +++ b/drivers/pci/probe.c
40171 @@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
40172 return ret;
40173 }
40174
40175 -static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
40176 +static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
40177 struct device_attribute *attr,
40178 char *buf)
40179 {
40180 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
40181 }
40182
40183 -static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
40184 +static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
40185 struct device_attribute *attr,
40186 char *buf)
40187 {
40188 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
40189 index a03ad8c..024b0da 100644
40190 --- a/drivers/pci/proc.c
40191 +++ b/drivers/pci/proc.c
40192 @@ -480,7 +480,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
40193 static int __init pci_proc_init(void)
40194 {
40195 struct pci_dev *dev = NULL;
40196 +
40197 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
40198 +#ifdef CONFIG_GRKERNSEC_PROC_USER
40199 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
40200 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40201 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
40202 +#endif
40203 +#else
40204 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
40205 +#endif
40206 proc_create("devices", 0, proc_bus_pci_dir,
40207 &proc_bus_pci_dev_operations);
40208 proc_initialized = 1;
40209 diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
40210 index 8c02b6c..5584d8e 100644
40211 --- a/drivers/pci/slot.c
40212 +++ b/drivers/pci/slot.c
40213 @@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struct kobject *kobj,
40214 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
40215 }
40216
40217 -static struct sysfs_ops pci_slot_sysfs_ops = {
40218 +static const struct sysfs_ops pci_slot_sysfs_ops = {
40219 .show = pci_slot_attr_show,
40220 .store = pci_slot_attr_store,
40221 };
40222 diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
40223 index 30cf71d2..50938f1 100644
40224 --- a/drivers/pcmcia/pcmcia_ioctl.c
40225 +++ b/drivers/pcmcia/pcmcia_ioctl.c
40226 @@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode, struct file * file,
40227 return -EFAULT;
40228 }
40229 }
40230 - buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
40231 + buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
40232 if (!buf)
40233 return -ENOMEM;
40234
40235 diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
40236 index 52183c4..b224c69 100644
40237 --- a/drivers/platform/x86/acer-wmi.c
40238 +++ b/drivers/platform/x86/acer-wmi.c
40239 @@ -918,7 +918,7 @@ static int update_bl_status(struct backlight_device *bd)
40240 return 0;
40241 }
40242
40243 -static struct backlight_ops acer_bl_ops = {
40244 +static const struct backlight_ops acer_bl_ops = {
40245 .get_brightness = read_brightness,
40246 .update_status = update_bl_status,
40247 };
40248 diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
40249 index 767cb61..a87380b 100644
40250 --- a/drivers/platform/x86/asus-laptop.c
40251 +++ b/drivers/platform/x86/asus-laptop.c
40252 @@ -250,7 +250,7 @@ static struct backlight_device *asus_backlight_device;
40253 */
40254 static int read_brightness(struct backlight_device *bd);
40255 static int update_bl_status(struct backlight_device *bd);
40256 -static struct backlight_ops asusbl_ops = {
40257 +static const struct backlight_ops asusbl_ops = {
40258 .get_brightness = read_brightness,
40259 .update_status = update_bl_status,
40260 };
40261 diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
40262 index d66c07a..a4abaac 100644
40263 --- a/drivers/platform/x86/asus_acpi.c
40264 +++ b/drivers/platform/x86/asus_acpi.c
40265 @@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_device *device, int type)
40266 return 0;
40267 }
40268
40269 -static struct backlight_ops asus_backlight_data = {
40270 +static const struct backlight_ops asus_backlight_data = {
40271 .get_brightness = read_brightness,
40272 .update_status = set_brightness_status,
40273 };
40274 diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
40275 index 11003bb..550ff1b 100644
40276 --- a/drivers/platform/x86/compal-laptop.c
40277 +++ b/drivers/platform/x86/compal-laptop.c
40278 @@ -163,7 +163,7 @@ static int bl_update_status(struct backlight_device *b)
40279 return set_lcd_level(b->props.brightness);
40280 }
40281
40282 -static struct backlight_ops compalbl_ops = {
40283 +static const struct backlight_ops compalbl_ops = {
40284 .get_brightness = bl_get_brightness,
40285 .update_status = bl_update_status,
40286 };
40287 diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
40288 index 07a74da..9dc99fa 100644
40289 --- a/drivers/platform/x86/dell-laptop.c
40290 +++ b/drivers/platform/x86/dell-laptop.c
40291 @@ -318,7 +318,7 @@ static int dell_get_intensity(struct backlight_device *bd)
40292 return buffer.output[1];
40293 }
40294
40295 -static struct backlight_ops dell_ops = {
40296 +static const struct backlight_ops dell_ops = {
40297 .get_brightness = dell_get_intensity,
40298 .update_status = dell_send_intensity,
40299 };
40300 diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
40301 index c533b1c..5c81f22 100644
40302 --- a/drivers/platform/x86/eeepc-laptop.c
40303 +++ b/drivers/platform/x86/eeepc-laptop.c
40304 @@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device;
40305 */
40306 static int read_brightness(struct backlight_device *bd);
40307 static int update_bl_status(struct backlight_device *bd);
40308 -static struct backlight_ops eeepcbl_ops = {
40309 +static const struct backlight_ops eeepcbl_ops = {
40310 .get_brightness = read_brightness,
40311 .update_status = update_bl_status,
40312 };
40313 diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
40314 index bcd4ba8..a249b35 100644
40315 --- a/drivers/platform/x86/fujitsu-laptop.c
40316 +++ b/drivers/platform/x86/fujitsu-laptop.c
40317 @@ -436,7 +436,7 @@ static int bl_update_status(struct backlight_device *b)
40318 return ret;
40319 }
40320
40321 -static struct backlight_ops fujitsubl_ops = {
40322 +static const struct backlight_ops fujitsubl_ops = {
40323 .get_brightness = bl_get_brightness,
40324 .update_status = bl_update_status,
40325 };
40326 diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
40327 index 759763d..1093ba2 100644
40328 --- a/drivers/platform/x86/msi-laptop.c
40329 +++ b/drivers/platform/x86/msi-laptop.c
40330 @@ -161,7 +161,7 @@ static int bl_update_status(struct backlight_device *b)
40331 return set_lcd_level(b->props.brightness);
40332 }
40333
40334 -static struct backlight_ops msibl_ops = {
40335 +static const struct backlight_ops msibl_ops = {
40336 .get_brightness = bl_get_brightness,
40337 .update_status = bl_update_status,
40338 };
40339 diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
40340 index fe7cf01..9012d8d 100644
40341 --- a/drivers/platform/x86/panasonic-laptop.c
40342 +++ b/drivers/platform/x86/panasonic-laptop.c
40343 @@ -352,7 +352,7 @@ static int bl_set_status(struct backlight_device *bd)
40344 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
40345 }
40346
40347 -static struct backlight_ops pcc_backlight_ops = {
40348 +static const struct backlight_ops pcc_backlight_ops = {
40349 .get_brightness = bl_get,
40350 .update_status = bl_set_status,
40351 };
40352 diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
40353 index a2a742c..b37e25e 100644
40354 --- a/drivers/platform/x86/sony-laptop.c
40355 +++ b/drivers/platform/x86/sony-laptop.c
40356 @@ -850,7 +850,7 @@ static int sony_backlight_get_brightness(struct backlight_device *bd)
40357 }
40358
40359 static struct backlight_device *sony_backlight_device;
40360 -static struct backlight_ops sony_backlight_ops = {
40361 +static const struct backlight_ops sony_backlight_ops = {
40362 .update_status = sony_backlight_update_status,
40363 .get_brightness = sony_backlight_get_brightness,
40364 };
40365 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
40366 index 68271ae..5e8fb10 100644
40367 --- a/drivers/platform/x86/thinkpad_acpi.c
40368 +++ b/drivers/platform/x86/thinkpad_acpi.c
40369 @@ -2139,7 +2139,7 @@ static int hotkey_mask_get(void)
40370 return 0;
40371 }
40372
40373 -void static hotkey_mask_warn_incomplete_mask(void)
40374 +static void hotkey_mask_warn_incomplete_mask(void)
40375 {
40376 /* log only what the user can fix... */
40377 const u32 wantedmask = hotkey_driver_mask &
40378 @@ -6125,7 +6125,7 @@ static void tpacpi_brightness_notify_change(void)
40379 BACKLIGHT_UPDATE_HOTKEY);
40380 }
40381
40382 -static struct backlight_ops ibm_backlight_data = {
40383 +static const struct backlight_ops ibm_backlight_data = {
40384 .get_brightness = brightness_get,
40385 .update_status = brightness_update_status,
40386 };
40387 diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
40388 index 51c0a8b..0786629 100644
40389 --- a/drivers/platform/x86/toshiba_acpi.c
40390 +++ b/drivers/platform/x86/toshiba_acpi.c
40391 @@ -671,7 +671,7 @@ static acpi_status remove_device(void)
40392 return AE_OK;
40393 }
40394
40395 -static struct backlight_ops toshiba_backlight_data = {
40396 +static const struct backlight_ops toshiba_backlight_data = {
40397 .get_brightness = get_lcd,
40398 .update_status = set_lcd_status,
40399 };
40400 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
40401 index fc83783c..cf370d7 100644
40402 --- a/drivers/pnp/pnpbios/bioscalls.c
40403 +++ b/drivers/pnp/pnpbios/bioscalls.c
40404 @@ -60,7 +60,7 @@ do { \
40405 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
40406 } while(0)
40407
40408 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
40409 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
40410 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
40411
40412 /*
40413 @@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40414
40415 cpu = get_cpu();
40416 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
40417 +
40418 + pax_open_kernel();
40419 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
40420 + pax_close_kernel();
40421
40422 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
40423 spin_lock_irqsave(&pnp_bios_lock, flags);
40424 @@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40425 :"memory");
40426 spin_unlock_irqrestore(&pnp_bios_lock, flags);
40427
40428 + pax_open_kernel();
40429 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
40430 + pax_close_kernel();
40431 +
40432 put_cpu();
40433
40434 /* If we get here and this is set then the PnP BIOS faulted on us. */
40435 @@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
40436 return status;
40437 }
40438
40439 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
40440 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
40441 {
40442 int i;
40443
40444 @@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40445 pnp_bios_callpoint.offset = header->fields.pm16offset;
40446 pnp_bios_callpoint.segment = PNP_CS16;
40447
40448 + pax_open_kernel();
40449 +
40450 for_each_possible_cpu(i) {
40451 struct desc_struct *gdt = get_cpu_gdt_table(i);
40452 if (!gdt)
40453 @@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40454 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
40455 (unsigned long)__va(header->fields.pm16dseg));
40456 }
40457 +
40458 + pax_close_kernel();
40459 }
40460 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
40461 index ba97654..66b99d4 100644
40462 --- a/drivers/pnp/resource.c
40463 +++ b/drivers/pnp/resource.c
40464 @@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
40465 return 1;
40466
40467 /* check if the resource is valid */
40468 - if (*irq < 0 || *irq > 15)
40469 + if (*irq > 15)
40470 return 0;
40471
40472 /* check if the resource is reserved */
40473 @@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
40474 return 1;
40475
40476 /* check if the resource is valid */
40477 - if (*dma < 0 || *dma == 4 || *dma > 7)
40478 + if (*dma == 4 || *dma > 7)
40479 return 0;
40480
40481 /* check if the resource is reserved */
40482 diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
40483 index 62bb981..24a2dc9 100644
40484 --- a/drivers/power/bq27x00_battery.c
40485 +++ b/drivers/power/bq27x00_battery.c
40486 @@ -44,7 +44,7 @@ struct bq27x00_device_info;
40487 struct bq27x00_access_methods {
40488 int (*read)(u8 reg, int *rt_value, int b_single,
40489 struct bq27x00_device_info *di);
40490 -};
40491 +} __no_const;
40492
40493 struct bq27x00_device_info {
40494 struct device *dev;
40495 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
40496 index 62227cd..b5b538b 100644
40497 --- a/drivers/rtc/rtc-dev.c
40498 +++ b/drivers/rtc/rtc-dev.c
40499 @@ -14,6 +14,7 @@
40500 #include <linux/module.h>
40501 #include <linux/rtc.h>
40502 #include <linux/sched.h>
40503 +#include <linux/grsecurity.h>
40504 #include "rtc-core.h"
40505
40506 static dev_t rtc_devt;
40507 @@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *file,
40508 if (copy_from_user(&tm, uarg, sizeof(tm)))
40509 return -EFAULT;
40510
40511 + gr_log_timechange();
40512 +
40513 return rtc_set_time(rtc, &tm);
40514
40515 case RTC_PIE_ON:
40516 diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
40517 index 968e3c7..fbc637a 100644
40518 --- a/drivers/s390/cio/qdio_perf.c
40519 +++ b/drivers/s390/cio/qdio_perf.c
40520 @@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_pde;
40521 static int qdio_perf_proc_show(struct seq_file *m, void *v)
40522 {
40523 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
40524 - (long)atomic_long_read(&perf_stats.qdio_int));
40525 + (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
40526 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
40527 - (long)atomic_long_read(&perf_stats.pci_int));
40528 + (long)atomic_long_read_unchecked(&perf_stats.pci_int));
40529 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
40530 - (long)atomic_long_read(&perf_stats.thin_int));
40531 + (long)atomic_long_read_unchecked(&perf_stats.thin_int));
40532 seq_printf(m, "\n");
40533 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
40534 - (long)atomic_long_read(&perf_stats.tasklet_inbound));
40535 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
40536 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
40537 - (long)atomic_long_read(&perf_stats.tasklet_outbound));
40538 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
40539 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
40540 - (long)atomic_long_read(&perf_stats.tasklet_thinint),
40541 - (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
40542 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
40543 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
40544 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
40545 - (long)atomic_long_read(&perf_stats.thinint_inbound),
40546 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
40547 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
40548 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
40549 seq_printf(m, "\n");
40550 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
40551 - (long)atomic_long_read(&perf_stats.siga_in));
40552 + (long)atomic_long_read_unchecked(&perf_stats.siga_in));
40553 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
40554 - (long)atomic_long_read(&perf_stats.siga_out));
40555 + (long)atomic_long_read_unchecked(&perf_stats.siga_out));
40556 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
40557 - (long)atomic_long_read(&perf_stats.siga_sync));
40558 + (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
40559 seq_printf(m, "\n");
40560 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
40561 - (long)atomic_long_read(&perf_stats.inbound_handler));
40562 + (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
40563 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
40564 - (long)atomic_long_read(&perf_stats.outbound_handler));
40565 + (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
40566 seq_printf(m, "\n");
40567 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
40568 - (long)atomic_long_read(&perf_stats.fast_requeue));
40569 + (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
40570 seq_printf(m, "Number of outbound target full condition\t: %li\n",
40571 - (long)atomic_long_read(&perf_stats.outbound_target_full));
40572 + (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
40573 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
40574 - (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
40575 + (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
40576 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
40577 - (long)atomic_long_read(&perf_stats.debug_stop_polling));
40578 + (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
40579 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
40580 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
40581 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
40582 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
40583 - (long)atomic_long_read(&perf_stats.debug_eqbs_all),
40584 - (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
40585 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
40586 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
40587 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
40588 - (long)atomic_long_read(&perf_stats.debug_sqbs_all),
40589 - (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
40590 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
40591 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
40592 seq_printf(m, "\n");
40593 return 0;
40594 }
40595 diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
40596 index ff4504c..b3604c3 100644
40597 --- a/drivers/s390/cio/qdio_perf.h
40598 +++ b/drivers/s390/cio/qdio_perf.h
40599 @@ -13,46 +13,46 @@
40600
40601 struct qdio_perf_stats {
40602 /* interrupt handler calls */
40603 - atomic_long_t qdio_int;
40604 - atomic_long_t pci_int;
40605 - atomic_long_t thin_int;
40606 + atomic_long_unchecked_t qdio_int;
40607 + atomic_long_unchecked_t pci_int;
40608 + atomic_long_unchecked_t thin_int;
40609
40610 /* tasklet runs */
40611 - atomic_long_t tasklet_inbound;
40612 - atomic_long_t tasklet_outbound;
40613 - atomic_long_t tasklet_thinint;
40614 - atomic_long_t tasklet_thinint_loop;
40615 - atomic_long_t thinint_inbound;
40616 - atomic_long_t thinint_inbound_loop;
40617 - atomic_long_t thinint_inbound_loop2;
40618 + atomic_long_unchecked_t tasklet_inbound;
40619 + atomic_long_unchecked_t tasklet_outbound;
40620 + atomic_long_unchecked_t tasklet_thinint;
40621 + atomic_long_unchecked_t tasklet_thinint_loop;
40622 + atomic_long_unchecked_t thinint_inbound;
40623 + atomic_long_unchecked_t thinint_inbound_loop;
40624 + atomic_long_unchecked_t thinint_inbound_loop2;
40625
40626 /* signal adapter calls */
40627 - atomic_long_t siga_out;
40628 - atomic_long_t siga_in;
40629 - atomic_long_t siga_sync;
40630 + atomic_long_unchecked_t siga_out;
40631 + atomic_long_unchecked_t siga_in;
40632 + atomic_long_unchecked_t siga_sync;
40633
40634 /* misc */
40635 - atomic_long_t inbound_handler;
40636 - atomic_long_t outbound_handler;
40637 - atomic_long_t fast_requeue;
40638 - atomic_long_t outbound_target_full;
40639 + atomic_long_unchecked_t inbound_handler;
40640 + atomic_long_unchecked_t outbound_handler;
40641 + atomic_long_unchecked_t fast_requeue;
40642 + atomic_long_unchecked_t outbound_target_full;
40643
40644 /* for debugging */
40645 - atomic_long_t debug_tl_out_timer;
40646 - atomic_long_t debug_stop_polling;
40647 - atomic_long_t debug_eqbs_all;
40648 - atomic_long_t debug_eqbs_incomplete;
40649 - atomic_long_t debug_sqbs_all;
40650 - atomic_long_t debug_sqbs_incomplete;
40651 + atomic_long_unchecked_t debug_tl_out_timer;
40652 + atomic_long_unchecked_t debug_stop_polling;
40653 + atomic_long_unchecked_t debug_eqbs_all;
40654 + atomic_long_unchecked_t debug_eqbs_incomplete;
40655 + atomic_long_unchecked_t debug_sqbs_all;
40656 + atomic_long_unchecked_t debug_sqbs_incomplete;
40657 };
40658
40659 extern struct qdio_perf_stats perf_stats;
40660 extern int qdio_performance_stats;
40661
40662 -static inline void qdio_perf_stat_inc(atomic_long_t *count)
40663 +static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
40664 {
40665 if (qdio_performance_stats)
40666 - atomic_long_inc(count);
40667 + atomic_long_inc_unchecked(count);
40668 }
40669
40670 int qdio_setup_perf_stats(void);
40671 diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
40672 index 1ddcf40..a85f062 100644
40673 --- a/drivers/scsi/BusLogic.c
40674 +++ b/drivers/scsi/BusLogic.c
40675 @@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda
40676 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
40677 *PrototypeHostAdapter)
40678 {
40679 + pax_track_stack();
40680 +
40681 /*
40682 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
40683 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
40684 diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
40685 index cdbdec9..b7d560b 100644
40686 --- a/drivers/scsi/aacraid/aacraid.h
40687 +++ b/drivers/scsi/aacraid/aacraid.h
40688 @@ -471,7 +471,7 @@ struct adapter_ops
40689 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
40690 /* Administrative operations */
40691 int (*adapter_comm)(struct aac_dev * dev, int comm);
40692 -};
40693 +} __no_const;
40694
40695 /*
40696 * Define which interrupt handler needs to be installed
40697 diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
40698 index a5b8e7b..a6a0e43 100644
40699 --- a/drivers/scsi/aacraid/commctrl.c
40700 +++ b/drivers/scsi/aacraid/commctrl.c
40701 @@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
40702 u32 actual_fibsize64, actual_fibsize = 0;
40703 int i;
40704
40705 + pax_track_stack();
40706
40707 if (dev->in_reset) {
40708 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
40709 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
40710 index 9b97c3e..f099725 100644
40711 --- a/drivers/scsi/aacraid/linit.c
40712 +++ b/drivers/scsi/aacraid/linit.c
40713 @@ -91,7 +91,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
40714 #elif defined(__devinitconst)
40715 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
40716 #else
40717 -static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
40718 +static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
40719 #endif
40720 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
40721 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
40722 diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
40723 index 996f722..9127845 100644
40724 --- a/drivers/scsi/aic94xx/aic94xx_init.c
40725 +++ b/drivers/scsi/aic94xx/aic94xx_init.c
40726 @@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(struct device *dev,
40727 flash_error_table[i].reason);
40728 }
40729
40730 -static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
40731 +static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
40732 asd_show_update_bios, asd_store_update_bios);
40733
40734 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
40735 @@ -1011,7 +1011,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
40736 .lldd_control_phy = asd_control_phy,
40737 };
40738
40739 -static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
40740 +static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
40741 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
40742 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
40743 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
40744 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
40745 index 58efd4b..cb48dc7 100644
40746 --- a/drivers/scsi/bfa/bfa_ioc.h
40747 +++ b/drivers/scsi/bfa/bfa_ioc.h
40748 @@ -127,7 +127,7 @@ struct bfa_ioc_cbfn_s {
40749 bfa_ioc_disable_cbfn_t disable_cbfn;
40750 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
40751 bfa_ioc_reset_cbfn_t reset_cbfn;
40752 -};
40753 +} __no_const;
40754
40755 /**
40756 * Heartbeat failure notification queue element.
40757 diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h
40758 index 7ad177e..5503586 100644
40759 --- a/drivers/scsi/bfa/bfa_iocfc.h
40760 +++ b/drivers/scsi/bfa/bfa_iocfc.h
40761 @@ -61,7 +61,7 @@ struct bfa_hwif_s {
40762 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
40763 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
40764 u32 *nvecs, u32 *maxvec);
40765 -};
40766 +} __no_const;
40767 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
40768
40769 struct bfa_iocfc_s {
40770 diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
40771 index 4967643..cbec06b 100644
40772 --- a/drivers/scsi/dpt_i2o.c
40773 +++ b/drivers/scsi/dpt_i2o.c
40774 @@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
40775 dma_addr_t addr;
40776 ulong flags = 0;
40777
40778 + pax_track_stack();
40779 +
40780 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
40781 // get user msg size in u32s
40782 if(get_user(size, &user_msg[0])){
40783 @@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
40784 s32 rcode;
40785 dma_addr_t addr;
40786
40787 + pax_track_stack();
40788 +
40789 memset(msg, 0 , sizeof(msg));
40790 len = scsi_bufflen(cmd);
40791 direction = 0x00000000;
40792 diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
40793 index c7076ce..e20c67c 100644
40794 --- a/drivers/scsi/eata.c
40795 +++ b/drivers/scsi/eata.c
40796 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long port_base, unsigned int j,
40797 struct hostdata *ha;
40798 char name[16];
40799
40800 + pax_track_stack();
40801 +
40802 sprintf(name, "%s%d", driver_name, j);
40803
40804 if (!request_region(port_base, REGION_SIZE, driver_name)) {
40805 diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
40806 index 11ae5c9..891daec 100644
40807 --- a/drivers/scsi/fcoe/libfcoe.c
40808 +++ b/drivers/scsi/fcoe/libfcoe.c
40809 @@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
40810 size_t rlen;
40811 size_t dlen;
40812
40813 + pax_track_stack();
40814 +
40815 fiph = (struct fip_header *)skb->data;
40816 sub = fiph->fip_subcode;
40817 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
40818 diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
40819 index 71c7bbe..e93088a 100644
40820 --- a/drivers/scsi/fnic/fnic_main.c
40821 +++ b/drivers/scsi/fnic/fnic_main.c
40822 @@ -669,7 +669,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
40823 /* Start local port initiatialization */
40824
40825 lp->link_up = 0;
40826 - lp->tt = fnic_transport_template;
40827 + memcpy((void *)&lp->tt, &fnic_transport_template, sizeof(fnic_transport_template));
40828
40829 lp->max_retry_count = fnic->config.flogi_retries;
40830 lp->max_rport_retry_count = fnic->config.plogi_retries;
40831 diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
40832 index bb96d74..9ec3ce4 100644
40833 --- a/drivers/scsi/gdth.c
40834 +++ b/drivers/scsi/gdth.c
40835 @@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
40836 ulong flags;
40837 gdth_ha_str *ha;
40838
40839 + pax_track_stack();
40840 +
40841 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
40842 return -EFAULT;
40843 ha = gdth_find_ha(ldrv.ionode);
40844 @@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg, char *cmnd)
40845 gdth_ha_str *ha;
40846 int rval;
40847
40848 + pax_track_stack();
40849 +
40850 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
40851 res.number >= MAX_HDRIVES)
40852 return -EFAULT;
40853 @@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg, char *cmnd)
40854 gdth_ha_str *ha;
40855 int rval;
40856
40857 + pax_track_stack();
40858 +
40859 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
40860 return -EFAULT;
40861 ha = gdth_find_ha(gen.ionode);
40862 @@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
40863 int i;
40864 gdth_cmd_str gdtcmd;
40865 char cmnd[MAX_COMMAND_SIZE];
40866 +
40867 + pax_track_stack();
40868 +
40869 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
40870
40871 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
40872 diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
40873 index 1258da3..20d8ae6 100644
40874 --- a/drivers/scsi/gdth_proc.c
40875 +++ b/drivers/scsi/gdth_proc.c
40876 @@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
40877 ulong64 paddr;
40878
40879 char cmnd[MAX_COMMAND_SIZE];
40880 +
40881 + pax_track_stack();
40882 +
40883 memset(cmnd, 0xff, 12);
40884 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
40885
40886 @@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
40887 gdth_hget_str *phg;
40888 char cmnd[MAX_COMMAND_SIZE];
40889
40890 + pax_track_stack();
40891 +
40892 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
40893 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
40894 if (!gdtcmd || !estr)
40895 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
40896 index d03a926..f324286 100644
40897 --- a/drivers/scsi/hosts.c
40898 +++ b/drivers/scsi/hosts.c
40899 @@ -40,7 +40,7 @@
40900 #include "scsi_logging.h"
40901
40902
40903 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
40904 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
40905
40906
40907 static void scsi_host_cls_release(struct device *dev)
40908 @@ -347,7 +347,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
40909 * subtract one because we increment first then return, but we need to
40910 * know what the next host number was before increment
40911 */
40912 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
40913 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
40914 shost->dma_channel = 0xff;
40915
40916 /* These three are default values which can be overridden */
40917 diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
40918 index a601159..55e19d2 100644
40919 --- a/drivers/scsi/ipr.c
40920 +++ b/drivers/scsi/ipr.c
40921 @@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
40922 return true;
40923 }
40924
40925 -static struct ata_port_operations ipr_sata_ops = {
40926 +static const struct ata_port_operations ipr_sata_ops = {
40927 .phy_reset = ipr_ata_phy_reset,
40928 .hardreset = ipr_sata_reset,
40929 .post_internal_cmd = ipr_ata_post_internal,
40930 diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
40931 index 4e49fbc..97907ff 100644
40932 --- a/drivers/scsi/ips.h
40933 +++ b/drivers/scsi/ips.h
40934 @@ -1027,7 +1027,7 @@ typedef struct {
40935 int (*intr)(struct ips_ha *);
40936 void (*enableint)(struct ips_ha *);
40937 uint32_t (*statupd)(struct ips_ha *);
40938 -} ips_hw_func_t;
40939 +} __no_const ips_hw_func_t;
40940
40941 typedef struct ips_ha {
40942 uint8_t ha_id[IPS_MAX_CHANNELS+1];
40943 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
40944 index c1c1574..a9c9348 100644
40945 --- a/drivers/scsi/libfc/fc_exch.c
40946 +++ b/drivers/scsi/libfc/fc_exch.c
40947 @@ -86,12 +86,12 @@ struct fc_exch_mgr {
40948 * all together if not used XXX
40949 */
40950 struct {
40951 - atomic_t no_free_exch;
40952 - atomic_t no_free_exch_xid;
40953 - atomic_t xid_not_found;
40954 - atomic_t xid_busy;
40955 - atomic_t seq_not_found;
40956 - atomic_t non_bls_resp;
40957 + atomic_unchecked_t no_free_exch;
40958 + atomic_unchecked_t no_free_exch_xid;
40959 + atomic_unchecked_t xid_not_found;
40960 + atomic_unchecked_t xid_busy;
40961 + atomic_unchecked_t seq_not_found;
40962 + atomic_unchecked_t non_bls_resp;
40963 } stats;
40964 };
40965 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
40966 @@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
40967 /* allocate memory for exchange */
40968 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
40969 if (!ep) {
40970 - atomic_inc(&mp->stats.no_free_exch);
40971 + atomic_inc_unchecked(&mp->stats.no_free_exch);
40972 goto out;
40973 }
40974 memset(ep, 0, sizeof(*ep));
40975 @@ -557,7 +557,7 @@ out:
40976 return ep;
40977 err:
40978 spin_unlock_bh(&pool->lock);
40979 - atomic_inc(&mp->stats.no_free_exch_xid);
40980 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
40981 mempool_free(ep, mp->ep_pool);
40982 return NULL;
40983 }
40984 @@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40985 xid = ntohs(fh->fh_ox_id); /* we originated exch */
40986 ep = fc_exch_find(mp, xid);
40987 if (!ep) {
40988 - atomic_inc(&mp->stats.xid_not_found);
40989 + atomic_inc_unchecked(&mp->stats.xid_not_found);
40990 reject = FC_RJT_OX_ID;
40991 goto out;
40992 }
40993 @@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40994 ep = fc_exch_find(mp, xid);
40995 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
40996 if (ep) {
40997 - atomic_inc(&mp->stats.xid_busy);
40998 + atomic_inc_unchecked(&mp->stats.xid_busy);
40999 reject = FC_RJT_RX_ID;
41000 goto rel;
41001 }
41002 @@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41003 }
41004 xid = ep->xid; /* get our XID */
41005 } else if (!ep) {
41006 - atomic_inc(&mp->stats.xid_not_found);
41007 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41008 reject = FC_RJT_RX_ID; /* XID not found */
41009 goto out;
41010 }
41011 @@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41012 } else {
41013 sp = &ep->seq;
41014 if (sp->id != fh->fh_seq_id) {
41015 - atomic_inc(&mp->stats.seq_not_found);
41016 + atomic_inc_unchecked(&mp->stats.seq_not_found);
41017 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
41018 goto rel;
41019 }
41020 @@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41021
41022 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
41023 if (!ep) {
41024 - atomic_inc(&mp->stats.xid_not_found);
41025 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41026 goto out;
41027 }
41028 if (ep->esb_stat & ESB_ST_COMPLETE) {
41029 - atomic_inc(&mp->stats.xid_not_found);
41030 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41031 goto out;
41032 }
41033 if (ep->rxid == FC_XID_UNKNOWN)
41034 ep->rxid = ntohs(fh->fh_rx_id);
41035 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
41036 - atomic_inc(&mp->stats.xid_not_found);
41037 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41038 goto rel;
41039 }
41040 if (ep->did != ntoh24(fh->fh_s_id) &&
41041 ep->did != FC_FID_FLOGI) {
41042 - atomic_inc(&mp->stats.xid_not_found);
41043 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41044 goto rel;
41045 }
41046 sof = fr_sof(fp);
41047 @@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41048 } else {
41049 sp = &ep->seq;
41050 if (sp->id != fh->fh_seq_id) {
41051 - atomic_inc(&mp->stats.seq_not_found);
41052 + atomic_inc_unchecked(&mp->stats.seq_not_found);
41053 goto rel;
41054 }
41055 }
41056 @@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41057 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
41058
41059 if (!sp)
41060 - atomic_inc(&mp->stats.xid_not_found);
41061 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41062 else
41063 - atomic_inc(&mp->stats.non_bls_resp);
41064 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
41065
41066 fc_frame_free(fp);
41067 }
41068 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
41069 index 0ee989f..a582241 100644
41070 --- a/drivers/scsi/libsas/sas_ata.c
41071 +++ b/drivers/scsi/libsas/sas_ata.c
41072 @@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
41073 }
41074 }
41075
41076 -static struct ata_port_operations sas_sata_ops = {
41077 +static const struct ata_port_operations sas_sata_ops = {
41078 .phy_reset = sas_ata_phy_reset,
41079 .post_internal_cmd = sas_ata_post_internal,
41080 .qc_defer = ata_std_qc_defer,
41081 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
41082 index aa10f79..5cc79e4 100644
41083 --- a/drivers/scsi/lpfc/lpfc.h
41084 +++ b/drivers/scsi/lpfc/lpfc.h
41085 @@ -400,7 +400,7 @@ struct lpfc_vport {
41086 struct dentry *debug_nodelist;
41087 struct dentry *vport_debugfs_root;
41088 struct lpfc_debugfs_trc *disc_trc;
41089 - atomic_t disc_trc_cnt;
41090 + atomic_unchecked_t disc_trc_cnt;
41091 #endif
41092 uint8_t stat_data_enabled;
41093 uint8_t stat_data_blocked;
41094 @@ -725,8 +725,8 @@ struct lpfc_hba {
41095 struct timer_list fabric_block_timer;
41096 unsigned long bit_flags;
41097 #define FABRIC_COMANDS_BLOCKED 0
41098 - atomic_t num_rsrc_err;
41099 - atomic_t num_cmd_success;
41100 + atomic_unchecked_t num_rsrc_err;
41101 + atomic_unchecked_t num_cmd_success;
41102 unsigned long last_rsrc_error_time;
41103 unsigned long last_ramp_down_time;
41104 unsigned long last_ramp_up_time;
41105 @@ -740,7 +740,7 @@ struct lpfc_hba {
41106 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
41107 struct dentry *debug_slow_ring_trc;
41108 struct lpfc_debugfs_trc *slow_ring_trc;
41109 - atomic_t slow_ring_trc_cnt;
41110 + atomic_unchecked_t slow_ring_trc_cnt;
41111 #endif
41112
41113 /* Used for deferred freeing of ELS data buffers */
41114 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
41115 index 8d0f0de..7c77a62 100644
41116 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
41117 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
41118 @@ -124,7 +124,7 @@ struct lpfc_debug {
41119 int len;
41120 };
41121
41122 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
41123 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
41124 static unsigned long lpfc_debugfs_start_time = 0L;
41125
41126 /**
41127 @@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
41128 lpfc_debugfs_enable = 0;
41129
41130 len = 0;
41131 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
41132 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
41133 (lpfc_debugfs_max_disc_trc - 1);
41134 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
41135 dtp = vport->disc_trc + i;
41136 @@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
41137 lpfc_debugfs_enable = 0;
41138
41139 len = 0;
41140 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
41141 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
41142 (lpfc_debugfs_max_slow_ring_trc - 1);
41143 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
41144 dtp = phba->slow_ring_trc + i;
41145 @@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
41146 uint32_t *ptr;
41147 char buffer[1024];
41148
41149 + pax_track_stack();
41150 +
41151 off = 0;
41152 spin_lock_irq(&phba->hbalock);
41153
41154 @@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
41155 !vport || !vport->disc_trc)
41156 return;
41157
41158 - index = atomic_inc_return(&vport->disc_trc_cnt) &
41159 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
41160 (lpfc_debugfs_max_disc_trc - 1);
41161 dtp = vport->disc_trc + index;
41162 dtp->fmt = fmt;
41163 dtp->data1 = data1;
41164 dtp->data2 = data2;
41165 dtp->data3 = data3;
41166 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
41167 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
41168 dtp->jif = jiffies;
41169 #endif
41170 return;
41171 @@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
41172 !phba || !phba->slow_ring_trc)
41173 return;
41174
41175 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
41176 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
41177 (lpfc_debugfs_max_slow_ring_trc - 1);
41178 dtp = phba->slow_ring_trc + index;
41179 dtp->fmt = fmt;
41180 dtp->data1 = data1;
41181 dtp->data2 = data2;
41182 dtp->data3 = data3;
41183 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
41184 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
41185 dtp->jif = jiffies;
41186 #endif
41187 return;
41188 @@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
41189 "slow_ring buffer\n");
41190 goto debug_failed;
41191 }
41192 - atomic_set(&phba->slow_ring_trc_cnt, 0);
41193 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
41194 memset(phba->slow_ring_trc, 0,
41195 (sizeof(struct lpfc_debugfs_trc) *
41196 lpfc_debugfs_max_slow_ring_trc));
41197 @@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
41198 "buffer\n");
41199 goto debug_failed;
41200 }
41201 - atomic_set(&vport->disc_trc_cnt, 0);
41202 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
41203
41204 snprintf(name, sizeof(name), "discovery_trace");
41205 vport->debug_disc_trc =
41206 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
41207 index 549bc7d..8189dbb 100644
41208 --- a/drivers/scsi/lpfc/lpfc_init.c
41209 +++ b/drivers/scsi/lpfc/lpfc_init.c
41210 @@ -8021,8 +8021,10 @@ lpfc_init(void)
41211 printk(LPFC_COPYRIGHT "\n");
41212
41213 if (lpfc_enable_npiv) {
41214 - lpfc_transport_functions.vport_create = lpfc_vport_create;
41215 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
41216 + pax_open_kernel();
41217 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
41218 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
41219 + pax_close_kernel();
41220 }
41221 lpfc_transport_template =
41222 fc_attach_transport(&lpfc_transport_functions);
41223 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
41224 index c88f59f..ff2a42f 100644
41225 --- a/drivers/scsi/lpfc/lpfc_scsi.c
41226 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
41227 @@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
41228 uint32_t evt_posted;
41229
41230 spin_lock_irqsave(&phba->hbalock, flags);
41231 - atomic_inc(&phba->num_rsrc_err);
41232 + atomic_inc_unchecked(&phba->num_rsrc_err);
41233 phba->last_rsrc_error_time = jiffies;
41234
41235 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
41236 @@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
41237 unsigned long flags;
41238 struct lpfc_hba *phba = vport->phba;
41239 uint32_t evt_posted;
41240 - atomic_inc(&phba->num_cmd_success);
41241 + atomic_inc_unchecked(&phba->num_cmd_success);
41242
41243 if (vport->cfg_lun_queue_depth <= queue_depth)
41244 return;
41245 @@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
41246 int i;
41247 struct lpfc_rport_data *rdata;
41248
41249 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
41250 - num_cmd_success = atomic_read(&phba->num_cmd_success);
41251 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
41252 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
41253
41254 vports = lpfc_create_vport_work_array(phba);
41255 if (vports != NULL)
41256 @@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
41257 }
41258 }
41259 lpfc_destroy_vport_work_array(phba, vports);
41260 - atomic_set(&phba->num_rsrc_err, 0);
41261 - atomic_set(&phba->num_cmd_success, 0);
41262 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
41263 + atomic_set_unchecked(&phba->num_cmd_success, 0);
41264 }
41265
41266 /**
41267 @@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
41268 }
41269 }
41270 lpfc_destroy_vport_work_array(phba, vports);
41271 - atomic_set(&phba->num_rsrc_err, 0);
41272 - atomic_set(&phba->num_cmd_success, 0);
41273 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
41274 + atomic_set_unchecked(&phba->num_cmd_success, 0);
41275 }
41276
41277 /**
41278 diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
41279 index 234f0b7..3020aea 100644
41280 --- a/drivers/scsi/megaraid/megaraid_mbox.c
41281 +++ b/drivers/scsi/megaraid/megaraid_mbox.c
41282 @@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter)
41283 int rval;
41284 int i;
41285
41286 + pax_track_stack();
41287 +
41288 // Allocate memory for the base list of scb for management module.
41289 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
41290
41291 diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
41292 index 7a117c1..ee01e9e 100644
41293 --- a/drivers/scsi/osd/osd_initiator.c
41294 +++ b/drivers/scsi/osd/osd_initiator.c
41295 @@ -94,6 +94,8 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
41296 int nelem = ARRAY_SIZE(get_attrs), a = 0;
41297 int ret;
41298
41299 + pax_track_stack();
41300 +
41301 or = osd_start_request(od, GFP_KERNEL);
41302 if (!or)
41303 return -ENOMEM;
41304 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
41305 index 9ab8c86..9425ad3 100644
41306 --- a/drivers/scsi/pmcraid.c
41307 +++ b/drivers/scsi/pmcraid.c
41308 @@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
41309 res->scsi_dev = scsi_dev;
41310 scsi_dev->hostdata = res;
41311 res->change_detected = 0;
41312 - atomic_set(&res->read_failures, 0);
41313 - atomic_set(&res->write_failures, 0);
41314 + atomic_set_unchecked(&res->read_failures, 0);
41315 + atomic_set_unchecked(&res->write_failures, 0);
41316 rc = 0;
41317 }
41318 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
41319 @@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
41320
41321 /* If this was a SCSI read/write command keep count of errors */
41322 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
41323 - atomic_inc(&res->read_failures);
41324 + atomic_inc_unchecked(&res->read_failures);
41325 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
41326 - atomic_inc(&res->write_failures);
41327 + atomic_inc_unchecked(&res->write_failures);
41328
41329 if (!RES_IS_GSCSI(res->cfg_entry) &&
41330 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
41331 @@ -4116,7 +4116,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
41332
41333 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
41334 /* add resources only after host is added into system */
41335 - if (!atomic_read(&pinstance->expose_resources))
41336 + if (!atomic_read_unchecked(&pinstance->expose_resources))
41337 return;
41338
41339 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
41340 @@ -4850,7 +4850,7 @@ static int __devinit pmcraid_init_instance(
41341 init_waitqueue_head(&pinstance->reset_wait_q);
41342
41343 atomic_set(&pinstance->outstanding_cmds, 0);
41344 - atomic_set(&pinstance->expose_resources, 0);
41345 + atomic_set_unchecked(&pinstance->expose_resources, 0);
41346
41347 INIT_LIST_HEAD(&pinstance->free_res_q);
41348 INIT_LIST_HEAD(&pinstance->used_res_q);
41349 @@ -5502,7 +5502,7 @@ static int __devinit pmcraid_probe(
41350 /* Schedule worker thread to handle CCN and take care of adding and
41351 * removing devices to OS
41352 */
41353 - atomic_set(&pinstance->expose_resources, 1);
41354 + atomic_set_unchecked(&pinstance->expose_resources, 1);
41355 schedule_work(&pinstance->worker_q);
41356 return rc;
41357
41358 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
41359 index 3441b3f..6cbe8f7 100644
41360 --- a/drivers/scsi/pmcraid.h
41361 +++ b/drivers/scsi/pmcraid.h
41362 @@ -690,7 +690,7 @@ struct pmcraid_instance {
41363 atomic_t outstanding_cmds;
41364
41365 /* should add/delete resources to mid-layer now ?*/
41366 - atomic_t expose_resources;
41367 + atomic_unchecked_t expose_resources;
41368
41369 /* Tasklet to handle deferred processing */
41370 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
41371 @@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
41372 struct list_head queue; /* link to "to be exposed" resources */
41373 struct pmcraid_config_table_entry cfg_entry;
41374 struct scsi_device *scsi_dev; /* Link scsi_device structure */
41375 - atomic_t read_failures; /* count of failed READ commands */
41376 - atomic_t write_failures; /* count of failed WRITE commands */
41377 + atomic_unchecked_t read_failures; /* count of failed READ commands */
41378 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
41379
41380 /* To indicate add/delete/modify during CCN */
41381 u8 change_detected;
41382 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
41383 index 2150618..7034215 100644
41384 --- a/drivers/scsi/qla2xxx/qla_def.h
41385 +++ b/drivers/scsi/qla2xxx/qla_def.h
41386 @@ -2089,7 +2089,7 @@ struct isp_operations {
41387
41388 int (*get_flash_version) (struct scsi_qla_host *, void *);
41389 int (*start_scsi) (srb_t *);
41390 -};
41391 +} __no_const;
41392
41393 /* MSI-X Support *************************************************************/
41394
41395 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
41396 index 81b5f29..2ae1fad 100644
41397 --- a/drivers/scsi/qla4xxx/ql4_def.h
41398 +++ b/drivers/scsi/qla4xxx/ql4_def.h
41399 @@ -240,7 +240,7 @@ struct ddb_entry {
41400 atomic_t retry_relogin_timer; /* Min Time between relogins
41401 * (4000 only) */
41402 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
41403 - atomic_t relogin_retry_count; /* Num of times relogin has been
41404 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
41405 * retried */
41406
41407 uint16_t port;
41408 diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
41409 index af8c323..515dd51 100644
41410 --- a/drivers/scsi/qla4xxx/ql4_init.c
41411 +++ b/drivers/scsi/qla4xxx/ql4_init.c
41412 @@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
41413 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
41414 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
41415 atomic_set(&ddb_entry->relogin_timer, 0);
41416 - atomic_set(&ddb_entry->relogin_retry_count, 0);
41417 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
41418 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
41419 list_add_tail(&ddb_entry->list, &ha->ddb_list);
41420 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
41421 @@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
41422 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
41423 atomic_set(&ddb_entry->port_down_timer,
41424 ha->port_down_retry_count);
41425 - atomic_set(&ddb_entry->relogin_retry_count, 0);
41426 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
41427 atomic_set(&ddb_entry->relogin_timer, 0);
41428 clear_bit(DF_RELOGIN, &ddb_entry->flags);
41429 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
41430 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
41431 index 83c8b5e..a82b348 100644
41432 --- a/drivers/scsi/qla4xxx/ql4_os.c
41433 +++ b/drivers/scsi/qla4xxx/ql4_os.c
41434 @@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
41435 ddb_entry->fw_ddb_device_state ==
41436 DDB_DS_SESSION_FAILED) {
41437 /* Reset retry relogin timer */
41438 - atomic_inc(&ddb_entry->relogin_retry_count);
41439 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
41440 DEBUG2(printk("scsi%ld: index[%d] relogin"
41441 " timed out-retrying"
41442 " relogin (%d)\n",
41443 ha->host_no,
41444 ddb_entry->fw_ddb_index,
41445 - atomic_read(&ddb_entry->
41446 + atomic_read_unchecked(&ddb_entry->
41447 relogin_retry_count))
41448 );
41449 start_dpc++;
41450 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
41451 index dd098ca..686ce01 100644
41452 --- a/drivers/scsi/scsi.c
41453 +++ b/drivers/scsi/scsi.c
41454 @@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
41455 unsigned long timeout;
41456 int rtn = 0;
41457
41458 - atomic_inc(&cmd->device->iorequest_cnt);
41459 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41460
41461 /* check if the device is still usable */
41462 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
41463 diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
41464 index bc3e363..e1a8e50 100644
41465 --- a/drivers/scsi/scsi_debug.c
41466 +++ b/drivers/scsi/scsi_debug.c
41467 @@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
41468 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
41469 unsigned char *cmd = (unsigned char *)scp->cmnd;
41470
41471 + pax_track_stack();
41472 +
41473 if ((errsts = check_readiness(scp, 1, devip)))
41474 return errsts;
41475 memset(arr, 0, sizeof(arr));
41476 @@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cmnd * scp,
41477 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
41478 unsigned char *cmd = (unsigned char *)scp->cmnd;
41479
41480 + pax_track_stack();
41481 +
41482 if ((errsts = check_readiness(scp, 1, devip)))
41483 return errsts;
41484 memset(arr, 0, sizeof(arr));
41485 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
41486 index 8df12522..c4c1472 100644
41487 --- a/drivers/scsi/scsi_lib.c
41488 +++ b/drivers/scsi/scsi_lib.c
41489 @@ -1389,7 +1389,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
41490 shost = sdev->host;
41491 scsi_init_cmd_errh(cmd);
41492 cmd->result = DID_NO_CONNECT << 16;
41493 - atomic_inc(&cmd->device->iorequest_cnt);
41494 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41495
41496 /*
41497 * SCSI request completion path will do scsi_device_unbusy(),
41498 @@ -1420,9 +1420,9 @@ static void scsi_softirq_done(struct request *rq)
41499 */
41500 cmd->serial_number = 0;
41501
41502 - atomic_inc(&cmd->device->iodone_cnt);
41503 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
41504 if (cmd->result)
41505 - atomic_inc(&cmd->device->ioerr_cnt);
41506 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
41507
41508 disposition = scsi_decide_disposition(cmd);
41509 if (disposition != SUCCESS &&
41510 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
41511 index 91a93e0..eae0fe3 100644
41512 --- a/drivers/scsi/scsi_sysfs.c
41513 +++ b/drivers/scsi/scsi_sysfs.c
41514 @@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
41515 char *buf) \
41516 { \
41517 struct scsi_device *sdev = to_scsi_device(dev); \
41518 - unsigned long long count = atomic_read(&sdev->field); \
41519 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
41520 return snprintf(buf, 20, "0x%llx\n", count); \
41521 } \
41522 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
41523 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
41524 index 1030327..f91fd30 100644
41525 --- a/drivers/scsi/scsi_tgt_lib.c
41526 +++ b/drivers/scsi/scsi_tgt_lib.c
41527 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
41528 int err;
41529
41530 dprintk("%lx %u\n", uaddr, len);
41531 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
41532 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
41533 if (err) {
41534 /*
41535 * TODO: need to fixup sg_tablesize, max_segment_size,
41536 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
41537 index db02e31..1b42ea9 100644
41538 --- a/drivers/scsi/scsi_transport_fc.c
41539 +++ b/drivers/scsi/scsi_transport_fc.c
41540 @@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
41541 * Netlink Infrastructure
41542 */
41543
41544 -static atomic_t fc_event_seq;
41545 +static atomic_unchecked_t fc_event_seq;
41546
41547 /**
41548 * fc_get_event_number - Obtain the next sequential FC event number
41549 @@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
41550 u32
41551 fc_get_event_number(void)
41552 {
41553 - return atomic_add_return(1, &fc_event_seq);
41554 + return atomic_add_return_unchecked(1, &fc_event_seq);
41555 }
41556 EXPORT_SYMBOL(fc_get_event_number);
41557
41558 @@ -641,7 +641,7 @@ static __init int fc_transport_init(void)
41559 {
41560 int error;
41561
41562 - atomic_set(&fc_event_seq, 0);
41563 + atomic_set_unchecked(&fc_event_seq, 0);
41564
41565 error = transport_class_register(&fc_host_class);
41566 if (error)
41567 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
41568 index de2f8c4..63c5278 100644
41569 --- a/drivers/scsi/scsi_transport_iscsi.c
41570 +++ b/drivers/scsi/scsi_transport_iscsi.c
41571 @@ -81,7 +81,7 @@ struct iscsi_internal {
41572 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
41573 };
41574
41575 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
41576 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
41577 static struct workqueue_struct *iscsi_eh_timer_workq;
41578
41579 /*
41580 @@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
41581 int err;
41582
41583 ihost = shost->shost_data;
41584 - session->sid = atomic_add_return(1, &iscsi_session_nr);
41585 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
41586
41587 if (id == ISCSI_MAX_TARGET) {
41588 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
41589 @@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(void)
41590 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
41591 ISCSI_TRANSPORT_VERSION);
41592
41593 - atomic_set(&iscsi_session_nr, 0);
41594 + atomic_set_unchecked(&iscsi_session_nr, 0);
41595
41596 err = class_register(&iscsi_transport_class);
41597 if (err)
41598 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
41599 index 21a045e..ec89e03 100644
41600 --- a/drivers/scsi/scsi_transport_srp.c
41601 +++ b/drivers/scsi/scsi_transport_srp.c
41602 @@ -33,7 +33,7 @@
41603 #include "scsi_transport_srp_internal.h"
41604
41605 struct srp_host_attrs {
41606 - atomic_t next_port_id;
41607 + atomic_unchecked_t next_port_id;
41608 };
41609 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
41610
41611 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
41612 struct Scsi_Host *shost = dev_to_shost(dev);
41613 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
41614
41615 - atomic_set(&srp_host->next_port_id, 0);
41616 + atomic_set_unchecked(&srp_host->next_port_id, 0);
41617 return 0;
41618 }
41619
41620 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
41621 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
41622 rport->roles = ids->roles;
41623
41624 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
41625 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
41626 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
41627
41628 transport_setup_device(&rport->dev);
41629 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
41630 index 2dd1b73..fd8145f 100644
41631 --- a/drivers/scsi/sd.c
41632 +++ b/drivers/scsi/sd.c
41633 @@ -817,6 +817,10 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
41634 SCSI_LOG_IOCTL(1, printk("sd_ioctl: disk=%s, cmd=0x%x\n",
41635 disk->disk_name, cmd));
41636
41637 + error = scsi_verify_blk_ioctl(bdev, cmd);
41638 + if (error < 0)
41639 + return error;
41640 +
41641 /*
41642 * If we are in the middle of error recovery, don't let anyone
41643 * else try and use this device. Also, if error recovery fails, it
41644 @@ -996,6 +1000,11 @@ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
41645 unsigned int cmd, unsigned long arg)
41646 {
41647 struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;
41648 + int ret;
41649 +
41650 + ret = scsi_verify_blk_ioctl(bdev, cmd);
41651 + if (ret < 0)
41652 + return ret;
41653
41654 /*
41655 * If we are in the middle of error recovery, don't let anyone
41656 @@ -1007,8 +1016,6 @@ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
41657 return -ENODEV;
41658
41659 if (sdev->host->hostt->compat_ioctl) {
41660 - int ret;
41661 -
41662 ret = sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg);
41663
41664 return ret;
41665 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
41666 index 040f751..98a5ed2 100644
41667 --- a/drivers/scsi/sg.c
41668 +++ b/drivers/scsi/sg.c
41669 @@ -1064,7 +1064,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
41670 sdp->disk->disk_name,
41671 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
41672 NULL,
41673 - (char *)arg);
41674 + (char __user *)arg);
41675 case BLKTRACESTART:
41676 return blk_trace_startstop(sdp->device->request_queue, 1);
41677 case BLKTRACESTOP:
41678 @@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
41679 const struct file_operations * fops;
41680 };
41681
41682 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
41683 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
41684 {"allow_dio", &adio_fops},
41685 {"debug", &debug_fops},
41686 {"def_reserved_size", &dressz_fops},
41687 @@ -2307,7 +2307,7 @@ sg_proc_init(void)
41688 {
41689 int k, mask;
41690 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
41691 - struct sg_proc_leaf * leaf;
41692 + const struct sg_proc_leaf * leaf;
41693
41694 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
41695 if (!sg_proc_sgp)
41696 diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
41697 index c19ca5e..3eb5959 100644
41698 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c
41699 +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
41700 @@ -1758,6 +1758,8 @@ static int __devinit sym2_probe(struct pci_dev *pdev,
41701 int do_iounmap = 0;
41702 int do_disable_device = 1;
41703
41704 + pax_track_stack();
41705 +
41706 memset(&sym_dev, 0, sizeof(sym_dev));
41707 memset(&nvram, 0, sizeof(nvram));
41708 sym_dev.pdev = pdev;
41709 diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c
41710 index eadc1ab..2d81457 100644
41711 --- a/drivers/serial/kgdboc.c
41712 +++ b/drivers/serial/kgdboc.c
41713 @@ -18,7 +18,7 @@
41714
41715 #define MAX_CONFIG_LEN 40
41716
41717 -static struct kgdb_io kgdboc_io_ops;
41718 +static const struct kgdb_io kgdboc_io_ops;
41719
41720 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
41721 static int configured = -1;
41722 @@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void)
41723 module_put(THIS_MODULE);
41724 }
41725
41726 -static struct kgdb_io kgdboc_io_ops = {
41727 +static const struct kgdb_io kgdboc_io_ops = {
41728 .name = "kgdboc",
41729 .read_char = kgdboc_get_char,
41730 .write_char = kgdboc_put_char,
41731 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
41732 index b76f246..7f41af7 100644
41733 --- a/drivers/spi/spi.c
41734 +++ b/drivers/spi/spi.c
41735 @@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, struct spi_message *message)
41736 EXPORT_SYMBOL_GPL(spi_sync);
41737
41738 /* portable code must never pass more than 32 bytes */
41739 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
41740 +#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
41741
41742 static u8 *buf;
41743
41744 diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
41745 index 99010d4..6bad87b 100644
41746 --- a/drivers/staging/android/binder.c
41747 +++ b/drivers/staging/android/binder.c
41748 @@ -2756,7 +2756,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
41749 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
41750 }
41751
41752 -static struct vm_operations_struct binder_vm_ops = {
41753 +static const struct vm_operations_struct binder_vm_ops = {
41754 .open = binder_vma_open,
41755 .close = binder_vma_close,
41756 };
41757 diff --git a/drivers/staging/b3dfg/b3dfg.c b/drivers/staging/b3dfg/b3dfg.c
41758 index cda26bb..39fed3f 100644
41759 --- a/drivers/staging/b3dfg/b3dfg.c
41760 +++ b/drivers/staging/b3dfg/b3dfg.c
41761 @@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_area_struct *vma,
41762 return VM_FAULT_NOPAGE;
41763 }
41764
41765 -static struct vm_operations_struct b3dfg_vm_ops = {
41766 +static const struct vm_operations_struct b3dfg_vm_ops = {
41767 .fault = b3dfg_vma_fault,
41768 };
41769
41770 @@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp, struct vm_area_struct *vma)
41771 return r;
41772 }
41773
41774 -static struct file_operations b3dfg_fops = {
41775 +static const struct file_operations b3dfg_fops = {
41776 .owner = THIS_MODULE,
41777 .open = b3dfg_open,
41778 .release = b3dfg_release,
41779 diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
41780 index 908f25a..c9a579b 100644
41781 --- a/drivers/staging/comedi/comedi_fops.c
41782 +++ b/drivers/staging/comedi/comedi_fops.c
41783 @@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct *area)
41784 mutex_unlock(&dev->mutex);
41785 }
41786
41787 -static struct vm_operations_struct comedi_vm_ops = {
41788 +static const struct vm_operations_struct comedi_vm_ops = {
41789 .close = comedi_unmap,
41790 };
41791
41792 diff --git a/drivers/staging/dream/qdsp5/adsp_driver.c b/drivers/staging/dream/qdsp5/adsp_driver.c
41793 index e55a0db..577b776 100644
41794 --- a/drivers/staging/dream/qdsp5/adsp_driver.c
41795 +++ b/drivers/staging/dream/qdsp5/adsp_driver.c
41796 @@ -576,7 +576,7 @@ static struct adsp_device *inode_to_device(struct inode *inode)
41797 static dev_t adsp_devno;
41798 static struct class *adsp_class;
41799
41800 -static struct file_operations adsp_fops = {
41801 +static const struct file_operations adsp_fops = {
41802 .owner = THIS_MODULE,
41803 .open = adsp_open,
41804 .unlocked_ioctl = adsp_ioctl,
41805 diff --git a/drivers/staging/dream/qdsp5/audio_aac.c b/drivers/staging/dream/qdsp5/audio_aac.c
41806 index ad2390f..4116ee8 100644
41807 --- a/drivers/staging/dream/qdsp5/audio_aac.c
41808 +++ b/drivers/staging/dream/qdsp5/audio_aac.c
41809 @@ -1022,7 +1022,7 @@ done:
41810 return rc;
41811 }
41812
41813 -static struct file_operations audio_aac_fops = {
41814 +static const struct file_operations audio_aac_fops = {
41815 .owner = THIS_MODULE,
41816 .open = audio_open,
41817 .release = audio_release,
41818 diff --git a/drivers/staging/dream/qdsp5/audio_amrnb.c b/drivers/staging/dream/qdsp5/audio_amrnb.c
41819 index cd818a5..870b37b 100644
41820 --- a/drivers/staging/dream/qdsp5/audio_amrnb.c
41821 +++ b/drivers/staging/dream/qdsp5/audio_amrnb.c
41822 @@ -833,7 +833,7 @@ done:
41823 return rc;
41824 }
41825
41826 -static struct file_operations audio_amrnb_fops = {
41827 +static const struct file_operations audio_amrnb_fops = {
41828 .owner = THIS_MODULE,
41829 .open = audamrnb_open,
41830 .release = audamrnb_release,
41831 diff --git a/drivers/staging/dream/qdsp5/audio_evrc.c b/drivers/staging/dream/qdsp5/audio_evrc.c
41832 index 4b43e18..cedafda 100644
41833 --- a/drivers/staging/dream/qdsp5/audio_evrc.c
41834 +++ b/drivers/staging/dream/qdsp5/audio_evrc.c
41835 @@ -805,7 +805,7 @@ dma_fail:
41836 return rc;
41837 }
41838
41839 -static struct file_operations audio_evrc_fops = {
41840 +static const struct file_operations audio_evrc_fops = {
41841 .owner = THIS_MODULE,
41842 .open = audevrc_open,
41843 .release = audevrc_release,
41844 diff --git a/drivers/staging/dream/qdsp5/audio_in.c b/drivers/staging/dream/qdsp5/audio_in.c
41845 index 3d950a2..9431118 100644
41846 --- a/drivers/staging/dream/qdsp5/audio_in.c
41847 +++ b/drivers/staging/dream/qdsp5/audio_in.c
41848 @@ -913,7 +913,7 @@ static int audpre_open(struct inode *inode, struct file *file)
41849 return 0;
41850 }
41851
41852 -static struct file_operations audio_fops = {
41853 +static const struct file_operations audio_fops = {
41854 .owner = THIS_MODULE,
41855 .open = audio_in_open,
41856 .release = audio_in_release,
41857 @@ -922,7 +922,7 @@ static struct file_operations audio_fops = {
41858 .unlocked_ioctl = audio_in_ioctl,
41859 };
41860
41861 -static struct file_operations audpre_fops = {
41862 +static const struct file_operations audpre_fops = {
41863 .owner = THIS_MODULE,
41864 .open = audpre_open,
41865 .unlocked_ioctl = audpre_ioctl,
41866 diff --git a/drivers/staging/dream/qdsp5/audio_mp3.c b/drivers/staging/dream/qdsp5/audio_mp3.c
41867 index b95574f..286c2f4 100644
41868 --- a/drivers/staging/dream/qdsp5/audio_mp3.c
41869 +++ b/drivers/staging/dream/qdsp5/audio_mp3.c
41870 @@ -941,7 +941,7 @@ done:
41871 return rc;
41872 }
41873
41874 -static struct file_operations audio_mp3_fops = {
41875 +static const struct file_operations audio_mp3_fops = {
41876 .owner = THIS_MODULE,
41877 .open = audio_open,
41878 .release = audio_release,
41879 diff --git a/drivers/staging/dream/qdsp5/audio_out.c b/drivers/staging/dream/qdsp5/audio_out.c
41880 index d1adcf6..f8f9833 100644
41881 --- a/drivers/staging/dream/qdsp5/audio_out.c
41882 +++ b/drivers/staging/dream/qdsp5/audio_out.c
41883 @@ -810,7 +810,7 @@ static int audpp_open(struct inode *inode, struct file *file)
41884 return 0;
41885 }
41886
41887 -static struct file_operations audio_fops = {
41888 +static const struct file_operations audio_fops = {
41889 .owner = THIS_MODULE,
41890 .open = audio_open,
41891 .release = audio_release,
41892 @@ -819,7 +819,7 @@ static struct file_operations audio_fops = {
41893 .unlocked_ioctl = audio_ioctl,
41894 };
41895
41896 -static struct file_operations audpp_fops = {
41897 +static const struct file_operations audpp_fops = {
41898 .owner = THIS_MODULE,
41899 .open = audpp_open,
41900 .unlocked_ioctl = audpp_ioctl,
41901 diff --git a/drivers/staging/dream/qdsp5/audio_qcelp.c b/drivers/staging/dream/qdsp5/audio_qcelp.c
41902 index f0f50e3..f6b9dbc 100644
41903 --- a/drivers/staging/dream/qdsp5/audio_qcelp.c
41904 +++ b/drivers/staging/dream/qdsp5/audio_qcelp.c
41905 @@ -816,7 +816,7 @@ err:
41906 return rc;
41907 }
41908
41909 -static struct file_operations audio_qcelp_fops = {
41910 +static const struct file_operations audio_qcelp_fops = {
41911 .owner = THIS_MODULE,
41912 .open = audqcelp_open,
41913 .release = audqcelp_release,
41914 diff --git a/drivers/staging/dream/qdsp5/snd.c b/drivers/staging/dream/qdsp5/snd.c
41915 index 037d7ff..5469ec3 100644
41916 --- a/drivers/staging/dream/qdsp5/snd.c
41917 +++ b/drivers/staging/dream/qdsp5/snd.c
41918 @@ -242,7 +242,7 @@ err:
41919 return rc;
41920 }
41921
41922 -static struct file_operations snd_fops = {
41923 +static const struct file_operations snd_fops = {
41924 .owner = THIS_MODULE,
41925 .open = snd_open,
41926 .release = snd_release,
41927 diff --git a/drivers/staging/dream/smd/smd_qmi.c b/drivers/staging/dream/smd/smd_qmi.c
41928 index d4e7d88..0ea632a 100644
41929 --- a/drivers/staging/dream/smd/smd_qmi.c
41930 +++ b/drivers/staging/dream/smd/smd_qmi.c
41931 @@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip, struct file *fp)
41932 return 0;
41933 }
41934
41935 -static struct file_operations qmi_fops = {
41936 +static const struct file_operations qmi_fops = {
41937 .owner = THIS_MODULE,
41938 .read = qmi_read,
41939 .write = qmi_write,
41940 diff --git a/drivers/staging/dream/smd/smd_rpcrouter_device.c b/drivers/staging/dream/smd/smd_rpcrouter_device.c
41941 index cd3910b..ff053d3 100644
41942 --- a/drivers/staging/dream/smd/smd_rpcrouter_device.c
41943 +++ b/drivers/staging/dream/smd/smd_rpcrouter_device.c
41944 @@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file *filp, unsigned int cmd,
41945 return rc;
41946 }
41947
41948 -static struct file_operations rpcrouter_server_fops = {
41949 +static const struct file_operations rpcrouter_server_fops = {
41950 .owner = THIS_MODULE,
41951 .open = rpcrouter_open,
41952 .release = rpcrouter_release,
41953 @@ -224,7 +224,7 @@ static struct file_operations rpcrouter_server_fops = {
41954 .unlocked_ioctl = rpcrouter_ioctl,
41955 };
41956
41957 -static struct file_operations rpcrouter_router_fops = {
41958 +static const struct file_operations rpcrouter_router_fops = {
41959 .owner = THIS_MODULE,
41960 .open = rpcrouter_open,
41961 .release = rpcrouter_release,
41962 diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c
41963 index c24e4e0..07665be 100644
41964 --- a/drivers/staging/dst/dcore.c
41965 +++ b/drivers/staging/dst/dcore.c
41966 @@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendisk *disk, fmode_t mode)
41967 return 0;
41968 }
41969
41970 -static struct block_device_operations dst_blk_ops = {
41971 +static const struct block_device_operations dst_blk_ops = {
41972 .open = dst_bdev_open,
41973 .release = dst_bdev_release,
41974 .owner = THIS_MODULE,
41975 @@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(struct dst_ctl *ctl,
41976 n->size = ctl->size;
41977
41978 atomic_set(&n->refcnt, 1);
41979 - atomic_long_set(&n->gen, 0);
41980 + atomic_long_set_unchecked(&n->gen, 0);
41981 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
41982
41983 err = dst_node_sysfs_init(n);
41984 diff --git a/drivers/staging/dst/trans.c b/drivers/staging/dst/trans.c
41985 index 557d372..8d84422 100644
41986 --- a/drivers/staging/dst/trans.c
41987 +++ b/drivers/staging/dst/trans.c
41988 @@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n, struct bio *bio)
41989 t->error = 0;
41990 t->retries = 0;
41991 atomic_set(&t->refcnt, 1);
41992 - t->gen = atomic_long_inc_return(&n->gen);
41993 + t->gen = atomic_long_inc_return_unchecked(&n->gen);
41994
41995 t->enc = bio_data_dir(bio);
41996 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
41997 diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c
41998 index 94f7752..d051514 100644
41999 --- a/drivers/staging/et131x/et1310_tx.c
42000 +++ b/drivers/staging/et131x/et1310_tx.c
42001 @@ -710,11 +710,11 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
42002 struct net_device_stats *stats = &etdev->net_stats;
42003
42004 if (pMpTcb->Flags & fMP_DEST_BROAD)
42005 - atomic_inc(&etdev->Stats.brdcstxmt);
42006 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
42007 else if (pMpTcb->Flags & fMP_DEST_MULTI)
42008 - atomic_inc(&etdev->Stats.multixmt);
42009 + atomic_inc_unchecked(&etdev->Stats.multixmt);
42010 else
42011 - atomic_inc(&etdev->Stats.unixmt);
42012 + atomic_inc_unchecked(&etdev->Stats.unixmt);
42013
42014 if (pMpTcb->Packet) {
42015 stats->tx_bytes += pMpTcb->Packet->len;
42016 diff --git a/drivers/staging/et131x/et131x_adapter.h b/drivers/staging/et131x/et131x_adapter.h
42017 index 1dfe06f..f469b4d 100644
42018 --- a/drivers/staging/et131x/et131x_adapter.h
42019 +++ b/drivers/staging/et131x/et131x_adapter.h
42020 @@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
42021 * operations
42022 */
42023 u32 unircv; /* # multicast packets received */
42024 - atomic_t unixmt; /* # multicast packets for Tx */
42025 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
42026 u32 multircv; /* # multicast packets received */
42027 - atomic_t multixmt; /* # multicast packets for Tx */
42028 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
42029 u32 brdcstrcv; /* # broadcast packets received */
42030 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
42031 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
42032 u32 norcvbuf; /* # Rx packets discarded */
42033 u32 noxmtbuf; /* # Tx packets discarded */
42034
42035 diff --git a/drivers/staging/go7007/go7007-v4l2.c b/drivers/staging/go7007/go7007-v4l2.c
42036 index 4bd353a..e28f455 100644
42037 --- a/drivers/staging/go7007/go7007-v4l2.c
42038 +++ b/drivers/staging/go7007/go7007-v4l2.c
42039 @@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42040 return 0;
42041 }
42042
42043 -static struct vm_operations_struct go7007_vm_ops = {
42044 +static const struct vm_operations_struct go7007_vm_ops = {
42045 .open = go7007_vm_open,
42046 .close = go7007_vm_close,
42047 .fault = go7007_vm_fault,
42048 diff --git a/drivers/staging/hv/Channel.c b/drivers/staging/hv/Channel.c
42049 index 366dc95..b974d87 100644
42050 --- a/drivers/staging/hv/Channel.c
42051 +++ b/drivers/staging/hv/Channel.c
42052 @@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vmbus_channel *Channel, void *Kbuffer,
42053
42054 DPRINT_ENTER(VMBUS);
42055
42056 - nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
42057 - atomic_inc(&gVmbusConnection.NextGpadlHandle);
42058 + nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
42059 + atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
42060
42061 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
42062 ASSERT(msgInfo != NULL);
42063 diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c
42064 index b12237f..01ae28a 100644
42065 --- a/drivers/staging/hv/Hv.c
42066 +++ b/drivers/staging/hv/Hv.c
42067 @@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, void *Input, void *Output)
42068 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
42069 u32 outputAddressHi = outputAddress >> 32;
42070 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
42071 - volatile void *hypercallPage = gHvContext.HypercallPage;
42072 + volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
42073
42074 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
42075 Control, Input, Output);
42076 diff --git a/drivers/staging/hv/VmbusApi.h b/drivers/staging/hv/VmbusApi.h
42077 index d089bb1..2ebc158 100644
42078 --- a/drivers/staging/hv/VmbusApi.h
42079 +++ b/drivers/staging/hv/VmbusApi.h
42080 @@ -109,7 +109,7 @@ struct vmbus_channel_interface {
42081 u32 *GpadlHandle);
42082 int (*TeardownGpadl)(struct hv_device *device, u32 GpadlHandle);
42083 void (*GetInfo)(struct hv_device *dev, struct hv_device_info *devinfo);
42084 -};
42085 +} __no_const;
42086
42087 /* Base driver object */
42088 struct hv_driver {
42089 diff --git a/drivers/staging/hv/VmbusPrivate.h b/drivers/staging/hv/VmbusPrivate.h
42090 index 5a37cce..6ecc88c 100644
42091 --- a/drivers/staging/hv/VmbusPrivate.h
42092 +++ b/drivers/staging/hv/VmbusPrivate.h
42093 @@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
42094 struct VMBUS_CONNECTION {
42095 enum VMBUS_CONNECT_STATE ConnectState;
42096
42097 - atomic_t NextGpadlHandle;
42098 + atomic_unchecked_t NextGpadlHandle;
42099
42100 /*
42101 * Represents channel interrupts. Each bit position represents a
42102 diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
42103 index 871a202..ca50ddf 100644
42104 --- a/drivers/staging/hv/blkvsc_drv.c
42105 +++ b/drivers/staging/hv/blkvsc_drv.c
42106 @@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;
42107 /* The one and only one */
42108 static struct blkvsc_driver_context g_blkvsc_drv;
42109
42110 -static struct block_device_operations block_ops = {
42111 +static const struct block_device_operations block_ops = {
42112 .owner = THIS_MODULE,
42113 .open = blkvsc_open,
42114 .release = blkvsc_release,
42115 diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c
42116 index 6acc49a..fbc8d46 100644
42117 --- a/drivers/staging/hv/vmbus_drv.c
42118 +++ b/drivers/staging/hv/vmbus_drv.c
42119 @@ -532,7 +532,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
42120 to_device_context(root_device_obj);
42121 struct device_context *child_device_ctx =
42122 to_device_context(child_device_obj);
42123 - static atomic_t device_num = ATOMIC_INIT(0);
42124 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
42125
42126 DPRINT_ENTER(VMBUS_DRV);
42127
42128 @@ -541,7 +541,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
42129
42130 /* Set the device name. Otherwise, device_register() will fail. */
42131 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
42132 - atomic_inc_return(&device_num));
42133 + atomic_inc_return_unchecked(&device_num));
42134
42135 /* The new device belongs to this bus */
42136 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
42137 diff --git a/drivers/staging/iio/ring_generic.h b/drivers/staging/iio/ring_generic.h
42138 index d926189..17b19fd 100644
42139 --- a/drivers/staging/iio/ring_generic.h
42140 +++ b/drivers/staging/iio/ring_generic.h
42141 @@ -87,7 +87,7 @@ struct iio_ring_access_funcs {
42142
42143 int (*is_enabled)(struct iio_ring_buffer *ring);
42144 int (*enable)(struct iio_ring_buffer *ring);
42145 -};
42146 +} __no_const;
42147
42148 /**
42149 * struct iio_ring_buffer - general ring buffer structure
42150 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
42151 index 1b237b7..88c624e 100644
42152 --- a/drivers/staging/octeon/ethernet-rx.c
42153 +++ b/drivers/staging/octeon/ethernet-rx.c
42154 @@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long unused)
42155 /* Increment RX stats for virtual ports */
42156 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
42157 #ifdef CONFIG_64BIT
42158 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
42159 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
42160 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
42161 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
42162 #else
42163 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
42164 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
42165 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
42166 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
42167 #endif
42168 }
42169 netif_receive_skb(skb);
42170 @@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long unused)
42171 dev->name);
42172 */
42173 #ifdef CONFIG_64BIT
42174 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
42175 + atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
42176 #else
42177 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
42178 + atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
42179 #endif
42180 dev_kfree_skb_irq(skb);
42181 }
42182 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
42183 index 492c502..d9909f1 100644
42184 --- a/drivers/staging/octeon/ethernet.c
42185 +++ b/drivers/staging/octeon/ethernet.c
42186 @@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
42187 * since the RX tasklet also increments it.
42188 */
42189 #ifdef CONFIG_64BIT
42190 - atomic64_add(rx_status.dropped_packets,
42191 - (atomic64_t *)&priv->stats.rx_dropped);
42192 + atomic64_add_unchecked(rx_status.dropped_packets,
42193 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
42194 #else
42195 - atomic_add(rx_status.dropped_packets,
42196 - (atomic_t *)&priv->stats.rx_dropped);
42197 + atomic_add_unchecked(rx_status.dropped_packets,
42198 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
42199 #endif
42200 }
42201
42202 diff --git a/drivers/staging/otus/80211core/pub_zfi.h b/drivers/staging/otus/80211core/pub_zfi.h
42203 index a35bd5d..28fff45 100644
42204 --- a/drivers/staging/otus/80211core/pub_zfi.h
42205 +++ b/drivers/staging/otus/80211core/pub_zfi.h
42206 @@ -531,7 +531,7 @@ struct zsCbFuncTbl
42207 u8_t (*zfcbClassifyTxPacket)(zdev_t* dev, zbuf_t* buf);
42208
42209 void (*zfcbHwWatchDogNotify)(zdev_t* dev);
42210 -};
42211 +} __no_const;
42212
42213 extern void zfZeroMemory(u8_t* va, u16_t length);
42214 #define ZM_INIT_CB_FUNC_TABLE(p) zfZeroMemory((u8_t *)p, sizeof(struct zsCbFuncTbl));
42215 diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
42216 index c39a25f..696f5aa 100644
42217 --- a/drivers/staging/panel/panel.c
42218 +++ b/drivers/staging/panel/panel.c
42219 @@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *inode, struct file *file)
42220 return 0;
42221 }
42222
42223 -static struct file_operations lcd_fops = {
42224 +static const struct file_operations lcd_fops = {
42225 .write = lcd_write,
42226 .open = lcd_open,
42227 .release = lcd_release,
42228 @@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *inode, struct file *file)
42229 return 0;
42230 }
42231
42232 -static struct file_operations keypad_fops = {
42233 +static const struct file_operations keypad_fops = {
42234 .read = keypad_read, /* read */
42235 .open = keypad_open, /* open */
42236 .release = keypad_release, /* close */
42237 diff --git a/drivers/staging/phison/phison.c b/drivers/staging/phison/phison.c
42238 index 270ebcb..37e46af 100644
42239 --- a/drivers/staging/phison/phison.c
42240 +++ b/drivers/staging/phison/phison.c
42241 @@ -43,7 +43,7 @@ static struct scsi_host_template phison_sht = {
42242 ATA_BMDMA_SHT(DRV_NAME),
42243 };
42244
42245 -static struct ata_port_operations phison_ops = {
42246 +static const struct ata_port_operations phison_ops = {
42247 .inherits = &ata_bmdma_port_ops,
42248 .prereset = phison_pre_reset,
42249 };
42250 diff --git a/drivers/staging/poch/poch.c b/drivers/staging/poch/poch.c
42251 index 2eb8e3d..57616a7 100644
42252 --- a/drivers/staging/poch/poch.c
42253 +++ b/drivers/staging/poch/poch.c
42254 @@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inode, struct file *filp,
42255 return 0;
42256 }
42257
42258 -static struct file_operations poch_fops = {
42259 +static const struct file_operations poch_fops = {
42260 .owner = THIS_MODULE,
42261 .open = poch_open,
42262 .release = poch_release,
42263 diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
42264 index c94de31..19402bc 100644
42265 --- a/drivers/staging/pohmelfs/inode.c
42266 +++ b/drivers/staging/pohmelfs/inode.c
42267 @@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
42268 mutex_init(&psb->mcache_lock);
42269 psb->mcache_root = RB_ROOT;
42270 psb->mcache_timeout = msecs_to_jiffies(5000);
42271 - atomic_long_set(&psb->mcache_gen, 0);
42272 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
42273
42274 psb->trans_max_pages = 100;
42275
42276 @@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
42277 INIT_LIST_HEAD(&psb->crypto_ready_list);
42278 INIT_LIST_HEAD(&psb->crypto_active_list);
42279
42280 - atomic_set(&psb->trans_gen, 1);
42281 + atomic_set_unchecked(&psb->trans_gen, 1);
42282 atomic_long_set(&psb->total_inodes, 0);
42283
42284 mutex_init(&psb->state_lock);
42285 diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
42286 index e22665c..a2a9390 100644
42287 --- a/drivers/staging/pohmelfs/mcache.c
42288 +++ b/drivers/staging/pohmelfs/mcache.c
42289 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
42290 m->data = data;
42291 m->start = start;
42292 m->size = size;
42293 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
42294 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
42295
42296 mutex_lock(&psb->mcache_lock);
42297 err = pohmelfs_mcache_insert(psb, m);
42298 diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
42299 index 623a07d..4035c19 100644
42300 --- a/drivers/staging/pohmelfs/netfs.h
42301 +++ b/drivers/staging/pohmelfs/netfs.h
42302 @@ -570,14 +570,14 @@ struct pohmelfs_config;
42303 struct pohmelfs_sb {
42304 struct rb_root mcache_root;
42305 struct mutex mcache_lock;
42306 - atomic_long_t mcache_gen;
42307 + atomic_long_unchecked_t mcache_gen;
42308 unsigned long mcache_timeout;
42309
42310 unsigned int idx;
42311
42312 unsigned int trans_retries;
42313
42314 - atomic_t trans_gen;
42315 + atomic_unchecked_t trans_gen;
42316
42317 unsigned int crypto_attached_size;
42318 unsigned int crypto_align_size;
42319 diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
42320 index 36a2535..0591bf4 100644
42321 --- a/drivers/staging/pohmelfs/trans.c
42322 +++ b/drivers/staging/pohmelfs/trans.c
42323 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
42324 int err;
42325 struct netfs_cmd *cmd = t->iovec.iov_base;
42326
42327 - t->gen = atomic_inc_return(&psb->trans_gen);
42328 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
42329
42330 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
42331 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
42332 diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
42333 index f890a16..509ece8 100644
42334 --- a/drivers/staging/sep/sep_driver.c
42335 +++ b/drivers/staging/sep/sep_driver.c
42336 @@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver = {
42337 static dev_t sep_devno;
42338
42339 /* the files operations structure of the driver */
42340 -static struct file_operations sep_file_operations = {
42341 +static const struct file_operations sep_file_operations = {
42342 .owner = THIS_MODULE,
42343 .ioctl = sep_ioctl,
42344 .poll = sep_poll,
42345 diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
42346 index 5e16bc3..7655b10 100644
42347 --- a/drivers/staging/usbip/usbip_common.h
42348 +++ b/drivers/staging/usbip/usbip_common.h
42349 @@ -374,7 +374,7 @@ struct usbip_device {
42350 void (*shutdown)(struct usbip_device *);
42351 void (*reset)(struct usbip_device *);
42352 void (*unusable)(struct usbip_device *);
42353 - } eh_ops;
42354 + } __no_const eh_ops;
42355 };
42356
42357
42358 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
42359 index 57f7946..d9df23d 100644
42360 --- a/drivers/staging/usbip/vhci.h
42361 +++ b/drivers/staging/usbip/vhci.h
42362 @@ -92,7 +92,7 @@ struct vhci_hcd {
42363 unsigned resuming:1;
42364 unsigned long re_timeout;
42365
42366 - atomic_t seqnum;
42367 + atomic_unchecked_t seqnum;
42368
42369 /*
42370 * NOTE:
42371 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
42372 index 20cd7db..c2693ff 100644
42373 --- a/drivers/staging/usbip/vhci_hcd.c
42374 +++ b/drivers/staging/usbip/vhci_hcd.c
42375 @@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
42376 return;
42377 }
42378
42379 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
42380 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42381 if (priv->seqnum == 0xffff)
42382 usbip_uinfo("seqnum max\n");
42383
42384 @@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
42385 return -ENOMEM;
42386 }
42387
42388 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
42389 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42390 if (unlink->seqnum == 0xffff)
42391 usbip_uinfo("seqnum max\n");
42392
42393 @@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hcd)
42394 vdev->rhport = rhport;
42395 }
42396
42397 - atomic_set(&vhci->seqnum, 0);
42398 + atomic_set_unchecked(&vhci->seqnum, 0);
42399 spin_lock_init(&vhci->lock);
42400
42401
42402 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
42403 index 7fd76fe..673695a 100644
42404 --- a/drivers/staging/usbip/vhci_rx.c
42405 +++ b/drivers/staging/usbip/vhci_rx.c
42406 @@ -79,7 +79,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
42407 usbip_uerr("cannot find a urb of seqnum %u\n",
42408 pdu->base.seqnum);
42409 usbip_uinfo("max seqnum %d\n",
42410 - atomic_read(&the_controller->seqnum));
42411 + atomic_read_unchecked(&the_controller->seqnum));
42412 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
42413 return;
42414 }
42415 diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
42416 index 7891288..8e31300 100644
42417 --- a/drivers/staging/vme/devices/vme_user.c
42418 +++ b/drivers/staging/vme/devices/vme_user.c
42419 @@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *, struct file *, unsigned int,
42420 static int __init vme_user_probe(struct device *, int, int);
42421 static int __exit vme_user_remove(struct device *, int, int);
42422
42423 -static struct file_operations vme_user_fops = {
42424 +static const struct file_operations vme_user_fops = {
42425 .open = vme_user_open,
42426 .release = vme_user_release,
42427 .read = vme_user_read,
42428 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
42429 index 58abf44..00c1fc8 100644
42430 --- a/drivers/staging/vt6655/hostap.c
42431 +++ b/drivers/staging/vt6655/hostap.c
42432 @@ -84,7 +84,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42433 PSDevice apdev_priv;
42434 struct net_device *dev = pDevice->dev;
42435 int ret;
42436 - const struct net_device_ops apdev_netdev_ops = {
42437 + net_device_ops_no_const apdev_netdev_ops = {
42438 .ndo_start_xmit = pDevice->tx_80211,
42439 };
42440
42441 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
42442 index 0c8267a..db1f363 100644
42443 --- a/drivers/staging/vt6656/hostap.c
42444 +++ b/drivers/staging/vt6656/hostap.c
42445 @@ -86,7 +86,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42446 PSDevice apdev_priv;
42447 struct net_device *dev = pDevice->dev;
42448 int ret;
42449 - const struct net_device_ops apdev_netdev_ops = {
42450 + net_device_ops_no_const apdev_netdev_ops = {
42451 .ndo_start_xmit = pDevice->tx_80211,
42452 };
42453
42454 diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
42455 index 925678b..da7f5ed 100644
42456 --- a/drivers/staging/wlan-ng/hfa384x_usb.c
42457 +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
42458 @@ -205,7 +205,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
42459
42460 struct usbctlx_completor {
42461 int (*complete) (struct usbctlx_completor *);
42462 -};
42463 +} __no_const;
42464 typedef struct usbctlx_completor usbctlx_completor_t;
42465
42466 static int
42467 diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
42468 index 40de151..924f268 100644
42469 --- a/drivers/telephony/ixj.c
42470 +++ b/drivers/telephony/ixj.c
42471 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
42472 bool mContinue;
42473 char *pIn, *pOut;
42474
42475 + pax_track_stack();
42476 +
42477 if (!SCI_Prepare(j))
42478 return 0;
42479
42480 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
42481 index e941367..b631f5a 100644
42482 --- a/drivers/uio/uio.c
42483 +++ b/drivers/uio/uio.c
42484 @@ -23,6 +23,7 @@
42485 #include <linux/string.h>
42486 #include <linux/kobject.h>
42487 #include <linux/uio_driver.h>
42488 +#include <asm/local.h>
42489
42490 #define UIO_MAX_DEVICES 255
42491
42492 @@ -30,10 +31,10 @@ struct uio_device {
42493 struct module *owner;
42494 struct device *dev;
42495 int minor;
42496 - atomic_t event;
42497 + atomic_unchecked_t event;
42498 struct fasync_struct *async_queue;
42499 wait_queue_head_t wait;
42500 - int vma_count;
42501 + local_t vma_count;
42502 struct uio_info *info;
42503 struct kobject *map_dir;
42504 struct kobject *portio_dir;
42505 @@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr,
42506 return entry->show(mem, buf);
42507 }
42508
42509 -static struct sysfs_ops map_sysfs_ops = {
42510 +static const struct sysfs_ops map_sysfs_ops = {
42511 .show = map_type_show,
42512 };
42513
42514 @@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct kobject *kobj, struct attribute *attr,
42515 return entry->show(port, buf);
42516 }
42517
42518 -static struct sysfs_ops portio_sysfs_ops = {
42519 +static const struct sysfs_ops portio_sysfs_ops = {
42520 .show = portio_type_show,
42521 };
42522
42523 @@ -255,7 +256,7 @@ static ssize_t show_event(struct device *dev,
42524 struct uio_device *idev = dev_get_drvdata(dev);
42525 if (idev)
42526 return sprintf(buf, "%u\n",
42527 - (unsigned int)atomic_read(&idev->event));
42528 + (unsigned int)atomic_read_unchecked(&idev->event));
42529 else
42530 return -ENODEV;
42531 }
42532 @@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *info)
42533 {
42534 struct uio_device *idev = info->uio_dev;
42535
42536 - atomic_inc(&idev->event);
42537 + atomic_inc_unchecked(&idev->event);
42538 wake_up_interruptible(&idev->wait);
42539 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
42540 }
42541 @@ -477,7 +478,7 @@ static int uio_open(struct inode *inode, struct file *filep)
42542 }
42543
42544 listener->dev = idev;
42545 - listener->event_count = atomic_read(&idev->event);
42546 + listener->event_count = atomic_read_unchecked(&idev->event);
42547 filep->private_data = listener;
42548
42549 if (idev->info->open) {
42550 @@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
42551 return -EIO;
42552
42553 poll_wait(filep, &idev->wait, wait);
42554 - if (listener->event_count != atomic_read(&idev->event))
42555 + if (listener->event_count != atomic_read_unchecked(&idev->event))
42556 return POLLIN | POLLRDNORM;
42557 return 0;
42558 }
42559 @@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
42560 do {
42561 set_current_state(TASK_INTERRUPTIBLE);
42562
42563 - event_count = atomic_read(&idev->event);
42564 + event_count = atomic_read_unchecked(&idev->event);
42565 if (event_count != listener->event_count) {
42566 if (copy_to_user(buf, &event_count, count))
42567 retval = -EFAULT;
42568 @@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
42569 static void uio_vma_open(struct vm_area_struct *vma)
42570 {
42571 struct uio_device *idev = vma->vm_private_data;
42572 - idev->vma_count++;
42573 + local_inc(&idev->vma_count);
42574 }
42575
42576 static void uio_vma_close(struct vm_area_struct *vma)
42577 {
42578 struct uio_device *idev = vma->vm_private_data;
42579 - idev->vma_count--;
42580 + local_dec(&idev->vma_count);
42581 }
42582
42583 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42584 @@ -840,7 +841,7 @@ int __uio_register_device(struct module *owner,
42585 idev->owner = owner;
42586 idev->info = info;
42587 init_waitqueue_head(&idev->wait);
42588 - atomic_set(&idev->event, 0);
42589 + atomic_set_unchecked(&idev->event, 0);
42590
42591 ret = uio_get_minor(idev);
42592 if (ret)
42593 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
42594 index fbea856..06efea6 100644
42595 --- a/drivers/usb/atm/usbatm.c
42596 +++ b/drivers/usb/atm/usbatm.c
42597 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42598 if (printk_ratelimit())
42599 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
42600 __func__, vpi, vci);
42601 - atomic_inc(&vcc->stats->rx_err);
42602 + atomic_inc_unchecked(&vcc->stats->rx_err);
42603 return;
42604 }
42605
42606 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42607 if (length > ATM_MAX_AAL5_PDU) {
42608 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
42609 __func__, length, vcc);
42610 - atomic_inc(&vcc->stats->rx_err);
42611 + atomic_inc_unchecked(&vcc->stats->rx_err);
42612 goto out;
42613 }
42614
42615 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42616 if (sarb->len < pdu_length) {
42617 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
42618 __func__, pdu_length, sarb->len, vcc);
42619 - atomic_inc(&vcc->stats->rx_err);
42620 + atomic_inc_unchecked(&vcc->stats->rx_err);
42621 goto out;
42622 }
42623
42624 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
42625 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
42626 __func__, vcc);
42627 - atomic_inc(&vcc->stats->rx_err);
42628 + atomic_inc_unchecked(&vcc->stats->rx_err);
42629 goto out;
42630 }
42631
42632 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42633 if (printk_ratelimit())
42634 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
42635 __func__, length);
42636 - atomic_inc(&vcc->stats->rx_drop);
42637 + atomic_inc_unchecked(&vcc->stats->rx_drop);
42638 goto out;
42639 }
42640
42641 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42642
42643 vcc->push(vcc, skb);
42644
42645 - atomic_inc(&vcc->stats->rx);
42646 + atomic_inc_unchecked(&vcc->stats->rx);
42647 out:
42648 skb_trim(sarb, 0);
42649 }
42650 @@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned long data)
42651 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
42652
42653 usbatm_pop(vcc, skb);
42654 - atomic_inc(&vcc->stats->tx);
42655 + atomic_inc_unchecked(&vcc->stats->tx);
42656
42657 skb = skb_dequeue(&instance->sndqueue);
42658 }
42659 @@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
42660 if (!left--)
42661 return sprintf(page,
42662 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
42663 - atomic_read(&atm_dev->stats.aal5.tx),
42664 - atomic_read(&atm_dev->stats.aal5.tx_err),
42665 - atomic_read(&atm_dev->stats.aal5.rx),
42666 - atomic_read(&atm_dev->stats.aal5.rx_err),
42667 - atomic_read(&atm_dev->stats.aal5.rx_drop));
42668 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
42669 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
42670 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
42671 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
42672 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
42673
42674 if (!left--) {
42675 if (instance->disconnected)
42676 diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
42677 index 24e6205..fe5a5d4 100644
42678 --- a/drivers/usb/core/hcd.c
42679 +++ b/drivers/usb/core/hcd.c
42680 @@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutdown);
42681
42682 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
42683
42684 -struct usb_mon_operations *mon_ops;
42685 +const struct usb_mon_operations *mon_ops;
42686
42687 /*
42688 * The registration is unlocked.
42689 @@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
42690 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
42691 */
42692
42693 -int usb_mon_register (struct usb_mon_operations *ops)
42694 +int usb_mon_register (const struct usb_mon_operations *ops)
42695 {
42696
42697 if (mon_ops)
42698 diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
42699 index bcbe104..9cfd1c6 100644
42700 --- a/drivers/usb/core/hcd.h
42701 +++ b/drivers/usb/core/hcd.h
42702 @@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) { }
42703 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
42704
42705 struct usb_mon_operations {
42706 - void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
42707 - void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
42708 - void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
42709 + void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
42710 + void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
42711 + void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
42712 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
42713 };
42714
42715 -extern struct usb_mon_operations *mon_ops;
42716 +extern const struct usb_mon_operations *mon_ops;
42717
42718 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
42719 {
42720 @@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb,
42721 (*mon_ops->urb_complete)(bus, urb, status);
42722 }
42723
42724 -int usb_mon_register(struct usb_mon_operations *ops);
42725 +int usb_mon_register(const struct usb_mon_operations *ops);
42726 void usb_mon_deregister(void);
42727
42728 #else
42729 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
42730 index 409cc94..a673bad 100644
42731 --- a/drivers/usb/core/message.c
42732 +++ b/drivers/usb/core/message.c
42733 @@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
42734 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
42735 if (buf) {
42736 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
42737 - if (len > 0) {
42738 - smallbuf = kmalloc(++len, GFP_NOIO);
42739 + if (len++ > 0) {
42740 + smallbuf = kmalloc(len, GFP_NOIO);
42741 if (!smallbuf)
42742 return buf;
42743 memcpy(smallbuf, buf, len);
42744 diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
42745 index 62ff5e7..530b74e 100644
42746 --- a/drivers/usb/misc/appledisplay.c
42747 +++ b/drivers/usb/misc/appledisplay.c
42748 @@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
42749 return pdata->msgdata[1];
42750 }
42751
42752 -static struct backlight_ops appledisplay_bl_data = {
42753 +static const struct backlight_ops appledisplay_bl_data = {
42754 .get_brightness = appledisplay_bl_get_brightness,
42755 .update_status = appledisplay_bl_update_status,
42756 };
42757 diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
42758 index e0c2db3..bd8cb66 100644
42759 --- a/drivers/usb/mon/mon_main.c
42760 +++ b/drivers/usb/mon/mon_main.c
42761 @@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
42762 /*
42763 * Ops
42764 */
42765 -static struct usb_mon_operations mon_ops_0 = {
42766 +static const struct usb_mon_operations mon_ops_0 = {
42767 .urb_submit = mon_submit,
42768 .urb_submit_error = mon_submit_error,
42769 .urb_complete = mon_complete,
42770 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
42771 index d6bea3e..60b250e 100644
42772 --- a/drivers/usb/wusbcore/wa-hc.h
42773 +++ b/drivers/usb/wusbcore/wa-hc.h
42774 @@ -192,7 +192,7 @@ struct wahc {
42775 struct list_head xfer_delayed_list;
42776 spinlock_t xfer_list_lock;
42777 struct work_struct xfer_work;
42778 - atomic_t xfer_id_count;
42779 + atomic_unchecked_t xfer_id_count;
42780 };
42781
42782
42783 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
42784 INIT_LIST_HEAD(&wa->xfer_delayed_list);
42785 spin_lock_init(&wa->xfer_list_lock);
42786 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
42787 - atomic_set(&wa->xfer_id_count, 1);
42788 + atomic_set_unchecked(&wa->xfer_id_count, 1);
42789 }
42790
42791 /**
42792 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
42793 index 613a5fc..3174865 100644
42794 --- a/drivers/usb/wusbcore/wa-xfer.c
42795 +++ b/drivers/usb/wusbcore/wa-xfer.c
42796 @@ -293,7 +293,7 @@ out:
42797 */
42798 static void wa_xfer_id_init(struct wa_xfer *xfer)
42799 {
42800 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
42801 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
42802 }
42803
42804 /*
42805 diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c
42806 index aa42fce..f8a828c 100644
42807 --- a/drivers/uwb/wlp/messages.c
42808 +++ b/drivers/uwb/wlp/messages.c
42809 @@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct sk_buff *skb)
42810 size_t len = skb->len;
42811 size_t used;
42812 ssize_t result;
42813 - struct wlp_nonce enonce, rnonce;
42814 + struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
42815 enum wlp_assc_error assc_err;
42816 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
42817 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
42818 diff --git a/drivers/uwb/wlp/sysfs.c b/drivers/uwb/wlp/sysfs.c
42819 index 0370399..6627c94 100644
42820 --- a/drivers/uwb/wlp/sysfs.c
42821 +++ b/drivers/uwb/wlp/sysfs.c
42822 @@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobject *kobj, struct attribute *attr,
42823 return ret;
42824 }
42825
42826 -static
42827 -struct sysfs_ops wss_sysfs_ops = {
42828 +static const struct sysfs_ops wss_sysfs_ops = {
42829 .show = wlp_wss_attr_show,
42830 .store = wlp_wss_attr_store,
42831 };
42832 diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
42833 index d5e8010..5687b56 100644
42834 --- a/drivers/video/atmel_lcdfb.c
42835 +++ b/drivers/video/atmel_lcdfb.c
42836 @@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struct backlight_device *bl)
42837 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
42838 }
42839
42840 -static struct backlight_ops atmel_lcdc_bl_ops = {
42841 +static const struct backlight_ops atmel_lcdc_bl_ops = {
42842 .update_status = atmel_bl_update_status,
42843 .get_brightness = atmel_bl_get_brightness,
42844 };
42845 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
42846 index e4e4d43..66bcbcc 100644
42847 --- a/drivers/video/aty/aty128fb.c
42848 +++ b/drivers/video/aty/aty128fb.c
42849 @@ -149,7 +149,7 @@ enum {
42850 };
42851
42852 /* Must match above enum */
42853 -static const char *r128_family[] __devinitdata = {
42854 +static const char *r128_family[] __devinitconst = {
42855 "AGP",
42856 "PCI",
42857 "PRO AGP",
42858 @@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(struct backlight_device *bd)
42859 return bd->props.brightness;
42860 }
42861
42862 -static struct backlight_ops aty128_bl_data = {
42863 +static const struct backlight_ops aty128_bl_data = {
42864 .get_brightness = aty128_bl_get_brightness,
42865 .update_status = aty128_bl_update_status,
42866 };
42867 diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
42868 index 913b4a4..9295a38 100644
42869 --- a/drivers/video/aty/atyfb_base.c
42870 +++ b/drivers/video/aty/atyfb_base.c
42871 @@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct backlight_device *bd)
42872 return bd->props.brightness;
42873 }
42874
42875 -static struct backlight_ops aty_bl_data = {
42876 +static const struct backlight_ops aty_bl_data = {
42877 .get_brightness = aty_bl_get_brightness,
42878 .update_status = aty_bl_update_status,
42879 };
42880 diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
42881 index 1a056ad..221bd6a 100644
42882 --- a/drivers/video/aty/radeon_backlight.c
42883 +++ b/drivers/video/aty/radeon_backlight.c
42884 @@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(struct backlight_device *bd)
42885 return bd->props.brightness;
42886 }
42887
42888 -static struct backlight_ops radeon_bl_data = {
42889 +static const struct backlight_ops radeon_bl_data = {
42890 .get_brightness = radeon_bl_get_brightness,
42891 .update_status = radeon_bl_update_status,
42892 };
42893 diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
42894 index ad05da5..3cb2cb9 100644
42895 --- a/drivers/video/backlight/adp5520_bl.c
42896 +++ b/drivers/video/backlight/adp5520_bl.c
42897 @@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(struct backlight_device *bl)
42898 return error ? data->current_brightness : reg_val;
42899 }
42900
42901 -static struct backlight_ops adp5520_bl_ops = {
42902 +static const struct backlight_ops adp5520_bl_ops = {
42903 .update_status = adp5520_bl_update_status,
42904 .get_brightness = adp5520_bl_get_brightness,
42905 };
42906 diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
42907 index 2c3bdfc..d769b0b 100644
42908 --- a/drivers/video/backlight/adx_bl.c
42909 +++ b/drivers/video/backlight/adx_bl.c
42910 @@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct fb_info *fb)
42911 return 1;
42912 }
42913
42914 -static struct backlight_ops adx_backlight_ops = {
42915 +static const struct backlight_ops adx_backlight_ops = {
42916 .options = 0,
42917 .update_status = adx_backlight_update_status,
42918 .get_brightness = adx_backlight_get_brightness,
42919 diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
42920 index 505c082..6b6b3cc 100644
42921 --- a/drivers/video/backlight/atmel-pwm-bl.c
42922 +++ b/drivers/video/backlight/atmel-pwm-bl.c
42923 @@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
42924 return pwm_channel_enable(&pwmbl->pwmc);
42925 }
42926
42927 -static struct backlight_ops atmel_pwm_bl_ops = {
42928 +static const struct backlight_ops atmel_pwm_bl_ops = {
42929 .get_brightness = atmel_pwm_bl_get_intensity,
42930 .update_status = atmel_pwm_bl_set_intensity,
42931 };
42932 diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
42933 index 5e20e6e..89025e6 100644
42934 --- a/drivers/video/backlight/backlight.c
42935 +++ b/drivers/video/backlight/backlight.c
42936 @@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
42937 * ERR_PTR() or a pointer to the newly allocated device.
42938 */
42939 struct backlight_device *backlight_device_register(const char *name,
42940 - struct device *parent, void *devdata, struct backlight_ops *ops)
42941 + struct device *parent, void *devdata, const struct backlight_ops *ops)
42942 {
42943 struct backlight_device *new_bd;
42944 int rc;
42945 diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
42946 index 9677494..b4bcf80 100644
42947 --- a/drivers/video/backlight/corgi_lcd.c
42948 +++ b/drivers/video/backlight/corgi_lcd.c
42949 @@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit)
42950 }
42951 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
42952
42953 -static struct backlight_ops corgi_bl_ops = {
42954 +static const struct backlight_ops corgi_bl_ops = {
42955 .get_brightness = corgi_bl_get_intensity,
42956 .update_status = corgi_bl_update_status,
42957 };
42958 diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
42959 index b9fe62b..2914bf1 100644
42960 --- a/drivers/video/backlight/cr_bllcd.c
42961 +++ b/drivers/video/backlight/cr_bllcd.c
42962 @@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(struct backlight_device *bd)
42963 return intensity;
42964 }
42965
42966 -static struct backlight_ops cr_backlight_ops = {
42967 +static const struct backlight_ops cr_backlight_ops = {
42968 .get_brightness = cr_backlight_get_intensity,
42969 .update_status = cr_backlight_set_intensity,
42970 };
42971 diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
42972 index 701a108..feacfd5 100644
42973 --- a/drivers/video/backlight/da903x_bl.c
42974 +++ b/drivers/video/backlight/da903x_bl.c
42975 @@ -94,7 +94,7 @@ static int da903x_backlight_get_brightness(struct backlight_device *bl)
42976 return data->current_brightness;
42977 }
42978
42979 -static struct backlight_ops da903x_backlight_ops = {
42980 +static const struct backlight_ops da903x_backlight_ops = {
42981 .update_status = da903x_backlight_update_status,
42982 .get_brightness = da903x_backlight_get_brightness,
42983 };
42984 diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
42985 index 6d27f62..e6d348e 100644
42986 --- a/drivers/video/backlight/generic_bl.c
42987 +++ b/drivers/video/backlight/generic_bl.c
42988 @@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
42989 }
42990 EXPORT_SYMBOL(corgibl_limit_intensity);
42991
42992 -static struct backlight_ops genericbl_ops = {
42993 +static const struct backlight_ops genericbl_ops = {
42994 .options = BL_CORE_SUSPENDRESUME,
42995 .get_brightness = genericbl_get_intensity,
42996 .update_status = genericbl_send_intensity,
42997 diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
42998 index 7fb4eef..f7cc528 100644
42999 --- a/drivers/video/backlight/hp680_bl.c
43000 +++ b/drivers/video/backlight/hp680_bl.c
43001 @@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct backlight_device *bd)
43002 return current_intensity;
43003 }
43004
43005 -static struct backlight_ops hp680bl_ops = {
43006 +static const struct backlight_ops hp680bl_ops = {
43007 .get_brightness = hp680bl_get_intensity,
43008 .update_status = hp680bl_set_intensity,
43009 };
43010 diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
43011 index 7aed256..db9071f 100644
43012 --- a/drivers/video/backlight/jornada720_bl.c
43013 +++ b/drivers/video/backlight/jornada720_bl.c
43014 @@ -93,7 +93,7 @@ out:
43015 return ret;
43016 }
43017
43018 -static struct backlight_ops jornada_bl_ops = {
43019 +static const struct backlight_ops jornada_bl_ops = {
43020 .get_brightness = jornada_bl_get_brightness,
43021 .update_status = jornada_bl_update_status,
43022 .options = BL_CORE_SUSPENDRESUME,
43023 diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
43024 index a38fda1..939e7b8 100644
43025 --- a/drivers/video/backlight/kb3886_bl.c
43026 +++ b/drivers/video/backlight/kb3886_bl.c
43027 @@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct backlight_device *bd)
43028 return kb3886bl_intensity;
43029 }
43030
43031 -static struct backlight_ops kb3886bl_ops = {
43032 +static const struct backlight_ops kb3886bl_ops = {
43033 .get_brightness = kb3886bl_get_intensity,
43034 .update_status = kb3886bl_send_intensity,
43035 };
43036 diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
43037 index 6b488b8..00a9591 100644
43038 --- a/drivers/video/backlight/locomolcd.c
43039 +++ b/drivers/video/backlight/locomolcd.c
43040 @@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struct backlight_device *bd)
43041 return current_intensity;
43042 }
43043
43044 -static struct backlight_ops locomobl_data = {
43045 +static const struct backlight_ops locomobl_data = {
43046 .get_brightness = locomolcd_get_intensity,
43047 .update_status = locomolcd_set_intensity,
43048 };
43049 diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
43050 index 99bdfa8..3dac448 100644
43051 --- a/drivers/video/backlight/mbp_nvidia_bl.c
43052 +++ b/drivers/video/backlight/mbp_nvidia_bl.c
43053 @@ -33,7 +33,7 @@ struct dmi_match_data {
43054 unsigned long iostart;
43055 unsigned long iolen;
43056 /* Backlight operations structure. */
43057 - struct backlight_ops backlight_ops;
43058 + const struct backlight_ops backlight_ops;
43059 };
43060
43061 /* Module parameters. */
43062 diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
43063 index cbad67e..3cf900e 100644
43064 --- a/drivers/video/backlight/omap1_bl.c
43065 +++ b/drivers/video/backlight/omap1_bl.c
43066 @@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct backlight_device *dev)
43067 return bl->current_intensity;
43068 }
43069
43070 -static struct backlight_ops omapbl_ops = {
43071 +static const struct backlight_ops omapbl_ops = {
43072 .get_brightness = omapbl_get_intensity,
43073 .update_status = omapbl_update_status,
43074 };
43075 diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
43076 index 9edaf24..075786e 100644
43077 --- a/drivers/video/backlight/progear_bl.c
43078 +++ b/drivers/video/backlight/progear_bl.c
43079 @@ -54,7 +54,7 @@ static int progearbl_get_intensity(struct backlight_device *bd)
43080 return intensity - HW_LEVEL_MIN;
43081 }
43082
43083 -static struct backlight_ops progearbl_ops = {
43084 +static const struct backlight_ops progearbl_ops = {
43085 .get_brightness = progearbl_get_intensity,
43086 .update_status = progearbl_set_intensity,
43087 };
43088 diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
43089 index 8871662..df9e0b3 100644
43090 --- a/drivers/video/backlight/pwm_bl.c
43091 +++ b/drivers/video/backlight/pwm_bl.c
43092 @@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(struct backlight_device *bl)
43093 return bl->props.brightness;
43094 }
43095
43096 -static struct backlight_ops pwm_backlight_ops = {
43097 +static const struct backlight_ops pwm_backlight_ops = {
43098 .update_status = pwm_backlight_update_status,
43099 .get_brightness = pwm_backlight_get_brightness,
43100 };
43101 diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
43102 index 43edbad..e14ce4d 100644
43103 --- a/drivers/video/backlight/tosa_bl.c
43104 +++ b/drivers/video/backlight/tosa_bl.c
43105 @@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct backlight_device *dev)
43106 return props->brightness;
43107 }
43108
43109 -static struct backlight_ops bl_ops = {
43110 +static const struct backlight_ops bl_ops = {
43111 .get_brightness = tosa_bl_get_brightness,
43112 .update_status = tosa_bl_update_status,
43113 };
43114 diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
43115 index 467bdb7..e32add3 100644
43116 --- a/drivers/video/backlight/wm831x_bl.c
43117 +++ b/drivers/video/backlight/wm831x_bl.c
43118 @@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightness(struct backlight_device *bl)
43119 return data->current_brightness;
43120 }
43121
43122 -static struct backlight_ops wm831x_backlight_ops = {
43123 +static const struct backlight_ops wm831x_backlight_ops = {
43124 .options = BL_CORE_SUSPENDRESUME,
43125 .update_status = wm831x_backlight_update_status,
43126 .get_brightness = wm831x_backlight_get_brightness,
43127 diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
43128 index e49ae5e..db4e6f7 100644
43129 --- a/drivers/video/bf54x-lq043fb.c
43130 +++ b/drivers/video/bf54x-lq043fb.c
43131 @@ -463,7 +463,7 @@ static int bl_get_brightness(struct backlight_device *bd)
43132 return 0;
43133 }
43134
43135 -static struct backlight_ops bfin_lq043fb_bl_ops = {
43136 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
43137 .get_brightness = bl_get_brightness,
43138 };
43139
43140 diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
43141 index 2c72a7c..d523e52 100644
43142 --- a/drivers/video/bfin-t350mcqb-fb.c
43143 +++ b/drivers/video/bfin-t350mcqb-fb.c
43144 @@ -381,7 +381,7 @@ static int bl_get_brightness(struct backlight_device *bd)
43145 return 0;
43146 }
43147
43148 -static struct backlight_ops bfin_lq043fb_bl_ops = {
43149 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
43150 .get_brightness = bl_get_brightness,
43151 };
43152
43153 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
43154 index f53b9f1..958bf4e 100644
43155 --- a/drivers/video/fbcmap.c
43156 +++ b/drivers/video/fbcmap.c
43157 @@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
43158 rc = -ENODEV;
43159 goto out;
43160 }
43161 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
43162 - !info->fbops->fb_setcmap)) {
43163 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
43164 rc = -EINVAL;
43165 goto out1;
43166 }
43167 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
43168 index 99bbd28..ad3829e 100644
43169 --- a/drivers/video/fbmem.c
43170 +++ b/drivers/video/fbmem.c
43171 @@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
43172 image->dx += image->width + 8;
43173 }
43174 } else if (rotate == FB_ROTATE_UD) {
43175 - for (x = 0; x < num && image->dx >= 0; x++) {
43176 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
43177 info->fbops->fb_imageblit(info, image);
43178 image->dx -= image->width + 8;
43179 }
43180 @@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
43181 image->dy += image->height + 8;
43182 }
43183 } else if (rotate == FB_ROTATE_CCW) {
43184 - for (x = 0; x < num && image->dy >= 0; x++) {
43185 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
43186 info->fbops->fb_imageblit(info, image);
43187 image->dy -= image->height + 8;
43188 }
43189 @@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
43190 int flags = info->flags;
43191 int ret = 0;
43192
43193 + pax_track_stack();
43194 +
43195 if (var->activate & FB_ACTIVATE_INV_MODE) {
43196 struct fb_videomode mode1, mode2;
43197
43198 @@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
43199 void __user *argp = (void __user *)arg;
43200 long ret = 0;
43201
43202 + pax_track_stack();
43203 +
43204 switch (cmd) {
43205 case FBIOGET_VSCREENINFO:
43206 if (!lock_fb_info(info))
43207 @@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
43208 return -EFAULT;
43209 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
43210 return -EINVAL;
43211 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
43212 + if (con2fb.framebuffer >= FB_MAX)
43213 return -EINVAL;
43214 if (!registered_fb[con2fb.framebuffer])
43215 request_module("fb%d", con2fb.framebuffer);
43216 diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
43217 index f20eff8..3e4f622 100644
43218 --- a/drivers/video/geode/gx1fb_core.c
43219 +++ b/drivers/video/geode/gx1fb_core.c
43220 @@ -30,7 +30,7 @@ static int crt_option = 1;
43221 static char panel_option[32] = "";
43222
43223 /* Modes relevant to the GX1 (taken from modedb.c) */
43224 -static const struct fb_videomode __initdata gx1_modedb[] = {
43225 +static const struct fb_videomode __initconst gx1_modedb[] = {
43226 /* 640x480-60 VESA */
43227 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
43228 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
43229 diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
43230 index 896e53d..4d87d0b 100644
43231 --- a/drivers/video/gxt4500.c
43232 +++ b/drivers/video/gxt4500.c
43233 @@ -156,7 +156,7 @@ struct gxt4500_par {
43234 static char *mode_option;
43235
43236 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
43237 -static const struct fb_videomode defaultmode __devinitdata = {
43238 +static const struct fb_videomode defaultmode __devinitconst = {
43239 .refresh = 60,
43240 .xres = 1280,
43241 .yres = 1024,
43242 @@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
43243 return 0;
43244 }
43245
43246 -static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
43247 +static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
43248 .id = "IBM GXT4500P",
43249 .type = FB_TYPE_PACKED_PIXELS,
43250 .visual = FB_VISUAL_PSEUDOCOLOR,
43251 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
43252 index f5bedee..28c6028 100644
43253 --- a/drivers/video/i810/i810_accel.c
43254 +++ b/drivers/video/i810/i810_accel.c
43255 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
43256 }
43257 }
43258 printk("ringbuffer lockup!!!\n");
43259 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
43260 i810_report_error(mmio);
43261 par->dev_flags |= LOCKUP;
43262 info->pixmap.scan_align = 1;
43263 diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
43264 index 5743ea2..457f82c 100644
43265 --- a/drivers/video/i810/i810_main.c
43266 +++ b/drivers/video/i810/i810_main.c
43267 @@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
43268 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
43269
43270 /* PCI */
43271 -static const char *i810_pci_list[] __devinitdata = {
43272 +static const char *i810_pci_list[] __devinitconst = {
43273 "Intel(R) 810 Framebuffer Device" ,
43274 "Intel(R) 810-DC100 Framebuffer Device" ,
43275 "Intel(R) 810E Framebuffer Device" ,
43276 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
43277 index 3c14e43..eafa544 100644
43278 --- a/drivers/video/logo/logo_linux_clut224.ppm
43279 +++ b/drivers/video/logo/logo_linux_clut224.ppm
43280 @@ -1,1604 +1,1123 @@
43281 P3
43282 -# Standard 224-color Linux logo
43283 80 80
43284 255
43285 - 0 0 0 0 0 0 0 0 0 0 0 0
43286 - 0 0 0 0 0 0 0 0 0 0 0 0
43287 - 0 0 0 0 0 0 0 0 0 0 0 0
43288 - 0 0 0 0 0 0 0 0 0 0 0 0
43289 - 0 0 0 0 0 0 0 0 0 0 0 0
43290 - 0 0 0 0 0 0 0 0 0 0 0 0
43291 - 0 0 0 0 0 0 0 0 0 0 0 0
43292 - 0 0 0 0 0 0 0 0 0 0 0 0
43293 - 0 0 0 0 0 0 0 0 0 0 0 0
43294 - 6 6 6 6 6 6 10 10 10 10 10 10
43295 - 10 10 10 6 6 6 6 6 6 6 6 6
43296 - 0 0 0 0 0 0 0 0 0 0 0 0
43297 - 0 0 0 0 0 0 0 0 0 0 0 0
43298 - 0 0 0 0 0 0 0 0 0 0 0 0
43299 - 0 0 0 0 0 0 0 0 0 0 0 0
43300 - 0 0 0 0 0 0 0 0 0 0 0 0
43301 - 0 0 0 0 0 0 0 0 0 0 0 0
43302 - 0 0 0 0 0 0 0 0 0 0 0 0
43303 - 0 0 0 0 0 0 0 0 0 0 0 0
43304 - 0 0 0 0 0 0 0 0 0 0 0 0
43305 - 0 0 0 0 0 0 0 0 0 0 0 0
43306 - 0 0 0 0 0 0 0 0 0 0 0 0
43307 - 0 0 0 0 0 0 0 0 0 0 0 0
43308 - 0 0 0 0 0 0 0 0 0 0 0 0
43309 - 0 0 0 0 0 0 0 0 0 0 0 0
43310 - 0 0 0 0 0 0 0 0 0 0 0 0
43311 - 0 0 0 0 0 0 0 0 0 0 0 0
43312 - 0 0 0 0 0 0 0 0 0 0 0 0
43313 - 0 0 0 6 6 6 10 10 10 14 14 14
43314 - 22 22 22 26 26 26 30 30 30 34 34 34
43315 - 30 30 30 30 30 30 26 26 26 18 18 18
43316 - 14 14 14 10 10 10 6 6 6 0 0 0
43317 - 0 0 0 0 0 0 0 0 0 0 0 0
43318 - 0 0 0 0 0 0 0 0 0 0 0 0
43319 - 0 0 0 0 0 0 0 0 0 0 0 0
43320 - 0 0 0 0 0 0 0 0 0 0 0 0
43321 - 0 0 0 0 0 0 0 0 0 0 0 0
43322 - 0 0 0 0 0 0 0 0 0 0 0 0
43323 - 0 0 0 0 0 0 0 0 0 0 0 0
43324 - 0 0 0 0 0 0 0 0 0 0 0 0
43325 - 0 0 0 0 0 0 0 0 0 0 0 0
43326 - 0 0 0 0 0 1 0 0 1 0 0 0
43327 - 0 0 0 0 0 0 0 0 0 0 0 0
43328 - 0 0 0 0 0 0 0 0 0 0 0 0
43329 - 0 0 0 0 0 0 0 0 0 0 0 0
43330 - 0 0 0 0 0 0 0 0 0 0 0 0
43331 - 0 0 0 0 0 0 0 0 0 0 0 0
43332 - 0 0 0 0 0 0 0 0 0 0 0 0
43333 - 6 6 6 14 14 14 26 26 26 42 42 42
43334 - 54 54 54 66 66 66 78 78 78 78 78 78
43335 - 78 78 78 74 74 74 66 66 66 54 54 54
43336 - 42 42 42 26 26 26 18 18 18 10 10 10
43337 - 6 6 6 0 0 0 0 0 0 0 0 0
43338 - 0 0 0 0 0 0 0 0 0 0 0 0
43339 - 0 0 0 0 0 0 0 0 0 0 0 0
43340 - 0 0 0 0 0 0 0 0 0 0 0 0
43341 - 0 0 0 0 0 0 0 0 0 0 0 0
43342 - 0 0 0 0 0 0 0 0 0 0 0 0
43343 - 0 0 0 0 0 0 0 0 0 0 0 0
43344 - 0 0 0 0 0 0 0 0 0 0 0 0
43345 - 0 0 0 0 0 0 0 0 0 0 0 0
43346 - 0 0 1 0 0 0 0 0 0 0 0 0
43347 - 0 0 0 0 0 0 0 0 0 0 0 0
43348 - 0 0 0 0 0 0 0 0 0 0 0 0
43349 - 0 0 0 0 0 0 0 0 0 0 0 0
43350 - 0 0 0 0 0 0 0 0 0 0 0 0
43351 - 0 0 0 0 0 0 0 0 0 0 0 0
43352 - 0 0 0 0 0 0 0 0 0 10 10 10
43353 - 22 22 22 42 42 42 66 66 66 86 86 86
43354 - 66 66 66 38 38 38 38 38 38 22 22 22
43355 - 26 26 26 34 34 34 54 54 54 66 66 66
43356 - 86 86 86 70 70 70 46 46 46 26 26 26
43357 - 14 14 14 6 6 6 0 0 0 0 0 0
43358 - 0 0 0 0 0 0 0 0 0 0 0 0
43359 - 0 0 0 0 0 0 0 0 0 0 0 0
43360 - 0 0 0 0 0 0 0 0 0 0 0 0
43361 - 0 0 0 0 0 0 0 0 0 0 0 0
43362 - 0 0 0 0 0 0 0 0 0 0 0 0
43363 - 0 0 0 0 0 0 0 0 0 0 0 0
43364 - 0 0 0 0 0 0 0 0 0 0 0 0
43365 - 0 0 0 0 0 0 0 0 0 0 0 0
43366 - 0 0 1 0 0 1 0 0 1 0 0 0
43367 - 0 0 0 0 0 0 0 0 0 0 0 0
43368 - 0 0 0 0 0 0 0 0 0 0 0 0
43369 - 0 0 0 0 0 0 0 0 0 0 0 0
43370 - 0 0 0 0 0 0 0 0 0 0 0 0
43371 - 0 0 0 0 0 0 0 0 0 0 0 0
43372 - 0 0 0 0 0 0 10 10 10 26 26 26
43373 - 50 50 50 82 82 82 58 58 58 6 6 6
43374 - 2 2 6 2 2 6 2 2 6 2 2 6
43375 - 2 2 6 2 2 6 2 2 6 2 2 6
43376 - 6 6 6 54 54 54 86 86 86 66 66 66
43377 - 38 38 38 18 18 18 6 6 6 0 0 0
43378 - 0 0 0 0 0 0 0 0 0 0 0 0
43379 - 0 0 0 0 0 0 0 0 0 0 0 0
43380 - 0 0 0 0 0 0 0 0 0 0 0 0
43381 - 0 0 0 0 0 0 0 0 0 0 0 0
43382 - 0 0 0 0 0 0 0 0 0 0 0 0
43383 - 0 0 0 0 0 0 0 0 0 0 0 0
43384 - 0 0 0 0 0 0 0 0 0 0 0 0
43385 - 0 0 0 0 0 0 0 0 0 0 0 0
43386 - 0 0 0 0 0 0 0 0 0 0 0 0
43387 - 0 0 0 0 0 0 0 0 0 0 0 0
43388 - 0 0 0 0 0 0 0 0 0 0 0 0
43389 - 0 0 0 0 0 0 0 0 0 0 0 0
43390 - 0 0 0 0 0 0 0 0 0 0 0 0
43391 - 0 0 0 0 0 0 0 0 0 0 0 0
43392 - 0 0 0 6 6 6 22 22 22 50 50 50
43393 - 78 78 78 34 34 34 2 2 6 2 2 6
43394 - 2 2 6 2 2 6 2 2 6 2 2 6
43395 - 2 2 6 2 2 6 2 2 6 2 2 6
43396 - 2 2 6 2 2 6 6 6 6 70 70 70
43397 - 78 78 78 46 46 46 22 22 22 6 6 6
43398 - 0 0 0 0 0 0 0 0 0 0 0 0
43399 - 0 0 0 0 0 0 0 0 0 0 0 0
43400 - 0 0 0 0 0 0 0 0 0 0 0 0
43401 - 0 0 0 0 0 0 0 0 0 0 0 0
43402 - 0 0 0 0 0 0 0 0 0 0 0 0
43403 - 0 0 0 0 0 0 0 0 0 0 0 0
43404 - 0 0 0 0 0 0 0 0 0 0 0 0
43405 - 0 0 0 0 0 0 0 0 0 0 0 0
43406 - 0 0 1 0 0 1 0 0 1 0 0 0
43407 - 0 0 0 0 0 0 0 0 0 0 0 0
43408 - 0 0 0 0 0 0 0 0 0 0 0 0
43409 - 0 0 0 0 0 0 0 0 0 0 0 0
43410 - 0 0 0 0 0 0 0 0 0 0 0 0
43411 - 0 0 0 0 0 0 0 0 0 0 0 0
43412 - 6 6 6 18 18 18 42 42 42 82 82 82
43413 - 26 26 26 2 2 6 2 2 6 2 2 6
43414 - 2 2 6 2 2 6 2 2 6 2 2 6
43415 - 2 2 6 2 2 6 2 2 6 14 14 14
43416 - 46 46 46 34 34 34 6 6 6 2 2 6
43417 - 42 42 42 78 78 78 42 42 42 18 18 18
43418 - 6 6 6 0 0 0 0 0 0 0 0 0
43419 - 0 0 0 0 0 0 0 0 0 0 0 0
43420 - 0 0 0 0 0 0 0 0 0 0 0 0
43421 - 0 0 0 0 0 0 0 0 0 0 0 0
43422 - 0 0 0 0 0 0 0 0 0 0 0 0
43423 - 0 0 0 0 0 0 0 0 0 0 0 0
43424 - 0 0 0 0 0 0 0 0 0 0 0 0
43425 - 0 0 0 0 0 0 0 0 0 0 0 0
43426 - 0 0 1 0 0 0 0 0 1 0 0 0
43427 - 0 0 0 0 0 0 0 0 0 0 0 0
43428 - 0 0 0 0 0 0 0 0 0 0 0 0
43429 - 0 0 0 0 0 0 0 0 0 0 0 0
43430 - 0 0 0 0 0 0 0 0 0 0 0 0
43431 - 0 0 0 0 0 0 0 0 0 0 0 0
43432 - 10 10 10 30 30 30 66 66 66 58 58 58
43433 - 2 2 6 2 2 6 2 2 6 2 2 6
43434 - 2 2 6 2 2 6 2 2 6 2 2 6
43435 - 2 2 6 2 2 6 2 2 6 26 26 26
43436 - 86 86 86 101 101 101 46 46 46 10 10 10
43437 - 2 2 6 58 58 58 70 70 70 34 34 34
43438 - 10 10 10 0 0 0 0 0 0 0 0 0
43439 - 0 0 0 0 0 0 0 0 0 0 0 0
43440 - 0 0 0 0 0 0 0 0 0 0 0 0
43441 - 0 0 0 0 0 0 0 0 0 0 0 0
43442 - 0 0 0 0 0 0 0 0 0 0 0 0
43443 - 0 0 0 0 0 0 0 0 0 0 0 0
43444 - 0 0 0 0 0 0 0 0 0 0 0 0
43445 - 0 0 0 0 0 0 0 0 0 0 0 0
43446 - 0 0 1 0 0 1 0 0 1 0 0 0
43447 - 0 0 0 0 0 0 0 0 0 0 0 0
43448 - 0 0 0 0 0 0 0 0 0 0 0 0
43449 - 0 0 0 0 0 0 0 0 0 0 0 0
43450 - 0 0 0 0 0 0 0 0 0 0 0 0
43451 - 0 0 0 0 0 0 0 0 0 0 0 0
43452 - 14 14 14 42 42 42 86 86 86 10 10 10
43453 - 2 2 6 2 2 6 2 2 6 2 2 6
43454 - 2 2 6 2 2 6 2 2 6 2 2 6
43455 - 2 2 6 2 2 6 2 2 6 30 30 30
43456 - 94 94 94 94 94 94 58 58 58 26 26 26
43457 - 2 2 6 6 6 6 78 78 78 54 54 54
43458 - 22 22 22 6 6 6 0 0 0 0 0 0
43459 - 0 0 0 0 0 0 0 0 0 0 0 0
43460 - 0 0 0 0 0 0 0 0 0 0 0 0
43461 - 0 0 0 0 0 0 0 0 0 0 0 0
43462 - 0 0 0 0 0 0 0 0 0 0 0 0
43463 - 0 0 0 0 0 0 0 0 0 0 0 0
43464 - 0 0 0 0 0 0 0 0 0 0 0 0
43465 - 0 0 0 0 0 0 0 0 0 0 0 0
43466 - 0 0 0 0 0 0 0 0 0 0 0 0
43467 - 0 0 0 0 0 0 0 0 0 0 0 0
43468 - 0 0 0 0 0 0 0 0 0 0 0 0
43469 - 0 0 0 0 0 0 0 0 0 0 0 0
43470 - 0 0 0 0 0 0 0 0 0 0 0 0
43471 - 0 0 0 0 0 0 0 0 0 6 6 6
43472 - 22 22 22 62 62 62 62 62 62 2 2 6
43473 - 2 2 6 2 2 6 2 2 6 2 2 6
43474 - 2 2 6 2 2 6 2 2 6 2 2 6
43475 - 2 2 6 2 2 6 2 2 6 26 26 26
43476 - 54 54 54 38 38 38 18 18 18 10 10 10
43477 - 2 2 6 2 2 6 34 34 34 82 82 82
43478 - 38 38 38 14 14 14 0 0 0 0 0 0
43479 - 0 0 0 0 0 0 0 0 0 0 0 0
43480 - 0 0 0 0 0 0 0 0 0 0 0 0
43481 - 0 0 0 0 0 0 0 0 0 0 0 0
43482 - 0 0 0 0 0 0 0 0 0 0 0 0
43483 - 0 0 0 0 0 0 0 0 0 0 0 0
43484 - 0 0 0 0 0 0 0 0 0 0 0 0
43485 - 0 0 0 0 0 0 0 0 0 0 0 0
43486 - 0 0 0 0 0 1 0 0 1 0 0 0
43487 - 0 0 0 0 0 0 0 0 0 0 0 0
43488 - 0 0 0 0 0 0 0 0 0 0 0 0
43489 - 0 0 0 0 0 0 0 0 0 0 0 0
43490 - 0 0 0 0 0 0 0 0 0 0 0 0
43491 - 0 0 0 0 0 0 0 0 0 6 6 6
43492 - 30 30 30 78 78 78 30 30 30 2 2 6
43493 - 2 2 6 2 2 6 2 2 6 2 2 6
43494 - 2 2 6 2 2 6 2 2 6 2 2 6
43495 - 2 2 6 2 2 6 2 2 6 10 10 10
43496 - 10 10 10 2 2 6 2 2 6 2 2 6
43497 - 2 2 6 2 2 6 2 2 6 78 78 78
43498 - 50 50 50 18 18 18 6 6 6 0 0 0
43499 - 0 0 0 0 0 0 0 0 0 0 0 0
43500 - 0 0 0 0 0 0 0 0 0 0 0 0
43501 - 0 0 0 0 0 0 0 0 0 0 0 0
43502 - 0 0 0 0 0 0 0 0 0 0 0 0
43503 - 0 0 0 0 0 0 0 0 0 0 0 0
43504 - 0 0 0 0 0 0 0 0 0 0 0 0
43505 - 0 0 0 0 0 0 0 0 0 0 0 0
43506 - 0 0 1 0 0 0 0 0 0 0 0 0
43507 - 0 0 0 0 0 0 0 0 0 0 0 0
43508 - 0 0 0 0 0 0 0 0 0 0 0 0
43509 - 0 0 0 0 0 0 0 0 0 0 0 0
43510 - 0 0 0 0 0 0 0 0 0 0 0 0
43511 - 0 0 0 0 0 0 0 0 0 10 10 10
43512 - 38 38 38 86 86 86 14 14 14 2 2 6
43513 - 2 2 6 2 2 6 2 2 6 2 2 6
43514 - 2 2 6 2 2 6 2 2 6 2 2 6
43515 - 2 2 6 2 2 6 2 2 6 2 2 6
43516 - 2 2 6 2 2 6 2 2 6 2 2 6
43517 - 2 2 6 2 2 6 2 2 6 54 54 54
43518 - 66 66 66 26 26 26 6 6 6 0 0 0
43519 - 0 0 0 0 0 0 0 0 0 0 0 0
43520 - 0 0 0 0 0 0 0 0 0 0 0 0
43521 - 0 0 0 0 0 0 0 0 0 0 0 0
43522 - 0 0 0 0 0 0 0 0 0 0 0 0
43523 - 0 0 0 0 0 0 0 0 0 0 0 0
43524 - 0 0 0 0 0 0 0 0 0 0 0 0
43525 - 0 0 0 0 0 0 0 0 0 0 0 0
43526 - 0 0 0 0 0 1 0 0 1 0 0 0
43527 - 0 0 0 0 0 0 0 0 0 0 0 0
43528 - 0 0 0 0 0 0 0 0 0 0 0 0
43529 - 0 0 0 0 0 0 0 0 0 0 0 0
43530 - 0 0 0 0 0 0 0 0 0 0 0 0
43531 - 0 0 0 0 0 0 0 0 0 14 14 14
43532 - 42 42 42 82 82 82 2 2 6 2 2 6
43533 - 2 2 6 6 6 6 10 10 10 2 2 6
43534 - 2 2 6 2 2 6 2 2 6 2 2 6
43535 - 2 2 6 2 2 6 2 2 6 6 6 6
43536 - 14 14 14 10 10 10 2 2 6 2 2 6
43537 - 2 2 6 2 2 6 2 2 6 18 18 18
43538 - 82 82 82 34 34 34 10 10 10 0 0 0
43539 - 0 0 0 0 0 0 0 0 0 0 0 0
43540 - 0 0 0 0 0 0 0 0 0 0 0 0
43541 - 0 0 0 0 0 0 0 0 0 0 0 0
43542 - 0 0 0 0 0 0 0 0 0 0 0 0
43543 - 0 0 0 0 0 0 0 0 0 0 0 0
43544 - 0 0 0 0 0 0 0 0 0 0 0 0
43545 - 0 0 0 0 0 0 0 0 0 0 0 0
43546 - 0 0 1 0 0 0 0 0 0 0 0 0
43547 - 0 0 0 0 0 0 0 0 0 0 0 0
43548 - 0 0 0 0 0 0 0 0 0 0 0 0
43549 - 0 0 0 0 0 0 0 0 0 0 0 0
43550 - 0 0 0 0 0 0 0 0 0 0 0 0
43551 - 0 0 0 0 0 0 0 0 0 14 14 14
43552 - 46 46 46 86 86 86 2 2 6 2 2 6
43553 - 6 6 6 6 6 6 22 22 22 34 34 34
43554 - 6 6 6 2 2 6 2 2 6 2 2 6
43555 - 2 2 6 2 2 6 18 18 18 34 34 34
43556 - 10 10 10 50 50 50 22 22 22 2 2 6
43557 - 2 2 6 2 2 6 2 2 6 10 10 10
43558 - 86 86 86 42 42 42 14 14 14 0 0 0
43559 - 0 0 0 0 0 0 0 0 0 0 0 0
43560 - 0 0 0 0 0 0 0 0 0 0 0 0
43561 - 0 0 0 0 0 0 0 0 0 0 0 0
43562 - 0 0 0 0 0 0 0 0 0 0 0 0
43563 - 0 0 0 0 0 0 0 0 0 0 0 0
43564 - 0 0 0 0 0 0 0 0 0 0 0 0
43565 - 0 0 0 0 0 0 0 0 0 0 0 0
43566 - 0 0 1 0 0 1 0 0 1 0 0 0
43567 - 0 0 0 0 0 0 0 0 0 0 0 0
43568 - 0 0 0 0 0 0 0 0 0 0 0 0
43569 - 0 0 0 0 0 0 0 0 0 0 0 0
43570 - 0 0 0 0 0 0 0 0 0 0 0 0
43571 - 0 0 0 0 0 0 0 0 0 14 14 14
43572 - 46 46 46 86 86 86 2 2 6 2 2 6
43573 - 38 38 38 116 116 116 94 94 94 22 22 22
43574 - 22 22 22 2 2 6 2 2 6 2 2 6
43575 - 14 14 14 86 86 86 138 138 138 162 162 162
43576 -154 154 154 38 38 38 26 26 26 6 6 6
43577 - 2 2 6 2 2 6 2 2 6 2 2 6
43578 - 86 86 86 46 46 46 14 14 14 0 0 0
43579 - 0 0 0 0 0 0 0 0 0 0 0 0
43580 - 0 0 0 0 0 0 0 0 0 0 0 0
43581 - 0 0 0 0 0 0 0 0 0 0 0 0
43582 - 0 0 0 0 0 0 0 0 0 0 0 0
43583 - 0 0 0 0 0 0 0 0 0 0 0 0
43584 - 0 0 0 0 0 0 0 0 0 0 0 0
43585 - 0 0 0 0 0 0 0 0 0 0 0 0
43586 - 0 0 0 0 0 0 0 0 0 0 0 0
43587 - 0 0 0 0 0 0 0 0 0 0 0 0
43588 - 0 0 0 0 0 0 0 0 0 0 0 0
43589 - 0 0 0 0 0 0 0 0 0 0 0 0
43590 - 0 0 0 0 0 0 0 0 0 0 0 0
43591 - 0 0 0 0 0 0 0 0 0 14 14 14
43592 - 46 46 46 86 86 86 2 2 6 14 14 14
43593 -134 134 134 198 198 198 195 195 195 116 116 116
43594 - 10 10 10 2 2 6 2 2 6 6 6 6
43595 -101 98 89 187 187 187 210 210 210 218 218 218
43596 -214 214 214 134 134 134 14 14 14 6 6 6
43597 - 2 2 6 2 2 6 2 2 6 2 2 6
43598 - 86 86 86 50 50 50 18 18 18 6 6 6
43599 - 0 0 0 0 0 0 0 0 0 0 0 0
43600 - 0 0 0 0 0 0 0 0 0 0 0 0
43601 - 0 0 0 0 0 0 0 0 0 0 0 0
43602 - 0 0 0 0 0 0 0 0 0 0 0 0
43603 - 0 0 0 0 0 0 0 0 0 0 0 0
43604 - 0 0 0 0 0 0 0 0 0 0 0 0
43605 - 0 0 0 0 0 0 0 0 1 0 0 0
43606 - 0 0 1 0 0 1 0 0 1 0 0 0
43607 - 0 0 0 0 0 0 0 0 0 0 0 0
43608 - 0 0 0 0 0 0 0 0 0 0 0 0
43609 - 0 0 0 0 0 0 0 0 0 0 0 0
43610 - 0 0 0 0 0 0 0 0 0 0 0 0
43611 - 0 0 0 0 0 0 0 0 0 14 14 14
43612 - 46 46 46 86 86 86 2 2 6 54 54 54
43613 -218 218 218 195 195 195 226 226 226 246 246 246
43614 - 58 58 58 2 2 6 2 2 6 30 30 30
43615 -210 210 210 253 253 253 174 174 174 123 123 123
43616 -221 221 221 234 234 234 74 74 74 2 2 6
43617 - 2 2 6 2 2 6 2 2 6 2 2 6
43618 - 70 70 70 58 58 58 22 22 22 6 6 6
43619 - 0 0 0 0 0 0 0 0 0 0 0 0
43620 - 0 0 0 0 0 0 0 0 0 0 0 0
43621 - 0 0 0 0 0 0 0 0 0 0 0 0
43622 - 0 0 0 0 0 0 0 0 0 0 0 0
43623 - 0 0 0 0 0 0 0 0 0 0 0 0
43624 - 0 0 0 0 0 0 0 0 0 0 0 0
43625 - 0 0 0 0 0 0 0 0 0 0 0 0
43626 - 0 0 0 0 0 0 0 0 0 0 0 0
43627 - 0 0 0 0 0 0 0 0 0 0 0 0
43628 - 0 0 0 0 0 0 0 0 0 0 0 0
43629 - 0 0 0 0 0 0 0 0 0 0 0 0
43630 - 0 0 0 0 0 0 0 0 0 0 0 0
43631 - 0 0 0 0 0 0 0 0 0 14 14 14
43632 - 46 46 46 82 82 82 2 2 6 106 106 106
43633 -170 170 170 26 26 26 86 86 86 226 226 226
43634 -123 123 123 10 10 10 14 14 14 46 46 46
43635 -231 231 231 190 190 190 6 6 6 70 70 70
43636 - 90 90 90 238 238 238 158 158 158 2 2 6
43637 - 2 2 6 2 2 6 2 2 6 2 2 6
43638 - 70 70 70 58 58 58 22 22 22 6 6 6
43639 - 0 0 0 0 0 0 0 0 0 0 0 0
43640 - 0 0 0 0 0 0 0 0 0 0 0 0
43641 - 0 0 0 0 0 0 0 0 0 0 0 0
43642 - 0 0 0 0 0 0 0 0 0 0 0 0
43643 - 0 0 0 0 0 0 0 0 0 0 0 0
43644 - 0 0 0 0 0 0 0 0 0 0 0 0
43645 - 0 0 0 0 0 0 0 0 1 0 0 0
43646 - 0 0 1 0 0 1 0 0 1 0 0 0
43647 - 0 0 0 0 0 0 0 0 0 0 0 0
43648 - 0 0 0 0 0 0 0 0 0 0 0 0
43649 - 0 0 0 0 0 0 0 0 0 0 0 0
43650 - 0 0 0 0 0 0 0 0 0 0 0 0
43651 - 0 0 0 0 0 0 0 0 0 14 14 14
43652 - 42 42 42 86 86 86 6 6 6 116 116 116
43653 -106 106 106 6 6 6 70 70 70 149 149 149
43654 -128 128 128 18 18 18 38 38 38 54 54 54
43655 -221 221 221 106 106 106 2 2 6 14 14 14
43656 - 46 46 46 190 190 190 198 198 198 2 2 6
43657 - 2 2 6 2 2 6 2 2 6 2 2 6
43658 - 74 74 74 62 62 62 22 22 22 6 6 6
43659 - 0 0 0 0 0 0 0 0 0 0 0 0
43660 - 0 0 0 0 0 0 0 0 0 0 0 0
43661 - 0 0 0 0 0 0 0 0 0 0 0 0
43662 - 0 0 0 0 0 0 0 0 0 0 0 0
43663 - 0 0 0 0 0 0 0 0 0 0 0 0
43664 - 0 0 0 0 0 0 0 0 0 0 0 0
43665 - 0 0 0 0 0 0 0 0 1 0 0 0
43666 - 0 0 1 0 0 0 0 0 1 0 0 0
43667 - 0 0 0 0 0 0 0 0 0 0 0 0
43668 - 0 0 0 0 0 0 0 0 0 0 0 0
43669 - 0 0 0 0 0 0 0 0 0 0 0 0
43670 - 0 0 0 0 0 0 0 0 0 0 0 0
43671 - 0 0 0 0 0 0 0 0 0 14 14 14
43672 - 42 42 42 94 94 94 14 14 14 101 101 101
43673 -128 128 128 2 2 6 18 18 18 116 116 116
43674 -118 98 46 121 92 8 121 92 8 98 78 10
43675 -162 162 162 106 106 106 2 2 6 2 2 6
43676 - 2 2 6 195 195 195 195 195 195 6 6 6
43677 - 2 2 6 2 2 6 2 2 6 2 2 6
43678 - 74 74 74 62 62 62 22 22 22 6 6 6
43679 - 0 0 0 0 0 0 0 0 0 0 0 0
43680 - 0 0 0 0 0 0 0 0 0 0 0 0
43681 - 0 0 0 0 0 0 0 0 0 0 0 0
43682 - 0 0 0 0 0 0 0 0 0 0 0 0
43683 - 0 0 0 0 0 0 0 0 0 0 0 0
43684 - 0 0 0 0 0 0 0 0 0 0 0 0
43685 - 0 0 0 0 0 0 0 0 1 0 0 1
43686 - 0 0 1 0 0 0 0 0 1 0 0 0
43687 - 0 0 0 0 0 0 0 0 0 0 0 0
43688 - 0 0 0 0 0 0 0 0 0 0 0 0
43689 - 0 0 0 0 0 0 0 0 0 0 0 0
43690 - 0 0 0 0 0 0 0 0 0 0 0 0
43691 - 0 0 0 0 0 0 0 0 0 10 10 10
43692 - 38 38 38 90 90 90 14 14 14 58 58 58
43693 -210 210 210 26 26 26 54 38 6 154 114 10
43694 -226 170 11 236 186 11 225 175 15 184 144 12
43695 -215 174 15 175 146 61 37 26 9 2 2 6
43696 - 70 70 70 246 246 246 138 138 138 2 2 6
43697 - 2 2 6 2 2 6 2 2 6 2 2 6
43698 - 70 70 70 66 66 66 26 26 26 6 6 6
43699 - 0 0 0 0 0 0 0 0 0 0 0 0
43700 - 0 0 0 0 0 0 0 0 0 0 0 0
43701 - 0 0 0 0 0 0 0 0 0 0 0 0
43702 - 0 0 0 0 0 0 0 0 0 0 0 0
43703 - 0 0 0 0 0 0 0 0 0 0 0 0
43704 - 0 0 0 0 0 0 0 0 0 0 0 0
43705 - 0 0 0 0 0 0 0 0 0 0 0 0
43706 - 0 0 0 0 0 0 0 0 0 0 0 0
43707 - 0 0 0 0 0 0 0 0 0 0 0 0
43708 - 0 0 0 0 0 0 0 0 0 0 0 0
43709 - 0 0 0 0 0 0 0 0 0 0 0 0
43710 - 0 0 0 0 0 0 0 0 0 0 0 0
43711 - 0 0 0 0 0 0 0 0 0 10 10 10
43712 - 38 38 38 86 86 86 14 14 14 10 10 10
43713 -195 195 195 188 164 115 192 133 9 225 175 15
43714 -239 182 13 234 190 10 232 195 16 232 200 30
43715 -245 207 45 241 208 19 232 195 16 184 144 12
43716 -218 194 134 211 206 186 42 42 42 2 2 6
43717 - 2 2 6 2 2 6 2 2 6 2 2 6
43718 - 50 50 50 74 74 74 30 30 30 6 6 6
43719 - 0 0 0 0 0 0 0 0 0 0 0 0
43720 - 0 0 0 0 0 0 0 0 0 0 0 0
43721 - 0 0 0 0 0 0 0 0 0 0 0 0
43722 - 0 0 0 0 0 0 0 0 0 0 0 0
43723 - 0 0 0 0 0 0 0 0 0 0 0 0
43724 - 0 0 0 0 0 0 0 0 0 0 0 0
43725 - 0 0 0 0 0 0 0 0 0 0 0 0
43726 - 0 0 0 0 0 0 0 0 0 0 0 0
43727 - 0 0 0 0 0 0 0 0 0 0 0 0
43728 - 0 0 0 0 0 0 0 0 0 0 0 0
43729 - 0 0 0 0 0 0 0 0 0 0 0 0
43730 - 0 0 0 0 0 0 0 0 0 0 0 0
43731 - 0 0 0 0 0 0 0 0 0 10 10 10
43732 - 34 34 34 86 86 86 14 14 14 2 2 6
43733 -121 87 25 192 133 9 219 162 10 239 182 13
43734 -236 186 11 232 195 16 241 208 19 244 214 54
43735 -246 218 60 246 218 38 246 215 20 241 208 19
43736 -241 208 19 226 184 13 121 87 25 2 2 6
43737 - 2 2 6 2 2 6 2 2 6 2 2 6
43738 - 50 50 50 82 82 82 34 34 34 10 10 10
43739 - 0 0 0 0 0 0 0 0 0 0 0 0
43740 - 0 0 0 0 0 0 0 0 0 0 0 0
43741 - 0 0 0 0 0 0 0 0 0 0 0 0
43742 - 0 0 0 0 0 0 0 0 0 0 0 0
43743 - 0 0 0 0 0 0 0 0 0 0 0 0
43744 - 0 0 0 0 0 0 0 0 0 0 0 0
43745 - 0 0 0 0 0 0 0 0 0 0 0 0
43746 - 0 0 0 0 0 0 0 0 0 0 0 0
43747 - 0 0 0 0 0 0 0 0 0 0 0 0
43748 - 0 0 0 0 0 0 0 0 0 0 0 0
43749 - 0 0 0 0 0 0 0 0 0 0 0 0
43750 - 0 0 0 0 0 0 0 0 0 0 0 0
43751 - 0 0 0 0 0 0 0 0 0 10 10 10
43752 - 34 34 34 82 82 82 30 30 30 61 42 6
43753 -180 123 7 206 145 10 230 174 11 239 182 13
43754 -234 190 10 238 202 15 241 208 19 246 218 74
43755 -246 218 38 246 215 20 246 215 20 246 215 20
43756 -226 184 13 215 174 15 184 144 12 6 6 6
43757 - 2 2 6 2 2 6 2 2 6 2 2 6
43758 - 26 26 26 94 94 94 42 42 42 14 14 14
43759 - 0 0 0 0 0 0 0 0 0 0 0 0
43760 - 0 0 0 0 0 0 0 0 0 0 0 0
43761 - 0 0 0 0 0 0 0 0 0 0 0 0
43762 - 0 0 0 0 0 0 0 0 0 0 0 0
43763 - 0 0 0 0 0 0 0 0 0 0 0 0
43764 - 0 0 0 0 0 0 0 0 0 0 0 0
43765 - 0 0 0 0 0 0 0 0 0 0 0 0
43766 - 0 0 0 0 0 0 0 0 0 0 0 0
43767 - 0 0 0 0 0 0 0 0 0 0 0 0
43768 - 0 0 0 0 0 0 0 0 0 0 0 0
43769 - 0 0 0 0 0 0 0 0 0 0 0 0
43770 - 0 0 0 0 0 0 0 0 0 0 0 0
43771 - 0 0 0 0 0 0 0 0 0 10 10 10
43772 - 30 30 30 78 78 78 50 50 50 104 69 6
43773 -192 133 9 216 158 10 236 178 12 236 186 11
43774 -232 195 16 241 208 19 244 214 54 245 215 43
43775 -246 215 20 246 215 20 241 208 19 198 155 10
43776 -200 144 11 216 158 10 156 118 10 2 2 6
43777 - 2 2 6 2 2 6 2 2 6 2 2 6
43778 - 6 6 6 90 90 90 54 54 54 18 18 18
43779 - 6 6 6 0 0 0 0 0 0 0 0 0
43780 - 0 0 0 0 0 0 0 0 0 0 0 0
43781 - 0 0 0 0 0 0 0 0 0 0 0 0
43782 - 0 0 0 0 0 0 0 0 0 0 0 0
43783 - 0 0 0 0 0 0 0 0 0 0 0 0
43784 - 0 0 0 0 0 0 0 0 0 0 0 0
43785 - 0 0 0 0 0 0 0 0 0 0 0 0
43786 - 0 0 0 0 0 0 0 0 0 0 0 0
43787 - 0 0 0 0 0 0 0 0 0 0 0 0
43788 - 0 0 0 0 0 0 0 0 0 0 0 0
43789 - 0 0 0 0 0 0 0 0 0 0 0 0
43790 - 0 0 0 0 0 0 0 0 0 0 0 0
43791 - 0 0 0 0 0 0 0 0 0 10 10 10
43792 - 30 30 30 78 78 78 46 46 46 22 22 22
43793 -137 92 6 210 162 10 239 182 13 238 190 10
43794 -238 202 15 241 208 19 246 215 20 246 215 20
43795 -241 208 19 203 166 17 185 133 11 210 150 10
43796 -216 158 10 210 150 10 102 78 10 2 2 6
43797 - 6 6 6 54 54 54 14 14 14 2 2 6
43798 - 2 2 6 62 62 62 74 74 74 30 30 30
43799 - 10 10 10 0 0 0 0 0 0 0 0 0
43800 - 0 0 0 0 0 0 0 0 0 0 0 0
43801 - 0 0 0 0 0 0 0 0 0 0 0 0
43802 - 0 0 0 0 0 0 0 0 0 0 0 0
43803 - 0 0 0 0 0 0 0 0 0 0 0 0
43804 - 0 0 0 0 0 0 0 0 0 0 0 0
43805 - 0 0 0 0 0 0 0 0 0 0 0 0
43806 - 0 0 0 0 0 0 0 0 0 0 0 0
43807 - 0 0 0 0 0 0 0 0 0 0 0 0
43808 - 0 0 0 0 0 0 0 0 0 0 0 0
43809 - 0 0 0 0 0 0 0 0 0 0 0 0
43810 - 0 0 0 0 0 0 0 0 0 0 0 0
43811 - 0 0 0 0 0 0 0 0 0 10 10 10
43812 - 34 34 34 78 78 78 50 50 50 6 6 6
43813 - 94 70 30 139 102 15 190 146 13 226 184 13
43814 -232 200 30 232 195 16 215 174 15 190 146 13
43815 -168 122 10 192 133 9 210 150 10 213 154 11
43816 -202 150 34 182 157 106 101 98 89 2 2 6
43817 - 2 2 6 78 78 78 116 116 116 58 58 58
43818 - 2 2 6 22 22 22 90 90 90 46 46 46
43819 - 18 18 18 6 6 6 0 0 0 0 0 0
43820 - 0 0 0 0 0 0 0 0 0 0 0 0
43821 - 0 0 0 0 0 0 0 0 0 0 0 0
43822 - 0 0 0 0 0 0 0 0 0 0 0 0
43823 - 0 0 0 0 0 0 0 0 0 0 0 0
43824 - 0 0 0 0 0 0 0 0 0 0 0 0
43825 - 0 0 0 0 0 0 0 0 0 0 0 0
43826 - 0 0 0 0 0 0 0 0 0 0 0 0
43827 - 0 0 0 0 0 0 0 0 0 0 0 0
43828 - 0 0 0 0 0 0 0 0 0 0 0 0
43829 - 0 0 0 0 0 0 0 0 0 0 0 0
43830 - 0 0 0 0 0 0 0 0 0 0 0 0
43831 - 0 0 0 0 0 0 0 0 0 10 10 10
43832 - 38 38 38 86 86 86 50 50 50 6 6 6
43833 -128 128 128 174 154 114 156 107 11 168 122 10
43834 -198 155 10 184 144 12 197 138 11 200 144 11
43835 -206 145 10 206 145 10 197 138 11 188 164 115
43836 -195 195 195 198 198 198 174 174 174 14 14 14
43837 - 2 2 6 22 22 22 116 116 116 116 116 116
43838 - 22 22 22 2 2 6 74 74 74 70 70 70
43839 - 30 30 30 10 10 10 0 0 0 0 0 0
43840 - 0 0 0 0 0 0 0 0 0 0 0 0
43841 - 0 0 0 0 0 0 0 0 0 0 0 0
43842 - 0 0 0 0 0 0 0 0 0 0 0 0
43843 - 0 0 0 0 0 0 0 0 0 0 0 0
43844 - 0 0 0 0 0 0 0 0 0 0 0 0
43845 - 0 0 0 0 0 0 0 0 0 0 0 0
43846 - 0 0 0 0 0 0 0 0 0 0 0 0
43847 - 0 0 0 0 0 0 0 0 0 0 0 0
43848 - 0 0 0 0 0 0 0 0 0 0 0 0
43849 - 0 0 0 0 0 0 0 0 0 0 0 0
43850 - 0 0 0 0 0 0 0 0 0 0 0 0
43851 - 0 0 0 0 0 0 6 6 6 18 18 18
43852 - 50 50 50 101 101 101 26 26 26 10 10 10
43853 -138 138 138 190 190 190 174 154 114 156 107 11
43854 -197 138 11 200 144 11 197 138 11 192 133 9
43855 -180 123 7 190 142 34 190 178 144 187 187 187
43856 -202 202 202 221 221 221 214 214 214 66 66 66
43857 - 2 2 6 2 2 6 50 50 50 62 62 62
43858 - 6 6 6 2 2 6 10 10 10 90 90 90
43859 - 50 50 50 18 18 18 6 6 6 0 0 0
43860 - 0 0 0 0 0 0 0 0 0 0 0 0
43861 - 0 0 0 0 0 0 0 0 0 0 0 0
43862 - 0 0 0 0 0 0 0 0 0 0 0 0
43863 - 0 0 0 0 0 0 0 0 0 0 0 0
43864 - 0 0 0 0 0 0 0 0 0 0 0 0
43865 - 0 0 0 0 0 0 0 0 0 0 0 0
43866 - 0 0 0 0 0 0 0 0 0 0 0 0
43867 - 0 0 0 0 0 0 0 0 0 0 0 0
43868 - 0 0 0 0 0 0 0 0 0 0 0 0
43869 - 0 0 0 0 0 0 0 0 0 0 0 0
43870 - 0 0 0 0 0 0 0 0 0 0 0 0
43871 - 0 0 0 0 0 0 10 10 10 34 34 34
43872 - 74 74 74 74 74 74 2 2 6 6 6 6
43873 -144 144 144 198 198 198 190 190 190 178 166 146
43874 -154 121 60 156 107 11 156 107 11 168 124 44
43875 -174 154 114 187 187 187 190 190 190 210 210 210
43876 -246 246 246 253 253 253 253 253 253 182 182 182
43877 - 6 6 6 2 2 6 2 2 6 2 2 6
43878 - 2 2 6 2 2 6 2 2 6 62 62 62
43879 - 74 74 74 34 34 34 14 14 14 0 0 0
43880 - 0 0 0 0 0 0 0 0 0 0 0 0
43881 - 0 0 0 0 0 0 0 0 0 0 0 0
43882 - 0 0 0 0 0 0 0 0 0 0 0 0
43883 - 0 0 0 0 0 0 0 0 0 0 0 0
43884 - 0 0 0 0 0 0 0 0 0 0 0 0
43885 - 0 0 0 0 0 0 0 0 0 0 0 0
43886 - 0 0 0 0 0 0 0 0 0 0 0 0
43887 - 0 0 0 0 0 0 0 0 0 0 0 0
43888 - 0 0 0 0 0 0 0 0 0 0 0 0
43889 - 0 0 0 0 0 0 0 0 0 0 0 0
43890 - 0 0 0 0 0 0 0 0 0 0 0 0
43891 - 0 0 0 10 10 10 22 22 22 54 54 54
43892 - 94 94 94 18 18 18 2 2 6 46 46 46
43893 -234 234 234 221 221 221 190 190 190 190 190 190
43894 -190 190 190 187 187 187 187 187 187 190 190 190
43895 -190 190 190 195 195 195 214 214 214 242 242 242
43896 -253 253 253 253 253 253 253 253 253 253 253 253
43897 - 82 82 82 2 2 6 2 2 6 2 2 6
43898 - 2 2 6 2 2 6 2 2 6 14 14 14
43899 - 86 86 86 54 54 54 22 22 22 6 6 6
43900 - 0 0 0 0 0 0 0 0 0 0 0 0
43901 - 0 0 0 0 0 0 0 0 0 0 0 0
43902 - 0 0 0 0 0 0 0 0 0 0 0 0
43903 - 0 0 0 0 0 0 0 0 0 0 0 0
43904 - 0 0 0 0 0 0 0 0 0 0 0 0
43905 - 0 0 0 0 0 0 0 0 0 0 0 0
43906 - 0 0 0 0 0 0 0 0 0 0 0 0
43907 - 0 0 0 0 0 0 0 0 0 0 0 0
43908 - 0 0 0 0 0 0 0 0 0 0 0 0
43909 - 0 0 0 0 0 0 0 0 0 0 0 0
43910 - 0 0 0 0 0 0 0 0 0 0 0 0
43911 - 6 6 6 18 18 18 46 46 46 90 90 90
43912 - 46 46 46 18 18 18 6 6 6 182 182 182
43913 -253 253 253 246 246 246 206 206 206 190 190 190
43914 -190 190 190 190 190 190 190 190 190 190 190 190
43915 -206 206 206 231 231 231 250 250 250 253 253 253
43916 -253 253 253 253 253 253 253 253 253 253 253 253
43917 -202 202 202 14 14 14 2 2 6 2 2 6
43918 - 2 2 6 2 2 6 2 2 6 2 2 6
43919 - 42 42 42 86 86 86 42 42 42 18 18 18
43920 - 6 6 6 0 0 0 0 0 0 0 0 0
43921 - 0 0 0 0 0 0 0 0 0 0 0 0
43922 - 0 0 0 0 0 0 0 0 0 0 0 0
43923 - 0 0 0 0 0 0 0 0 0 0 0 0
43924 - 0 0 0 0 0 0 0 0 0 0 0 0
43925 - 0 0 0 0 0 0 0 0 0 0 0 0
43926 - 0 0 0 0 0 0 0 0 0 0 0 0
43927 - 0 0 0 0 0 0 0 0 0 0 0 0
43928 - 0 0 0 0 0 0 0 0 0 0 0 0
43929 - 0 0 0 0 0 0 0 0 0 0 0 0
43930 - 0 0 0 0 0 0 0 0 0 6 6 6
43931 - 14 14 14 38 38 38 74 74 74 66 66 66
43932 - 2 2 6 6 6 6 90 90 90 250 250 250
43933 -253 253 253 253 253 253 238 238 238 198 198 198
43934 -190 190 190 190 190 190 195 195 195 221 221 221
43935 -246 246 246 253 253 253 253 253 253 253 253 253
43936 -253 253 253 253 253 253 253 253 253 253 253 253
43937 -253 253 253 82 82 82 2 2 6 2 2 6
43938 - 2 2 6 2 2 6 2 2 6 2 2 6
43939 - 2 2 6 78 78 78 70 70 70 34 34 34
43940 - 14 14 14 6 6 6 0 0 0 0 0 0
43941 - 0 0 0 0 0 0 0 0 0 0 0 0
43942 - 0 0 0 0 0 0 0 0 0 0 0 0
43943 - 0 0 0 0 0 0 0 0 0 0 0 0
43944 - 0 0 0 0 0 0 0 0 0 0 0 0
43945 - 0 0 0 0 0 0 0 0 0 0 0 0
43946 - 0 0 0 0 0 0 0 0 0 0 0 0
43947 - 0 0 0 0 0 0 0 0 0 0 0 0
43948 - 0 0 0 0 0 0 0 0 0 0 0 0
43949 - 0 0 0 0 0 0 0 0 0 0 0 0
43950 - 0 0 0 0 0 0 0 0 0 14 14 14
43951 - 34 34 34 66 66 66 78 78 78 6 6 6
43952 - 2 2 6 18 18 18 218 218 218 253 253 253
43953 -253 253 253 253 253 253 253 253 253 246 246 246
43954 -226 226 226 231 231 231 246 246 246 253 253 253
43955 -253 253 253 253 253 253 253 253 253 253 253 253
43956 -253 253 253 253 253 253 253 253 253 253 253 253
43957 -253 253 253 178 178 178 2 2 6 2 2 6
43958 - 2 2 6 2 2 6 2 2 6 2 2 6
43959 - 2 2 6 18 18 18 90 90 90 62 62 62
43960 - 30 30 30 10 10 10 0 0 0 0 0 0
43961 - 0 0 0 0 0 0 0 0 0 0 0 0
43962 - 0 0 0 0 0 0 0 0 0 0 0 0
43963 - 0 0 0 0 0 0 0 0 0 0 0 0
43964 - 0 0 0 0 0 0 0 0 0 0 0 0
43965 - 0 0 0 0 0 0 0 0 0 0 0 0
43966 - 0 0 0 0 0 0 0 0 0 0 0 0
43967 - 0 0 0 0 0 0 0 0 0 0 0 0
43968 - 0 0 0 0 0 0 0 0 0 0 0 0
43969 - 0 0 0 0 0 0 0 0 0 0 0 0
43970 - 0 0 0 0 0 0 10 10 10 26 26 26
43971 - 58 58 58 90 90 90 18 18 18 2 2 6
43972 - 2 2 6 110 110 110 253 253 253 253 253 253
43973 -253 253 253 253 253 253 253 253 253 253 253 253
43974 -250 250 250 253 253 253 253 253 253 253 253 253
43975 -253 253 253 253 253 253 253 253 253 253 253 253
43976 -253 253 253 253 253 253 253 253 253 253 253 253
43977 -253 253 253 231 231 231 18 18 18 2 2 6
43978 - 2 2 6 2 2 6 2 2 6 2 2 6
43979 - 2 2 6 2 2 6 18 18 18 94 94 94
43980 - 54 54 54 26 26 26 10 10 10 0 0 0
43981 - 0 0 0 0 0 0 0 0 0 0 0 0
43982 - 0 0 0 0 0 0 0 0 0 0 0 0
43983 - 0 0 0 0 0 0 0 0 0 0 0 0
43984 - 0 0 0 0 0 0 0 0 0 0 0 0
43985 - 0 0 0 0 0 0 0 0 0 0 0 0
43986 - 0 0 0 0 0 0 0 0 0 0 0 0
43987 - 0 0 0 0 0 0 0 0 0 0 0 0
43988 - 0 0 0 0 0 0 0 0 0 0 0 0
43989 - 0 0 0 0 0 0 0 0 0 0 0 0
43990 - 0 0 0 6 6 6 22 22 22 50 50 50
43991 - 90 90 90 26 26 26 2 2 6 2 2 6
43992 - 14 14 14 195 195 195 250 250 250 253 253 253
43993 -253 253 253 253 253 253 253 253 253 253 253 253
43994 -253 253 253 253 253 253 253 253 253 253 253 253
43995 -253 253 253 253 253 253 253 253 253 253 253 253
43996 -253 253 253 253 253 253 253 253 253 253 253 253
43997 -250 250 250 242 242 242 54 54 54 2 2 6
43998 - 2 2 6 2 2 6 2 2 6 2 2 6
43999 - 2 2 6 2 2 6 2 2 6 38 38 38
44000 - 86 86 86 50 50 50 22 22 22 6 6 6
44001 - 0 0 0 0 0 0 0 0 0 0 0 0
44002 - 0 0 0 0 0 0 0 0 0 0 0 0
44003 - 0 0 0 0 0 0 0 0 0 0 0 0
44004 - 0 0 0 0 0 0 0 0 0 0 0 0
44005 - 0 0 0 0 0 0 0 0 0 0 0 0
44006 - 0 0 0 0 0 0 0 0 0 0 0 0
44007 - 0 0 0 0 0 0 0 0 0 0 0 0
44008 - 0 0 0 0 0 0 0 0 0 0 0 0
44009 - 0 0 0 0 0 0 0 0 0 0 0 0
44010 - 6 6 6 14 14 14 38 38 38 82 82 82
44011 - 34 34 34 2 2 6 2 2 6 2 2 6
44012 - 42 42 42 195 195 195 246 246 246 253 253 253
44013 -253 253 253 253 253 253 253 253 253 250 250 250
44014 -242 242 242 242 242 242 250 250 250 253 253 253
44015 -253 253 253 253 253 253 253 253 253 253 253 253
44016 -253 253 253 250 250 250 246 246 246 238 238 238
44017 -226 226 226 231 231 231 101 101 101 6 6 6
44018 - 2 2 6 2 2 6 2 2 6 2 2 6
44019 - 2 2 6 2 2 6 2 2 6 2 2 6
44020 - 38 38 38 82 82 82 42 42 42 14 14 14
44021 - 6 6 6 0 0 0 0 0 0 0 0 0
44022 - 0 0 0 0 0 0 0 0 0 0 0 0
44023 - 0 0 0 0 0 0 0 0 0 0 0 0
44024 - 0 0 0 0 0 0 0 0 0 0 0 0
44025 - 0 0 0 0 0 0 0 0 0 0 0 0
44026 - 0 0 0 0 0 0 0 0 0 0 0 0
44027 - 0 0 0 0 0 0 0 0 0 0 0 0
44028 - 0 0 0 0 0 0 0 0 0 0 0 0
44029 - 0 0 0 0 0 0 0 0 0 0 0 0
44030 - 10 10 10 26 26 26 62 62 62 66 66 66
44031 - 2 2 6 2 2 6 2 2 6 6 6 6
44032 - 70 70 70 170 170 170 206 206 206 234 234 234
44033 -246 246 246 250 250 250 250 250 250 238 238 238
44034 -226 226 226 231 231 231 238 238 238 250 250 250
44035 -250 250 250 250 250 250 246 246 246 231 231 231
44036 -214 214 214 206 206 206 202 202 202 202 202 202
44037 -198 198 198 202 202 202 182 182 182 18 18 18
44038 - 2 2 6 2 2 6 2 2 6 2 2 6
44039 - 2 2 6 2 2 6 2 2 6 2 2 6
44040 - 2 2 6 62 62 62 66 66 66 30 30 30
44041 - 10 10 10 0 0 0 0 0 0 0 0 0
44042 - 0 0 0 0 0 0 0 0 0 0 0 0
44043 - 0 0 0 0 0 0 0 0 0 0 0 0
44044 - 0 0 0 0 0 0 0 0 0 0 0 0
44045 - 0 0 0 0 0 0 0 0 0 0 0 0
44046 - 0 0 0 0 0 0 0 0 0 0 0 0
44047 - 0 0 0 0 0 0 0 0 0 0 0 0
44048 - 0 0 0 0 0 0 0 0 0 0 0 0
44049 - 0 0 0 0 0 0 0 0 0 0 0 0
44050 - 14 14 14 42 42 42 82 82 82 18 18 18
44051 - 2 2 6 2 2 6 2 2 6 10 10 10
44052 - 94 94 94 182 182 182 218 218 218 242 242 242
44053 -250 250 250 253 253 253 253 253 253 250 250 250
44054 -234 234 234 253 253 253 253 253 253 253 253 253
44055 -253 253 253 253 253 253 253 253 253 246 246 246
44056 -238 238 238 226 226 226 210 210 210 202 202 202
44057 -195 195 195 195 195 195 210 210 210 158 158 158
44058 - 6 6 6 14 14 14 50 50 50 14 14 14
44059 - 2 2 6 2 2 6 2 2 6 2 2 6
44060 - 2 2 6 6 6 6 86 86 86 46 46 46
44061 - 18 18 18 6 6 6 0 0 0 0 0 0
44062 - 0 0 0 0 0 0 0 0 0 0 0 0
44063 - 0 0 0 0 0 0 0 0 0 0 0 0
44064 - 0 0 0 0 0 0 0 0 0 0 0 0
44065 - 0 0 0 0 0 0 0 0 0 0 0 0
44066 - 0 0 0 0 0 0 0 0 0 0 0 0
44067 - 0 0 0 0 0 0 0 0 0 0 0 0
44068 - 0 0 0 0 0 0 0 0 0 0 0 0
44069 - 0 0 0 0 0 0 0 0 0 6 6 6
44070 - 22 22 22 54 54 54 70 70 70 2 2 6
44071 - 2 2 6 10 10 10 2 2 6 22 22 22
44072 -166 166 166 231 231 231 250 250 250 253 253 253
44073 -253 253 253 253 253 253 253 253 253 250 250 250
44074 -242 242 242 253 253 253 253 253 253 253 253 253
44075 -253 253 253 253 253 253 253 253 253 253 253 253
44076 -253 253 253 253 253 253 253 253 253 246 246 246
44077 -231 231 231 206 206 206 198 198 198 226 226 226
44078 - 94 94 94 2 2 6 6 6 6 38 38 38
44079 - 30 30 30 2 2 6 2 2 6 2 2 6
44080 - 2 2 6 2 2 6 62 62 62 66 66 66
44081 - 26 26 26 10 10 10 0 0 0 0 0 0
44082 - 0 0 0 0 0 0 0 0 0 0 0 0
44083 - 0 0 0 0 0 0 0 0 0 0 0 0
44084 - 0 0 0 0 0 0 0 0 0 0 0 0
44085 - 0 0 0 0 0 0 0 0 0 0 0 0
44086 - 0 0 0 0 0 0 0 0 0 0 0 0
44087 - 0 0 0 0 0 0 0 0 0 0 0 0
44088 - 0 0 0 0 0 0 0 0 0 0 0 0
44089 - 0 0 0 0 0 0 0 0 0 10 10 10
44090 - 30 30 30 74 74 74 50 50 50 2 2 6
44091 - 26 26 26 26 26 26 2 2 6 106 106 106
44092 -238 238 238 253 253 253 253 253 253 253 253 253
44093 -253 253 253 253 253 253 253 253 253 253 253 253
44094 -253 253 253 253 253 253 253 253 253 253 253 253
44095 -253 253 253 253 253 253 253 253 253 253 253 253
44096 -253 253 253 253 253 253 253 253 253 253 253 253
44097 -253 253 253 246 246 246 218 218 218 202 202 202
44098 -210 210 210 14 14 14 2 2 6 2 2 6
44099 - 30 30 30 22 22 22 2 2 6 2 2 6
44100 - 2 2 6 2 2 6 18 18 18 86 86 86
44101 - 42 42 42 14 14 14 0 0 0 0 0 0
44102 - 0 0 0 0 0 0 0 0 0 0 0 0
44103 - 0 0 0 0 0 0 0 0 0 0 0 0
44104 - 0 0 0 0 0 0 0 0 0 0 0 0
44105 - 0 0 0 0 0 0 0 0 0 0 0 0
44106 - 0 0 0 0 0 0 0 0 0 0 0 0
44107 - 0 0 0 0 0 0 0 0 0 0 0 0
44108 - 0 0 0 0 0 0 0 0 0 0 0 0
44109 - 0 0 0 0 0 0 0 0 0 14 14 14
44110 - 42 42 42 90 90 90 22 22 22 2 2 6
44111 - 42 42 42 2 2 6 18 18 18 218 218 218
44112 -253 253 253 253 253 253 253 253 253 253 253 253
44113 -253 253 253 253 253 253 253 253 253 253 253 253
44114 -253 253 253 253 253 253 253 253 253 253 253 253
44115 -253 253 253 253 253 253 253 253 253 253 253 253
44116 -253 253 253 253 253 253 253 253 253 253 253 253
44117 -253 253 253 253 253 253 250 250 250 221 221 221
44118 -218 218 218 101 101 101 2 2 6 14 14 14
44119 - 18 18 18 38 38 38 10 10 10 2 2 6
44120 - 2 2 6 2 2 6 2 2 6 78 78 78
44121 - 58 58 58 22 22 22 6 6 6 0 0 0
44122 - 0 0 0 0 0 0 0 0 0 0 0 0
44123 - 0 0 0 0 0 0 0 0 0 0 0 0
44124 - 0 0 0 0 0 0 0 0 0 0 0 0
44125 - 0 0 0 0 0 0 0 0 0 0 0 0
44126 - 0 0 0 0 0 0 0 0 0 0 0 0
44127 - 0 0 0 0 0 0 0 0 0 0 0 0
44128 - 0 0 0 0 0 0 0 0 0 0 0 0
44129 - 0 0 0 0 0 0 6 6 6 18 18 18
44130 - 54 54 54 82 82 82 2 2 6 26 26 26
44131 - 22 22 22 2 2 6 123 123 123 253 253 253
44132 -253 253 253 253 253 253 253 253 253 253 253 253
44133 -253 253 253 253 253 253 253 253 253 253 253 253
44134 -253 253 253 253 253 253 253 253 253 253 253 253
44135 -253 253 253 253 253 253 253 253 253 253 253 253
44136 -253 253 253 253 253 253 253 253 253 253 253 253
44137 -253 253 253 253 253 253 253 253 253 250 250 250
44138 -238 238 238 198 198 198 6 6 6 38 38 38
44139 - 58 58 58 26 26 26 38 38 38 2 2 6
44140 - 2 2 6 2 2 6 2 2 6 46 46 46
44141 - 78 78 78 30 30 30 10 10 10 0 0 0
44142 - 0 0 0 0 0 0 0 0 0 0 0 0
44143 - 0 0 0 0 0 0 0 0 0 0 0 0
44144 - 0 0 0 0 0 0 0 0 0 0 0 0
44145 - 0 0 0 0 0 0 0 0 0 0 0 0
44146 - 0 0 0 0 0 0 0 0 0 0 0 0
44147 - 0 0 0 0 0 0 0 0 0 0 0 0
44148 - 0 0 0 0 0 0 0 0 0 0 0 0
44149 - 0 0 0 0 0 0 10 10 10 30 30 30
44150 - 74 74 74 58 58 58 2 2 6 42 42 42
44151 - 2 2 6 22 22 22 231 231 231 253 253 253
44152 -253 253 253 253 253 253 253 253 253 253 253 253
44153 -253 253 253 253 253 253 253 253 253 250 250 250
44154 -253 253 253 253 253 253 253 253 253 253 253 253
44155 -253 253 253 253 253 253 253 253 253 253 253 253
44156 -253 253 253 253 253 253 253 253 253 253 253 253
44157 -253 253 253 253 253 253 253 253 253 253 253 253
44158 -253 253 253 246 246 246 46 46 46 38 38 38
44159 - 42 42 42 14 14 14 38 38 38 14 14 14
44160 - 2 2 6 2 2 6 2 2 6 6 6 6
44161 - 86 86 86 46 46 46 14 14 14 0 0 0
44162 - 0 0 0 0 0 0 0 0 0 0 0 0
44163 - 0 0 0 0 0 0 0 0 0 0 0 0
44164 - 0 0 0 0 0 0 0 0 0 0 0 0
44165 - 0 0 0 0 0 0 0 0 0 0 0 0
44166 - 0 0 0 0 0 0 0 0 0 0 0 0
44167 - 0 0 0 0 0 0 0 0 0 0 0 0
44168 - 0 0 0 0 0 0 0 0 0 0 0 0
44169 - 0 0 0 6 6 6 14 14 14 42 42 42
44170 - 90 90 90 18 18 18 18 18 18 26 26 26
44171 - 2 2 6 116 116 116 253 253 253 253 253 253
44172 -253 253 253 253 253 253 253 253 253 253 253 253
44173 -253 253 253 253 253 253 250 250 250 238 238 238
44174 -253 253 253 253 253 253 253 253 253 253 253 253
44175 -253 253 253 253 253 253 253 253 253 253 253 253
44176 -253 253 253 253 253 253 253 253 253 253 253 253
44177 -253 253 253 253 253 253 253 253 253 253 253 253
44178 -253 253 253 253 253 253 94 94 94 6 6 6
44179 - 2 2 6 2 2 6 10 10 10 34 34 34
44180 - 2 2 6 2 2 6 2 2 6 2 2 6
44181 - 74 74 74 58 58 58 22 22 22 6 6 6
44182 - 0 0 0 0 0 0 0 0 0 0 0 0
44183 - 0 0 0 0 0 0 0 0 0 0 0 0
44184 - 0 0 0 0 0 0 0 0 0 0 0 0
44185 - 0 0 0 0 0 0 0 0 0 0 0 0
44186 - 0 0 0 0 0 0 0 0 0 0 0 0
44187 - 0 0 0 0 0 0 0 0 0 0 0 0
44188 - 0 0 0 0 0 0 0 0 0 0 0 0
44189 - 0 0 0 10 10 10 26 26 26 66 66 66
44190 - 82 82 82 2 2 6 38 38 38 6 6 6
44191 - 14 14 14 210 210 210 253 253 253 253 253 253
44192 -253 253 253 253 253 253 253 253 253 253 253 253
44193 -253 253 253 253 253 253 246 246 246 242 242 242
44194 -253 253 253 253 253 253 253 253 253 253 253 253
44195 -253 253 253 253 253 253 253 253 253 253 253 253
44196 -253 253 253 253 253 253 253 253 253 253 253 253
44197 -253 253 253 253 253 253 253 253 253 253 253 253
44198 -253 253 253 253 253 253 144 144 144 2 2 6
44199 - 2 2 6 2 2 6 2 2 6 46 46 46
44200 - 2 2 6 2 2 6 2 2 6 2 2 6
44201 - 42 42 42 74 74 74 30 30 30 10 10 10
44202 - 0 0 0 0 0 0 0 0 0 0 0 0
44203 - 0 0 0 0 0 0 0 0 0 0 0 0
44204 - 0 0 0 0 0 0 0 0 0 0 0 0
44205 - 0 0 0 0 0 0 0 0 0 0 0 0
44206 - 0 0 0 0 0 0 0 0 0 0 0 0
44207 - 0 0 0 0 0 0 0 0 0 0 0 0
44208 - 0 0 0 0 0 0 0 0 0 0 0 0
44209 - 6 6 6 14 14 14 42 42 42 90 90 90
44210 - 26 26 26 6 6 6 42 42 42 2 2 6
44211 - 74 74 74 250 250 250 253 253 253 253 253 253
44212 -253 253 253 253 253 253 253 253 253 253 253 253
44213 -253 253 253 253 253 253 242 242 242 242 242 242
44214 -253 253 253 253 253 253 253 253 253 253 253 253
44215 -253 253 253 253 253 253 253 253 253 253 253 253
44216 -253 253 253 253 253 253 253 253 253 253 253 253
44217 -253 253 253 253 253 253 253 253 253 253 253 253
44218 -253 253 253 253 253 253 182 182 182 2 2 6
44219 - 2 2 6 2 2 6 2 2 6 46 46 46
44220 - 2 2 6 2 2 6 2 2 6 2 2 6
44221 - 10 10 10 86 86 86 38 38 38 10 10 10
44222 - 0 0 0 0 0 0 0 0 0 0 0 0
44223 - 0 0 0 0 0 0 0 0 0 0 0 0
44224 - 0 0 0 0 0 0 0 0 0 0 0 0
44225 - 0 0 0 0 0 0 0 0 0 0 0 0
44226 - 0 0 0 0 0 0 0 0 0 0 0 0
44227 - 0 0 0 0 0 0 0 0 0 0 0 0
44228 - 0 0 0 0 0 0 0 0 0 0 0 0
44229 - 10 10 10 26 26 26 66 66 66 82 82 82
44230 - 2 2 6 22 22 22 18 18 18 2 2 6
44231 -149 149 149 253 253 253 253 253 253 253 253 253
44232 -253 253 253 253 253 253 253 253 253 253 253 253
44233 -253 253 253 253 253 253 234 234 234 242 242 242
44234 -253 253 253 253 253 253 253 253 253 253 253 253
44235 -253 253 253 253 253 253 253 253 253 253 253 253
44236 -253 253 253 253 253 253 253 253 253 253 253 253
44237 -253 253 253 253 253 253 253 253 253 253 253 253
44238 -253 253 253 253 253 253 206 206 206 2 2 6
44239 - 2 2 6 2 2 6 2 2 6 38 38 38
44240 - 2 2 6 2 2 6 2 2 6 2 2 6
44241 - 6 6 6 86 86 86 46 46 46 14 14 14
44242 - 0 0 0 0 0 0 0 0 0 0 0 0
44243 - 0 0 0 0 0 0 0 0 0 0 0 0
44244 - 0 0 0 0 0 0 0 0 0 0 0 0
44245 - 0 0 0 0 0 0 0 0 0 0 0 0
44246 - 0 0 0 0 0 0 0 0 0 0 0 0
44247 - 0 0 0 0 0 0 0 0 0 0 0 0
44248 - 0 0 0 0 0 0 0 0 0 6 6 6
44249 - 18 18 18 46 46 46 86 86 86 18 18 18
44250 - 2 2 6 34 34 34 10 10 10 6 6 6
44251 -210 210 210 253 253 253 253 253 253 253 253 253
44252 -253 253 253 253 253 253 253 253 253 253 253 253
44253 -253 253 253 253 253 253 234 234 234 242 242 242
44254 -253 253 253 253 253 253 253 253 253 253 253 253
44255 -253 253 253 253 253 253 253 253 253 253 253 253
44256 -253 253 253 253 253 253 253 253 253 253 253 253
44257 -253 253 253 253 253 253 253 253 253 253 253 253
44258 -253 253 253 253 253 253 221 221 221 6 6 6
44259 - 2 2 6 2 2 6 6 6 6 30 30 30
44260 - 2 2 6 2 2 6 2 2 6 2 2 6
44261 - 2 2 6 82 82 82 54 54 54 18 18 18
44262 - 6 6 6 0 0 0 0 0 0 0 0 0
44263 - 0 0 0 0 0 0 0 0 0 0 0 0
44264 - 0 0 0 0 0 0 0 0 0 0 0 0
44265 - 0 0 0 0 0 0 0 0 0 0 0 0
44266 - 0 0 0 0 0 0 0 0 0 0 0 0
44267 - 0 0 0 0 0 0 0 0 0 0 0 0
44268 - 0 0 0 0 0 0 0 0 0 10 10 10
44269 - 26 26 26 66 66 66 62 62 62 2 2 6
44270 - 2 2 6 38 38 38 10 10 10 26 26 26
44271 -238 238 238 253 253 253 253 253 253 253 253 253
44272 -253 253 253 253 253 253 253 253 253 253 253 253
44273 -253 253 253 253 253 253 231 231 231 238 238 238
44274 -253 253 253 253 253 253 253 253 253 253 253 253
44275 -253 253 253 253 253 253 253 253 253 253 253 253
44276 -253 253 253 253 253 253 253 253 253 253 253 253
44277 -253 253 253 253 253 253 253 253 253 253 253 253
44278 -253 253 253 253 253 253 231 231 231 6 6 6
44279 - 2 2 6 2 2 6 10 10 10 30 30 30
44280 - 2 2 6 2 2 6 2 2 6 2 2 6
44281 - 2 2 6 66 66 66 58 58 58 22 22 22
44282 - 6 6 6 0 0 0 0 0 0 0 0 0
44283 - 0 0 0 0 0 0 0 0 0 0 0 0
44284 - 0 0 0 0 0 0 0 0 0 0 0 0
44285 - 0 0 0 0 0 0 0 0 0 0 0 0
44286 - 0 0 0 0 0 0 0 0 0 0 0 0
44287 - 0 0 0 0 0 0 0 0 0 0 0 0
44288 - 0 0 0 0 0 0 0 0 0 10 10 10
44289 - 38 38 38 78 78 78 6 6 6 2 2 6
44290 - 2 2 6 46 46 46 14 14 14 42 42 42
44291 -246 246 246 253 253 253 253 253 253 253 253 253
44292 -253 253 253 253 253 253 253 253 253 253 253 253
44293 -253 253 253 253 253 253 231 231 231 242 242 242
44294 -253 253 253 253 253 253 253 253 253 253 253 253
44295 -253 253 253 253 253 253 253 253 253 253 253 253
44296 -253 253 253 253 253 253 253 253 253 253 253 253
44297 -253 253 253 253 253 253 253 253 253 253 253 253
44298 -253 253 253 253 253 253 234 234 234 10 10 10
44299 - 2 2 6 2 2 6 22 22 22 14 14 14
44300 - 2 2 6 2 2 6 2 2 6 2 2 6
44301 - 2 2 6 66 66 66 62 62 62 22 22 22
44302 - 6 6 6 0 0 0 0 0 0 0 0 0
44303 - 0 0 0 0 0 0 0 0 0 0 0 0
44304 - 0 0 0 0 0 0 0 0 0 0 0 0
44305 - 0 0 0 0 0 0 0 0 0 0 0 0
44306 - 0 0 0 0 0 0 0 0 0 0 0 0
44307 - 0 0 0 0 0 0 0 0 0 0 0 0
44308 - 0 0 0 0 0 0 6 6 6 18 18 18
44309 - 50 50 50 74 74 74 2 2 6 2 2 6
44310 - 14 14 14 70 70 70 34 34 34 62 62 62
44311 -250 250 250 253 253 253 253 253 253 253 253 253
44312 -253 253 253 253 253 253 253 253 253 253 253 253
44313 -253 253 253 253 253 253 231 231 231 246 246 246
44314 -253 253 253 253 253 253 253 253 253 253 253 253
44315 -253 253 253 253 253 253 253 253 253 253 253 253
44316 -253 253 253 253 253 253 253 253 253 253 253 253
44317 -253 253 253 253 253 253 253 253 253 253 253 253
44318 -253 253 253 253 253 253 234 234 234 14 14 14
44319 - 2 2 6 2 2 6 30 30 30 2 2 6
44320 - 2 2 6 2 2 6 2 2 6 2 2 6
44321 - 2 2 6 66 66 66 62 62 62 22 22 22
44322 - 6 6 6 0 0 0 0 0 0 0 0 0
44323 - 0 0 0 0 0 0 0 0 0 0 0 0
44324 - 0 0 0 0 0 0 0 0 0 0 0 0
44325 - 0 0 0 0 0 0 0 0 0 0 0 0
44326 - 0 0 0 0 0 0 0 0 0 0 0 0
44327 - 0 0 0 0 0 0 0 0 0 0 0 0
44328 - 0 0 0 0 0 0 6 6 6 18 18 18
44329 - 54 54 54 62 62 62 2 2 6 2 2 6
44330 - 2 2 6 30 30 30 46 46 46 70 70 70
44331 -250 250 250 253 253 253 253 253 253 253 253 253
44332 -253 253 253 253 253 253 253 253 253 253 253 253
44333 -253 253 253 253 253 253 231 231 231 246 246 246
44334 -253 253 253 253 253 253 253 253 253 253 253 253
44335 -253 253 253 253 253 253 253 253 253 253 253 253
44336 -253 253 253 253 253 253 253 253 253 253 253 253
44337 -253 253 253 253 253 253 253 253 253 253 253 253
44338 -253 253 253 253 253 253 226 226 226 10 10 10
44339 - 2 2 6 6 6 6 30 30 30 2 2 6
44340 - 2 2 6 2 2 6 2 2 6 2 2 6
44341 - 2 2 6 66 66 66 58 58 58 22 22 22
44342 - 6 6 6 0 0 0 0 0 0 0 0 0
44343 - 0 0 0 0 0 0 0 0 0 0 0 0
44344 - 0 0 0 0 0 0 0 0 0 0 0 0
44345 - 0 0 0 0 0 0 0 0 0 0 0 0
44346 - 0 0 0 0 0 0 0 0 0 0 0 0
44347 - 0 0 0 0 0 0 0 0 0 0 0 0
44348 - 0 0 0 0 0 0 6 6 6 22 22 22
44349 - 58 58 58 62 62 62 2 2 6 2 2 6
44350 - 2 2 6 2 2 6 30 30 30 78 78 78
44351 -250 250 250 253 253 253 253 253 253 253 253 253
44352 -253 253 253 253 253 253 253 253 253 253 253 253
44353 -253 253 253 253 253 253 231 231 231 246 246 246
44354 -253 253 253 253 253 253 253 253 253 253 253 253
44355 -253 253 253 253 253 253 253 253 253 253 253 253
44356 -253 253 253 253 253 253 253 253 253 253 253 253
44357 -253 253 253 253 253 253 253 253 253 253 253 253
44358 -253 253 253 253 253 253 206 206 206 2 2 6
44359 - 22 22 22 34 34 34 18 14 6 22 22 22
44360 - 26 26 26 18 18 18 6 6 6 2 2 6
44361 - 2 2 6 82 82 82 54 54 54 18 18 18
44362 - 6 6 6 0 0 0 0 0 0 0 0 0
44363 - 0 0 0 0 0 0 0 0 0 0 0 0
44364 - 0 0 0 0 0 0 0 0 0 0 0 0
44365 - 0 0 0 0 0 0 0 0 0 0 0 0
44366 - 0 0 0 0 0 0 0 0 0 0 0 0
44367 - 0 0 0 0 0 0 0 0 0 0 0 0
44368 - 0 0 0 0 0 0 6 6 6 26 26 26
44369 - 62 62 62 106 106 106 74 54 14 185 133 11
44370 -210 162 10 121 92 8 6 6 6 62 62 62
44371 -238 238 238 253 253 253 253 253 253 253 253 253
44372 -253 253 253 253 253 253 253 253 253 253 253 253
44373 -253 253 253 253 253 253 231 231 231 246 246 246
44374 -253 253 253 253 253 253 253 253 253 253 253 253
44375 -253 253 253 253 253 253 253 253 253 253 253 253
44376 -253 253 253 253 253 253 253 253 253 253 253 253
44377 -253 253 253 253 253 253 253 253 253 253 253 253
44378 -253 253 253 253 253 253 158 158 158 18 18 18
44379 - 14 14 14 2 2 6 2 2 6 2 2 6
44380 - 6 6 6 18 18 18 66 66 66 38 38 38
44381 - 6 6 6 94 94 94 50 50 50 18 18 18
44382 - 6 6 6 0 0 0 0 0 0 0 0 0
44383 - 0 0 0 0 0 0 0 0 0 0 0 0
44384 - 0 0 0 0 0 0 0 0 0 0 0 0
44385 - 0 0 0 0 0 0 0 0 0 0 0 0
44386 - 0 0 0 0 0 0 0 0 0 0 0 0
44387 - 0 0 0 0 0 0 0 0 0 6 6 6
44388 - 10 10 10 10 10 10 18 18 18 38 38 38
44389 - 78 78 78 142 134 106 216 158 10 242 186 14
44390 -246 190 14 246 190 14 156 118 10 10 10 10
44391 - 90 90 90 238 238 238 253 253 253 253 253 253
44392 -253 253 253 253 253 253 253 253 253 253 253 253
44393 -253 253 253 253 253 253 231 231 231 250 250 250
44394 -253 253 253 253 253 253 253 253 253 253 253 253
44395 -253 253 253 253 253 253 253 253 253 253 253 253
44396 -253 253 253 253 253 253 253 253 253 253 253 253
44397 -253 253 253 253 253 253 253 253 253 246 230 190
44398 -238 204 91 238 204 91 181 142 44 37 26 9
44399 - 2 2 6 2 2 6 2 2 6 2 2 6
44400 - 2 2 6 2 2 6 38 38 38 46 46 46
44401 - 26 26 26 106 106 106 54 54 54 18 18 18
44402 - 6 6 6 0 0 0 0 0 0 0 0 0
44403 - 0 0 0 0 0 0 0 0 0 0 0 0
44404 - 0 0 0 0 0 0 0 0 0 0 0 0
44405 - 0 0 0 0 0 0 0 0 0 0 0 0
44406 - 0 0 0 0 0 0 0 0 0 0 0 0
44407 - 0 0 0 6 6 6 14 14 14 22 22 22
44408 - 30 30 30 38 38 38 50 50 50 70 70 70
44409 -106 106 106 190 142 34 226 170 11 242 186 14
44410 -246 190 14 246 190 14 246 190 14 154 114 10
44411 - 6 6 6 74 74 74 226 226 226 253 253 253
44412 -253 253 253 253 253 253 253 253 253 253 253 253
44413 -253 253 253 253 253 253 231 231 231 250 250 250
44414 -253 253 253 253 253 253 253 253 253 253 253 253
44415 -253 253 253 253 253 253 253 253 253 253 253 253
44416 -253 253 253 253 253 253 253 253 253 253 253 253
44417 -253 253 253 253 253 253 253 253 253 228 184 62
44418 -241 196 14 241 208 19 232 195 16 38 30 10
44419 - 2 2 6 2 2 6 2 2 6 2 2 6
44420 - 2 2 6 6 6 6 30 30 30 26 26 26
44421 -203 166 17 154 142 90 66 66 66 26 26 26
44422 - 6 6 6 0 0 0 0 0 0 0 0 0
44423 - 0 0 0 0 0 0 0 0 0 0 0 0
44424 - 0 0 0 0 0 0 0 0 0 0 0 0
44425 - 0 0 0 0 0 0 0 0 0 0 0 0
44426 - 0 0 0 0 0 0 0 0 0 0 0 0
44427 - 6 6 6 18 18 18 38 38 38 58 58 58
44428 - 78 78 78 86 86 86 101 101 101 123 123 123
44429 -175 146 61 210 150 10 234 174 13 246 186 14
44430 -246 190 14 246 190 14 246 190 14 238 190 10
44431 -102 78 10 2 2 6 46 46 46 198 198 198
44432 -253 253 253 253 253 253 253 253 253 253 253 253
44433 -253 253 253 253 253 253 234 234 234 242 242 242
44434 -253 253 253 253 253 253 253 253 253 253 253 253
44435 -253 253 253 253 253 253 253 253 253 253 253 253
44436 -253 253 253 253 253 253 253 253 253 253 253 253
44437 -253 253 253 253 253 253 253 253 253 224 178 62
44438 -242 186 14 241 196 14 210 166 10 22 18 6
44439 - 2 2 6 2 2 6 2 2 6 2 2 6
44440 - 2 2 6 2 2 6 6 6 6 121 92 8
44441 -238 202 15 232 195 16 82 82 82 34 34 34
44442 - 10 10 10 0 0 0 0 0 0 0 0 0
44443 - 0 0 0 0 0 0 0 0 0 0 0 0
44444 - 0 0 0 0 0 0 0 0 0 0 0 0
44445 - 0 0 0 0 0 0 0 0 0 0 0 0
44446 - 0 0 0 0 0 0 0 0 0 0 0 0
44447 - 14 14 14 38 38 38 70 70 70 154 122 46
44448 -190 142 34 200 144 11 197 138 11 197 138 11
44449 -213 154 11 226 170 11 242 186 14 246 190 14
44450 -246 190 14 246 190 14 246 190 14 246 190 14
44451 -225 175 15 46 32 6 2 2 6 22 22 22
44452 -158 158 158 250 250 250 253 253 253 253 253 253
44453 -253 253 253 253 253 253 253 253 253 253 253 253
44454 -253 253 253 253 253 253 253 253 253 253 253 253
44455 -253 253 253 253 253 253 253 253 253 253 253 253
44456 -253 253 253 253 253 253 253 253 253 253 253 253
44457 -253 253 253 250 250 250 242 242 242 224 178 62
44458 -239 182 13 236 186 11 213 154 11 46 32 6
44459 - 2 2 6 2 2 6 2 2 6 2 2 6
44460 - 2 2 6 2 2 6 61 42 6 225 175 15
44461 -238 190 10 236 186 11 112 100 78 42 42 42
44462 - 14 14 14 0 0 0 0 0 0 0 0 0
44463 - 0 0 0 0 0 0 0 0 0 0 0 0
44464 - 0 0 0 0 0 0 0 0 0 0 0 0
44465 - 0 0 0 0 0 0 0 0 0 0 0 0
44466 - 0 0 0 0 0 0 0 0 0 6 6 6
44467 - 22 22 22 54 54 54 154 122 46 213 154 11
44468 -226 170 11 230 174 11 226 170 11 226 170 11
44469 -236 178 12 242 186 14 246 190 14 246 190 14
44470 -246 190 14 246 190 14 246 190 14 246 190 14
44471 -241 196 14 184 144 12 10 10 10 2 2 6
44472 - 6 6 6 116 116 116 242 242 242 253 253 253
44473 -253 253 253 253 253 253 253 253 253 253 253 253
44474 -253 253 253 253 253 253 253 253 253 253 253 253
44475 -253 253 253 253 253 253 253 253 253 253 253 253
44476 -253 253 253 253 253 253 253 253 253 253 253 253
44477 -253 253 253 231 231 231 198 198 198 214 170 54
44478 -236 178 12 236 178 12 210 150 10 137 92 6
44479 - 18 14 6 2 2 6 2 2 6 2 2 6
44480 - 6 6 6 70 47 6 200 144 11 236 178 12
44481 -239 182 13 239 182 13 124 112 88 58 58 58
44482 - 22 22 22 6 6 6 0 0 0 0 0 0
44483 - 0 0 0 0 0 0 0 0 0 0 0 0
44484 - 0 0 0 0 0 0 0 0 0 0 0 0
44485 - 0 0 0 0 0 0 0 0 0 0 0 0
44486 - 0 0 0 0 0 0 0 0 0 10 10 10
44487 - 30 30 30 70 70 70 180 133 36 226 170 11
44488 -239 182 13 242 186 14 242 186 14 246 186 14
44489 -246 190 14 246 190 14 246 190 14 246 190 14
44490 -246 190 14 246 190 14 246 190 14 246 190 14
44491 -246 190 14 232 195 16 98 70 6 2 2 6
44492 - 2 2 6 2 2 6 66 66 66 221 221 221
44493 -253 253 253 253 253 253 253 253 253 253 253 253
44494 -253 253 253 253 253 253 253 253 253 253 253 253
44495 -253 253 253 253 253 253 253 253 253 253 253 253
44496 -253 253 253 253 253 253 253 253 253 253 253 253
44497 -253 253 253 206 206 206 198 198 198 214 166 58
44498 -230 174 11 230 174 11 216 158 10 192 133 9
44499 -163 110 8 116 81 8 102 78 10 116 81 8
44500 -167 114 7 197 138 11 226 170 11 239 182 13
44501 -242 186 14 242 186 14 162 146 94 78 78 78
44502 - 34 34 34 14 14 14 6 6 6 0 0 0
44503 - 0 0 0 0 0 0 0 0 0 0 0 0
44504 - 0 0 0 0 0 0 0 0 0 0 0 0
44505 - 0 0 0 0 0 0 0 0 0 0 0 0
44506 - 0 0 0 0 0 0 0 0 0 6 6 6
44507 - 30 30 30 78 78 78 190 142 34 226 170 11
44508 -239 182 13 246 190 14 246 190 14 246 190 14
44509 -246 190 14 246 190 14 246 190 14 246 190 14
44510 -246 190 14 246 190 14 246 190 14 246 190 14
44511 -246 190 14 241 196 14 203 166 17 22 18 6
44512 - 2 2 6 2 2 6 2 2 6 38 38 38
44513 -218 218 218 253 253 253 253 253 253 253 253 253
44514 -253 253 253 253 253 253 253 253 253 253 253 253
44515 -253 253 253 253 253 253 253 253 253 253 253 253
44516 -253 253 253 253 253 253 253 253 253 253 253 253
44517 -250 250 250 206 206 206 198 198 198 202 162 69
44518 -226 170 11 236 178 12 224 166 10 210 150 10
44519 -200 144 11 197 138 11 192 133 9 197 138 11
44520 -210 150 10 226 170 11 242 186 14 246 190 14
44521 -246 190 14 246 186 14 225 175 15 124 112 88
44522 - 62 62 62 30 30 30 14 14 14 6 6 6
44523 - 0 0 0 0 0 0 0 0 0 0 0 0
44524 - 0 0 0 0 0 0 0 0 0 0 0 0
44525 - 0 0 0 0 0 0 0 0 0 0 0 0
44526 - 0 0 0 0 0 0 0 0 0 10 10 10
44527 - 30 30 30 78 78 78 174 135 50 224 166 10
44528 -239 182 13 246 190 14 246 190 14 246 190 14
44529 -246 190 14 246 190 14 246 190 14 246 190 14
44530 -246 190 14 246 190 14 246 190 14 246 190 14
44531 -246 190 14 246 190 14 241 196 14 139 102 15
44532 - 2 2 6 2 2 6 2 2 6 2 2 6
44533 - 78 78 78 250 250 250 253 253 253 253 253 253
44534 -253 253 253 253 253 253 253 253 253 253 253 253
44535 -253 253 253 253 253 253 253 253 253 253 253 253
44536 -253 253 253 253 253 253 253 253 253 253 253 253
44537 -250 250 250 214 214 214 198 198 198 190 150 46
44538 -219 162 10 236 178 12 234 174 13 224 166 10
44539 -216 158 10 213 154 11 213 154 11 216 158 10
44540 -226 170 11 239 182 13 246 190 14 246 190 14
44541 -246 190 14 246 190 14 242 186 14 206 162 42
44542 -101 101 101 58 58 58 30 30 30 14 14 14
44543 - 6 6 6 0 0 0 0 0 0 0 0 0
44544 - 0 0 0 0 0 0 0 0 0 0 0 0
44545 - 0 0 0 0 0 0 0 0 0 0 0 0
44546 - 0 0 0 0 0 0 0 0 0 10 10 10
44547 - 30 30 30 74 74 74 174 135 50 216 158 10
44548 -236 178 12 246 190 14 246 190 14 246 190 14
44549 -246 190 14 246 190 14 246 190 14 246 190 14
44550 -246 190 14 246 190 14 246 190 14 246 190 14
44551 -246 190 14 246 190 14 241 196 14 226 184 13
44552 - 61 42 6 2 2 6 2 2 6 2 2 6
44553 - 22 22 22 238 238 238 253 253 253 253 253 253
44554 -253 253 253 253 253 253 253 253 253 253 253 253
44555 -253 253 253 253 253 253 253 253 253 253 253 253
44556 -253 253 253 253 253 253 253 253 253 253 253 253
44557 -253 253 253 226 226 226 187 187 187 180 133 36
44558 -216 158 10 236 178 12 239 182 13 236 178 12
44559 -230 174 11 226 170 11 226 170 11 230 174 11
44560 -236 178 12 242 186 14 246 190 14 246 190 14
44561 -246 190 14 246 190 14 246 186 14 239 182 13
44562 -206 162 42 106 106 106 66 66 66 34 34 34
44563 - 14 14 14 6 6 6 0 0 0 0 0 0
44564 - 0 0 0 0 0 0 0 0 0 0 0 0
44565 - 0 0 0 0 0 0 0 0 0 0 0 0
44566 - 0 0 0 0 0 0 0 0 0 6 6 6
44567 - 26 26 26 70 70 70 163 133 67 213 154 11
44568 -236 178 12 246 190 14 246 190 14 246 190 14
44569 -246 190 14 246 190 14 246 190 14 246 190 14
44570 -246 190 14 246 190 14 246 190 14 246 190 14
44571 -246 190 14 246 190 14 246 190 14 241 196 14
44572 -190 146 13 18 14 6 2 2 6 2 2 6
44573 - 46 46 46 246 246 246 253 253 253 253 253 253
44574 -253 253 253 253 253 253 253 253 253 253 253 253
44575 -253 253 253 253 253 253 253 253 253 253 253 253
44576 -253 253 253 253 253 253 253 253 253 253 253 253
44577 -253 253 253 221 221 221 86 86 86 156 107 11
44578 -216 158 10 236 178 12 242 186 14 246 186 14
44579 -242 186 14 239 182 13 239 182 13 242 186 14
44580 -242 186 14 246 186 14 246 190 14 246 190 14
44581 -246 190 14 246 190 14 246 190 14 246 190 14
44582 -242 186 14 225 175 15 142 122 72 66 66 66
44583 - 30 30 30 10 10 10 0 0 0 0 0 0
44584 - 0 0 0 0 0 0 0 0 0 0 0 0
44585 - 0 0 0 0 0 0 0 0 0 0 0 0
44586 - 0 0 0 0 0 0 0 0 0 6 6 6
44587 - 26 26 26 70 70 70 163 133 67 210 150 10
44588 -236 178 12 246 190 14 246 190 14 246 190 14
44589 -246 190 14 246 190 14 246 190 14 246 190 14
44590 -246 190 14 246 190 14 246 190 14 246 190 14
44591 -246 190 14 246 190 14 246 190 14 246 190 14
44592 -232 195 16 121 92 8 34 34 34 106 106 106
44593 -221 221 221 253 253 253 253 253 253 253 253 253
44594 -253 253 253 253 253 253 253 253 253 253 253 253
44595 -253 253 253 253 253 253 253 253 253 253 253 253
44596 -253 253 253 253 253 253 253 253 253 253 253 253
44597 -242 242 242 82 82 82 18 14 6 163 110 8
44598 -216 158 10 236 178 12 242 186 14 246 190 14
44599 -246 190 14 246 190 14 246 190 14 246 190 14
44600 -246 190 14 246 190 14 246 190 14 246 190 14
44601 -246 190 14 246 190 14 246 190 14 246 190 14
44602 -246 190 14 246 190 14 242 186 14 163 133 67
44603 - 46 46 46 18 18 18 6 6 6 0 0 0
44604 - 0 0 0 0 0 0 0 0 0 0 0 0
44605 - 0 0 0 0 0 0 0 0 0 0 0 0
44606 - 0 0 0 0 0 0 0 0 0 10 10 10
44607 - 30 30 30 78 78 78 163 133 67 210 150 10
44608 -236 178 12 246 186 14 246 190 14 246 190 14
44609 -246 190 14 246 190 14 246 190 14 246 190 14
44610 -246 190 14 246 190 14 246 190 14 246 190 14
44611 -246 190 14 246 190 14 246 190 14 246 190 14
44612 -241 196 14 215 174 15 190 178 144 253 253 253
44613 -253 253 253 253 253 253 253 253 253 253 253 253
44614 -253 253 253 253 253 253 253 253 253 253 253 253
44615 -253 253 253 253 253 253 253 253 253 253 253 253
44616 -253 253 253 253 253 253 253 253 253 218 218 218
44617 - 58 58 58 2 2 6 22 18 6 167 114 7
44618 -216 158 10 236 178 12 246 186 14 246 190 14
44619 -246 190 14 246 190 14 246 190 14 246 190 14
44620 -246 190 14 246 190 14 246 190 14 246 190 14
44621 -246 190 14 246 190 14 246 190 14 246 190 14
44622 -246 190 14 246 186 14 242 186 14 190 150 46
44623 - 54 54 54 22 22 22 6 6 6 0 0 0
44624 - 0 0 0 0 0 0 0 0 0 0 0 0
44625 - 0 0 0 0 0 0 0 0 0 0 0 0
44626 - 0 0 0 0 0 0 0 0 0 14 14 14
44627 - 38 38 38 86 86 86 180 133 36 213 154 11
44628 -236 178 12 246 186 14 246 190 14 246 190 14
44629 -246 190 14 246 190 14 246 190 14 246 190 14
44630 -246 190 14 246 190 14 246 190 14 246 190 14
44631 -246 190 14 246 190 14 246 190 14 246 190 14
44632 -246 190 14 232 195 16 190 146 13 214 214 214
44633 -253 253 253 253 253 253 253 253 253 253 253 253
44634 -253 253 253 253 253 253 253 253 253 253 253 253
44635 -253 253 253 253 253 253 253 253 253 253 253 253
44636 -253 253 253 250 250 250 170 170 170 26 26 26
44637 - 2 2 6 2 2 6 37 26 9 163 110 8
44638 -219 162 10 239 182 13 246 186 14 246 190 14
44639 -246 190 14 246 190 14 246 190 14 246 190 14
44640 -246 190 14 246 190 14 246 190 14 246 190 14
44641 -246 190 14 246 190 14 246 190 14 246 190 14
44642 -246 186 14 236 178 12 224 166 10 142 122 72
44643 - 46 46 46 18 18 18 6 6 6 0 0 0
44644 - 0 0 0 0 0 0 0 0 0 0 0 0
44645 - 0 0 0 0 0 0 0 0 0 0 0 0
44646 - 0 0 0 0 0 0 6 6 6 18 18 18
44647 - 50 50 50 109 106 95 192 133 9 224 166 10
44648 -242 186 14 246 190 14 246 190 14 246 190 14
44649 -246 190 14 246 190 14 246 190 14 246 190 14
44650 -246 190 14 246 190 14 246 190 14 246 190 14
44651 -246 190 14 246 190 14 246 190 14 246 190 14
44652 -242 186 14 226 184 13 210 162 10 142 110 46
44653 -226 226 226 253 253 253 253 253 253 253 253 253
44654 -253 253 253 253 253 253 253 253 253 253 253 253
44655 -253 253 253 253 253 253 253 253 253 253 253 253
44656 -198 198 198 66 66 66 2 2 6 2 2 6
44657 - 2 2 6 2 2 6 50 34 6 156 107 11
44658 -219 162 10 239 182 13 246 186 14 246 190 14
44659 -246 190 14 246 190 14 246 190 14 246 190 14
44660 -246 190 14 246 190 14 246 190 14 246 190 14
44661 -246 190 14 246 190 14 246 190 14 242 186 14
44662 -234 174 13 213 154 11 154 122 46 66 66 66
44663 - 30 30 30 10 10 10 0 0 0 0 0 0
44664 - 0 0 0 0 0 0 0 0 0 0 0 0
44665 - 0 0 0 0 0 0 0 0 0 0 0 0
44666 - 0 0 0 0 0 0 6 6 6 22 22 22
44667 - 58 58 58 154 121 60 206 145 10 234 174 13
44668 -242 186 14 246 186 14 246 190 14 246 190 14
44669 -246 190 14 246 190 14 246 190 14 246 190 14
44670 -246 190 14 246 190 14 246 190 14 246 190 14
44671 -246 190 14 246 190 14 246 190 14 246 190 14
44672 -246 186 14 236 178 12 210 162 10 163 110 8
44673 - 61 42 6 138 138 138 218 218 218 250 250 250
44674 -253 253 253 253 253 253 253 253 253 250 250 250
44675 -242 242 242 210 210 210 144 144 144 66 66 66
44676 - 6 6 6 2 2 6 2 2 6 2 2 6
44677 - 2 2 6 2 2 6 61 42 6 163 110 8
44678 -216 158 10 236 178 12 246 190 14 246 190 14
44679 -246 190 14 246 190 14 246 190 14 246 190 14
44680 -246 190 14 246 190 14 246 190 14 246 190 14
44681 -246 190 14 239 182 13 230 174 11 216 158 10
44682 -190 142 34 124 112 88 70 70 70 38 38 38
44683 - 18 18 18 6 6 6 0 0 0 0 0 0
44684 - 0 0 0 0 0 0 0 0 0 0 0 0
44685 - 0 0 0 0 0 0 0 0 0 0 0 0
44686 - 0 0 0 0 0 0 6 6 6 22 22 22
44687 - 62 62 62 168 124 44 206 145 10 224 166 10
44688 -236 178 12 239 182 13 242 186 14 242 186 14
44689 -246 186 14 246 190 14 246 190 14 246 190 14
44690 -246 190 14 246 190 14 246 190 14 246 190 14
44691 -246 190 14 246 190 14 246 190 14 246 190 14
44692 -246 190 14 236 178 12 216 158 10 175 118 6
44693 - 80 54 7 2 2 6 6 6 6 30 30 30
44694 - 54 54 54 62 62 62 50 50 50 38 38 38
44695 - 14 14 14 2 2 6 2 2 6 2 2 6
44696 - 2 2 6 2 2 6 2 2 6 2 2 6
44697 - 2 2 6 6 6 6 80 54 7 167 114 7
44698 -213 154 11 236 178 12 246 190 14 246 190 14
44699 -246 190 14 246 190 14 246 190 14 246 190 14
44700 -246 190 14 242 186 14 239 182 13 239 182 13
44701 -230 174 11 210 150 10 174 135 50 124 112 88
44702 - 82 82 82 54 54 54 34 34 34 18 18 18
44703 - 6 6 6 0 0 0 0 0 0 0 0 0
44704 - 0 0 0 0 0 0 0 0 0 0 0 0
44705 - 0 0 0 0 0 0 0 0 0 0 0 0
44706 - 0 0 0 0 0 0 6 6 6 18 18 18
44707 - 50 50 50 158 118 36 192 133 9 200 144 11
44708 -216 158 10 219 162 10 224 166 10 226 170 11
44709 -230 174 11 236 178 12 239 182 13 239 182 13
44710 -242 186 14 246 186 14 246 190 14 246 190 14
44711 -246 190 14 246 190 14 246 190 14 246 190 14
44712 -246 186 14 230 174 11 210 150 10 163 110 8
44713 -104 69 6 10 10 10 2 2 6 2 2 6
44714 - 2 2 6 2 2 6 2 2 6 2 2 6
44715 - 2 2 6 2 2 6 2 2 6 2 2 6
44716 - 2 2 6 2 2 6 2 2 6 2 2 6
44717 - 2 2 6 6 6 6 91 60 6 167 114 7
44718 -206 145 10 230 174 11 242 186 14 246 190 14
44719 -246 190 14 246 190 14 246 186 14 242 186 14
44720 -239 182 13 230 174 11 224 166 10 213 154 11
44721 -180 133 36 124 112 88 86 86 86 58 58 58
44722 - 38 38 38 22 22 22 10 10 10 6 6 6
44723 - 0 0 0 0 0 0 0 0 0 0 0 0
44724 - 0 0 0 0 0 0 0 0 0 0 0 0
44725 - 0 0 0 0 0 0 0 0 0 0 0 0
44726 - 0 0 0 0 0 0 0 0 0 14 14 14
44727 - 34 34 34 70 70 70 138 110 50 158 118 36
44728 -167 114 7 180 123 7 192 133 9 197 138 11
44729 -200 144 11 206 145 10 213 154 11 219 162 10
44730 -224 166 10 230 174 11 239 182 13 242 186 14
44731 -246 186 14 246 186 14 246 186 14 246 186 14
44732 -239 182 13 216 158 10 185 133 11 152 99 6
44733 -104 69 6 18 14 6 2 2 6 2 2 6
44734 - 2 2 6 2 2 6 2 2 6 2 2 6
44735 - 2 2 6 2 2 6 2 2 6 2 2 6
44736 - 2 2 6 2 2 6 2 2 6 2 2 6
44737 - 2 2 6 6 6 6 80 54 7 152 99 6
44738 -192 133 9 219 162 10 236 178 12 239 182 13
44739 -246 186 14 242 186 14 239 182 13 236 178 12
44740 -224 166 10 206 145 10 192 133 9 154 121 60
44741 - 94 94 94 62 62 62 42 42 42 22 22 22
44742 - 14 14 14 6 6 6 0 0 0 0 0 0
44743 - 0 0 0 0 0 0 0 0 0 0 0 0
44744 - 0 0 0 0 0 0 0 0 0 0 0 0
44745 - 0 0 0 0 0 0 0 0 0 0 0 0
44746 - 0 0 0 0 0 0 0 0 0 6 6 6
44747 - 18 18 18 34 34 34 58 58 58 78 78 78
44748 -101 98 89 124 112 88 142 110 46 156 107 11
44749 -163 110 8 167 114 7 175 118 6 180 123 7
44750 -185 133 11 197 138 11 210 150 10 219 162 10
44751 -226 170 11 236 178 12 236 178 12 234 174 13
44752 -219 162 10 197 138 11 163 110 8 130 83 6
44753 - 91 60 6 10 10 10 2 2 6 2 2 6
44754 - 18 18 18 38 38 38 38 38 38 38 38 38
44755 - 38 38 38 38 38 38 38 38 38 38 38 38
44756 - 38 38 38 38 38 38 26 26 26 2 2 6
44757 - 2 2 6 6 6 6 70 47 6 137 92 6
44758 -175 118 6 200 144 11 219 162 10 230 174 11
44759 -234 174 13 230 174 11 219 162 10 210 150 10
44760 -192 133 9 163 110 8 124 112 88 82 82 82
44761 - 50 50 50 30 30 30 14 14 14 6 6 6
44762 - 0 0 0 0 0 0 0 0 0 0 0 0
44763 - 0 0 0 0 0 0 0 0 0 0 0 0
44764 - 0 0 0 0 0 0 0 0 0 0 0 0
44765 - 0 0 0 0 0 0 0 0 0 0 0 0
44766 - 0 0 0 0 0 0 0 0 0 0 0 0
44767 - 6 6 6 14 14 14 22 22 22 34 34 34
44768 - 42 42 42 58 58 58 74 74 74 86 86 86
44769 -101 98 89 122 102 70 130 98 46 121 87 25
44770 -137 92 6 152 99 6 163 110 8 180 123 7
44771 -185 133 11 197 138 11 206 145 10 200 144 11
44772 -180 123 7 156 107 11 130 83 6 104 69 6
44773 - 50 34 6 54 54 54 110 110 110 101 98 89
44774 - 86 86 86 82 82 82 78 78 78 78 78 78
44775 - 78 78 78 78 78 78 78 78 78 78 78 78
44776 - 78 78 78 82 82 82 86 86 86 94 94 94
44777 -106 106 106 101 101 101 86 66 34 124 80 6
44778 -156 107 11 180 123 7 192 133 9 200 144 11
44779 -206 145 10 200 144 11 192 133 9 175 118 6
44780 -139 102 15 109 106 95 70 70 70 42 42 42
44781 - 22 22 22 10 10 10 0 0 0 0 0 0
44782 - 0 0 0 0 0 0 0 0 0 0 0 0
44783 - 0 0 0 0 0 0 0 0 0 0 0 0
44784 - 0 0 0 0 0 0 0 0 0 0 0 0
44785 - 0 0 0 0 0 0 0 0 0 0 0 0
44786 - 0 0 0 0 0 0 0 0 0 0 0 0
44787 - 0 0 0 0 0 0 6 6 6 10 10 10
44788 - 14 14 14 22 22 22 30 30 30 38 38 38
44789 - 50 50 50 62 62 62 74 74 74 90 90 90
44790 -101 98 89 112 100 78 121 87 25 124 80 6
44791 -137 92 6 152 99 6 152 99 6 152 99 6
44792 -138 86 6 124 80 6 98 70 6 86 66 30
44793 -101 98 89 82 82 82 58 58 58 46 46 46
44794 - 38 38 38 34 34 34 34 34 34 34 34 34
44795 - 34 34 34 34 34 34 34 34 34 34 34 34
44796 - 34 34 34 34 34 34 38 38 38 42 42 42
44797 - 54 54 54 82 82 82 94 86 76 91 60 6
44798 -134 86 6 156 107 11 167 114 7 175 118 6
44799 -175 118 6 167 114 7 152 99 6 121 87 25
44800 -101 98 89 62 62 62 34 34 34 18 18 18
44801 - 6 6 6 0 0 0 0 0 0 0 0 0
44802 - 0 0 0 0 0 0 0 0 0 0 0 0
44803 - 0 0 0 0 0 0 0 0 0 0 0 0
44804 - 0 0 0 0 0 0 0 0 0 0 0 0
44805 - 0 0 0 0 0 0 0 0 0 0 0 0
44806 - 0 0 0 0 0 0 0 0 0 0 0 0
44807 - 0 0 0 0 0 0 0 0 0 0 0 0
44808 - 0 0 0 6 6 6 6 6 6 10 10 10
44809 - 18 18 18 22 22 22 30 30 30 42 42 42
44810 - 50 50 50 66 66 66 86 86 86 101 98 89
44811 -106 86 58 98 70 6 104 69 6 104 69 6
44812 -104 69 6 91 60 6 82 62 34 90 90 90
44813 - 62 62 62 38 38 38 22 22 22 14 14 14
44814 - 10 10 10 10 10 10 10 10 10 10 10 10
44815 - 10 10 10 10 10 10 6 6 6 10 10 10
44816 - 10 10 10 10 10 10 10 10 10 14 14 14
44817 - 22 22 22 42 42 42 70 70 70 89 81 66
44818 - 80 54 7 104 69 6 124 80 6 137 92 6
44819 -134 86 6 116 81 8 100 82 52 86 86 86
44820 - 58 58 58 30 30 30 14 14 14 6 6 6
44821 - 0 0 0 0 0 0 0 0 0 0 0 0
44822 - 0 0 0 0 0 0 0 0 0 0 0 0
44823 - 0 0 0 0 0 0 0 0 0 0 0 0
44824 - 0 0 0 0 0 0 0 0 0 0 0 0
44825 - 0 0 0 0 0 0 0 0 0 0 0 0
44826 - 0 0 0 0 0 0 0 0 0 0 0 0
44827 - 0 0 0 0 0 0 0 0 0 0 0 0
44828 - 0 0 0 0 0 0 0 0 0 0 0 0
44829 - 0 0 0 6 6 6 10 10 10 14 14 14
44830 - 18 18 18 26 26 26 38 38 38 54 54 54
44831 - 70 70 70 86 86 86 94 86 76 89 81 66
44832 - 89 81 66 86 86 86 74 74 74 50 50 50
44833 - 30 30 30 14 14 14 6 6 6 0 0 0
44834 - 0 0 0 0 0 0 0 0 0 0 0 0
44835 - 0 0 0 0 0 0 0 0 0 0 0 0
44836 - 0 0 0 0 0 0 0 0 0 0 0 0
44837 - 6 6 6 18 18 18 34 34 34 58 58 58
44838 - 82 82 82 89 81 66 89 81 66 89 81 66
44839 - 94 86 66 94 86 76 74 74 74 50 50 50
44840 - 26 26 26 14 14 14 6 6 6 0 0 0
44841 - 0 0 0 0 0 0 0 0 0 0 0 0
44842 - 0 0 0 0 0 0 0 0 0 0 0 0
44843 - 0 0 0 0 0 0 0 0 0 0 0 0
44844 - 0 0 0 0 0 0 0 0 0 0 0 0
44845 - 0 0 0 0 0 0 0 0 0 0 0 0
44846 - 0 0 0 0 0 0 0 0 0 0 0 0
44847 - 0 0 0 0 0 0 0 0 0 0 0 0
44848 - 0 0 0 0 0 0 0 0 0 0 0 0
44849 - 0 0 0 0 0 0 0 0 0 0 0 0
44850 - 6 6 6 6 6 6 14 14 14 18 18 18
44851 - 30 30 30 38 38 38 46 46 46 54 54 54
44852 - 50 50 50 42 42 42 30 30 30 18 18 18
44853 - 10 10 10 0 0 0 0 0 0 0 0 0
44854 - 0 0 0 0 0 0 0 0 0 0 0 0
44855 - 0 0 0 0 0 0 0 0 0 0 0 0
44856 - 0 0 0 0 0 0 0 0 0 0 0 0
44857 - 0 0 0 6 6 6 14 14 14 26 26 26
44858 - 38 38 38 50 50 50 58 58 58 58 58 58
44859 - 54 54 54 42 42 42 30 30 30 18 18 18
44860 - 10 10 10 0 0 0 0 0 0 0 0 0
44861 - 0 0 0 0 0 0 0 0 0 0 0 0
44862 - 0 0 0 0 0 0 0 0 0 0 0 0
44863 - 0 0 0 0 0 0 0 0 0 0 0 0
44864 - 0 0 0 0 0 0 0 0 0 0 0 0
44865 - 0 0 0 0 0 0 0 0 0 0 0 0
44866 - 0 0 0 0 0 0 0 0 0 0 0 0
44867 - 0 0 0 0 0 0 0 0 0 0 0 0
44868 - 0 0 0 0 0 0 0 0 0 0 0 0
44869 - 0 0 0 0 0 0 0 0 0 0 0 0
44870 - 0 0 0 0 0 0 0 0 0 6 6 6
44871 - 6 6 6 10 10 10 14 14 14 18 18 18
44872 - 18 18 18 14 14 14 10 10 10 6 6 6
44873 - 0 0 0 0 0 0 0 0 0 0 0 0
44874 - 0 0 0 0 0 0 0 0 0 0 0 0
44875 - 0 0 0 0 0 0 0 0 0 0 0 0
44876 - 0 0 0 0 0 0 0 0 0 0 0 0
44877 - 0 0 0 0 0 0 0 0 0 6 6 6
44878 - 14 14 14 18 18 18 22 22 22 22 22 22
44879 - 18 18 18 14 14 14 10 10 10 6 6 6
44880 - 0 0 0 0 0 0 0 0 0 0 0 0
44881 - 0 0 0 0 0 0 0 0 0 0 0 0
44882 - 0 0 0 0 0 0 0 0 0 0 0 0
44883 - 0 0 0 0 0 0 0 0 0 0 0 0
44884 - 0 0 0 0 0 0 0 0 0 0 0 0
44885 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44886 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44887 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44888 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44889 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44890 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44891 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44892 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44893 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44894 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44895 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44896 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44897 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44898 +4 4 4 4 4 4
44899 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44900 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44901 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44902 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44903 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44904 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44905 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44906 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44907 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44908 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44909 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44910 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44911 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44912 +4 4 4 4 4 4
44913 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44914 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44915 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44916 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44917 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44918 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44919 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44920 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44921 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44922 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44923 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44924 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44925 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44926 +4 4 4 4 4 4
44927 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44928 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44929 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44930 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44931 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44932 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44933 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44934 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44935 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44936 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44937 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44938 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44939 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44940 +4 4 4 4 4 4
44941 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44942 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44943 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44944 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44945 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44946 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44947 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44948 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44949 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44950 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44951 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44952 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44953 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44954 +4 4 4 4 4 4
44955 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44956 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44957 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44958 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44959 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44960 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44961 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44962 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44963 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44964 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44965 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44966 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44967 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44968 +4 4 4 4 4 4
44969 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44970 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44971 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44972 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44973 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
44974 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
44975 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44976 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44977 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44978 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
44979 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
44980 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
44981 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44982 +4 4 4 4 4 4
44983 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44984 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44985 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44986 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44987 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
44988 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
44989 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44990 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44991 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44992 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
44993 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
44994 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
44995 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44996 +4 4 4 4 4 4
44997 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44998 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44999 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45000 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45001 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
45002 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
45003 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
45004 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45005 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45006 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
45007 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
45008 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
45009 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
45010 +4 4 4 4 4 4
45011 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45012 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45013 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45014 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
45015 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
45016 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
45017 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
45018 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45019 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
45020 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
45021 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
45022 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
45023 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
45024 +4 4 4 4 4 4
45025 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45026 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45027 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45028 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
45029 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
45030 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
45031 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
45032 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45033 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
45034 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
45035 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
45036 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
45037 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
45038 +4 4 4 4 4 4
45039 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45040 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45041 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45042 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
45043 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
45044 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
45045 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
45046 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
45047 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
45048 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
45049 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
45050 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
45051 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
45052 +4 4 4 4 4 4
45053 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45054 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45055 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
45056 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
45057 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
45058 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
45059 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
45060 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
45061 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
45062 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
45063 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
45064 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
45065 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
45066 +4 4 4 4 4 4
45067 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45068 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45069 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
45070 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
45071 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
45072 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
45073 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
45074 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
45075 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
45076 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
45077 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
45078 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
45079 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
45080 +4 4 4 4 4 4
45081 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45082 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45083 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
45084 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
45085 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
45086 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
45087 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
45088 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
45089 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
45090 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
45091 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
45092 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
45093 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
45094 +4 4 4 4 4 4
45095 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45096 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45097 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
45098 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
45099 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
45100 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
45101 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
45102 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
45103 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
45104 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
45105 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
45106 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
45107 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
45108 +4 4 4 4 4 4
45109 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45110 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
45111 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
45112 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
45113 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
45114 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
45115 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
45116 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
45117 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
45118 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
45119 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
45120 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
45121 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
45122 +4 4 4 4 4 4
45123 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45124 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
45125 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
45126 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
45127 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
45128 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
45129 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
45130 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
45131 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
45132 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
45133 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
45134 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
45135 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
45136 +0 0 0 4 4 4
45137 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
45138 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
45139 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
45140 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
45141 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
45142 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
45143 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
45144 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
45145 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
45146 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
45147 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
45148 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
45149 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
45150 +2 0 0 0 0 0
45151 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
45152 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
45153 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
45154 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
45155 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
45156 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
45157 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
45158 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
45159 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
45160 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
45161 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
45162 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
45163 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
45164 +37 38 37 0 0 0
45165 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
45166 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
45167 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
45168 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
45169 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
45170 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
45171 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
45172 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
45173 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
45174 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
45175 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
45176 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
45177 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
45178 +85 115 134 4 0 0
45179 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
45180 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
45181 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
45182 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
45183 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
45184 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
45185 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
45186 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
45187 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
45188 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
45189 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
45190 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
45191 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
45192 +60 73 81 4 0 0
45193 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
45194 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
45195 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
45196 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
45197 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
45198 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
45199 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
45200 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
45201 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
45202 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
45203 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
45204 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
45205 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
45206 +16 19 21 4 0 0
45207 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
45208 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
45209 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
45210 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
45211 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
45212 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
45213 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
45214 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
45215 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
45216 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
45217 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
45218 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
45219 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
45220 +4 0 0 4 3 3
45221 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
45222 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
45223 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
45224 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
45225 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
45226 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
45227 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
45228 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
45229 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
45230 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
45231 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
45232 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
45233 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
45234 +3 2 2 4 4 4
45235 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
45236 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
45237 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
45238 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
45239 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
45240 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
45241 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
45242 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
45243 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
45244 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
45245 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
45246 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
45247 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
45248 +4 4 4 4 4 4
45249 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
45250 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
45251 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
45252 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
45253 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
45254 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
45255 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
45256 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
45257 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
45258 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
45259 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
45260 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
45261 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
45262 +4 4 4 4 4 4
45263 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
45264 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
45265 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
45266 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
45267 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
45268 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
45269 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
45270 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
45271 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
45272 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
45273 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
45274 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
45275 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
45276 +5 5 5 5 5 5
45277 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
45278 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
45279 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
45280 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
45281 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
45282 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45283 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
45284 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
45285 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
45286 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
45287 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
45288 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
45289 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
45290 +5 5 5 4 4 4
45291 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
45292 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
45293 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
45294 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
45295 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45296 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
45297 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
45298 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
45299 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
45300 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
45301 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
45302 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
45303 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45304 +4 4 4 4 4 4
45305 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
45306 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
45307 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
45308 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
45309 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
45310 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45311 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45312 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
45313 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
45314 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
45315 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
45316 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
45317 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45318 +4 4 4 4 4 4
45319 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
45320 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
45321 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
45322 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
45323 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45324 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
45325 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
45326 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
45327 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
45328 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
45329 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
45330 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45331 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45332 +4 4 4 4 4 4
45333 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
45334 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
45335 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
45336 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
45337 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45338 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45339 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45340 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
45341 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
45342 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
45343 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
45344 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45345 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45346 +4 4 4 4 4 4
45347 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
45348 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
45349 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
45350 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
45351 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45352 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
45353 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
45354 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
45355 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
45356 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
45357 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45358 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45359 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45360 +4 4 4 4 4 4
45361 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
45362 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
45363 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
45364 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
45365 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45366 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
45367 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
45368 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
45369 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
45370 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
45371 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
45372 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45373 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45374 +4 4 4 4 4 4
45375 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
45376 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
45377 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
45378 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
45379 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45380 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
45381 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
45382 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
45383 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
45384 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
45385 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
45386 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45387 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45388 +4 4 4 4 4 4
45389 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
45390 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
45391 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
45392 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
45393 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
45394 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
45395 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
45396 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
45397 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
45398 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
45399 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45400 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45401 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45402 +4 4 4 4 4 4
45403 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
45404 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
45405 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
45406 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
45407 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45408 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
45409 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
45410 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
45411 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
45412 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
45413 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45414 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45415 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45416 +4 4 4 4 4 4
45417 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
45418 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
45419 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
45420 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
45421 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45422 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
45423 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
45424 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
45425 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
45426 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
45427 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45428 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45429 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45430 +4 4 4 4 4 4
45431 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
45432 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
45433 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
45434 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
45435 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45436 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
45437 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
45438 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
45439 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
45440 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45441 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45442 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45443 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45444 +4 4 4 4 4 4
45445 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
45446 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
45447 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
45448 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
45449 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
45450 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
45451 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
45452 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
45453 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45454 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45455 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45456 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45457 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45458 +4 4 4 4 4 4
45459 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
45460 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
45461 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
45462 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
45463 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45464 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
45465 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
45466 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
45467 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45468 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45469 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45470 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45471 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45472 +4 4 4 4 4 4
45473 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
45474 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
45475 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
45476 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
45477 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
45478 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
45479 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
45480 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
45481 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45482 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45483 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45484 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45485 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45486 +4 4 4 4 4 4
45487 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
45488 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
45489 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45490 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
45491 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
45492 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
45493 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
45494 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
45495 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
45496 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45497 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45498 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45499 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45500 +4 4 4 4 4 4
45501 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
45502 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
45503 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
45504 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
45505 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
45506 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
45507 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
45508 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
45509 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45510 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45511 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45512 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45513 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45514 +4 4 4 4 4 4
45515 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
45516 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
45517 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45518 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
45519 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
45520 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
45521 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
45522 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
45523 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
45524 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45525 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45526 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45527 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45528 +4 4 4 4 4 4
45529 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
45530 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
45531 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
45532 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
45533 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
45534 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
45535 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
45536 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
45537 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45538 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45539 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45540 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45541 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45542 +4 4 4 4 4 4
45543 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45544 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
45545 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45546 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
45547 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
45548 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
45549 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
45550 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
45551 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45552 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45553 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45554 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45555 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45556 +4 4 4 4 4 4
45557 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
45558 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
45559 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
45560 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
45561 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
45562 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
45563 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45564 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
45565 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45566 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45567 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45568 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45569 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45570 +4 4 4 4 4 4
45571 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45572 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
45573 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
45574 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
45575 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
45576 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
45577 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45578 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
45579 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45580 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45581 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45582 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45583 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45584 +4 4 4 4 4 4
45585 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
45586 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
45587 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
45588 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
45589 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
45590 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
45591 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
45592 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
45593 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
45594 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45595 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45596 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45597 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45598 +4 4 4 4 4 4
45599 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45600 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
45601 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
45602 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
45603 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
45604 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
45605 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
45606 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
45607 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
45608 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45609 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45610 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45611 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45612 +4 4 4 4 4 4
45613 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
45614 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
45615 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
45616 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
45617 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
45618 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
45619 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
45620 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
45621 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
45622 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45623 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45624 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45625 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45626 +4 4 4 4 4 4
45627 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45628 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
45629 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
45630 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
45631 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
45632 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
45633 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
45634 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
45635 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
45636 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45637 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45638 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45639 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45640 +4 4 4 4 4 4
45641 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
45642 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
45643 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
45644 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
45645 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
45646 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
45647 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
45648 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
45649 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
45650 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
45651 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45652 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45653 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45654 +4 4 4 4 4 4
45655 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
45656 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
45657 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
45658 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
45659 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
45660 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
45661 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
45662 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
45663 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
45664 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
45665 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45666 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45667 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45668 +4 4 4 4 4 4
45669 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
45670 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
45671 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
45672 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
45673 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
45674 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
45675 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45676 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
45677 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
45678 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
45679 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45680 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45681 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45682 +4 4 4 4 4 4
45683 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
45684 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
45685 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
45686 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
45687 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
45688 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
45689 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
45690 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
45691 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
45692 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
45693 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45694 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45695 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45696 +4 4 4 4 4 4
45697 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
45698 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
45699 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
45700 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
45701 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
45702 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
45703 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
45704 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
45705 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
45706 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
45707 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45708 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45709 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45710 +4 4 4 4 4 4
45711 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45712 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
45713 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
45714 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
45715 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
45716 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
45717 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
45718 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
45719 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
45720 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
45721 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45722 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45723 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45724 +4 4 4 4 4 4
45725 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
45726 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
45727 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
45728 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
45729 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
45730 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
45731 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
45732 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
45733 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
45734 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
45735 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45736 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45737 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45738 +4 4 4 4 4 4
45739 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
45740 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
45741 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
45742 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
45743 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
45744 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
45745 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
45746 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
45747 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
45748 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45749 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45750 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45751 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45752 +4 4 4 4 4 4
45753 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
45754 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45755 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
45756 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
45757 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
45758 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
45759 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
45760 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
45761 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
45762 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45763 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45764 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45765 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45766 +4 4 4 4 4 4
45767 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
45768 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
45769 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
45770 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
45771 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
45772 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
45773 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
45774 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
45775 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
45776 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45777 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45778 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45779 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45780 +4 4 4 4 4 4
45781 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
45782 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
45783 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
45784 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
45785 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
45786 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
45787 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
45788 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
45789 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45790 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45791 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45792 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45793 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45794 +4 4 4 4 4 4
45795 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
45796 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
45797 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
45798 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
45799 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
45800 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
45801 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
45802 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
45803 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45804 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45805 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45806 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45807 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45808 +4 4 4 4 4 4
45809 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
45810 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
45811 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
45812 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
45813 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
45814 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
45815 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
45816 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
45817 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45818 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45819 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45820 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45821 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45822 +4 4 4 4 4 4
45823 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45824 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
45825 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
45826 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
45827 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
45828 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
45829 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
45830 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
45831 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45832 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45833 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45834 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45835 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45836 +4 4 4 4 4 4
45837 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45838 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
45839 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
45840 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
45841 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
45842 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
45843 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
45844 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
45845 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45846 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45847 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45848 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45849 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45850 +4 4 4 4 4 4
45851 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45852 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
45853 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
45854 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
45855 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
45856 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
45857 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
45858 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45859 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45860 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45861 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45862 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45863 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45864 +4 4 4 4 4 4
45865 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45866 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45867 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
45868 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
45869 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
45870 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
45871 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
45872 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45873 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45874 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45875 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45876 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45877 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45878 +4 4 4 4 4 4
45879 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45880 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45881 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45882 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
45883 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
45884 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
45885 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
45886 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45887 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45888 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45889 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45890 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45891 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45892 +4 4 4 4 4 4
45893 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45894 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45895 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45896 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
45897 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
45898 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
45899 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
45900 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45901 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45902 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45903 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45904 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45905 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45906 +4 4 4 4 4 4
45907 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45908 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45909 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45910 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
45911 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45912 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
45913 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
45914 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45915 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45916 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45917 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45918 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45919 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45920 +4 4 4 4 4 4
45921 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45922 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45923 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45924 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
45925 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
45926 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
45927 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
45928 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45929 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45930 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45931 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45932 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45933 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45934 +4 4 4 4 4 4
45935 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45936 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45937 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45938 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45939 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
45940 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
45941 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45942 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45943 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45944 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45945 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45946 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45947 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45948 +4 4 4 4 4 4
45949 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45950 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45951 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45952 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45953 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
45954 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
45955 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
45956 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45957 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45958 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45959 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45960 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45961 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45962 +4 4 4 4 4 4
45963 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45964 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45965 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45966 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45967 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
45968 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
45969 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45970 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45971 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45972 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45973 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45974 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45975 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45976 +4 4 4 4 4 4
45977 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45978 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45979 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45980 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45981 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
45982 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
45983 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45984 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45985 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45986 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45987 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45988 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45989 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45990 +4 4 4 4 4 4
45991 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45992 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45993 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45994 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45995 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
45996 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
45997 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45998 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45999 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46000 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46001 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46002 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46003 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46004 +4 4 4 4 4 4
46005 diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
46006 index 443e3c8..c443d6a 100644
46007 --- a/drivers/video/nvidia/nv_backlight.c
46008 +++ b/drivers/video/nvidia/nv_backlight.c
46009 @@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(struct backlight_device *bd)
46010 return bd->props.brightness;
46011 }
46012
46013 -static struct backlight_ops nvidia_bl_ops = {
46014 +static const struct backlight_ops nvidia_bl_ops = {
46015 .get_brightness = nvidia_bl_get_brightness,
46016 .update_status = nvidia_bl_update_status,
46017 };
46018 diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
46019 index d94c57f..912984c 100644
46020 --- a/drivers/video/riva/fbdev.c
46021 +++ b/drivers/video/riva/fbdev.c
46022 @@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct backlight_device *bd)
46023 return bd->props.brightness;
46024 }
46025
46026 -static struct backlight_ops riva_bl_ops = {
46027 +static const struct backlight_ops riva_bl_ops = {
46028 .get_brightness = riva_bl_get_brightness,
46029 .update_status = riva_bl_update_status,
46030 };
46031 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
46032 index 54fbb29..2c108fc 100644
46033 --- a/drivers/video/uvesafb.c
46034 +++ b/drivers/video/uvesafb.c
46035 @@ -18,6 +18,7 @@
46036 #include <linux/fb.h>
46037 #include <linux/io.h>
46038 #include <linux/mutex.h>
46039 +#include <linux/moduleloader.h>
46040 #include <video/edid.h>
46041 #include <video/uvesafb.h>
46042 #ifdef CONFIG_X86
46043 @@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
46044 NULL,
46045 };
46046
46047 - return call_usermodehelper(v86d_path, argv, envp, 1);
46048 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
46049 }
46050
46051 /*
46052 @@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
46053 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
46054 par->pmi_setpal = par->ypan = 0;
46055 } else {
46056 +
46057 +#ifdef CONFIG_PAX_KERNEXEC
46058 +#ifdef CONFIG_MODULES
46059 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
46060 +#endif
46061 + if (!par->pmi_code) {
46062 + par->pmi_setpal = par->ypan = 0;
46063 + return 0;
46064 + }
46065 +#endif
46066 +
46067 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
46068 + task->t.regs.edi);
46069 +
46070 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46071 + pax_open_kernel();
46072 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
46073 + pax_close_kernel();
46074 +
46075 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
46076 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
46077 +#else
46078 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
46079 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
46080 +#endif
46081 +
46082 printk(KERN_INFO "uvesafb: protected mode interface info at "
46083 "%04x:%04x\n",
46084 (u16)task->t.regs.es, (u16)task->t.regs.edi);
46085 @@ -1799,6 +1822,11 @@ out:
46086 if (par->vbe_modes)
46087 kfree(par->vbe_modes);
46088
46089 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46090 + if (par->pmi_code)
46091 + module_free_exec(NULL, par->pmi_code);
46092 +#endif
46093 +
46094 framebuffer_release(info);
46095 return err;
46096 }
46097 @@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platform_device *dev)
46098 kfree(par->vbe_state_orig);
46099 if (par->vbe_state_saved)
46100 kfree(par->vbe_state_saved);
46101 +
46102 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46103 + if (par->pmi_code)
46104 + module_free_exec(NULL, par->pmi_code);
46105 +#endif
46106 +
46107 }
46108
46109 framebuffer_release(info);
46110 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
46111 index bd37ee1..cb827e8 100644
46112 --- a/drivers/video/vesafb.c
46113 +++ b/drivers/video/vesafb.c
46114 @@ -9,6 +9,7 @@
46115 */
46116
46117 #include <linux/module.h>
46118 +#include <linux/moduleloader.h>
46119 #include <linux/kernel.h>
46120 #include <linux/errno.h>
46121 #include <linux/string.h>
46122 @@ -53,8 +54,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
46123 static int vram_total __initdata; /* Set total amount of memory */
46124 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
46125 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
46126 -static void (*pmi_start)(void) __read_mostly;
46127 -static void (*pmi_pal) (void) __read_mostly;
46128 +static void (*pmi_start)(void) __read_only;
46129 +static void (*pmi_pal) (void) __read_only;
46130 static int depth __read_mostly;
46131 static int vga_compat __read_mostly;
46132 /* --------------------------------------------------------------------- */
46133 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
46134 unsigned int size_vmode;
46135 unsigned int size_remap;
46136 unsigned int size_total;
46137 + void *pmi_code = NULL;
46138
46139 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
46140 return -ENODEV;
46141 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
46142 size_remap = size_total;
46143 vesafb_fix.smem_len = size_remap;
46144
46145 -#ifndef __i386__
46146 - screen_info.vesapm_seg = 0;
46147 -#endif
46148 -
46149 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
46150 printk(KERN_WARNING
46151 "vesafb: cannot reserve video memory at 0x%lx\n",
46152 @@ -315,9 +313,21 @@ static int __init vesafb_probe(struct platform_device *dev)
46153 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
46154 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
46155
46156 +#ifdef __i386__
46157 +
46158 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46159 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
46160 + if (!pmi_code)
46161 +#elif !defined(CONFIG_PAX_KERNEXEC)
46162 + if (0)
46163 +#endif
46164 +
46165 +#endif
46166 + screen_info.vesapm_seg = 0;
46167 +
46168 if (screen_info.vesapm_seg) {
46169 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
46170 - screen_info.vesapm_seg,screen_info.vesapm_off);
46171 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
46172 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
46173 }
46174
46175 if (screen_info.vesapm_seg < 0xc000)
46176 @@ -325,9 +335,25 @@ static int __init vesafb_probe(struct platform_device *dev)
46177
46178 if (ypan || pmi_setpal) {
46179 unsigned short *pmi_base;
46180 +
46181 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
46182 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
46183 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
46184 +
46185 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46186 + pax_open_kernel();
46187 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
46188 +#else
46189 + pmi_code = pmi_base;
46190 +#endif
46191 +
46192 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
46193 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
46194 +
46195 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46196 + pmi_start = ktva_ktla(pmi_start);
46197 + pmi_pal = ktva_ktla(pmi_pal);
46198 + pax_close_kernel();
46199 +#endif
46200 +
46201 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
46202 if (pmi_base[3]) {
46203 printk(KERN_INFO "vesafb: pmi: ports = ");
46204 @@ -469,6 +495,11 @@ static int __init vesafb_probe(struct platform_device *dev)
46205 info->node, info->fix.id);
46206 return 0;
46207 err:
46208 +
46209 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46210 + module_free_exec(NULL, pmi_code);
46211 +#endif
46212 +
46213 if (info->screen_base)
46214 iounmap(info->screen_base);
46215 framebuffer_release(info);
46216 diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
46217 index 88a60e0..6783cc2 100644
46218 --- a/drivers/xen/sys-hypervisor.c
46219 +++ b/drivers/xen/sys-hypervisor.c
46220 @@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct kobject *kobj,
46221 return 0;
46222 }
46223
46224 -static struct sysfs_ops hyp_sysfs_ops = {
46225 +static const struct sysfs_ops hyp_sysfs_ops = {
46226 .show = hyp_sysfs_show,
46227 .store = hyp_sysfs_store,
46228 };
46229 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
46230 index 18f74ec..3227009 100644
46231 --- a/fs/9p/vfs_inode.c
46232 +++ b/fs/9p/vfs_inode.c
46233 @@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
46234 static void
46235 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46236 {
46237 - char *s = nd_get_link(nd);
46238 + const char *s = nd_get_link(nd);
46239
46240 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
46241 IS_ERR(s) ? "<error>" : s);
46242 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
46243 index bb4cc5b..df5eaa0 100644
46244 --- a/fs/Kconfig.binfmt
46245 +++ b/fs/Kconfig.binfmt
46246 @@ -86,7 +86,7 @@ config HAVE_AOUT
46247
46248 config BINFMT_AOUT
46249 tristate "Kernel support for a.out and ECOFF binaries"
46250 - depends on HAVE_AOUT
46251 + depends on HAVE_AOUT && BROKEN
46252 ---help---
46253 A.out (Assembler.OUTput) is a set of formats for libraries and
46254 executables used in the earliest versions of UNIX. Linux used
46255 diff --git a/fs/aio.c b/fs/aio.c
46256 index 22a19ad..d484e5b 100644
46257 --- a/fs/aio.c
46258 +++ b/fs/aio.c
46259 @@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx *ctx)
46260 size += sizeof(struct io_event) * nr_events;
46261 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
46262
46263 - if (nr_pages < 0)
46264 + if (nr_pages <= 0)
46265 return -EINVAL;
46266
46267 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
46268 @@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ctx,
46269 struct aio_timeout to;
46270 int retry = 0;
46271
46272 + pax_track_stack();
46273 +
46274 /* needed to zero any padding within an entry (there shouldn't be
46275 * any, but C is fun!
46276 */
46277 @@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *iocb)
46278 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
46279 {
46280 ssize_t ret;
46281 + struct iovec iovstack;
46282
46283 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
46284 kiocb->ki_nbytes, 1,
46285 - &kiocb->ki_inline_vec, &kiocb->ki_iovec);
46286 + &iovstack, &kiocb->ki_iovec);
46287 if (ret < 0)
46288 goto out;
46289
46290 + if (kiocb->ki_iovec == &iovstack) {
46291 + kiocb->ki_inline_vec = iovstack;
46292 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
46293 + }
46294 kiocb->ki_nr_segs = kiocb->ki_nbytes;
46295 kiocb->ki_cur_seg = 0;
46296 /* ki_nbytes/left now reflect bytes instead of segs */
46297 diff --git a/fs/attr.c b/fs/attr.c
46298 index 96d394b..33cf5b4 100644
46299 --- a/fs/attr.c
46300 +++ b/fs/attr.c
46301 @@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
46302 unsigned long limit;
46303
46304 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
46305 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
46306 if (limit != RLIM_INFINITY && offset > limit)
46307 goto out_sig;
46308 if (offset > inode->i_sb->s_maxbytes)
46309 diff --git a/fs/autofs/root.c b/fs/autofs/root.c
46310 index 4a1401c..05eb5ca 100644
46311 --- a/fs/autofs/root.c
46312 +++ b/fs/autofs/root.c
46313 @@ -299,7 +299,8 @@ static int autofs_root_symlink(struct inode *dir, struct dentry *dentry, const c
46314 set_bit(n,sbi->symlink_bitmap);
46315 sl = &sbi->symlink[n];
46316 sl->len = strlen(symname);
46317 - sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
46318 + slsize = sl->len+1;
46319 + sl->data = kmalloc(slsize, GFP_KERNEL);
46320 if (!sl->data) {
46321 clear_bit(n,sbi->symlink_bitmap);
46322 unlock_kernel();
46323 diff --git a/fs/autofs4/symlink.c b/fs/autofs4/symlink.c
46324 index b4ea829..e63ef18 100644
46325 --- a/fs/autofs4/symlink.c
46326 +++ b/fs/autofs4/symlink.c
46327 @@ -15,7 +15,7 @@
46328 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
46329 {
46330 struct autofs_info *ino = autofs4_dentry_ino(dentry);
46331 - nd_set_link(nd, (char *)ino->u.symlink);
46332 + nd_set_link(nd, ino->u.symlink);
46333 return NULL;
46334 }
46335
46336 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
46337 index 2341375..df9d1c2 100644
46338 --- a/fs/autofs4/waitq.c
46339 +++ b/fs/autofs4/waitq.c
46340 @@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
46341 {
46342 unsigned long sigpipe, flags;
46343 mm_segment_t fs;
46344 - const char *data = (const char *)addr;
46345 + const char __user *data = (const char __force_user *)addr;
46346 ssize_t wr = 0;
46347
46348 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
46349 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
46350 index 9158c07..3f06659 100644
46351 --- a/fs/befs/linuxvfs.c
46352 +++ b/fs/befs/linuxvfs.c
46353 @@ -498,7 +498,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46354 {
46355 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
46356 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
46357 - char *link = nd_get_link(nd);
46358 + const char *link = nd_get_link(nd);
46359 if (!IS_ERR(link))
46360 kfree(link);
46361 }
46362 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
46363 index 0133b5a..b3baa9f 100644
46364 --- a/fs/binfmt_aout.c
46365 +++ b/fs/binfmt_aout.c
46366 @@ -16,6 +16,7 @@
46367 #include <linux/string.h>
46368 #include <linux/fs.h>
46369 #include <linux/file.h>
46370 +#include <linux/security.h>
46371 #include <linux/stat.h>
46372 #include <linux/fcntl.h>
46373 #include <linux/ptrace.h>
46374 @@ -102,6 +103,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46375 #endif
46376 # define START_STACK(u) (u.start_stack)
46377
46378 + memset(&dump, 0, sizeof(dump));
46379 +
46380 fs = get_fs();
46381 set_fs(KERNEL_DS);
46382 has_dumped = 1;
46383 @@ -113,10 +116,12 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46384
46385 /* If the size of the dump file exceeds the rlimit, then see what would happen
46386 if we wrote the stack, but not the data area. */
46387 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
46388 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
46389 dump.u_dsize = 0;
46390
46391 /* Make sure we have enough room to write the stack and data areas. */
46392 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
46393 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
46394 dump.u_ssize = 0;
46395
46396 @@ -146,9 +151,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46397 dump_size = dump.u_ssize << PAGE_SHIFT;
46398 DUMP_WRITE(dump_start,dump_size);
46399 }
46400 -/* Finally dump the task struct. Not be used by gdb, but could be useful */
46401 - set_fs(KERNEL_DS);
46402 - DUMP_WRITE(current,sizeof(*current));
46403 +/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
46404 end_coredump:
46405 set_fs(fs);
46406 return has_dumped;
46407 @@ -249,6 +252,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46408 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
46409 if (rlim >= RLIM_INFINITY)
46410 rlim = ~0;
46411 +
46412 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
46413 if (ex.a_data + ex.a_bss > rlim)
46414 return -ENOMEM;
46415
46416 @@ -277,6 +282,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46417 install_exec_creds(bprm);
46418 current->flags &= ~PF_FORKNOEXEC;
46419
46420 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46421 + current->mm->pax_flags = 0UL;
46422 +#endif
46423 +
46424 +#ifdef CONFIG_PAX_PAGEEXEC
46425 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
46426 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
46427 +
46428 +#ifdef CONFIG_PAX_EMUTRAMP
46429 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
46430 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
46431 +#endif
46432 +
46433 +#ifdef CONFIG_PAX_MPROTECT
46434 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
46435 + current->mm->pax_flags |= MF_PAX_MPROTECT;
46436 +#endif
46437 +
46438 + }
46439 +#endif
46440 +
46441 if (N_MAGIC(ex) == OMAGIC) {
46442 unsigned long text_addr, map_size;
46443 loff_t pos;
46444 @@ -349,7 +375,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46445
46446 down_write(&current->mm->mmap_sem);
46447 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
46448 - PROT_READ | PROT_WRITE | PROT_EXEC,
46449 + PROT_READ | PROT_WRITE,
46450 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
46451 fd_offset + ex.a_text);
46452 up_write(&current->mm->mmap_sem);
46453 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
46454 index 1ed37ba..b9c035f 100644
46455 --- a/fs/binfmt_elf.c
46456 +++ b/fs/binfmt_elf.c
46457 @@ -31,6 +31,7 @@
46458 #include <linux/random.h>
46459 #include <linux/elf.h>
46460 #include <linux/utsname.h>
46461 +#include <linux/xattr.h>
46462 #include <asm/uaccess.h>
46463 #include <asm/param.h>
46464 #include <asm/page.h>
46465 @@ -50,6 +51,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
46466 #define elf_core_dump NULL
46467 #endif
46468
46469 +#ifdef CONFIG_PAX_MPROTECT
46470 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
46471 +#endif
46472 +
46473 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
46474 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
46475 #else
46476 @@ -69,6 +74,11 @@ static struct linux_binfmt elf_format = {
46477 .load_binary = load_elf_binary,
46478 .load_shlib = load_elf_library,
46479 .core_dump = elf_core_dump,
46480 +
46481 +#ifdef CONFIG_PAX_MPROTECT
46482 + .handle_mprotect= elf_handle_mprotect,
46483 +#endif
46484 +
46485 .min_coredump = ELF_EXEC_PAGESIZE,
46486 .hasvdso = 1
46487 };
46488 @@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
46489
46490 static int set_brk(unsigned long start, unsigned long end)
46491 {
46492 + unsigned long e = end;
46493 +
46494 start = ELF_PAGEALIGN(start);
46495 end = ELF_PAGEALIGN(end);
46496 if (end > start) {
46497 @@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
46498 if (BAD_ADDR(addr))
46499 return addr;
46500 }
46501 - current->mm->start_brk = current->mm->brk = end;
46502 + current->mm->start_brk = current->mm->brk = e;
46503 return 0;
46504 }
46505
46506 @@ -148,12 +160,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46507 elf_addr_t __user *u_rand_bytes;
46508 const char *k_platform = ELF_PLATFORM;
46509 const char *k_base_platform = ELF_BASE_PLATFORM;
46510 - unsigned char k_rand_bytes[16];
46511 + u32 k_rand_bytes[4];
46512 int items;
46513 elf_addr_t *elf_info;
46514 int ei_index = 0;
46515 const struct cred *cred = current_cred();
46516 struct vm_area_struct *vma;
46517 + unsigned long saved_auxv[AT_VECTOR_SIZE];
46518 +
46519 + pax_track_stack();
46520
46521 /*
46522 * In some cases (e.g. Hyper-Threading), we want to avoid L1
46523 @@ -195,8 +210,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46524 * Generate 16 random bytes for userspace PRNG seeding.
46525 */
46526 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
46527 - u_rand_bytes = (elf_addr_t __user *)
46528 - STACK_ALLOC(p, sizeof(k_rand_bytes));
46529 + srandom32(k_rand_bytes[0] ^ random32());
46530 + srandom32(k_rand_bytes[1] ^ random32());
46531 + srandom32(k_rand_bytes[2] ^ random32());
46532 + srandom32(k_rand_bytes[3] ^ random32());
46533 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
46534 + u_rand_bytes = (elf_addr_t __user *) p;
46535 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
46536 return -EFAULT;
46537
46538 @@ -308,9 +327,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46539 return -EFAULT;
46540 current->mm->env_end = p;
46541
46542 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
46543 +
46544 /* Put the elf_info on the stack in the right place. */
46545 sp = (elf_addr_t __user *)envp + 1;
46546 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
46547 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
46548 return -EFAULT;
46549 return 0;
46550 }
46551 @@ -385,10 +406,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46552 {
46553 struct elf_phdr *elf_phdata;
46554 struct elf_phdr *eppnt;
46555 - unsigned long load_addr = 0;
46556 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
46557 int load_addr_set = 0;
46558 unsigned long last_bss = 0, elf_bss = 0;
46559 - unsigned long error = ~0UL;
46560 + unsigned long error = -EINVAL;
46561 unsigned long total_size;
46562 int retval, i, size;
46563
46564 @@ -434,6 +455,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46565 goto out_close;
46566 }
46567
46568 +#ifdef CONFIG_PAX_SEGMEXEC
46569 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
46570 + pax_task_size = SEGMEXEC_TASK_SIZE;
46571 +#endif
46572 +
46573 eppnt = elf_phdata;
46574 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
46575 if (eppnt->p_type == PT_LOAD) {
46576 @@ -477,8 +503,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46577 k = load_addr + eppnt->p_vaddr;
46578 if (BAD_ADDR(k) ||
46579 eppnt->p_filesz > eppnt->p_memsz ||
46580 - eppnt->p_memsz > TASK_SIZE ||
46581 - TASK_SIZE - eppnt->p_memsz < k) {
46582 + eppnt->p_memsz > pax_task_size ||
46583 + pax_task_size - eppnt->p_memsz < k) {
46584 error = -ENOMEM;
46585 goto out_close;
46586 }
46587 @@ -532,6 +558,348 @@ out:
46588 return error;
46589 }
46590
46591 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
46592 +{
46593 + unsigned long pax_flags = 0UL;
46594 +
46595 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
46596 +
46597 +#ifdef CONFIG_PAX_PAGEEXEC
46598 + if (elf_phdata->p_flags & PF_PAGEEXEC)
46599 + pax_flags |= MF_PAX_PAGEEXEC;
46600 +#endif
46601 +
46602 +#ifdef CONFIG_PAX_SEGMEXEC
46603 + if (elf_phdata->p_flags & PF_SEGMEXEC)
46604 + pax_flags |= MF_PAX_SEGMEXEC;
46605 +#endif
46606 +
46607 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46608 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46609 + if (nx_enabled)
46610 + pax_flags &= ~MF_PAX_SEGMEXEC;
46611 + else
46612 + pax_flags &= ~MF_PAX_PAGEEXEC;
46613 + }
46614 +#endif
46615 +
46616 +#ifdef CONFIG_PAX_EMUTRAMP
46617 + if (elf_phdata->p_flags & PF_EMUTRAMP)
46618 + pax_flags |= MF_PAX_EMUTRAMP;
46619 +#endif
46620 +
46621 +#ifdef CONFIG_PAX_MPROTECT
46622 + if (elf_phdata->p_flags & PF_MPROTECT)
46623 + pax_flags |= MF_PAX_MPROTECT;
46624 +#endif
46625 +
46626 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46627 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
46628 + pax_flags |= MF_PAX_RANDMMAP;
46629 +#endif
46630 +
46631 +#endif
46632 +
46633 + return pax_flags;
46634 +}
46635 +
46636 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
46637 +{
46638 + unsigned long pax_flags = 0UL;
46639 +
46640 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
46641 +
46642 +#ifdef CONFIG_PAX_PAGEEXEC
46643 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
46644 + pax_flags |= MF_PAX_PAGEEXEC;
46645 +#endif
46646 +
46647 +#ifdef CONFIG_PAX_SEGMEXEC
46648 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
46649 + pax_flags |= MF_PAX_SEGMEXEC;
46650 +#endif
46651 +
46652 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46653 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46654 + if (nx_enabled)
46655 + pax_flags &= ~MF_PAX_SEGMEXEC;
46656 + else
46657 + pax_flags &= ~MF_PAX_PAGEEXEC;
46658 + }
46659 +#endif
46660 +
46661 +#ifdef CONFIG_PAX_EMUTRAMP
46662 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
46663 + pax_flags |= MF_PAX_EMUTRAMP;
46664 +#endif
46665 +
46666 +#ifdef CONFIG_PAX_MPROTECT
46667 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
46668 + pax_flags |= MF_PAX_MPROTECT;
46669 +#endif
46670 +
46671 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46672 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
46673 + pax_flags |= MF_PAX_RANDMMAP;
46674 +#endif
46675 +
46676 +#endif
46677 +
46678 + return pax_flags;
46679 +}
46680 +
46681 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
46682 +{
46683 + unsigned long pax_flags = 0UL;
46684 +
46685 +#ifdef CONFIG_PAX_EI_PAX
46686 +
46687 +#ifdef CONFIG_PAX_PAGEEXEC
46688 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
46689 + pax_flags |= MF_PAX_PAGEEXEC;
46690 +#endif
46691 +
46692 +#ifdef CONFIG_PAX_SEGMEXEC
46693 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
46694 + pax_flags |= MF_PAX_SEGMEXEC;
46695 +#endif
46696 +
46697 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46698 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46699 + if (nx_enabled)
46700 + pax_flags &= ~MF_PAX_SEGMEXEC;
46701 + else
46702 + pax_flags &= ~MF_PAX_PAGEEXEC;
46703 + }
46704 +#endif
46705 +
46706 +#ifdef CONFIG_PAX_EMUTRAMP
46707 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
46708 + pax_flags |= MF_PAX_EMUTRAMP;
46709 +#endif
46710 +
46711 +#ifdef CONFIG_PAX_MPROTECT
46712 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
46713 + pax_flags |= MF_PAX_MPROTECT;
46714 +#endif
46715 +
46716 +#ifdef CONFIG_PAX_ASLR
46717 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
46718 + pax_flags |= MF_PAX_RANDMMAP;
46719 +#endif
46720 +
46721 +#else
46722 +
46723 +#ifdef CONFIG_PAX_PAGEEXEC
46724 + pax_flags |= MF_PAX_PAGEEXEC;
46725 +#endif
46726 +
46727 +#ifdef CONFIG_PAX_MPROTECT
46728 + pax_flags |= MF_PAX_MPROTECT;
46729 +#endif
46730 +
46731 +#ifdef CONFIG_PAX_RANDMMAP
46732 + pax_flags |= MF_PAX_RANDMMAP;
46733 +#endif
46734 +
46735 +#ifdef CONFIG_PAX_SEGMEXEC
46736 + if (!(__supported_pte_mask & _PAGE_NX)) {
46737 + pax_flags &= ~MF_PAX_PAGEEXEC;
46738 + pax_flags |= MF_PAX_SEGMEXEC;
46739 + }
46740 +#endif
46741 +
46742 +#endif
46743 +
46744 + return pax_flags;
46745 +}
46746 +
46747 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
46748 +{
46749 +
46750 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
46751 + unsigned long i;
46752 +
46753 + for (i = 0UL; i < elf_ex->e_phnum; i++)
46754 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
46755 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
46756 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
46757 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
46758 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
46759 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
46760 + return ~0UL;
46761 +
46762 +#ifdef CONFIG_PAX_SOFTMODE
46763 + if (pax_softmode)
46764 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
46765 + else
46766 +#endif
46767 +
46768 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
46769 + break;
46770 + }
46771 +#endif
46772 +
46773 + return ~0UL;
46774 +}
46775 +
46776 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
46777 +{
46778 + unsigned long pax_flags = 0UL;
46779 +
46780 +#ifdef CONFIG_PAX_PAGEEXEC
46781 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
46782 + pax_flags |= MF_PAX_PAGEEXEC;
46783 +#endif
46784 +
46785 +#ifdef CONFIG_PAX_SEGMEXEC
46786 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
46787 + pax_flags |= MF_PAX_SEGMEXEC;
46788 +#endif
46789 +
46790 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46791 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46792 + if ((__supported_pte_mask & _PAGE_NX))
46793 + pax_flags &= ~MF_PAX_SEGMEXEC;
46794 + else
46795 + pax_flags &= ~MF_PAX_PAGEEXEC;
46796 + }
46797 +#endif
46798 +
46799 +#ifdef CONFIG_PAX_EMUTRAMP
46800 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
46801 + pax_flags |= MF_PAX_EMUTRAMP;
46802 +#endif
46803 +
46804 +#ifdef CONFIG_PAX_MPROTECT
46805 + if (pax_flags_softmode & MF_PAX_MPROTECT)
46806 + pax_flags |= MF_PAX_MPROTECT;
46807 +#endif
46808 +
46809 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46810 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
46811 + pax_flags |= MF_PAX_RANDMMAP;
46812 +#endif
46813 +
46814 + return pax_flags;
46815 +}
46816 +
46817 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
46818 +{
46819 + unsigned long pax_flags = 0UL;
46820 +
46821 +#ifdef CONFIG_PAX_PAGEEXEC
46822 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
46823 + pax_flags |= MF_PAX_PAGEEXEC;
46824 +#endif
46825 +
46826 +#ifdef CONFIG_PAX_SEGMEXEC
46827 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
46828 + pax_flags |= MF_PAX_SEGMEXEC;
46829 +#endif
46830 +
46831 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46832 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46833 + if ((__supported_pte_mask & _PAGE_NX))
46834 + pax_flags &= ~MF_PAX_SEGMEXEC;
46835 + else
46836 + pax_flags &= ~MF_PAX_PAGEEXEC;
46837 + }
46838 +#endif
46839 +
46840 +#ifdef CONFIG_PAX_EMUTRAMP
46841 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
46842 + pax_flags |= MF_PAX_EMUTRAMP;
46843 +#endif
46844 +
46845 +#ifdef CONFIG_PAX_MPROTECT
46846 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
46847 + pax_flags |= MF_PAX_MPROTECT;
46848 +#endif
46849 +
46850 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46851 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
46852 + pax_flags |= MF_PAX_RANDMMAP;
46853 +#endif
46854 +
46855 + return pax_flags;
46856 +}
46857 +
46858 +static unsigned long pax_parse_xattr_pax(struct file * const file)
46859 +{
46860 +
46861 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
46862 + ssize_t xattr_size, i;
46863 + unsigned char xattr_value[5];
46864 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
46865 +
46866 + xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
46867 + if (xattr_size <= 0)
46868 + return ~0UL;
46869 +
46870 + for (i = 0; i < xattr_size; i++)
46871 + switch (xattr_value[i]) {
46872 + default:
46873 + return ~0UL;
46874 +
46875 +#define parse_flag(option1, option2, flag) \
46876 + case option1: \
46877 + pax_flags_hardmode |= MF_PAX_##flag; \
46878 + break; \
46879 + case option2: \
46880 + pax_flags_softmode |= MF_PAX_##flag; \
46881 + break;
46882 +
46883 + parse_flag('p', 'P', PAGEEXEC);
46884 + parse_flag('e', 'E', EMUTRAMP);
46885 + parse_flag('m', 'M', MPROTECT);
46886 + parse_flag('r', 'R', RANDMMAP);
46887 + parse_flag('s', 'S', SEGMEXEC);
46888 +
46889 +#undef parse_flag
46890 + }
46891 +
46892 + if (pax_flags_hardmode & pax_flags_softmode)
46893 + return ~0UL;
46894 +
46895 +#ifdef CONFIG_PAX_SOFTMODE
46896 + if (pax_softmode)
46897 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
46898 + else
46899 +#endif
46900 +
46901 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
46902 +#else
46903 + return ~0UL;
46904 +#endif
46905 +}
46906 +
46907 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
46908 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
46909 +{
46910 + unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
46911 +
46912 + pax_flags = pax_parse_ei_pax(elf_ex);
46913 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
46914 + xattr_pax_flags = pax_parse_xattr_pax(file);
46915 +
46916 + if (pt_pax_flags == ~0UL)
46917 + pt_pax_flags = xattr_pax_flags;
46918 + else if (xattr_pax_flags == ~0UL)
46919 + xattr_pax_flags = pt_pax_flags;
46920 + if (pt_pax_flags != xattr_pax_flags)
46921 + return -EINVAL;
46922 + if (pt_pax_flags != ~0UL)
46923 + pax_flags = pt_pax_flags;
46924 +
46925 + if (0 > pax_check_flags(&pax_flags))
46926 + return -EINVAL;
46927 +
46928 + current->mm->pax_flags = pax_flags;
46929 + return 0;
46930 +}
46931 +#endif
46932 +
46933 /*
46934 * These are the functions used to load ELF style executables and shared
46935 * libraries. There is no binary dependent code anywhere else.
46936 @@ -548,6 +916,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
46937 {
46938 unsigned int random_variable = 0;
46939
46940 +#ifdef CONFIG_PAX_RANDUSTACK
46941 + if (randomize_va_space)
46942 + return stack_top - current->mm->delta_stack;
46943 +#endif
46944 +
46945 if ((current->flags & PF_RANDOMIZE) &&
46946 !(current->personality & ADDR_NO_RANDOMIZE)) {
46947 random_variable = get_random_int() & STACK_RND_MASK;
46948 @@ -566,7 +939,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46949 unsigned long load_addr = 0, load_bias = 0;
46950 int load_addr_set = 0;
46951 char * elf_interpreter = NULL;
46952 - unsigned long error;
46953 + unsigned long error = 0;
46954 struct elf_phdr *elf_ppnt, *elf_phdata;
46955 unsigned long elf_bss, elf_brk;
46956 int retval, i;
46957 @@ -576,11 +949,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46958 unsigned long start_code, end_code, start_data, end_data;
46959 unsigned long reloc_func_desc = 0;
46960 int executable_stack = EXSTACK_DEFAULT;
46961 - unsigned long def_flags = 0;
46962 struct {
46963 struct elfhdr elf_ex;
46964 struct elfhdr interp_elf_ex;
46965 } *loc;
46966 + unsigned long pax_task_size = TASK_SIZE;
46967
46968 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
46969 if (!loc) {
46970 @@ -718,11 +1091,80 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46971
46972 /* OK, This is the point of no return */
46973 current->flags &= ~PF_FORKNOEXEC;
46974 - current->mm->def_flags = def_flags;
46975 +
46976 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46977 + current->mm->pax_flags = 0UL;
46978 +#endif
46979 +
46980 +#ifdef CONFIG_PAX_DLRESOLVE
46981 + current->mm->call_dl_resolve = 0UL;
46982 +#endif
46983 +
46984 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
46985 + current->mm->call_syscall = 0UL;
46986 +#endif
46987 +
46988 +#ifdef CONFIG_PAX_ASLR
46989 + current->mm->delta_mmap = 0UL;
46990 + current->mm->delta_stack = 0UL;
46991 +#endif
46992 +
46993 + current->mm->def_flags = 0;
46994 +
46995 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
46996 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
46997 + send_sig(SIGKILL, current, 0);
46998 + goto out_free_dentry;
46999 + }
47000 +#endif
47001 +
47002 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
47003 + pax_set_initial_flags(bprm);
47004 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
47005 + if (pax_set_initial_flags_func)
47006 + (pax_set_initial_flags_func)(bprm);
47007 +#endif
47008 +
47009 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
47010 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
47011 + current->mm->context.user_cs_limit = PAGE_SIZE;
47012 + current->mm->def_flags |= VM_PAGEEXEC;
47013 + }
47014 +#endif
47015 +
47016 +#ifdef CONFIG_PAX_SEGMEXEC
47017 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
47018 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
47019 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
47020 + pax_task_size = SEGMEXEC_TASK_SIZE;
47021 + }
47022 +#endif
47023 +
47024 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
47025 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
47026 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
47027 + put_cpu();
47028 + }
47029 +#endif
47030
47031 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
47032 may depend on the personality. */
47033 SET_PERSONALITY(loc->elf_ex);
47034 +
47035 +#ifdef CONFIG_PAX_ASLR
47036 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
47037 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
47038 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
47039 + }
47040 +#endif
47041 +
47042 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
47043 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
47044 + executable_stack = EXSTACK_DISABLE_X;
47045 + current->personality &= ~READ_IMPLIES_EXEC;
47046 + } else
47047 +#endif
47048 +
47049 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
47050 current->personality |= READ_IMPLIES_EXEC;
47051
47052 @@ -800,10 +1242,27 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47053 * might try to exec. This is because the brk will
47054 * follow the loader, and is not movable. */
47055 #ifdef CONFIG_X86
47056 - load_bias = 0;
47057 + if (current->flags & PF_RANDOMIZE)
47058 + load_bias = 0;
47059 + else
47060 + load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
47061 #else
47062 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
47063 #endif
47064 +
47065 +#ifdef CONFIG_PAX_RANDMMAP
47066 + /* PaX: randomize base address at the default exe base if requested */
47067 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
47068 +#ifdef CONFIG_SPARC64
47069 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
47070 +#else
47071 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
47072 +#endif
47073 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
47074 + elf_flags |= MAP_FIXED;
47075 + }
47076 +#endif
47077 +
47078 }
47079
47080 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
47081 @@ -836,9 +1295,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47082 * allowed task size. Note that p_filesz must always be
47083 * <= p_memsz so it is only necessary to check p_memsz.
47084 */
47085 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
47086 - elf_ppnt->p_memsz > TASK_SIZE ||
47087 - TASK_SIZE - elf_ppnt->p_memsz < k) {
47088 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
47089 + elf_ppnt->p_memsz > pax_task_size ||
47090 + pax_task_size - elf_ppnt->p_memsz < k) {
47091 /* set_brk can never work. Avoid overflows. */
47092 send_sig(SIGKILL, current, 0);
47093 retval = -EINVAL;
47094 @@ -866,6 +1325,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47095 start_data += load_bias;
47096 end_data += load_bias;
47097
47098 +#ifdef CONFIG_PAX_RANDMMAP
47099 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
47100 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
47101 +#endif
47102 +
47103 /* Calling set_brk effectively mmaps the pages that we need
47104 * for the bss and break sections. We must do this before
47105 * mapping in the interpreter, to make sure it doesn't wind
47106 @@ -877,9 +1341,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47107 goto out_free_dentry;
47108 }
47109 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
47110 - send_sig(SIGSEGV, current, 0);
47111 - retval = -EFAULT; /* Nobody gets to see this, but.. */
47112 - goto out_free_dentry;
47113 + /*
47114 + * This bss-zeroing can fail if the ELF
47115 + * file specifies odd protections. So
47116 + * we don't check the return value
47117 + */
47118 }
47119
47120 if (elf_interpreter) {
47121 @@ -1112,8 +1578,10 @@ static int dump_seek(struct file *file, loff_t off)
47122 unsigned long n = off;
47123 if (n > PAGE_SIZE)
47124 n = PAGE_SIZE;
47125 - if (!dump_write(file, buf, n))
47126 + if (!dump_write(file, buf, n)) {
47127 + free_page((unsigned long)buf);
47128 return 0;
47129 + }
47130 off -= n;
47131 }
47132 free_page((unsigned long)buf);
47133 @@ -1125,7 +1593,7 @@ static int dump_seek(struct file *file, loff_t off)
47134 * Decide what to dump of a segment, part, all or none.
47135 */
47136 static unsigned long vma_dump_size(struct vm_area_struct *vma,
47137 - unsigned long mm_flags)
47138 + unsigned long mm_flags, long signr)
47139 {
47140 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
47141
47142 @@ -1159,7 +1627,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
47143 if (vma->vm_file == NULL)
47144 return 0;
47145
47146 - if (FILTER(MAPPED_PRIVATE))
47147 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
47148 goto whole;
47149
47150 /*
47151 @@ -1255,8 +1723,11 @@ static int writenote(struct memelfnote *men, struct file *file,
47152 #undef DUMP_WRITE
47153
47154 #define DUMP_WRITE(addr, nr) \
47155 + do { \
47156 + gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
47157 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
47158 - goto end_coredump;
47159 + goto end_coredump; \
47160 + } while (0);
47161
47162 static void fill_elf_header(struct elfhdr *elf, int segs,
47163 u16 machine, u32 flags, u8 osabi)
47164 @@ -1385,9 +1856,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
47165 {
47166 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
47167 int i = 0;
47168 - do
47169 + do {
47170 i += 2;
47171 - while (auxv[i - 2] != AT_NULL);
47172 + } while (auxv[i - 2] != AT_NULL);
47173 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
47174 }
47175
47176 @@ -1973,7 +2444,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47177 phdr.p_offset = offset;
47178 phdr.p_vaddr = vma->vm_start;
47179 phdr.p_paddr = 0;
47180 - phdr.p_filesz = vma_dump_size(vma, mm_flags);
47181 + phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
47182 phdr.p_memsz = vma->vm_end - vma->vm_start;
47183 offset += phdr.p_filesz;
47184 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
47185 @@ -2006,7 +2477,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47186 unsigned long addr;
47187 unsigned long end;
47188
47189 - end = vma->vm_start + vma_dump_size(vma, mm_flags);
47190 + end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
47191
47192 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
47193 struct page *page;
47194 @@ -2015,6 +2486,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47195 page = get_dump_page(addr);
47196 if (page) {
47197 void *kaddr = kmap(page);
47198 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
47199 stop = ((size += PAGE_SIZE) > limit) ||
47200 !dump_write(file, kaddr, PAGE_SIZE);
47201 kunmap(page);
47202 @@ -2042,6 +2514,97 @@ out:
47203
47204 #endif /* USE_ELF_CORE_DUMP */
47205
47206 +#ifdef CONFIG_PAX_MPROTECT
47207 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
47208 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
47209 + * we'll remove VM_MAYWRITE for good on RELRO segments.
47210 + *
47211 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
47212 + * basis because we want to allow the common case and not the special ones.
47213 + */
47214 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
47215 +{
47216 + struct elfhdr elf_h;
47217 + struct elf_phdr elf_p;
47218 + unsigned long i;
47219 + unsigned long oldflags;
47220 + bool is_textrel_rw, is_textrel_rx, is_relro;
47221 +
47222 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
47223 + return;
47224 +
47225 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
47226 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
47227 +
47228 +#ifdef CONFIG_PAX_ELFRELOCS
47229 + /* possible TEXTREL */
47230 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
47231 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
47232 +#else
47233 + is_textrel_rw = false;
47234 + is_textrel_rx = false;
47235 +#endif
47236 +
47237 + /* possible RELRO */
47238 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
47239 +
47240 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
47241 + return;
47242 +
47243 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
47244 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
47245 +
47246 +#ifdef CONFIG_PAX_ETEXECRELOCS
47247 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
47248 +#else
47249 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
47250 +#endif
47251 +
47252 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
47253 + !elf_check_arch(&elf_h) ||
47254 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
47255 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
47256 + return;
47257 +
47258 + for (i = 0UL; i < elf_h.e_phnum; i++) {
47259 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
47260 + return;
47261 + switch (elf_p.p_type) {
47262 + case PT_DYNAMIC:
47263 + if (!is_textrel_rw && !is_textrel_rx)
47264 + continue;
47265 + i = 0UL;
47266 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
47267 + elf_dyn dyn;
47268 +
47269 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
47270 + return;
47271 + if (dyn.d_tag == DT_NULL)
47272 + return;
47273 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
47274 + gr_log_textrel(vma);
47275 + if (is_textrel_rw)
47276 + vma->vm_flags |= VM_MAYWRITE;
47277 + else
47278 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
47279 + vma->vm_flags &= ~VM_MAYWRITE;
47280 + return;
47281 + }
47282 + i++;
47283 + }
47284 + return;
47285 +
47286 + case PT_GNU_RELRO:
47287 + if (!is_relro)
47288 + continue;
47289 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
47290 + vma->vm_flags &= ~VM_MAYWRITE;
47291 + return;
47292 + }
47293 + }
47294 +}
47295 +#endif
47296 +
47297 static int __init init_elf_binfmt(void)
47298 {
47299 return register_binfmt(&elf_format);
47300 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
47301 index ca88c46..f155a60 100644
47302 --- a/fs/binfmt_flat.c
47303 +++ b/fs/binfmt_flat.c
47304 @@ -564,7 +564,9 @@ static int load_flat_file(struct linux_binprm * bprm,
47305 realdatastart = (unsigned long) -ENOMEM;
47306 printk("Unable to allocate RAM for process data, errno %d\n",
47307 (int)-realdatastart);
47308 + down_write(&current->mm->mmap_sem);
47309 do_munmap(current->mm, textpos, text_len);
47310 + up_write(&current->mm->mmap_sem);
47311 ret = realdatastart;
47312 goto err;
47313 }
47314 @@ -588,8 +590,10 @@ static int load_flat_file(struct linux_binprm * bprm,
47315 }
47316 if (IS_ERR_VALUE(result)) {
47317 printk("Unable to read data+bss, errno %d\n", (int)-result);
47318 + down_write(&current->mm->mmap_sem);
47319 do_munmap(current->mm, textpos, text_len);
47320 do_munmap(current->mm, realdatastart, data_len + extra);
47321 + up_write(&current->mm->mmap_sem);
47322 ret = result;
47323 goto err;
47324 }
47325 @@ -658,8 +662,10 @@ static int load_flat_file(struct linux_binprm * bprm,
47326 }
47327 if (IS_ERR_VALUE(result)) {
47328 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
47329 + down_write(&current->mm->mmap_sem);
47330 do_munmap(current->mm, textpos, text_len + data_len + extra +
47331 MAX_SHARED_LIBS * sizeof(unsigned long));
47332 + up_write(&current->mm->mmap_sem);
47333 ret = result;
47334 goto err;
47335 }
47336 diff --git a/fs/bio.c b/fs/bio.c
47337 index e696713..83de133 100644
47338 --- a/fs/bio.c
47339 +++ b/fs/bio.c
47340 @@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
47341
47342 i = 0;
47343 while (i < bio_slab_nr) {
47344 - struct bio_slab *bslab = &bio_slabs[i];
47345 + bslab = &bio_slabs[i];
47346
47347 if (!bslab->slab && entry == -1)
47348 entry = i;
47349 @@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
47350 const int read = bio_data_dir(bio) == READ;
47351 struct bio_map_data *bmd = bio->bi_private;
47352 int i;
47353 - char *p = bmd->sgvecs[0].iov_base;
47354 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
47355
47356 __bio_for_each_segment(bvec, bio, i, 0) {
47357 char *addr = page_address(bvec->bv_page);
47358 diff --git a/fs/block_dev.c b/fs/block_dev.c
47359 index e65efa2..04fae57 100644
47360 --- a/fs/block_dev.c
47361 +++ b/fs/block_dev.c
47362 @@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev, void *holder)
47363 else if (bdev->bd_contains == bdev)
47364 res = 0; /* is a whole device which isn't held */
47365
47366 - else if (bdev->bd_contains->bd_holder == bd_claim)
47367 + else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
47368 res = 0; /* is a partition of a device that is being partitioned */
47369 else if (bdev->bd_contains->bd_holder != NULL)
47370 res = -EBUSY; /* is a partition of a held device */
47371 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
47372 index c4bc570..42acd8d 100644
47373 --- a/fs/btrfs/ctree.c
47374 +++ b/fs/btrfs/ctree.c
47375 @@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
47376 free_extent_buffer(buf);
47377 add_root_to_dirty_list(root);
47378 } else {
47379 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
47380 - parent_start = parent->start;
47381 - else
47382 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
47383 + if (parent)
47384 + parent_start = parent->start;
47385 + else
47386 + parent_start = 0;
47387 + } else
47388 parent_start = 0;
47389
47390 WARN_ON(trans->transid != btrfs_header_generation(parent));
47391 @@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_trans_handle *trans,
47392
47393 ret = 0;
47394 if (slot == 0) {
47395 - struct btrfs_disk_key disk_key;
47396 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
47397 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
47398 }
47399 diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
47400 index f447188..59c17c5 100644
47401 --- a/fs/btrfs/disk-io.c
47402 +++ b/fs/btrfs/disk-io.c
47403 @@ -39,7 +39,7 @@
47404 #include "tree-log.h"
47405 #include "free-space-cache.h"
47406
47407 -static struct extent_io_ops btree_extent_io_ops;
47408 +static const struct extent_io_ops btree_extent_io_ops;
47409 static void end_workqueue_fn(struct btrfs_work *work);
47410 static void free_fs_root(struct btrfs_root *root);
47411
47412 @@ -2607,7 +2607,7 @@ out:
47413 return 0;
47414 }
47415
47416 -static struct extent_io_ops btree_extent_io_ops = {
47417 +static const struct extent_io_ops btree_extent_io_ops = {
47418 .write_cache_pages_lock_hook = btree_lock_page_hook,
47419 .readpage_end_io_hook = btree_readpage_end_io_hook,
47420 .submit_bio_hook = btree_submit_bio_hook,
47421 diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
47422 index 559f724..a026171 100644
47423 --- a/fs/btrfs/extent-tree.c
47424 +++ b/fs/btrfs/extent-tree.c
47425 @@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,
47426 u64 group_start = group->key.objectid;
47427 new_extents = kmalloc(sizeof(*new_extents),
47428 GFP_NOFS);
47429 + if (!new_extents) {
47430 + ret = -ENOMEM;
47431 + goto out;
47432 + }
47433 nr_extents = 1;
47434 ret = get_new_locations(reloc_inode,
47435 extent_key,
47436 diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
47437 index 36de250..7ec75c7 100644
47438 --- a/fs/btrfs/extent_io.h
47439 +++ b/fs/btrfs/extent_io.h
47440 @@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw,
47441 struct bio *bio, int mirror_num,
47442 unsigned long bio_flags);
47443 struct extent_io_ops {
47444 - int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
47445 + int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
47446 u64 start, u64 end, int *page_started,
47447 unsigned long *nr_written);
47448 - int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
47449 - int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
47450 + int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
47451 + int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
47452 extent_submit_bio_hook_t *submit_bio_hook;
47453 - int (*merge_bio_hook)(struct page *page, unsigned long offset,
47454 + int (* const merge_bio_hook)(struct page *page, unsigned long offset,
47455 size_t size, struct bio *bio,
47456 unsigned long bio_flags);
47457 - int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
47458 - int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
47459 + int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
47460 + int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
47461 u64 start, u64 end,
47462 struct extent_state *state);
47463 - int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
47464 + int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
47465 u64 start, u64 end,
47466 struct extent_state *state);
47467 - int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
47468 + int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
47469 struct extent_state *state);
47470 - int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
47471 + int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
47472 struct extent_state *state, int uptodate);
47473 - int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
47474 + int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
47475 unsigned long old, unsigned long bits);
47476 - int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
47477 + int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
47478 unsigned long bits);
47479 - int (*merge_extent_hook)(struct inode *inode,
47480 + int (* const merge_extent_hook)(struct inode *inode,
47481 struct extent_state *new,
47482 struct extent_state *other);
47483 - int (*split_extent_hook)(struct inode *inode,
47484 + int (* const split_extent_hook)(struct inode *inode,
47485 struct extent_state *orig, u64 split);
47486 - int (*write_cache_pages_lock_hook)(struct page *page);
47487 + int (* const write_cache_pages_lock_hook)(struct page *page);
47488 };
47489
47490 struct extent_io_tree {
47491 @@ -88,7 +88,7 @@ struct extent_io_tree {
47492 u64 dirty_bytes;
47493 spinlock_t lock;
47494 spinlock_t buffer_lock;
47495 - struct extent_io_ops *ops;
47496 + const struct extent_io_ops *ops;
47497 };
47498
47499 struct extent_state {
47500 diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
47501 index cb2849f..3718fb4 100644
47502 --- a/fs/btrfs/free-space-cache.c
47503 +++ b/fs/btrfs/free-space-cache.c
47504 @@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
47505
47506 while(1) {
47507 if (entry->bytes < bytes || entry->offset < min_start) {
47508 - struct rb_node *node;
47509 -
47510 node = rb_next(&entry->offset_index);
47511 if (!node)
47512 break;
47513 @@ -1226,7 +1224,7 @@ again:
47514 */
47515 while (entry->bitmap || found_bitmap ||
47516 (!entry->bitmap && entry->bytes < min_bytes)) {
47517 - struct rb_node *node = rb_next(&entry->offset_index);
47518 + node = rb_next(&entry->offset_index);
47519
47520 if (entry->bitmap && entry->bytes > bytes + empty_size) {
47521 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
47522 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
47523 index e03a836..323837e 100644
47524 --- a/fs/btrfs/inode.c
47525 +++ b/fs/btrfs/inode.c
47526 @@ -63,7 +63,7 @@ static const struct inode_operations btrfs_file_inode_operations;
47527 static const struct address_space_operations btrfs_aops;
47528 static const struct address_space_operations btrfs_symlink_aops;
47529 static const struct file_operations btrfs_dir_file_operations;
47530 -static struct extent_io_ops btrfs_extent_io_ops;
47531 +static const struct extent_io_ops btrfs_extent_io_ops;
47532
47533 static struct kmem_cache *btrfs_inode_cachep;
47534 struct kmem_cache *btrfs_trans_handle_cachep;
47535 @@ -925,6 +925,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
47536 1, 0, NULL, GFP_NOFS);
47537 while (start < end) {
47538 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
47539 + BUG_ON(!async_cow);
47540 async_cow->inode = inode;
47541 async_cow->root = root;
47542 async_cow->locked_page = locked_page;
47543 @@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(struct btrfs_path *path,
47544 inline_size = btrfs_file_extent_inline_item_len(leaf,
47545 btrfs_item_nr(leaf, path->slots[0]));
47546 tmp = kmalloc(inline_size, GFP_NOFS);
47547 + if (!tmp)
47548 + return -ENOMEM;
47549 ptr = btrfs_file_extent_inline_start(item);
47550
47551 read_extent_buffer(leaf, tmp, ptr, inline_size);
47552 @@ -5410,7 +5413,7 @@ fail:
47553 return -ENOMEM;
47554 }
47555
47556 -static int btrfs_getattr(struct vfsmount *mnt,
47557 +int btrfs_getattr(struct vfsmount *mnt,
47558 struct dentry *dentry, struct kstat *stat)
47559 {
47560 struct inode *inode = dentry->d_inode;
47561 @@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
47562 return 0;
47563 }
47564
47565 +EXPORT_SYMBOL(btrfs_getattr);
47566 +
47567 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
47568 +{
47569 + return BTRFS_I(inode)->root->anon_super.s_dev;
47570 +}
47571 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
47572 +
47573 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
47574 struct inode *new_dir, struct dentry *new_dentry)
47575 {
47576 @@ -5972,7 +5983,7 @@ static const struct file_operations btrfs_dir_file_operations = {
47577 .fsync = btrfs_sync_file,
47578 };
47579
47580 -static struct extent_io_ops btrfs_extent_io_ops = {
47581 +static const struct extent_io_ops btrfs_extent_io_ops = {
47582 .fill_delalloc = run_delalloc_range,
47583 .submit_bio_hook = btrfs_submit_bio_hook,
47584 .merge_bio_hook = btrfs_merge_bio_hook,
47585 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
47586 index ab7ab53..94e0781 100644
47587 --- a/fs/btrfs/relocation.c
47588 +++ b/fs/btrfs/relocation.c
47589 @@ -884,7 +884,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
47590 }
47591 spin_unlock(&rc->reloc_root_tree.lock);
47592
47593 - BUG_ON((struct btrfs_root *)node->data != root);
47594 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
47595
47596 if (!del) {
47597 spin_lock(&rc->reloc_root_tree.lock);
47598 diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
47599 index a240b6f..4ce16ef 100644
47600 --- a/fs/btrfs/sysfs.c
47601 +++ b/fs/btrfs/sysfs.c
47602 @@ -164,12 +164,12 @@ static void btrfs_root_release(struct kobject *kobj)
47603 complete(&root->kobj_unregister);
47604 }
47605
47606 -static struct sysfs_ops btrfs_super_attr_ops = {
47607 +static const struct sysfs_ops btrfs_super_attr_ops = {
47608 .show = btrfs_super_attr_show,
47609 .store = btrfs_super_attr_store,
47610 };
47611
47612 -static struct sysfs_ops btrfs_root_attr_ops = {
47613 +static const struct sysfs_ops btrfs_root_attr_ops = {
47614 .show = btrfs_root_attr_show,
47615 .store = btrfs_root_attr_store,
47616 };
47617 diff --git a/fs/buffer.c b/fs/buffer.c
47618 index 6fa5302..395d9f6 100644
47619 --- a/fs/buffer.c
47620 +++ b/fs/buffer.c
47621 @@ -25,6 +25,7 @@
47622 #include <linux/percpu.h>
47623 #include <linux/slab.h>
47624 #include <linux/capability.h>
47625 +#include <linux/security.h>
47626 #include <linux/blkdev.h>
47627 #include <linux/file.h>
47628 #include <linux/quotaops.h>
47629 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
47630 index 3797e00..ce776f6 100644
47631 --- a/fs/cachefiles/bind.c
47632 +++ b/fs/cachefiles/bind.c
47633 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
47634 args);
47635
47636 /* start by checking things over */
47637 - ASSERT(cache->fstop_percent >= 0 &&
47638 - cache->fstop_percent < cache->fcull_percent &&
47639 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
47640 cache->fcull_percent < cache->frun_percent &&
47641 cache->frun_percent < 100);
47642
47643 - ASSERT(cache->bstop_percent >= 0 &&
47644 - cache->bstop_percent < cache->bcull_percent &&
47645 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
47646 cache->bcull_percent < cache->brun_percent &&
47647 cache->brun_percent < 100);
47648
47649 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
47650 index 4618516..bb30d01 100644
47651 --- a/fs/cachefiles/daemon.c
47652 +++ b/fs/cachefiles/daemon.c
47653 @@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
47654 if (test_bit(CACHEFILES_DEAD, &cache->flags))
47655 return -EIO;
47656
47657 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
47658 + if (datalen > PAGE_SIZE - 1)
47659 return -EOPNOTSUPP;
47660
47661 /* drag the command string into the kernel so we can parse it */
47662 @@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
47663 if (args[0] != '%' || args[1] != '\0')
47664 return -EINVAL;
47665
47666 - if (fstop < 0 || fstop >= cache->fcull_percent)
47667 + if (fstop >= cache->fcull_percent)
47668 return cachefiles_daemon_range_error(cache, args);
47669
47670 cache->fstop_percent = fstop;
47671 @@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
47672 if (args[0] != '%' || args[1] != '\0')
47673 return -EINVAL;
47674
47675 - if (bstop < 0 || bstop >= cache->bcull_percent)
47676 + if (bstop >= cache->bcull_percent)
47677 return cachefiles_daemon_range_error(cache, args);
47678
47679 cache->bstop_percent = bstop;
47680 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
47681 index f7c255f..fcd61de 100644
47682 --- a/fs/cachefiles/internal.h
47683 +++ b/fs/cachefiles/internal.h
47684 @@ -56,7 +56,7 @@ struct cachefiles_cache {
47685 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
47686 struct rb_root active_nodes; /* active nodes (can't be culled) */
47687 rwlock_t active_lock; /* lock for active_nodes */
47688 - atomic_t gravecounter; /* graveyard uniquifier */
47689 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
47690 unsigned frun_percent; /* when to stop culling (% files) */
47691 unsigned fcull_percent; /* when to start culling (% files) */
47692 unsigned fstop_percent; /* when to stop allocating (% files) */
47693 @@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
47694 * proc.c
47695 */
47696 #ifdef CONFIG_CACHEFILES_HISTOGRAM
47697 -extern atomic_t cachefiles_lookup_histogram[HZ];
47698 -extern atomic_t cachefiles_mkdir_histogram[HZ];
47699 -extern atomic_t cachefiles_create_histogram[HZ];
47700 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
47701 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
47702 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
47703
47704 extern int __init cachefiles_proc_init(void);
47705 extern void cachefiles_proc_cleanup(void);
47706 static inline
47707 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
47708 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
47709 {
47710 unsigned long jif = jiffies - start_jif;
47711 if (jif >= HZ)
47712 jif = HZ - 1;
47713 - atomic_inc(&histogram[jif]);
47714 + atomic_inc_unchecked(&histogram[jif]);
47715 }
47716
47717 #else
47718 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
47719 index 14ac480..a62766c 100644
47720 --- a/fs/cachefiles/namei.c
47721 +++ b/fs/cachefiles/namei.c
47722 @@ -250,7 +250,7 @@ try_again:
47723 /* first step is to make up a grave dentry in the graveyard */
47724 sprintf(nbuffer, "%08x%08x",
47725 (uint32_t) get_seconds(),
47726 - (uint32_t) atomic_inc_return(&cache->gravecounter));
47727 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
47728
47729 /* do the multiway lock magic */
47730 trap = lock_rename(cache->graveyard, dir);
47731 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
47732 index eccd339..4c1d995 100644
47733 --- a/fs/cachefiles/proc.c
47734 +++ b/fs/cachefiles/proc.c
47735 @@ -14,9 +14,9 @@
47736 #include <linux/seq_file.h>
47737 #include "internal.h"
47738
47739 -atomic_t cachefiles_lookup_histogram[HZ];
47740 -atomic_t cachefiles_mkdir_histogram[HZ];
47741 -atomic_t cachefiles_create_histogram[HZ];
47742 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
47743 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
47744 +atomic_unchecked_t cachefiles_create_histogram[HZ];
47745
47746 /*
47747 * display the latency histogram
47748 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
47749 return 0;
47750 default:
47751 index = (unsigned long) v - 3;
47752 - x = atomic_read(&cachefiles_lookup_histogram[index]);
47753 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
47754 - z = atomic_read(&cachefiles_create_histogram[index]);
47755 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
47756 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
47757 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
47758 if (x == 0 && y == 0 && z == 0)
47759 return 0;
47760
47761 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
47762 index a6c8c6f..5cf8517 100644
47763 --- a/fs/cachefiles/rdwr.c
47764 +++ b/fs/cachefiles/rdwr.c
47765 @@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
47766 old_fs = get_fs();
47767 set_fs(KERNEL_DS);
47768 ret = file->f_op->write(
47769 - file, (const void __user *) data, len, &pos);
47770 + file, (const void __force_user *) data, len, &pos);
47771 set_fs(old_fs);
47772 kunmap(page);
47773 if (ret != len)
47774 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
47775 index 42cec2a..2aba466 100644
47776 --- a/fs/cifs/cifs_debug.c
47777 +++ b/fs/cifs/cifs_debug.c
47778 @@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
47779 tcon = list_entry(tmp3,
47780 struct cifsTconInfo,
47781 tcon_list);
47782 - atomic_set(&tcon->num_smbs_sent, 0);
47783 - atomic_set(&tcon->num_writes, 0);
47784 - atomic_set(&tcon->num_reads, 0);
47785 - atomic_set(&tcon->num_oplock_brks, 0);
47786 - atomic_set(&tcon->num_opens, 0);
47787 - atomic_set(&tcon->num_posixopens, 0);
47788 - atomic_set(&tcon->num_posixmkdirs, 0);
47789 - atomic_set(&tcon->num_closes, 0);
47790 - atomic_set(&tcon->num_deletes, 0);
47791 - atomic_set(&tcon->num_mkdirs, 0);
47792 - atomic_set(&tcon->num_rmdirs, 0);
47793 - atomic_set(&tcon->num_renames, 0);
47794 - atomic_set(&tcon->num_t2renames, 0);
47795 - atomic_set(&tcon->num_ffirst, 0);
47796 - atomic_set(&tcon->num_fnext, 0);
47797 - atomic_set(&tcon->num_fclose, 0);
47798 - atomic_set(&tcon->num_hardlinks, 0);
47799 - atomic_set(&tcon->num_symlinks, 0);
47800 - atomic_set(&tcon->num_locks, 0);
47801 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
47802 + atomic_set_unchecked(&tcon->num_writes, 0);
47803 + atomic_set_unchecked(&tcon->num_reads, 0);
47804 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
47805 + atomic_set_unchecked(&tcon->num_opens, 0);
47806 + atomic_set_unchecked(&tcon->num_posixopens, 0);
47807 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
47808 + atomic_set_unchecked(&tcon->num_closes, 0);
47809 + atomic_set_unchecked(&tcon->num_deletes, 0);
47810 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
47811 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
47812 + atomic_set_unchecked(&tcon->num_renames, 0);
47813 + atomic_set_unchecked(&tcon->num_t2renames, 0);
47814 + atomic_set_unchecked(&tcon->num_ffirst, 0);
47815 + atomic_set_unchecked(&tcon->num_fnext, 0);
47816 + atomic_set_unchecked(&tcon->num_fclose, 0);
47817 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
47818 + atomic_set_unchecked(&tcon->num_symlinks, 0);
47819 + atomic_set_unchecked(&tcon->num_locks, 0);
47820 }
47821 }
47822 }
47823 @@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
47824 if (tcon->need_reconnect)
47825 seq_puts(m, "\tDISCONNECTED ");
47826 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
47827 - atomic_read(&tcon->num_smbs_sent),
47828 - atomic_read(&tcon->num_oplock_brks));
47829 + atomic_read_unchecked(&tcon->num_smbs_sent),
47830 + atomic_read_unchecked(&tcon->num_oplock_brks));
47831 seq_printf(m, "\nReads: %d Bytes: %lld",
47832 - atomic_read(&tcon->num_reads),
47833 + atomic_read_unchecked(&tcon->num_reads),
47834 (long long)(tcon->bytes_read));
47835 seq_printf(m, "\nWrites: %d Bytes: %lld",
47836 - atomic_read(&tcon->num_writes),
47837 + atomic_read_unchecked(&tcon->num_writes),
47838 (long long)(tcon->bytes_written));
47839 seq_printf(m, "\nFlushes: %d",
47840 - atomic_read(&tcon->num_flushes));
47841 + atomic_read_unchecked(&tcon->num_flushes));
47842 seq_printf(m, "\nLocks: %d HardLinks: %d "
47843 "Symlinks: %d",
47844 - atomic_read(&tcon->num_locks),
47845 - atomic_read(&tcon->num_hardlinks),
47846 - atomic_read(&tcon->num_symlinks));
47847 + atomic_read_unchecked(&tcon->num_locks),
47848 + atomic_read_unchecked(&tcon->num_hardlinks),
47849 + atomic_read_unchecked(&tcon->num_symlinks));
47850 seq_printf(m, "\nOpens: %d Closes: %d "
47851 "Deletes: %d",
47852 - atomic_read(&tcon->num_opens),
47853 - atomic_read(&tcon->num_closes),
47854 - atomic_read(&tcon->num_deletes));
47855 + atomic_read_unchecked(&tcon->num_opens),
47856 + atomic_read_unchecked(&tcon->num_closes),
47857 + atomic_read_unchecked(&tcon->num_deletes));
47858 seq_printf(m, "\nPosix Opens: %d "
47859 "Posix Mkdirs: %d",
47860 - atomic_read(&tcon->num_posixopens),
47861 - atomic_read(&tcon->num_posixmkdirs));
47862 + atomic_read_unchecked(&tcon->num_posixopens),
47863 + atomic_read_unchecked(&tcon->num_posixmkdirs));
47864 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
47865 - atomic_read(&tcon->num_mkdirs),
47866 - atomic_read(&tcon->num_rmdirs));
47867 + atomic_read_unchecked(&tcon->num_mkdirs),
47868 + atomic_read_unchecked(&tcon->num_rmdirs));
47869 seq_printf(m, "\nRenames: %d T2 Renames %d",
47870 - atomic_read(&tcon->num_renames),
47871 - atomic_read(&tcon->num_t2renames));
47872 + atomic_read_unchecked(&tcon->num_renames),
47873 + atomic_read_unchecked(&tcon->num_t2renames));
47874 seq_printf(m, "\nFindFirst: %d FNext %d "
47875 "FClose %d",
47876 - atomic_read(&tcon->num_ffirst),
47877 - atomic_read(&tcon->num_fnext),
47878 - atomic_read(&tcon->num_fclose));
47879 + atomic_read_unchecked(&tcon->num_ffirst),
47880 + atomic_read_unchecked(&tcon->num_fnext),
47881 + atomic_read_unchecked(&tcon->num_fclose));
47882 }
47883 }
47884 }
47885 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
47886 index 1445407..68cb0dc 100644
47887 --- a/fs/cifs/cifsfs.c
47888 +++ b/fs/cifs/cifsfs.c
47889 @@ -869,7 +869,7 @@ cifs_init_request_bufs(void)
47890 cifs_req_cachep = kmem_cache_create("cifs_request",
47891 CIFSMaxBufSize +
47892 MAX_CIFS_HDR_SIZE, 0,
47893 - SLAB_HWCACHE_ALIGN, NULL);
47894 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
47895 if (cifs_req_cachep == NULL)
47896 return -ENOMEM;
47897
47898 @@ -896,7 +896,7 @@ cifs_init_request_bufs(void)
47899 efficient to alloc 1 per page off the slab compared to 17K (5page)
47900 alloc of large cifs buffers even when page debugging is on */
47901 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
47902 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
47903 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
47904 NULL);
47905 if (cifs_sm_req_cachep == NULL) {
47906 mempool_destroy(cifs_req_poolp);
47907 @@ -991,8 +991,8 @@ init_cifs(void)
47908 atomic_set(&bufAllocCount, 0);
47909 atomic_set(&smBufAllocCount, 0);
47910 #ifdef CONFIG_CIFS_STATS2
47911 - atomic_set(&totBufAllocCount, 0);
47912 - atomic_set(&totSmBufAllocCount, 0);
47913 + atomic_set_unchecked(&totBufAllocCount, 0);
47914 + atomic_set_unchecked(&totSmBufAllocCount, 0);
47915 #endif /* CONFIG_CIFS_STATS2 */
47916
47917 atomic_set(&midCount, 0);
47918 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
47919 index e29581e..1c22bab 100644
47920 --- a/fs/cifs/cifsglob.h
47921 +++ b/fs/cifs/cifsglob.h
47922 @@ -252,28 +252,28 @@ struct cifsTconInfo {
47923 __u16 Flags; /* optional support bits */
47924 enum statusEnum tidStatus;
47925 #ifdef CONFIG_CIFS_STATS
47926 - atomic_t num_smbs_sent;
47927 - atomic_t num_writes;
47928 - atomic_t num_reads;
47929 - atomic_t num_flushes;
47930 - atomic_t num_oplock_brks;
47931 - atomic_t num_opens;
47932 - atomic_t num_closes;
47933 - atomic_t num_deletes;
47934 - atomic_t num_mkdirs;
47935 - atomic_t num_posixopens;
47936 - atomic_t num_posixmkdirs;
47937 - atomic_t num_rmdirs;
47938 - atomic_t num_renames;
47939 - atomic_t num_t2renames;
47940 - atomic_t num_ffirst;
47941 - atomic_t num_fnext;
47942 - atomic_t num_fclose;
47943 - atomic_t num_hardlinks;
47944 - atomic_t num_symlinks;
47945 - atomic_t num_locks;
47946 - atomic_t num_acl_get;
47947 - atomic_t num_acl_set;
47948 + atomic_unchecked_t num_smbs_sent;
47949 + atomic_unchecked_t num_writes;
47950 + atomic_unchecked_t num_reads;
47951 + atomic_unchecked_t num_flushes;
47952 + atomic_unchecked_t num_oplock_brks;
47953 + atomic_unchecked_t num_opens;
47954 + atomic_unchecked_t num_closes;
47955 + atomic_unchecked_t num_deletes;
47956 + atomic_unchecked_t num_mkdirs;
47957 + atomic_unchecked_t num_posixopens;
47958 + atomic_unchecked_t num_posixmkdirs;
47959 + atomic_unchecked_t num_rmdirs;
47960 + atomic_unchecked_t num_renames;
47961 + atomic_unchecked_t num_t2renames;
47962 + atomic_unchecked_t num_ffirst;
47963 + atomic_unchecked_t num_fnext;
47964 + atomic_unchecked_t num_fclose;
47965 + atomic_unchecked_t num_hardlinks;
47966 + atomic_unchecked_t num_symlinks;
47967 + atomic_unchecked_t num_locks;
47968 + atomic_unchecked_t num_acl_get;
47969 + atomic_unchecked_t num_acl_set;
47970 #ifdef CONFIG_CIFS_STATS2
47971 unsigned long long time_writes;
47972 unsigned long long time_reads;
47973 @@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const struct cifs_sb_info *cifs_sb)
47974 }
47975
47976 #ifdef CONFIG_CIFS_STATS
47977 -#define cifs_stats_inc atomic_inc
47978 +#define cifs_stats_inc atomic_inc_unchecked
47979
47980 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
47981 unsigned int bytes)
47982 @@ -701,8 +701,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
47983 /* Various Debug counters */
47984 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
47985 #ifdef CONFIG_CIFS_STATS2
47986 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
47987 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
47988 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
47989 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
47990 #endif
47991 GLOBAL_EXTERN atomic_t smBufAllocCount;
47992 GLOBAL_EXTERN atomic_t midCount;
47993 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
47994 index fc1e048..28b3441 100644
47995 --- a/fs/cifs/link.c
47996 +++ b/fs/cifs/link.c
47997 @@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
47998
47999 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
48000 {
48001 - char *p = nd_get_link(nd);
48002 + const char *p = nd_get_link(nd);
48003 if (!IS_ERR(p))
48004 kfree(p);
48005 }
48006 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
48007 index 95b82e8..12a538d 100644
48008 --- a/fs/cifs/misc.c
48009 +++ b/fs/cifs/misc.c
48010 @@ -155,7 +155,7 @@ cifs_buf_get(void)
48011 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
48012 atomic_inc(&bufAllocCount);
48013 #ifdef CONFIG_CIFS_STATS2
48014 - atomic_inc(&totBufAllocCount);
48015 + atomic_inc_unchecked(&totBufAllocCount);
48016 #endif /* CONFIG_CIFS_STATS2 */
48017 }
48018
48019 @@ -190,7 +190,7 @@ cifs_small_buf_get(void)
48020 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
48021 atomic_inc(&smBufAllocCount);
48022 #ifdef CONFIG_CIFS_STATS2
48023 - atomic_inc(&totSmBufAllocCount);
48024 + atomic_inc_unchecked(&totSmBufAllocCount);
48025 #endif /* CONFIG_CIFS_STATS2 */
48026
48027 }
48028 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
48029 index a5bf577..6d19845 100644
48030 --- a/fs/coda/cache.c
48031 +++ b/fs/coda/cache.c
48032 @@ -24,14 +24,14 @@
48033 #include <linux/coda_fs_i.h>
48034 #include <linux/coda_cache.h>
48035
48036 -static atomic_t permission_epoch = ATOMIC_INIT(0);
48037 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
48038
48039 /* replace or extend an acl cache hit */
48040 void coda_cache_enter(struct inode *inode, int mask)
48041 {
48042 struct coda_inode_info *cii = ITOC(inode);
48043
48044 - cii->c_cached_epoch = atomic_read(&permission_epoch);
48045 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
48046 if (cii->c_uid != current_fsuid()) {
48047 cii->c_uid = current_fsuid();
48048 cii->c_cached_perm = mask;
48049 @@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inode, int mask)
48050 void coda_cache_clear_inode(struct inode *inode)
48051 {
48052 struct coda_inode_info *cii = ITOC(inode);
48053 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
48054 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
48055 }
48056
48057 /* remove all acl caches */
48058 void coda_cache_clear_all(struct super_block *sb)
48059 {
48060 - atomic_inc(&permission_epoch);
48061 + atomic_inc_unchecked(&permission_epoch);
48062 }
48063
48064
48065 @@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode, int mask)
48066
48067 hit = (mask & cii->c_cached_perm) == mask &&
48068 cii->c_uid == current_fsuid() &&
48069 - cii->c_cached_epoch == atomic_read(&permission_epoch);
48070 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
48071
48072 return hit;
48073 }
48074 diff --git a/fs/compat.c b/fs/compat.c
48075 index d1e2411..b1eda5d 100644
48076 --- a/fs/compat.c
48077 +++ b/fs/compat.c
48078 @@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(char __user *filename, struct compat_timeval _
48079 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
48080 {
48081 compat_ino_t ino = stat->ino;
48082 - typeof(ubuf->st_uid) uid = 0;
48083 - typeof(ubuf->st_gid) gid = 0;
48084 + typeof(((struct compat_stat *)0)->st_uid) uid = 0;
48085 + typeof(((struct compat_stat *)0)->st_gid) gid = 0;
48086 int err;
48087
48088 SET_UID(uid, stat->uid);
48089 @@ -533,7 +533,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
48090
48091 set_fs(KERNEL_DS);
48092 /* The __user pointer cast is valid because of the set_fs() */
48093 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
48094 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
48095 set_fs(oldfs);
48096 /* truncating is ok because it's a user address */
48097 if (!ret)
48098 @@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
48099
48100 struct compat_readdir_callback {
48101 struct compat_old_linux_dirent __user *dirent;
48102 + struct file * file;
48103 int result;
48104 };
48105
48106 @@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
48107 buf->result = -EOVERFLOW;
48108 return -EOVERFLOW;
48109 }
48110 +
48111 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48112 + return 0;
48113 +
48114 buf->result++;
48115 dirent = buf->dirent;
48116 if (!access_ok(VERIFY_WRITE, dirent,
48117 @@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
48118
48119 buf.result = 0;
48120 buf.dirent = dirent;
48121 + buf.file = file;
48122
48123 error = vfs_readdir(file, compat_fillonedir, &buf);
48124 if (buf.result)
48125 @@ -899,6 +905,7 @@ struct compat_linux_dirent {
48126 struct compat_getdents_callback {
48127 struct compat_linux_dirent __user *current_dir;
48128 struct compat_linux_dirent __user *previous;
48129 + struct file * file;
48130 int count;
48131 int error;
48132 };
48133 @@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
48134 buf->error = -EOVERFLOW;
48135 return -EOVERFLOW;
48136 }
48137 +
48138 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48139 + return 0;
48140 +
48141 dirent = buf->previous;
48142 if (dirent) {
48143 if (__put_user(offset, &dirent->d_off))
48144 @@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
48145 buf.previous = NULL;
48146 buf.count = count;
48147 buf.error = 0;
48148 + buf.file = file;
48149
48150 error = vfs_readdir(file, compat_filldir, &buf);
48151 if (error >= 0)
48152 @@ -987,6 +999,7 @@ out:
48153 struct compat_getdents_callback64 {
48154 struct linux_dirent64 __user *current_dir;
48155 struct linux_dirent64 __user *previous;
48156 + struct file * file;
48157 int count;
48158 int error;
48159 };
48160 @@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
48161 buf->error = -EINVAL; /* only used if we fail.. */
48162 if (reclen > buf->count)
48163 return -EINVAL;
48164 +
48165 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48166 + return 0;
48167 +
48168 dirent = buf->previous;
48169
48170 if (dirent) {
48171 @@ -1054,13 +1071,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
48172 buf.previous = NULL;
48173 buf.count = count;
48174 buf.error = 0;
48175 + buf.file = file;
48176
48177 error = vfs_readdir(file, compat_filldir64, &buf);
48178 if (error >= 0)
48179 error = buf.error;
48180 lastdirent = buf.previous;
48181 if (lastdirent) {
48182 - typeof(lastdirent->d_off) d_off = file->f_pos;
48183 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
48184 if (__put_user_unaligned(d_off, &lastdirent->d_off))
48185 error = -EFAULT;
48186 else
48187 @@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
48188 * verify all the pointers
48189 */
48190 ret = -EINVAL;
48191 - if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
48192 + if (nr_segs > UIO_MAXIOV)
48193 goto out;
48194 if (!file->f_op)
48195 goto out;
48196 @@ -1463,11 +1481,35 @@ int compat_do_execve(char * filename,
48197 compat_uptr_t __user *envp,
48198 struct pt_regs * regs)
48199 {
48200 +#ifdef CONFIG_GRKERNSEC
48201 + struct file *old_exec_file;
48202 + struct acl_subject_label *old_acl;
48203 + struct rlimit old_rlim[RLIM_NLIMITS];
48204 +#endif
48205 struct linux_binprm *bprm;
48206 struct file *file;
48207 struct files_struct *displaced;
48208 bool clear_in_exec;
48209 int retval;
48210 + const struct cred *cred = current_cred();
48211 +
48212 + /*
48213 + * We move the actual failure in case of RLIMIT_NPROC excess from
48214 + * set*uid() to execve() because too many poorly written programs
48215 + * don't check setuid() return code. Here we additionally recheck
48216 + * whether NPROC limit is still exceeded.
48217 + */
48218 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
48219 +
48220 + if ((current->flags & PF_NPROC_EXCEEDED) &&
48221 + atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
48222 + retval = -EAGAIN;
48223 + goto out_ret;
48224 + }
48225 +
48226 + /* We're below the limit (still or again), so we don't want to make
48227 + * further execve() calls fail. */
48228 + current->flags &= ~PF_NPROC_EXCEEDED;
48229
48230 retval = unshare_files(&displaced);
48231 if (retval)
48232 @@ -1493,12 +1535,26 @@ int compat_do_execve(char * filename,
48233 if (IS_ERR(file))
48234 goto out_unmark;
48235
48236 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
48237 + retval = -EPERM;
48238 + goto out_file;
48239 + }
48240 +
48241 sched_exec();
48242
48243 bprm->file = file;
48244 bprm->filename = filename;
48245 bprm->interp = filename;
48246
48247 + if (gr_process_user_ban()) {
48248 + retval = -EPERM;
48249 + goto out_file;
48250 + }
48251 +
48252 + retval = -EACCES;
48253 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
48254 + goto out_file;
48255 +
48256 retval = bprm_mm_init(bprm);
48257 if (retval)
48258 goto out_file;
48259 @@ -1528,9 +1584,40 @@ int compat_do_execve(char * filename,
48260 if (retval < 0)
48261 goto out;
48262
48263 + if (!gr_tpe_allow(file)) {
48264 + retval = -EACCES;
48265 + goto out;
48266 + }
48267 +
48268 + if (gr_check_crash_exec(file)) {
48269 + retval = -EACCES;
48270 + goto out;
48271 + }
48272 +
48273 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
48274 +
48275 + gr_handle_exec_args_compat(bprm, argv);
48276 +
48277 +#ifdef CONFIG_GRKERNSEC
48278 + old_acl = current->acl;
48279 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
48280 + old_exec_file = current->exec_file;
48281 + get_file(file);
48282 + current->exec_file = file;
48283 +#endif
48284 +
48285 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
48286 + bprm->unsafe);
48287 + if (retval < 0)
48288 + goto out_fail;
48289 +
48290 retval = search_binary_handler(bprm, regs);
48291 if (retval < 0)
48292 - goto out;
48293 + goto out_fail;
48294 +#ifdef CONFIG_GRKERNSEC
48295 + if (old_exec_file)
48296 + fput(old_exec_file);
48297 +#endif
48298
48299 /* execve succeeded */
48300 current->fs->in_exec = 0;
48301 @@ -1541,6 +1628,14 @@ int compat_do_execve(char * filename,
48302 put_files_struct(displaced);
48303 return retval;
48304
48305 +out_fail:
48306 +#ifdef CONFIG_GRKERNSEC
48307 + current->acl = old_acl;
48308 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
48309 + fput(current->exec_file);
48310 + current->exec_file = old_exec_file;
48311 +#endif
48312 +
48313 out:
48314 if (bprm->mm) {
48315 acct_arg_size(bprm, 0);
48316 @@ -1711,6 +1806,8 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp,
48317 struct fdtable *fdt;
48318 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
48319
48320 + pax_track_stack();
48321 +
48322 if (n < 0)
48323 goto out_nofds;
48324
48325 @@ -2151,7 +2248,7 @@ asmlinkage long compat_sys_nfsservctl(int cmd,
48326 oldfs = get_fs();
48327 set_fs(KERNEL_DS);
48328 /* The __user pointer casts are valid because of the set_fs() */
48329 - err = sys_nfsservctl(cmd, (void __user *) karg, (void __user *) kres);
48330 + err = sys_nfsservctl(cmd, (void __force_user *) karg, (void __force_user *) kres);
48331 set_fs(oldfs);
48332
48333 if (err)
48334 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
48335 index 0adced2..bbb1b0d 100644
48336 --- a/fs/compat_binfmt_elf.c
48337 +++ b/fs/compat_binfmt_elf.c
48338 @@ -29,10 +29,12 @@
48339 #undef elfhdr
48340 #undef elf_phdr
48341 #undef elf_note
48342 +#undef elf_dyn
48343 #undef elf_addr_t
48344 #define elfhdr elf32_hdr
48345 #define elf_phdr elf32_phdr
48346 #define elf_note elf32_note
48347 +#define elf_dyn Elf32_Dyn
48348 #define elf_addr_t Elf32_Addr
48349
48350 /*
48351 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
48352 index d84e705..d8c364c 100644
48353 --- a/fs/compat_ioctl.c
48354 +++ b/fs/compat_ioctl.c
48355 @@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd, unsigned
48356 up = (struct compat_video_spu_palette __user *) arg;
48357 err = get_user(palp, &up->palette);
48358 err |= get_user(length, &up->length);
48359 + if (err)
48360 + return -EFAULT;
48361
48362 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
48363 err = put_user(compat_ptr(palp), &up_native->palette);
48364 @@ -1513,7 +1515,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
48365 return -EFAULT;
48366 if (__get_user(udata, &ss32->iomem_base))
48367 return -EFAULT;
48368 - ss.iomem_base = compat_ptr(udata);
48369 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
48370 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
48371 __get_user(ss.port_high, &ss32->port_high))
48372 return -EFAULT;
48373 @@ -1809,7 +1811,7 @@ static int compat_ioctl_preallocate(struct file *file, unsigned long arg)
48374 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
48375 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
48376 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
48377 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
48378 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
48379 return -EFAULT;
48380
48381 return ioctl_preallocate(file, p);
48382 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
48383 index 8e48b52..f01ed91 100644
48384 --- a/fs/configfs/dir.c
48385 +++ b/fs/configfs/dir.c
48386 @@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
48387 }
48388 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
48389 struct configfs_dirent *next;
48390 - const char * name;
48391 + const unsigned char * name;
48392 + char d_name[sizeof(next->s_dentry->d_iname)];
48393 int len;
48394
48395 next = list_entry(p, struct configfs_dirent,
48396 @@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
48397 continue;
48398
48399 name = configfs_get_name(next);
48400 - len = strlen(name);
48401 + if (next->s_dentry && name == next->s_dentry->d_iname) {
48402 + len = next->s_dentry->d_name.len;
48403 + memcpy(d_name, name, len);
48404 + name = d_name;
48405 + } else
48406 + len = strlen(name);
48407 if (next->s_dentry)
48408 ino = next->s_dentry->d_inode->i_ino;
48409 else
48410 diff --git a/fs/dcache.c b/fs/dcache.c
48411 index 44c0aea..2529092 100644
48412 --- a/fs/dcache.c
48413 +++ b/fs/dcache.c
48414 @@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
48415
48416 static struct kmem_cache *dentry_cache __read_mostly;
48417
48418 -#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
48419 -
48420 /*
48421 * This is the single most critical data structure when it comes
48422 * to the dcache: the hashtable for lookups. Somebody should try
48423 @@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned long mempages)
48424 mempages -= reserve;
48425
48426 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
48427 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
48428 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
48429
48430 dcache_init();
48431 inode_init();
48432 diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
48433 index c010ecf..a8d8c59 100644
48434 --- a/fs/dlm/lockspace.c
48435 +++ b/fs/dlm/lockspace.c
48436 @@ -148,7 +148,7 @@ static void lockspace_kobj_release(struct kobject *k)
48437 kfree(ls);
48438 }
48439
48440 -static struct sysfs_ops dlm_attr_ops = {
48441 +static const struct sysfs_ops dlm_attr_ops = {
48442 .show = dlm_attr_show,
48443 .store = dlm_attr_store,
48444 };
48445 diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
48446 index 443947f..a871402 100644
48447 --- a/fs/ecryptfs/crypto.c
48448 +++ b/fs/ecryptfs/crypto.c
48449 @@ -418,17 +418,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
48450 rc);
48451 goto out;
48452 }
48453 - if (unlikely(ecryptfs_verbosity > 0)) {
48454 - ecryptfs_printk(KERN_DEBUG, "Encrypting extent "
48455 - "with iv:\n");
48456 - ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
48457 - ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
48458 - "encryption:\n");
48459 - ecryptfs_dump_hex((char *)
48460 - (page_address(page)
48461 - + (extent_offset * crypt_stat->extent_size)),
48462 - 8);
48463 - }
48464 rc = ecryptfs_encrypt_page_offset(crypt_stat, enc_extent_page, 0,
48465 page, (extent_offset
48466 * crypt_stat->extent_size),
48467 @@ -441,14 +430,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
48468 goto out;
48469 }
48470 rc = 0;
48471 - if (unlikely(ecryptfs_verbosity > 0)) {
48472 - ecryptfs_printk(KERN_DEBUG, "Encrypt extent [0x%.16x]; "
48473 - "rc = [%d]\n", (extent_base + extent_offset),
48474 - rc);
48475 - ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
48476 - "encryption:\n");
48477 - ecryptfs_dump_hex((char *)(page_address(enc_extent_page)), 8);
48478 - }
48479 out:
48480 return rc;
48481 }
48482 @@ -545,17 +526,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
48483 rc);
48484 goto out;
48485 }
48486 - if (unlikely(ecryptfs_verbosity > 0)) {
48487 - ecryptfs_printk(KERN_DEBUG, "Decrypting extent "
48488 - "with iv:\n");
48489 - ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
48490 - ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
48491 - "decryption:\n");
48492 - ecryptfs_dump_hex((char *)
48493 - (page_address(enc_extent_page)
48494 - + (extent_offset * crypt_stat->extent_size)),
48495 - 8);
48496 - }
48497 rc = ecryptfs_decrypt_page_offset(crypt_stat, page,
48498 (extent_offset
48499 * crypt_stat->extent_size),
48500 @@ -569,6 +539,7 @@ static int ecryptfs_decrypt_extent(struct page *page,
48501 goto out;
48502 }
48503 rc = 0;
48504 +<<<<<<< HEAD
48505 if (unlikely(ecryptfs_verbosity > 0)) {
48506 ecryptfs_printk(KERN_DEBUG, "Decrypt extent [0x%.16x]; "
48507 "rc = [%d]\n", (extent_base + extent_offset),
48508 @@ -579,6 +550,8 @@ static int ecryptfs_decrypt_extent(struct page *page,
48509 + (extent_offset
48510 * crypt_stat->extent_size)), 8);
48511 }
48512 +=======
48513 +>>>>>>> 58ded24... eCryptfs: Fix oops when printing debug info in extent crypto functions
48514 out:
48515 return rc;
48516 }
48517 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
48518 index 88ba4d4..073f003 100644
48519 --- a/fs/ecryptfs/inode.c
48520 +++ b/fs/ecryptfs/inode.c
48521 @@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
48522 old_fs = get_fs();
48523 set_fs(get_ds());
48524 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
48525 - (char __user *)lower_buf,
48526 + (char __force_user *)lower_buf,
48527 lower_bufsiz);
48528 set_fs(old_fs);
48529 if (rc < 0)
48530 @@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
48531 }
48532 old_fs = get_fs();
48533 set_fs(get_ds());
48534 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
48535 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
48536 set_fs(old_fs);
48537 if (rc < 0)
48538 goto out_free;
48539 diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
48540 index 4ec8f61..c4b0bc5 100644
48541 --- a/fs/ecryptfs/miscdev.c
48542 +++ b/fs/ecryptfs/miscdev.c
48543 @@ -408,11 +408,47 @@ ecryptfs_miscdev_write(struct file *file, const char __user *buf,
48544 ssize_t sz = 0;
48545 char *data;
48546 uid_t euid = current_euid();
48547 + unsigned char packet_size_peek[3];
48548 int rc;
48549
48550 - if (count == 0)
48551 + if (count == 0) {
48552 goto out;
48553 + } else if (count == (1 + 4)) {
48554 + /* Likely a harmless MSG_HELO or MSG_QUIT - no packet length */
48555 + goto memdup;
48556 + } else if (count < (1 + 4 + 1)
48557 + || count > (1 + 4 + 2 + sizeof(struct ecryptfs_message) + 4
48558 + + ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES)) {
48559 + printk(KERN_WARNING "%s: Acceptable packet size range is "
48560 + "[%d-%lu], but amount of data written is [%zu].",
48561 + __func__, (1 + 4 + 1),
48562 + (1 + 4 + 2 + sizeof(struct ecryptfs_message) + 4
48563 + + ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES), count);
48564 + return -EINVAL;
48565 + }
48566
48567 + if (copy_from_user(packet_size_peek, (buf + 1 + 4),
48568 + sizeof(packet_size_peek))) {
48569 + printk(KERN_WARNING "%s: Error while inspecting packet size\n",
48570 + __func__);
48571 + return -EFAULT;
48572 + }
48573 +
48574 + rc = ecryptfs_parse_packet_length(packet_size_peek, &packet_size,
48575 + &packet_size_length);
48576 + if (rc) {
48577 + printk(KERN_WARNING "%s: Error parsing packet length; "
48578 + "rc = [%d]\n", __func__, rc);
48579 + return rc;
48580 + }
48581 +
48582 + if ((1 + 4 + packet_size_length + packet_size) != count) {
48583 + printk(KERN_WARNING "%s: Invalid packet size [%zu]\n", __func__,
48584 + packet_size);
48585 + return -EINVAL;
48586 + }
48587 +
48588 +memdup:
48589 data = memdup_user(buf, count);
48590 if (IS_ERR(data)) {
48591 printk(KERN_ERR "%s: memdup_user returned error [%ld]\n",
48592 @@ -434,23 +470,7 @@ ecryptfs_miscdev_write(struct file *file, const char __user *buf,
48593 }
48594 memcpy(&counter_nbo, &data[i], 4);
48595 seq = be32_to_cpu(counter_nbo);
48596 - i += 4;
48597 - rc = ecryptfs_parse_packet_length(&data[i], &packet_size,
48598 - &packet_size_length);
48599 - if (rc) {
48600 - printk(KERN_WARNING "%s: Error parsing packet length; "
48601 - "rc = [%d]\n", __func__, rc);
48602 - goto out_free;
48603 - }
48604 - i += packet_size_length;
48605 - if ((1 + 4 + packet_size_length + packet_size) != count) {
48606 - printk(KERN_WARNING "%s: (1 + packet_size_length([%zd])"
48607 - " + packet_size([%zd]))([%zd]) != "
48608 - "count([%zd]). Invalid packet format.\n",
48609 - __func__, packet_size_length, packet_size,
48610 - (1 + packet_size_length + packet_size), count);
48611 - goto out_free;
48612 - }
48613 + i += 4 + packet_size_length;
48614 rc = ecryptfs_miscdev_response(&data[i], packet_size,
48615 euid, current_user_ns(),
48616 task_pid(current), seq);
48617 diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
48618 index 0cc4faf..0404659 100644
48619 --- a/fs/ecryptfs/read_write.c
48620 +++ b/fs/ecryptfs/read_write.c
48621 @@ -134,13 +134,18 @@ int ecryptfs_write(struct file *ecryptfs_file, char *data, loff_t offset,
48622 pgoff_t ecryptfs_page_idx = (pos >> PAGE_CACHE_SHIFT);
48623 size_t start_offset_in_page = (pos & ~PAGE_CACHE_MASK);
48624 size_t num_bytes = (PAGE_CACHE_SIZE - start_offset_in_page);
48625 - size_t total_remaining_bytes = ((offset + size) - pos);
48626 + loff_t total_remaining_bytes = ((offset + size) - pos);
48627 +
48628 + if (fatal_signal_pending(current)) {
48629 + rc = -EINTR;
48630 + break;
48631 + }
48632
48633 if (num_bytes > total_remaining_bytes)
48634 num_bytes = total_remaining_bytes;
48635 if (pos < offset) {
48636 /* remaining zeros to write, up to destination offset */
48637 - size_t total_remaining_zeros = (offset - pos);
48638 + loff_t total_remaining_zeros = (offset - pos);
48639
48640 if (num_bytes > total_remaining_zeros)
48641 num_bytes = total_remaining_zeros;
48642 @@ -197,15 +202,19 @@ int ecryptfs_write(struct file *ecryptfs_file, char *data, loff_t offset,
48643 }
48644 pos += num_bytes;
48645 }
48646 - if ((offset + size) > ecryptfs_file_size) {
48647 - i_size_write(ecryptfs_inode, (offset + size));
48648 + if (pos > ecryptfs_file_size) {
48649 + i_size_write(ecryptfs_inode, pos);
48650 if (crypt_stat->flags & ECRYPTFS_ENCRYPTED) {
48651 - rc = ecryptfs_write_inode_size_to_metadata(
48652 + int rc2;
48653 +
48654 + rc2 = ecryptfs_write_inode_size_to_metadata(
48655 ecryptfs_inode);
48656 - if (rc) {
48657 + if (rc2) {
48658 printk(KERN_ERR "Problem with "
48659 "ecryptfs_write_inode_size_to_metadata; "
48660 - "rc = [%d]\n", rc);
48661 + "rc = [%d]\n", rc2);
48662 + if (!rc)
48663 + rc = rc2;
48664 goto out;
48665 }
48666 }
48667 diff --git a/fs/exec.c b/fs/exec.c
48668 index 86fafc6..5033350 100644
48669 --- a/fs/exec.c
48670 +++ b/fs/exec.c
48671 @@ -56,12 +56,28 @@
48672 #include <linux/fsnotify.h>
48673 #include <linux/fs_struct.h>
48674 #include <linux/pipe_fs_i.h>
48675 +#include <linux/random.h>
48676 +#include <linux/seq_file.h>
48677 +
48678 +#ifdef CONFIG_PAX_REFCOUNT
48679 +#include <linux/kallsyms.h>
48680 +#include <linux/kdebug.h>
48681 +#endif
48682
48683 #include <asm/uaccess.h>
48684 #include <asm/mmu_context.h>
48685 #include <asm/tlb.h>
48686 #include "internal.h"
48687
48688 +#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
48689 +void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
48690 +#endif
48691 +
48692 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
48693 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
48694 +EXPORT_SYMBOL(pax_set_initial_flags_func);
48695 +#endif
48696 +
48697 int core_uses_pid;
48698 char core_pattern[CORENAME_MAX_SIZE] = "core";
48699 unsigned int core_pipe_limit;
48700 @@ -178,18 +194,10 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
48701 int write)
48702 {
48703 struct page *page;
48704 - int ret;
48705
48706 -#ifdef CONFIG_STACK_GROWSUP
48707 - if (write) {
48708 - ret = expand_stack_downwards(bprm->vma, pos);
48709 - if (ret < 0)
48710 - return NULL;
48711 - }
48712 -#endif
48713 - ret = get_user_pages(current, bprm->mm, pos,
48714 - 1, write, 1, &page, NULL);
48715 - if (ret <= 0)
48716 + if (0 > expand_stack_downwards(bprm->vma, pos))
48717 + return NULL;
48718 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
48719 return NULL;
48720
48721 if (write) {
48722 @@ -263,6 +271,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
48723 vma->vm_end = STACK_TOP_MAX;
48724 vma->vm_start = vma->vm_end - PAGE_SIZE;
48725 vma->vm_flags = VM_STACK_FLAGS;
48726 +
48727 +#ifdef CONFIG_PAX_SEGMEXEC
48728 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
48729 +#endif
48730 +
48731 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
48732
48733 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
48734 @@ -276,6 +289,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
48735 mm->stack_vm = mm->total_vm = 1;
48736 up_write(&mm->mmap_sem);
48737 bprm->p = vma->vm_end - sizeof(void *);
48738 +
48739 +#ifdef CONFIG_PAX_RANDUSTACK
48740 + if (randomize_va_space)
48741 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
48742 +#endif
48743 +
48744 return 0;
48745 err:
48746 up_write(&mm->mmap_sem);
48747 @@ -510,7 +529,7 @@ int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
48748 int r;
48749 mm_segment_t oldfs = get_fs();
48750 set_fs(KERNEL_DS);
48751 - r = copy_strings(argc, (char __user * __user *)argv, bprm);
48752 + r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
48753 set_fs(oldfs);
48754 return r;
48755 }
48756 @@ -540,7 +559,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
48757 unsigned long new_end = old_end - shift;
48758 struct mmu_gather *tlb;
48759
48760 - BUG_ON(new_start > new_end);
48761 + if (new_start >= new_end || new_start < mmap_min_addr)
48762 + return -ENOMEM;
48763
48764 /*
48765 * ensure there are no vmas between where we want to go
48766 @@ -549,6 +569,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
48767 if (vma != find_vma(mm, new_start))
48768 return -EFAULT;
48769
48770 +#ifdef CONFIG_PAX_SEGMEXEC
48771 + BUG_ON(pax_find_mirror_vma(vma));
48772 +#endif
48773 +
48774 /*
48775 * cover the whole range: [new_start, old_end)
48776 */
48777 @@ -630,10 +654,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
48778 stack_top = arch_align_stack(stack_top);
48779 stack_top = PAGE_ALIGN(stack_top);
48780
48781 - if (unlikely(stack_top < mmap_min_addr) ||
48782 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
48783 - return -ENOMEM;
48784 -
48785 stack_shift = vma->vm_end - stack_top;
48786
48787 bprm->p -= stack_shift;
48788 @@ -645,6 +665,14 @@ int setup_arg_pages(struct linux_binprm *bprm,
48789 bprm->exec -= stack_shift;
48790
48791 down_write(&mm->mmap_sem);
48792 +
48793 + /* Move stack pages down in memory. */
48794 + if (stack_shift) {
48795 + ret = shift_arg_pages(vma, stack_shift);
48796 + if (ret)
48797 + goto out_unlock;
48798 + }
48799 +
48800 vm_flags = VM_STACK_FLAGS;
48801
48802 /*
48803 @@ -658,19 +686,24 @@ int setup_arg_pages(struct linux_binprm *bprm,
48804 vm_flags &= ~VM_EXEC;
48805 vm_flags |= mm->def_flags;
48806
48807 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
48808 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
48809 + vm_flags &= ~VM_EXEC;
48810 +
48811 +#ifdef CONFIG_PAX_MPROTECT
48812 + if (mm->pax_flags & MF_PAX_MPROTECT)
48813 + vm_flags &= ~VM_MAYEXEC;
48814 +#endif
48815 +
48816 + }
48817 +#endif
48818 +
48819 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
48820 vm_flags);
48821 if (ret)
48822 goto out_unlock;
48823 BUG_ON(prev != vma);
48824
48825 - /* Move stack pages down in memory. */
48826 - if (stack_shift) {
48827 - ret = shift_arg_pages(vma, stack_shift);
48828 - if (ret)
48829 - goto out_unlock;
48830 - }
48831 -
48832 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
48833 stack_size = vma->vm_end - vma->vm_start;
48834 /*
48835 @@ -744,7 +777,7 @@ int kernel_read(struct file *file, loff_t offset,
48836 old_fs = get_fs();
48837 set_fs(get_ds());
48838 /* The cast to a user pointer is valid due to the set_fs() */
48839 - result = vfs_read(file, (void __user *)addr, count, &pos);
48840 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
48841 set_fs(old_fs);
48842 return result;
48843 }
48844 @@ -1152,7 +1185,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
48845 }
48846 rcu_read_unlock();
48847
48848 - if (p->fs->users > n_fs) {
48849 + if (atomic_read(&p->fs->users) > n_fs) {
48850 bprm->unsafe |= LSM_UNSAFE_SHARE;
48851 } else {
48852 res = -EAGAIN;
48853 @@ -1347,11 +1380,35 @@ int do_execve(char * filename,
48854 char __user *__user *envp,
48855 struct pt_regs * regs)
48856 {
48857 +#ifdef CONFIG_GRKERNSEC
48858 + struct file *old_exec_file;
48859 + struct acl_subject_label *old_acl;
48860 + struct rlimit old_rlim[RLIM_NLIMITS];
48861 +#endif
48862 struct linux_binprm *bprm;
48863 struct file *file;
48864 struct files_struct *displaced;
48865 bool clear_in_exec;
48866 int retval;
48867 + const struct cred *cred = current_cred();
48868 +
48869 + /*
48870 + * We move the actual failure in case of RLIMIT_NPROC excess from
48871 + * set*uid() to execve() because too many poorly written programs
48872 + * don't check setuid() return code. Here we additionally recheck
48873 + * whether NPROC limit is still exceeded.
48874 + */
48875 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
48876 +
48877 + if ((current->flags & PF_NPROC_EXCEEDED) &&
48878 + atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
48879 + retval = -EAGAIN;
48880 + goto out_ret;
48881 + }
48882 +
48883 + /* We're below the limit (still or again), so we don't want to make
48884 + * further execve() calls fail. */
48885 + current->flags &= ~PF_NPROC_EXCEEDED;
48886
48887 retval = unshare_files(&displaced);
48888 if (retval)
48889 @@ -1377,12 +1434,27 @@ int do_execve(char * filename,
48890 if (IS_ERR(file))
48891 goto out_unmark;
48892
48893 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
48894 + retval = -EPERM;
48895 + goto out_file;
48896 + }
48897 +
48898 sched_exec();
48899
48900 bprm->file = file;
48901 bprm->filename = filename;
48902 bprm->interp = filename;
48903
48904 + if (gr_process_user_ban()) {
48905 + retval = -EPERM;
48906 + goto out_file;
48907 + }
48908 +
48909 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
48910 + retval = -EACCES;
48911 + goto out_file;
48912 + }
48913 +
48914 retval = bprm_mm_init(bprm);
48915 if (retval)
48916 goto out_file;
48917 @@ -1412,10 +1484,41 @@ int do_execve(char * filename,
48918 if (retval < 0)
48919 goto out;
48920
48921 + if (!gr_tpe_allow(file)) {
48922 + retval = -EACCES;
48923 + goto out;
48924 + }
48925 +
48926 + if (gr_check_crash_exec(file)) {
48927 + retval = -EACCES;
48928 + goto out;
48929 + }
48930 +
48931 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
48932 +
48933 + gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
48934 +
48935 +#ifdef CONFIG_GRKERNSEC
48936 + old_acl = current->acl;
48937 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
48938 + old_exec_file = current->exec_file;
48939 + get_file(file);
48940 + current->exec_file = file;
48941 +#endif
48942 +
48943 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
48944 + bprm->unsafe);
48945 + if (retval < 0)
48946 + goto out_fail;
48947 +
48948 current->flags &= ~PF_KTHREAD;
48949 retval = search_binary_handler(bprm,regs);
48950 if (retval < 0)
48951 - goto out;
48952 + goto out_fail;
48953 +#ifdef CONFIG_GRKERNSEC
48954 + if (old_exec_file)
48955 + fput(old_exec_file);
48956 +#endif
48957
48958 /* execve succeeded */
48959 current->fs->in_exec = 0;
48960 @@ -1426,6 +1529,14 @@ int do_execve(char * filename,
48961 put_files_struct(displaced);
48962 return retval;
48963
48964 +out_fail:
48965 +#ifdef CONFIG_GRKERNSEC
48966 + current->acl = old_acl;
48967 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
48968 + fput(current->exec_file);
48969 + current->exec_file = old_exec_file;
48970 +#endif
48971 +
48972 out:
48973 if (bprm->mm) {
48974 acct_arg_size(bprm, 0);
48975 @@ -1591,6 +1702,220 @@ out:
48976 return ispipe;
48977 }
48978
48979 +int pax_check_flags(unsigned long *flags)
48980 +{
48981 + int retval = 0;
48982 +
48983 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
48984 + if (*flags & MF_PAX_SEGMEXEC)
48985 + {
48986 + *flags &= ~MF_PAX_SEGMEXEC;
48987 + retval = -EINVAL;
48988 + }
48989 +#endif
48990 +
48991 + if ((*flags & MF_PAX_PAGEEXEC)
48992 +
48993 +#ifdef CONFIG_PAX_PAGEEXEC
48994 + && (*flags & MF_PAX_SEGMEXEC)
48995 +#endif
48996 +
48997 + )
48998 + {
48999 + *flags &= ~MF_PAX_PAGEEXEC;
49000 + retval = -EINVAL;
49001 + }
49002 +
49003 + if ((*flags & MF_PAX_MPROTECT)
49004 +
49005 +#ifdef CONFIG_PAX_MPROTECT
49006 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
49007 +#endif
49008 +
49009 + )
49010 + {
49011 + *flags &= ~MF_PAX_MPROTECT;
49012 + retval = -EINVAL;
49013 + }
49014 +
49015 + if ((*flags & MF_PAX_EMUTRAMP)
49016 +
49017 +#ifdef CONFIG_PAX_EMUTRAMP
49018 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
49019 +#endif
49020 +
49021 + )
49022 + {
49023 + *flags &= ~MF_PAX_EMUTRAMP;
49024 + retval = -EINVAL;
49025 + }
49026 +
49027 + return retval;
49028 +}
49029 +
49030 +EXPORT_SYMBOL(pax_check_flags);
49031 +
49032 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
49033 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
49034 +{
49035 + struct task_struct *tsk = current;
49036 + struct mm_struct *mm = current->mm;
49037 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
49038 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
49039 + char *path_exec = NULL;
49040 + char *path_fault = NULL;
49041 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
49042 +
49043 + if (buffer_exec && buffer_fault) {
49044 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
49045 +
49046 + down_read(&mm->mmap_sem);
49047 + vma = mm->mmap;
49048 + while (vma && (!vma_exec || !vma_fault)) {
49049 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
49050 + vma_exec = vma;
49051 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
49052 + vma_fault = vma;
49053 + vma = vma->vm_next;
49054 + }
49055 + if (vma_exec) {
49056 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
49057 + if (IS_ERR(path_exec))
49058 + path_exec = "<path too long>";
49059 + else {
49060 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
49061 + if (path_exec) {
49062 + *path_exec = 0;
49063 + path_exec = buffer_exec;
49064 + } else
49065 + path_exec = "<path too long>";
49066 + }
49067 + }
49068 + if (vma_fault) {
49069 + start = vma_fault->vm_start;
49070 + end = vma_fault->vm_end;
49071 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
49072 + if (vma_fault->vm_file) {
49073 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
49074 + if (IS_ERR(path_fault))
49075 + path_fault = "<path too long>";
49076 + else {
49077 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
49078 + if (path_fault) {
49079 + *path_fault = 0;
49080 + path_fault = buffer_fault;
49081 + } else
49082 + path_fault = "<path too long>";
49083 + }
49084 + } else
49085 + path_fault = "<anonymous mapping>";
49086 + }
49087 + up_read(&mm->mmap_sem);
49088 + }
49089 + if (tsk->signal->curr_ip)
49090 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
49091 + else
49092 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
49093 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
49094 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
49095 + task_uid(tsk), task_euid(tsk), pc, sp);
49096 + free_page((unsigned long)buffer_exec);
49097 + free_page((unsigned long)buffer_fault);
49098 + pax_report_insns(regs, pc, sp);
49099 + do_coredump(SIGKILL, SIGKILL, regs);
49100 +}
49101 +#endif
49102 +
49103 +#ifdef CONFIG_PAX_REFCOUNT
49104 +void pax_report_refcount_overflow(struct pt_regs *regs)
49105 +{
49106 + if (current->signal->curr_ip)
49107 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
49108 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
49109 + else
49110 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
49111 + current->comm, task_pid_nr(current), current_uid(), current_euid());
49112 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
49113 + show_regs(regs);
49114 + force_sig_specific(SIGKILL, current);
49115 +}
49116 +#endif
49117 +
49118 +#ifdef CONFIG_PAX_USERCOPY
49119 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
49120 +int object_is_on_stack(const void *obj, unsigned long len)
49121 +{
49122 + const void * const stack = task_stack_page(current);
49123 + const void * const stackend = stack + THREAD_SIZE;
49124 +
49125 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
49126 + const void *frame = NULL;
49127 + const void *oldframe;
49128 +#endif
49129 +
49130 + if (obj + len < obj)
49131 + return -1;
49132 +
49133 + if (obj + len <= stack || stackend <= obj)
49134 + return 0;
49135 +
49136 + if (obj < stack || stackend < obj + len)
49137 + return -1;
49138 +
49139 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
49140 + oldframe = __builtin_frame_address(1);
49141 + if (oldframe)
49142 + frame = __builtin_frame_address(2);
49143 + /*
49144 + low ----------------------------------------------> high
49145 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
49146 + ^----------------^
49147 + allow copies only within here
49148 + */
49149 + while (stack <= frame && frame < stackend) {
49150 + /* if obj + len extends past the last frame, this
49151 + check won't pass and the next frame will be 0,
49152 + causing us to bail out and correctly report
49153 + the copy as invalid
49154 + */
49155 + if (obj + len <= frame)
49156 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
49157 + oldframe = frame;
49158 + frame = *(const void * const *)frame;
49159 + }
49160 + return -1;
49161 +#else
49162 + return 1;
49163 +#endif
49164 +}
49165 +
49166 +
49167 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
49168 +{
49169 + if (current->signal->curr_ip)
49170 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
49171 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
49172 + else
49173 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
49174 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
49175 +
49176 + dump_stack();
49177 + gr_handle_kernel_exploit();
49178 + do_group_exit(SIGKILL);
49179 +}
49180 +#endif
49181 +
49182 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
49183 +void pax_track_stack(void)
49184 +{
49185 + unsigned long sp = (unsigned long)&sp;
49186 + if (sp < current_thread_info()->lowest_stack &&
49187 + sp > (unsigned long)task_stack_page(current))
49188 + current_thread_info()->lowest_stack = sp;
49189 +}
49190 +EXPORT_SYMBOL(pax_track_stack);
49191 +#endif
49192 +
49193 static int zap_process(struct task_struct *start)
49194 {
49195 struct task_struct *t;
49196 @@ -1793,17 +2118,17 @@ static void wait_for_dump_helpers(struct file *file)
49197 pipe = file->f_path.dentry->d_inode->i_pipe;
49198
49199 pipe_lock(pipe);
49200 - pipe->readers++;
49201 - pipe->writers--;
49202 + atomic_inc(&pipe->readers);
49203 + atomic_dec(&pipe->writers);
49204
49205 - while ((pipe->readers > 1) && (!signal_pending(current))) {
49206 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
49207 wake_up_interruptible_sync(&pipe->wait);
49208 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
49209 pipe_wait(pipe);
49210 }
49211
49212 - pipe->readers--;
49213 - pipe->writers++;
49214 + atomic_dec(&pipe->readers);
49215 + atomic_inc(&pipe->writers);
49216 pipe_unlock(pipe);
49217
49218 }
49219 @@ -1826,10 +2151,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
49220 char **helper_argv = NULL;
49221 int helper_argc = 0;
49222 int dump_count = 0;
49223 - static atomic_t core_dump_count = ATOMIC_INIT(0);
49224 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
49225
49226 audit_core_dumps(signr);
49227
49228 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
49229 + gr_handle_brute_attach(current, mm->flags);
49230 +
49231 binfmt = mm->binfmt;
49232 if (!binfmt || !binfmt->core_dump)
49233 goto fail;
49234 @@ -1874,6 +2202,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
49235 */
49236 clear_thread_flag(TIF_SIGPENDING);
49237
49238 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
49239 +
49240 /*
49241 * lock_kernel() because format_corename() is controlled by sysctl, which
49242 * uses lock_kernel()
49243 @@ -1908,7 +2238,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
49244 goto fail_unlock;
49245 }
49246
49247 - dump_count = atomic_inc_return(&core_dump_count);
49248 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
49249 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
49250 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
49251 task_tgid_vnr(current), current->comm);
49252 @@ -1972,7 +2302,7 @@ close_fail:
49253 filp_close(file, NULL);
49254 fail_dropcount:
49255 if (dump_count)
49256 - atomic_dec(&core_dump_count);
49257 + atomic_dec_unchecked(&core_dump_count);
49258 fail_unlock:
49259 if (helper_argv)
49260 argv_free(helper_argv);
49261 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
49262 index 7f8d2e5..a1abdbb 100644
49263 --- a/fs/ext2/balloc.c
49264 +++ b/fs/ext2/balloc.c
49265 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
49266
49267 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
49268 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
49269 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
49270 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
49271 sbi->s_resuid != current_fsuid() &&
49272 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
49273 return 0;
49274 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
49275 index 27967f9..9f2a5fb 100644
49276 --- a/fs/ext3/balloc.c
49277 +++ b/fs/ext3/balloc.c
49278 @@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
49279
49280 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
49281 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
49282 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
49283 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
49284 sbi->s_resuid != current_fsuid() &&
49285 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
49286 return 0;
49287 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
49288 index e85b63c..80398e6 100644
49289 --- a/fs/ext4/balloc.c
49290 +++ b/fs/ext4/balloc.c
49291 @@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
49292 /* Hm, nope. Are (enough) root reserved blocks available? */
49293 if (sbi->s_resuid == current_fsuid() ||
49294 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
49295 - capable(CAP_SYS_RESOURCE)) {
49296 + capable_nolog(CAP_SYS_RESOURCE)) {
49297 if (free_blocks >= (nblocks + dirty_blocks))
49298 return 1;
49299 }
49300 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
49301 index 67c46ed..1f237e5 100644
49302 --- a/fs/ext4/ext4.h
49303 +++ b/fs/ext4/ext4.h
49304 @@ -1077,19 +1077,19 @@ struct ext4_sb_info {
49305
49306 /* stats for buddy allocator */
49307 spinlock_t s_mb_pa_lock;
49308 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
49309 - atomic_t s_bal_success; /* we found long enough chunks */
49310 - atomic_t s_bal_allocated; /* in blocks */
49311 - atomic_t s_bal_ex_scanned; /* total extents scanned */
49312 - atomic_t s_bal_goals; /* goal hits */
49313 - atomic_t s_bal_breaks; /* too long searches */
49314 - atomic_t s_bal_2orders; /* 2^order hits */
49315 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
49316 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
49317 + atomic_unchecked_t s_bal_allocated; /* in blocks */
49318 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
49319 + atomic_unchecked_t s_bal_goals; /* goal hits */
49320 + atomic_unchecked_t s_bal_breaks; /* too long searches */
49321 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
49322 spinlock_t s_bal_lock;
49323 unsigned long s_mb_buddies_generated;
49324 unsigned long long s_mb_generation_time;
49325 - atomic_t s_mb_lost_chunks;
49326 - atomic_t s_mb_preallocated;
49327 - atomic_t s_mb_discarded;
49328 + atomic_unchecked_t s_mb_lost_chunks;
49329 + atomic_unchecked_t s_mb_preallocated;
49330 + atomic_unchecked_t s_mb_discarded;
49331 atomic_t s_lock_busy;
49332
49333 /* locality groups */
49334 diff --git a/fs/ext4/file.c b/fs/ext4/file.c
49335 index 2a60541..7439d61 100644
49336 --- a/fs/ext4/file.c
49337 +++ b/fs/ext4/file.c
49338 @@ -122,8 +122,8 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
49339 cp = d_path(&path, buf, sizeof(buf));
49340 path_put(&path);
49341 if (!IS_ERR(cp)) {
49342 - memcpy(sbi->s_es->s_last_mounted, cp,
49343 - sizeof(sbi->s_es->s_last_mounted));
49344 + strlcpy(sbi->s_es->s_last_mounted, cp,
49345 + sizeof(sbi->s_es->s_last_mounted));
49346 sb->s_dirt = 1;
49347 }
49348 }
49349 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
49350 index 42bac1b..0aab9d8 100644
49351 --- a/fs/ext4/mballoc.c
49352 +++ b/fs/ext4/mballoc.c
49353 @@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
49354 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
49355
49356 if (EXT4_SB(sb)->s_mb_stats)
49357 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
49358 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
49359
49360 break;
49361 }
49362 @@ -2131,7 +2131,7 @@ repeat:
49363 ac->ac_status = AC_STATUS_CONTINUE;
49364 ac->ac_flags |= EXT4_MB_HINT_FIRST;
49365 cr = 3;
49366 - atomic_inc(&sbi->s_mb_lost_chunks);
49367 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
49368 goto repeat;
49369 }
49370 }
49371 @@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
49372 ext4_grpblk_t counters[16];
49373 } sg;
49374
49375 + pax_track_stack();
49376 +
49377 group--;
49378 if (group == 0)
49379 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
49380 @@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *sb)
49381 if (sbi->s_mb_stats) {
49382 printk(KERN_INFO
49383 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
49384 - atomic_read(&sbi->s_bal_allocated),
49385 - atomic_read(&sbi->s_bal_reqs),
49386 - atomic_read(&sbi->s_bal_success));
49387 + atomic_read_unchecked(&sbi->s_bal_allocated),
49388 + atomic_read_unchecked(&sbi->s_bal_reqs),
49389 + atomic_read_unchecked(&sbi->s_bal_success));
49390 printk(KERN_INFO
49391 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
49392 "%u 2^N hits, %u breaks, %u lost\n",
49393 - atomic_read(&sbi->s_bal_ex_scanned),
49394 - atomic_read(&sbi->s_bal_goals),
49395 - atomic_read(&sbi->s_bal_2orders),
49396 - atomic_read(&sbi->s_bal_breaks),
49397 - atomic_read(&sbi->s_mb_lost_chunks));
49398 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
49399 + atomic_read_unchecked(&sbi->s_bal_goals),
49400 + atomic_read_unchecked(&sbi->s_bal_2orders),
49401 + atomic_read_unchecked(&sbi->s_bal_breaks),
49402 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
49403 printk(KERN_INFO
49404 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
49405 sbi->s_mb_buddies_generated++,
49406 sbi->s_mb_generation_time);
49407 printk(KERN_INFO
49408 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
49409 - atomic_read(&sbi->s_mb_preallocated),
49410 - atomic_read(&sbi->s_mb_discarded));
49411 + atomic_read_unchecked(&sbi->s_mb_preallocated),
49412 + atomic_read_unchecked(&sbi->s_mb_discarded));
49413 }
49414
49415 free_percpu(sbi->s_locality_groups);
49416 @@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
49417 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
49418
49419 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
49420 - atomic_inc(&sbi->s_bal_reqs);
49421 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
49422 + atomic_inc_unchecked(&sbi->s_bal_reqs);
49423 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
49424 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
49425 - atomic_inc(&sbi->s_bal_success);
49426 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
49427 + atomic_inc_unchecked(&sbi->s_bal_success);
49428 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
49429 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
49430 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
49431 - atomic_inc(&sbi->s_bal_goals);
49432 + atomic_inc_unchecked(&sbi->s_bal_goals);
49433 if (ac->ac_found > sbi->s_mb_max_to_scan)
49434 - atomic_inc(&sbi->s_bal_breaks);
49435 + atomic_inc_unchecked(&sbi->s_bal_breaks);
49436 }
49437
49438 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
49439 @@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
49440 trace_ext4_mb_new_inode_pa(ac, pa);
49441
49442 ext4_mb_use_inode_pa(ac, pa);
49443 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49444 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49445
49446 ei = EXT4_I(ac->ac_inode);
49447 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
49448 @@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
49449 trace_ext4_mb_new_group_pa(ac, pa);
49450
49451 ext4_mb_use_group_pa(ac, pa);
49452 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49453 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49454
49455 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
49456 lg = ac->ac_lg;
49457 @@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
49458 * from the bitmap and continue.
49459 */
49460 }
49461 - atomic_add(free, &sbi->s_mb_discarded);
49462 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
49463
49464 return err;
49465 }
49466 @@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
49467 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
49468 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
49469 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
49470 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
49471 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
49472
49473 if (ac) {
49474 ac->ac_sb = sb;
49475 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
49476 index f1e7077..edd86b2 100644
49477 --- a/fs/ext4/super.c
49478 +++ b/fs/ext4/super.c
49479 @@ -2286,7 +2286,7 @@ static void ext4_sb_release(struct kobject *kobj)
49480 }
49481
49482
49483 -static struct sysfs_ops ext4_attr_ops = {
49484 +static const struct sysfs_ops ext4_attr_ops = {
49485 .show = ext4_attr_show,
49486 .store = ext4_attr_store,
49487 };
49488 diff --git a/fs/fcntl.c b/fs/fcntl.c
49489 index 97e01dc..e9aab2d 100644
49490 --- a/fs/fcntl.c
49491 +++ b/fs/fcntl.c
49492 @@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
49493 if (err)
49494 return err;
49495
49496 + if (gr_handle_chroot_fowner(pid, type))
49497 + return -ENOENT;
49498 + if (gr_check_protected_task_fowner(pid, type))
49499 + return -EACCES;
49500 +
49501 f_modown(filp, pid, type, force);
49502 return 0;
49503 }
49504 @@ -265,7 +270,7 @@ pid_t f_getown(struct file *filp)
49505
49506 static int f_setown_ex(struct file *filp, unsigned long arg)
49507 {
49508 - struct f_owner_ex * __user owner_p = (void * __user)arg;
49509 + struct f_owner_ex __user *owner_p = (void __user *)arg;
49510 struct f_owner_ex owner;
49511 struct pid *pid;
49512 int type;
49513 @@ -305,7 +310,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
49514
49515 static int f_getown_ex(struct file *filp, unsigned long arg)
49516 {
49517 - struct f_owner_ex * __user owner_p = (void * __user)arg;
49518 + struct f_owner_ex __user *owner_p = (void __user *)arg;
49519 struct f_owner_ex owner;
49520 int ret = 0;
49521
49522 @@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
49523 switch (cmd) {
49524 case F_DUPFD:
49525 case F_DUPFD_CLOEXEC:
49526 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
49527 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
49528 break;
49529 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
49530 diff --git a/fs/fifo.c b/fs/fifo.c
49531 index f8f97b8..b1f2259 100644
49532 --- a/fs/fifo.c
49533 +++ b/fs/fifo.c
49534 @@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
49535 */
49536 filp->f_op = &read_pipefifo_fops;
49537 pipe->r_counter++;
49538 - if (pipe->readers++ == 0)
49539 + if (atomic_inc_return(&pipe->readers) == 1)
49540 wake_up_partner(inode);
49541
49542 - if (!pipe->writers) {
49543 + if (!atomic_read(&pipe->writers)) {
49544 if ((filp->f_flags & O_NONBLOCK)) {
49545 /* suppress POLLHUP until we have
49546 * seen a writer */
49547 @@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
49548 * errno=ENXIO when there is no process reading the FIFO.
49549 */
49550 ret = -ENXIO;
49551 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
49552 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
49553 goto err;
49554
49555 filp->f_op = &write_pipefifo_fops;
49556 pipe->w_counter++;
49557 - if (!pipe->writers++)
49558 + if (atomic_inc_return(&pipe->writers) == 1)
49559 wake_up_partner(inode);
49560
49561 - if (!pipe->readers) {
49562 + if (!atomic_read(&pipe->readers)) {
49563 wait_for_partner(inode, &pipe->r_counter);
49564 if (signal_pending(current))
49565 goto err_wr;
49566 @@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
49567 */
49568 filp->f_op = &rdwr_pipefifo_fops;
49569
49570 - pipe->readers++;
49571 - pipe->writers++;
49572 + atomic_inc(&pipe->readers);
49573 + atomic_inc(&pipe->writers);
49574 pipe->r_counter++;
49575 pipe->w_counter++;
49576 - if (pipe->readers == 1 || pipe->writers == 1)
49577 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
49578 wake_up_partner(inode);
49579 break;
49580
49581 @@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
49582 return 0;
49583
49584 err_rd:
49585 - if (!--pipe->readers)
49586 + if (atomic_dec_and_test(&pipe->readers))
49587 wake_up_interruptible(&pipe->wait);
49588 ret = -ERESTARTSYS;
49589 goto err;
49590
49591 err_wr:
49592 - if (!--pipe->writers)
49593 + if (atomic_dec_and_test(&pipe->writers))
49594 wake_up_interruptible(&pipe->wait);
49595 ret = -ERESTARTSYS;
49596 goto err;
49597
49598 err:
49599 - if (!pipe->readers && !pipe->writers)
49600 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
49601 free_pipe_info(inode);
49602
49603 err_nocleanup:
49604 diff --git a/fs/file.c b/fs/file.c
49605 index 87e1290..a930cc4 100644
49606 --- a/fs/file.c
49607 +++ b/fs/file.c
49608 @@ -14,6 +14,7 @@
49609 #include <linux/slab.h>
49610 #include <linux/vmalloc.h>
49611 #include <linux/file.h>
49612 +#include <linux/security.h>
49613 #include <linux/fdtable.h>
49614 #include <linux/bitops.h>
49615 #include <linux/interrupt.h>
49616 @@ -257,6 +258,8 @@ int expand_files(struct files_struct *files, int nr)
49617 * N.B. For clone tasks sharing a files structure, this test
49618 * will limit the total number of files that can be opened.
49619 */
49620 +
49621 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
49622 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
49623 return -EMFILE;
49624
49625 diff --git a/fs/filesystems.c b/fs/filesystems.c
49626 index a24c58e..53f91ee 100644
49627 --- a/fs/filesystems.c
49628 +++ b/fs/filesystems.c
49629 @@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(const char *name)
49630 int len = dot ? dot - name : strlen(name);
49631
49632 fs = __get_fs_type(name, len);
49633 +
49634 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
49635 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
49636 +#else
49637 if (!fs && (request_module("%.*s", len, name) == 0))
49638 +#endif
49639 fs = __get_fs_type(name, len);
49640
49641 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
49642 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
49643 index eee0590..ef5bc0e 100644
49644 --- a/fs/fs_struct.c
49645 +++ b/fs/fs_struct.c
49646 @@ -4,6 +4,7 @@
49647 #include <linux/path.h>
49648 #include <linux/slab.h>
49649 #include <linux/fs_struct.h>
49650 +#include <linux/grsecurity.h>
49651
49652 /*
49653 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
49654 @@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
49655 old_root = fs->root;
49656 fs->root = *path;
49657 path_get(path);
49658 + gr_set_chroot_entries(current, path);
49659 write_unlock(&fs->lock);
49660 if (old_root.dentry)
49661 path_put(&old_root);
49662 @@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
49663 && fs->root.mnt == old_root->mnt) {
49664 path_get(new_root);
49665 fs->root = *new_root;
49666 + gr_set_chroot_entries(p, new_root);
49667 count++;
49668 }
49669 if (fs->pwd.dentry == old_root->dentry
49670 @@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
49671 task_lock(tsk);
49672 write_lock(&fs->lock);
49673 tsk->fs = NULL;
49674 - kill = !--fs->users;
49675 + gr_clear_chroot_entries(tsk);
49676 + kill = !atomic_dec_return(&fs->users);
49677 write_unlock(&fs->lock);
49678 task_unlock(tsk);
49679 if (kill)
49680 @@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
49681 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
49682 /* We don't need to lock fs - think why ;-) */
49683 if (fs) {
49684 - fs->users = 1;
49685 + atomic_set(&fs->users, 1);
49686 fs->in_exec = 0;
49687 rwlock_init(&fs->lock);
49688 fs->umask = old->umask;
49689 @@ -127,8 +131,9 @@ int unshare_fs_struct(void)
49690
49691 task_lock(current);
49692 write_lock(&fs->lock);
49693 - kill = !--fs->users;
49694 + kill = !atomic_dec_return(&fs->users);
49695 current->fs = new_fs;
49696 + gr_set_chroot_entries(current, &new_fs->root);
49697 write_unlock(&fs->lock);
49698 task_unlock(current);
49699
49700 @@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask);
49701
49702 /* to be mentioned only in INIT_TASK */
49703 struct fs_struct init_fs = {
49704 - .users = 1,
49705 + .users = ATOMIC_INIT(1),
49706 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
49707 .umask = 0022,
49708 };
49709 @@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
49710 task_lock(current);
49711
49712 write_lock(&init_fs.lock);
49713 - init_fs.users++;
49714 + atomic_inc(&init_fs.users);
49715 write_unlock(&init_fs.lock);
49716
49717 write_lock(&fs->lock);
49718 current->fs = &init_fs;
49719 - kill = !--fs->users;
49720 + gr_set_chroot_entries(current, &current->fs->root);
49721 + kill = !atomic_dec_return(&fs->users);
49722 write_unlock(&fs->lock);
49723
49724 task_unlock(current);
49725 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
49726 index 9905350..02eaec4 100644
49727 --- a/fs/fscache/cookie.c
49728 +++ b/fs/fscache/cookie.c
49729 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
49730 parent ? (char *) parent->def->name : "<no-parent>",
49731 def->name, netfs_data);
49732
49733 - fscache_stat(&fscache_n_acquires);
49734 + fscache_stat_unchecked(&fscache_n_acquires);
49735
49736 /* if there's no parent cookie, then we don't create one here either */
49737 if (!parent) {
49738 - fscache_stat(&fscache_n_acquires_null);
49739 + fscache_stat_unchecked(&fscache_n_acquires_null);
49740 _leave(" [no parent]");
49741 return NULL;
49742 }
49743 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
49744 /* allocate and initialise a cookie */
49745 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
49746 if (!cookie) {
49747 - fscache_stat(&fscache_n_acquires_oom);
49748 + fscache_stat_unchecked(&fscache_n_acquires_oom);
49749 _leave(" [ENOMEM]");
49750 return NULL;
49751 }
49752 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
49753
49754 switch (cookie->def->type) {
49755 case FSCACHE_COOKIE_TYPE_INDEX:
49756 - fscache_stat(&fscache_n_cookie_index);
49757 + fscache_stat_unchecked(&fscache_n_cookie_index);
49758 break;
49759 case FSCACHE_COOKIE_TYPE_DATAFILE:
49760 - fscache_stat(&fscache_n_cookie_data);
49761 + fscache_stat_unchecked(&fscache_n_cookie_data);
49762 break;
49763 default:
49764 - fscache_stat(&fscache_n_cookie_special);
49765 + fscache_stat_unchecked(&fscache_n_cookie_special);
49766 break;
49767 }
49768
49769 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
49770 if (fscache_acquire_non_index_cookie(cookie) < 0) {
49771 atomic_dec(&parent->n_children);
49772 __fscache_cookie_put(cookie);
49773 - fscache_stat(&fscache_n_acquires_nobufs);
49774 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
49775 _leave(" = NULL");
49776 return NULL;
49777 }
49778 }
49779
49780 - fscache_stat(&fscache_n_acquires_ok);
49781 + fscache_stat_unchecked(&fscache_n_acquires_ok);
49782 _leave(" = %p", cookie);
49783 return cookie;
49784 }
49785 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
49786 cache = fscache_select_cache_for_object(cookie->parent);
49787 if (!cache) {
49788 up_read(&fscache_addremove_sem);
49789 - fscache_stat(&fscache_n_acquires_no_cache);
49790 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
49791 _leave(" = -ENOMEDIUM [no cache]");
49792 return -ENOMEDIUM;
49793 }
49794 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
49795 object = cache->ops->alloc_object(cache, cookie);
49796 fscache_stat_d(&fscache_n_cop_alloc_object);
49797 if (IS_ERR(object)) {
49798 - fscache_stat(&fscache_n_object_no_alloc);
49799 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
49800 ret = PTR_ERR(object);
49801 goto error;
49802 }
49803
49804 - fscache_stat(&fscache_n_object_alloc);
49805 + fscache_stat_unchecked(&fscache_n_object_alloc);
49806
49807 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
49808
49809 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
49810 struct fscache_object *object;
49811 struct hlist_node *_p;
49812
49813 - fscache_stat(&fscache_n_updates);
49814 + fscache_stat_unchecked(&fscache_n_updates);
49815
49816 if (!cookie) {
49817 - fscache_stat(&fscache_n_updates_null);
49818 + fscache_stat_unchecked(&fscache_n_updates_null);
49819 _leave(" [no cookie]");
49820 return;
49821 }
49822 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
49823 struct fscache_object *object;
49824 unsigned long event;
49825
49826 - fscache_stat(&fscache_n_relinquishes);
49827 + fscache_stat_unchecked(&fscache_n_relinquishes);
49828 if (retire)
49829 - fscache_stat(&fscache_n_relinquishes_retire);
49830 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
49831
49832 if (!cookie) {
49833 - fscache_stat(&fscache_n_relinquishes_null);
49834 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
49835 _leave(" [no cookie]");
49836 return;
49837 }
49838 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
49839
49840 /* wait for the cookie to finish being instantiated (or to fail) */
49841 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
49842 - fscache_stat(&fscache_n_relinquishes_waitcrt);
49843 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
49844 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
49845 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
49846 }
49847 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
49848 index edd7434..0725e66 100644
49849 --- a/fs/fscache/internal.h
49850 +++ b/fs/fscache/internal.h
49851 @@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
49852 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
49853 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
49854
49855 -extern atomic_t fscache_n_op_pend;
49856 -extern atomic_t fscache_n_op_run;
49857 -extern atomic_t fscache_n_op_enqueue;
49858 -extern atomic_t fscache_n_op_deferred_release;
49859 -extern atomic_t fscache_n_op_release;
49860 -extern atomic_t fscache_n_op_gc;
49861 -extern atomic_t fscache_n_op_cancelled;
49862 -extern atomic_t fscache_n_op_rejected;
49863 +extern atomic_unchecked_t fscache_n_op_pend;
49864 +extern atomic_unchecked_t fscache_n_op_run;
49865 +extern atomic_unchecked_t fscache_n_op_enqueue;
49866 +extern atomic_unchecked_t fscache_n_op_deferred_release;
49867 +extern atomic_unchecked_t fscache_n_op_release;
49868 +extern atomic_unchecked_t fscache_n_op_gc;
49869 +extern atomic_unchecked_t fscache_n_op_cancelled;
49870 +extern atomic_unchecked_t fscache_n_op_rejected;
49871
49872 -extern atomic_t fscache_n_attr_changed;
49873 -extern atomic_t fscache_n_attr_changed_ok;
49874 -extern atomic_t fscache_n_attr_changed_nobufs;
49875 -extern atomic_t fscache_n_attr_changed_nomem;
49876 -extern atomic_t fscache_n_attr_changed_calls;
49877 +extern atomic_unchecked_t fscache_n_attr_changed;
49878 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
49879 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
49880 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
49881 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
49882
49883 -extern atomic_t fscache_n_allocs;
49884 -extern atomic_t fscache_n_allocs_ok;
49885 -extern atomic_t fscache_n_allocs_wait;
49886 -extern atomic_t fscache_n_allocs_nobufs;
49887 -extern atomic_t fscache_n_allocs_intr;
49888 -extern atomic_t fscache_n_allocs_object_dead;
49889 -extern atomic_t fscache_n_alloc_ops;
49890 -extern atomic_t fscache_n_alloc_op_waits;
49891 +extern atomic_unchecked_t fscache_n_allocs;
49892 +extern atomic_unchecked_t fscache_n_allocs_ok;
49893 +extern atomic_unchecked_t fscache_n_allocs_wait;
49894 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
49895 +extern atomic_unchecked_t fscache_n_allocs_intr;
49896 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
49897 +extern atomic_unchecked_t fscache_n_alloc_ops;
49898 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
49899
49900 -extern atomic_t fscache_n_retrievals;
49901 -extern atomic_t fscache_n_retrievals_ok;
49902 -extern atomic_t fscache_n_retrievals_wait;
49903 -extern atomic_t fscache_n_retrievals_nodata;
49904 -extern atomic_t fscache_n_retrievals_nobufs;
49905 -extern atomic_t fscache_n_retrievals_intr;
49906 -extern atomic_t fscache_n_retrievals_nomem;
49907 -extern atomic_t fscache_n_retrievals_object_dead;
49908 -extern atomic_t fscache_n_retrieval_ops;
49909 -extern atomic_t fscache_n_retrieval_op_waits;
49910 +extern atomic_unchecked_t fscache_n_retrievals;
49911 +extern atomic_unchecked_t fscache_n_retrievals_ok;
49912 +extern atomic_unchecked_t fscache_n_retrievals_wait;
49913 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
49914 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
49915 +extern atomic_unchecked_t fscache_n_retrievals_intr;
49916 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
49917 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
49918 +extern atomic_unchecked_t fscache_n_retrieval_ops;
49919 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
49920
49921 -extern atomic_t fscache_n_stores;
49922 -extern atomic_t fscache_n_stores_ok;
49923 -extern atomic_t fscache_n_stores_again;
49924 -extern atomic_t fscache_n_stores_nobufs;
49925 -extern atomic_t fscache_n_stores_oom;
49926 -extern atomic_t fscache_n_store_ops;
49927 -extern atomic_t fscache_n_store_calls;
49928 -extern atomic_t fscache_n_store_pages;
49929 -extern atomic_t fscache_n_store_radix_deletes;
49930 -extern atomic_t fscache_n_store_pages_over_limit;
49931 +extern atomic_unchecked_t fscache_n_stores;
49932 +extern atomic_unchecked_t fscache_n_stores_ok;
49933 +extern atomic_unchecked_t fscache_n_stores_again;
49934 +extern atomic_unchecked_t fscache_n_stores_nobufs;
49935 +extern atomic_unchecked_t fscache_n_stores_oom;
49936 +extern atomic_unchecked_t fscache_n_store_ops;
49937 +extern atomic_unchecked_t fscache_n_store_calls;
49938 +extern atomic_unchecked_t fscache_n_store_pages;
49939 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
49940 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
49941
49942 -extern atomic_t fscache_n_store_vmscan_not_storing;
49943 -extern atomic_t fscache_n_store_vmscan_gone;
49944 -extern atomic_t fscache_n_store_vmscan_busy;
49945 -extern atomic_t fscache_n_store_vmscan_cancelled;
49946 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
49947 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
49948 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
49949 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
49950
49951 -extern atomic_t fscache_n_marks;
49952 -extern atomic_t fscache_n_uncaches;
49953 +extern atomic_unchecked_t fscache_n_marks;
49954 +extern atomic_unchecked_t fscache_n_uncaches;
49955
49956 -extern atomic_t fscache_n_acquires;
49957 -extern atomic_t fscache_n_acquires_null;
49958 -extern atomic_t fscache_n_acquires_no_cache;
49959 -extern atomic_t fscache_n_acquires_ok;
49960 -extern atomic_t fscache_n_acquires_nobufs;
49961 -extern atomic_t fscache_n_acquires_oom;
49962 +extern atomic_unchecked_t fscache_n_acquires;
49963 +extern atomic_unchecked_t fscache_n_acquires_null;
49964 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
49965 +extern atomic_unchecked_t fscache_n_acquires_ok;
49966 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
49967 +extern atomic_unchecked_t fscache_n_acquires_oom;
49968
49969 -extern atomic_t fscache_n_updates;
49970 -extern atomic_t fscache_n_updates_null;
49971 -extern atomic_t fscache_n_updates_run;
49972 +extern atomic_unchecked_t fscache_n_updates;
49973 +extern atomic_unchecked_t fscache_n_updates_null;
49974 +extern atomic_unchecked_t fscache_n_updates_run;
49975
49976 -extern atomic_t fscache_n_relinquishes;
49977 -extern atomic_t fscache_n_relinquishes_null;
49978 -extern atomic_t fscache_n_relinquishes_waitcrt;
49979 -extern atomic_t fscache_n_relinquishes_retire;
49980 +extern atomic_unchecked_t fscache_n_relinquishes;
49981 +extern atomic_unchecked_t fscache_n_relinquishes_null;
49982 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
49983 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
49984
49985 -extern atomic_t fscache_n_cookie_index;
49986 -extern atomic_t fscache_n_cookie_data;
49987 -extern atomic_t fscache_n_cookie_special;
49988 +extern atomic_unchecked_t fscache_n_cookie_index;
49989 +extern atomic_unchecked_t fscache_n_cookie_data;
49990 +extern atomic_unchecked_t fscache_n_cookie_special;
49991
49992 -extern atomic_t fscache_n_object_alloc;
49993 -extern atomic_t fscache_n_object_no_alloc;
49994 -extern atomic_t fscache_n_object_lookups;
49995 -extern atomic_t fscache_n_object_lookups_negative;
49996 -extern atomic_t fscache_n_object_lookups_positive;
49997 -extern atomic_t fscache_n_object_lookups_timed_out;
49998 -extern atomic_t fscache_n_object_created;
49999 -extern atomic_t fscache_n_object_avail;
50000 -extern atomic_t fscache_n_object_dead;
50001 +extern atomic_unchecked_t fscache_n_object_alloc;
50002 +extern atomic_unchecked_t fscache_n_object_no_alloc;
50003 +extern atomic_unchecked_t fscache_n_object_lookups;
50004 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
50005 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
50006 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
50007 +extern atomic_unchecked_t fscache_n_object_created;
50008 +extern atomic_unchecked_t fscache_n_object_avail;
50009 +extern atomic_unchecked_t fscache_n_object_dead;
50010
50011 -extern atomic_t fscache_n_checkaux_none;
50012 -extern atomic_t fscache_n_checkaux_okay;
50013 -extern atomic_t fscache_n_checkaux_update;
50014 -extern atomic_t fscache_n_checkaux_obsolete;
50015 +extern atomic_unchecked_t fscache_n_checkaux_none;
50016 +extern atomic_unchecked_t fscache_n_checkaux_okay;
50017 +extern atomic_unchecked_t fscache_n_checkaux_update;
50018 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
50019
50020 extern atomic_t fscache_n_cop_alloc_object;
50021 extern atomic_t fscache_n_cop_lookup_object;
50022 @@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t *stat)
50023 atomic_inc(stat);
50024 }
50025
50026 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
50027 +{
50028 + atomic_inc_unchecked(stat);
50029 +}
50030 +
50031 static inline void fscache_stat_d(atomic_t *stat)
50032 {
50033 atomic_dec(stat);
50034 @@ -259,6 +264,7 @@ extern const struct file_operations fscache_stats_fops;
50035
50036 #define __fscache_stat(stat) (NULL)
50037 #define fscache_stat(stat) do {} while (0)
50038 +#define fscache_stat_unchecked(stat) do {} while (0)
50039 #define fscache_stat_d(stat) do {} while (0)
50040 #endif
50041
50042 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
50043 index e513ac5..e888d34 100644
50044 --- a/fs/fscache/object.c
50045 +++ b/fs/fscache/object.c
50046 @@ -144,7 +144,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
50047 /* update the object metadata on disk */
50048 case FSCACHE_OBJECT_UPDATING:
50049 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
50050 - fscache_stat(&fscache_n_updates_run);
50051 + fscache_stat_unchecked(&fscache_n_updates_run);
50052 fscache_stat(&fscache_n_cop_update_object);
50053 object->cache->ops->update_object(object);
50054 fscache_stat_d(&fscache_n_cop_update_object);
50055 @@ -233,7 +233,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
50056 spin_lock(&object->lock);
50057 object->state = FSCACHE_OBJECT_DEAD;
50058 spin_unlock(&object->lock);
50059 - fscache_stat(&fscache_n_object_dead);
50060 + fscache_stat_unchecked(&fscache_n_object_dead);
50061 goto terminal_transit;
50062
50063 /* handle the parent cache of this object being withdrawn from
50064 @@ -248,7 +248,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
50065 spin_lock(&object->lock);
50066 object->state = FSCACHE_OBJECT_DEAD;
50067 spin_unlock(&object->lock);
50068 - fscache_stat(&fscache_n_object_dead);
50069 + fscache_stat_unchecked(&fscache_n_object_dead);
50070 goto terminal_transit;
50071
50072 /* complain about the object being woken up once it is
50073 @@ -492,7 +492,7 @@ static void fscache_lookup_object(struct fscache_object *object)
50074 parent->cookie->def->name, cookie->def->name,
50075 object->cache->tag->name);
50076
50077 - fscache_stat(&fscache_n_object_lookups);
50078 + fscache_stat_unchecked(&fscache_n_object_lookups);
50079 fscache_stat(&fscache_n_cop_lookup_object);
50080 ret = object->cache->ops->lookup_object(object);
50081 fscache_stat_d(&fscache_n_cop_lookup_object);
50082 @@ -503,7 +503,7 @@ static void fscache_lookup_object(struct fscache_object *object)
50083 if (ret == -ETIMEDOUT) {
50084 /* probably stuck behind another object, so move this one to
50085 * the back of the queue */
50086 - fscache_stat(&fscache_n_object_lookups_timed_out);
50087 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
50088 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
50089 }
50090
50091 @@ -526,7 +526,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
50092
50093 spin_lock(&object->lock);
50094 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
50095 - fscache_stat(&fscache_n_object_lookups_negative);
50096 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
50097
50098 /* transit here to allow write requests to begin stacking up
50099 * and read requests to begin returning ENODATA */
50100 @@ -572,7 +572,7 @@ void fscache_obtained_object(struct fscache_object *object)
50101 * result, in which case there may be data available */
50102 spin_lock(&object->lock);
50103 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
50104 - fscache_stat(&fscache_n_object_lookups_positive);
50105 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
50106
50107 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
50108
50109 @@ -586,7 +586,7 @@ void fscache_obtained_object(struct fscache_object *object)
50110 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
50111 } else {
50112 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
50113 - fscache_stat(&fscache_n_object_created);
50114 + fscache_stat_unchecked(&fscache_n_object_created);
50115
50116 object->state = FSCACHE_OBJECT_AVAILABLE;
50117 spin_unlock(&object->lock);
50118 @@ -633,7 +633,7 @@ static void fscache_object_available(struct fscache_object *object)
50119 fscache_enqueue_dependents(object);
50120
50121 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
50122 - fscache_stat(&fscache_n_object_avail);
50123 + fscache_stat_unchecked(&fscache_n_object_avail);
50124
50125 _leave("");
50126 }
50127 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
50128 enum fscache_checkaux result;
50129
50130 if (!object->cookie->def->check_aux) {
50131 - fscache_stat(&fscache_n_checkaux_none);
50132 + fscache_stat_unchecked(&fscache_n_checkaux_none);
50133 return FSCACHE_CHECKAUX_OKAY;
50134 }
50135
50136 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
50137 switch (result) {
50138 /* entry okay as is */
50139 case FSCACHE_CHECKAUX_OKAY:
50140 - fscache_stat(&fscache_n_checkaux_okay);
50141 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
50142 break;
50143
50144 /* entry requires update */
50145 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
50146 - fscache_stat(&fscache_n_checkaux_update);
50147 + fscache_stat_unchecked(&fscache_n_checkaux_update);
50148 break;
50149
50150 /* entry requires deletion */
50151 case FSCACHE_CHECKAUX_OBSOLETE:
50152 - fscache_stat(&fscache_n_checkaux_obsolete);
50153 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
50154 break;
50155
50156 default:
50157 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
50158 index 313e79a..775240f 100644
50159 --- a/fs/fscache/operation.c
50160 +++ b/fs/fscache/operation.c
50161 @@ -16,7 +16,7 @@
50162 #include <linux/seq_file.h>
50163 #include "internal.h"
50164
50165 -atomic_t fscache_op_debug_id;
50166 +atomic_unchecked_t fscache_op_debug_id;
50167 EXPORT_SYMBOL(fscache_op_debug_id);
50168
50169 /**
50170 @@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
50171 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
50172 ASSERTCMP(atomic_read(&op->usage), >, 0);
50173
50174 - fscache_stat(&fscache_n_op_enqueue);
50175 + fscache_stat_unchecked(&fscache_n_op_enqueue);
50176 switch (op->flags & FSCACHE_OP_TYPE) {
50177 case FSCACHE_OP_FAST:
50178 _debug("queue fast");
50179 @@ -76,7 +76,7 @@ static void fscache_run_op(struct fscache_object *object,
50180 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
50181 if (op->processor)
50182 fscache_enqueue_operation(op);
50183 - fscache_stat(&fscache_n_op_run);
50184 + fscache_stat_unchecked(&fscache_n_op_run);
50185 }
50186
50187 /*
50188 @@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
50189 if (object->n_ops > 0) {
50190 atomic_inc(&op->usage);
50191 list_add_tail(&op->pend_link, &object->pending_ops);
50192 - fscache_stat(&fscache_n_op_pend);
50193 + fscache_stat_unchecked(&fscache_n_op_pend);
50194 } else if (!list_empty(&object->pending_ops)) {
50195 atomic_inc(&op->usage);
50196 list_add_tail(&op->pend_link, &object->pending_ops);
50197 - fscache_stat(&fscache_n_op_pend);
50198 + fscache_stat_unchecked(&fscache_n_op_pend);
50199 fscache_start_operations(object);
50200 } else {
50201 ASSERTCMP(object->n_in_progress, ==, 0);
50202 @@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
50203 object->n_exclusive++; /* reads and writes must wait */
50204 atomic_inc(&op->usage);
50205 list_add_tail(&op->pend_link, &object->pending_ops);
50206 - fscache_stat(&fscache_n_op_pend);
50207 + fscache_stat_unchecked(&fscache_n_op_pend);
50208 ret = 0;
50209 } else {
50210 /* not allowed to submit ops in any other state */
50211 @@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_object *object,
50212 if (object->n_exclusive > 0) {
50213 atomic_inc(&op->usage);
50214 list_add_tail(&op->pend_link, &object->pending_ops);
50215 - fscache_stat(&fscache_n_op_pend);
50216 + fscache_stat_unchecked(&fscache_n_op_pend);
50217 } else if (!list_empty(&object->pending_ops)) {
50218 atomic_inc(&op->usage);
50219 list_add_tail(&op->pend_link, &object->pending_ops);
50220 - fscache_stat(&fscache_n_op_pend);
50221 + fscache_stat_unchecked(&fscache_n_op_pend);
50222 fscache_start_operations(object);
50223 } else {
50224 ASSERTCMP(object->n_exclusive, ==, 0);
50225 @@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_object *object,
50226 object->n_ops++;
50227 atomic_inc(&op->usage);
50228 list_add_tail(&op->pend_link, &object->pending_ops);
50229 - fscache_stat(&fscache_n_op_pend);
50230 + fscache_stat_unchecked(&fscache_n_op_pend);
50231 ret = 0;
50232 } else if (object->state == FSCACHE_OBJECT_DYING ||
50233 object->state == FSCACHE_OBJECT_LC_DYING ||
50234 object->state == FSCACHE_OBJECT_WITHDRAWING) {
50235 - fscache_stat(&fscache_n_op_rejected);
50236 + fscache_stat_unchecked(&fscache_n_op_rejected);
50237 ret = -ENOBUFS;
50238 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
50239 fscache_report_unexpected_submission(object, op, ostate);
50240 @@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_operation *op)
50241
50242 ret = -EBUSY;
50243 if (!list_empty(&op->pend_link)) {
50244 - fscache_stat(&fscache_n_op_cancelled);
50245 + fscache_stat_unchecked(&fscache_n_op_cancelled);
50246 list_del_init(&op->pend_link);
50247 object->n_ops--;
50248 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
50249 @@ -344,7 +344,7 @@ void fscache_put_operation(struct fscache_operation *op)
50250 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
50251 BUG();
50252
50253 - fscache_stat(&fscache_n_op_release);
50254 + fscache_stat_unchecked(&fscache_n_op_release);
50255
50256 if (op->release) {
50257 op->release(op);
50258 @@ -361,7 +361,7 @@ void fscache_put_operation(struct fscache_operation *op)
50259 * lock, and defer it otherwise */
50260 if (!spin_trylock(&object->lock)) {
50261 _debug("defer put");
50262 - fscache_stat(&fscache_n_op_deferred_release);
50263 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
50264
50265 cache = object->cache;
50266 spin_lock(&cache->op_gc_list_lock);
50267 @@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_struct *work)
50268
50269 _debug("GC DEFERRED REL OBJ%x OP%x",
50270 object->debug_id, op->debug_id);
50271 - fscache_stat(&fscache_n_op_gc);
50272 + fscache_stat_unchecked(&fscache_n_op_gc);
50273
50274 ASSERTCMP(atomic_read(&op->usage), ==, 0);
50275
50276 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
50277 index c598ea4..6aac13e 100644
50278 --- a/fs/fscache/page.c
50279 +++ b/fs/fscache/page.c
50280 @@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
50281 val = radix_tree_lookup(&cookie->stores, page->index);
50282 if (!val) {
50283 rcu_read_unlock();
50284 - fscache_stat(&fscache_n_store_vmscan_not_storing);
50285 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
50286 __fscache_uncache_page(cookie, page);
50287 return true;
50288 }
50289 @@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
50290 spin_unlock(&cookie->stores_lock);
50291
50292 if (xpage) {
50293 - fscache_stat(&fscache_n_store_vmscan_cancelled);
50294 - fscache_stat(&fscache_n_store_radix_deletes);
50295 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
50296 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
50297 ASSERTCMP(xpage, ==, page);
50298 } else {
50299 - fscache_stat(&fscache_n_store_vmscan_gone);
50300 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
50301 }
50302
50303 wake_up_bit(&cookie->flags, 0);
50304 @@ -106,7 +106,7 @@ page_busy:
50305 /* we might want to wait here, but that could deadlock the allocator as
50306 * the slow-work threads writing to the cache may all end up sleeping
50307 * on memory allocation */
50308 - fscache_stat(&fscache_n_store_vmscan_busy);
50309 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
50310 return false;
50311 }
50312 EXPORT_SYMBOL(__fscache_maybe_release_page);
50313 @@ -130,7 +130,7 @@ static void fscache_end_page_write(struct fscache_object *object,
50314 FSCACHE_COOKIE_STORING_TAG);
50315 if (!radix_tree_tag_get(&cookie->stores, page->index,
50316 FSCACHE_COOKIE_PENDING_TAG)) {
50317 - fscache_stat(&fscache_n_store_radix_deletes);
50318 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
50319 xpage = radix_tree_delete(&cookie->stores, page->index);
50320 }
50321 spin_unlock(&cookie->stores_lock);
50322 @@ -151,7 +151,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
50323
50324 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
50325
50326 - fscache_stat(&fscache_n_attr_changed_calls);
50327 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
50328
50329 if (fscache_object_is_active(object)) {
50330 fscache_set_op_state(op, "CallFS");
50331 @@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
50332
50333 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
50334
50335 - fscache_stat(&fscache_n_attr_changed);
50336 + fscache_stat_unchecked(&fscache_n_attr_changed);
50337
50338 op = kzalloc(sizeof(*op), GFP_KERNEL);
50339 if (!op) {
50340 - fscache_stat(&fscache_n_attr_changed_nomem);
50341 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
50342 _leave(" = -ENOMEM");
50343 return -ENOMEM;
50344 }
50345 @@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
50346 if (fscache_submit_exclusive_op(object, op) < 0)
50347 goto nobufs;
50348 spin_unlock(&cookie->lock);
50349 - fscache_stat(&fscache_n_attr_changed_ok);
50350 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
50351 fscache_put_operation(op);
50352 _leave(" = 0");
50353 return 0;
50354 @@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
50355 nobufs:
50356 spin_unlock(&cookie->lock);
50357 kfree(op);
50358 - fscache_stat(&fscache_n_attr_changed_nobufs);
50359 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
50360 _leave(" = %d", -ENOBUFS);
50361 return -ENOBUFS;
50362 }
50363 @@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
50364 /* allocate a retrieval operation and attempt to submit it */
50365 op = kzalloc(sizeof(*op), GFP_NOIO);
50366 if (!op) {
50367 - fscache_stat(&fscache_n_retrievals_nomem);
50368 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
50369 return NULL;
50370 }
50371
50372 @@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
50373 return 0;
50374 }
50375
50376 - fscache_stat(&fscache_n_retrievals_wait);
50377 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
50378
50379 jif = jiffies;
50380 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
50381 fscache_wait_bit_interruptible,
50382 TASK_INTERRUPTIBLE) != 0) {
50383 - fscache_stat(&fscache_n_retrievals_intr);
50384 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
50385 _leave(" = -ERESTARTSYS");
50386 return -ERESTARTSYS;
50387 }
50388 @@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
50389 */
50390 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
50391 struct fscache_retrieval *op,
50392 - atomic_t *stat_op_waits,
50393 - atomic_t *stat_object_dead)
50394 + atomic_unchecked_t *stat_op_waits,
50395 + atomic_unchecked_t *stat_object_dead)
50396 {
50397 int ret;
50398
50399 @@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
50400 goto check_if_dead;
50401
50402 _debug(">>> WT");
50403 - fscache_stat(stat_op_waits);
50404 + fscache_stat_unchecked(stat_op_waits);
50405 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
50406 fscache_wait_bit_interruptible,
50407 TASK_INTERRUPTIBLE) < 0) {
50408 @@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
50409
50410 check_if_dead:
50411 if (unlikely(fscache_object_is_dead(object))) {
50412 - fscache_stat(stat_object_dead);
50413 + fscache_stat_unchecked(stat_object_dead);
50414 return -ENOBUFS;
50415 }
50416 return 0;
50417 @@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
50418
50419 _enter("%p,%p,,,", cookie, page);
50420
50421 - fscache_stat(&fscache_n_retrievals);
50422 + fscache_stat_unchecked(&fscache_n_retrievals);
50423
50424 if (hlist_empty(&cookie->backing_objects))
50425 goto nobufs;
50426 @@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
50427 goto nobufs_unlock;
50428 spin_unlock(&cookie->lock);
50429
50430 - fscache_stat(&fscache_n_retrieval_ops);
50431 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
50432
50433 /* pin the netfs read context in case we need to do the actual netfs
50434 * read because we've encountered a cache read failure */
50435 @@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
50436
50437 error:
50438 if (ret == -ENOMEM)
50439 - fscache_stat(&fscache_n_retrievals_nomem);
50440 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
50441 else if (ret == -ERESTARTSYS)
50442 - fscache_stat(&fscache_n_retrievals_intr);
50443 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
50444 else if (ret == -ENODATA)
50445 - fscache_stat(&fscache_n_retrievals_nodata);
50446 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
50447 else if (ret < 0)
50448 - fscache_stat(&fscache_n_retrievals_nobufs);
50449 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50450 else
50451 - fscache_stat(&fscache_n_retrievals_ok);
50452 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
50453
50454 fscache_put_retrieval(op);
50455 _leave(" = %d", ret);
50456 @@ -453,7 +453,7 @@ nobufs_unlock:
50457 spin_unlock(&cookie->lock);
50458 kfree(op);
50459 nobufs:
50460 - fscache_stat(&fscache_n_retrievals_nobufs);
50461 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50462 _leave(" = -ENOBUFS");
50463 return -ENOBUFS;
50464 }
50465 @@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
50466
50467 _enter("%p,,%d,,,", cookie, *nr_pages);
50468
50469 - fscache_stat(&fscache_n_retrievals);
50470 + fscache_stat_unchecked(&fscache_n_retrievals);
50471
50472 if (hlist_empty(&cookie->backing_objects))
50473 goto nobufs;
50474 @@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
50475 goto nobufs_unlock;
50476 spin_unlock(&cookie->lock);
50477
50478 - fscache_stat(&fscache_n_retrieval_ops);
50479 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
50480
50481 /* pin the netfs read context in case we need to do the actual netfs
50482 * read because we've encountered a cache read failure */
50483 @@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
50484
50485 error:
50486 if (ret == -ENOMEM)
50487 - fscache_stat(&fscache_n_retrievals_nomem);
50488 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
50489 else if (ret == -ERESTARTSYS)
50490 - fscache_stat(&fscache_n_retrievals_intr);
50491 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
50492 else if (ret == -ENODATA)
50493 - fscache_stat(&fscache_n_retrievals_nodata);
50494 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
50495 else if (ret < 0)
50496 - fscache_stat(&fscache_n_retrievals_nobufs);
50497 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50498 else
50499 - fscache_stat(&fscache_n_retrievals_ok);
50500 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
50501
50502 fscache_put_retrieval(op);
50503 _leave(" = %d", ret);
50504 @@ -570,7 +570,7 @@ nobufs_unlock:
50505 spin_unlock(&cookie->lock);
50506 kfree(op);
50507 nobufs:
50508 - fscache_stat(&fscache_n_retrievals_nobufs);
50509 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50510 _leave(" = -ENOBUFS");
50511 return -ENOBUFS;
50512 }
50513 @@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50514
50515 _enter("%p,%p,,,", cookie, page);
50516
50517 - fscache_stat(&fscache_n_allocs);
50518 + fscache_stat_unchecked(&fscache_n_allocs);
50519
50520 if (hlist_empty(&cookie->backing_objects))
50521 goto nobufs;
50522 @@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50523 goto nobufs_unlock;
50524 spin_unlock(&cookie->lock);
50525
50526 - fscache_stat(&fscache_n_alloc_ops);
50527 + fscache_stat_unchecked(&fscache_n_alloc_ops);
50528
50529 ret = fscache_wait_for_retrieval_activation(
50530 object, op,
50531 @@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50532
50533 error:
50534 if (ret == -ERESTARTSYS)
50535 - fscache_stat(&fscache_n_allocs_intr);
50536 + fscache_stat_unchecked(&fscache_n_allocs_intr);
50537 else if (ret < 0)
50538 - fscache_stat(&fscache_n_allocs_nobufs);
50539 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
50540 else
50541 - fscache_stat(&fscache_n_allocs_ok);
50542 + fscache_stat_unchecked(&fscache_n_allocs_ok);
50543
50544 fscache_put_retrieval(op);
50545 _leave(" = %d", ret);
50546 @@ -651,7 +651,7 @@ nobufs_unlock:
50547 spin_unlock(&cookie->lock);
50548 kfree(op);
50549 nobufs:
50550 - fscache_stat(&fscache_n_allocs_nobufs);
50551 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
50552 _leave(" = -ENOBUFS");
50553 return -ENOBUFS;
50554 }
50555 @@ -694,7 +694,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50556
50557 spin_lock(&cookie->stores_lock);
50558
50559 - fscache_stat(&fscache_n_store_calls);
50560 + fscache_stat_unchecked(&fscache_n_store_calls);
50561
50562 /* find a page to store */
50563 page = NULL;
50564 @@ -705,7 +705,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50565 page = results[0];
50566 _debug("gang %d [%lx]", n, page->index);
50567 if (page->index > op->store_limit) {
50568 - fscache_stat(&fscache_n_store_pages_over_limit);
50569 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
50570 goto superseded;
50571 }
50572
50573 @@ -721,7 +721,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50574
50575 if (page) {
50576 fscache_set_op_state(&op->op, "Store");
50577 - fscache_stat(&fscache_n_store_pages);
50578 + fscache_stat_unchecked(&fscache_n_store_pages);
50579 fscache_stat(&fscache_n_cop_write_page);
50580 ret = object->cache->ops->write_page(op, page);
50581 fscache_stat_d(&fscache_n_cop_write_page);
50582 @@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50583 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
50584 ASSERT(PageFsCache(page));
50585
50586 - fscache_stat(&fscache_n_stores);
50587 + fscache_stat_unchecked(&fscache_n_stores);
50588
50589 op = kzalloc(sizeof(*op), GFP_NOIO);
50590 if (!op)
50591 @@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50592 spin_unlock(&cookie->stores_lock);
50593 spin_unlock(&object->lock);
50594
50595 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
50596 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
50597 op->store_limit = object->store_limit;
50598
50599 if (fscache_submit_op(object, &op->op) < 0)
50600 @@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50601
50602 spin_unlock(&cookie->lock);
50603 radix_tree_preload_end();
50604 - fscache_stat(&fscache_n_store_ops);
50605 - fscache_stat(&fscache_n_stores_ok);
50606 + fscache_stat_unchecked(&fscache_n_store_ops);
50607 + fscache_stat_unchecked(&fscache_n_stores_ok);
50608
50609 /* the slow work queue now carries its own ref on the object */
50610 fscache_put_operation(&op->op);
50611 @@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50612 return 0;
50613
50614 already_queued:
50615 - fscache_stat(&fscache_n_stores_again);
50616 + fscache_stat_unchecked(&fscache_n_stores_again);
50617 already_pending:
50618 spin_unlock(&cookie->stores_lock);
50619 spin_unlock(&object->lock);
50620 spin_unlock(&cookie->lock);
50621 radix_tree_preload_end();
50622 kfree(op);
50623 - fscache_stat(&fscache_n_stores_ok);
50624 + fscache_stat_unchecked(&fscache_n_stores_ok);
50625 _leave(" = 0");
50626 return 0;
50627
50628 @@ -886,14 +886,14 @@ nobufs:
50629 spin_unlock(&cookie->lock);
50630 radix_tree_preload_end();
50631 kfree(op);
50632 - fscache_stat(&fscache_n_stores_nobufs);
50633 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
50634 _leave(" = -ENOBUFS");
50635 return -ENOBUFS;
50636
50637 nomem_free:
50638 kfree(op);
50639 nomem:
50640 - fscache_stat(&fscache_n_stores_oom);
50641 + fscache_stat_unchecked(&fscache_n_stores_oom);
50642 _leave(" = -ENOMEM");
50643 return -ENOMEM;
50644 }
50645 @@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
50646 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
50647 ASSERTCMP(page, !=, NULL);
50648
50649 - fscache_stat(&fscache_n_uncaches);
50650 + fscache_stat_unchecked(&fscache_n_uncaches);
50651
50652 /* cache withdrawal may beat us to it */
50653 if (!PageFsCache(page))
50654 @@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
50655 unsigned long loop;
50656
50657 #ifdef CONFIG_FSCACHE_STATS
50658 - atomic_add(pagevec->nr, &fscache_n_marks);
50659 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
50660 #endif
50661
50662 for (loop = 0; loop < pagevec->nr; loop++) {
50663 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
50664 index 46435f3..8cddf18 100644
50665 --- a/fs/fscache/stats.c
50666 +++ b/fs/fscache/stats.c
50667 @@ -18,95 +18,95 @@
50668 /*
50669 * operation counters
50670 */
50671 -atomic_t fscache_n_op_pend;
50672 -atomic_t fscache_n_op_run;
50673 -atomic_t fscache_n_op_enqueue;
50674 -atomic_t fscache_n_op_requeue;
50675 -atomic_t fscache_n_op_deferred_release;
50676 -atomic_t fscache_n_op_release;
50677 -atomic_t fscache_n_op_gc;
50678 -atomic_t fscache_n_op_cancelled;
50679 -atomic_t fscache_n_op_rejected;
50680 +atomic_unchecked_t fscache_n_op_pend;
50681 +atomic_unchecked_t fscache_n_op_run;
50682 +atomic_unchecked_t fscache_n_op_enqueue;
50683 +atomic_unchecked_t fscache_n_op_requeue;
50684 +atomic_unchecked_t fscache_n_op_deferred_release;
50685 +atomic_unchecked_t fscache_n_op_release;
50686 +atomic_unchecked_t fscache_n_op_gc;
50687 +atomic_unchecked_t fscache_n_op_cancelled;
50688 +atomic_unchecked_t fscache_n_op_rejected;
50689
50690 -atomic_t fscache_n_attr_changed;
50691 -atomic_t fscache_n_attr_changed_ok;
50692 -atomic_t fscache_n_attr_changed_nobufs;
50693 -atomic_t fscache_n_attr_changed_nomem;
50694 -atomic_t fscache_n_attr_changed_calls;
50695 +atomic_unchecked_t fscache_n_attr_changed;
50696 +atomic_unchecked_t fscache_n_attr_changed_ok;
50697 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
50698 +atomic_unchecked_t fscache_n_attr_changed_nomem;
50699 +atomic_unchecked_t fscache_n_attr_changed_calls;
50700
50701 -atomic_t fscache_n_allocs;
50702 -atomic_t fscache_n_allocs_ok;
50703 -atomic_t fscache_n_allocs_wait;
50704 -atomic_t fscache_n_allocs_nobufs;
50705 -atomic_t fscache_n_allocs_intr;
50706 -atomic_t fscache_n_allocs_object_dead;
50707 -atomic_t fscache_n_alloc_ops;
50708 -atomic_t fscache_n_alloc_op_waits;
50709 +atomic_unchecked_t fscache_n_allocs;
50710 +atomic_unchecked_t fscache_n_allocs_ok;
50711 +atomic_unchecked_t fscache_n_allocs_wait;
50712 +atomic_unchecked_t fscache_n_allocs_nobufs;
50713 +atomic_unchecked_t fscache_n_allocs_intr;
50714 +atomic_unchecked_t fscache_n_allocs_object_dead;
50715 +atomic_unchecked_t fscache_n_alloc_ops;
50716 +atomic_unchecked_t fscache_n_alloc_op_waits;
50717
50718 -atomic_t fscache_n_retrievals;
50719 -atomic_t fscache_n_retrievals_ok;
50720 -atomic_t fscache_n_retrievals_wait;
50721 -atomic_t fscache_n_retrievals_nodata;
50722 -atomic_t fscache_n_retrievals_nobufs;
50723 -atomic_t fscache_n_retrievals_intr;
50724 -atomic_t fscache_n_retrievals_nomem;
50725 -atomic_t fscache_n_retrievals_object_dead;
50726 -atomic_t fscache_n_retrieval_ops;
50727 -atomic_t fscache_n_retrieval_op_waits;
50728 +atomic_unchecked_t fscache_n_retrievals;
50729 +atomic_unchecked_t fscache_n_retrievals_ok;
50730 +atomic_unchecked_t fscache_n_retrievals_wait;
50731 +atomic_unchecked_t fscache_n_retrievals_nodata;
50732 +atomic_unchecked_t fscache_n_retrievals_nobufs;
50733 +atomic_unchecked_t fscache_n_retrievals_intr;
50734 +atomic_unchecked_t fscache_n_retrievals_nomem;
50735 +atomic_unchecked_t fscache_n_retrievals_object_dead;
50736 +atomic_unchecked_t fscache_n_retrieval_ops;
50737 +atomic_unchecked_t fscache_n_retrieval_op_waits;
50738
50739 -atomic_t fscache_n_stores;
50740 -atomic_t fscache_n_stores_ok;
50741 -atomic_t fscache_n_stores_again;
50742 -atomic_t fscache_n_stores_nobufs;
50743 -atomic_t fscache_n_stores_oom;
50744 -atomic_t fscache_n_store_ops;
50745 -atomic_t fscache_n_store_calls;
50746 -atomic_t fscache_n_store_pages;
50747 -atomic_t fscache_n_store_radix_deletes;
50748 -atomic_t fscache_n_store_pages_over_limit;
50749 +atomic_unchecked_t fscache_n_stores;
50750 +atomic_unchecked_t fscache_n_stores_ok;
50751 +atomic_unchecked_t fscache_n_stores_again;
50752 +atomic_unchecked_t fscache_n_stores_nobufs;
50753 +atomic_unchecked_t fscache_n_stores_oom;
50754 +atomic_unchecked_t fscache_n_store_ops;
50755 +atomic_unchecked_t fscache_n_store_calls;
50756 +atomic_unchecked_t fscache_n_store_pages;
50757 +atomic_unchecked_t fscache_n_store_radix_deletes;
50758 +atomic_unchecked_t fscache_n_store_pages_over_limit;
50759
50760 -atomic_t fscache_n_store_vmscan_not_storing;
50761 -atomic_t fscache_n_store_vmscan_gone;
50762 -atomic_t fscache_n_store_vmscan_busy;
50763 -atomic_t fscache_n_store_vmscan_cancelled;
50764 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
50765 +atomic_unchecked_t fscache_n_store_vmscan_gone;
50766 +atomic_unchecked_t fscache_n_store_vmscan_busy;
50767 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
50768
50769 -atomic_t fscache_n_marks;
50770 -atomic_t fscache_n_uncaches;
50771 +atomic_unchecked_t fscache_n_marks;
50772 +atomic_unchecked_t fscache_n_uncaches;
50773
50774 -atomic_t fscache_n_acquires;
50775 -atomic_t fscache_n_acquires_null;
50776 -atomic_t fscache_n_acquires_no_cache;
50777 -atomic_t fscache_n_acquires_ok;
50778 -atomic_t fscache_n_acquires_nobufs;
50779 -atomic_t fscache_n_acquires_oom;
50780 +atomic_unchecked_t fscache_n_acquires;
50781 +atomic_unchecked_t fscache_n_acquires_null;
50782 +atomic_unchecked_t fscache_n_acquires_no_cache;
50783 +atomic_unchecked_t fscache_n_acquires_ok;
50784 +atomic_unchecked_t fscache_n_acquires_nobufs;
50785 +atomic_unchecked_t fscache_n_acquires_oom;
50786
50787 -atomic_t fscache_n_updates;
50788 -atomic_t fscache_n_updates_null;
50789 -atomic_t fscache_n_updates_run;
50790 +atomic_unchecked_t fscache_n_updates;
50791 +atomic_unchecked_t fscache_n_updates_null;
50792 +atomic_unchecked_t fscache_n_updates_run;
50793
50794 -atomic_t fscache_n_relinquishes;
50795 -atomic_t fscache_n_relinquishes_null;
50796 -atomic_t fscache_n_relinquishes_waitcrt;
50797 -atomic_t fscache_n_relinquishes_retire;
50798 +atomic_unchecked_t fscache_n_relinquishes;
50799 +atomic_unchecked_t fscache_n_relinquishes_null;
50800 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
50801 +atomic_unchecked_t fscache_n_relinquishes_retire;
50802
50803 -atomic_t fscache_n_cookie_index;
50804 -atomic_t fscache_n_cookie_data;
50805 -atomic_t fscache_n_cookie_special;
50806 +atomic_unchecked_t fscache_n_cookie_index;
50807 +atomic_unchecked_t fscache_n_cookie_data;
50808 +atomic_unchecked_t fscache_n_cookie_special;
50809
50810 -atomic_t fscache_n_object_alloc;
50811 -atomic_t fscache_n_object_no_alloc;
50812 -atomic_t fscache_n_object_lookups;
50813 -atomic_t fscache_n_object_lookups_negative;
50814 -atomic_t fscache_n_object_lookups_positive;
50815 -atomic_t fscache_n_object_lookups_timed_out;
50816 -atomic_t fscache_n_object_created;
50817 -atomic_t fscache_n_object_avail;
50818 -atomic_t fscache_n_object_dead;
50819 +atomic_unchecked_t fscache_n_object_alloc;
50820 +atomic_unchecked_t fscache_n_object_no_alloc;
50821 +atomic_unchecked_t fscache_n_object_lookups;
50822 +atomic_unchecked_t fscache_n_object_lookups_negative;
50823 +atomic_unchecked_t fscache_n_object_lookups_positive;
50824 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
50825 +atomic_unchecked_t fscache_n_object_created;
50826 +atomic_unchecked_t fscache_n_object_avail;
50827 +atomic_unchecked_t fscache_n_object_dead;
50828
50829 -atomic_t fscache_n_checkaux_none;
50830 -atomic_t fscache_n_checkaux_okay;
50831 -atomic_t fscache_n_checkaux_update;
50832 -atomic_t fscache_n_checkaux_obsolete;
50833 +atomic_unchecked_t fscache_n_checkaux_none;
50834 +atomic_unchecked_t fscache_n_checkaux_okay;
50835 +atomic_unchecked_t fscache_n_checkaux_update;
50836 +atomic_unchecked_t fscache_n_checkaux_obsolete;
50837
50838 atomic_t fscache_n_cop_alloc_object;
50839 atomic_t fscache_n_cop_lookup_object;
50840 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
50841 seq_puts(m, "FS-Cache statistics\n");
50842
50843 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
50844 - atomic_read(&fscache_n_cookie_index),
50845 - atomic_read(&fscache_n_cookie_data),
50846 - atomic_read(&fscache_n_cookie_special));
50847 + atomic_read_unchecked(&fscache_n_cookie_index),
50848 + atomic_read_unchecked(&fscache_n_cookie_data),
50849 + atomic_read_unchecked(&fscache_n_cookie_special));
50850
50851 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
50852 - atomic_read(&fscache_n_object_alloc),
50853 - atomic_read(&fscache_n_object_no_alloc),
50854 - atomic_read(&fscache_n_object_avail),
50855 - atomic_read(&fscache_n_object_dead));
50856 + atomic_read_unchecked(&fscache_n_object_alloc),
50857 + atomic_read_unchecked(&fscache_n_object_no_alloc),
50858 + atomic_read_unchecked(&fscache_n_object_avail),
50859 + atomic_read_unchecked(&fscache_n_object_dead));
50860 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
50861 - atomic_read(&fscache_n_checkaux_none),
50862 - atomic_read(&fscache_n_checkaux_okay),
50863 - atomic_read(&fscache_n_checkaux_update),
50864 - atomic_read(&fscache_n_checkaux_obsolete));
50865 + atomic_read_unchecked(&fscache_n_checkaux_none),
50866 + atomic_read_unchecked(&fscache_n_checkaux_okay),
50867 + atomic_read_unchecked(&fscache_n_checkaux_update),
50868 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
50869
50870 seq_printf(m, "Pages : mrk=%u unc=%u\n",
50871 - atomic_read(&fscache_n_marks),
50872 - atomic_read(&fscache_n_uncaches));
50873 + atomic_read_unchecked(&fscache_n_marks),
50874 + atomic_read_unchecked(&fscache_n_uncaches));
50875
50876 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
50877 " oom=%u\n",
50878 - atomic_read(&fscache_n_acquires),
50879 - atomic_read(&fscache_n_acquires_null),
50880 - atomic_read(&fscache_n_acquires_no_cache),
50881 - atomic_read(&fscache_n_acquires_ok),
50882 - atomic_read(&fscache_n_acquires_nobufs),
50883 - atomic_read(&fscache_n_acquires_oom));
50884 + atomic_read_unchecked(&fscache_n_acquires),
50885 + atomic_read_unchecked(&fscache_n_acquires_null),
50886 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
50887 + atomic_read_unchecked(&fscache_n_acquires_ok),
50888 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
50889 + atomic_read_unchecked(&fscache_n_acquires_oom));
50890
50891 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
50892 - atomic_read(&fscache_n_object_lookups),
50893 - atomic_read(&fscache_n_object_lookups_negative),
50894 - atomic_read(&fscache_n_object_lookups_positive),
50895 - atomic_read(&fscache_n_object_lookups_timed_out),
50896 - atomic_read(&fscache_n_object_created));
50897 + atomic_read_unchecked(&fscache_n_object_lookups),
50898 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
50899 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
50900 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
50901 + atomic_read_unchecked(&fscache_n_object_created));
50902
50903 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
50904 - atomic_read(&fscache_n_updates),
50905 - atomic_read(&fscache_n_updates_null),
50906 - atomic_read(&fscache_n_updates_run));
50907 + atomic_read_unchecked(&fscache_n_updates),
50908 + atomic_read_unchecked(&fscache_n_updates_null),
50909 + atomic_read_unchecked(&fscache_n_updates_run));
50910
50911 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
50912 - atomic_read(&fscache_n_relinquishes),
50913 - atomic_read(&fscache_n_relinquishes_null),
50914 - atomic_read(&fscache_n_relinquishes_waitcrt),
50915 - atomic_read(&fscache_n_relinquishes_retire));
50916 + atomic_read_unchecked(&fscache_n_relinquishes),
50917 + atomic_read_unchecked(&fscache_n_relinquishes_null),
50918 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
50919 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
50920
50921 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
50922 - atomic_read(&fscache_n_attr_changed),
50923 - atomic_read(&fscache_n_attr_changed_ok),
50924 - atomic_read(&fscache_n_attr_changed_nobufs),
50925 - atomic_read(&fscache_n_attr_changed_nomem),
50926 - atomic_read(&fscache_n_attr_changed_calls));
50927 + atomic_read_unchecked(&fscache_n_attr_changed),
50928 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
50929 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
50930 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
50931 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
50932
50933 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
50934 - atomic_read(&fscache_n_allocs),
50935 - atomic_read(&fscache_n_allocs_ok),
50936 - atomic_read(&fscache_n_allocs_wait),
50937 - atomic_read(&fscache_n_allocs_nobufs),
50938 - atomic_read(&fscache_n_allocs_intr));
50939 + atomic_read_unchecked(&fscache_n_allocs),
50940 + atomic_read_unchecked(&fscache_n_allocs_ok),
50941 + atomic_read_unchecked(&fscache_n_allocs_wait),
50942 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
50943 + atomic_read_unchecked(&fscache_n_allocs_intr));
50944 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
50945 - atomic_read(&fscache_n_alloc_ops),
50946 - atomic_read(&fscache_n_alloc_op_waits),
50947 - atomic_read(&fscache_n_allocs_object_dead));
50948 + atomic_read_unchecked(&fscache_n_alloc_ops),
50949 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
50950 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
50951
50952 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
50953 " int=%u oom=%u\n",
50954 - atomic_read(&fscache_n_retrievals),
50955 - atomic_read(&fscache_n_retrievals_ok),
50956 - atomic_read(&fscache_n_retrievals_wait),
50957 - atomic_read(&fscache_n_retrievals_nodata),
50958 - atomic_read(&fscache_n_retrievals_nobufs),
50959 - atomic_read(&fscache_n_retrievals_intr),
50960 - atomic_read(&fscache_n_retrievals_nomem));
50961 + atomic_read_unchecked(&fscache_n_retrievals),
50962 + atomic_read_unchecked(&fscache_n_retrievals_ok),
50963 + atomic_read_unchecked(&fscache_n_retrievals_wait),
50964 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
50965 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
50966 + atomic_read_unchecked(&fscache_n_retrievals_intr),
50967 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
50968 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
50969 - atomic_read(&fscache_n_retrieval_ops),
50970 - atomic_read(&fscache_n_retrieval_op_waits),
50971 - atomic_read(&fscache_n_retrievals_object_dead));
50972 + atomic_read_unchecked(&fscache_n_retrieval_ops),
50973 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
50974 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
50975
50976 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
50977 - atomic_read(&fscache_n_stores),
50978 - atomic_read(&fscache_n_stores_ok),
50979 - atomic_read(&fscache_n_stores_again),
50980 - atomic_read(&fscache_n_stores_nobufs),
50981 - atomic_read(&fscache_n_stores_oom));
50982 + atomic_read_unchecked(&fscache_n_stores),
50983 + atomic_read_unchecked(&fscache_n_stores_ok),
50984 + atomic_read_unchecked(&fscache_n_stores_again),
50985 + atomic_read_unchecked(&fscache_n_stores_nobufs),
50986 + atomic_read_unchecked(&fscache_n_stores_oom));
50987 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
50988 - atomic_read(&fscache_n_store_ops),
50989 - atomic_read(&fscache_n_store_calls),
50990 - atomic_read(&fscache_n_store_pages),
50991 - atomic_read(&fscache_n_store_radix_deletes),
50992 - atomic_read(&fscache_n_store_pages_over_limit));
50993 + atomic_read_unchecked(&fscache_n_store_ops),
50994 + atomic_read_unchecked(&fscache_n_store_calls),
50995 + atomic_read_unchecked(&fscache_n_store_pages),
50996 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
50997 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
50998
50999 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
51000 - atomic_read(&fscache_n_store_vmscan_not_storing),
51001 - atomic_read(&fscache_n_store_vmscan_gone),
51002 - atomic_read(&fscache_n_store_vmscan_busy),
51003 - atomic_read(&fscache_n_store_vmscan_cancelled));
51004 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
51005 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
51006 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
51007 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
51008
51009 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
51010 - atomic_read(&fscache_n_op_pend),
51011 - atomic_read(&fscache_n_op_run),
51012 - atomic_read(&fscache_n_op_enqueue),
51013 - atomic_read(&fscache_n_op_cancelled),
51014 - atomic_read(&fscache_n_op_rejected));
51015 + atomic_read_unchecked(&fscache_n_op_pend),
51016 + atomic_read_unchecked(&fscache_n_op_run),
51017 + atomic_read_unchecked(&fscache_n_op_enqueue),
51018 + atomic_read_unchecked(&fscache_n_op_cancelled),
51019 + atomic_read_unchecked(&fscache_n_op_rejected));
51020 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
51021 - atomic_read(&fscache_n_op_deferred_release),
51022 - atomic_read(&fscache_n_op_release),
51023 - atomic_read(&fscache_n_op_gc));
51024 + atomic_read_unchecked(&fscache_n_op_deferred_release),
51025 + atomic_read_unchecked(&fscache_n_op_release),
51026 + atomic_read_unchecked(&fscache_n_op_gc));
51027
51028 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
51029 atomic_read(&fscache_n_cop_alloc_object),
51030 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
51031 index de792dc..448b532 100644
51032 --- a/fs/fuse/cuse.c
51033 +++ b/fs/fuse/cuse.c
51034 @@ -576,10 +576,12 @@ static int __init cuse_init(void)
51035 INIT_LIST_HEAD(&cuse_conntbl[i]);
51036
51037 /* inherit and extend fuse_dev_operations */
51038 - cuse_channel_fops = fuse_dev_operations;
51039 - cuse_channel_fops.owner = THIS_MODULE;
51040 - cuse_channel_fops.open = cuse_channel_open;
51041 - cuse_channel_fops.release = cuse_channel_release;
51042 + pax_open_kernel();
51043 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
51044 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
51045 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
51046 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
51047 + pax_close_kernel();
51048
51049 cuse_class = class_create(THIS_MODULE, "cuse");
51050 if (IS_ERR(cuse_class))
51051 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
51052 index 1facb39..7f48557 100644
51053 --- a/fs/fuse/dev.c
51054 +++ b/fs/fuse/dev.c
51055 @@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
51056 {
51057 struct fuse_notify_inval_entry_out outarg;
51058 int err = -EINVAL;
51059 - char buf[FUSE_NAME_MAX+1];
51060 + char *buf = NULL;
51061 struct qstr name;
51062
51063 if (size < sizeof(outarg))
51064 @@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
51065 if (outarg.namelen > FUSE_NAME_MAX)
51066 goto err;
51067
51068 + err = -ENOMEM;
51069 + buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
51070 + if (!buf)
51071 + goto err;
51072 +
51073 err = -EINVAL;
51074 if (size != sizeof(outarg) + outarg.namelen + 1)
51075 goto err;
51076 @@ -914,17 +919,15 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
51077
51078 down_read(&fc->killsb);
51079 err = -ENOENT;
51080 - if (!fc->sb)
51081 - goto err_unlock;
51082 -
51083 - err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
51084 -
51085 -err_unlock:
51086 + if (fc->sb)
51087 + err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
51088 up_read(&fc->killsb);
51089 + kfree(buf);
51090 return err;
51091
51092 err:
51093 fuse_copy_finish(cs);
51094 + kfree(buf);
51095 return err;
51096 }
51097
51098 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
51099 index 4787ae6..73efff7 100644
51100 --- a/fs/fuse/dir.c
51101 +++ b/fs/fuse/dir.c
51102 @@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *dentry)
51103 return link;
51104 }
51105
51106 -static void free_link(char *link)
51107 +static void free_link(const char *link)
51108 {
51109 if (!IS_ERR(link))
51110 free_page((unsigned long) link);
51111 diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
51112 index 247436c..e650ccb 100644
51113 --- a/fs/gfs2/ops_inode.c
51114 +++ b/fs/gfs2/ops_inode.c
51115 @@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
51116 unsigned int x;
51117 int error;
51118
51119 + pax_track_stack();
51120 +
51121 if (ndentry->d_inode) {
51122 nip = GFS2_I(ndentry->d_inode);
51123 if (ip == nip)
51124 diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
51125 index 4463297..4fed53b 100644
51126 --- a/fs/gfs2/sys.c
51127 +++ b/fs/gfs2/sys.c
51128 @@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct kobject *kobj, struct attribute *attr,
51129 return a->store ? a->store(sdp, buf, len) : len;
51130 }
51131
51132 -static struct sysfs_ops gfs2_attr_ops = {
51133 +static const struct sysfs_ops gfs2_attr_ops = {
51134 .show = gfs2_attr_show,
51135 .store = gfs2_attr_store,
51136 };
51137 @@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
51138 return 0;
51139 }
51140
51141 -static struct kset_uevent_ops gfs2_uevent_ops = {
51142 +static const struct kset_uevent_ops gfs2_uevent_ops = {
51143 .uevent = gfs2_uevent,
51144 };
51145
51146 diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
51147 index f6874ac..7cd98a8 100644
51148 --- a/fs/hfsplus/catalog.c
51149 +++ b/fs/hfsplus/catalog.c
51150 @@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
51151 int err;
51152 u16 type;
51153
51154 + pax_track_stack();
51155 +
51156 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
51157 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
51158 if (err)
51159 @@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir, struct qstr *str, struct ino
51160 int entry_size;
51161 int err;
51162
51163 + pax_track_stack();
51164 +
51165 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
51166 sb = dir->i_sb;
51167 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
51168 @@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
51169 int entry_size, type;
51170 int err = 0;
51171
51172 + pax_track_stack();
51173 +
51174 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
51175 dst_dir->i_ino, dst_name->name);
51176 sb = src_dir->i_sb;
51177 diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
51178 index 5f40236..dac3421 100644
51179 --- a/fs/hfsplus/dir.c
51180 +++ b/fs/hfsplus/dir.c
51181 @@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
51182 struct hfsplus_readdir_data *rd;
51183 u16 type;
51184
51185 + pax_track_stack();
51186 +
51187 if (filp->f_pos >= inode->i_size)
51188 return 0;
51189
51190 diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
51191 index 1bcf597..905a251 100644
51192 --- a/fs/hfsplus/inode.c
51193 +++ b/fs/hfsplus/inode.c
51194 @@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
51195 int res = 0;
51196 u16 type;
51197
51198 + pax_track_stack();
51199 +
51200 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
51201
51202 HFSPLUS_I(inode).dev = 0;
51203 @@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode *inode)
51204 struct hfs_find_data fd;
51205 hfsplus_cat_entry entry;
51206
51207 + pax_track_stack();
51208 +
51209 if (HFSPLUS_IS_RSRC(inode))
51210 main_inode = HFSPLUS_I(inode).rsrc_inode;
51211
51212 diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
51213 index f457d2c..7ef4ad5 100644
51214 --- a/fs/hfsplus/ioctl.c
51215 +++ b/fs/hfsplus/ioctl.c
51216 @@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dentry, const char *name,
51217 struct hfsplus_cat_file *file;
51218 int res;
51219
51220 + pax_track_stack();
51221 +
51222 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
51223 return -EOPNOTSUPP;
51224
51225 @@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
51226 struct hfsplus_cat_file *file;
51227 ssize_t res = 0;
51228
51229 + pax_track_stack();
51230 +
51231 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
51232 return -EOPNOTSUPP;
51233
51234 diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
51235 index 43022f3..7298079 100644
51236 --- a/fs/hfsplus/super.c
51237 +++ b/fs/hfsplus/super.c
51238 @@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
51239 struct nls_table *nls = NULL;
51240 int err = -EINVAL;
51241
51242 + pax_track_stack();
51243 +
51244 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
51245 if (!sbi)
51246 return -ENOMEM;
51247 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
51248 index 87a1258..5694d91 100644
51249 --- a/fs/hugetlbfs/inode.c
51250 +++ b/fs/hugetlbfs/inode.c
51251 @@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs_fs_type = {
51252 .kill_sb = kill_litter_super,
51253 };
51254
51255 -static struct vfsmount *hugetlbfs_vfsmount;
51256 +struct vfsmount *hugetlbfs_vfsmount;
51257
51258 static int can_do_hugetlb_shm(void)
51259 {
51260 diff --git a/fs/ioctl.c b/fs/ioctl.c
51261 index 6c75110..19d2c3c 100644
51262 --- a/fs/ioctl.c
51263 +++ b/fs/ioctl.c
51264 @@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
51265 u64 phys, u64 len, u32 flags)
51266 {
51267 struct fiemap_extent extent;
51268 - struct fiemap_extent *dest = fieinfo->fi_extents_start;
51269 + struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
51270
51271 /* only count the extents */
51272 if (fieinfo->fi_extents_max == 0) {
51273 @@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
51274
51275 fieinfo.fi_flags = fiemap.fm_flags;
51276 fieinfo.fi_extents_max = fiemap.fm_extent_count;
51277 - fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
51278 + fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
51279
51280 if (fiemap.fm_extent_count != 0 &&
51281 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
51282 @@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
51283 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
51284 fiemap.fm_flags = fieinfo.fi_flags;
51285 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
51286 - if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
51287 + if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
51288 error = -EFAULT;
51289
51290 return error;
51291 diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
51292 index b0435dd..81ee0be 100644
51293 --- a/fs/jbd/checkpoint.c
51294 +++ b/fs/jbd/checkpoint.c
51295 @@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal)
51296 tid_t this_tid;
51297 int result;
51298
51299 + pax_track_stack();
51300 +
51301 jbd_debug(1, "Start checkpoint\n");
51302
51303 /*
51304 diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
51305 index 546d153..736896c 100644
51306 --- a/fs/jffs2/compr_rtime.c
51307 +++ b/fs/jffs2/compr_rtime.c
51308 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned char *data_in,
51309 int outpos = 0;
51310 int pos=0;
51311
51312 + pax_track_stack();
51313 +
51314 memset(positions,0,sizeof(positions));
51315
51316 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
51317 @@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
51318 int outpos = 0;
51319 int pos=0;
51320
51321 + pax_track_stack();
51322 +
51323 memset(positions,0,sizeof(positions));
51324
51325 while (outpos<destlen) {
51326 diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
51327 index 170d289..3254b98 100644
51328 --- a/fs/jffs2/compr_rubin.c
51329 +++ b/fs/jffs2/compr_rubin.c
51330 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsigned char *data_in,
51331 int ret;
51332 uint32_t mysrclen, mydstlen;
51333
51334 + pax_track_stack();
51335 +
51336 mysrclen = *sourcelen;
51337 mydstlen = *dstlen - 8;
51338
51339 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
51340 index b47679b..00d65d3 100644
51341 --- a/fs/jffs2/erase.c
51342 +++ b/fs/jffs2/erase.c
51343 @@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
51344 struct jffs2_unknown_node marker = {
51345 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
51346 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
51347 - .totlen = cpu_to_je32(c->cleanmarker_size)
51348 + .totlen = cpu_to_je32(c->cleanmarker_size),
51349 + .hdr_crc = cpu_to_je32(0)
51350 };
51351
51352 jffs2_prealloc_raw_node_refs(c, jeb, 1);
51353 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
51354 index 5ef7bac..4fd1e3c 100644
51355 --- a/fs/jffs2/wbuf.c
51356 +++ b/fs/jffs2/wbuf.c
51357 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
51358 {
51359 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
51360 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
51361 - .totlen = constant_cpu_to_je32(8)
51362 + .totlen = constant_cpu_to_je32(8),
51363 + .hdr_crc = constant_cpu_to_je32(0)
51364 };
51365
51366 /*
51367 diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
51368 index 082e844..52012a1 100644
51369 --- a/fs/jffs2/xattr.c
51370 +++ b/fs/jffs2/xattr.c
51371 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
51372
51373 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
51374
51375 + pax_track_stack();
51376 +
51377 /* Phase.1 : Merge same xref */
51378 for (i=0; i < XREF_TMPHASH_SIZE; i++)
51379 xref_tmphash[i] = NULL;
51380 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
51381 index 2234c73..f6e6e6b 100644
51382 --- a/fs/jfs/super.c
51383 +++ b/fs/jfs/super.c
51384 @@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
51385
51386 jfs_inode_cachep =
51387 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
51388 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
51389 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
51390 init_once);
51391 if (jfs_inode_cachep == NULL)
51392 return -ENOMEM;
51393 diff --git a/fs/libfs.c b/fs/libfs.c
51394 index ba36e93..3153fce 100644
51395 --- a/fs/libfs.c
51396 +++ b/fs/libfs.c
51397 @@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
51398
51399 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
51400 struct dentry *next;
51401 + char d_name[sizeof(next->d_iname)];
51402 + const unsigned char *name;
51403 +
51404 next = list_entry(p, struct dentry, d_u.d_child);
51405 if (d_unhashed(next) || !next->d_inode)
51406 continue;
51407
51408 spin_unlock(&dcache_lock);
51409 - if (filldir(dirent, next->d_name.name,
51410 + name = next->d_name.name;
51411 + if (name == next->d_iname) {
51412 + memcpy(d_name, name, next->d_name.len);
51413 + name = d_name;
51414 + }
51415 + if (filldir(dirent, name,
51416 next->d_name.len, filp->f_pos,
51417 next->d_inode->i_ino,
51418 dt_type(next->d_inode)) < 0)
51419 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
51420 index c325a83..d15b07b 100644
51421 --- a/fs/lockd/clntproc.c
51422 +++ b/fs/lockd/clntproc.c
51423 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
51424 /*
51425 * Cookie counter for NLM requests
51426 */
51427 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
51428 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
51429
51430 void nlmclnt_next_cookie(struct nlm_cookie *c)
51431 {
51432 - u32 cookie = atomic_inc_return(&nlm_cookie);
51433 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
51434
51435 memcpy(c->data, &cookie, 4);
51436 c->len=4;
51437 @@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
51438 struct nlm_rqst reqst, *req;
51439 int status;
51440
51441 + pax_track_stack();
51442 +
51443 req = &reqst;
51444 memset(req, 0, sizeof(*req));
51445 locks_init_lock(&req->a_args.lock.fl);
51446 diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
51447 index 1a54ae1..6a16c27 100644
51448 --- a/fs/lockd/svc.c
51449 +++ b/fs/lockd/svc.c
51450 @@ -43,7 +43,7 @@
51451
51452 static struct svc_program nlmsvc_program;
51453
51454 -struct nlmsvc_binding * nlmsvc_ops;
51455 +const struct nlmsvc_binding * nlmsvc_ops;
51456 EXPORT_SYMBOL_GPL(nlmsvc_ops);
51457
51458 static DEFINE_MUTEX(nlmsvc_mutex);
51459 diff --git a/fs/locks.c b/fs/locks.c
51460 index a8794f2..4041e55 100644
51461 --- a/fs/locks.c
51462 +++ b/fs/locks.c
51463 @@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
51464
51465 static struct kmem_cache *filelock_cache __read_mostly;
51466
51467 +static void locks_init_lock_always(struct file_lock *fl)
51468 +{
51469 + fl->fl_next = NULL;
51470 + fl->fl_fasync = NULL;
51471 + fl->fl_owner = NULL;
51472 + fl->fl_pid = 0;
51473 + fl->fl_nspid = NULL;
51474 + fl->fl_file = NULL;
51475 + fl->fl_flags = 0;
51476 + fl->fl_type = 0;
51477 + fl->fl_start = fl->fl_end = 0;
51478 +}
51479 +
51480 /* Allocate an empty lock structure. */
51481 static struct file_lock *locks_alloc_lock(void)
51482 {
51483 - return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
51484 + struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
51485 +
51486 + if (fl)
51487 + locks_init_lock_always(fl);
51488 +
51489 + return fl;
51490 }
51491
51492 void locks_release_private(struct file_lock *fl)
51493 @@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *fl)
51494 INIT_LIST_HEAD(&fl->fl_link);
51495 INIT_LIST_HEAD(&fl->fl_block);
51496 init_waitqueue_head(&fl->fl_wait);
51497 - fl->fl_next = NULL;
51498 - fl->fl_fasync = NULL;
51499 - fl->fl_owner = NULL;
51500 - fl->fl_pid = 0;
51501 - fl->fl_nspid = NULL;
51502 - fl->fl_file = NULL;
51503 - fl->fl_flags = 0;
51504 - fl->fl_type = 0;
51505 - fl->fl_start = fl->fl_end = 0;
51506 fl->fl_ops = NULL;
51507 fl->fl_lmops = NULL;
51508 + locks_init_lock_always(fl);
51509 }
51510
51511 EXPORT_SYMBOL(locks_init_lock);
51512 @@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *filp)
51513 return;
51514
51515 if (filp->f_op && filp->f_op->flock) {
51516 - struct file_lock fl = {
51517 + struct file_lock flock = {
51518 .fl_pid = current->tgid,
51519 .fl_file = filp,
51520 .fl_flags = FL_FLOCK,
51521 .fl_type = F_UNLCK,
51522 .fl_end = OFFSET_MAX,
51523 };
51524 - filp->f_op->flock(filp, F_SETLKW, &fl);
51525 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
51526 - fl.fl_ops->fl_release_private(&fl);
51527 + filp->f_op->flock(filp, F_SETLKW, &flock);
51528 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
51529 + flock.fl_ops->fl_release_private(&flock);
51530 }
51531
51532 lock_kernel();
51533 diff --git a/fs/mbcache.c b/fs/mbcache.c
51534 index ec88ff3..b843a82 100644
51535 --- a/fs/mbcache.c
51536 +++ b/fs/mbcache.c
51537 @@ -266,9 +266,9 @@ mb_cache_create(const char *name, struct mb_cache_op *cache_op,
51538 if (!cache)
51539 goto fail;
51540 cache->c_name = name;
51541 - cache->c_op.free = NULL;
51542 + *(void **)&cache->c_op.free = NULL;
51543 if (cache_op)
51544 - cache->c_op.free = cache_op->free;
51545 + *(void **)&cache->c_op.free = cache_op->free;
51546 atomic_set(&cache->c_entry_count, 0);
51547 cache->c_bucket_bits = bucket_bits;
51548 #ifdef MB_CACHE_INDEXES_COUNT
51549 diff --git a/fs/namei.c b/fs/namei.c
51550 index b0afbd4..8d065a1 100644
51551 --- a/fs/namei.c
51552 +++ b/fs/namei.c
51553 @@ -224,6 +224,14 @@ int generic_permission(struct inode *inode, int mask,
51554 return ret;
51555
51556 /*
51557 + * Searching includes executable on directories, else just read.
51558 + */
51559 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
51560 + if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
51561 + if (capable(CAP_DAC_READ_SEARCH))
51562 + return 0;
51563 +
51564 + /*
51565 * Read/write DACs are always overridable.
51566 * Executable DACs are overridable if at least one exec bit is set.
51567 */
51568 @@ -231,14 +239,6 @@ int generic_permission(struct inode *inode, int mask,
51569 if (capable(CAP_DAC_OVERRIDE))
51570 return 0;
51571
51572 - /*
51573 - * Searching includes executable on directories, else just read.
51574 - */
51575 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
51576 - if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
51577 - if (capable(CAP_DAC_READ_SEARCH))
51578 - return 0;
51579 -
51580 return -EACCES;
51581 }
51582
51583 @@ -458,7 +458,8 @@ static int exec_permission_lite(struct inode *inode)
51584 if (!ret)
51585 goto ok;
51586
51587 - if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
51588 + if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
51589 + capable(CAP_DAC_OVERRIDE))
51590 goto ok;
51591
51592 return ret;
51593 @@ -638,7 +639,7 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
51594 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
51595 error = PTR_ERR(cookie);
51596 if (!IS_ERR(cookie)) {
51597 - char *s = nd_get_link(nd);
51598 + const char *s = nd_get_link(nd);
51599 error = 0;
51600 if (s)
51601 error = __vfs_follow_link(nd, s);
51602 @@ -669,6 +670,13 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd)
51603 err = security_inode_follow_link(path->dentry, nd);
51604 if (err)
51605 goto loop;
51606 +
51607 + if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
51608 + path->dentry->d_inode, path->dentry, nd->path.mnt)) {
51609 + err = -EACCES;
51610 + goto loop;
51611 + }
51612 +
51613 current->link_count++;
51614 current->total_link_count++;
51615 nd->depth++;
51616 @@ -1016,11 +1024,19 @@ return_reval:
51617 break;
51618 }
51619 return_base:
51620 + if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT)) &&
51621 + !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
51622 + path_put(&nd->path);
51623 + return -ENOENT;
51624 + }
51625 return 0;
51626 out_dput:
51627 path_put_conditional(&next, nd);
51628 break;
51629 }
51630 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
51631 + err = -ENOENT;
51632 +
51633 path_put(&nd->path);
51634 return_err:
51635 return err;
51636 @@ -1091,13 +1107,20 @@ static int do_path_lookup(int dfd, const char *name,
51637 int retval = path_init(dfd, name, flags, nd);
51638 if (!retval)
51639 retval = path_walk(name, nd);
51640 - if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
51641 - nd->path.dentry->d_inode))
51642 - audit_inode(name, nd->path.dentry);
51643 +
51644 + if (likely(!retval)) {
51645 + if (nd->path.dentry && nd->path.dentry->d_inode) {
51646 + if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
51647 + retval = -ENOENT;
51648 + if (!audit_dummy_context())
51649 + audit_inode(name, nd->path.dentry);
51650 + }
51651 + }
51652 if (nd->root.mnt) {
51653 path_put(&nd->root);
51654 nd->root.mnt = NULL;
51655 }
51656 +
51657 return retval;
51658 }
51659
51660 @@ -1576,6 +1599,20 @@ int may_open(struct path *path, int acc_mode, int flag)
51661 if (error)
51662 goto err_out;
51663
51664 +
51665 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
51666 + error = -EPERM;
51667 + goto err_out;
51668 + }
51669 + if (gr_handle_rawio(inode)) {
51670 + error = -EPERM;
51671 + goto err_out;
51672 + }
51673 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode)) {
51674 + error = -EACCES;
51675 + goto err_out;
51676 + }
51677 +
51678 if (flag & O_TRUNC) {
51679 error = get_write_access(inode);
51680 if (error)
51681 @@ -1620,6 +1657,17 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
51682 {
51683 int error;
51684 struct dentry *dir = nd->path.dentry;
51685 + int acc_mode = ACC_MODE(flag);
51686 +
51687 + if (flag & O_TRUNC)
51688 + acc_mode |= MAY_WRITE;
51689 + if (flag & O_APPEND)
51690 + acc_mode |= MAY_APPEND;
51691 +
51692 + if (!gr_acl_handle_creat(path->dentry, dir, nd->path.mnt, flag, acc_mode, mode)) {
51693 + error = -EACCES;
51694 + goto out_unlock;
51695 + }
51696
51697 if (!IS_POSIXACL(dir->d_inode))
51698 mode &= ~current_umask();
51699 @@ -1627,6 +1675,8 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
51700 if (error)
51701 goto out_unlock;
51702 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
51703 + if (!error)
51704 + gr_handle_create(path->dentry, nd->path.mnt);
51705 out_unlock:
51706 mutex_unlock(&dir->d_inode->i_mutex);
51707 dput(nd->path.dentry);
51708 @@ -1709,6 +1759,22 @@ struct file *do_filp_open(int dfd, const char *pathname,
51709 &nd, flag);
51710 if (error)
51711 return ERR_PTR(error);
51712 +
51713 + if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
51714 + error = -EPERM;
51715 + goto exit;
51716 + }
51717 +
51718 + if (gr_handle_rawio(nd.path.dentry->d_inode)) {
51719 + error = -EPERM;
51720 + goto exit;
51721 + }
51722 +
51723 + if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, acc_mode)) {
51724 + error = -EACCES;
51725 + goto exit;
51726 + }
51727 +
51728 goto ok;
51729 }
51730
51731 @@ -1795,6 +1861,19 @@ do_last:
51732 /*
51733 * It already exists.
51734 */
51735 +
51736 + if (!gr_acl_handle_hidden_file(path.dentry, path.mnt)) {
51737 + error = -ENOENT;
51738 + goto exit_mutex_unlock;
51739 + }
51740 +
51741 + /* only check if O_CREAT is specified, all other checks need
51742 + to go into may_open */
51743 + if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
51744 + error = -EACCES;
51745 + goto exit_mutex_unlock;
51746 + }
51747 +
51748 mutex_unlock(&dir->d_inode->i_mutex);
51749 audit_inode(pathname, path.dentry);
51750
51751 @@ -1887,6 +1966,13 @@ do_link:
51752 error = security_inode_follow_link(path.dentry, &nd);
51753 if (error)
51754 goto exit_dput;
51755 +
51756 + if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
51757 + path.dentry, nd.path.mnt)) {
51758 + error = -EACCES;
51759 + goto exit_dput;
51760 + }
51761 +
51762 error = __do_follow_link(&path, &nd);
51763 if (error) {
51764 /* Does someone understand code flow here? Or it is only
51765 @@ -1984,6 +2070,10 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir)
51766 }
51767 return dentry;
51768 eexist:
51769 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
51770 + dput(dentry);
51771 + return ERR_PTR(-ENOENT);
51772 + }
51773 dput(dentry);
51774 dentry = ERR_PTR(-EEXIST);
51775 fail:
51776 @@ -2061,6 +2151,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
51777 error = may_mknod(mode);
51778 if (error)
51779 goto out_dput;
51780 +
51781 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
51782 + error = -EPERM;
51783 + goto out_dput;
51784 + }
51785 +
51786 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
51787 + error = -EACCES;
51788 + goto out_dput;
51789 + }
51790 +
51791 error = mnt_want_write(nd.path.mnt);
51792 if (error)
51793 goto out_dput;
51794 @@ -2081,6 +2182,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
51795 }
51796 out_drop_write:
51797 mnt_drop_write(nd.path.mnt);
51798 +
51799 + if (!error)
51800 + gr_handle_create(dentry, nd.path.mnt);
51801 out_dput:
51802 dput(dentry);
51803 out_unlock:
51804 @@ -2134,6 +2238,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
51805 if (IS_ERR(dentry))
51806 goto out_unlock;
51807
51808 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
51809 + error = -EACCES;
51810 + goto out_dput;
51811 + }
51812 +
51813 if (!IS_POSIXACL(nd.path.dentry->d_inode))
51814 mode &= ~current_umask();
51815 error = mnt_want_write(nd.path.mnt);
51816 @@ -2145,6 +2254,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
51817 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
51818 out_drop_write:
51819 mnt_drop_write(nd.path.mnt);
51820 +
51821 + if (!error)
51822 + gr_handle_create(dentry, nd.path.mnt);
51823 +
51824 out_dput:
51825 dput(dentry);
51826 out_unlock:
51827 @@ -2226,6 +2339,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
51828 char * name;
51829 struct dentry *dentry;
51830 struct nameidata nd;
51831 + ino_t saved_ino = 0;
51832 + dev_t saved_dev = 0;
51833
51834 error = user_path_parent(dfd, pathname, &nd, &name);
51835 if (error)
51836 @@ -2250,6 +2365,17 @@ static long do_rmdir(int dfd, const char __user *pathname)
51837 error = PTR_ERR(dentry);
51838 if (IS_ERR(dentry))
51839 goto exit2;
51840 +
51841 + if (dentry->d_inode != NULL) {
51842 + saved_ino = dentry->d_inode->i_ino;
51843 + saved_dev = gr_get_dev_from_dentry(dentry);
51844 +
51845 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
51846 + error = -EACCES;
51847 + goto exit3;
51848 + }
51849 + }
51850 +
51851 error = mnt_want_write(nd.path.mnt);
51852 if (error)
51853 goto exit3;
51854 @@ -2257,6 +2383,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
51855 if (error)
51856 goto exit4;
51857 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
51858 + if (!error && (saved_dev || saved_ino))
51859 + gr_handle_delete(saved_ino, saved_dev);
51860 exit4:
51861 mnt_drop_write(nd.path.mnt);
51862 exit3:
51863 @@ -2318,6 +2446,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
51864 struct dentry *dentry;
51865 struct nameidata nd;
51866 struct inode *inode = NULL;
51867 + ino_t saved_ino = 0;
51868 + dev_t saved_dev = 0;
51869
51870 error = user_path_parent(dfd, pathname, &nd, &name);
51871 if (error)
51872 @@ -2337,8 +2467,19 @@ static long do_unlinkat(int dfd, const char __user *pathname)
51873 if (nd.last.name[nd.last.len])
51874 goto slashes;
51875 inode = dentry->d_inode;
51876 - if (inode)
51877 + if (inode) {
51878 + if (inode->i_nlink <= 1) {
51879 + saved_ino = inode->i_ino;
51880 + saved_dev = gr_get_dev_from_dentry(dentry);
51881 + }
51882 +
51883 atomic_inc(&inode->i_count);
51884 +
51885 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
51886 + error = -EACCES;
51887 + goto exit2;
51888 + }
51889 + }
51890 error = mnt_want_write(nd.path.mnt);
51891 if (error)
51892 goto exit2;
51893 @@ -2346,6 +2487,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
51894 if (error)
51895 goto exit3;
51896 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
51897 + if (!error && (saved_ino || saved_dev))
51898 + gr_handle_delete(saved_ino, saved_dev);
51899 exit3:
51900 mnt_drop_write(nd.path.mnt);
51901 exit2:
51902 @@ -2424,6 +2567,11 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
51903 if (IS_ERR(dentry))
51904 goto out_unlock;
51905
51906 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
51907 + error = -EACCES;
51908 + goto out_dput;
51909 + }
51910 +
51911 error = mnt_want_write(nd.path.mnt);
51912 if (error)
51913 goto out_dput;
51914 @@ -2431,6 +2579,8 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
51915 if (error)
51916 goto out_drop_write;
51917 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
51918 + if (!error)
51919 + gr_handle_create(dentry, nd.path.mnt);
51920 out_drop_write:
51921 mnt_drop_write(nd.path.mnt);
51922 out_dput:
51923 @@ -2524,6 +2674,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
51924 error = PTR_ERR(new_dentry);
51925 if (IS_ERR(new_dentry))
51926 goto out_unlock;
51927 +
51928 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
51929 + old_path.dentry->d_inode,
51930 + old_path.dentry->d_inode->i_mode, to)) {
51931 + error = -EACCES;
51932 + goto out_dput;
51933 + }
51934 +
51935 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
51936 + old_path.dentry, old_path.mnt, to)) {
51937 + error = -EACCES;
51938 + goto out_dput;
51939 + }
51940 +
51941 error = mnt_want_write(nd.path.mnt);
51942 if (error)
51943 goto out_dput;
51944 @@ -2531,6 +2695,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
51945 if (error)
51946 goto out_drop_write;
51947 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
51948 + if (!error)
51949 + gr_handle_create(new_dentry, nd.path.mnt);
51950 out_drop_write:
51951 mnt_drop_write(nd.path.mnt);
51952 out_dput:
51953 @@ -2708,6 +2874,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
51954 char *to;
51955 int error;
51956
51957 + pax_track_stack();
51958 +
51959 error = user_path_parent(olddfd, oldname, &oldnd, &from);
51960 if (error)
51961 goto exit;
51962 @@ -2764,6 +2932,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
51963 if (new_dentry == trap)
51964 goto exit5;
51965
51966 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
51967 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
51968 + to);
51969 + if (error)
51970 + goto exit5;
51971 +
51972 error = mnt_want_write(oldnd.path.mnt);
51973 if (error)
51974 goto exit5;
51975 @@ -2773,6 +2947,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
51976 goto exit6;
51977 error = vfs_rename(old_dir->d_inode, old_dentry,
51978 new_dir->d_inode, new_dentry);
51979 + if (!error)
51980 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
51981 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
51982 exit6:
51983 mnt_drop_write(oldnd.path.mnt);
51984 exit5:
51985 @@ -2798,6 +2975,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
51986
51987 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
51988 {
51989 + char tmpbuf[64];
51990 + const char *newlink;
51991 int len;
51992
51993 len = PTR_ERR(link);
51994 @@ -2807,7 +2986,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
51995 len = strlen(link);
51996 if (len > (unsigned) buflen)
51997 len = buflen;
51998 - if (copy_to_user(buffer, link, len))
51999 +
52000 + if (len < sizeof(tmpbuf)) {
52001 + memcpy(tmpbuf, link, len);
52002 + newlink = tmpbuf;
52003 + } else
52004 + newlink = link;
52005 +
52006 + if (copy_to_user(buffer, newlink, len))
52007 len = -EFAULT;
52008 out:
52009 return len;
52010 diff --git a/fs/namespace.c b/fs/namespace.c
52011 index 2beb0fb..11a95a5 100644
52012 --- a/fs/namespace.c
52013 +++ b/fs/namespace.c
52014 @@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
52015 if (!(sb->s_flags & MS_RDONLY))
52016 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
52017 up_write(&sb->s_umount);
52018 +
52019 + gr_log_remount(mnt->mnt_devname, retval);
52020 +
52021 return retval;
52022 }
52023
52024 @@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
52025 security_sb_umount_busy(mnt);
52026 up_write(&namespace_sem);
52027 release_mounts(&umount_list);
52028 +
52029 + gr_log_unmount(mnt->mnt_devname, retval);
52030 +
52031 return retval;
52032 }
52033
52034 @@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
52035 if (retval)
52036 goto dput_out;
52037
52038 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
52039 + retval = -EPERM;
52040 + goto dput_out;
52041 + }
52042 +
52043 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
52044 + retval = -EPERM;
52045 + goto dput_out;
52046 + }
52047 +
52048 if (flags & MS_REMOUNT)
52049 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
52050 data_page);
52051 @@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
52052 dev_name, data_page);
52053 dput_out:
52054 path_put(&path);
52055 +
52056 + gr_log_mount(dev_name, dir_name, retval);
52057 +
52058 return retval;
52059 }
52060
52061 @@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
52062 goto out1;
52063 }
52064
52065 + if (gr_handle_chroot_pivot()) {
52066 + error = -EPERM;
52067 + path_put(&old);
52068 + goto out1;
52069 + }
52070 +
52071 read_lock(&current->fs->lock);
52072 root = current->fs->root;
52073 path_get(&current->fs->root);
52074 diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
52075 index b8b5b30..2bd9ccb 100644
52076 --- a/fs/ncpfs/dir.c
52077 +++ b/fs/ncpfs/dir.c
52078 @@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *dentry)
52079 int res, val = 0, len;
52080 __u8 __name[NCP_MAXPATHLEN + 1];
52081
52082 + pax_track_stack();
52083 +
52084 parent = dget_parent(dentry);
52085 dir = parent->d_inode;
52086
52087 @@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struc
52088 int error, res, len;
52089 __u8 __name[NCP_MAXPATHLEN + 1];
52090
52091 + pax_track_stack();
52092 +
52093 lock_kernel();
52094 error = -EIO;
52095 if (!ncp_conn_valid(server))
52096 @@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, int mode,
52097 int error, result, len;
52098 int opmode;
52099 __u8 __name[NCP_MAXPATHLEN + 1];
52100 -
52101 +
52102 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
52103 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
52104
52105 + pax_track_stack();
52106 +
52107 error = -EIO;
52108 lock_kernel();
52109 if (!ncp_conn_valid(server))
52110 @@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
52111 int error, len;
52112 __u8 __name[NCP_MAXPATHLEN + 1];
52113
52114 + pax_track_stack();
52115 +
52116 DPRINTK("ncp_mkdir: making %s/%s\n",
52117 dentry->d_parent->d_name.name, dentry->d_name.name);
52118
52119 @@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
52120 if (!ncp_conn_valid(server))
52121 goto out;
52122
52123 + pax_track_stack();
52124 +
52125 ncp_age_dentry(server, dentry);
52126 len = sizeof(__name);
52127 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
52128 @@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
52129 int old_len, new_len;
52130 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
52131
52132 + pax_track_stack();
52133 +
52134 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
52135 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
52136 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
52137 diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
52138 index cf98da1..da890a9 100644
52139 --- a/fs/ncpfs/inode.c
52140 +++ b/fs/ncpfs/inode.c
52141 @@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
52142 #endif
52143 struct ncp_entry_info finfo;
52144
52145 + pax_track_stack();
52146 +
52147 data.wdog_pid = NULL;
52148 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
52149 if (!server)
52150 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
52151 index bfaef7b..e9d03ca 100644
52152 --- a/fs/nfs/inode.c
52153 +++ b/fs/nfs/inode.c
52154 @@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
52155 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
52156 nfsi->attrtimeo_timestamp = jiffies;
52157
52158 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
52159 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
52160 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
52161 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
52162 else
52163 @@ -973,16 +973,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
52164 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
52165 }
52166
52167 -static atomic_long_t nfs_attr_generation_counter;
52168 +static atomic_long_unchecked_t nfs_attr_generation_counter;
52169
52170 static unsigned long nfs_read_attr_generation_counter(void)
52171 {
52172 - return atomic_long_read(&nfs_attr_generation_counter);
52173 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
52174 }
52175
52176 unsigned long nfs_inc_attr_generation_counter(void)
52177 {
52178 - return atomic_long_inc_return(&nfs_attr_generation_counter);
52179 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
52180 }
52181
52182 void nfs_fattr_init(struct nfs_fattr *fattr)
52183 diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
52184 index cc2f505..f6a236f 100644
52185 --- a/fs/nfsd/lockd.c
52186 +++ b/fs/nfsd/lockd.c
52187 @@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
52188 fput(filp);
52189 }
52190
52191 -static struct nlmsvc_binding nfsd_nlm_ops = {
52192 +static const struct nlmsvc_binding nfsd_nlm_ops = {
52193 .fopen = nlm_fopen, /* open file for locking */
52194 .fclose = nlm_fclose, /* close file */
52195 };
52196 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
52197 index cfc3391..dcc083a 100644
52198 --- a/fs/nfsd/nfs4state.c
52199 +++ b/fs/nfsd/nfs4state.c
52200 @@ -3459,6 +3459,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
52201 unsigned int cmd;
52202 int err;
52203
52204 + pax_track_stack();
52205 +
52206 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
52207 (long long) lock->lk_offset,
52208 (long long) lock->lk_length);
52209 diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
52210 index 4a82a96..0d5fb49 100644
52211 --- a/fs/nfsd/nfs4xdr.c
52212 +++ b/fs/nfsd/nfs4xdr.c
52213 @@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
52214 struct nfsd4_compoundres *resp = rqstp->rq_resp;
52215 u32 minorversion = resp->cstate.minorversion;
52216
52217 + pax_track_stack();
52218 +
52219 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
52220 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
52221 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
52222 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
52223 index 2e09588..596421d 100644
52224 --- a/fs/nfsd/vfs.c
52225 +++ b/fs/nfsd/vfs.c
52226 @@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
52227 } else {
52228 oldfs = get_fs();
52229 set_fs(KERNEL_DS);
52230 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
52231 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
52232 set_fs(oldfs);
52233 }
52234
52235 @@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
52236
52237 /* Write the data. */
52238 oldfs = get_fs(); set_fs(KERNEL_DS);
52239 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
52240 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
52241 set_fs(oldfs);
52242 if (host_err < 0)
52243 goto out_nfserr;
52244 @@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
52245 */
52246
52247 oldfs = get_fs(); set_fs(KERNEL_DS);
52248 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
52249 + host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
52250 set_fs(oldfs);
52251
52252 if (host_err < 0)
52253 diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
52254 index f6af760..d0adf34 100644
52255 --- a/fs/nilfs2/ioctl.c
52256 +++ b/fs/nilfs2/ioctl.c
52257 @@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
52258 unsigned int cmd, void __user *argp)
52259 {
52260 struct nilfs_argv argv[5];
52261 - const static size_t argsz[5] = {
52262 + static const size_t argsz[5] = {
52263 sizeof(struct nilfs_vdesc),
52264 sizeof(struct nilfs_period),
52265 sizeof(__u64),
52266 @@ -522,6 +522,9 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
52267 if (argv[n].v_nmembs > nsegs * nilfs->ns_blocks_per_segment)
52268 goto out_free;
52269
52270 + if (argv[n].v_nmembs >= UINT_MAX / argv[n].v_size)
52271 + goto out_free;
52272 +
52273 len = argv[n].v_size * argv[n].v_nmembs;
52274 base = (void __user *)(unsigned long)argv[n].v_base;
52275 if (len == 0) {
52276 diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
52277 index 7e54e52..9337248 100644
52278 --- a/fs/notify/dnotify/dnotify.c
52279 +++ b/fs/notify/dnotify/dnotify.c
52280 @@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsnotify_mark_entry *entry)
52281 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
52282 }
52283
52284 -static struct fsnotify_ops dnotify_fsnotify_ops = {
52285 +static const struct fsnotify_ops dnotify_fsnotify_ops = {
52286 .handle_event = dnotify_handle_event,
52287 .should_send_event = dnotify_should_send_event,
52288 .free_group_priv = NULL,
52289 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
52290 index b8bf53b..c518688 100644
52291 --- a/fs/notify/notification.c
52292 +++ b/fs/notify/notification.c
52293 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
52294 * get set to 0 so it will never get 'freed'
52295 */
52296 static struct fsnotify_event q_overflow_event;
52297 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
52298 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
52299
52300 /**
52301 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
52302 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
52303 */
52304 u32 fsnotify_get_cookie(void)
52305 {
52306 - return atomic_inc_return(&fsnotify_sync_cookie);
52307 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
52308 }
52309 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
52310
52311 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
52312 index 5a9e344..0f8cd28 100644
52313 --- a/fs/ntfs/dir.c
52314 +++ b/fs/ntfs/dir.c
52315 @@ -1328,7 +1328,7 @@ find_next_index_buffer:
52316 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
52317 ~(s64)(ndir->itype.index.block_size - 1)));
52318 /* Bounds checks. */
52319 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
52320 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
52321 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
52322 "inode 0x%lx or driver bug.", vdir->i_ino);
52323 goto err_out;
52324 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
52325 index 663c0e3..b6868e9 100644
52326 --- a/fs/ntfs/file.c
52327 +++ b/fs/ntfs/file.c
52328 @@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_inode_ops = {
52329 #endif /* NTFS_RW */
52330 };
52331
52332 -const struct file_operations ntfs_empty_file_ops = {};
52333 +const struct file_operations ntfs_empty_file_ops __read_only;
52334
52335 -const struct inode_operations ntfs_empty_inode_ops = {};
52336 +const struct inode_operations ntfs_empty_inode_ops __read_only;
52337 diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
52338 index 1cd2934..880b5d2 100644
52339 --- a/fs/ocfs2/cluster/masklog.c
52340 +++ b/fs/ocfs2/cluster/masklog.c
52341 @@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject *obj, struct attribute *attr,
52342 return mlog_mask_store(mlog_attr->mask, buf, count);
52343 }
52344
52345 -static struct sysfs_ops mlog_attr_ops = {
52346 +static const struct sysfs_ops mlog_attr_ops = {
52347 .show = mlog_show,
52348 .store = mlog_store,
52349 };
52350 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
52351 index ac10f83..2cd2607 100644
52352 --- a/fs/ocfs2/localalloc.c
52353 +++ b/fs/ocfs2/localalloc.c
52354 @@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
52355 goto bail;
52356 }
52357
52358 - atomic_inc(&osb->alloc_stats.moves);
52359 + atomic_inc_unchecked(&osb->alloc_stats.moves);
52360
52361 status = 0;
52362 bail:
52363 diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
52364 index f010b22..9f9ed34 100644
52365 --- a/fs/ocfs2/namei.c
52366 +++ b/fs/ocfs2/namei.c
52367 @@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *old_dir,
52368 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
52369 struct ocfs2_dir_lookup_result target_insert = { NULL, };
52370
52371 + pax_track_stack();
52372 +
52373 /* At some point it might be nice to break this function up a
52374 * bit. */
52375
52376 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
52377 index d963d86..914cfbd 100644
52378 --- a/fs/ocfs2/ocfs2.h
52379 +++ b/fs/ocfs2/ocfs2.h
52380 @@ -217,11 +217,11 @@ enum ocfs2_vol_state
52381
52382 struct ocfs2_alloc_stats
52383 {
52384 - atomic_t moves;
52385 - atomic_t local_data;
52386 - atomic_t bitmap_data;
52387 - atomic_t bg_allocs;
52388 - atomic_t bg_extends;
52389 + atomic_unchecked_t moves;
52390 + atomic_unchecked_t local_data;
52391 + atomic_unchecked_t bitmap_data;
52392 + atomic_unchecked_t bg_allocs;
52393 + atomic_unchecked_t bg_extends;
52394 };
52395
52396 enum ocfs2_local_alloc_state
52397 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
52398 index 79b5dac..d322952 100644
52399 --- a/fs/ocfs2/suballoc.c
52400 +++ b/fs/ocfs2/suballoc.c
52401 @@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
52402 mlog_errno(status);
52403 goto bail;
52404 }
52405 - atomic_inc(&osb->alloc_stats.bg_extends);
52406 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
52407
52408 /* You should never ask for this much metadata */
52409 BUG_ON(bits_wanted >
52410 @@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_super *osb,
52411 mlog_errno(status);
52412 goto bail;
52413 }
52414 - atomic_inc(&osb->alloc_stats.bg_allocs);
52415 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
52416
52417 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
52418 ac->ac_bits_given += (*num_bits);
52419 @@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_super *osb,
52420 mlog_errno(status);
52421 goto bail;
52422 }
52423 - atomic_inc(&osb->alloc_stats.bg_allocs);
52424 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
52425
52426 BUG_ON(num_bits != 1);
52427
52428 @@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
52429 cluster_start,
52430 num_clusters);
52431 if (!status)
52432 - atomic_inc(&osb->alloc_stats.local_data);
52433 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
52434 } else {
52435 if (min_clusters > (osb->bitmap_cpg - 1)) {
52436 /* The only paths asking for contiguousness
52437 @@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
52438 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
52439 bg_blkno,
52440 bg_bit_off);
52441 - atomic_inc(&osb->alloc_stats.bitmap_data);
52442 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
52443 }
52444 }
52445 if (status < 0) {
52446 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
52447 index 9f55be4..a3f8048 100644
52448 --- a/fs/ocfs2/super.c
52449 +++ b/fs/ocfs2/super.c
52450 @@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
52451 "%10s => GlobalAllocs: %d LocalAllocs: %d "
52452 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
52453 "Stats",
52454 - atomic_read(&osb->alloc_stats.bitmap_data),
52455 - atomic_read(&osb->alloc_stats.local_data),
52456 - atomic_read(&osb->alloc_stats.bg_allocs),
52457 - atomic_read(&osb->alloc_stats.moves),
52458 - atomic_read(&osb->alloc_stats.bg_extends));
52459 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
52460 + atomic_read_unchecked(&osb->alloc_stats.local_data),
52461 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
52462 + atomic_read_unchecked(&osb->alloc_stats.moves),
52463 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
52464
52465 out += snprintf(buf + out, len - out,
52466 "%10s => State: %u Descriptor: %llu Size: %u bits "
52467 @@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
52468 spin_lock_init(&osb->osb_xattr_lock);
52469 ocfs2_init_inode_steal_slot(osb);
52470
52471 - atomic_set(&osb->alloc_stats.moves, 0);
52472 - atomic_set(&osb->alloc_stats.local_data, 0);
52473 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
52474 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
52475 - atomic_set(&osb->alloc_stats.bg_extends, 0);
52476 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
52477 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
52478 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
52479 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
52480 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
52481
52482 /* Copy the blockcheck stats from the superblock probe */
52483 osb->osb_ecc_stats = *stats;
52484 diff --git a/fs/open.c b/fs/open.c
52485 index 4f01e06..091f6c3 100644
52486 --- a/fs/open.c
52487 +++ b/fs/open.c
52488 @@ -275,6 +275,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
52489 error = locks_verify_truncate(inode, NULL, length);
52490 if (!error)
52491 error = security_path_truncate(&path, length, 0);
52492 +
52493 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
52494 + error = -EACCES;
52495 +
52496 if (!error) {
52497 vfs_dq_init(inode);
52498 error = do_truncate(path.dentry, length, 0, NULL);
52499 @@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
52500 if (__mnt_is_readonly(path.mnt))
52501 res = -EROFS;
52502
52503 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
52504 + res = -EACCES;
52505 +
52506 out_path_release:
52507 path_put(&path);
52508 out:
52509 @@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
52510 if (error)
52511 goto dput_and_out;
52512
52513 + gr_log_chdir(path.dentry, path.mnt);
52514 +
52515 set_fs_pwd(current->fs, &path);
52516
52517 dput_and_out:
52518 @@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
52519 goto out_putf;
52520
52521 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
52522 +
52523 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
52524 + error = -EPERM;
52525 +
52526 + if (!error)
52527 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
52528 +
52529 if (!error)
52530 set_fs_pwd(current->fs, &file->f_path);
52531 out_putf:
52532 @@ -588,7 +604,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
52533 if (!capable(CAP_SYS_CHROOT))
52534 goto dput_and_out;
52535
52536 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
52537 + goto dput_and_out;
52538 +
52539 set_fs_root(current->fs, &path);
52540 +
52541 + gr_handle_chroot_chdir(&path);
52542 +
52543 error = 0;
52544 dput_and_out:
52545 path_put(&path);
52546 @@ -616,12 +638,27 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd, mode_t, mode)
52547 err = mnt_want_write_file(file);
52548 if (err)
52549 goto out_putf;
52550 +
52551 mutex_lock(&inode->i_mutex);
52552 +
52553 + if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
52554 + err = -EACCES;
52555 + goto out_unlock;
52556 + }
52557 +
52558 if (mode == (mode_t) -1)
52559 mode = inode->i_mode;
52560 +
52561 + if (gr_handle_chroot_chmod(dentry, file->f_path.mnt, mode)) {
52562 + err = -EPERM;
52563 + goto out_unlock;
52564 + }
52565 +
52566 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
52567 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
52568 err = notify_change(dentry, &newattrs);
52569 +
52570 +out_unlock:
52571 mutex_unlock(&inode->i_mutex);
52572 mnt_drop_write(file->f_path.mnt);
52573 out_putf:
52574 @@ -645,12 +682,27 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, mode_t, mode)
52575 error = mnt_want_write(path.mnt);
52576 if (error)
52577 goto dput_and_out;
52578 +
52579 mutex_lock(&inode->i_mutex);
52580 +
52581 + if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
52582 + error = -EACCES;
52583 + goto out_unlock;
52584 + }
52585 +
52586 if (mode == (mode_t) -1)
52587 mode = inode->i_mode;
52588 +
52589 + if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
52590 + error = -EACCES;
52591 + goto out_unlock;
52592 + }
52593 +
52594 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
52595 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
52596 error = notify_change(path.dentry, &newattrs);
52597 +
52598 +out_unlock:
52599 mutex_unlock(&inode->i_mutex);
52600 mnt_drop_write(path.mnt);
52601 dput_and_out:
52602 @@ -664,12 +716,15 @@ SYSCALL_DEFINE2(chmod, const char __user *, filename, mode_t, mode)
52603 return sys_fchmodat(AT_FDCWD, filename, mode);
52604 }
52605
52606 -static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
52607 +static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
52608 {
52609 struct inode *inode = dentry->d_inode;
52610 int error;
52611 struct iattr newattrs;
52612
52613 + if (!gr_acl_handle_chown(dentry, mnt))
52614 + return -EACCES;
52615 +
52616 newattrs.ia_valid = ATTR_CTIME;
52617 if (user != (uid_t) -1) {
52618 newattrs.ia_valid |= ATTR_UID;
52619 @@ -700,7 +755,7 @@ SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group)
52620 error = mnt_want_write(path.mnt);
52621 if (error)
52622 goto out_release;
52623 - error = chown_common(path.dentry, user, group);
52624 + error = chown_common(path.dentry, user, group, path.mnt);
52625 mnt_drop_write(path.mnt);
52626 out_release:
52627 path_put(&path);
52628 @@ -725,7 +780,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user,
52629 error = mnt_want_write(path.mnt);
52630 if (error)
52631 goto out_release;
52632 - error = chown_common(path.dentry, user, group);
52633 + error = chown_common(path.dentry, user, group, path.mnt);
52634 mnt_drop_write(path.mnt);
52635 out_release:
52636 path_put(&path);
52637 @@ -744,7 +799,7 @@ SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group
52638 error = mnt_want_write(path.mnt);
52639 if (error)
52640 goto out_release;
52641 - error = chown_common(path.dentry, user, group);
52642 + error = chown_common(path.dentry, user, group, path.mnt);
52643 mnt_drop_write(path.mnt);
52644 out_release:
52645 path_put(&path);
52646 @@ -767,7 +822,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
52647 goto out_fput;
52648 dentry = file->f_path.dentry;
52649 audit_inode(NULL, dentry);
52650 - error = chown_common(dentry, user, group);
52651 + error = chown_common(dentry, user, group, file->f_path.mnt);
52652 mnt_drop_write(file->f_path.mnt);
52653 out_fput:
52654 fput(file);
52655 @@ -1036,7 +1091,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
52656 if (!IS_ERR(tmp)) {
52657 fd = get_unused_fd_flags(flags);
52658 if (fd >= 0) {
52659 - struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
52660 + struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
52661 if (IS_ERR(f)) {
52662 put_unused_fd(fd);
52663 fd = PTR_ERR(f);
52664 diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
52665 index 6ab70f4..f4103d1 100644
52666 --- a/fs/partitions/efi.c
52667 +++ b/fs/partitions/efi.c
52668 @@ -231,14 +231,14 @@ alloc_read_gpt_entries(struct block_device *bdev, gpt_header *gpt)
52669 if (!bdev || !gpt)
52670 return NULL;
52671
52672 + if (!le32_to_cpu(gpt->num_partition_entries))
52673 + return NULL;
52674 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
52675 + if (!pte)
52676 + return NULL;
52677 +
52678 count = le32_to_cpu(gpt->num_partition_entries) *
52679 le32_to_cpu(gpt->sizeof_partition_entry);
52680 - if (!count)
52681 - return NULL;
52682 - pte = kzalloc(count, GFP_KERNEL);
52683 - if (!pte)
52684 - return NULL;
52685 -
52686 if (read_lba(bdev, le64_to_cpu(gpt->partition_entry_lba),
52687 (u8 *) pte,
52688 count) < count) {
52689 diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
52690 index dd6efdb..3babc6c 100644
52691 --- a/fs/partitions/ldm.c
52692 +++ b/fs/partitions/ldm.c
52693 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
52694 ldm_error ("A VBLK claims to have %d parts.", num);
52695 return false;
52696 }
52697 +
52698 if (rec >= num) {
52699 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
52700 return false;
52701 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
52702 goto found;
52703 }
52704
52705 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
52706 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
52707 if (!f) {
52708 ldm_crit ("Out of memory.");
52709 return false;
52710 diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c
52711 index 5765198..7f8e9e0 100644
52712 --- a/fs/partitions/mac.c
52713 +++ b/fs/partitions/mac.c
52714 @@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitions *state, struct block_device *bdev)
52715 return 0; /* not a MacOS disk */
52716 }
52717 blocks_in_map = be32_to_cpu(part->map_count);
52718 - if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
52719 - put_dev_sector(sect);
52720 - return 0;
52721 - }
52722 printk(" [mac]");
52723 + if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
52724 + put_dev_sector(sect);
52725 + return 0;
52726 + }
52727 for (slot = 1; slot <= blocks_in_map; ++slot) {
52728 int pos = slot * secsize;
52729 put_dev_sector(sect);
52730 diff --git a/fs/pipe.c b/fs/pipe.c
52731 index d0cc080..8a6f211 100644
52732 --- a/fs/pipe.c
52733 +++ b/fs/pipe.c
52734 @@ -401,9 +401,9 @@ redo:
52735 }
52736 if (bufs) /* More to do? */
52737 continue;
52738 - if (!pipe->writers)
52739 + if (!atomic_read(&pipe->writers))
52740 break;
52741 - if (!pipe->waiting_writers) {
52742 + if (!atomic_read(&pipe->waiting_writers)) {
52743 /* syscall merging: Usually we must not sleep
52744 * if O_NONBLOCK is set, or if we got some data.
52745 * But if a writer sleeps in kernel space, then
52746 @@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
52747 mutex_lock(&inode->i_mutex);
52748 pipe = inode->i_pipe;
52749
52750 - if (!pipe->readers) {
52751 + if (!atomic_read(&pipe->readers)) {
52752 send_sig(SIGPIPE, current, 0);
52753 ret = -EPIPE;
52754 goto out;
52755 @@ -511,7 +511,7 @@ redo1:
52756 for (;;) {
52757 int bufs;
52758
52759 - if (!pipe->readers) {
52760 + if (!atomic_read(&pipe->readers)) {
52761 send_sig(SIGPIPE, current, 0);
52762 if (!ret)
52763 ret = -EPIPE;
52764 @@ -597,9 +597,9 @@ redo2:
52765 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
52766 do_wakeup = 0;
52767 }
52768 - pipe->waiting_writers++;
52769 + atomic_inc(&pipe->waiting_writers);
52770 pipe_wait(pipe);
52771 - pipe->waiting_writers--;
52772 + atomic_dec(&pipe->waiting_writers);
52773 }
52774 out:
52775 mutex_unlock(&inode->i_mutex);
52776 @@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table *wait)
52777 mask = 0;
52778 if (filp->f_mode & FMODE_READ) {
52779 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
52780 - if (!pipe->writers && filp->f_version != pipe->w_counter)
52781 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
52782 mask |= POLLHUP;
52783 }
52784
52785 @@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table *wait)
52786 * Most Unices do not set POLLERR for FIFOs but on Linux they
52787 * behave exactly like pipes for poll().
52788 */
52789 - if (!pipe->readers)
52790 + if (!atomic_read(&pipe->readers))
52791 mask |= POLLERR;
52792 }
52793
52794 @@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int decr, int decw)
52795
52796 mutex_lock(&inode->i_mutex);
52797 pipe = inode->i_pipe;
52798 - pipe->readers -= decr;
52799 - pipe->writers -= decw;
52800 + atomic_sub(decr, &pipe->readers);
52801 + atomic_sub(decw, &pipe->writers);
52802
52803 - if (!pipe->readers && !pipe->writers) {
52804 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
52805 free_pipe_info(inode);
52806 } else {
52807 wake_up_interruptible_sync(&pipe->wait);
52808 @@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
52809
52810 if (inode->i_pipe) {
52811 ret = 0;
52812 - inode->i_pipe->readers++;
52813 + atomic_inc(&inode->i_pipe->readers);
52814 }
52815
52816 mutex_unlock(&inode->i_mutex);
52817 @@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
52818
52819 if (inode->i_pipe) {
52820 ret = 0;
52821 - inode->i_pipe->writers++;
52822 + atomic_inc(&inode->i_pipe->writers);
52823 }
52824
52825 mutex_unlock(&inode->i_mutex);
52826 @@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
52827 if (inode->i_pipe) {
52828 ret = 0;
52829 if (filp->f_mode & FMODE_READ)
52830 - inode->i_pipe->readers++;
52831 + atomic_inc(&inode->i_pipe->readers);
52832 if (filp->f_mode & FMODE_WRITE)
52833 - inode->i_pipe->writers++;
52834 + atomic_inc(&inode->i_pipe->writers);
52835 }
52836
52837 mutex_unlock(&inode->i_mutex);
52838 @@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
52839 inode->i_pipe = NULL;
52840 }
52841
52842 -static struct vfsmount *pipe_mnt __read_mostly;
52843 +struct vfsmount *pipe_mnt __read_mostly;
52844 static int pipefs_delete_dentry(struct dentry *dentry)
52845 {
52846 /*
52847 @@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(void)
52848 goto fail_iput;
52849 inode->i_pipe = pipe;
52850
52851 - pipe->readers = pipe->writers = 1;
52852 + atomic_set(&pipe->readers, 1);
52853 + atomic_set(&pipe->writers, 1);
52854 inode->i_fop = &rdwr_pipefifo_fops;
52855
52856 /*
52857 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
52858 index 50f8f06..c5755df 100644
52859 --- a/fs/proc/Kconfig
52860 +++ b/fs/proc/Kconfig
52861 @@ -30,12 +30,12 @@ config PROC_FS
52862
52863 config PROC_KCORE
52864 bool "/proc/kcore support" if !ARM
52865 - depends on PROC_FS && MMU
52866 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
52867
52868 config PROC_VMCORE
52869 bool "/proc/vmcore support (EXPERIMENTAL)"
52870 - depends on PROC_FS && CRASH_DUMP
52871 - default y
52872 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
52873 + default n
52874 help
52875 Exports the dump image of crashed kernel in ELF format.
52876
52877 @@ -59,8 +59,8 @@ config PROC_SYSCTL
52878 limited in memory.
52879
52880 config PROC_PAGE_MONITOR
52881 - default y
52882 - depends on PROC_FS && MMU
52883 + default n
52884 + depends on PROC_FS && MMU && !GRKERNSEC
52885 bool "Enable /proc page monitoring" if EMBEDDED
52886 help
52887 Various /proc files exist to monitor process memory utilization:
52888 diff --git a/fs/proc/array.c b/fs/proc/array.c
52889 index c5ef152..1363194 100644
52890 --- a/fs/proc/array.c
52891 +++ b/fs/proc/array.c
52892 @@ -60,6 +60,7 @@
52893 #include <linux/tty.h>
52894 #include <linux/string.h>
52895 #include <linux/mman.h>
52896 +#include <linux/grsecurity.h>
52897 #include <linux/proc_fs.h>
52898 #include <linux/ioport.h>
52899 #include <linux/uaccess.h>
52900 @@ -321,6 +322,21 @@ static inline void task_context_switch_counts(struct seq_file *m,
52901 p->nivcsw);
52902 }
52903
52904 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
52905 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
52906 +{
52907 + if (p->mm)
52908 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
52909 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
52910 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
52911 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
52912 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
52913 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
52914 + else
52915 + seq_printf(m, "PaX:\t-----\n");
52916 +}
52917 +#endif
52918 +
52919 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
52920 struct pid *pid, struct task_struct *task)
52921 {
52922 @@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
52923 task_cap(m, task);
52924 cpuset_task_status_allowed(m, task);
52925 task_context_switch_counts(m, task);
52926 +
52927 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
52928 + task_pax(m, task);
52929 +#endif
52930 +
52931 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
52932 + task_grsec_rbac(m, task);
52933 +#endif
52934 +
52935 return 0;
52936 }
52937
52938 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52939 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
52940 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
52941 + _mm->pax_flags & MF_PAX_SEGMEXEC))
52942 +#endif
52943 +
52944 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52945 struct pid *pid, struct task_struct *task, int whole)
52946 {
52947 @@ -358,9 +389,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52948 cputime_t cutime, cstime, utime, stime;
52949 cputime_t cgtime, gtime;
52950 unsigned long rsslim = 0;
52951 - char tcomm[sizeof(task->comm)];
52952 + char tcomm[sizeof(task->comm)] = { 0 };
52953 unsigned long flags;
52954
52955 + pax_track_stack();
52956 +
52957 state = *get_task_state(task);
52958 vsize = eip = esp = 0;
52959 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
52960 @@ -433,6 +466,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52961 gtime = task_gtime(task);
52962 }
52963
52964 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52965 + if (PAX_RAND_FLAGS(mm)) {
52966 + eip = 0;
52967 + esp = 0;
52968 + wchan = 0;
52969 + }
52970 +#endif
52971 +#ifdef CONFIG_GRKERNSEC_HIDESYM
52972 + wchan = 0;
52973 + eip =0;
52974 + esp =0;
52975 +#endif
52976 +
52977 /* scale priority and nice values from timeslices to -20..20 */
52978 /* to make it look like a "normal" Unix priority/nice value */
52979 priority = task_prio(task);
52980 @@ -473,9 +519,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52981 vsize,
52982 mm ? get_mm_rss(mm) : 0,
52983 rsslim,
52984 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52985 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
52986 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
52987 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
52988 +#else
52989 mm ? (permitted ? mm->start_code : 1) : 0,
52990 mm ? (permitted ? mm->end_code : 1) : 0,
52991 (permitted && mm) ? mm->start_stack : 0,
52992 +#endif
52993 esp,
52994 eip,
52995 /* The signal information here is obsolete.
52996 @@ -528,3 +580,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
52997
52998 return 0;
52999 }
53000 +
53001 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
53002 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
53003 +{
53004 + u32 curr_ip = 0;
53005 + unsigned long flags;
53006 +
53007 + if (lock_task_sighand(task, &flags)) {
53008 + curr_ip = task->signal->curr_ip;
53009 + unlock_task_sighand(task, &flags);
53010 + }
53011 +
53012 + return sprintf(buffer, "%pI4\n", &curr_ip);
53013 +}
53014 +#endif
53015 diff --git a/fs/proc/base.c b/fs/proc/base.c
53016 index 67f7dc0..67ab883 100644
53017 --- a/fs/proc/base.c
53018 +++ b/fs/proc/base.c
53019 @@ -102,6 +102,22 @@ struct pid_entry {
53020 union proc_op op;
53021 };
53022
53023 +struct getdents_callback {
53024 + struct linux_dirent __user * current_dir;
53025 + struct linux_dirent __user * previous;
53026 + struct file * file;
53027 + int count;
53028 + int error;
53029 +};
53030 +
53031 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
53032 + loff_t offset, u64 ino, unsigned int d_type)
53033 +{
53034 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
53035 + buf->error = -EINVAL;
53036 + return 0;
53037 +}
53038 +
53039 #define NOD(NAME, MODE, IOP, FOP, OP) { \
53040 .name = (NAME), \
53041 .len = sizeof(NAME) - 1, \
53042 @@ -213,6 +229,9 @@ static int check_mem_permission(struct task_struct *task)
53043 if (task == current)
53044 return 0;
53045
53046 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
53047 + return -EPERM;
53048 +
53049 /*
53050 * If current is actively ptrace'ing, and would also be
53051 * permitted to freshly attach with ptrace now, permit it.
53052 @@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
53053 if (!mm->arg_end)
53054 goto out_mm; /* Shh! No looking before we're done */
53055
53056 + if (gr_acl_handle_procpidmem(task))
53057 + goto out_mm;
53058 +
53059 len = mm->arg_end - mm->arg_start;
53060
53061 if (len > PAGE_SIZE)
53062 @@ -287,12 +309,28 @@ out:
53063 return res;
53064 }
53065
53066 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53067 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
53068 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
53069 + _mm->pax_flags & MF_PAX_SEGMEXEC))
53070 +#endif
53071 +
53072 static int proc_pid_auxv(struct task_struct *task, char *buffer)
53073 {
53074 int res = 0;
53075 struct mm_struct *mm = get_task_mm(task);
53076 if (mm) {
53077 unsigned int nwords = 0;
53078 +
53079 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53080 + /* allow if we're currently ptracing this task */
53081 + if (PAX_RAND_FLAGS(mm) &&
53082 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
53083 + mmput(mm);
53084 + return 0;
53085 + }
53086 +#endif
53087 +
53088 do {
53089 nwords += 2;
53090 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
53091 @@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
53092 }
53093
53094
53095 -#ifdef CONFIG_KALLSYMS
53096 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53097 /*
53098 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
53099 * Returns the resolved symbol. If that fails, simply return the address.
53100 @@ -345,7 +383,7 @@ static void unlock_trace(struct task_struct *task)
53101 mutex_unlock(&task->cred_guard_mutex);
53102 }
53103
53104 -#ifdef CONFIG_STACKTRACE
53105 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53106
53107 #define MAX_STACK_TRACE_DEPTH 64
53108
53109 @@ -545,7 +583,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
53110 return count;
53111 }
53112
53113 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
53114 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53115 static int proc_pid_syscall(struct task_struct *task, char *buffer)
53116 {
53117 long nr;
53118 @@ -574,7 +612,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
53119 /************************************************************************/
53120
53121 /* permission checks */
53122 -static int proc_fd_access_allowed(struct inode *inode)
53123 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
53124 {
53125 struct task_struct *task;
53126 int allowed = 0;
53127 @@ -584,7 +622,10 @@ static int proc_fd_access_allowed(struct inode *inode)
53128 */
53129 task = get_proc_task(inode);
53130 if (task) {
53131 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
53132 + if (log)
53133 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
53134 + else
53135 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
53136 put_task_struct(task);
53137 }
53138 return allowed;
53139 @@ -809,6 +850,8 @@ static int mem_open(struct inode* inode, struct file* file)
53140 return 0;
53141 }
53142
53143 +static int task_dumpable(struct task_struct *task);
53144 +
53145 static ssize_t mem_read(struct file * file, char __user * buf,
53146 size_t count, loff_t *ppos)
53147 {
53148 @@ -824,6 +867,12 @@ static ssize_t mem_read(struct file * file, char __user * buf,
53149 if (check_mem_permission(task))
53150 goto out;
53151
53152 + // XXX: temporary workaround
53153 + if (!task_dumpable(task) && task == current) {
53154 + ret = -EACCES;
53155 + goto out;
53156 + }
53157 +
53158 ret = -ENOMEM;
53159 page = (char *)__get_free_page(GFP_TEMPORARY);
53160 if (!page)
53161 @@ -963,6 +1012,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
53162 if (!task)
53163 goto out_no_task;
53164
53165 + if (gr_acl_handle_procpidmem(task))
53166 + goto out;
53167 +
53168 if (!ptrace_may_access(task, PTRACE_MODE_READ))
53169 goto out;
53170
53171 @@ -1377,7 +1429,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
53172 path_put(&nd->path);
53173
53174 /* Are we allowed to snoop on the tasks file descriptors? */
53175 - if (!proc_fd_access_allowed(inode))
53176 + if (!proc_fd_access_allowed(inode,0))
53177 goto out;
53178
53179 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
53180 @@ -1417,8 +1469,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
53181 struct path path;
53182
53183 /* Are we allowed to snoop on the tasks file descriptors? */
53184 - if (!proc_fd_access_allowed(inode))
53185 - goto out;
53186 + /* logging this is needed for learning on chromium to work properly,
53187 + but we don't want to flood the logs from 'ps' which does a readlink
53188 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
53189 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
53190 + */
53191 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
53192 + if (!proc_fd_access_allowed(inode,0))
53193 + goto out;
53194 + } else {
53195 + if (!proc_fd_access_allowed(inode,1))
53196 + goto out;
53197 + }
53198
53199 error = PROC_I(inode)->op.proc_get_link(inode, &path);
53200 if (error)
53201 @@ -1483,7 +1545,11 @@ static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_st
53202 rcu_read_lock();
53203 cred = __task_cred(task);
53204 inode->i_uid = cred->euid;
53205 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53206 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53207 +#else
53208 inode->i_gid = cred->egid;
53209 +#endif
53210 rcu_read_unlock();
53211 }
53212 security_task_to_inode(task, inode);
53213 @@ -1501,6 +1567,9 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
53214 struct inode *inode = dentry->d_inode;
53215 struct task_struct *task;
53216 const struct cred *cred;
53217 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53218 + const struct cred *tmpcred = current_cred();
53219 +#endif
53220
53221 generic_fillattr(inode, stat);
53222
53223 @@ -1508,13 +1577,41 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
53224 stat->uid = 0;
53225 stat->gid = 0;
53226 task = pid_task(proc_pid(inode), PIDTYPE_PID);
53227 +
53228 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
53229 + rcu_read_unlock();
53230 + return -ENOENT;
53231 + }
53232 +
53233 if (task) {
53234 + cred = __task_cred(task);
53235 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53236 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
53237 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53238 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
53239 +#endif
53240 + ) {
53241 +#endif
53242 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
53243 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53244 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
53245 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53246 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
53247 +#endif
53248 task_dumpable(task)) {
53249 - cred = __task_cred(task);
53250 stat->uid = cred->euid;
53251 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53252 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
53253 +#else
53254 stat->gid = cred->egid;
53255 +#endif
53256 }
53257 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53258 + } else {
53259 + rcu_read_unlock();
53260 + return -ENOENT;
53261 + }
53262 +#endif
53263 }
53264 rcu_read_unlock();
53265 return 0;
53266 @@ -1545,11 +1642,20 @@ static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
53267
53268 if (task) {
53269 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
53270 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53271 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
53272 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53273 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
53274 +#endif
53275 task_dumpable(task)) {
53276 rcu_read_lock();
53277 cred = __task_cred(task);
53278 inode->i_uid = cred->euid;
53279 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53280 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53281 +#else
53282 inode->i_gid = cred->egid;
53283 +#endif
53284 rcu_read_unlock();
53285 } else {
53286 inode->i_uid = 0;
53287 @@ -1670,7 +1776,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
53288 int fd = proc_fd(inode);
53289
53290 if (task) {
53291 - files = get_files_struct(task);
53292 + if (!gr_acl_handle_procpidmem(task))
53293 + files = get_files_struct(task);
53294 put_task_struct(task);
53295 }
53296 if (files) {
53297 @@ -1922,12 +2029,22 @@ static const struct file_operations proc_fd_operations = {
53298 static int proc_fd_permission(struct inode *inode, int mask)
53299 {
53300 int rv;
53301 + struct task_struct *task;
53302
53303 rv = generic_permission(inode, mask, NULL);
53304 - if (rv == 0)
53305 - return 0;
53306 +
53307 if (task_pid(current) == proc_pid(inode))
53308 rv = 0;
53309 +
53310 + task = get_proc_task(inode);
53311 + if (task == NULL)
53312 + return rv;
53313 +
53314 + if (gr_acl_handle_procpidmem(task))
53315 + rv = -EACCES;
53316 +
53317 + put_task_struct(task);
53318 +
53319 return rv;
53320 }
53321
53322 @@ -2036,6 +2153,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
53323 if (!task)
53324 goto out_no_task;
53325
53326 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
53327 + goto out;
53328 +
53329 /*
53330 * Yes, it does not scale. And it should not. Don't add
53331 * new entries into /proc/<tgid>/ without very good reasons.
53332 @@ -2080,6 +2200,9 @@ static int proc_pident_readdir(struct file *filp,
53333 if (!task)
53334 goto out_no_task;
53335
53336 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
53337 + goto out;
53338 +
53339 ret = 0;
53340 i = filp->f_pos;
53341 switch (i) {
53342 @@ -2347,7 +2470,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
53343 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
53344 void *cookie)
53345 {
53346 - char *s = nd_get_link(nd);
53347 + const char *s = nd_get_link(nd);
53348 if (!IS_ERR(s))
53349 __putname(s);
53350 }
53351 @@ -2553,7 +2676,7 @@ static const struct pid_entry tgid_base_stuff[] = {
53352 #ifdef CONFIG_SCHED_DEBUG
53353 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
53354 #endif
53355 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
53356 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53357 INF("syscall", S_IRUGO, proc_pid_syscall),
53358 #endif
53359 INF("cmdline", S_IRUGO, proc_pid_cmdline),
53360 @@ -2578,10 +2701,10 @@ static const struct pid_entry tgid_base_stuff[] = {
53361 #ifdef CONFIG_SECURITY
53362 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
53363 #endif
53364 -#ifdef CONFIG_KALLSYMS
53365 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53366 INF("wchan", S_IRUGO, proc_pid_wchan),
53367 #endif
53368 -#ifdef CONFIG_STACKTRACE
53369 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53370 ONE("stack", S_IRUGO, proc_pid_stack),
53371 #endif
53372 #ifdef CONFIG_SCHEDSTATS
53373 @@ -2611,6 +2734,9 @@ static const struct pid_entry tgid_base_stuff[] = {
53374 #ifdef CONFIG_TASK_IO_ACCOUNTING
53375 INF("io", S_IRUSR, proc_tgid_io_accounting),
53376 #endif
53377 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
53378 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
53379 +#endif
53380 };
53381
53382 static int proc_tgid_base_readdir(struct file * filp,
53383 @@ -2735,7 +2861,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
53384 if (!inode)
53385 goto out;
53386
53387 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53388 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
53389 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53390 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53391 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
53392 +#else
53393 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
53394 +#endif
53395 inode->i_op = &proc_tgid_base_inode_operations;
53396 inode->i_fop = &proc_tgid_base_operations;
53397 inode->i_flags|=S_IMMUTABLE;
53398 @@ -2777,7 +2910,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
53399 if (!task)
53400 goto out;
53401
53402 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
53403 + goto out_put_task;
53404 +
53405 result = proc_pid_instantiate(dir, dentry, task, NULL);
53406 +out_put_task:
53407 put_task_struct(task);
53408 out:
53409 return result;
53410 @@ -2842,6 +2979,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
53411 {
53412 unsigned int nr;
53413 struct task_struct *reaper;
53414 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53415 + const struct cred *tmpcred = current_cred();
53416 + const struct cred *itercred;
53417 +#endif
53418 + filldir_t __filldir = filldir;
53419 struct tgid_iter iter;
53420 struct pid_namespace *ns;
53421
53422 @@ -2865,8 +3007,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
53423 for (iter = next_tgid(ns, iter);
53424 iter.task;
53425 iter.tgid += 1, iter = next_tgid(ns, iter)) {
53426 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53427 + rcu_read_lock();
53428 + itercred = __task_cred(iter.task);
53429 +#endif
53430 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
53431 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53432 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
53433 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53434 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
53435 +#endif
53436 + )
53437 +#endif
53438 + )
53439 + __filldir = &gr_fake_filldir;
53440 + else
53441 + __filldir = filldir;
53442 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53443 + rcu_read_unlock();
53444 +#endif
53445 filp->f_pos = iter.tgid + TGID_OFFSET;
53446 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
53447 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
53448 put_task_struct(iter.task);
53449 goto out;
53450 }
53451 @@ -2892,7 +3053,7 @@ static const struct pid_entry tid_base_stuff[] = {
53452 #ifdef CONFIG_SCHED_DEBUG
53453 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
53454 #endif
53455 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
53456 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53457 INF("syscall", S_IRUGO, proc_pid_syscall),
53458 #endif
53459 INF("cmdline", S_IRUGO, proc_pid_cmdline),
53460 @@ -2916,10 +3077,10 @@ static const struct pid_entry tid_base_stuff[] = {
53461 #ifdef CONFIG_SECURITY
53462 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
53463 #endif
53464 -#ifdef CONFIG_KALLSYMS
53465 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53466 INF("wchan", S_IRUGO, proc_pid_wchan),
53467 #endif
53468 -#ifdef CONFIG_STACKTRACE
53469 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53470 ONE("stack", S_IRUGO, proc_pid_stack),
53471 #endif
53472 #ifdef CONFIG_SCHEDSTATS
53473 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
53474 index 82676e3..5f8518a 100644
53475 --- a/fs/proc/cmdline.c
53476 +++ b/fs/proc/cmdline.c
53477 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
53478
53479 static int __init proc_cmdline_init(void)
53480 {
53481 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
53482 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
53483 +#else
53484 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
53485 +#endif
53486 return 0;
53487 }
53488 module_init(proc_cmdline_init);
53489 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
53490 index 59ee7da..469b4b6 100644
53491 --- a/fs/proc/devices.c
53492 +++ b/fs/proc/devices.c
53493 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
53494
53495 static int __init proc_devices_init(void)
53496 {
53497 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
53498 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
53499 +#else
53500 proc_create("devices", 0, NULL, &proc_devinfo_operations);
53501 +#endif
53502 return 0;
53503 }
53504 module_init(proc_devices_init);
53505 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
53506 index d78ade3..81767f9 100644
53507 --- a/fs/proc/inode.c
53508 +++ b/fs/proc/inode.c
53509 @@ -18,12 +18,19 @@
53510 #include <linux/module.h>
53511 #include <linux/smp_lock.h>
53512 #include <linux/sysctl.h>
53513 +#include <linux/grsecurity.h>
53514
53515 #include <asm/system.h>
53516 #include <asm/uaccess.h>
53517
53518 #include "internal.h"
53519
53520 +#ifdef CONFIG_PROC_SYSCTL
53521 +extern const struct inode_operations proc_sys_inode_operations;
53522 +extern const struct inode_operations proc_sys_dir_operations;
53523 +#endif
53524 +
53525 +
53526 struct proc_dir_entry *de_get(struct proc_dir_entry *de)
53527 {
53528 atomic_inc(&de->count);
53529 @@ -62,6 +69,13 @@ static void proc_delete_inode(struct inode *inode)
53530 de_put(de);
53531 if (PROC_I(inode)->sysctl)
53532 sysctl_head_put(PROC_I(inode)->sysctl);
53533 +
53534 +#ifdef CONFIG_PROC_SYSCTL
53535 + if (inode->i_op == &proc_sys_inode_operations ||
53536 + inode->i_op == &proc_sys_dir_operations)
53537 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
53538 +#endif
53539 +
53540 clear_inode(inode);
53541 }
53542
53543 @@ -457,7 +471,11 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
53544 if (de->mode) {
53545 inode->i_mode = de->mode;
53546 inode->i_uid = de->uid;
53547 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53548 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53549 +#else
53550 inode->i_gid = de->gid;
53551 +#endif
53552 }
53553 if (de->size)
53554 inode->i_size = de->size;
53555 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
53556 index 753ca37..26bcf3b 100644
53557 --- a/fs/proc/internal.h
53558 +++ b/fs/proc/internal.h
53559 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
53560 struct pid *pid, struct task_struct *task);
53561 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
53562 struct pid *pid, struct task_struct *task);
53563 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
53564 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
53565 +#endif
53566 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
53567
53568 extern const struct file_operations proc_maps_operations;
53569 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
53570 index b442dac..aab29cb 100644
53571 --- a/fs/proc/kcore.c
53572 +++ b/fs/proc/kcore.c
53573 @@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
53574 off_t offset = 0;
53575 struct kcore_list *m;
53576
53577 + pax_track_stack();
53578 +
53579 /* setup ELF header */
53580 elf = (struct elfhdr *) bufp;
53581 bufp += sizeof(struct elfhdr);
53582 @@ -477,9 +479,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
53583 * the addresses in the elf_phdr on our list.
53584 */
53585 start = kc_offset_to_vaddr(*fpos - elf_buflen);
53586 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
53587 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
53588 + if (tsz > buflen)
53589 tsz = buflen;
53590 -
53591 +
53592 while (buflen) {
53593 struct kcore_list *m;
53594
53595 @@ -508,20 +511,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
53596 kfree(elf_buf);
53597 } else {
53598 if (kern_addr_valid(start)) {
53599 - unsigned long n;
53600 + char *elf_buf;
53601 + mm_segment_t oldfs;
53602
53603 - n = copy_to_user(buffer, (char *)start, tsz);
53604 - /*
53605 - * We cannot distingush between fault on source
53606 - * and fault on destination. When this happens
53607 - * we clear too and hope it will trigger the
53608 - * EFAULT again.
53609 - */
53610 - if (n) {
53611 - if (clear_user(buffer + tsz - n,
53612 - n))
53613 + elf_buf = kmalloc(tsz, GFP_KERNEL);
53614 + if (!elf_buf)
53615 + return -ENOMEM;
53616 + oldfs = get_fs();
53617 + set_fs(KERNEL_DS);
53618 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
53619 + set_fs(oldfs);
53620 + if (copy_to_user(buffer, elf_buf, tsz)) {
53621 + kfree(elf_buf);
53622 return -EFAULT;
53623 + }
53624 }
53625 + set_fs(oldfs);
53626 + kfree(elf_buf);
53627 } else {
53628 if (clear_user(buffer, tsz))
53629 return -EFAULT;
53630 @@ -541,6 +547,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
53631
53632 static int open_kcore(struct inode *inode, struct file *filp)
53633 {
53634 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
53635 + return -EPERM;
53636 +#endif
53637 if (!capable(CAP_SYS_RAWIO))
53638 return -EPERM;
53639 if (kcore_need_update)
53640 diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
53641 index 7ca7834..cfe90a4 100644
53642 --- a/fs/proc/kmsg.c
53643 +++ b/fs/proc/kmsg.c
53644 @@ -12,37 +12,37 @@
53645 #include <linux/poll.h>
53646 #include <linux/proc_fs.h>
53647 #include <linux/fs.h>
53648 +#include <linux/syslog.h>
53649
53650 #include <asm/uaccess.h>
53651 #include <asm/io.h>
53652
53653 extern wait_queue_head_t log_wait;
53654
53655 -extern int do_syslog(int type, char __user *bug, int count);
53656 -
53657 static int kmsg_open(struct inode * inode, struct file * file)
53658 {
53659 - return do_syslog(1,NULL,0);
53660 + return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_FILE);
53661 }
53662
53663 static int kmsg_release(struct inode * inode, struct file * file)
53664 {
53665 - (void) do_syslog(0,NULL,0);
53666 + (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_FILE);
53667 return 0;
53668 }
53669
53670 static ssize_t kmsg_read(struct file *file, char __user *buf,
53671 size_t count, loff_t *ppos)
53672 {
53673 - if ((file->f_flags & O_NONBLOCK) && !do_syslog(9, NULL, 0))
53674 + if ((file->f_flags & O_NONBLOCK) &&
53675 + !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
53676 return -EAGAIN;
53677 - return do_syslog(2, buf, count);
53678 + return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_FILE);
53679 }
53680
53681 static unsigned int kmsg_poll(struct file *file, poll_table *wait)
53682 {
53683 poll_wait(file, &log_wait, wait);
53684 - if (do_syslog(9, NULL, 0))
53685 + if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
53686 return POLLIN | POLLRDNORM;
53687 return 0;
53688 }
53689 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
53690 index a65239c..ad1182a 100644
53691 --- a/fs/proc/meminfo.c
53692 +++ b/fs/proc/meminfo.c
53693 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
53694 unsigned long pages[NR_LRU_LISTS];
53695 int lru;
53696
53697 + pax_track_stack();
53698 +
53699 /*
53700 * display in kilobytes.
53701 */
53702 @@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
53703 vmi.used >> 10,
53704 vmi.largest_chunk >> 10
53705 #ifdef CONFIG_MEMORY_FAILURE
53706 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
53707 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
53708 #endif
53709 );
53710
53711 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
53712 index 9fe7d7e..cdb62c9 100644
53713 --- a/fs/proc/nommu.c
53714 +++ b/fs/proc/nommu.c
53715 @@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
53716 if (len < 1)
53717 len = 1;
53718 seq_printf(m, "%*c", len, ' ');
53719 - seq_path(m, &file->f_path, "");
53720 + seq_path(m, &file->f_path, "\n\\");
53721 }
53722
53723 seq_putc(m, '\n');
53724 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
53725 index 04d1270..25e1173 100644
53726 --- a/fs/proc/proc_net.c
53727 +++ b/fs/proc/proc_net.c
53728 @@ -104,6 +104,17 @@ static struct net *get_proc_task_net(struct inode *dir)
53729 struct task_struct *task;
53730 struct nsproxy *ns;
53731 struct net *net = NULL;
53732 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53733 + const struct cred *cred = current_cred();
53734 +#endif
53735 +
53736 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53737 + if (cred->fsuid)
53738 + return net;
53739 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53740 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
53741 + return net;
53742 +#endif
53743
53744 rcu_read_lock();
53745 task = pid_task(proc_pid(dir), PIDTYPE_PID);
53746 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
53747 index f667e8a..55f4d96 100644
53748 --- a/fs/proc/proc_sysctl.c
53749 +++ b/fs/proc/proc_sysctl.c
53750 @@ -7,11 +7,13 @@
53751 #include <linux/security.h>
53752 #include "internal.h"
53753
53754 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
53755 +
53756 static const struct dentry_operations proc_sys_dentry_operations;
53757 static const struct file_operations proc_sys_file_operations;
53758 -static const struct inode_operations proc_sys_inode_operations;
53759 +const struct inode_operations proc_sys_inode_operations;
53760 static const struct file_operations proc_sys_dir_file_operations;
53761 -static const struct inode_operations proc_sys_dir_operations;
53762 +const struct inode_operations proc_sys_dir_operations;
53763
53764 static struct inode *proc_sys_make_inode(struct super_block *sb,
53765 struct ctl_table_header *head, struct ctl_table *table)
53766 @@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
53767 if (!p)
53768 goto out;
53769
53770 + if (gr_handle_sysctl(p, MAY_EXEC))
53771 + goto out;
53772 +
53773 err = ERR_PTR(-ENOMEM);
53774 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
53775 if (h)
53776 @@ -119,6 +124,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
53777
53778 err = NULL;
53779 dentry->d_op = &proc_sys_dentry_operations;
53780 +
53781 + gr_handle_proc_create(dentry, inode);
53782 +
53783 d_add(dentry, inode);
53784
53785 out:
53786 @@ -200,6 +208,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
53787 return -ENOMEM;
53788 } else {
53789 child->d_op = &proc_sys_dentry_operations;
53790 +
53791 + gr_handle_proc_create(child, inode);
53792 +
53793 d_add(child, inode);
53794 }
53795 } else {
53796 @@ -228,6 +239,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
53797 if (*pos < file->f_pos)
53798 continue;
53799
53800 + if (gr_handle_sysctl(table, 0))
53801 + continue;
53802 +
53803 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
53804 if (res)
53805 return res;
53806 @@ -344,6 +358,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
53807 if (IS_ERR(head))
53808 return PTR_ERR(head);
53809
53810 + if (table && gr_handle_sysctl(table, MAY_EXEC))
53811 + return -ENOENT;
53812 +
53813 generic_fillattr(inode, stat);
53814 if (table)
53815 stat->mode = (stat->mode & S_IFMT) | table->mode;
53816 @@ -358,17 +375,18 @@ static const struct file_operations proc_sys_file_operations = {
53817 };
53818
53819 static const struct file_operations proc_sys_dir_file_operations = {
53820 + .read = generic_read_dir,
53821 .readdir = proc_sys_readdir,
53822 .llseek = generic_file_llseek,
53823 };
53824
53825 -static const struct inode_operations proc_sys_inode_operations = {
53826 +const struct inode_operations proc_sys_inode_operations = {
53827 .permission = proc_sys_permission,
53828 .setattr = proc_sys_setattr,
53829 .getattr = proc_sys_getattr,
53830 };
53831
53832 -static const struct inode_operations proc_sys_dir_operations = {
53833 +const struct inode_operations proc_sys_dir_operations = {
53834 .lookup = proc_sys_lookup,
53835 .permission = proc_sys_permission,
53836 .setattr = proc_sys_setattr,
53837 diff --git a/fs/proc/root.c b/fs/proc/root.c
53838 index b080b79..d957e63 100644
53839 --- a/fs/proc/root.c
53840 +++ b/fs/proc/root.c
53841 @@ -134,7 +134,15 @@ void __init proc_root_init(void)
53842 #ifdef CONFIG_PROC_DEVICETREE
53843 proc_device_tree_init();
53844 #endif
53845 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
53846 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53847 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
53848 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53849 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
53850 +#endif
53851 +#else
53852 proc_mkdir("bus", NULL);
53853 +#endif
53854 proc_sys_init();
53855 }
53856
53857 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
53858 index 3b7b82a..7dbb571 100644
53859 --- a/fs/proc/task_mmu.c
53860 +++ b/fs/proc/task_mmu.c
53861 @@ -46,15 +46,26 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
53862 "VmStk:\t%8lu kB\n"
53863 "VmExe:\t%8lu kB\n"
53864 "VmLib:\t%8lu kB\n"
53865 - "VmPTE:\t%8lu kB\n",
53866 - hiwater_vm << (PAGE_SHIFT-10),
53867 + "VmPTE:\t%8lu kB\n"
53868 +
53869 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
53870 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
53871 +#endif
53872 +
53873 + ,hiwater_vm << (PAGE_SHIFT-10),
53874 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
53875 mm->locked_vm << (PAGE_SHIFT-10),
53876 hiwater_rss << (PAGE_SHIFT-10),
53877 total_rss << (PAGE_SHIFT-10),
53878 data << (PAGE_SHIFT-10),
53879 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
53880 - (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
53881 + (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
53882 +
53883 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
53884 + , mm->context.user_cs_base, mm->context.user_cs_limit
53885 +#endif
53886 +
53887 + );
53888 }
53889
53890 unsigned long task_vsize(struct mm_struct *mm)
53891 @@ -175,7 +186,8 @@ static void m_stop(struct seq_file *m, void *v)
53892 struct proc_maps_private *priv = m->private;
53893 struct vm_area_struct *vma = v;
53894
53895 - vma_stop(priv, vma);
53896 + if (!IS_ERR(vma))
53897 + vma_stop(priv, vma);
53898 if (priv->task)
53899 put_task_struct(priv->task);
53900 }
53901 @@ -199,6 +211,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
53902 return ret;
53903 }
53904
53905 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53906 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
53907 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
53908 + _mm->pax_flags & MF_PAX_SEGMEXEC))
53909 +#endif
53910 +
53911 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53912 {
53913 struct mm_struct *mm = vma->vm_mm;
53914 @@ -206,7 +224,6 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53915 int flags = vma->vm_flags;
53916 unsigned long ino = 0;
53917 unsigned long long pgoff = 0;
53918 - unsigned long start;
53919 dev_t dev = 0;
53920 int len;
53921
53922 @@ -217,20 +234,23 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53923 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
53924 }
53925
53926 - /* We don't show the stack guard page in /proc/maps */
53927 - start = vma->vm_start;
53928 - if (vma->vm_flags & VM_GROWSDOWN)
53929 - if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
53930 - start += PAGE_SIZE;
53931 -
53932 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
53933 - start,
53934 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53935 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
53936 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
53937 +#else
53938 + vma->vm_start,
53939 vma->vm_end,
53940 +#endif
53941 flags & VM_READ ? 'r' : '-',
53942 flags & VM_WRITE ? 'w' : '-',
53943 flags & VM_EXEC ? 'x' : '-',
53944 flags & VM_MAYSHARE ? 's' : 'p',
53945 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53946 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
53947 +#else
53948 pgoff,
53949 +#endif
53950 MAJOR(dev), MINOR(dev), ino, &len);
53951
53952 /*
53953 @@ -239,7 +259,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53954 */
53955 if (file) {
53956 pad_len_spaces(m, len);
53957 - seq_path(m, &file->f_path, "\n");
53958 + seq_path(m, &file->f_path, "\n\\");
53959 } else {
53960 const char *name = arch_vma_name(vma);
53961 if (!name) {
53962 @@ -247,8 +267,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53963 if (vma->vm_start <= mm->brk &&
53964 vma->vm_end >= mm->start_brk) {
53965 name = "[heap]";
53966 - } else if (vma->vm_start <= mm->start_stack &&
53967 - vma->vm_end >= mm->start_stack) {
53968 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
53969 + (vma->vm_start <= mm->start_stack &&
53970 + vma->vm_end >= mm->start_stack)) {
53971 name = "[stack]";
53972 }
53973 } else {
53974 @@ -391,9 +412,16 @@ static int show_smap(struct seq_file *m, void *v)
53975 };
53976
53977 memset(&mss, 0, sizeof mss);
53978 - mss.vma = vma;
53979 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
53980 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
53981 +
53982 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53983 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
53984 +#endif
53985 + mss.vma = vma;
53986 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
53987 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
53988 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53989 + }
53990 +#endif
53991
53992 show_map_vma(m, vma);
53993
53994 @@ -409,7 +437,11 @@ static int show_smap(struct seq_file *m, void *v)
53995 "Swap: %8lu kB\n"
53996 "KernelPageSize: %8lu kB\n"
53997 "MMUPageSize: %8lu kB\n",
53998 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53999 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
54000 +#else
54001 (vma->vm_end - vma->vm_start) >> 10,
54002 +#endif
54003 mss.resident >> 10,
54004 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
54005 mss.shared_clean >> 10,
54006 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
54007 index 8f5c05d..c99c76d 100644
54008 --- a/fs/proc/task_nommu.c
54009 +++ b/fs/proc/task_nommu.c
54010 @@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
54011 else
54012 bytes += kobjsize(mm);
54013
54014 - if (current->fs && current->fs->users > 1)
54015 + if (current->fs && atomic_read(&current->fs->users) > 1)
54016 sbytes += kobjsize(current->fs);
54017 else
54018 bytes += kobjsize(current->fs);
54019 @@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
54020 if (len < 1)
54021 len = 1;
54022 seq_printf(m, "%*c", len, ' ');
54023 - seq_path(m, &file->f_path, "");
54024 + seq_path(m, &file->f_path, "\n\\");
54025 }
54026
54027 seq_putc(m, '\n');
54028 diff --git a/fs/readdir.c b/fs/readdir.c
54029 index 7723401..30059a6 100644
54030 --- a/fs/readdir.c
54031 +++ b/fs/readdir.c
54032 @@ -16,6 +16,7 @@
54033 #include <linux/security.h>
54034 #include <linux/syscalls.h>
54035 #include <linux/unistd.h>
54036 +#include <linux/namei.h>
54037
54038 #include <asm/uaccess.h>
54039
54040 @@ -67,6 +68,7 @@ struct old_linux_dirent {
54041
54042 struct readdir_callback {
54043 struct old_linux_dirent __user * dirent;
54044 + struct file * file;
54045 int result;
54046 };
54047
54048 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
54049 buf->result = -EOVERFLOW;
54050 return -EOVERFLOW;
54051 }
54052 +
54053 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
54054 + return 0;
54055 +
54056 buf->result++;
54057 dirent = buf->dirent;
54058 if (!access_ok(VERIFY_WRITE, dirent,
54059 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
54060
54061 buf.result = 0;
54062 buf.dirent = dirent;
54063 + buf.file = file;
54064
54065 error = vfs_readdir(file, fillonedir, &buf);
54066 if (buf.result)
54067 @@ -142,6 +149,7 @@ struct linux_dirent {
54068 struct getdents_callback {
54069 struct linux_dirent __user * current_dir;
54070 struct linux_dirent __user * previous;
54071 + struct file * file;
54072 int count;
54073 int error;
54074 };
54075 @@ -162,6 +170,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
54076 buf->error = -EOVERFLOW;
54077 return -EOVERFLOW;
54078 }
54079 +
54080 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
54081 + return 0;
54082 +
54083 dirent = buf->previous;
54084 if (dirent) {
54085 if (__put_user(offset, &dirent->d_off))
54086 @@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
54087 buf.previous = NULL;
54088 buf.count = count;
54089 buf.error = 0;
54090 + buf.file = file;
54091
54092 error = vfs_readdir(file, filldir, &buf);
54093 if (error >= 0)
54094 @@ -228,6 +241,7 @@ out:
54095 struct getdents_callback64 {
54096 struct linux_dirent64 __user * current_dir;
54097 struct linux_dirent64 __user * previous;
54098 + struct file *file;
54099 int count;
54100 int error;
54101 };
54102 @@ -242,6 +256,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
54103 buf->error = -EINVAL; /* only used if we fail.. */
54104 if (reclen > buf->count)
54105 return -EINVAL;
54106 +
54107 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
54108 + return 0;
54109 +
54110 dirent = buf->previous;
54111 if (dirent) {
54112 if (__put_user(offset, &dirent->d_off))
54113 @@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
54114
54115 buf.current_dir = dirent;
54116 buf.previous = NULL;
54117 + buf.file = file;
54118 buf.count = count;
54119 buf.error = 0;
54120
54121 @@ -297,7 +316,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
54122 error = buf.error;
54123 lastdirent = buf.previous;
54124 if (lastdirent) {
54125 - typeof(lastdirent->d_off) d_off = file->f_pos;
54126 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
54127 if (__put_user(d_off, &lastdirent->d_off))
54128 error = -EFAULT;
54129 else
54130 diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
54131 index d42c30c..4fd8718 100644
54132 --- a/fs/reiserfs/dir.c
54133 +++ b/fs/reiserfs/dir.c
54134 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
54135 struct reiserfs_dir_entry de;
54136 int ret = 0;
54137
54138 + pax_track_stack();
54139 +
54140 reiserfs_write_lock(inode->i_sb);
54141
54142 reiserfs_check_lock_depth(inode->i_sb, "readdir");
54143 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
54144 index 128d3f7..8840d44 100644
54145 --- a/fs/reiserfs/do_balan.c
54146 +++ b/fs/reiserfs/do_balan.c
54147 @@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
54148 return;
54149 }
54150
54151 - atomic_inc(&(fs_generation(tb->tb_sb)));
54152 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
54153 do_balance_starts(tb);
54154
54155 /* balance leaf returns 0 except if combining L R and S into
54156 diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
54157 index 72cb1cc..d0e3181 100644
54158 --- a/fs/reiserfs/item_ops.c
54159 +++ b/fs/reiserfs/item_ops.c
54160 @@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_item *vi)
54161 vi->vi_index, vi->vi_type, vi->vi_ih);
54162 }
54163
54164 -static struct item_operations stat_data_ops = {
54165 +static const struct item_operations stat_data_ops = {
54166 .bytes_number = sd_bytes_number,
54167 .decrement_key = sd_decrement_key,
54168 .is_left_mergeable = sd_is_left_mergeable,
54169 @@ -196,7 +196,7 @@ static void direct_print_vi(struct virtual_item *vi)
54170 vi->vi_index, vi->vi_type, vi->vi_ih);
54171 }
54172
54173 -static struct item_operations direct_ops = {
54174 +static const struct item_operations direct_ops = {
54175 .bytes_number = direct_bytes_number,
54176 .decrement_key = direct_decrement_key,
54177 .is_left_mergeable = direct_is_left_mergeable,
54178 @@ -341,7 +341,7 @@ static void indirect_print_vi(struct virtual_item *vi)
54179 vi->vi_index, vi->vi_type, vi->vi_ih);
54180 }
54181
54182 -static struct item_operations indirect_ops = {
54183 +static const struct item_operations indirect_ops = {
54184 .bytes_number = indirect_bytes_number,
54185 .decrement_key = indirect_decrement_key,
54186 .is_left_mergeable = indirect_is_left_mergeable,
54187 @@ -628,7 +628,7 @@ static void direntry_print_vi(struct virtual_item *vi)
54188 printk("\n");
54189 }
54190
54191 -static struct item_operations direntry_ops = {
54192 +static const struct item_operations direntry_ops = {
54193 .bytes_number = direntry_bytes_number,
54194 .decrement_key = direntry_decrement_key,
54195 .is_left_mergeable = direntry_is_left_mergeable,
54196 @@ -724,7 +724,7 @@ static void errcatch_print_vi(struct virtual_item *vi)
54197 "Invalid item type observed, run fsck ASAP");
54198 }
54199
54200 -static struct item_operations errcatch_ops = {
54201 +static const struct item_operations errcatch_ops = {
54202 errcatch_bytes_number,
54203 errcatch_decrement_key,
54204 errcatch_is_left_mergeable,
54205 @@ -746,7 +746,7 @@ static struct item_operations errcatch_ops = {
54206 #error Item types must use disk-format assigned values.
54207 #endif
54208
54209 -struct item_operations *item_ops[TYPE_ANY + 1] = {
54210 +const struct item_operations * const item_ops[TYPE_ANY + 1] = {
54211 &stat_data_ops,
54212 &indirect_ops,
54213 &direct_ops,
54214 diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
54215 index b5fe0aa..e0e25c4 100644
54216 --- a/fs/reiserfs/journal.c
54217 +++ b/fs/reiserfs/journal.c
54218 @@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
54219 struct buffer_head *bh;
54220 int i, j;
54221
54222 + pax_track_stack();
54223 +
54224 bh = __getblk(dev, block, bufsize);
54225 if (buffer_uptodate(bh))
54226 return (bh);
54227 diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
54228 index 2715791..b8996db 100644
54229 --- a/fs/reiserfs/namei.c
54230 +++ b/fs/reiserfs/namei.c
54231 @@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
54232 unsigned long savelink = 1;
54233 struct timespec ctime;
54234
54235 + pax_track_stack();
54236 +
54237 /* three balancings: (1) old name removal, (2) new name insertion
54238 and (3) maybe "save" link insertion
54239 stat data updates: (1) old directory,
54240 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
54241 index 9229e55..3d2e3b7 100644
54242 --- a/fs/reiserfs/procfs.c
54243 +++ b/fs/reiserfs/procfs.c
54244 @@ -123,7 +123,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
54245 "SMALL_TAILS " : "NO_TAILS ",
54246 replay_only(sb) ? "REPLAY_ONLY " : "",
54247 convert_reiserfs(sb) ? "CONV " : "",
54248 - atomic_read(&r->s_generation_counter),
54249 + atomic_read_unchecked(&r->s_generation_counter),
54250 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
54251 SF(s_do_balance), SF(s_unneeded_left_neighbor),
54252 SF(s_good_search_by_key_reada), SF(s_bmaps),
54253 @@ -309,6 +309,8 @@ static int show_journal(struct seq_file *m, struct super_block *sb)
54254 struct journal_params *jp = &rs->s_v1.s_journal;
54255 char b[BDEVNAME_SIZE];
54256
54257 + pax_track_stack();
54258 +
54259 seq_printf(m, /* on-disk fields */
54260 "jp_journal_1st_block: \t%i\n"
54261 "jp_journal_dev: \t%s[%x]\n"
54262 diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
54263 index d036ee5..4c7dca1 100644
54264 --- a/fs/reiserfs/stree.c
54265 +++ b/fs/reiserfs/stree.c
54266 @@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
54267 int iter = 0;
54268 #endif
54269
54270 + pax_track_stack();
54271 +
54272 BUG_ON(!th->t_trans_id);
54273
54274 init_tb_struct(th, &s_del_balance, sb, path,
54275 @@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
54276 int retval;
54277 int quota_cut_bytes = 0;
54278
54279 + pax_track_stack();
54280 +
54281 BUG_ON(!th->t_trans_id);
54282
54283 le_key2cpu_key(&cpu_key, key);
54284 @@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
54285 int quota_cut_bytes;
54286 loff_t tail_pos = 0;
54287
54288 + pax_track_stack();
54289 +
54290 BUG_ON(!th->t_trans_id);
54291
54292 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
54293 @@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
54294 int retval;
54295 int fs_gen;
54296
54297 + pax_track_stack();
54298 +
54299 BUG_ON(!th->t_trans_id);
54300
54301 fs_gen = get_generation(inode->i_sb);
54302 @@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
54303 int fs_gen = 0;
54304 int quota_bytes = 0;
54305
54306 + pax_track_stack();
54307 +
54308 BUG_ON(!th->t_trans_id);
54309
54310 if (inode) { /* Do we count quotas for item? */
54311 diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
54312 index 7cb1285..c726cd0 100644
54313 --- a/fs/reiserfs/super.c
54314 +++ b/fs/reiserfs/super.c
54315 @@ -916,6 +916,8 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
54316 {.option_name = NULL}
54317 };
54318
54319 + pax_track_stack();
54320 +
54321 *blocks = 0;
54322 if (!options || !*options)
54323 /* use default configuration: create tails, journaling on, no
54324 diff --git a/fs/select.c b/fs/select.c
54325 index fd38ce2..f5381b8 100644
54326 --- a/fs/select.c
54327 +++ b/fs/select.c
54328 @@ -20,6 +20,7 @@
54329 #include <linux/module.h>
54330 #include <linux/slab.h>
54331 #include <linux/poll.h>
54332 +#include <linux/security.h>
54333 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
54334 #include <linux/file.h>
54335 #include <linux/fdtable.h>
54336 @@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
54337 int retval, i, timed_out = 0;
54338 unsigned long slack = 0;
54339
54340 + pax_track_stack();
54341 +
54342 rcu_read_lock();
54343 retval = max_select_fd(n, fds);
54344 rcu_read_unlock();
54345 @@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
54346 /* Allocate small arguments on the stack to save memory and be faster */
54347 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
54348
54349 + pax_track_stack();
54350 +
54351 ret = -EINVAL;
54352 if (n < 0)
54353 goto out_nofds;
54354 @@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
54355 struct poll_list *walk = head;
54356 unsigned long todo = nfds;
54357
54358 + pax_track_stack();
54359 +
54360 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
54361 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
54362 return -EINVAL;
54363
54364 diff --git a/fs/seq_file.c b/fs/seq_file.c
54365 index eae7d9d..679f099 100644
54366 --- a/fs/seq_file.c
54367 +++ b/fs/seq_file.c
54368 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m, loff_t offset)
54369 return 0;
54370 }
54371 if (!m->buf) {
54372 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
54373 + m->size = PAGE_SIZE;
54374 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
54375 if (!m->buf)
54376 return -ENOMEM;
54377 }
54378 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m, loff_t offset)
54379 Eoverflow:
54380 m->op->stop(m, p);
54381 kfree(m->buf);
54382 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
54383 + m->size <<= 1;
54384 + m->buf = kmalloc(m->size, GFP_KERNEL);
54385 return !m->buf ? -ENOMEM : -EAGAIN;
54386 }
54387
54388 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
54389 m->version = file->f_version;
54390 /* grab buffer if we didn't have one */
54391 if (!m->buf) {
54392 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
54393 + m->size = PAGE_SIZE;
54394 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
54395 if (!m->buf)
54396 goto Enomem;
54397 }
54398 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
54399 goto Fill;
54400 m->op->stop(m, p);
54401 kfree(m->buf);
54402 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
54403 + m->size <<= 1;
54404 + m->buf = kmalloc(m->size, GFP_KERNEL);
54405 if (!m->buf)
54406 goto Enomem;
54407 m->count = 0;
54408 @@ -551,7 +555,7 @@ static void single_stop(struct seq_file *p, void *v)
54409 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
54410 void *data)
54411 {
54412 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
54413 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
54414 int res = -ENOMEM;
54415
54416 if (op) {
54417 diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c
54418 index 71c29b6..54694dd 100644
54419 --- a/fs/smbfs/proc.c
54420 +++ b/fs/smbfs/proc.c
54421 @@ -266,9 +266,9 @@ int smb_setcodepage(struct smb_sb_info *server, struct smb_nls_codepage *cp)
54422
54423 out:
54424 if (server->local_nls != NULL && server->remote_nls != NULL)
54425 - server->ops->convert = convert_cp;
54426 + *(void **)&server->ops->convert = convert_cp;
54427 else
54428 - server->ops->convert = convert_memcpy;
54429 + *(void **)&server->ops->convert = convert_memcpy;
54430
54431 smb_unlock_server(server);
54432 return n;
54433 @@ -933,9 +933,9 @@ smb_newconn(struct smb_sb_info *server, struct smb_conn_opt *opt)
54434
54435 /* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
54436 if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
54437 - server->ops->getattr = smb_proc_getattr_core;
54438 + *(void **)&server->ops->getattr = smb_proc_getattr_core;
54439 } else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
54440 - server->ops->getattr = smb_proc_getattr_ff;
54441 + *(void **)&server->ops->getattr = smb_proc_getattr_ff;
54442 }
54443
54444 /* Decode server capabilities */
54445 @@ -3439,7 +3439,7 @@ out:
54446 static void
54447 install_ops(struct smb_ops *dst, struct smb_ops *src)
54448 {
54449 - memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
54450 + memcpy((void *)dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
54451 }
54452
54453 /* < LANMAN2 */
54454 diff --git a/fs/smbfs/symlink.c b/fs/smbfs/symlink.c
54455 index 00b2909..2ace383 100644
54456 --- a/fs/smbfs/symlink.c
54457 +++ b/fs/smbfs/symlink.c
54458 @@ -55,7 +55,7 @@ static void *smb_follow_link(struct dentry *dentry, struct nameidata *nd)
54459
54460 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
54461 {
54462 - char *s = nd_get_link(nd);
54463 + const char *s = nd_get_link(nd);
54464 if (!IS_ERR(s))
54465 __putname(s);
54466 }
54467 diff --git a/fs/splice.c b/fs/splice.c
54468 index bb92b7c..5aa72b0 100644
54469 --- a/fs/splice.c
54470 +++ b/fs/splice.c
54471 @@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
54472 pipe_lock(pipe);
54473
54474 for (;;) {
54475 - if (!pipe->readers) {
54476 + if (!atomic_read(&pipe->readers)) {
54477 send_sig(SIGPIPE, current, 0);
54478 if (!ret)
54479 ret = -EPIPE;
54480 @@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
54481 do_wakeup = 0;
54482 }
54483
54484 - pipe->waiting_writers++;
54485 + atomic_inc(&pipe->waiting_writers);
54486 pipe_wait(pipe);
54487 - pipe->waiting_writers--;
54488 + atomic_dec(&pipe->waiting_writers);
54489 }
54490
54491 pipe_unlock(pipe);
54492 @@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
54493 .spd_release = spd_release_page,
54494 };
54495
54496 + pax_track_stack();
54497 +
54498 index = *ppos >> PAGE_CACHE_SHIFT;
54499 loff = *ppos & ~PAGE_CACHE_MASK;
54500 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
54501 @@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
54502 old_fs = get_fs();
54503 set_fs(get_ds());
54504 /* The cast to a user pointer is valid due to the set_fs() */
54505 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
54506 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
54507 set_fs(old_fs);
54508
54509 return res;
54510 @@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
54511 old_fs = get_fs();
54512 set_fs(get_ds());
54513 /* The cast to a user pointer is valid due to the set_fs() */
54514 - res = vfs_write(file, (const char __user *)buf, count, &pos);
54515 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
54516 set_fs(old_fs);
54517
54518 return res;
54519 @@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
54520 .spd_release = spd_release_page,
54521 };
54522
54523 + pax_track_stack();
54524 +
54525 index = *ppos >> PAGE_CACHE_SHIFT;
54526 offset = *ppos & ~PAGE_CACHE_MASK;
54527 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
54528 @@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
54529 goto err;
54530
54531 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
54532 - vec[i].iov_base = (void __user *) page_address(page);
54533 + vec[i].iov_base = (__force void __user *) page_address(page);
54534 vec[i].iov_len = this_len;
54535 pages[i] = page;
54536 spd.nr_pages++;
54537 @@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
54538 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
54539 {
54540 while (!pipe->nrbufs) {
54541 - if (!pipe->writers)
54542 + if (!atomic_read(&pipe->writers))
54543 return 0;
54544
54545 - if (!pipe->waiting_writers && sd->num_spliced)
54546 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
54547 return 0;
54548
54549 if (sd->flags & SPLICE_F_NONBLOCK)
54550 @@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
54551 * out of the pipe right after the splice_to_pipe(). So set
54552 * PIPE_READERS appropriately.
54553 */
54554 - pipe->readers = 1;
54555 + atomic_set(&pipe->readers, 1);
54556
54557 current->splice_pipe = pipe;
54558 }
54559 @@ -1593,6 +1597,8 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
54560 .spd_release = spd_release_page,
54561 };
54562
54563 + pax_track_stack();
54564 +
54565 pipe = pipe_info(file->f_path.dentry->d_inode);
54566 if (!pipe)
54567 return -EBADF;
54568 @@ -1701,9 +1707,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
54569 ret = -ERESTARTSYS;
54570 break;
54571 }
54572 - if (!pipe->writers)
54573 + if (!atomic_read(&pipe->writers))
54574 break;
54575 - if (!pipe->waiting_writers) {
54576 + if (!atomic_read(&pipe->waiting_writers)) {
54577 if (flags & SPLICE_F_NONBLOCK) {
54578 ret = -EAGAIN;
54579 break;
54580 @@ -1735,7 +1741,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
54581 pipe_lock(pipe);
54582
54583 while (pipe->nrbufs >= PIPE_BUFFERS) {
54584 - if (!pipe->readers) {
54585 + if (!atomic_read(&pipe->readers)) {
54586 send_sig(SIGPIPE, current, 0);
54587 ret = -EPIPE;
54588 break;
54589 @@ -1748,9 +1754,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
54590 ret = -ERESTARTSYS;
54591 break;
54592 }
54593 - pipe->waiting_writers++;
54594 + atomic_inc(&pipe->waiting_writers);
54595 pipe_wait(pipe);
54596 - pipe->waiting_writers--;
54597 + atomic_dec(&pipe->waiting_writers);
54598 }
54599
54600 pipe_unlock(pipe);
54601 @@ -1786,14 +1792,14 @@ retry:
54602 pipe_double_lock(ipipe, opipe);
54603
54604 do {
54605 - if (!opipe->readers) {
54606 + if (!atomic_read(&opipe->readers)) {
54607 send_sig(SIGPIPE, current, 0);
54608 if (!ret)
54609 ret = -EPIPE;
54610 break;
54611 }
54612
54613 - if (!ipipe->nrbufs && !ipipe->writers)
54614 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
54615 break;
54616
54617 /*
54618 @@ -1893,7 +1899,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
54619 pipe_double_lock(ipipe, opipe);
54620
54621 do {
54622 - if (!opipe->readers) {
54623 + if (!atomic_read(&opipe->readers)) {
54624 send_sig(SIGPIPE, current, 0);
54625 if (!ret)
54626 ret = -EPIPE;
54627 @@ -1938,7 +1944,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
54628 * return EAGAIN if we have the potential of some data in the
54629 * future, otherwise just return 0
54630 */
54631 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
54632 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
54633 ret = -EAGAIN;
54634
54635 pipe_unlock(ipipe);
54636 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
54637 index 7118a38..70af853 100644
54638 --- a/fs/sysfs/file.c
54639 +++ b/fs/sysfs/file.c
54640 @@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
54641
54642 struct sysfs_open_dirent {
54643 atomic_t refcnt;
54644 - atomic_t event;
54645 + atomic_unchecked_t event;
54646 wait_queue_head_t poll;
54647 struct list_head buffers; /* goes through sysfs_buffer.list */
54648 };
54649 @@ -53,7 +53,7 @@ struct sysfs_buffer {
54650 size_t count;
54651 loff_t pos;
54652 char * page;
54653 - struct sysfs_ops * ops;
54654 + const struct sysfs_ops * ops;
54655 struct mutex mutex;
54656 int needs_read_fill;
54657 int event;
54658 @@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
54659 {
54660 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
54661 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
54662 - struct sysfs_ops * ops = buffer->ops;
54663 + const struct sysfs_ops * ops = buffer->ops;
54664 int ret = 0;
54665 ssize_t count;
54666
54667 @@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
54668 if (!sysfs_get_active_two(attr_sd))
54669 return -ENODEV;
54670
54671 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
54672 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
54673 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
54674
54675 sysfs_put_active_two(attr_sd);
54676 @@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentry, struct sysfs_buffer * buffer, size_t
54677 {
54678 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
54679 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
54680 - struct sysfs_ops * ops = buffer->ops;
54681 + const struct sysfs_ops * ops = buffer->ops;
54682 int rc;
54683
54684 /* need attr_sd for attr and ops, its parent for kobj */
54685 @@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
54686 return -ENOMEM;
54687
54688 atomic_set(&new_od->refcnt, 0);
54689 - atomic_set(&new_od->event, 1);
54690 + atomic_set_unchecked(&new_od->event, 1);
54691 init_waitqueue_head(&new_od->poll);
54692 INIT_LIST_HEAD(&new_od->buffers);
54693 goto retry;
54694 @@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
54695 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
54696 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
54697 struct sysfs_buffer *buffer;
54698 - struct sysfs_ops *ops;
54699 + const struct sysfs_ops *ops;
54700 int error = -EACCES;
54701 char *p;
54702
54703 @@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
54704
54705 sysfs_put_active_two(attr_sd);
54706
54707 - if (buffer->event != atomic_read(&od->event))
54708 + if (buffer->event != atomic_read_unchecked(&od->event))
54709 goto trigger;
54710
54711 return DEFAULT_POLLMASK;
54712 @@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
54713
54714 od = sd->s_attr.open;
54715 if (od) {
54716 - atomic_inc(&od->event);
54717 + atomic_inc_unchecked(&od->event);
54718 wake_up_interruptible(&od->poll);
54719 }
54720
54721 diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
54722 index 4974995..c26609c 100644
54723 --- a/fs/sysfs/mount.c
54724 +++ b/fs/sysfs/mount.c
54725 @@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
54726 .s_name = "",
54727 .s_count = ATOMIC_INIT(1),
54728 .s_flags = SYSFS_DIR,
54729 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
54730 + .s_mode = S_IFDIR | S_IRWXU,
54731 +#else
54732 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
54733 +#endif
54734 .s_ino = 1,
54735 };
54736
54737 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
54738 index c5081ad..342ea86 100644
54739 --- a/fs/sysfs/symlink.c
54740 +++ b/fs/sysfs/symlink.c
54741 @@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
54742
54743 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
54744 {
54745 - char *page = nd_get_link(nd);
54746 + const char *page = nd_get_link(nd);
54747 if (!IS_ERR(page))
54748 free_page((unsigned long)page);
54749 }
54750 diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
54751 index 1e06853..b06d325 100644
54752 --- a/fs/udf/balloc.c
54753 +++ b/fs/udf/balloc.c
54754 @@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
54755
54756 mutex_lock(&sbi->s_alloc_mutex);
54757 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
54758 - if (bloc->logicalBlockNum < 0 ||
54759 - (bloc->logicalBlockNum + count) >
54760 - partmap->s_partition_len) {
54761 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
54762 udf_debug("%d < %d || %d + %d > %d\n",
54763 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
54764 count, partmap->s_partition_len);
54765 @@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct super_block *sb,
54766
54767 mutex_lock(&sbi->s_alloc_mutex);
54768 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
54769 - if (bloc->logicalBlockNum < 0 ||
54770 - (bloc->logicalBlockNum + count) >
54771 - partmap->s_partition_len) {
54772 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
54773 udf_debug("%d < %d || %d + %d > %d\n",
54774 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
54775 partmap->s_partition_len);
54776 diff --git a/fs/udf/inode.c b/fs/udf/inode.c
54777 index 6d24c2c..fff470f 100644
54778 --- a/fs/udf/inode.c
54779 +++ b/fs/udf/inode.c
54780 @@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
54781 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
54782 int lastblock = 0;
54783
54784 + pax_track_stack();
54785 +
54786 prev_epos.offset = udf_file_entry_alloc_offset(inode);
54787 prev_epos.block = iinfo->i_location;
54788 prev_epos.bh = NULL;
54789 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
54790 index 9215700..bf1f68e 100644
54791 --- a/fs/udf/misc.c
54792 +++ b/fs/udf/misc.c
54793 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
54794
54795 u8 udf_tag_checksum(const struct tag *t)
54796 {
54797 - u8 *data = (u8 *)t;
54798 + const u8 *data = (const u8 *)t;
54799 u8 checksum = 0;
54800 int i;
54801 for (i = 0; i < sizeof(struct tag); ++i)
54802 diff --git a/fs/utimes.c b/fs/utimes.c
54803 index e4c75db..b4df0e0 100644
54804 --- a/fs/utimes.c
54805 +++ b/fs/utimes.c
54806 @@ -1,6 +1,7 @@
54807 #include <linux/compiler.h>
54808 #include <linux/file.h>
54809 #include <linux/fs.h>
54810 +#include <linux/security.h>
54811 #include <linux/linkage.h>
54812 #include <linux/mount.h>
54813 #include <linux/namei.h>
54814 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
54815 goto mnt_drop_write_and_out;
54816 }
54817 }
54818 +
54819 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
54820 + error = -EACCES;
54821 + goto mnt_drop_write_and_out;
54822 + }
54823 +
54824 mutex_lock(&inode->i_mutex);
54825 error = notify_change(path->dentry, &newattrs);
54826 mutex_unlock(&inode->i_mutex);
54827 diff --git a/fs/xattr.c b/fs/xattr.c
54828 index 6d4f6d3..cda3958 100644
54829 --- a/fs/xattr.c
54830 +++ b/fs/xattr.c
54831 @@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
54832 * Extended attribute SET operations
54833 */
54834 static long
54835 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
54836 +setxattr(struct path *path, const char __user *name, const void __user *value,
54837 size_t size, int flags)
54838 {
54839 int error;
54840 @@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
54841 return PTR_ERR(kvalue);
54842 }
54843
54844 - error = vfs_setxattr(d, kname, kvalue, size, flags);
54845 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
54846 + error = -EACCES;
54847 + goto out;
54848 + }
54849 +
54850 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
54851 +out:
54852 kfree(kvalue);
54853 return error;
54854 }
54855 @@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
54856 return error;
54857 error = mnt_want_write(path.mnt);
54858 if (!error) {
54859 - error = setxattr(path.dentry, name, value, size, flags);
54860 + error = setxattr(&path, name, value, size, flags);
54861 mnt_drop_write(path.mnt);
54862 }
54863 path_put(&path);
54864 @@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
54865 return error;
54866 error = mnt_want_write(path.mnt);
54867 if (!error) {
54868 - error = setxattr(path.dentry, name, value, size, flags);
54869 + error = setxattr(&path, name, value, size, flags);
54870 mnt_drop_write(path.mnt);
54871 }
54872 path_put(&path);
54873 @@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
54874 const void __user *,value, size_t, size, int, flags)
54875 {
54876 struct file *f;
54877 - struct dentry *dentry;
54878 int error = -EBADF;
54879
54880 f = fget(fd);
54881 if (!f)
54882 return error;
54883 - dentry = f->f_path.dentry;
54884 - audit_inode(NULL, dentry);
54885 + audit_inode(NULL, f->f_path.dentry);
54886 error = mnt_want_write_file(f);
54887 if (!error) {
54888 - error = setxattr(dentry, name, value, size, flags);
54889 + error = setxattr(&f->f_path, name, value, size, flags);
54890 mnt_drop_write(f->f_path.mnt);
54891 }
54892 fput(f);
54893 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
54894 index c6ad7c7..f2847a7 100644
54895 --- a/fs/xattr_acl.c
54896 +++ b/fs/xattr_acl.c
54897 @@ -17,8 +17,8 @@
54898 struct posix_acl *
54899 posix_acl_from_xattr(const void *value, size_t size)
54900 {
54901 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
54902 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
54903 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
54904 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
54905 int count;
54906 struct posix_acl *acl;
54907 struct posix_acl_entry *acl_e;
54908 diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
54909 index 942362f..88f96f5 100644
54910 --- a/fs/xfs/linux-2.6/xfs_ioctl.c
54911 +++ b/fs/xfs/linux-2.6/xfs_ioctl.c
54912 @@ -134,7 +134,7 @@ xfs_find_handle(
54913 }
54914
54915 error = -EFAULT;
54916 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
54917 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
54918 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
54919 goto out_put;
54920
54921 @@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
54922 if (IS_ERR(dentry))
54923 return PTR_ERR(dentry);
54924
54925 - kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
54926 + kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
54927 if (!kbuf)
54928 goto out_dput;
54929
54930 @@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
54931 xfs_mount_t *mp,
54932 void __user *arg)
54933 {
54934 - xfs_fsop_geom_t fsgeo;
54935 + xfs_fsop_geom_t fsgeo;
54936 int error;
54937
54938 error = xfs_fs_geometry(mp, &fsgeo, 3);
54939 diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
54940 index bad485a..479bd32 100644
54941 --- a/fs/xfs/linux-2.6/xfs_ioctl32.c
54942 +++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
54943 @@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
54944 xfs_fsop_geom_t fsgeo;
54945 int error;
54946
54947 + memset(&fsgeo, 0, sizeof(fsgeo));
54948 error = xfs_fs_geometry(mp, &fsgeo, 3);
54949 if (error)
54950 return -error;
54951 diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
54952 index 1f3b4b8..6102f6d 100644
54953 --- a/fs/xfs/linux-2.6/xfs_iops.c
54954 +++ b/fs/xfs/linux-2.6/xfs_iops.c
54955 @@ -468,7 +468,7 @@ xfs_vn_put_link(
54956 struct nameidata *nd,
54957 void *p)
54958 {
54959 - char *s = nd_get_link(nd);
54960 + const char *s = nd_get_link(nd);
54961
54962 if (!IS_ERR(s))
54963 kfree(s);
54964 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
54965 index 8971fb0..5fc1eb2 100644
54966 --- a/fs/xfs/xfs_bmap.c
54967 +++ b/fs/xfs/xfs_bmap.c
54968 @@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
54969 int nmap,
54970 int ret_nmap);
54971 #else
54972 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
54973 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
54974 #endif /* DEBUG */
54975
54976 #if defined(XFS_RW_TRACE)
54977 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
54978 index e89734e..5e84d8d 100644
54979 --- a/fs/xfs/xfs_dir2_sf.c
54980 +++ b/fs/xfs/xfs_dir2_sf.c
54981 @@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
54982 }
54983
54984 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
54985 - if (filldir(dirent, sfep->name, sfep->namelen,
54986 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
54987 + char name[sfep->namelen];
54988 + memcpy(name, sfep->name, sfep->namelen);
54989 + if (filldir(dirent, name, sfep->namelen,
54990 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
54991 + *offset = off & 0x7fffffff;
54992 + return 0;
54993 + }
54994 + } else if (filldir(dirent, sfep->name, sfep->namelen,
54995 off & 0x7fffffff, ino, DT_UNKNOWN)) {
54996 *offset = off & 0x7fffffff;
54997 return 0;
54998 diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
54999 index 8f32f50..b6a41e8 100644
55000 --- a/fs/xfs/xfs_vnodeops.c
55001 +++ b/fs/xfs/xfs_vnodeops.c
55002 @@ -564,13 +564,18 @@ xfs_readlink(
55003
55004 xfs_ilock(ip, XFS_ILOCK_SHARED);
55005
55006 - ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFLNK);
55007 - ASSERT(ip->i_d.di_size <= MAXPATHLEN);
55008 -
55009 pathlen = ip->i_d.di_size;
55010 if (!pathlen)
55011 goto out;
55012
55013 + if (pathlen > MAXPATHLEN) {
55014 + xfs_fs_cmn_err(CE_ALERT, mp, "%s: inode (%llu) symlink length (%d) too long",
55015 + __func__, (unsigned long long)ip->i_ino, pathlen);
55016 + ASSERT(0);
55017 + error = XFS_ERROR(EFSCORRUPTED);
55018 + goto out;
55019 + }
55020 +
55021 if (ip->i_df.if_flags & XFS_IFINLINE) {
55022 memcpy(link, ip->i_df.if_u1.if_data, pathlen);
55023 link[pathlen] = '\0';
55024 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
55025 new file mode 100644
55026 index 0000000..883b00b
55027 --- /dev/null
55028 +++ b/grsecurity/Kconfig
55029 @@ -0,0 +1,1064 @@
55030 +#
55031 +# grsecurity configuration
55032 +#
55033 +
55034 +menu "Grsecurity"
55035 +
55036 +config GRKERNSEC
55037 + bool "Grsecurity"
55038 + select CRYPTO
55039 + select CRYPTO_SHA256
55040 + help
55041 + If you say Y here, you will be able to configure many features
55042 + that will enhance the security of your system. It is highly
55043 + recommended that you say Y here and read through the help
55044 + for each option so that you fully understand the features and
55045 + can evaluate their usefulness for your machine.
55046 +
55047 +choice
55048 + prompt "Security Level"
55049 + depends on GRKERNSEC
55050 + default GRKERNSEC_CUSTOM
55051 +
55052 +config GRKERNSEC_LOW
55053 + bool "Low"
55054 + select GRKERNSEC_LINK
55055 + select GRKERNSEC_FIFO
55056 + select GRKERNSEC_RANDNET
55057 + select GRKERNSEC_DMESG
55058 + select GRKERNSEC_CHROOT
55059 + select GRKERNSEC_CHROOT_CHDIR
55060 +
55061 + help
55062 + If you choose this option, several of the grsecurity options will
55063 + be enabled that will give you greater protection against a number
55064 + of attacks, while assuring that none of your software will have any
55065 + conflicts with the additional security measures. If you run a lot
55066 + of unusual software, or you are having problems with the higher
55067 + security levels, you should say Y here. With this option, the
55068 + following features are enabled:
55069 +
55070 + - Linking restrictions
55071 + - FIFO restrictions
55072 + - Restricted dmesg
55073 + - Enforced chdir("/") on chroot
55074 + - Runtime module disabling
55075 +
55076 +config GRKERNSEC_MEDIUM
55077 + bool "Medium"
55078 + select PAX
55079 + select PAX_EI_PAX
55080 + select PAX_PT_PAX_FLAGS
55081 + select PAX_HAVE_ACL_FLAGS
55082 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55083 + select GRKERNSEC_CHROOT
55084 + select GRKERNSEC_CHROOT_SYSCTL
55085 + select GRKERNSEC_LINK
55086 + select GRKERNSEC_FIFO
55087 + select GRKERNSEC_DMESG
55088 + select GRKERNSEC_RANDNET
55089 + select GRKERNSEC_FORKFAIL
55090 + select GRKERNSEC_TIME
55091 + select GRKERNSEC_SIGNAL
55092 + select GRKERNSEC_CHROOT
55093 + select GRKERNSEC_CHROOT_UNIX
55094 + select GRKERNSEC_CHROOT_MOUNT
55095 + select GRKERNSEC_CHROOT_PIVOT
55096 + select GRKERNSEC_CHROOT_DOUBLE
55097 + select GRKERNSEC_CHROOT_CHDIR
55098 + select GRKERNSEC_CHROOT_MKNOD
55099 + select GRKERNSEC_PROC
55100 + select GRKERNSEC_PROC_USERGROUP
55101 + select PAX_RANDUSTACK
55102 + select PAX_ASLR
55103 + select PAX_RANDMMAP
55104 + select PAX_REFCOUNT if (X86 || SPARC64)
55105 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55106 +
55107 + help
55108 + If you say Y here, several features in addition to those included
55109 + in the low additional security level will be enabled. These
55110 + features provide even more security to your system, though in rare
55111 + cases they may be incompatible with very old or poorly written
55112 + software. If you enable this option, make sure that your auth
55113 + service (identd) is running as gid 1001. With this option,
55114 + the following features (in addition to those provided in the
55115 + low additional security level) will be enabled:
55116 +
55117 + - Failed fork logging
55118 + - Time change logging
55119 + - Signal logging
55120 + - Deny mounts in chroot
55121 + - Deny double chrooting
55122 + - Deny sysctl writes in chroot
55123 + - Deny mknod in chroot
55124 + - Deny access to abstract AF_UNIX sockets out of chroot
55125 + - Deny pivot_root in chroot
55126 + - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
55127 + - /proc restrictions with special GID set to 10 (usually wheel)
55128 + - Address Space Layout Randomization (ASLR)
55129 + - Prevent exploitation of most refcount overflows
55130 + - Bounds checking of copying between the kernel and userland
55131 +
55132 +config GRKERNSEC_HIGH
55133 + bool "High"
55134 + select GRKERNSEC_LINK
55135 + select GRKERNSEC_FIFO
55136 + select GRKERNSEC_DMESG
55137 + select GRKERNSEC_FORKFAIL
55138 + select GRKERNSEC_TIME
55139 + select GRKERNSEC_SIGNAL
55140 + select GRKERNSEC_CHROOT
55141 + select GRKERNSEC_CHROOT_SHMAT
55142 + select GRKERNSEC_CHROOT_UNIX
55143 + select GRKERNSEC_CHROOT_MOUNT
55144 + select GRKERNSEC_CHROOT_FCHDIR
55145 + select GRKERNSEC_CHROOT_PIVOT
55146 + select GRKERNSEC_CHROOT_DOUBLE
55147 + select GRKERNSEC_CHROOT_CHDIR
55148 + select GRKERNSEC_CHROOT_MKNOD
55149 + select GRKERNSEC_CHROOT_CAPS
55150 + select GRKERNSEC_CHROOT_SYSCTL
55151 + select GRKERNSEC_CHROOT_FINDTASK
55152 + select GRKERNSEC_SYSFS_RESTRICT
55153 + select GRKERNSEC_PROC
55154 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55155 + select GRKERNSEC_HIDESYM
55156 + select GRKERNSEC_BRUTE
55157 + select GRKERNSEC_PROC_USERGROUP
55158 + select GRKERNSEC_KMEM
55159 + select GRKERNSEC_RESLOG
55160 + select GRKERNSEC_RANDNET
55161 + select GRKERNSEC_PROC_ADD
55162 + select GRKERNSEC_CHROOT_CHMOD
55163 + select GRKERNSEC_CHROOT_NICE
55164 + select GRKERNSEC_SETXID
55165 + select GRKERNSEC_AUDIT_MOUNT
55166 + select GRKERNSEC_MODHARDEN if (MODULES)
55167 + select GRKERNSEC_HARDEN_PTRACE
55168 + select GRKERNSEC_PTRACE_READEXEC
55169 + select GRKERNSEC_VM86 if (X86_32)
55170 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
55171 + select PAX
55172 + select PAX_RANDUSTACK
55173 + select PAX_ASLR
55174 + select PAX_RANDMMAP
55175 + select PAX_NOEXEC
55176 + select PAX_MPROTECT
55177 + select PAX_EI_PAX
55178 + select PAX_PT_PAX_FLAGS
55179 + select PAX_HAVE_ACL_FLAGS
55180 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
55181 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
55182 + select PAX_RANDKSTACK if (X86_TSC && X86)
55183 + select PAX_SEGMEXEC if (X86_32)
55184 + select PAX_PAGEEXEC
55185 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
55186 + select PAX_EMUTRAMP if (PARISC)
55187 + select PAX_EMUSIGRT if (PARISC)
55188 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
55189 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
55190 + select PAX_REFCOUNT if (X86 || SPARC64)
55191 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55192 + help
55193 + If you say Y here, many of the features of grsecurity will be
55194 + enabled, which will protect you against many kinds of attacks
55195 + against your system. The heightened security comes at a cost
55196 + of an increased chance of incompatibilities with rare software
55197 + on your machine. Since this security level enables PaX, you should
55198 + view <http://pax.grsecurity.net> and read about the PaX
55199 + project. While you are there, download chpax and run it on
55200 + binaries that cause problems with PaX. Also remember that
55201 + since the /proc restrictions are enabled, you must run your
55202 + identd as gid 1001. This security level enables the following
55203 + features in addition to those listed in the low and medium
55204 + security levels:
55205 +
55206 + - Additional /proc restrictions
55207 + - Chmod restrictions in chroot
55208 + - No signals, ptrace, or viewing of processes outside of chroot
55209 + - Capability restrictions in chroot
55210 + - Deny fchdir out of chroot
55211 + - Priority restrictions in chroot
55212 + - Segmentation-based implementation of PaX
55213 + - Mprotect restrictions
55214 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
55215 + - Kernel stack randomization
55216 + - Mount/unmount/remount logging
55217 + - Kernel symbol hiding
55218 + - Hardening of module auto-loading
55219 + - Ptrace restrictions
55220 + - Restricted vm86 mode
55221 + - Restricted sysfs/debugfs
55222 + - Active kernel exploit response
55223 +
55224 +config GRKERNSEC_CUSTOM
55225 + bool "Custom"
55226 + help
55227 + If you say Y here, you will be able to configure every grsecurity
55228 + option, which allows you to enable many more features that aren't
55229 + covered in the basic security levels. These additional features
55230 + include TPE, socket restrictions, and the sysctl system for
55231 + grsecurity. It is advised that you read through the help for
55232 + each option to determine its usefulness in your situation.
55233 +
55234 +endchoice
55235 +
55236 +menu "Address Space Protection"
55237 +depends on GRKERNSEC
55238 +
55239 +config GRKERNSEC_KMEM
55240 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
55241 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
55242 + help
55243 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
55244 + be written to or read from to modify or leak the contents of the running
55245 + kernel. /dev/port will also not be allowed to be opened. If you have module
55246 + support disabled, enabling this will close up four ways that are
55247 + currently used to insert malicious code into the running kernel.
55248 + Even with all these features enabled, we still highly recommend that
55249 + you use the RBAC system, as it is still possible for an attacker to
55250 + modify the running kernel through privileged I/O granted by ioperm/iopl.
55251 + If you are not using XFree86, you may be able to stop this additional
55252 + case by enabling the 'Disable privileged I/O' option. Though nothing
55253 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
55254 + but only to video memory, which is the only writing we allow in this
55255 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
55256 + not be allowed to mprotect it with PROT_WRITE later.
55257 + It is highly recommended that you say Y here if you meet all the
55258 + conditions above.
55259 +
55260 +config GRKERNSEC_VM86
55261 + bool "Restrict VM86 mode"
55262 + depends on X86_32
55263 +
55264 + help
55265 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
55266 + make use of a special execution mode on 32bit x86 processors called
55267 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
55268 + video cards and will still work with this option enabled. The purpose
55269 + of the option is to prevent exploitation of emulation errors in
55270 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
55271 + Nearly all users should be able to enable this option.
55272 +
55273 +config GRKERNSEC_IO
55274 + bool "Disable privileged I/O"
55275 + depends on X86
55276 + select RTC_CLASS
55277 + select RTC_INTF_DEV
55278 + select RTC_DRV_CMOS
55279 +
55280 + help
55281 + If you say Y here, all ioperm and iopl calls will return an error.
55282 + Ioperm and iopl can be used to modify the running kernel.
55283 + Unfortunately, some programs need this access to operate properly,
55284 + the most notable of which are XFree86 and hwclock. hwclock can be
55285 + remedied by having RTC support in the kernel, so real-time
55286 + clock support is enabled if this option is enabled, to ensure
55287 + that hwclock operates correctly. XFree86 still will not
55288 + operate correctly with this option enabled, so DO NOT CHOOSE Y
55289 + IF YOU USE XFree86. If you use XFree86 and you still want to
55290 + protect your kernel against modification, use the RBAC system.
55291 +
55292 +config GRKERNSEC_PROC_MEMMAP
55293 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
55294 + default y if (PAX_NOEXEC || PAX_ASLR)
55295 + depends on PAX_NOEXEC || PAX_ASLR
55296 + help
55297 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
55298 + give no information about the addresses of its mappings if
55299 + PaX features that rely on random addresses are enabled on the task.
55300 + If you use PaX it is greatly recommended that you say Y here as it
55301 + closes up a hole that makes the full ASLR useless for suid
55302 + binaries.
55303 +
55304 +config GRKERNSEC_BRUTE
55305 + bool "Deter exploit bruteforcing"
55306 + help
55307 + If you say Y here, attempts to bruteforce exploits against forking
55308 + daemons such as apache or sshd, as well as against suid/sgid binaries
55309 + will be deterred. When a child of a forking daemon is killed by PaX
55310 + or crashes due to an illegal instruction or other suspicious signal,
55311 + the parent process will be delayed 30 seconds upon every subsequent
55312 + fork until the administrator is able to assess the situation and
55313 + restart the daemon.
55314 + In the suid/sgid case, the attempt is logged, the user has all their
55315 + processes terminated, and they are prevented from executing any further
55316 + processes for 15 minutes.
55317 + It is recommended that you also enable signal logging in the auditing
55318 + section so that logs are generated when a process triggers a suspicious
55319 + signal.
55320 + If the sysctl option is enabled, a sysctl option with name
55321 + "deter_bruteforce" is created.
55322 +
55323 +config GRKERNSEC_MODHARDEN
55324 + bool "Harden module auto-loading"
55325 + depends on MODULES
55326 + help
55327 + If you say Y here, module auto-loading in response to use of some
55328 + feature implemented by an unloaded module will be restricted to
55329 + root users. Enabling this option helps defend against attacks
55330 + by unprivileged users who abuse the auto-loading behavior to
55331 + cause a vulnerable module to load that is then exploited.
55332 +
55333 + If this option prevents a legitimate use of auto-loading for a
55334 + non-root user, the administrator can execute modprobe manually
55335 + with the exact name of the module mentioned in the alert log.
55336 + Alternatively, the administrator can add the module to the list
55337 + of modules loaded at boot by modifying init scripts.
55338 +
55339 + Modification of init scripts will most likely be needed on
55340 + Ubuntu servers with encrypted home directory support enabled,
55341 + as the first non-root user logging in will cause the ecb(aes),
55342 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
55343 +
55344 +config GRKERNSEC_HIDESYM
55345 + bool "Hide kernel symbols"
55346 + help
55347 + If you say Y here, getting information on loaded modules, and
55348 + displaying all kernel symbols through a syscall will be restricted
55349 + to users with CAP_SYS_MODULE. For software compatibility reasons,
55350 + /proc/kallsyms will be restricted to the root user. The RBAC
55351 + system can hide that entry even from root.
55352 +
55353 + This option also prevents leaking of kernel addresses through
55354 + several /proc entries.
55355 +
55356 + Note that this option is only effective provided the following
55357 + conditions are met:
55358 + 1) The kernel using grsecurity is not precompiled by some distribution
55359 + 2) You have also enabled GRKERNSEC_DMESG
55360 + 3) You are using the RBAC system and hiding other files such as your
55361 + kernel image and System.map. Alternatively, enabling this option
55362 + causes the permissions on /boot, /lib/modules, and the kernel
55363 + source directory to change at compile time to prevent
55364 + reading by non-root users.
55365 + If the above conditions are met, this option will aid in providing a
55366 + useful protection against local kernel exploitation of overflows
55367 + and arbitrary read/write vulnerabilities.
55368 +
55369 +config GRKERNSEC_KERN_LOCKOUT
55370 + bool "Active kernel exploit response"
55371 + depends on X86 || ARM || PPC || SPARC
55372 + help
55373 + If you say Y here, when a PaX alert is triggered due to suspicious
55374 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
55375 + or an OOPs occurs due to bad memory accesses, instead of just
55376 + terminating the offending process (and potentially allowing
55377 + a subsequent exploit from the same user), we will take one of two
55378 + actions:
55379 + If the user was root, we will panic the system
55380 + If the user was non-root, we will log the attempt, terminate
55381 + all processes owned by the user, then prevent them from creating
55382 + any new processes until the system is restarted
55383 + This deters repeated kernel exploitation/bruteforcing attempts
55384 + and is useful for later forensics.
55385 +
55386 +endmenu
55387 +menu "Role Based Access Control Options"
55388 +depends on GRKERNSEC
55389 +
55390 +config GRKERNSEC_RBAC_DEBUG
55391 + bool
55392 +
55393 +config GRKERNSEC_NO_RBAC
55394 + bool "Disable RBAC system"
55395 + help
55396 + If you say Y here, the /dev/grsec device will be removed from the kernel,
55397 + preventing the RBAC system from being enabled. You should only say Y
55398 + here if you have no intention of using the RBAC system, so as to prevent
55399 + an attacker with root access from misusing the RBAC system to hide files
55400 + and processes when loadable module support and /dev/[k]mem have been
55401 + locked down.
55402 +
55403 +config GRKERNSEC_ACL_HIDEKERN
55404 + bool "Hide kernel processes"
55405 + help
55406 + If you say Y here, all kernel threads will be hidden to all
55407 + processes but those whose subject has the "view hidden processes"
55408 + flag.
55409 +
55410 +config GRKERNSEC_ACL_MAXTRIES
55411 + int "Maximum tries before password lockout"
55412 + default 3
55413 + help
55414 + This option enforces the maximum number of times a user can attempt
55415 + to authorize themselves with the grsecurity RBAC system before being
55416 + denied the ability to attempt authorization again for a specified time.
55417 + The lower the number, the harder it will be to brute-force a password.
55418 +
55419 +config GRKERNSEC_ACL_TIMEOUT
55420 + int "Time to wait after max password tries, in seconds"
55421 + default 30
55422 + help
55423 + This option specifies the time the user must wait after attempting to
55424 + authorize to the RBAC system with the maximum number of invalid
55425 + passwords. The higher the number, the harder it will be to brute-force
55426 + a password.
55427 +
55428 +endmenu
55429 +menu "Filesystem Protections"
55430 +depends on GRKERNSEC
55431 +
55432 +config GRKERNSEC_PROC
55433 + bool "Proc restrictions"
55434 + help
55435 + If you say Y here, the permissions of the /proc filesystem
55436 + will be altered to enhance system security and privacy. You MUST
55437 + choose either a user only restriction or a user and group restriction.
55438 + Depending upon the option you choose, you can either restrict users to
55439 + see only the processes they themselves run, or choose a group that can
55440 + view all processes and files normally restricted to root if you choose
55441 + the "restrict to user only" option. NOTE: If you're running identd as
55442 + a non-root user, you will have to run it as the group you specify here.
55443 +
55444 +config GRKERNSEC_PROC_USER
55445 + bool "Restrict /proc to user only"
55446 + depends on GRKERNSEC_PROC
55447 + help
55448 + If you say Y here, non-root users will only be able to view their own
55449 + processes, and restricts them from viewing network-related information,
55450 + and viewing kernel symbol and module information.
55451 +
55452 +config GRKERNSEC_PROC_USERGROUP
55453 + bool "Allow special group"
55454 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
55455 + help
55456 + If you say Y here, you will be able to select a group that will be
55457 + able to view all processes and network-related information. If you've
55458 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
55459 + remain hidden. This option is useful if you want to run identd as
55460 + a non-root user.
55461 +
55462 +config GRKERNSEC_PROC_GID
55463 + int "GID for special group"
55464 + depends on GRKERNSEC_PROC_USERGROUP
55465 + default 1001
55466 +
55467 +config GRKERNSEC_PROC_ADD
55468 + bool "Additional restrictions"
55469 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
55470 + help
55471 + If you say Y here, additional restrictions will be placed on
55472 + /proc that keep normal users from viewing device information and
55473 + slabinfo information that could be useful for exploits.
55474 +
55475 +config GRKERNSEC_LINK
55476 + bool "Linking restrictions"
55477 + help
55478 + If you say Y here, /tmp race exploits will be prevented, since users
55479 + will no longer be able to follow symlinks owned by other users in
55480 + world-writable +t directories (e.g. /tmp), unless the owner of the
55481 + symlink is the owner of the directory. users will also not be
55482 + able to hardlink to files they do not own. If the sysctl option is
55483 + enabled, a sysctl option with name "linking_restrictions" is created.
55484 +
55485 +config GRKERNSEC_FIFO
55486 + bool "FIFO restrictions"
55487 + help
55488 + If you say Y here, users will not be able to write to FIFOs they don't
55489 + own in world-writable +t directories (e.g. /tmp), unless the owner of
55490 + the FIFO is the same owner of the directory it's held in. If the sysctl
55491 + option is enabled, a sysctl option with name "fifo_restrictions" is
55492 + created.
55493 +
55494 +config GRKERNSEC_SYSFS_RESTRICT
55495 + bool "Sysfs/debugfs restriction"
55496 + depends on SYSFS
55497 + help
55498 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
55499 + any filesystem normally mounted under it (e.g. debugfs) will only
55500 + be accessible by root. These filesystems generally provide access
55501 + to hardware and debug information that isn't appropriate for unprivileged
55502 + users of the system. Sysfs and debugfs have also become a large source
55503 + of new vulnerabilities, ranging from infoleaks to local compromise.
55504 + There has been very little oversight with an eye toward security involved
55505 + in adding new exporters of information to these filesystems, so their
55506 + use is discouraged.
55507 + This option is equivalent to a chmod 0700 of the mount paths.
55508 +
55509 +config GRKERNSEC_ROFS
55510 + bool "Runtime read-only mount protection"
55511 + help
55512 + If you say Y here, a sysctl option with name "romount_protect" will
55513 + be created. By setting this option to 1 at runtime, filesystems
55514 + will be protected in the following ways:
55515 + * No new writable mounts will be allowed
55516 + * Existing read-only mounts won't be able to be remounted read/write
55517 + * Write operations will be denied on all block devices
55518 + This option acts independently of grsec_lock: once it is set to 1,
55519 + it cannot be turned off. Therefore, please be mindful of the resulting
55520 + behavior if this option is enabled in an init script on a read-only
55521 + filesystem. This feature is mainly intended for secure embedded systems.
55522 +
55523 +config GRKERNSEC_CHROOT
55524 + bool "Chroot jail restrictions"
55525 + help
55526 + If you say Y here, you will be able to choose several options that will
55527 + make breaking out of a chrooted jail much more difficult. If you
55528 + encounter no software incompatibilities with the following options, it
55529 + is recommended that you enable each one.
55530 +
55531 +config GRKERNSEC_CHROOT_MOUNT
55532 + bool "Deny mounts"
55533 + depends on GRKERNSEC_CHROOT
55534 + help
55535 + If you say Y here, processes inside a chroot will not be able to
55536 + mount or remount filesystems. If the sysctl option is enabled, a
55537 + sysctl option with name "chroot_deny_mount" is created.
55538 +
55539 +config GRKERNSEC_CHROOT_DOUBLE
55540 + bool "Deny double-chroots"
55541 + depends on GRKERNSEC_CHROOT
55542 + help
55543 + If you say Y here, processes inside a chroot will not be able to chroot
55544 + again outside the chroot. This is a widely used method of breaking
55545 + out of a chroot jail and should not be allowed. If the sysctl
55546 + option is enabled, a sysctl option with name
55547 + "chroot_deny_chroot" is created.
55548 +
55549 +config GRKERNSEC_CHROOT_PIVOT
55550 + bool "Deny pivot_root in chroot"
55551 + depends on GRKERNSEC_CHROOT
55552 + help
55553 + If you say Y here, processes inside a chroot will not be able to use
55554 + a function called pivot_root() that was introduced in Linux 2.3.41. It
55555 + works similarly to chroot in that it changes the root filesystem. This
55556 + function could be misused in a chrooted process to attempt to break out
55557 + of the chroot, and therefore should not be allowed. If the sysctl
55558 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
55559 + created.
55560 +
55561 +config GRKERNSEC_CHROOT_CHDIR
55562 + bool "Enforce chdir(\"/\") on all chroots"
55563 + depends on GRKERNSEC_CHROOT
55564 + help
55565 + If you say Y here, the current working directory of all newly-chrooted
55566 + applications will be set to the root directory of the chroot.
55567 + The man page on chroot(2) states:
55568 + Note that this call does not change the current working
55569 + directory, so that `.' can be outside the tree rooted at
55570 + `/'. In particular, the super-user can escape from a
55571 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
55572 +
55573 + It is recommended that you say Y here, since it's not known to break
55574 + any software. If the sysctl option is enabled, a sysctl option with
55575 + name "chroot_enforce_chdir" is created.
55576 +
55577 +config GRKERNSEC_CHROOT_CHMOD
55578 + bool "Deny (f)chmod +s"
55579 + depends on GRKERNSEC_CHROOT
55580 + help
55581 + If you say Y here, processes inside a chroot will not be able to chmod
55582 + or fchmod files to make them have suid or sgid bits. This protects
55583 + against another published method of breaking a chroot. If the sysctl
55584 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
55585 + created.
55586 +
55587 +config GRKERNSEC_CHROOT_FCHDIR
55588 + bool "Deny fchdir out of chroot"
55589 + depends on GRKERNSEC_CHROOT
55590 + help
55591 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
55592 + to a file descriptor of the chrooting process that points to a directory
55593 + outside the filesystem will be stopped. If the sysctl option
55594 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
55595 +
55596 +config GRKERNSEC_CHROOT_MKNOD
55597 + bool "Deny mknod"
55598 + depends on GRKERNSEC_CHROOT
55599 + help
55600 + If you say Y here, processes inside a chroot will not be allowed to
55601 + mknod. The problem with using mknod inside a chroot is that it
55602 + would allow an attacker to create a device entry that is the same
55603 + as one on the physical root of your system, which could be anything
55604 + from the console device to a device for your hard drive (which
55605 + they could then use to wipe the drive or steal data). It is recommended
55606 + that you say Y here, unless you run into software incompatibilities.
55607 + If the sysctl option is enabled, a sysctl option with name
55608 + "chroot_deny_mknod" is created.
55609 +
55610 +config GRKERNSEC_CHROOT_SHMAT
55611 + bool "Deny shmat() out of chroot"
55612 + depends on GRKERNSEC_CHROOT
55613 + help
55614 + If you say Y here, processes inside a chroot will not be able to attach
55615 + to shared memory segments that were created outside of the chroot jail.
55616 + It is recommended that you say Y here. If the sysctl option is enabled,
55617 + a sysctl option with name "chroot_deny_shmat" is created.
55618 +
55619 +config GRKERNSEC_CHROOT_UNIX
55620 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
55621 + depends on GRKERNSEC_CHROOT
55622 + help
55623 + If you say Y here, processes inside a chroot will not be able to
55624 + connect to abstract (meaning not belonging to a filesystem) Unix
55625 + domain sockets that were bound outside of a chroot. It is recommended
55626 + that you say Y here. If the sysctl option is enabled, a sysctl option
55627 + with name "chroot_deny_unix" is created.
55628 +
55629 +config GRKERNSEC_CHROOT_FINDTASK
55630 + bool "Protect outside processes"
55631 + depends on GRKERNSEC_CHROOT
55632 + help
55633 + If you say Y here, processes inside a chroot will not be able to
55634 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
55635 + getsid, or view any process outside of the chroot. If the sysctl
55636 + option is enabled, a sysctl option with name "chroot_findtask" is
55637 + created.
55638 +
55639 +config GRKERNSEC_CHROOT_NICE
55640 + bool "Restrict priority changes"
55641 + depends on GRKERNSEC_CHROOT
55642 + help
55643 + If you say Y here, processes inside a chroot will not be able to raise
55644 + the priority of processes in the chroot, or alter the priority of
55645 + processes outside the chroot. This provides more security than simply
55646 + removing CAP_SYS_NICE from the process' capability set. If the
55647 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
55648 + is created.
55649 +
55650 +config GRKERNSEC_CHROOT_SYSCTL
55651 + bool "Deny sysctl writes"
55652 + depends on GRKERNSEC_CHROOT
55653 + help
55654 + If you say Y here, an attacker in a chroot will not be able to
55655 + write to sysctl entries, either by sysctl(2) or through a /proc
55656 + interface. It is strongly recommended that you say Y here. If the
55657 + sysctl option is enabled, a sysctl option with name
55658 + "chroot_deny_sysctl" is created.
55659 +
55660 +config GRKERNSEC_CHROOT_CAPS
55661 + bool "Capability restrictions"
55662 + depends on GRKERNSEC_CHROOT
55663 + help
55664 + If you say Y here, the capabilities on all processes within a
55665 + chroot jail will be lowered to stop module insertion, raw i/o,
55666 + system and net admin tasks, rebooting the system, modifying immutable
55667 + files, modifying IPC owned by another, and changing the system time.
55668 + This is left an option because it can break some apps. Disable this
55669 + if your chrooted apps are having problems performing those kinds of
55670 + tasks. If the sysctl option is enabled, a sysctl option with
55671 + name "chroot_caps" is created.
55672 +
55673 +endmenu
55674 +menu "Kernel Auditing"
55675 +depends on GRKERNSEC
55676 +
55677 +config GRKERNSEC_AUDIT_GROUP
55678 + bool "Single group for auditing"
55679 + help
55680 + If you say Y here, the exec, chdir, and (un)mount logging features
55681 + will only operate on a group you specify. This option is recommended
55682 + if you only want to watch certain users instead of having a large
55683 + amount of logs from the entire system. If the sysctl option is enabled,
55684 + a sysctl option with name "audit_group" is created.
55685 +
55686 +config GRKERNSEC_AUDIT_GID
55687 + int "GID for auditing"
55688 + depends on GRKERNSEC_AUDIT_GROUP
55689 + default 1007
55690 +
55691 +config GRKERNSEC_EXECLOG
55692 + bool "Exec logging"
55693 + help
55694 + If you say Y here, all execve() calls will be logged (since the
55695 + other exec*() calls are frontends to execve(), all execution
55696 + will be logged). Useful for shell-servers that like to keep track
55697 + of their users. If the sysctl option is enabled, a sysctl option with
55698 + name "exec_logging" is created.
55699 + WARNING: This option when enabled will produce a LOT of logs, especially
55700 + on an active system.
55701 +
55702 +config GRKERNSEC_RESLOG
55703 + bool "Resource logging"
55704 + help
55705 + If you say Y here, all attempts to overstep resource limits will
55706 + be logged with the resource name, the requested size, and the current
55707 + limit. It is highly recommended that you say Y here. If the sysctl
55708 + option is enabled, a sysctl option with name "resource_logging" is
55709 + created. If the RBAC system is enabled, the sysctl value is ignored.
55710 +
55711 +config GRKERNSEC_CHROOT_EXECLOG
55712 + bool "Log execs within chroot"
55713 + help
55714 + If you say Y here, all executions inside a chroot jail will be logged
55715 + to syslog. This can cause a large amount of logs if certain
55716 + applications (eg. djb's daemontools) are installed on the system, and
55717 + is therefore left as an option. If the sysctl option is enabled, a
55718 + sysctl option with name "chroot_execlog" is created.
55719 +
55720 +config GRKERNSEC_AUDIT_PTRACE
55721 + bool "Ptrace logging"
55722 + help
55723 + If you say Y here, all attempts to attach to a process via ptrace
55724 + will be logged. If the sysctl option is enabled, a sysctl option
55725 + with name "audit_ptrace" is created.
55726 +
55727 +config GRKERNSEC_AUDIT_CHDIR
55728 + bool "Chdir logging"
55729 + help
55730 + If you say Y here, all chdir() calls will be logged. If the sysctl
55731 + option is enabled, a sysctl option with name "audit_chdir" is created.
55732 +
55733 +config GRKERNSEC_AUDIT_MOUNT
55734 + bool "(Un)Mount logging"
55735 + help
55736 + If you say Y here, all mounts and unmounts will be logged. If the
55737 + sysctl option is enabled, a sysctl option with name "audit_mount" is
55738 + created.
55739 +
55740 +config GRKERNSEC_SIGNAL
55741 + bool "Signal logging"
55742 + help
55743 + If you say Y here, certain important signals will be logged, such as
55744 + SIGSEGV, which will as a result inform you of when an error in a program
55745 + occurred, which in some cases could mean a possible exploit attempt.
55746 + If the sysctl option is enabled, a sysctl option with name
55747 + "signal_logging" is created.
55748 +
55749 +config GRKERNSEC_FORKFAIL
55750 + bool "Fork failure logging"
55751 + help
55752 + If you say Y here, all failed fork() attempts will be logged.
55753 + This could suggest a fork bomb, or someone attempting to overstep
55754 + their process limit. If the sysctl option is enabled, a sysctl option
55755 + with name "forkfail_logging" is created.
55756 +
55757 +config GRKERNSEC_TIME
55758 + bool "Time change logging"
55759 + help
55760 + If you say Y here, any changes of the system clock will be logged.
55761 + If the sysctl option is enabled, a sysctl option with name
55762 + "timechange_logging" is created.
55763 +
55764 +config GRKERNSEC_PROC_IPADDR
55765 + bool "/proc/<pid>/ipaddr support"
55766 + help
55767 + If you say Y here, a new entry will be added to each /proc/<pid>
55768 + directory that contains the IP address of the person using the task.
55769 + The IP is carried across local TCP and AF_UNIX stream sockets.
55770 + This information can be useful for IDS/IPSes to perform remote response
55771 + to a local attack. The entry is readable by only the owner of the
55772 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
55773 + the RBAC system), and thus does not create privacy concerns.
55774 +
55775 +config GRKERNSEC_RWXMAP_LOG
55776 + bool 'Denied RWX mmap/mprotect logging'
55777 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
55778 + help
55779 + If you say Y here, calls to mmap() and mprotect() with explicit
55780 + usage of PROT_WRITE and PROT_EXEC together will be logged when
55781 + denied by the PAX_MPROTECT feature. If the sysctl option is
55782 + enabled, a sysctl option with name "rwxmap_logging" is created.
55783 +
55784 +config GRKERNSEC_AUDIT_TEXTREL
55785 + bool 'ELF text relocations logging (READ HELP)'
55786 + depends on PAX_MPROTECT
55787 + help
55788 + If you say Y here, text relocations will be logged with the filename
55789 + of the offending library or binary. The purpose of the feature is
55790 + to help Linux distribution developers get rid of libraries and
55791 + binaries that need text relocations which hinder the future progress
55792 + of PaX. Only Linux distribution developers should say Y here, and
55793 + never on a production machine, as this option creates an information
55794 + leak that could aid an attacker in defeating the randomization of
55795 + a single memory region. If the sysctl option is enabled, a sysctl
55796 + option with name "audit_textrel" is created.
55797 +
55798 +endmenu
55799 +
55800 +menu "Executable Protections"
55801 +depends on GRKERNSEC
55802 +
55803 +config GRKERNSEC_DMESG
55804 + bool "Dmesg(8) restriction"
55805 + help
55806 + If you say Y here, non-root users will not be able to use dmesg(8)
55807 + to view up to the last 4kb of messages in the kernel's log buffer.
55808 + The kernel's log buffer often contains kernel addresses and other
55809 + identifying information useful to an attacker in fingerprinting a
55810 + system for a targeted exploit.
55811 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
55812 + created.
55813 +
55814 +config GRKERNSEC_HARDEN_PTRACE
55815 + bool "Deter ptrace-based process snooping"
55816 + help
55817 + If you say Y here, TTY sniffers and other malicious monitoring
55818 + programs implemented through ptrace will be defeated. If you
55819 + have been using the RBAC system, this option has already been
55820 + enabled for several years for all users, with the ability to make
55821 + fine-grained exceptions.
55822 +
55823 + This option only affects the ability of non-root users to ptrace
55824 + processes that are not a descendant of the ptracing process.
55825 + This means that strace ./binary and gdb ./binary will still work,
55826 + but attaching to arbitrary processes will not. If the sysctl
55827 + option is enabled, a sysctl option with name "harden_ptrace" is
55828 + created.
55829 +
55830 +config GRKERNSEC_PTRACE_READEXEC
55831 + bool "Require read access to ptrace sensitive binaries"
55832 + help
55833 + If you say Y here, unprivileged users will not be able to ptrace unreadable
55834 + binaries. This option is useful in environments that
55835 + remove the read bits (e.g. file mode 4711) from suid binaries to
55836 + prevent infoleaking of their contents. This option adds
55837 + consistency to the use of that file mode, as the binary could normally
55838 + be read out when run without privileges while ptracing.
55839 +
55840 + If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
55841 + is created.
55842 +
55843 +config GRKERNSEC_SETXID
55844 + bool "Enforce consistent multithreaded privileges"
55845 + help
55846 + If you say Y here, a change from a root uid to a non-root uid
55847 + in a multithreaded application will cause the resulting uids,
55848 + gids, supplementary groups, and capabilities in that thread
55849 + to be propagated to the other threads of the process. In most
55850 + cases this is unnecessary, as glibc will emulate this behavior
55851 + on behalf of the application. Other libcs do not act in the
55852 + same way, allowing the other threads of the process to continue
55853 + running with root privileges. If the sysctl option is enabled,
55854 + a sysctl option with name "consistent_setxid" is created.
55855 +
55856 +config GRKERNSEC_TPE
55857 + bool "Trusted Path Execution (TPE)"
55858 + help
55859 + If you say Y here, you will be able to choose a gid to add to the
55860 + supplementary groups of users you want to mark as "untrusted."
55861 + These users will not be able to execute any files that are not in
55862 + root-owned directories writable only by root. If the sysctl option
55863 + is enabled, a sysctl option with name "tpe" is created.
55864 +
55865 +config GRKERNSEC_TPE_ALL
55866 + bool "Partially restrict all non-root users"
55867 + depends on GRKERNSEC_TPE
55868 + help
55869 + If you say Y here, all non-root users will be covered under
55870 + a weaker TPE restriction. This is separate from, and in addition to,
55871 + the main TPE options that you have selected elsewhere. Thus, if a
55872 + "trusted" GID is chosen, this restriction applies to even that GID.
55873 + Under this restriction, all non-root users will only be allowed to
55874 + execute files in directories they own that are not group or
55875 + world-writable, or in directories owned by root and writable only by
55876 + root. If the sysctl option is enabled, a sysctl option with name
55877 + "tpe_restrict_all" is created.
55878 +
55879 +config GRKERNSEC_TPE_INVERT
55880 + bool "Invert GID option"
55881 + depends on GRKERNSEC_TPE
55882 + help
55883 + If you say Y here, the group you specify in the TPE configuration will
55884 + decide what group TPE restrictions will be *disabled* for. This
55885 + option is useful if you want TPE restrictions to be applied to most
55886 + users on the system. If the sysctl option is enabled, a sysctl option
55887 + with name "tpe_invert" is created. Unlike other sysctl options, this
55888 + entry will default to on for backward-compatibility.
55889 +
55890 +config GRKERNSEC_TPE_GID
55891 + int "GID for untrusted users"
55892 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
55893 + default 1005
55894 + help
55895 + Setting this GID determines what group TPE restrictions will be
55896 + *enabled* for. If the sysctl option is enabled, a sysctl option
55897 + with name "tpe_gid" is created.
55898 +
55899 +config GRKERNSEC_TPE_GID
55900 + int "GID for trusted users"
55901 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
55902 + default 1005
55903 + help
55904 + Setting this GID determines what group TPE restrictions will be
55905 + *disabled* for. If the sysctl option is enabled, a sysctl option
55906 + with name "tpe_gid" is created.
55907 +
55908 +endmenu
55909 +menu "Network Protections"
55910 +depends on GRKERNSEC
55911 +
55912 +config GRKERNSEC_RANDNET
55913 + bool "Larger entropy pools"
55914 + help
55915 + If you say Y here, the entropy pools used for many features of Linux
55916 + and grsecurity will be doubled in size. Since several grsecurity
55917 + features use additional randomness, it is recommended that you say Y
55918 + here. Saying Y here has a similar effect as modifying
55919 + /proc/sys/kernel/random/poolsize.
55920 +
55921 +config GRKERNSEC_BLACKHOLE
55922 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
55923 + depends on NET
55924 + help
55925 + If you say Y here, neither TCP resets nor ICMP
55926 + destination-unreachable packets will be sent in response to packets
55927 + sent to ports for which no associated listening process exists.
55928 + This feature supports both IPV4 and IPV6 and exempts the
55929 + loopback interface from blackholing. Enabling this feature
55930 + makes a host more resilient to DoS attacks and reduces network
55931 + visibility against scanners.
55932 +
55933 + The blackhole feature as-implemented is equivalent to the FreeBSD
55934 + blackhole feature, as it prevents RST responses to all packets, not
55935 + just SYNs. Under most application behavior this causes no
55936 + problems, but applications (like haproxy) may not close certain
55937 + connections in a way that cleanly terminates them on the remote
55938 + end, leaving the remote host in LAST_ACK state. Because of this
55939 + side-effect and to prevent intentional LAST_ACK DoSes, this
55940 + feature also adds automatic mitigation against such attacks.
55941 + The mitigation drastically reduces the amount of time a socket
55942 + can spend in LAST_ACK state. If you're using haproxy and not
55943 + all servers it connects to have this option enabled, consider
55944 + disabling this feature on the haproxy host.
55945 +
55946 + If the sysctl option is enabled, two sysctl options with names
55947 + "ip_blackhole" and "lastack_retries" will be created.
55948 + While "ip_blackhole" takes the standard zero/non-zero on/off
55949 + toggle, "lastack_retries" uses the same kinds of values as
55950 + "tcp_retries1" and "tcp_retries2". The default value of 4
55951 + prevents a socket from lasting more than 45 seconds in LAST_ACK
55952 + state.
55953 +
55954 +config GRKERNSEC_SOCKET
55955 + bool "Socket restrictions"
55956 + depends on NET
55957 + help
55958 + If you say Y here, you will be able to choose from several options.
55959 + If you assign a GID on your system and add it to the supplementary
55960 + groups of users you want to restrict socket access to, this patch
55961 + will perform up to three things, based on the option(s) you choose.
55962 +
55963 +config GRKERNSEC_SOCKET_ALL
55964 + bool "Deny any sockets to group"
55965 + depends on GRKERNSEC_SOCKET
55966 + help
55967 + If you say Y here, you will be able to choose a GID whose users will
55968 + be unable to connect to other hosts from your machine or run server
55969 + applications from your machine. If the sysctl option is enabled, a
55970 + sysctl option with name "socket_all" is created.
55971 +
55972 +config GRKERNSEC_SOCKET_ALL_GID
55973 + int "GID to deny all sockets for"
55974 + depends on GRKERNSEC_SOCKET_ALL
55975 + default 1004
55976 + help
55977 + Here you can choose the GID to disable socket access for. Remember to
55978 + add the users you want socket access disabled for to the GID
55979 + specified here. If the sysctl option is enabled, a sysctl option
55980 + with name "socket_all_gid" is created.
55981 +
55982 +config GRKERNSEC_SOCKET_CLIENT
55983 + bool "Deny client sockets to group"
55984 + depends on GRKERNSEC_SOCKET
55985 + help
55986 + If you say Y here, you will be able to choose a GID whose users will
55987 + be unable to connect to other hosts from your machine, but will be
55988 + able to run servers. If this option is enabled, all users in the group
55989 + you specify will have to use passive mode when initiating ftp transfers
55990 + from the shell on your machine. If the sysctl option is enabled, a
55991 + sysctl option with name "socket_client" is created.
55992 +
55993 +config GRKERNSEC_SOCKET_CLIENT_GID
55994 + int "GID to deny client sockets for"
55995 + depends on GRKERNSEC_SOCKET_CLIENT
55996 + default 1003
55997 + help
55998 + Here you can choose the GID to disable client socket access for.
55999 + Remember to add the users you want client socket access disabled for to
56000 + the GID specified here. If the sysctl option is enabled, a sysctl
56001 + option with name "socket_client_gid" is created.
56002 +
56003 +config GRKERNSEC_SOCKET_SERVER
56004 + bool "Deny server sockets to group"
56005 + depends on GRKERNSEC_SOCKET
56006 + help
56007 + If you say Y here, you will be able to choose a GID whose users will
56008 + be unable to run server applications from your machine. If the sysctl
56009 + option is enabled, a sysctl option with name "socket_server" is created.
56010 +
56011 +config GRKERNSEC_SOCKET_SERVER_GID
56012 + int "GID to deny server sockets for"
56013 + depends on GRKERNSEC_SOCKET_SERVER
56014 + default 1002
56015 + help
56016 + Here you can choose the GID to disable server socket access for.
56017 + Remember to add the users you want server socket access disabled for to
56018 + the GID specified here. If the sysctl option is enabled, a sysctl
56019 + option with name "socket_server_gid" is created.
56020 +
56021 +endmenu
56022 +menu "Sysctl support"
56023 +depends on GRKERNSEC && SYSCTL
56024 +
56025 +config GRKERNSEC_SYSCTL
56026 + bool "Sysctl support"
56027 + help
56028 + If you say Y here, you will be able to change the options that
56029 + grsecurity runs with at bootup, without having to recompile your
56030 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
56031 + to enable (1) or disable (0) various features. All the sysctl entries
56032 + are mutable until the "grsec_lock" entry is set to a non-zero value.
56033 + All features enabled in the kernel configuration are disabled at boot
56034 + if you do not say Y to the "Turn on features by default" option.
56035 + All options should be set at startup, and the grsec_lock entry should
56036 + be set to a non-zero value after all the options are set.
56037 + *THIS IS EXTREMELY IMPORTANT*
56038 +
56039 +config GRKERNSEC_SYSCTL_DISTRO
56040 + bool "Extra sysctl support for distro makers (READ HELP)"
56041 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
56042 + help
56043 + If you say Y here, additional sysctl options will be created
56044 + for features that affect processes running as root. Therefore,
56045 + it is critical when using this option that the grsec_lock entry be
56046 + enabled after boot. Only distros with prebuilt kernel packages
56047 + with this option enabled that can ensure grsec_lock is enabled
56048 + after boot should use this option.
56049 + *Failure to set grsec_lock after boot makes all grsec features
56050 + this option covers useless*
56051 +
56052 + Currently this option creates the following sysctl entries:
56053 + "Disable Privileged I/O": "disable_priv_io"
56054 +
56055 +config GRKERNSEC_SYSCTL_ON
56056 + bool "Turn on features by default"
56057 + depends on GRKERNSEC_SYSCTL
56058 + help
56059 + If you say Y here, instead of having all features enabled in the
56060 + kernel configuration disabled at boot time, the features will be
56061 + enabled at boot time. It is recommended you say Y here unless
56062 + there is some reason you would want all sysctl-tunable features to
56063 + be disabled by default. As mentioned elsewhere, it is important
56064 + to enable the grsec_lock entry once you have finished modifying
56065 + the sysctl entries.
56066 +
56067 +endmenu
56068 +menu "Logging Options"
56069 +depends on GRKERNSEC
56070 +
56071 +config GRKERNSEC_FLOODTIME
56072 + int "Seconds in between log messages (minimum)"
56073 + default 10
56074 + help
56075 + This option allows you to enforce the number of seconds between
56076 + grsecurity log messages. The default should be suitable for most
56077 + people, however, if you choose to change it, choose a value small enough
56078 + to allow informative logs to be produced, but large enough to
56079 + prevent flooding.
56080 +
56081 +config GRKERNSEC_FLOODBURST
56082 + int "Number of messages in a burst (maximum)"
56083 + default 6
56084 + help
56085 + This option allows you to choose the maximum number of messages allowed
56086 + within the flood time interval you chose in a separate option. The
56087 + default should be suitable for most people, however if you find that
56088 + many of your logs are being interpreted as flooding, you may want to
56089 + raise this value.
56090 +
56091 +endmenu
56092 +
56093 +endmenu
56094 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
56095 new file mode 100644
56096 index 0000000..be9ae3a
56097 --- /dev/null
56098 +++ b/grsecurity/Makefile
56099 @@ -0,0 +1,36 @@
56100 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
56101 +# during 2001-2009 it has been completely redesigned by Brad Spengler
56102 +# into an RBAC system
56103 +#
56104 +# All code in this directory and various hooks inserted throughout the kernel
56105 +# are copyright Brad Spengler - Open Source Security, Inc., and released
56106 +# under the GPL v2 or higher
56107 +
56108 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
56109 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
56110 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
56111 +
56112 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
56113 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
56114 + gracl_learn.o grsec_log.o
56115 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
56116 +
56117 +ifdef CONFIG_NET
56118 +obj-y += grsec_sock.o
56119 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
56120 +endif
56121 +
56122 +ifndef CONFIG_GRKERNSEC
56123 +obj-y += grsec_disabled.o
56124 +endif
56125 +
56126 +ifdef CONFIG_GRKERNSEC_HIDESYM
56127 +extra-y := grsec_hidesym.o
56128 +$(obj)/grsec_hidesym.o:
56129 + @-chmod -f 500 /boot
56130 + @-chmod -f 500 /lib/modules
56131 + @-chmod -f 500 /lib64/modules
56132 + @-chmod -f 500 /lib32/modules
56133 + @-chmod -f 700 .
56134 + @echo ' grsec: protected kernel image paths'
56135 +endif
56136 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
56137 new file mode 100644
56138 index 0000000..71cb167
56139 --- /dev/null
56140 +++ b/grsecurity/gracl.c
56141 @@ -0,0 +1,4140 @@
56142 +#include <linux/kernel.h>
56143 +#include <linux/module.h>
56144 +#include <linux/sched.h>
56145 +#include <linux/mm.h>
56146 +#include <linux/file.h>
56147 +#include <linux/fs.h>
56148 +#include <linux/namei.h>
56149 +#include <linux/mount.h>
56150 +#include <linux/tty.h>
56151 +#include <linux/proc_fs.h>
56152 +#include <linux/smp_lock.h>
56153 +#include <linux/slab.h>
56154 +#include <linux/vmalloc.h>
56155 +#include <linux/types.h>
56156 +#include <linux/sysctl.h>
56157 +#include <linux/netdevice.h>
56158 +#include <linux/ptrace.h>
56159 +#include <linux/gracl.h>
56160 +#include <linux/gralloc.h>
56161 +#include <linux/security.h>
56162 +#include <linux/grinternal.h>
56163 +#include <linux/pid_namespace.h>
56164 +#include <linux/fdtable.h>
56165 +#include <linux/percpu.h>
56166 +
56167 +#include <asm/uaccess.h>
56168 +#include <asm/errno.h>
56169 +#include <asm/mman.h>
56170 +
56171 +static struct acl_role_db acl_role_set;
56172 +static struct name_db name_set;
56173 +static struct inodev_db inodev_set;
56174 +
56175 +/* for keeping track of userspace pointers used for subjects, so we
56176 + can share references in the kernel as well
56177 +*/
56178 +
56179 +static struct dentry *real_root;
56180 +static struct vfsmount *real_root_mnt;
56181 +
56182 +static struct acl_subj_map_db subj_map_set;
56183 +
56184 +static struct acl_role_label *default_role;
56185 +
56186 +static struct acl_role_label *role_list;
56187 +
56188 +static u16 acl_sp_role_value;
56189 +
56190 +extern char *gr_shared_page[4];
56191 +static DEFINE_MUTEX(gr_dev_mutex);
56192 +DEFINE_RWLOCK(gr_inode_lock);
56193 +
56194 +struct gr_arg *gr_usermode;
56195 +
56196 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
56197 +
56198 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
56199 +extern void gr_clear_learn_entries(void);
56200 +
56201 +#ifdef CONFIG_GRKERNSEC_RESLOG
56202 +extern void gr_log_resource(const struct task_struct *task,
56203 + const int res, const unsigned long wanted, const int gt);
56204 +#endif
56205 +
56206 +unsigned char *gr_system_salt;
56207 +unsigned char *gr_system_sum;
56208 +
56209 +static struct sprole_pw **acl_special_roles = NULL;
56210 +static __u16 num_sprole_pws = 0;
56211 +
56212 +static struct acl_role_label *kernel_role = NULL;
56213 +
56214 +static unsigned int gr_auth_attempts = 0;
56215 +static unsigned long gr_auth_expires = 0UL;
56216 +
56217 +#ifdef CONFIG_NET
56218 +extern struct vfsmount *sock_mnt;
56219 +#endif
56220 +extern struct vfsmount *pipe_mnt;
56221 +extern struct vfsmount *shm_mnt;
56222 +#ifdef CONFIG_HUGETLBFS
56223 +extern struct vfsmount *hugetlbfs_vfsmount;
56224 +#endif
56225 +
56226 +static struct acl_object_label *fakefs_obj_rw;
56227 +static struct acl_object_label *fakefs_obj_rwx;
56228 +
56229 +extern int gr_init_uidset(void);
56230 +extern void gr_free_uidset(void);
56231 +extern void gr_remove_uid(uid_t uid);
56232 +extern int gr_find_uid(uid_t uid);
56233 +
56234 +__inline__ int
56235 +gr_acl_is_enabled(void)
56236 +{
56237 + return (gr_status & GR_READY);
56238 +}
56239 +
56240 +#ifdef CONFIG_BTRFS_FS
56241 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
56242 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
56243 +#endif
56244 +
56245 +static inline dev_t __get_dev(const struct dentry *dentry)
56246 +{
56247 +#ifdef CONFIG_BTRFS_FS
56248 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
56249 + return get_btrfs_dev_from_inode(dentry->d_inode);
56250 + else
56251 +#endif
56252 + return dentry->d_inode->i_sb->s_dev;
56253 +}
56254 +
56255 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
56256 +{
56257 + return __get_dev(dentry);
56258 +}
56259 +
56260 +static char gr_task_roletype_to_char(struct task_struct *task)
56261 +{
56262 + switch (task->role->roletype &
56263 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
56264 + GR_ROLE_SPECIAL)) {
56265 + case GR_ROLE_DEFAULT:
56266 + return 'D';
56267 + case GR_ROLE_USER:
56268 + return 'U';
56269 + case GR_ROLE_GROUP:
56270 + return 'G';
56271 + case GR_ROLE_SPECIAL:
56272 + return 'S';
56273 + }
56274 +
56275 + return 'X';
56276 +}
56277 +
56278 +char gr_roletype_to_char(void)
56279 +{
56280 + return gr_task_roletype_to_char(current);
56281 +}
56282 +
56283 +__inline__ int
56284 +gr_acl_tpe_check(void)
56285 +{
56286 + if (unlikely(!(gr_status & GR_READY)))
56287 + return 0;
56288 + if (current->role->roletype & GR_ROLE_TPE)
56289 + return 1;
56290 + else
56291 + return 0;
56292 +}
56293 +
56294 +int
56295 +gr_handle_rawio(const struct inode *inode)
56296 +{
56297 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56298 + if (inode && S_ISBLK(inode->i_mode) &&
56299 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
56300 + !capable(CAP_SYS_RAWIO))
56301 + return 1;
56302 +#endif
56303 + return 0;
56304 +}
56305 +
56306 +static int
56307 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
56308 +{
56309 + if (likely(lena != lenb))
56310 + return 0;
56311 +
56312 + return !memcmp(a, b, lena);
56313 +}
56314 +
56315 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
56316 +{
56317 + *buflen -= namelen;
56318 + if (*buflen < 0)
56319 + return -ENAMETOOLONG;
56320 + *buffer -= namelen;
56321 + memcpy(*buffer, str, namelen);
56322 + return 0;
56323 +}
56324 +
56325 +/* this must be called with vfsmount_lock and dcache_lock held */
56326 +
56327 +static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
56328 + struct dentry *root, struct vfsmount *rootmnt,
56329 + char *buffer, int buflen)
56330 +{
56331 + char * end = buffer+buflen;
56332 + char * retval;
56333 + int namelen;
56334 +
56335 + *--end = '\0';
56336 + buflen--;
56337 +
56338 + if (buflen < 1)
56339 + goto Elong;
56340 + /* Get '/' right */
56341 + retval = end-1;
56342 + *retval = '/';
56343 +
56344 + for (;;) {
56345 + struct dentry * parent;
56346 +
56347 + if (dentry == root && vfsmnt == rootmnt)
56348 + break;
56349 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
56350 + /* Global root? */
56351 + if (vfsmnt->mnt_parent == vfsmnt)
56352 + goto global_root;
56353 + dentry = vfsmnt->mnt_mountpoint;
56354 + vfsmnt = vfsmnt->mnt_parent;
56355 + continue;
56356 + }
56357 + parent = dentry->d_parent;
56358 + prefetch(parent);
56359 + namelen = dentry->d_name.len;
56360 + buflen -= namelen + 1;
56361 + if (buflen < 0)
56362 + goto Elong;
56363 + end -= namelen;
56364 + memcpy(end, dentry->d_name.name, namelen);
56365 + *--end = '/';
56366 + retval = end;
56367 + dentry = parent;
56368 + }
56369 +
56370 +out:
56371 + return retval;
56372 +
56373 +global_root:
56374 + namelen = dentry->d_name.len;
56375 + buflen -= namelen;
56376 + if (buflen < 0)
56377 + goto Elong;
56378 + retval -= namelen-1; /* hit the slash */
56379 + memcpy(retval, dentry->d_name.name, namelen);
56380 + goto out;
56381 +Elong:
56382 + retval = ERR_PTR(-ENAMETOOLONG);
56383 + goto out;
56384 +}
56385 +
56386 +static char *
56387 +gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
56388 + struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
56389 +{
56390 + char *retval;
56391 +
56392 + retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
56393 + if (unlikely(IS_ERR(retval)))
56394 + retval = strcpy(buf, "<path too long>");
56395 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
56396 + retval[1] = '\0';
56397 +
56398 + return retval;
56399 +}
56400 +
56401 +static char *
56402 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
56403 + char *buf, int buflen)
56404 +{
56405 + char *res;
56406 +
56407 + /* we can use real_root, real_root_mnt, because this is only called
56408 + by the RBAC system */
56409 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
56410 +
56411 + return res;
56412 +}
56413 +
56414 +static char *
56415 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
56416 + char *buf, int buflen)
56417 +{
56418 + char *res;
56419 + struct dentry *root;
56420 + struct vfsmount *rootmnt;
56421 + struct task_struct *reaper = &init_task;
56422 +
56423 + /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
56424 + read_lock(&reaper->fs->lock);
56425 + root = dget(reaper->fs->root.dentry);
56426 + rootmnt = mntget(reaper->fs->root.mnt);
56427 + read_unlock(&reaper->fs->lock);
56428 +
56429 + spin_lock(&dcache_lock);
56430 + spin_lock(&vfsmount_lock);
56431 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
56432 + spin_unlock(&vfsmount_lock);
56433 + spin_unlock(&dcache_lock);
56434 +
56435 + dput(root);
56436 + mntput(rootmnt);
56437 + return res;
56438 +}
56439 +
56440 +static char *
56441 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
56442 +{
56443 + char *ret;
56444 + spin_lock(&dcache_lock);
56445 + spin_lock(&vfsmount_lock);
56446 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
56447 + PAGE_SIZE);
56448 + spin_unlock(&vfsmount_lock);
56449 + spin_unlock(&dcache_lock);
56450 + return ret;
56451 +}
56452 +
56453 +static char *
56454 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
56455 +{
56456 + char *ret;
56457 + char *buf;
56458 + int buflen;
56459 +
56460 + spin_lock(&dcache_lock);
56461 + spin_lock(&vfsmount_lock);
56462 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
56463 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
56464 + buflen = (int)(ret - buf);
56465 + if (buflen >= 5)
56466 + prepend(&ret, &buflen, "/proc", 5);
56467 + else
56468 + ret = strcpy(buf, "<path too long>");
56469 + spin_unlock(&vfsmount_lock);
56470 + spin_unlock(&dcache_lock);
56471 + return ret;
56472 +}
56473 +
56474 +char *
56475 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
56476 +{
56477 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
56478 + PAGE_SIZE);
56479 +}
56480 +
56481 +char *
56482 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
56483 +{
56484 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
56485 + PAGE_SIZE);
56486 +}
56487 +
56488 +char *
56489 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
56490 +{
56491 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
56492 + PAGE_SIZE);
56493 +}
56494 +
56495 +char *
56496 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
56497 +{
56498 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
56499 + PAGE_SIZE);
56500 +}
56501 +
56502 +char *
56503 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
56504 +{
56505 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
56506 + PAGE_SIZE);
56507 +}
56508 +
56509 +__inline__ __u32
56510 +to_gr_audit(const __u32 reqmode)
56511 +{
56512 + /* masks off auditable permission flags, then shifts them to create
56513 + auditing flags, and adds the special case of append auditing if
56514 + we're requesting write */
56515 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
56516 +}
56517 +
56518 +struct acl_subject_label *
56519 +lookup_subject_map(const struct acl_subject_label *userp)
56520 +{
56521 + unsigned int index = shash(userp, subj_map_set.s_size);
56522 + struct subject_map *match;
56523 +
56524 + match = subj_map_set.s_hash[index];
56525 +
56526 + while (match && match->user != userp)
56527 + match = match->next;
56528 +
56529 + if (match != NULL)
56530 + return match->kernel;
56531 + else
56532 + return NULL;
56533 +}
56534 +
56535 +static void
56536 +insert_subj_map_entry(struct subject_map *subjmap)
56537 +{
56538 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
56539 + struct subject_map **curr;
56540 +
56541 + subjmap->prev = NULL;
56542 +
56543 + curr = &subj_map_set.s_hash[index];
56544 + if (*curr != NULL)
56545 + (*curr)->prev = subjmap;
56546 +
56547 + subjmap->next = *curr;
56548 + *curr = subjmap;
56549 +
56550 + return;
56551 +}
56552 +
56553 +static struct acl_role_label *
56554 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
56555 + const gid_t gid)
56556 +{
56557 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
56558 + struct acl_role_label *match;
56559 + struct role_allowed_ip *ipp;
56560 + unsigned int x;
56561 + u32 curr_ip = task->signal->curr_ip;
56562 +
56563 + task->signal->saved_ip = curr_ip;
56564 +
56565 + match = acl_role_set.r_hash[index];
56566 +
56567 + while (match) {
56568 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
56569 + for (x = 0; x < match->domain_child_num; x++) {
56570 + if (match->domain_children[x] == uid)
56571 + goto found;
56572 + }
56573 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
56574 + break;
56575 + match = match->next;
56576 + }
56577 +found:
56578 + if (match == NULL) {
56579 + try_group:
56580 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
56581 + match = acl_role_set.r_hash[index];
56582 +
56583 + while (match) {
56584 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
56585 + for (x = 0; x < match->domain_child_num; x++) {
56586 + if (match->domain_children[x] == gid)
56587 + goto found2;
56588 + }
56589 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
56590 + break;
56591 + match = match->next;
56592 + }
56593 +found2:
56594 + if (match == NULL)
56595 + match = default_role;
56596 + if (match->allowed_ips == NULL)
56597 + return match;
56598 + else {
56599 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
56600 + if (likely
56601 + ((ntohl(curr_ip) & ipp->netmask) ==
56602 + (ntohl(ipp->addr) & ipp->netmask)))
56603 + return match;
56604 + }
56605 + match = default_role;
56606 + }
56607 + } else if (match->allowed_ips == NULL) {
56608 + return match;
56609 + } else {
56610 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
56611 + if (likely
56612 + ((ntohl(curr_ip) & ipp->netmask) ==
56613 + (ntohl(ipp->addr) & ipp->netmask)))
56614 + return match;
56615 + }
56616 + goto try_group;
56617 + }
56618 +
56619 + return match;
56620 +}
56621 +
56622 +struct acl_subject_label *
56623 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
56624 + const struct acl_role_label *role)
56625 +{
56626 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
56627 + struct acl_subject_label *match;
56628 +
56629 + match = role->subj_hash[index];
56630 +
56631 + while (match && (match->inode != ino || match->device != dev ||
56632 + (match->mode & GR_DELETED))) {
56633 + match = match->next;
56634 + }
56635 +
56636 + if (match && !(match->mode & GR_DELETED))
56637 + return match;
56638 + else
56639 + return NULL;
56640 +}
56641 +
56642 +struct acl_subject_label *
56643 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
56644 + const struct acl_role_label *role)
56645 +{
56646 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
56647 + struct acl_subject_label *match;
56648 +
56649 + match = role->subj_hash[index];
56650 +
56651 + while (match && (match->inode != ino || match->device != dev ||
56652 + !(match->mode & GR_DELETED))) {
56653 + match = match->next;
56654 + }
56655 +
56656 + if (match && (match->mode & GR_DELETED))
56657 + return match;
56658 + else
56659 + return NULL;
56660 +}
56661 +
56662 +static struct acl_object_label *
56663 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
56664 + const struct acl_subject_label *subj)
56665 +{
56666 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
56667 + struct acl_object_label *match;
56668 +
56669 + match = subj->obj_hash[index];
56670 +
56671 + while (match && (match->inode != ino || match->device != dev ||
56672 + (match->mode & GR_DELETED))) {
56673 + match = match->next;
56674 + }
56675 +
56676 + if (match && !(match->mode & GR_DELETED))
56677 + return match;
56678 + else
56679 + return NULL;
56680 +}
56681 +
56682 +static struct acl_object_label *
56683 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
56684 + const struct acl_subject_label *subj)
56685 +{
56686 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
56687 + struct acl_object_label *match;
56688 +
56689 + match = subj->obj_hash[index];
56690 +
56691 + while (match && (match->inode != ino || match->device != dev ||
56692 + !(match->mode & GR_DELETED))) {
56693 + match = match->next;
56694 + }
56695 +
56696 + if (match && (match->mode & GR_DELETED))
56697 + return match;
56698 +
56699 + match = subj->obj_hash[index];
56700 +
56701 + while (match && (match->inode != ino || match->device != dev ||
56702 + (match->mode & GR_DELETED))) {
56703 + match = match->next;
56704 + }
56705 +
56706 + if (match && !(match->mode & GR_DELETED))
56707 + return match;
56708 + else
56709 + return NULL;
56710 +}
56711 +
56712 +static struct name_entry *
56713 +lookup_name_entry(const char *name)
56714 +{
56715 + unsigned int len = strlen(name);
56716 + unsigned int key = full_name_hash(name, len);
56717 + unsigned int index = key % name_set.n_size;
56718 + struct name_entry *match;
56719 +
56720 + match = name_set.n_hash[index];
56721 +
56722 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
56723 + match = match->next;
56724 +
56725 + return match;
56726 +}
56727 +
56728 +static struct name_entry *
56729 +lookup_name_entry_create(const char *name)
56730 +{
56731 + unsigned int len = strlen(name);
56732 + unsigned int key = full_name_hash(name, len);
56733 + unsigned int index = key % name_set.n_size;
56734 + struct name_entry *match;
56735 +
56736 + match = name_set.n_hash[index];
56737 +
56738 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
56739 + !match->deleted))
56740 + match = match->next;
56741 +
56742 + if (match && match->deleted)
56743 + return match;
56744 +
56745 + match = name_set.n_hash[index];
56746 +
56747 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
56748 + match->deleted))
56749 + match = match->next;
56750 +
56751 + if (match && !match->deleted)
56752 + return match;
56753 + else
56754 + return NULL;
56755 +}
56756 +
56757 +static struct inodev_entry *
56758 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
56759 +{
56760 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
56761 + struct inodev_entry *match;
56762 +
56763 + match = inodev_set.i_hash[index];
56764 +
56765 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
56766 + match = match->next;
56767 +
56768 + return match;
56769 +}
56770 +
56771 +static void
56772 +insert_inodev_entry(struct inodev_entry *entry)
56773 +{
56774 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
56775 + inodev_set.i_size);
56776 + struct inodev_entry **curr;
56777 +
56778 + entry->prev = NULL;
56779 +
56780 + curr = &inodev_set.i_hash[index];
56781 + if (*curr != NULL)
56782 + (*curr)->prev = entry;
56783 +
56784 + entry->next = *curr;
56785 + *curr = entry;
56786 +
56787 + return;
56788 +}
56789 +
56790 +static void
56791 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
56792 +{
56793 + unsigned int index =
56794 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
56795 + struct acl_role_label **curr;
56796 + struct acl_role_label *tmp;
56797 +
56798 + curr = &acl_role_set.r_hash[index];
56799 +
56800 + /* if role was already inserted due to domains and already has
56801 + a role in the same bucket as it attached, then we need to
56802 + combine these two buckets
56803 + */
56804 + if (role->next) {
56805 + tmp = role->next;
56806 + while (tmp->next)
56807 + tmp = tmp->next;
56808 + tmp->next = *curr;
56809 + } else
56810 + role->next = *curr;
56811 + *curr = role;
56812 +
56813 + return;
56814 +}
56815 +
56816 +static void
56817 +insert_acl_role_label(struct acl_role_label *role)
56818 +{
56819 + int i;
56820 +
56821 + if (role_list == NULL) {
56822 + role_list = role;
56823 + role->prev = NULL;
56824 + } else {
56825 + role->prev = role_list;
56826 + role_list = role;
56827 + }
56828 +
56829 + /* used for hash chains */
56830 + role->next = NULL;
56831 +
56832 + if (role->roletype & GR_ROLE_DOMAIN) {
56833 + for (i = 0; i < role->domain_child_num; i++)
56834 + __insert_acl_role_label(role, role->domain_children[i]);
56835 + } else
56836 + __insert_acl_role_label(role, role->uidgid);
56837 +}
56838 +
56839 +static int
56840 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
56841 +{
56842 + struct name_entry **curr, *nentry;
56843 + struct inodev_entry *ientry;
56844 + unsigned int len = strlen(name);
56845 + unsigned int key = full_name_hash(name, len);
56846 + unsigned int index = key % name_set.n_size;
56847 +
56848 + curr = &name_set.n_hash[index];
56849 +
56850 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
56851 + curr = &((*curr)->next);
56852 +
56853 + if (*curr != NULL)
56854 + return 1;
56855 +
56856 + nentry = acl_alloc(sizeof (struct name_entry));
56857 + if (nentry == NULL)
56858 + return 0;
56859 + ientry = acl_alloc(sizeof (struct inodev_entry));
56860 + if (ientry == NULL)
56861 + return 0;
56862 + ientry->nentry = nentry;
56863 +
56864 + nentry->key = key;
56865 + nentry->name = name;
56866 + nentry->inode = inode;
56867 + nentry->device = device;
56868 + nentry->len = len;
56869 + nentry->deleted = deleted;
56870 +
56871 + nentry->prev = NULL;
56872 + curr = &name_set.n_hash[index];
56873 + if (*curr != NULL)
56874 + (*curr)->prev = nentry;
56875 + nentry->next = *curr;
56876 + *curr = nentry;
56877 +
56878 + /* insert us into the table searchable by inode/dev */
56879 + insert_inodev_entry(ientry);
56880 +
56881 + return 1;
56882 +}
56883 +
56884 +static void
56885 +insert_acl_obj_label(struct acl_object_label *obj,
56886 + struct acl_subject_label *subj)
56887 +{
56888 + unsigned int index =
56889 + fhash(obj->inode, obj->device, subj->obj_hash_size);
56890 + struct acl_object_label **curr;
56891 +
56892 +
56893 + obj->prev = NULL;
56894 +
56895 + curr = &subj->obj_hash[index];
56896 + if (*curr != NULL)
56897 + (*curr)->prev = obj;
56898 +
56899 + obj->next = *curr;
56900 + *curr = obj;
56901 +
56902 + return;
56903 +}
56904 +
56905 +static void
56906 +insert_acl_subj_label(struct acl_subject_label *obj,
56907 + struct acl_role_label *role)
56908 +{
56909 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
56910 + struct acl_subject_label **curr;
56911 +
56912 + obj->prev = NULL;
56913 +
56914 + curr = &role->subj_hash[index];
56915 + if (*curr != NULL)
56916 + (*curr)->prev = obj;
56917 +
56918 + obj->next = *curr;
56919 + *curr = obj;
56920 +
56921 + return;
56922 +}
56923 +
56924 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
56925 +
56926 +static void *
56927 +create_table(__u32 * len, int elementsize)
56928 +{
56929 + unsigned int table_sizes[] = {
56930 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
56931 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
56932 + 4194301, 8388593, 16777213, 33554393, 67108859
56933 + };
56934 + void *newtable = NULL;
56935 + unsigned int pwr = 0;
56936 +
56937 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
56938 + table_sizes[pwr] <= *len)
56939 + pwr++;
56940 +
56941 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
56942 + return newtable;
56943 +
56944 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
56945 + newtable =
56946 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
56947 + else
56948 + newtable = vmalloc(table_sizes[pwr] * elementsize);
56949 +
56950 + *len = table_sizes[pwr];
56951 +
56952 + return newtable;
56953 +}
56954 +
56955 +static int
56956 +init_variables(const struct gr_arg *arg)
56957 +{
56958 + struct task_struct *reaper = &init_task;
56959 + unsigned int stacksize;
56960 +
56961 + subj_map_set.s_size = arg->role_db.num_subjects;
56962 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
56963 + name_set.n_size = arg->role_db.num_objects;
56964 + inodev_set.i_size = arg->role_db.num_objects;
56965 +
56966 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
56967 + !name_set.n_size || !inodev_set.i_size)
56968 + return 1;
56969 +
56970 + if (!gr_init_uidset())
56971 + return 1;
56972 +
56973 + /* set up the stack that holds allocation info */
56974 +
56975 + stacksize = arg->role_db.num_pointers + 5;
56976 +
56977 + if (!acl_alloc_stack_init(stacksize))
56978 + return 1;
56979 +
56980 + /* grab reference for the real root dentry and vfsmount */
56981 + read_lock(&reaper->fs->lock);
56982 + real_root = dget(reaper->fs->root.dentry);
56983 + real_root_mnt = mntget(reaper->fs->root.mnt);
56984 + read_unlock(&reaper->fs->lock);
56985 +
56986 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
56987 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
56988 +#endif
56989 +
56990 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
56991 + if (fakefs_obj_rw == NULL)
56992 + return 1;
56993 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
56994 +
56995 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
56996 + if (fakefs_obj_rwx == NULL)
56997 + return 1;
56998 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
56999 +
57000 + subj_map_set.s_hash =
57001 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
57002 + acl_role_set.r_hash =
57003 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
57004 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
57005 + inodev_set.i_hash =
57006 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
57007 +
57008 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
57009 + !name_set.n_hash || !inodev_set.i_hash)
57010 + return 1;
57011 +
57012 + memset(subj_map_set.s_hash, 0,
57013 + sizeof(struct subject_map *) * subj_map_set.s_size);
57014 + memset(acl_role_set.r_hash, 0,
57015 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
57016 + memset(name_set.n_hash, 0,
57017 + sizeof (struct name_entry *) * name_set.n_size);
57018 + memset(inodev_set.i_hash, 0,
57019 + sizeof (struct inodev_entry *) * inodev_set.i_size);
57020 +
57021 + return 0;
57022 +}
57023 +
57024 +/* free information not needed after startup
57025 + currently contains user->kernel pointer mappings for subjects
57026 +*/
57027 +
57028 +static void
57029 +free_init_variables(void)
57030 +{
57031 + __u32 i;
57032 +
57033 + if (subj_map_set.s_hash) {
57034 + for (i = 0; i < subj_map_set.s_size; i++) {
57035 + if (subj_map_set.s_hash[i]) {
57036 + kfree(subj_map_set.s_hash[i]);
57037 + subj_map_set.s_hash[i] = NULL;
57038 + }
57039 + }
57040 +
57041 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
57042 + PAGE_SIZE)
57043 + kfree(subj_map_set.s_hash);
57044 + else
57045 + vfree(subj_map_set.s_hash);
57046 + }
57047 +
57048 + return;
57049 +}
57050 +
57051 +static void
57052 +free_variables(void)
57053 +{
57054 + struct acl_subject_label *s;
57055 + struct acl_role_label *r;
57056 + struct task_struct *task, *task2;
57057 + unsigned int x;
57058 +
57059 + gr_clear_learn_entries();
57060 +
57061 + read_lock(&tasklist_lock);
57062 + do_each_thread(task2, task) {
57063 + task->acl_sp_role = 0;
57064 + task->acl_role_id = 0;
57065 + task->acl = NULL;
57066 + task->role = NULL;
57067 + } while_each_thread(task2, task);
57068 + read_unlock(&tasklist_lock);
57069 +
57070 + /* release the reference to the real root dentry and vfsmount */
57071 + if (real_root)
57072 + dput(real_root);
57073 + real_root = NULL;
57074 + if (real_root_mnt)
57075 + mntput(real_root_mnt);
57076 + real_root_mnt = NULL;
57077 +
57078 + /* free all object hash tables */
57079 +
57080 + FOR_EACH_ROLE_START(r)
57081 + if (r->subj_hash == NULL)
57082 + goto next_role;
57083 + FOR_EACH_SUBJECT_START(r, s, x)
57084 + if (s->obj_hash == NULL)
57085 + break;
57086 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
57087 + kfree(s->obj_hash);
57088 + else
57089 + vfree(s->obj_hash);
57090 + FOR_EACH_SUBJECT_END(s, x)
57091 + FOR_EACH_NESTED_SUBJECT_START(r, s)
57092 + if (s->obj_hash == NULL)
57093 + break;
57094 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
57095 + kfree(s->obj_hash);
57096 + else
57097 + vfree(s->obj_hash);
57098 + FOR_EACH_NESTED_SUBJECT_END(s)
57099 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
57100 + kfree(r->subj_hash);
57101 + else
57102 + vfree(r->subj_hash);
57103 + r->subj_hash = NULL;
57104 +next_role:
57105 + FOR_EACH_ROLE_END(r)
57106 +
57107 + acl_free_all();
57108 +
57109 + if (acl_role_set.r_hash) {
57110 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
57111 + PAGE_SIZE)
57112 + kfree(acl_role_set.r_hash);
57113 + else
57114 + vfree(acl_role_set.r_hash);
57115 + }
57116 + if (name_set.n_hash) {
57117 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
57118 + PAGE_SIZE)
57119 + kfree(name_set.n_hash);
57120 + else
57121 + vfree(name_set.n_hash);
57122 + }
57123 +
57124 + if (inodev_set.i_hash) {
57125 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
57126 + PAGE_SIZE)
57127 + kfree(inodev_set.i_hash);
57128 + else
57129 + vfree(inodev_set.i_hash);
57130 + }
57131 +
57132 + gr_free_uidset();
57133 +
57134 + memset(&name_set, 0, sizeof (struct name_db));
57135 + memset(&inodev_set, 0, sizeof (struct inodev_db));
57136 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
57137 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
57138 +
57139 + default_role = NULL;
57140 + role_list = NULL;
57141 +
57142 + return;
57143 +}
57144 +
57145 +static __u32
57146 +count_user_objs(struct acl_object_label *userp)
57147 +{
57148 + struct acl_object_label o_tmp;
57149 + __u32 num = 0;
57150 +
57151 + while (userp) {
57152 + if (copy_from_user(&o_tmp, userp,
57153 + sizeof (struct acl_object_label)))
57154 + break;
57155 +
57156 + userp = o_tmp.prev;
57157 + num++;
57158 + }
57159 +
57160 + return num;
57161 +}
57162 +
57163 +static struct acl_subject_label *
57164 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
57165 +
57166 +static int
57167 +copy_user_glob(struct acl_object_label *obj)
57168 +{
57169 + struct acl_object_label *g_tmp, **guser;
57170 + unsigned int len;
57171 + char *tmp;
57172 +
57173 + if (obj->globbed == NULL)
57174 + return 0;
57175 +
57176 + guser = &obj->globbed;
57177 + while (*guser) {
57178 + g_tmp = (struct acl_object_label *)
57179 + acl_alloc(sizeof (struct acl_object_label));
57180 + if (g_tmp == NULL)
57181 + return -ENOMEM;
57182 +
57183 + if (copy_from_user(g_tmp, *guser,
57184 + sizeof (struct acl_object_label)))
57185 + return -EFAULT;
57186 +
57187 + len = strnlen_user(g_tmp->filename, PATH_MAX);
57188 +
57189 + if (!len || len >= PATH_MAX)
57190 + return -EINVAL;
57191 +
57192 + if ((tmp = (char *) acl_alloc(len)) == NULL)
57193 + return -ENOMEM;
57194 +
57195 + if (copy_from_user(tmp, g_tmp->filename, len))
57196 + return -EFAULT;
57197 + tmp[len-1] = '\0';
57198 + g_tmp->filename = tmp;
57199 +
57200 + *guser = g_tmp;
57201 + guser = &(g_tmp->next);
57202 + }
57203 +
57204 + return 0;
57205 +}
57206 +
57207 +static int
57208 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
57209 + struct acl_role_label *role)
57210 +{
57211 + struct acl_object_label *o_tmp;
57212 + unsigned int len;
57213 + int ret;
57214 + char *tmp;
57215 +
57216 + while (userp) {
57217 + if ((o_tmp = (struct acl_object_label *)
57218 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
57219 + return -ENOMEM;
57220 +
57221 + if (copy_from_user(o_tmp, userp,
57222 + sizeof (struct acl_object_label)))
57223 + return -EFAULT;
57224 +
57225 + userp = o_tmp->prev;
57226 +
57227 + len = strnlen_user(o_tmp->filename, PATH_MAX);
57228 +
57229 + if (!len || len >= PATH_MAX)
57230 + return -EINVAL;
57231 +
57232 + if ((tmp = (char *) acl_alloc(len)) == NULL)
57233 + return -ENOMEM;
57234 +
57235 + if (copy_from_user(tmp, o_tmp->filename, len))
57236 + return -EFAULT;
57237 + tmp[len-1] = '\0';
57238 + o_tmp->filename = tmp;
57239 +
57240 + insert_acl_obj_label(o_tmp, subj);
57241 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
57242 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
57243 + return -ENOMEM;
57244 +
57245 + ret = copy_user_glob(o_tmp);
57246 + if (ret)
57247 + return ret;
57248 +
57249 + if (o_tmp->nested) {
57250 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
57251 + if (IS_ERR(o_tmp->nested))
57252 + return PTR_ERR(o_tmp->nested);
57253 +
57254 + /* insert into nested subject list */
57255 + o_tmp->nested->next = role->hash->first;
57256 + role->hash->first = o_tmp->nested;
57257 + }
57258 + }
57259 +
57260 + return 0;
57261 +}
57262 +
57263 +static __u32
57264 +count_user_subjs(struct acl_subject_label *userp)
57265 +{
57266 + struct acl_subject_label s_tmp;
57267 + __u32 num = 0;
57268 +
57269 + while (userp) {
57270 + if (copy_from_user(&s_tmp, userp,
57271 + sizeof (struct acl_subject_label)))
57272 + break;
57273 +
57274 + userp = s_tmp.prev;
57275 + /* do not count nested subjects against this count, since
57276 + they are not included in the hash table, but are
57277 + attached to objects. We have already counted
57278 + the subjects in userspace for the allocation
57279 + stack
57280 + */
57281 + if (!(s_tmp.mode & GR_NESTED))
57282 + num++;
57283 + }
57284 +
57285 + return num;
57286 +}
57287 +
57288 +static int
57289 +copy_user_allowedips(struct acl_role_label *rolep)
57290 +{
57291 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
57292 +
57293 + ruserip = rolep->allowed_ips;
57294 +
57295 + while (ruserip) {
57296 + rlast = rtmp;
57297 +
57298 + if ((rtmp = (struct role_allowed_ip *)
57299 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
57300 + return -ENOMEM;
57301 +
57302 + if (copy_from_user(rtmp, ruserip,
57303 + sizeof (struct role_allowed_ip)))
57304 + return -EFAULT;
57305 +
57306 + ruserip = rtmp->prev;
57307 +
57308 + if (!rlast) {
57309 + rtmp->prev = NULL;
57310 + rolep->allowed_ips = rtmp;
57311 + } else {
57312 + rlast->next = rtmp;
57313 + rtmp->prev = rlast;
57314 + }
57315 +
57316 + if (!ruserip)
57317 + rtmp->next = NULL;
57318 + }
57319 +
57320 + return 0;
57321 +}
57322 +
57323 +static int
57324 +copy_user_transitions(struct acl_role_label *rolep)
57325 +{
57326 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
57327 +
57328 + unsigned int len;
57329 + char *tmp;
57330 +
57331 + rusertp = rolep->transitions;
57332 +
57333 + while (rusertp) {
57334 + rlast = rtmp;
57335 +
57336 + if ((rtmp = (struct role_transition *)
57337 + acl_alloc(sizeof (struct role_transition))) == NULL)
57338 + return -ENOMEM;
57339 +
57340 + if (copy_from_user(rtmp, rusertp,
57341 + sizeof (struct role_transition)))
57342 + return -EFAULT;
57343 +
57344 + rusertp = rtmp->prev;
57345 +
57346 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
57347 +
57348 + if (!len || len >= GR_SPROLE_LEN)
57349 + return -EINVAL;
57350 +
57351 + if ((tmp = (char *) acl_alloc(len)) == NULL)
57352 + return -ENOMEM;
57353 +
57354 + if (copy_from_user(tmp, rtmp->rolename, len))
57355 + return -EFAULT;
57356 + tmp[len-1] = '\0';
57357 + rtmp->rolename = tmp;
57358 +
57359 + if (!rlast) {
57360 + rtmp->prev = NULL;
57361 + rolep->transitions = rtmp;
57362 + } else {
57363 + rlast->next = rtmp;
57364 + rtmp->prev = rlast;
57365 + }
57366 +
57367 + if (!rusertp)
57368 + rtmp->next = NULL;
57369 + }
57370 +
57371 + return 0;
57372 +}
57373 +
57374 +static struct acl_subject_label *
57375 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
57376 +{
57377 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
57378 + unsigned int len;
57379 + char *tmp;
57380 + __u32 num_objs;
57381 + struct acl_ip_label **i_tmp, *i_utmp2;
57382 + struct gr_hash_struct ghash;
57383 + struct subject_map *subjmap;
57384 + unsigned int i_num;
57385 + int err;
57386 +
57387 + s_tmp = lookup_subject_map(userp);
57388 +
57389 + /* we've already copied this subject into the kernel, just return
57390 + the reference to it, and don't copy it over again
57391 + */
57392 + if (s_tmp)
57393 + return(s_tmp);
57394 +
57395 + if ((s_tmp = (struct acl_subject_label *)
57396 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
57397 + return ERR_PTR(-ENOMEM);
57398 +
57399 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
57400 + if (subjmap == NULL)
57401 + return ERR_PTR(-ENOMEM);
57402 +
57403 + subjmap->user = userp;
57404 + subjmap->kernel = s_tmp;
57405 + insert_subj_map_entry(subjmap);
57406 +
57407 + if (copy_from_user(s_tmp, userp,
57408 + sizeof (struct acl_subject_label)))
57409 + return ERR_PTR(-EFAULT);
57410 +
57411 + len = strnlen_user(s_tmp->filename, PATH_MAX);
57412 +
57413 + if (!len || len >= PATH_MAX)
57414 + return ERR_PTR(-EINVAL);
57415 +
57416 + if ((tmp = (char *) acl_alloc(len)) == NULL)
57417 + return ERR_PTR(-ENOMEM);
57418 +
57419 + if (copy_from_user(tmp, s_tmp->filename, len))
57420 + return ERR_PTR(-EFAULT);
57421 + tmp[len-1] = '\0';
57422 + s_tmp->filename = tmp;
57423 +
57424 + if (!strcmp(s_tmp->filename, "/"))
57425 + role->root_label = s_tmp;
57426 +
57427 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
57428 + return ERR_PTR(-EFAULT);
57429 +
57430 + /* copy user and group transition tables */
57431 +
57432 + if (s_tmp->user_trans_num) {
57433 + uid_t *uidlist;
57434 +
57435 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
57436 + if (uidlist == NULL)
57437 + return ERR_PTR(-ENOMEM);
57438 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
57439 + return ERR_PTR(-EFAULT);
57440 +
57441 + s_tmp->user_transitions = uidlist;
57442 + }
57443 +
57444 + if (s_tmp->group_trans_num) {
57445 + gid_t *gidlist;
57446 +
57447 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
57448 + if (gidlist == NULL)
57449 + return ERR_PTR(-ENOMEM);
57450 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
57451 + return ERR_PTR(-EFAULT);
57452 +
57453 + s_tmp->group_transitions = gidlist;
57454 + }
57455 +
57456 + /* set up object hash table */
57457 + num_objs = count_user_objs(ghash.first);
57458 +
57459 + s_tmp->obj_hash_size = num_objs;
57460 + s_tmp->obj_hash =
57461 + (struct acl_object_label **)
57462 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
57463 +
57464 + if (!s_tmp->obj_hash)
57465 + return ERR_PTR(-ENOMEM);
57466 +
57467 + memset(s_tmp->obj_hash, 0,
57468 + s_tmp->obj_hash_size *
57469 + sizeof (struct acl_object_label *));
57470 +
57471 + /* add in objects */
57472 + err = copy_user_objs(ghash.first, s_tmp, role);
57473 +
57474 + if (err)
57475 + return ERR_PTR(err);
57476 +
57477 + /* set pointer for parent subject */
57478 + if (s_tmp->parent_subject) {
57479 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
57480 +
57481 + if (IS_ERR(s_tmp2))
57482 + return s_tmp2;
57483 +
57484 + s_tmp->parent_subject = s_tmp2;
57485 + }
57486 +
57487 + /* add in ip acls */
57488 +
57489 + if (!s_tmp->ip_num) {
57490 + s_tmp->ips = NULL;
57491 + goto insert;
57492 + }
57493 +
57494 + i_tmp =
57495 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
57496 + sizeof (struct acl_ip_label *));
57497 +
57498 + if (!i_tmp)
57499 + return ERR_PTR(-ENOMEM);
57500 +
57501 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
57502 + *(i_tmp + i_num) =
57503 + (struct acl_ip_label *)
57504 + acl_alloc(sizeof (struct acl_ip_label));
57505 + if (!*(i_tmp + i_num))
57506 + return ERR_PTR(-ENOMEM);
57507 +
57508 + if (copy_from_user
57509 + (&i_utmp2, s_tmp->ips + i_num,
57510 + sizeof (struct acl_ip_label *)))
57511 + return ERR_PTR(-EFAULT);
57512 +
57513 + if (copy_from_user
57514 + (*(i_tmp + i_num), i_utmp2,
57515 + sizeof (struct acl_ip_label)))
57516 + return ERR_PTR(-EFAULT);
57517 +
57518 + if ((*(i_tmp + i_num))->iface == NULL)
57519 + continue;
57520 +
57521 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
57522 + if (!len || len >= IFNAMSIZ)
57523 + return ERR_PTR(-EINVAL);
57524 + tmp = acl_alloc(len);
57525 + if (tmp == NULL)
57526 + return ERR_PTR(-ENOMEM);
57527 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
57528 + return ERR_PTR(-EFAULT);
57529 + (*(i_tmp + i_num))->iface = tmp;
57530 + }
57531 +
57532 + s_tmp->ips = i_tmp;
57533 +
57534 +insert:
57535 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
57536 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
57537 + return ERR_PTR(-ENOMEM);
57538 +
57539 + return s_tmp;
57540 +}
57541 +
57542 +static int
57543 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
57544 +{
57545 + struct acl_subject_label s_pre;
57546 + struct acl_subject_label * ret;
57547 + int err;
57548 +
57549 + while (userp) {
57550 + if (copy_from_user(&s_pre, userp,
57551 + sizeof (struct acl_subject_label)))
57552 + return -EFAULT;
57553 +
57554 + /* do not add nested subjects here, add
57555 + while parsing objects
57556 + */
57557 +
57558 + if (s_pre.mode & GR_NESTED) {
57559 + userp = s_pre.prev;
57560 + continue;
57561 + }
57562 +
57563 + ret = do_copy_user_subj(userp, role);
57564 +
57565 + err = PTR_ERR(ret);
57566 + if (IS_ERR(ret))
57567 + return err;
57568 +
57569 + insert_acl_subj_label(ret, role);
57570 +
57571 + userp = s_pre.prev;
57572 + }
57573 +
57574 + return 0;
57575 +}
57576 +
57577 +static int
57578 +copy_user_acl(struct gr_arg *arg)
57579 +{
57580 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
57581 + struct sprole_pw *sptmp;
57582 + struct gr_hash_struct *ghash;
57583 + uid_t *domainlist;
57584 + unsigned int r_num;
57585 + unsigned int len;
57586 + char *tmp;
57587 + int err = 0;
57588 + __u16 i;
57589 + __u32 num_subjs;
57590 +
57591 + /* we need a default and kernel role */
57592 + if (arg->role_db.num_roles < 2)
57593 + return -EINVAL;
57594 +
57595 + /* copy special role authentication info from userspace */
57596 +
57597 + num_sprole_pws = arg->num_sprole_pws;
57598 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
57599 +
57600 + if (!acl_special_roles) {
57601 + err = -ENOMEM;
57602 + goto cleanup;
57603 + }
57604 +
57605 + for (i = 0; i < num_sprole_pws; i++) {
57606 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
57607 + if (!sptmp) {
57608 + err = -ENOMEM;
57609 + goto cleanup;
57610 + }
57611 + if (copy_from_user(sptmp, arg->sprole_pws + i,
57612 + sizeof (struct sprole_pw))) {
57613 + err = -EFAULT;
57614 + goto cleanup;
57615 + }
57616 +
57617 + len =
57618 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
57619 +
57620 + if (!len || len >= GR_SPROLE_LEN) {
57621 + err = -EINVAL;
57622 + goto cleanup;
57623 + }
57624 +
57625 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
57626 + err = -ENOMEM;
57627 + goto cleanup;
57628 + }
57629 +
57630 + if (copy_from_user(tmp, sptmp->rolename, len)) {
57631 + err = -EFAULT;
57632 + goto cleanup;
57633 + }
57634 + tmp[len-1] = '\0';
57635 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
57636 + printk(KERN_ALERT "Copying special role %s\n", tmp);
57637 +#endif
57638 + sptmp->rolename = tmp;
57639 + acl_special_roles[i] = sptmp;
57640 + }
57641 +
57642 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
57643 +
57644 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
57645 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
57646 +
57647 + if (!r_tmp) {
57648 + err = -ENOMEM;
57649 + goto cleanup;
57650 + }
57651 +
57652 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
57653 + sizeof (struct acl_role_label *))) {
57654 + err = -EFAULT;
57655 + goto cleanup;
57656 + }
57657 +
57658 + if (copy_from_user(r_tmp, r_utmp2,
57659 + sizeof (struct acl_role_label))) {
57660 + err = -EFAULT;
57661 + goto cleanup;
57662 + }
57663 +
57664 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
57665 +
57666 + if (!len || len >= PATH_MAX) {
57667 + err = -EINVAL;
57668 + goto cleanup;
57669 + }
57670 +
57671 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
57672 + err = -ENOMEM;
57673 + goto cleanup;
57674 + }
57675 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
57676 + err = -EFAULT;
57677 + goto cleanup;
57678 + }
57679 + tmp[len-1] = '\0';
57680 + r_tmp->rolename = tmp;
57681 +
57682 + if (!strcmp(r_tmp->rolename, "default")
57683 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
57684 + default_role = r_tmp;
57685 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
57686 + kernel_role = r_tmp;
57687 + }
57688 +
57689 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
57690 + err = -ENOMEM;
57691 + goto cleanup;
57692 + }
57693 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
57694 + err = -EFAULT;
57695 + goto cleanup;
57696 + }
57697 +
57698 + r_tmp->hash = ghash;
57699 +
57700 + num_subjs = count_user_subjs(r_tmp->hash->first);
57701 +
57702 + r_tmp->subj_hash_size = num_subjs;
57703 + r_tmp->subj_hash =
57704 + (struct acl_subject_label **)
57705 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
57706 +
57707 + if (!r_tmp->subj_hash) {
57708 + err = -ENOMEM;
57709 + goto cleanup;
57710 + }
57711 +
57712 + err = copy_user_allowedips(r_tmp);
57713 + if (err)
57714 + goto cleanup;
57715 +
57716 + /* copy domain info */
57717 + if (r_tmp->domain_children != NULL) {
57718 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
57719 + if (domainlist == NULL) {
57720 + err = -ENOMEM;
57721 + goto cleanup;
57722 + }
57723 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
57724 + err = -EFAULT;
57725 + goto cleanup;
57726 + }
57727 + r_tmp->domain_children = domainlist;
57728 + }
57729 +
57730 + err = copy_user_transitions(r_tmp);
57731 + if (err)
57732 + goto cleanup;
57733 +
57734 + memset(r_tmp->subj_hash, 0,
57735 + r_tmp->subj_hash_size *
57736 + sizeof (struct acl_subject_label *));
57737 +
57738 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
57739 +
57740 + if (err)
57741 + goto cleanup;
57742 +
57743 + /* set nested subject list to null */
57744 + r_tmp->hash->first = NULL;
57745 +
57746 + insert_acl_role_label(r_tmp);
57747 + }
57748 +
57749 + goto return_err;
57750 + cleanup:
57751 + free_variables();
57752 + return_err:
57753 + return err;
57754 +
57755 +}
57756 +
57757 +static int
57758 +gracl_init(struct gr_arg *args)
57759 +{
57760 + int error = 0;
57761 +
57762 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
57763 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
57764 +
57765 + if (init_variables(args)) {
57766 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
57767 + error = -ENOMEM;
57768 + free_variables();
57769 + goto out;
57770 + }
57771 +
57772 + error = copy_user_acl(args);
57773 + free_init_variables();
57774 + if (error) {
57775 + free_variables();
57776 + goto out;
57777 + }
57778 +
57779 + if ((error = gr_set_acls(0))) {
57780 + free_variables();
57781 + goto out;
57782 + }
57783 +
57784 + pax_open_kernel();
57785 + gr_status |= GR_READY;
57786 + pax_close_kernel();
57787 +
57788 + out:
57789 + return error;
57790 +}
57791 +
57792 +/* derived from glibc fnmatch() 0: match, 1: no match*/
57793 +
57794 +static int
57795 +glob_match(const char *p, const char *n)
57796 +{
57797 + char c;
57798 +
57799 + while ((c = *p++) != '\0') {
57800 + switch (c) {
57801 + case '?':
57802 + if (*n == '\0')
57803 + return 1;
57804 + else if (*n == '/')
57805 + return 1;
57806 + break;
57807 + case '\\':
57808 + if (*n != c)
57809 + return 1;
57810 + break;
57811 + case '*':
57812 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
57813 + if (*n == '/')
57814 + return 1;
57815 + else if (c == '?') {
57816 + if (*n == '\0')
57817 + return 1;
57818 + else
57819 + ++n;
57820 + }
57821 + }
57822 + if (c == '\0') {
57823 + return 0;
57824 + } else {
57825 + const char *endp;
57826 +
57827 + if ((endp = strchr(n, '/')) == NULL)
57828 + endp = n + strlen(n);
57829 +
57830 + if (c == '[') {
57831 + for (--p; n < endp; ++n)
57832 + if (!glob_match(p, n))
57833 + return 0;
57834 + } else if (c == '/') {
57835 + while (*n != '\0' && *n != '/')
57836 + ++n;
57837 + if (*n == '/' && !glob_match(p, n + 1))
57838 + return 0;
57839 + } else {
57840 + for (--p; n < endp; ++n)
57841 + if (*n == c && !glob_match(p, n))
57842 + return 0;
57843 + }
57844 +
57845 + return 1;
57846 + }
57847 + case '[':
57848 + {
57849 + int not;
57850 + char cold;
57851 +
57852 + if (*n == '\0' || *n == '/')
57853 + return 1;
57854 +
57855 + not = (*p == '!' || *p == '^');
57856 + if (not)
57857 + ++p;
57858 +
57859 + c = *p++;
57860 + for (;;) {
57861 + unsigned char fn = (unsigned char)*n;
57862 +
57863 + if (c == '\0')
57864 + return 1;
57865 + else {
57866 + if (c == fn)
57867 + goto matched;
57868 + cold = c;
57869 + c = *p++;
57870 +
57871 + if (c == '-' && *p != ']') {
57872 + unsigned char cend = *p++;
57873 +
57874 + if (cend == '\0')
57875 + return 1;
57876 +
57877 + if (cold <= fn && fn <= cend)
57878 + goto matched;
57879 +
57880 + c = *p++;
57881 + }
57882 + }
57883 +
57884 + if (c == ']')
57885 + break;
57886 + }
57887 + if (!not)
57888 + return 1;
57889 + break;
57890 + matched:
57891 + while (c != ']') {
57892 + if (c == '\0')
57893 + return 1;
57894 +
57895 + c = *p++;
57896 + }
57897 + if (not)
57898 + return 1;
57899 + }
57900 + break;
57901 + default:
57902 + if (c != *n)
57903 + return 1;
57904 + }
57905 +
57906 + ++n;
57907 + }
57908 +
57909 + if (*n == '\0')
57910 + return 0;
57911 +
57912 + if (*n == '/')
57913 + return 0;
57914 +
57915 + return 1;
57916 +}
57917 +
57918 +static struct acl_object_label *
57919 +chk_glob_label(struct acl_object_label *globbed,
57920 + struct dentry *dentry, struct vfsmount *mnt, char **path)
57921 +{
57922 + struct acl_object_label *tmp;
57923 +
57924 + if (*path == NULL)
57925 + *path = gr_to_filename_nolock(dentry, mnt);
57926 +
57927 + tmp = globbed;
57928 +
57929 + while (tmp) {
57930 + if (!glob_match(tmp->filename, *path))
57931 + return tmp;
57932 + tmp = tmp->next;
57933 + }
57934 +
57935 + return NULL;
57936 +}
57937 +
57938 +static struct acl_object_label *
57939 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
57940 + const ino_t curr_ino, const dev_t curr_dev,
57941 + const struct acl_subject_label *subj, char **path, const int checkglob)
57942 +{
57943 + struct acl_subject_label *tmpsubj;
57944 + struct acl_object_label *retval;
57945 + struct acl_object_label *retval2;
57946 +
57947 + tmpsubj = (struct acl_subject_label *) subj;
57948 + read_lock(&gr_inode_lock);
57949 + do {
57950 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
57951 + if (retval) {
57952 + if (checkglob && retval->globbed) {
57953 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
57954 + (struct vfsmount *)orig_mnt, path);
57955 + if (retval2)
57956 + retval = retval2;
57957 + }
57958 + break;
57959 + }
57960 + } while ((tmpsubj = tmpsubj->parent_subject));
57961 + read_unlock(&gr_inode_lock);
57962 +
57963 + return retval;
57964 +}
57965 +
57966 +static __inline__ struct acl_object_label *
57967 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
57968 + const struct dentry *curr_dentry,
57969 + const struct acl_subject_label *subj, char **path, const int checkglob)
57970 +{
57971 + int newglob = checkglob;
57972 +
57973 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
57974 + as we don't want a / * rule to match instead of the / object
57975 + don't do this for create lookups that call this function though, since they're looking up
57976 + on the parent and thus need globbing checks on all paths
57977 + */
57978 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
57979 + newglob = GR_NO_GLOB;
57980 +
57981 + return __full_lookup(orig_dentry, orig_mnt,
57982 + curr_dentry->d_inode->i_ino,
57983 + __get_dev(curr_dentry), subj, path, newglob);
57984 +}
57985 +
57986 +static struct acl_object_label *
57987 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57988 + const struct acl_subject_label *subj, char *path, const int checkglob)
57989 +{
57990 + struct dentry *dentry = (struct dentry *) l_dentry;
57991 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
57992 + struct acl_object_label *retval;
57993 +
57994 + spin_lock(&dcache_lock);
57995 + spin_lock(&vfsmount_lock);
57996 +
57997 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
57998 +#ifdef CONFIG_NET
57999 + mnt == sock_mnt ||
58000 +#endif
58001 +#ifdef CONFIG_HUGETLBFS
58002 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
58003 +#endif
58004 + /* ignore Eric Biederman */
58005 + IS_PRIVATE(l_dentry->d_inode))) {
58006 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
58007 + goto out;
58008 + }
58009 +
58010 + for (;;) {
58011 + if (dentry == real_root && mnt == real_root_mnt)
58012 + break;
58013 +
58014 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
58015 + if (mnt->mnt_parent == mnt)
58016 + break;
58017 +
58018 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
58019 + if (retval != NULL)
58020 + goto out;
58021 +
58022 + dentry = mnt->mnt_mountpoint;
58023 + mnt = mnt->mnt_parent;
58024 + continue;
58025 + }
58026 +
58027 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
58028 + if (retval != NULL)
58029 + goto out;
58030 +
58031 + dentry = dentry->d_parent;
58032 + }
58033 +
58034 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
58035 +
58036 + if (retval == NULL)
58037 + retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
58038 +out:
58039 + spin_unlock(&vfsmount_lock);
58040 + spin_unlock(&dcache_lock);
58041 +
58042 + BUG_ON(retval == NULL);
58043 +
58044 + return retval;
58045 +}
58046 +
58047 +static __inline__ struct acl_object_label *
58048 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58049 + const struct acl_subject_label *subj)
58050 +{
58051 + char *path = NULL;
58052 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
58053 +}
58054 +
58055 +static __inline__ struct acl_object_label *
58056 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58057 + const struct acl_subject_label *subj)
58058 +{
58059 + char *path = NULL;
58060 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
58061 +}
58062 +
58063 +static __inline__ struct acl_object_label *
58064 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58065 + const struct acl_subject_label *subj, char *path)
58066 +{
58067 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
58068 +}
58069 +
58070 +static struct acl_subject_label *
58071 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58072 + const struct acl_role_label *role)
58073 +{
58074 + struct dentry *dentry = (struct dentry *) l_dentry;
58075 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
58076 + struct acl_subject_label *retval;
58077 +
58078 + spin_lock(&dcache_lock);
58079 + spin_lock(&vfsmount_lock);
58080 +
58081 + for (;;) {
58082 + if (dentry == real_root && mnt == real_root_mnt)
58083 + break;
58084 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
58085 + if (mnt->mnt_parent == mnt)
58086 + break;
58087 +
58088 + read_lock(&gr_inode_lock);
58089 + retval =
58090 + lookup_acl_subj_label(dentry->d_inode->i_ino,
58091 + __get_dev(dentry), role);
58092 + read_unlock(&gr_inode_lock);
58093 + if (retval != NULL)
58094 + goto out;
58095 +
58096 + dentry = mnt->mnt_mountpoint;
58097 + mnt = mnt->mnt_parent;
58098 + continue;
58099 + }
58100 +
58101 + read_lock(&gr_inode_lock);
58102 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
58103 + __get_dev(dentry), role);
58104 + read_unlock(&gr_inode_lock);
58105 + if (retval != NULL)
58106 + goto out;
58107 +
58108 + dentry = dentry->d_parent;
58109 + }
58110 +
58111 + read_lock(&gr_inode_lock);
58112 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
58113 + __get_dev(dentry), role);
58114 + read_unlock(&gr_inode_lock);
58115 +
58116 + if (unlikely(retval == NULL)) {
58117 + read_lock(&gr_inode_lock);
58118 + retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
58119 + __get_dev(real_root), role);
58120 + read_unlock(&gr_inode_lock);
58121 + }
58122 +out:
58123 + spin_unlock(&vfsmount_lock);
58124 + spin_unlock(&dcache_lock);
58125 +
58126 + BUG_ON(retval == NULL);
58127 +
58128 + return retval;
58129 +}
58130 +
58131 +static void
58132 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
58133 +{
58134 + struct task_struct *task = current;
58135 + const struct cred *cred = current_cred();
58136 +
58137 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
58138 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
58139 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
58140 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
58141 +
58142 + return;
58143 +}
58144 +
58145 +static void
58146 +gr_log_learn_sysctl(const char *path, const __u32 mode)
58147 +{
58148 + struct task_struct *task = current;
58149 + const struct cred *cred = current_cred();
58150 +
58151 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
58152 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
58153 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
58154 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
58155 +
58156 + return;
58157 +}
58158 +
58159 +static void
58160 +gr_log_learn_id_change(const char type, const unsigned int real,
58161 + const unsigned int effective, const unsigned int fs)
58162 +{
58163 + struct task_struct *task = current;
58164 + const struct cred *cred = current_cred();
58165 +
58166 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
58167 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
58168 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
58169 + type, real, effective, fs, &task->signal->saved_ip);
58170 +
58171 + return;
58172 +}
58173 +
58174 +__u32
58175 +gr_search_file(const struct dentry * dentry, const __u32 mode,
58176 + const struct vfsmount * mnt)
58177 +{
58178 + __u32 retval = mode;
58179 + struct acl_subject_label *curracl;
58180 + struct acl_object_label *currobj;
58181 +
58182 + if (unlikely(!(gr_status & GR_READY)))
58183 + return (mode & ~GR_AUDITS);
58184 +
58185 + curracl = current->acl;
58186 +
58187 + currobj = chk_obj_label(dentry, mnt, curracl);
58188 + retval = currobj->mode & mode;
58189 +
58190 + /* if we're opening a specified transfer file for writing
58191 + (e.g. /dev/initctl), then transfer our role to init
58192 + */
58193 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
58194 + current->role->roletype & GR_ROLE_PERSIST)) {
58195 + struct task_struct *task = init_pid_ns.child_reaper;
58196 +
58197 + if (task->role != current->role) {
58198 + task->acl_sp_role = 0;
58199 + task->acl_role_id = current->acl_role_id;
58200 + task->role = current->role;
58201 + rcu_read_lock();
58202 + read_lock(&grsec_exec_file_lock);
58203 + gr_apply_subject_to_task(task);
58204 + read_unlock(&grsec_exec_file_lock);
58205 + rcu_read_unlock();
58206 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
58207 + }
58208 + }
58209 +
58210 + if (unlikely
58211 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
58212 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
58213 + __u32 new_mode = mode;
58214 +
58215 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
58216 +
58217 + retval = new_mode;
58218 +
58219 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
58220 + new_mode |= GR_INHERIT;
58221 +
58222 + if (!(mode & GR_NOLEARN))
58223 + gr_log_learn(dentry, mnt, new_mode);
58224 + }
58225 +
58226 + return retval;
58227 +}
58228 +
58229 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
58230 + const struct dentry *parent,
58231 + const struct vfsmount *mnt)
58232 +{
58233 + struct name_entry *match;
58234 + struct acl_object_label *matchpo;
58235 + struct acl_subject_label *curracl;
58236 + char *path;
58237 +
58238 + if (unlikely(!(gr_status & GR_READY)))
58239 + return NULL;
58240 +
58241 + preempt_disable();
58242 + path = gr_to_filename_rbac(new_dentry, mnt);
58243 + match = lookup_name_entry_create(path);
58244 +
58245 + curracl = current->acl;
58246 +
58247 + if (match) {
58248 + read_lock(&gr_inode_lock);
58249 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
58250 + read_unlock(&gr_inode_lock);
58251 +
58252 + if (matchpo) {
58253 + preempt_enable();
58254 + return matchpo;
58255 + }
58256 + }
58257 +
58258 + // lookup parent
58259 +
58260 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
58261 +
58262 + preempt_enable();
58263 + return matchpo;
58264 +}
58265 +
58266 +__u32
58267 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
58268 + const struct vfsmount * mnt, const __u32 mode)
58269 +{
58270 + struct acl_object_label *matchpo;
58271 + __u32 retval;
58272 +
58273 + if (unlikely(!(gr_status & GR_READY)))
58274 + return (mode & ~GR_AUDITS);
58275 +
58276 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
58277 +
58278 + retval = matchpo->mode & mode;
58279 +
58280 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
58281 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
58282 + __u32 new_mode = mode;
58283 +
58284 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
58285 +
58286 + gr_log_learn(new_dentry, mnt, new_mode);
58287 + return new_mode;
58288 + }
58289 +
58290 + return retval;
58291 +}
58292 +
58293 +__u32
58294 +gr_check_link(const struct dentry * new_dentry,
58295 + const struct dentry * parent_dentry,
58296 + const struct vfsmount * parent_mnt,
58297 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
58298 +{
58299 + struct acl_object_label *obj;
58300 + __u32 oldmode, newmode;
58301 + __u32 needmode;
58302 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
58303 + GR_DELETE | GR_INHERIT;
58304 +
58305 + if (unlikely(!(gr_status & GR_READY)))
58306 + return (GR_CREATE | GR_LINK);
58307 +
58308 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
58309 + oldmode = obj->mode;
58310 +
58311 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
58312 + newmode = obj->mode;
58313 +
58314 + needmode = newmode & checkmodes;
58315 +
58316 + // old name for hardlink must have at least the permissions of the new name
58317 + if ((oldmode & needmode) != needmode)
58318 + goto bad;
58319 +
58320 + // if old name had restrictions/auditing, make sure the new name does as well
58321 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
58322 +
58323 + // don't allow hardlinking of suid/sgid files without permission
58324 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
58325 + needmode |= GR_SETID;
58326 +
58327 + if ((newmode & needmode) != needmode)
58328 + goto bad;
58329 +
58330 + // enforce minimum permissions
58331 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
58332 + return newmode;
58333 +bad:
58334 + needmode = oldmode;
58335 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
58336 + needmode |= GR_SETID;
58337 +
58338 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
58339 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
58340 + return (GR_CREATE | GR_LINK);
58341 + } else if (newmode & GR_SUPPRESS)
58342 + return GR_SUPPRESS;
58343 + else
58344 + return 0;
58345 +}
58346 +
58347 +int
58348 +gr_check_hidden_task(const struct task_struct *task)
58349 +{
58350 + if (unlikely(!(gr_status & GR_READY)))
58351 + return 0;
58352 +
58353 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
58354 + return 1;
58355 +
58356 + return 0;
58357 +}
58358 +
58359 +int
58360 +gr_check_protected_task(const struct task_struct *task)
58361 +{
58362 + if (unlikely(!(gr_status & GR_READY) || !task))
58363 + return 0;
58364 +
58365 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
58366 + task->acl != current->acl)
58367 + return 1;
58368 +
58369 + return 0;
58370 +}
58371 +
58372 +int
58373 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
58374 +{
58375 + struct task_struct *p;
58376 + int ret = 0;
58377 +
58378 + if (unlikely(!(gr_status & GR_READY) || !pid))
58379 + return ret;
58380 +
58381 + read_lock(&tasklist_lock);
58382 + do_each_pid_task(pid, type, p) {
58383 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
58384 + p->acl != current->acl) {
58385 + ret = 1;
58386 + goto out;
58387 + }
58388 + } while_each_pid_task(pid, type, p);
58389 +out:
58390 + read_unlock(&tasklist_lock);
58391 +
58392 + return ret;
58393 +}
58394 +
58395 +void
58396 +gr_copy_label(struct task_struct *tsk)
58397 +{
58398 + tsk->signal->used_accept = 0;
58399 + tsk->acl_sp_role = 0;
58400 + tsk->acl_role_id = current->acl_role_id;
58401 + tsk->acl = current->acl;
58402 + tsk->role = current->role;
58403 + tsk->signal->curr_ip = current->signal->curr_ip;
58404 + tsk->signal->saved_ip = current->signal->saved_ip;
58405 + if (current->exec_file)
58406 + get_file(current->exec_file);
58407 + tsk->exec_file = current->exec_file;
58408 + tsk->is_writable = current->is_writable;
58409 + if (unlikely(current->signal->used_accept)) {
58410 + current->signal->curr_ip = 0;
58411 + current->signal->saved_ip = 0;
58412 + }
58413 +
58414 + return;
58415 +}
58416 +
58417 +static void
58418 +gr_set_proc_res(struct task_struct *task)
58419 +{
58420 + struct acl_subject_label *proc;
58421 + unsigned short i;
58422 +
58423 + proc = task->acl;
58424 +
58425 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
58426 + return;
58427 +
58428 + for (i = 0; i < RLIM_NLIMITS; i++) {
58429 + if (!(proc->resmask & (1 << i)))
58430 + continue;
58431 +
58432 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
58433 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
58434 + }
58435 +
58436 + return;
58437 +}
58438 +
58439 +extern int __gr_process_user_ban(struct user_struct *user);
58440 +
58441 +int
58442 +gr_check_user_change(int real, int effective, int fs)
58443 +{
58444 + unsigned int i;
58445 + __u16 num;
58446 + uid_t *uidlist;
58447 + int curuid;
58448 + int realok = 0;
58449 + int effectiveok = 0;
58450 + int fsok = 0;
58451 +
58452 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58453 + struct user_struct *user;
58454 +
58455 + if (real == -1)
58456 + goto skipit;
58457 +
58458 + user = find_user(real);
58459 + if (user == NULL)
58460 + goto skipit;
58461 +
58462 + if (__gr_process_user_ban(user)) {
58463 + /* for find_user */
58464 + free_uid(user);
58465 + return 1;
58466 + }
58467 +
58468 + /* for find_user */
58469 + free_uid(user);
58470 +
58471 +skipit:
58472 +#endif
58473 +
58474 + if (unlikely(!(gr_status & GR_READY)))
58475 + return 0;
58476 +
58477 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
58478 + gr_log_learn_id_change('u', real, effective, fs);
58479 +
58480 + num = current->acl->user_trans_num;
58481 + uidlist = current->acl->user_transitions;
58482 +
58483 + if (uidlist == NULL)
58484 + return 0;
58485 +
58486 + if (real == -1)
58487 + realok = 1;
58488 + if (effective == -1)
58489 + effectiveok = 1;
58490 + if (fs == -1)
58491 + fsok = 1;
58492 +
58493 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
58494 + for (i = 0; i < num; i++) {
58495 + curuid = (int)uidlist[i];
58496 + if (real == curuid)
58497 + realok = 1;
58498 + if (effective == curuid)
58499 + effectiveok = 1;
58500 + if (fs == curuid)
58501 + fsok = 1;
58502 + }
58503 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
58504 + for (i = 0; i < num; i++) {
58505 + curuid = (int)uidlist[i];
58506 + if (real == curuid)
58507 + break;
58508 + if (effective == curuid)
58509 + break;
58510 + if (fs == curuid)
58511 + break;
58512 + }
58513 + /* not in deny list */
58514 + if (i == num) {
58515 + realok = 1;
58516 + effectiveok = 1;
58517 + fsok = 1;
58518 + }
58519 + }
58520 +
58521 + if (realok && effectiveok && fsok)
58522 + return 0;
58523 + else {
58524 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
58525 + return 1;
58526 + }
58527 +}
58528 +
58529 +int
58530 +gr_check_group_change(int real, int effective, int fs)
58531 +{
58532 + unsigned int i;
58533 + __u16 num;
58534 + gid_t *gidlist;
58535 + int curgid;
58536 + int realok = 0;
58537 + int effectiveok = 0;
58538 + int fsok = 0;
58539 +
58540 + if (unlikely(!(gr_status & GR_READY)))
58541 + return 0;
58542 +
58543 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
58544 + gr_log_learn_id_change('g', real, effective, fs);
58545 +
58546 + num = current->acl->group_trans_num;
58547 + gidlist = current->acl->group_transitions;
58548 +
58549 + if (gidlist == NULL)
58550 + return 0;
58551 +
58552 + if (real == -1)
58553 + realok = 1;
58554 + if (effective == -1)
58555 + effectiveok = 1;
58556 + if (fs == -1)
58557 + fsok = 1;
58558 +
58559 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
58560 + for (i = 0; i < num; i++) {
58561 + curgid = (int)gidlist[i];
58562 + if (real == curgid)
58563 + realok = 1;
58564 + if (effective == curgid)
58565 + effectiveok = 1;
58566 + if (fs == curgid)
58567 + fsok = 1;
58568 + }
58569 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
58570 + for (i = 0; i < num; i++) {
58571 + curgid = (int)gidlist[i];
58572 + if (real == curgid)
58573 + break;
58574 + if (effective == curgid)
58575 + break;
58576 + if (fs == curgid)
58577 + break;
58578 + }
58579 + /* not in deny list */
58580 + if (i == num) {
58581 + realok = 1;
58582 + effectiveok = 1;
58583 + fsok = 1;
58584 + }
58585 + }
58586 +
58587 + if (realok && effectiveok && fsok)
58588 + return 0;
58589 + else {
58590 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
58591 + return 1;
58592 + }
58593 +}
58594 +
58595 +void
58596 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
58597 +{
58598 + struct acl_role_label *role = task->role;
58599 + struct acl_subject_label *subj = NULL;
58600 + struct acl_object_label *obj;
58601 + struct file *filp;
58602 +
58603 + if (unlikely(!(gr_status & GR_READY)))
58604 + return;
58605 +
58606 + filp = task->exec_file;
58607 +
58608 + /* kernel process, we'll give them the kernel role */
58609 + if (unlikely(!filp)) {
58610 + task->role = kernel_role;
58611 + task->acl = kernel_role->root_label;
58612 + return;
58613 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
58614 + role = lookup_acl_role_label(task, uid, gid);
58615 +
58616 + /* perform subject lookup in possibly new role
58617 + we can use this result below in the case where role == task->role
58618 + */
58619 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
58620 +
58621 + /* if we changed uid/gid, but result in the same role
58622 + and are using inheritance, don't lose the inherited subject
58623 + if current subject is other than what normal lookup
58624 + would result in, we arrived via inheritance, don't
58625 + lose subject
58626 + */
58627 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
58628 + (subj == task->acl)))
58629 + task->acl = subj;
58630 +
58631 + task->role = role;
58632 +
58633 + task->is_writable = 0;
58634 +
58635 + /* ignore additional mmap checks for processes that are writable
58636 + by the default ACL */
58637 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
58638 + if (unlikely(obj->mode & GR_WRITE))
58639 + task->is_writable = 1;
58640 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
58641 + if (unlikely(obj->mode & GR_WRITE))
58642 + task->is_writable = 1;
58643 +
58644 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58645 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
58646 +#endif
58647 +
58648 + gr_set_proc_res(task);
58649 +
58650 + return;
58651 +}
58652 +
58653 +int
58654 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
58655 + const int unsafe_flags)
58656 +{
58657 + struct task_struct *task = current;
58658 + struct acl_subject_label *newacl;
58659 + struct acl_object_label *obj;
58660 + __u32 retmode;
58661 +
58662 + if (unlikely(!(gr_status & GR_READY)))
58663 + return 0;
58664 +
58665 + newacl = chk_subj_label(dentry, mnt, task->role);
58666 +
58667 + task_lock(task);
58668 + if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
58669 + !(task->role->roletype & GR_ROLE_GOD) &&
58670 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
58671 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
58672 + task_unlock(task);
58673 + if (unsafe_flags & LSM_UNSAFE_SHARE)
58674 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
58675 + else
58676 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
58677 + return -EACCES;
58678 + }
58679 + task_unlock(task);
58680 +
58681 + obj = chk_obj_label(dentry, mnt, task->acl);
58682 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
58683 +
58684 + if (!(task->acl->mode & GR_INHERITLEARN) &&
58685 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
58686 + if (obj->nested)
58687 + task->acl = obj->nested;
58688 + else
58689 + task->acl = newacl;
58690 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
58691 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
58692 +
58693 + task->is_writable = 0;
58694 +
58695 + /* ignore additional mmap checks for processes that are writable
58696 + by the default ACL */
58697 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
58698 + if (unlikely(obj->mode & GR_WRITE))
58699 + task->is_writable = 1;
58700 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
58701 + if (unlikely(obj->mode & GR_WRITE))
58702 + task->is_writable = 1;
58703 +
58704 + gr_set_proc_res(task);
58705 +
58706 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58707 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
58708 +#endif
58709 + return 0;
58710 +}
58711 +
58712 +/* always called with valid inodev ptr */
58713 +static void
58714 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
58715 +{
58716 + struct acl_object_label *matchpo;
58717 + struct acl_subject_label *matchps;
58718 + struct acl_subject_label *subj;
58719 + struct acl_role_label *role;
58720 + unsigned int x;
58721 +
58722 + FOR_EACH_ROLE_START(role)
58723 + FOR_EACH_SUBJECT_START(role, subj, x)
58724 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
58725 + matchpo->mode |= GR_DELETED;
58726 + FOR_EACH_SUBJECT_END(subj,x)
58727 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
58728 + if (subj->inode == ino && subj->device == dev)
58729 + subj->mode |= GR_DELETED;
58730 + FOR_EACH_NESTED_SUBJECT_END(subj)
58731 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
58732 + matchps->mode |= GR_DELETED;
58733 + FOR_EACH_ROLE_END(role)
58734 +
58735 + inodev->nentry->deleted = 1;
58736 +
58737 + return;
58738 +}
58739 +
58740 +void
58741 +gr_handle_delete(const ino_t ino, const dev_t dev)
58742 +{
58743 + struct inodev_entry *inodev;
58744 +
58745 + if (unlikely(!(gr_status & GR_READY)))
58746 + return;
58747 +
58748 + write_lock(&gr_inode_lock);
58749 + inodev = lookup_inodev_entry(ino, dev);
58750 + if (inodev != NULL)
58751 + do_handle_delete(inodev, ino, dev);
58752 + write_unlock(&gr_inode_lock);
58753 +
58754 + return;
58755 +}
58756 +
58757 +static void
58758 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
58759 + const ino_t newinode, const dev_t newdevice,
58760 + struct acl_subject_label *subj)
58761 +{
58762 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
58763 + struct acl_object_label *match;
58764 +
58765 + match = subj->obj_hash[index];
58766 +
58767 + while (match && (match->inode != oldinode ||
58768 + match->device != olddevice ||
58769 + !(match->mode & GR_DELETED)))
58770 + match = match->next;
58771 +
58772 + if (match && (match->inode == oldinode)
58773 + && (match->device == olddevice)
58774 + && (match->mode & GR_DELETED)) {
58775 + if (match->prev == NULL) {
58776 + subj->obj_hash[index] = match->next;
58777 + if (match->next != NULL)
58778 + match->next->prev = NULL;
58779 + } else {
58780 + match->prev->next = match->next;
58781 + if (match->next != NULL)
58782 + match->next->prev = match->prev;
58783 + }
58784 + match->prev = NULL;
58785 + match->next = NULL;
58786 + match->inode = newinode;
58787 + match->device = newdevice;
58788 + match->mode &= ~GR_DELETED;
58789 +
58790 + insert_acl_obj_label(match, subj);
58791 + }
58792 +
58793 + return;
58794 +}
58795 +
58796 +static void
58797 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
58798 + const ino_t newinode, const dev_t newdevice,
58799 + struct acl_role_label *role)
58800 +{
58801 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
58802 + struct acl_subject_label *match;
58803 +
58804 + match = role->subj_hash[index];
58805 +
58806 + while (match && (match->inode != oldinode ||
58807 + match->device != olddevice ||
58808 + !(match->mode & GR_DELETED)))
58809 + match = match->next;
58810 +
58811 + if (match && (match->inode == oldinode)
58812 + && (match->device == olddevice)
58813 + && (match->mode & GR_DELETED)) {
58814 + if (match->prev == NULL) {
58815 + role->subj_hash[index] = match->next;
58816 + if (match->next != NULL)
58817 + match->next->prev = NULL;
58818 + } else {
58819 + match->prev->next = match->next;
58820 + if (match->next != NULL)
58821 + match->next->prev = match->prev;
58822 + }
58823 + match->prev = NULL;
58824 + match->next = NULL;
58825 + match->inode = newinode;
58826 + match->device = newdevice;
58827 + match->mode &= ~GR_DELETED;
58828 +
58829 + insert_acl_subj_label(match, role);
58830 + }
58831 +
58832 + return;
58833 +}
58834 +
58835 +static void
58836 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
58837 + const ino_t newinode, const dev_t newdevice)
58838 +{
58839 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
58840 + struct inodev_entry *match;
58841 +
58842 + match = inodev_set.i_hash[index];
58843 +
58844 + while (match && (match->nentry->inode != oldinode ||
58845 + match->nentry->device != olddevice || !match->nentry->deleted))
58846 + match = match->next;
58847 +
58848 + if (match && (match->nentry->inode == oldinode)
58849 + && (match->nentry->device == olddevice) &&
58850 + match->nentry->deleted) {
58851 + if (match->prev == NULL) {
58852 + inodev_set.i_hash[index] = match->next;
58853 + if (match->next != NULL)
58854 + match->next->prev = NULL;
58855 + } else {
58856 + match->prev->next = match->next;
58857 + if (match->next != NULL)
58858 + match->next->prev = match->prev;
58859 + }
58860 + match->prev = NULL;
58861 + match->next = NULL;
58862 + match->nentry->inode = newinode;
58863 + match->nentry->device = newdevice;
58864 + match->nentry->deleted = 0;
58865 +
58866 + insert_inodev_entry(match);
58867 + }
58868 +
58869 + return;
58870 +}
58871 +
58872 +static void
58873 +__do_handle_create(const struct name_entry *matchn, ino_t inode, dev_t dev)
58874 +{
58875 + struct acl_subject_label *subj;
58876 + struct acl_role_label *role;
58877 + unsigned int x;
58878 +
58879 + FOR_EACH_ROLE_START(role)
58880 + update_acl_subj_label(matchn->inode, matchn->device,
58881 + inode, dev, role);
58882 +
58883 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
58884 + if ((subj->inode == inode) && (subj->device == dev)) {
58885 + subj->inode = inode;
58886 + subj->device = dev;
58887 + }
58888 + FOR_EACH_NESTED_SUBJECT_END(subj)
58889 + FOR_EACH_SUBJECT_START(role, subj, x)
58890 + update_acl_obj_label(matchn->inode, matchn->device,
58891 + inode, dev, subj);
58892 + FOR_EACH_SUBJECT_END(subj,x)
58893 + FOR_EACH_ROLE_END(role)
58894 +
58895 + update_inodev_entry(matchn->inode, matchn->device, inode, dev);
58896 +
58897 + return;
58898 +}
58899 +
58900 +static void
58901 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
58902 + const struct vfsmount *mnt)
58903 +{
58904 + ino_t ino = dentry->d_inode->i_ino;
58905 + dev_t dev = __get_dev(dentry);
58906 +
58907 + __do_handle_create(matchn, ino, dev);
58908 +
58909 + return;
58910 +}
58911 +
58912 +void
58913 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
58914 +{
58915 + struct name_entry *matchn;
58916 +
58917 + if (unlikely(!(gr_status & GR_READY)))
58918 + return;
58919 +
58920 + preempt_disable();
58921 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
58922 +
58923 + if (unlikely((unsigned long)matchn)) {
58924 + write_lock(&gr_inode_lock);
58925 + do_handle_create(matchn, dentry, mnt);
58926 + write_unlock(&gr_inode_lock);
58927 + }
58928 + preempt_enable();
58929 +
58930 + return;
58931 +}
58932 +
58933 +void
58934 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
58935 +{
58936 + struct name_entry *matchn;
58937 +
58938 + if (unlikely(!(gr_status & GR_READY)))
58939 + return;
58940 +
58941 + preempt_disable();
58942 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
58943 +
58944 + if (unlikely((unsigned long)matchn)) {
58945 + write_lock(&gr_inode_lock);
58946 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
58947 + write_unlock(&gr_inode_lock);
58948 + }
58949 + preempt_enable();
58950 +
58951 + return;
58952 +}
58953 +
58954 +void
58955 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
58956 + struct dentry *old_dentry,
58957 + struct dentry *new_dentry,
58958 + struct vfsmount *mnt, const __u8 replace)
58959 +{
58960 + struct name_entry *matchn;
58961 + struct inodev_entry *inodev;
58962 + struct inode *inode = new_dentry->d_inode;
58963 + ino_t oldinode = old_dentry->d_inode->i_ino;
58964 + dev_t olddev = __get_dev(old_dentry);
58965 +
58966 + /* vfs_rename swaps the name and parent link for old_dentry and
58967 + new_dentry
58968 + at this point, old_dentry has the new name, parent link, and inode
58969 + for the renamed file
58970 + if a file is being replaced by a rename, new_dentry has the inode
58971 + and name for the replaced file
58972 + */
58973 +
58974 + if (unlikely(!(gr_status & GR_READY)))
58975 + return;
58976 +
58977 + preempt_disable();
58978 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
58979 +
58980 + /* we wouldn't have to check d_inode if it weren't for
58981 + NFS silly-renaming
58982 + */
58983 +
58984 + write_lock(&gr_inode_lock);
58985 + if (unlikely(replace && inode)) {
58986 + ino_t newinode = inode->i_ino;
58987 + dev_t newdev = __get_dev(new_dentry);
58988 + inodev = lookup_inodev_entry(newinode, newdev);
58989 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
58990 + do_handle_delete(inodev, newinode, newdev);
58991 + }
58992 +
58993 + inodev = lookup_inodev_entry(oldinode, olddev);
58994 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
58995 + do_handle_delete(inodev, oldinode, olddev);
58996 +
58997 + if (unlikely((unsigned long)matchn))
58998 + do_handle_create(matchn, old_dentry, mnt);
58999 +
59000 + write_unlock(&gr_inode_lock);
59001 + preempt_enable();
59002 +
59003 + return;
59004 +}
59005 +
59006 +static int
59007 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
59008 + unsigned char **sum)
59009 +{
59010 + struct acl_role_label *r;
59011 + struct role_allowed_ip *ipp;
59012 + struct role_transition *trans;
59013 + unsigned int i;
59014 + int found = 0;
59015 + u32 curr_ip = current->signal->curr_ip;
59016 +
59017 + current->signal->saved_ip = curr_ip;
59018 +
59019 + /* check transition table */
59020 +
59021 + for (trans = current->role->transitions; trans; trans = trans->next) {
59022 + if (!strcmp(rolename, trans->rolename)) {
59023 + found = 1;
59024 + break;
59025 + }
59026 + }
59027 +
59028 + if (!found)
59029 + return 0;
59030 +
59031 + /* handle special roles that do not require authentication
59032 + and check ip */
59033 +
59034 + FOR_EACH_ROLE_START(r)
59035 + if (!strcmp(rolename, r->rolename) &&
59036 + (r->roletype & GR_ROLE_SPECIAL)) {
59037 + found = 0;
59038 + if (r->allowed_ips != NULL) {
59039 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
59040 + if ((ntohl(curr_ip) & ipp->netmask) ==
59041 + (ntohl(ipp->addr) & ipp->netmask))
59042 + found = 1;
59043 + }
59044 + } else
59045 + found = 2;
59046 + if (!found)
59047 + return 0;
59048 +
59049 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
59050 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
59051 + *salt = NULL;
59052 + *sum = NULL;
59053 + return 1;
59054 + }
59055 + }
59056 + FOR_EACH_ROLE_END(r)
59057 +
59058 + for (i = 0; i < num_sprole_pws; i++) {
59059 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
59060 + *salt = acl_special_roles[i]->salt;
59061 + *sum = acl_special_roles[i]->sum;
59062 + return 1;
59063 + }
59064 + }
59065 +
59066 + return 0;
59067 +}
59068 +
59069 +static void
59070 +assign_special_role(char *rolename)
59071 +{
59072 + struct acl_object_label *obj;
59073 + struct acl_role_label *r;
59074 + struct acl_role_label *assigned = NULL;
59075 + struct task_struct *tsk;
59076 + struct file *filp;
59077 +
59078 + FOR_EACH_ROLE_START(r)
59079 + if (!strcmp(rolename, r->rolename) &&
59080 + (r->roletype & GR_ROLE_SPECIAL)) {
59081 + assigned = r;
59082 + break;
59083 + }
59084 + FOR_EACH_ROLE_END(r)
59085 +
59086 + if (!assigned)
59087 + return;
59088 +
59089 + read_lock(&tasklist_lock);
59090 + read_lock(&grsec_exec_file_lock);
59091 +
59092 + tsk = current->real_parent;
59093 + if (tsk == NULL)
59094 + goto out_unlock;
59095 +
59096 + filp = tsk->exec_file;
59097 + if (filp == NULL)
59098 + goto out_unlock;
59099 +
59100 + tsk->is_writable = 0;
59101 +
59102 + tsk->acl_sp_role = 1;
59103 + tsk->acl_role_id = ++acl_sp_role_value;
59104 + tsk->role = assigned;
59105 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
59106 +
59107 + /* ignore additional mmap checks for processes that are writable
59108 + by the default ACL */
59109 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
59110 + if (unlikely(obj->mode & GR_WRITE))
59111 + tsk->is_writable = 1;
59112 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
59113 + if (unlikely(obj->mode & GR_WRITE))
59114 + tsk->is_writable = 1;
59115 +
59116 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59117 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
59118 +#endif
59119 +
59120 +out_unlock:
59121 + read_unlock(&grsec_exec_file_lock);
59122 + read_unlock(&tasklist_lock);
59123 + return;
59124 +}
59125 +
59126 +int gr_check_secure_terminal(struct task_struct *task)
59127 +{
59128 + struct task_struct *p, *p2, *p3;
59129 + struct files_struct *files;
59130 + struct fdtable *fdt;
59131 + struct file *our_file = NULL, *file;
59132 + int i;
59133 +
59134 + if (task->signal->tty == NULL)
59135 + return 1;
59136 +
59137 + files = get_files_struct(task);
59138 + if (files != NULL) {
59139 + rcu_read_lock();
59140 + fdt = files_fdtable(files);
59141 + for (i=0; i < fdt->max_fds; i++) {
59142 + file = fcheck_files(files, i);
59143 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
59144 + get_file(file);
59145 + our_file = file;
59146 + }
59147 + }
59148 + rcu_read_unlock();
59149 + put_files_struct(files);
59150 + }
59151 +
59152 + if (our_file == NULL)
59153 + return 1;
59154 +
59155 + read_lock(&tasklist_lock);
59156 + do_each_thread(p2, p) {
59157 + files = get_files_struct(p);
59158 + if (files == NULL ||
59159 + (p->signal && p->signal->tty == task->signal->tty)) {
59160 + if (files != NULL)
59161 + put_files_struct(files);
59162 + continue;
59163 + }
59164 + rcu_read_lock();
59165 + fdt = files_fdtable(files);
59166 + for (i=0; i < fdt->max_fds; i++) {
59167 + file = fcheck_files(files, i);
59168 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
59169 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
59170 + p3 = task;
59171 + while (p3->pid > 0) {
59172 + if (p3 == p)
59173 + break;
59174 + p3 = p3->real_parent;
59175 + }
59176 + if (p3 == p)
59177 + break;
59178 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
59179 + gr_handle_alertkill(p);
59180 + rcu_read_unlock();
59181 + put_files_struct(files);
59182 + read_unlock(&tasklist_lock);
59183 + fput(our_file);
59184 + return 0;
59185 + }
59186 + }
59187 + rcu_read_unlock();
59188 + put_files_struct(files);
59189 + } while_each_thread(p2, p);
59190 + read_unlock(&tasklist_lock);
59191 +
59192 + fput(our_file);
59193 + return 1;
59194 +}
59195 +
59196 +ssize_t
59197 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
59198 +{
59199 + struct gr_arg_wrapper uwrap;
59200 + unsigned char *sprole_salt = NULL;
59201 + unsigned char *sprole_sum = NULL;
59202 + int error = sizeof (struct gr_arg_wrapper);
59203 + int error2 = 0;
59204 +
59205 + mutex_lock(&gr_dev_mutex);
59206 +
59207 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
59208 + error = -EPERM;
59209 + goto out;
59210 + }
59211 +
59212 + if (count != sizeof (struct gr_arg_wrapper)) {
59213 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
59214 + error = -EINVAL;
59215 + goto out;
59216 + }
59217 +
59218 +
59219 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
59220 + gr_auth_expires = 0;
59221 + gr_auth_attempts = 0;
59222 + }
59223 +
59224 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
59225 + error = -EFAULT;
59226 + goto out;
59227 + }
59228 +
59229 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
59230 + error = -EINVAL;
59231 + goto out;
59232 + }
59233 +
59234 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
59235 + error = -EFAULT;
59236 + goto out;
59237 + }
59238 +
59239 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
59240 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
59241 + time_after(gr_auth_expires, get_seconds())) {
59242 + error = -EBUSY;
59243 + goto out;
59244 + }
59245 +
59246 + /* if non-root trying to do anything other than use a special role,
59247 + do not attempt authentication, do not count towards authentication
59248 + locking
59249 + */
59250 +
59251 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
59252 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
59253 + current_uid()) {
59254 + error = -EPERM;
59255 + goto out;
59256 + }
59257 +
59258 + /* ensure pw and special role name are null terminated */
59259 +
59260 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
59261 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
59262 +
59263 + /* Okay.
59264 + * We have our enough of the argument structure..(we have yet
59265 + * to copy_from_user the tables themselves) . Copy the tables
59266 + * only if we need them, i.e. for loading operations. */
59267 +
59268 + switch (gr_usermode->mode) {
59269 + case GR_STATUS:
59270 + if (gr_status & GR_READY) {
59271 + error = 1;
59272 + if (!gr_check_secure_terminal(current))
59273 + error = 3;
59274 + } else
59275 + error = 2;
59276 + goto out;
59277 + case GR_SHUTDOWN:
59278 + if ((gr_status & GR_READY)
59279 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59280 + pax_open_kernel();
59281 + gr_status &= ~GR_READY;
59282 + pax_close_kernel();
59283 +
59284 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
59285 + free_variables();
59286 + memset(gr_usermode, 0, sizeof (struct gr_arg));
59287 + memset(gr_system_salt, 0, GR_SALT_LEN);
59288 + memset(gr_system_sum, 0, GR_SHA_LEN);
59289 + } else if (gr_status & GR_READY) {
59290 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
59291 + error = -EPERM;
59292 + } else {
59293 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
59294 + error = -EAGAIN;
59295 + }
59296 + break;
59297 + case GR_ENABLE:
59298 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
59299 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
59300 + else {
59301 + if (gr_status & GR_READY)
59302 + error = -EAGAIN;
59303 + else
59304 + error = error2;
59305 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
59306 + }
59307 + break;
59308 + case GR_RELOAD:
59309 + if (!(gr_status & GR_READY)) {
59310 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
59311 + error = -EAGAIN;
59312 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59313 + lock_kernel();
59314 +
59315 + pax_open_kernel();
59316 + gr_status &= ~GR_READY;
59317 + pax_close_kernel();
59318 +
59319 + free_variables();
59320 + if (!(error2 = gracl_init(gr_usermode))) {
59321 + unlock_kernel();
59322 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
59323 + } else {
59324 + unlock_kernel();
59325 + error = error2;
59326 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
59327 + }
59328 + } else {
59329 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
59330 + error = -EPERM;
59331 + }
59332 + break;
59333 + case GR_SEGVMOD:
59334 + if (unlikely(!(gr_status & GR_READY))) {
59335 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
59336 + error = -EAGAIN;
59337 + break;
59338 + }
59339 +
59340 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59341 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
59342 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
59343 + struct acl_subject_label *segvacl;
59344 + segvacl =
59345 + lookup_acl_subj_label(gr_usermode->segv_inode,
59346 + gr_usermode->segv_device,
59347 + current->role);
59348 + if (segvacl) {
59349 + segvacl->crashes = 0;
59350 + segvacl->expires = 0;
59351 + }
59352 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
59353 + gr_remove_uid(gr_usermode->segv_uid);
59354 + }
59355 + } else {
59356 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
59357 + error = -EPERM;
59358 + }
59359 + break;
59360 + case GR_SPROLE:
59361 + case GR_SPROLEPAM:
59362 + if (unlikely(!(gr_status & GR_READY))) {
59363 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
59364 + error = -EAGAIN;
59365 + break;
59366 + }
59367 +
59368 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
59369 + current->role->expires = 0;
59370 + current->role->auth_attempts = 0;
59371 + }
59372 +
59373 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
59374 + time_after(current->role->expires, get_seconds())) {
59375 + error = -EBUSY;
59376 + goto out;
59377 + }
59378 +
59379 + if (lookup_special_role_auth
59380 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
59381 + && ((!sprole_salt && !sprole_sum)
59382 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
59383 + char *p = "";
59384 + assign_special_role(gr_usermode->sp_role);
59385 + read_lock(&tasklist_lock);
59386 + if (current->real_parent)
59387 + p = current->real_parent->role->rolename;
59388 + read_unlock(&tasklist_lock);
59389 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
59390 + p, acl_sp_role_value);
59391 + } else {
59392 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
59393 + error = -EPERM;
59394 + if(!(current->role->auth_attempts++))
59395 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
59396 +
59397 + goto out;
59398 + }
59399 + break;
59400 + case GR_UNSPROLE:
59401 + if (unlikely(!(gr_status & GR_READY))) {
59402 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
59403 + error = -EAGAIN;
59404 + break;
59405 + }
59406 +
59407 + if (current->role->roletype & GR_ROLE_SPECIAL) {
59408 + char *p = "";
59409 + int i = 0;
59410 +
59411 + read_lock(&tasklist_lock);
59412 + if (current->real_parent) {
59413 + p = current->real_parent->role->rolename;
59414 + i = current->real_parent->acl_role_id;
59415 + }
59416 + read_unlock(&tasklist_lock);
59417 +
59418 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
59419 + gr_set_acls(1);
59420 + } else {
59421 + error = -EPERM;
59422 + goto out;
59423 + }
59424 + break;
59425 + default:
59426 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
59427 + error = -EINVAL;
59428 + break;
59429 + }
59430 +
59431 + if (error != -EPERM)
59432 + goto out;
59433 +
59434 + if(!(gr_auth_attempts++))
59435 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
59436 +
59437 + out:
59438 + mutex_unlock(&gr_dev_mutex);
59439 + return error;
59440 +}
59441 +
59442 +/* must be called with
59443 + rcu_read_lock();
59444 + read_lock(&tasklist_lock);
59445 + read_lock(&grsec_exec_file_lock);
59446 +*/
59447 +int gr_apply_subject_to_task(struct task_struct *task)
59448 +{
59449 + struct acl_object_label *obj;
59450 + char *tmpname;
59451 + struct acl_subject_label *tmpsubj;
59452 + struct file *filp;
59453 + struct name_entry *nmatch;
59454 +
59455 + filp = task->exec_file;
59456 + if (filp == NULL)
59457 + return 0;
59458 +
59459 + /* the following is to apply the correct subject
59460 + on binaries running when the RBAC system
59461 + is enabled, when the binaries have been
59462 + replaced or deleted since their execution
59463 + -----
59464 + when the RBAC system starts, the inode/dev
59465 + from exec_file will be one the RBAC system
59466 + is unaware of. It only knows the inode/dev
59467 + of the present file on disk, or the absence
59468 + of it.
59469 + */
59470 + preempt_disable();
59471 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
59472 +
59473 + nmatch = lookup_name_entry(tmpname);
59474 + preempt_enable();
59475 + tmpsubj = NULL;
59476 + if (nmatch) {
59477 + if (nmatch->deleted)
59478 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
59479 + else
59480 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
59481 + if (tmpsubj != NULL)
59482 + task->acl = tmpsubj;
59483 + }
59484 + if (tmpsubj == NULL)
59485 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
59486 + task->role);
59487 + if (task->acl) {
59488 + task->is_writable = 0;
59489 + /* ignore additional mmap checks for processes that are writable
59490 + by the default ACL */
59491 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
59492 + if (unlikely(obj->mode & GR_WRITE))
59493 + task->is_writable = 1;
59494 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
59495 + if (unlikely(obj->mode & GR_WRITE))
59496 + task->is_writable = 1;
59497 +
59498 + gr_set_proc_res(task);
59499 +
59500 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59501 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
59502 +#endif
59503 + } else {
59504 + return 1;
59505 + }
59506 +
59507 + return 0;
59508 +}
59509 +
59510 +int
59511 +gr_set_acls(const int type)
59512 +{
59513 + struct task_struct *task, *task2;
59514 + struct acl_role_label *role = current->role;
59515 + __u16 acl_role_id = current->acl_role_id;
59516 + const struct cred *cred;
59517 + int ret;
59518 +
59519 + rcu_read_lock();
59520 + read_lock(&tasklist_lock);
59521 + read_lock(&grsec_exec_file_lock);
59522 + do_each_thread(task2, task) {
59523 + /* check to see if we're called from the exit handler,
59524 + if so, only replace ACLs that have inherited the admin
59525 + ACL */
59526 +
59527 + if (type && (task->role != role ||
59528 + task->acl_role_id != acl_role_id))
59529 + continue;
59530 +
59531 + task->acl_role_id = 0;
59532 + task->acl_sp_role = 0;
59533 +
59534 + if (task->exec_file) {
59535 + cred = __task_cred(task);
59536 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
59537 +
59538 + ret = gr_apply_subject_to_task(task);
59539 + if (ret) {
59540 + read_unlock(&grsec_exec_file_lock);
59541 + read_unlock(&tasklist_lock);
59542 + rcu_read_unlock();
59543 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
59544 + return ret;
59545 + }
59546 + } else {
59547 + // it's a kernel process
59548 + task->role = kernel_role;
59549 + task->acl = kernel_role->root_label;
59550 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
59551 + task->acl->mode &= ~GR_PROCFIND;
59552 +#endif
59553 + }
59554 + } while_each_thread(task2, task);
59555 + read_unlock(&grsec_exec_file_lock);
59556 + read_unlock(&tasklist_lock);
59557 + rcu_read_unlock();
59558 +
59559 + return 0;
59560 +}
59561 +
59562 +void
59563 +gr_learn_resource(const struct task_struct *task,
59564 + const int res, const unsigned long wanted, const int gt)
59565 +{
59566 + struct acl_subject_label *acl;
59567 + const struct cred *cred;
59568 +
59569 + if (unlikely((gr_status & GR_READY) &&
59570 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
59571 + goto skip_reslog;
59572 +
59573 +#ifdef CONFIG_GRKERNSEC_RESLOG
59574 + gr_log_resource(task, res, wanted, gt);
59575 +#endif
59576 + skip_reslog:
59577 +
59578 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
59579 + return;
59580 +
59581 + acl = task->acl;
59582 +
59583 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
59584 + !(acl->resmask & (1 << (unsigned short) res))))
59585 + return;
59586 +
59587 + if (wanted >= acl->res[res].rlim_cur) {
59588 + unsigned long res_add;
59589 +
59590 + res_add = wanted;
59591 + switch (res) {
59592 + case RLIMIT_CPU:
59593 + res_add += GR_RLIM_CPU_BUMP;
59594 + break;
59595 + case RLIMIT_FSIZE:
59596 + res_add += GR_RLIM_FSIZE_BUMP;
59597 + break;
59598 + case RLIMIT_DATA:
59599 + res_add += GR_RLIM_DATA_BUMP;
59600 + break;
59601 + case RLIMIT_STACK:
59602 + res_add += GR_RLIM_STACK_BUMP;
59603 + break;
59604 + case RLIMIT_CORE:
59605 + res_add += GR_RLIM_CORE_BUMP;
59606 + break;
59607 + case RLIMIT_RSS:
59608 + res_add += GR_RLIM_RSS_BUMP;
59609 + break;
59610 + case RLIMIT_NPROC:
59611 + res_add += GR_RLIM_NPROC_BUMP;
59612 + break;
59613 + case RLIMIT_NOFILE:
59614 + res_add += GR_RLIM_NOFILE_BUMP;
59615 + break;
59616 + case RLIMIT_MEMLOCK:
59617 + res_add += GR_RLIM_MEMLOCK_BUMP;
59618 + break;
59619 + case RLIMIT_AS:
59620 + res_add += GR_RLIM_AS_BUMP;
59621 + break;
59622 + case RLIMIT_LOCKS:
59623 + res_add += GR_RLIM_LOCKS_BUMP;
59624 + break;
59625 + case RLIMIT_SIGPENDING:
59626 + res_add += GR_RLIM_SIGPENDING_BUMP;
59627 + break;
59628 + case RLIMIT_MSGQUEUE:
59629 + res_add += GR_RLIM_MSGQUEUE_BUMP;
59630 + break;
59631 + case RLIMIT_NICE:
59632 + res_add += GR_RLIM_NICE_BUMP;
59633 + break;
59634 + case RLIMIT_RTPRIO:
59635 + res_add += GR_RLIM_RTPRIO_BUMP;
59636 + break;
59637 + case RLIMIT_RTTIME:
59638 + res_add += GR_RLIM_RTTIME_BUMP;
59639 + break;
59640 + }
59641 +
59642 + acl->res[res].rlim_cur = res_add;
59643 +
59644 + if (wanted > acl->res[res].rlim_max)
59645 + acl->res[res].rlim_max = res_add;
59646 +
59647 + /* only log the subject filename, since resource logging is supported for
59648 + single-subject learning only */
59649 + rcu_read_lock();
59650 + cred = __task_cred(task);
59651 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
59652 + task->role->roletype, cred->uid, cred->gid, acl->filename,
59653 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
59654 + "", (unsigned long) res, &task->signal->saved_ip);
59655 + rcu_read_unlock();
59656 + }
59657 +
59658 + return;
59659 +}
59660 +
59661 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
59662 +void
59663 +pax_set_initial_flags(struct linux_binprm *bprm)
59664 +{
59665 + struct task_struct *task = current;
59666 + struct acl_subject_label *proc;
59667 + unsigned long flags;
59668 +
59669 + if (unlikely(!(gr_status & GR_READY)))
59670 + return;
59671 +
59672 + flags = pax_get_flags(task);
59673 +
59674 + proc = task->acl;
59675 +
59676 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
59677 + flags &= ~MF_PAX_PAGEEXEC;
59678 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
59679 + flags &= ~MF_PAX_SEGMEXEC;
59680 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
59681 + flags &= ~MF_PAX_RANDMMAP;
59682 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
59683 + flags &= ~MF_PAX_EMUTRAMP;
59684 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
59685 + flags &= ~MF_PAX_MPROTECT;
59686 +
59687 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
59688 + flags |= MF_PAX_PAGEEXEC;
59689 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
59690 + flags |= MF_PAX_SEGMEXEC;
59691 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
59692 + flags |= MF_PAX_RANDMMAP;
59693 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
59694 + flags |= MF_PAX_EMUTRAMP;
59695 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
59696 + flags |= MF_PAX_MPROTECT;
59697 +
59698 + pax_set_flags(task, flags);
59699 +
59700 + return;
59701 +}
59702 +#endif
59703 +
59704 +#ifdef CONFIG_SYSCTL
59705 +/* Eric Biederman likes breaking userland ABI and every inode-based security
59706 + system to save 35kb of memory */
59707 +
59708 +/* we modify the passed in filename, but adjust it back before returning */
59709 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
59710 +{
59711 + struct name_entry *nmatch;
59712 + char *p, *lastp = NULL;
59713 + struct acl_object_label *obj = NULL, *tmp;
59714 + struct acl_subject_label *tmpsubj;
59715 + char c = '\0';
59716 +
59717 + read_lock(&gr_inode_lock);
59718 +
59719 + p = name + len - 1;
59720 + do {
59721 + nmatch = lookup_name_entry(name);
59722 + if (lastp != NULL)
59723 + *lastp = c;
59724 +
59725 + if (nmatch == NULL)
59726 + goto next_component;
59727 + tmpsubj = current->acl;
59728 + do {
59729 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
59730 + if (obj != NULL) {
59731 + tmp = obj->globbed;
59732 + while (tmp) {
59733 + if (!glob_match(tmp->filename, name)) {
59734 + obj = tmp;
59735 + goto found_obj;
59736 + }
59737 + tmp = tmp->next;
59738 + }
59739 + goto found_obj;
59740 + }
59741 + } while ((tmpsubj = tmpsubj->parent_subject));
59742 +next_component:
59743 + /* end case */
59744 + if (p == name)
59745 + break;
59746 +
59747 + while (*p != '/')
59748 + p--;
59749 + if (p == name)
59750 + lastp = p + 1;
59751 + else {
59752 + lastp = p;
59753 + p--;
59754 + }
59755 + c = *lastp;
59756 + *lastp = '\0';
59757 + } while (1);
59758 +found_obj:
59759 + read_unlock(&gr_inode_lock);
59760 + /* obj returned will always be non-null */
59761 + return obj;
59762 +}
59763 +
59764 +/* returns 0 when allowing, non-zero on error
59765 + op of 0 is used for readdir, so we don't log the names of hidden files
59766 +*/
59767 +__u32
59768 +gr_handle_sysctl(const struct ctl_table *table, const int op)
59769 +{
59770 + ctl_table *tmp;
59771 + const char *proc_sys = "/proc/sys";
59772 + char *path;
59773 + struct acl_object_label *obj;
59774 + unsigned short len = 0, pos = 0, depth = 0, i;
59775 + __u32 err = 0;
59776 + __u32 mode = 0;
59777 +
59778 + if (unlikely(!(gr_status & GR_READY)))
59779 + return 0;
59780 +
59781 + /* for now, ignore operations on non-sysctl entries if it's not a
59782 + readdir*/
59783 + if (table->child != NULL && op != 0)
59784 + return 0;
59785 +
59786 + mode |= GR_FIND;
59787 + /* it's only a read if it's an entry, read on dirs is for readdir */
59788 + if (op & MAY_READ)
59789 + mode |= GR_READ;
59790 + if (op & MAY_WRITE)
59791 + mode |= GR_WRITE;
59792 +
59793 + preempt_disable();
59794 +
59795 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
59796 +
59797 + /* it's only a read/write if it's an actual entry, not a dir
59798 + (which are opened for readdir)
59799 + */
59800 +
59801 + /* convert the requested sysctl entry into a pathname */
59802 +
59803 + for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
59804 + len += strlen(tmp->procname);
59805 + len++;
59806 + depth++;
59807 + }
59808 +
59809 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
59810 + /* deny */
59811 + goto out;
59812 + }
59813 +
59814 + memset(path, 0, PAGE_SIZE);
59815 +
59816 + memcpy(path, proc_sys, strlen(proc_sys));
59817 +
59818 + pos += strlen(proc_sys);
59819 +
59820 + for (; depth > 0; depth--) {
59821 + path[pos] = '/';
59822 + pos++;
59823 + for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
59824 + if (depth == i) {
59825 + memcpy(path + pos, tmp->procname,
59826 + strlen(tmp->procname));
59827 + pos += strlen(tmp->procname);
59828 + }
59829 + i++;
59830 + }
59831 + }
59832 +
59833 + obj = gr_lookup_by_name(path, pos);
59834 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
59835 +
59836 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
59837 + ((err & mode) != mode))) {
59838 + __u32 new_mode = mode;
59839 +
59840 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
59841 +
59842 + err = 0;
59843 + gr_log_learn_sysctl(path, new_mode);
59844 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
59845 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
59846 + err = -ENOENT;
59847 + } else if (!(err & GR_FIND)) {
59848 + err = -ENOENT;
59849 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
59850 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
59851 + path, (mode & GR_READ) ? " reading" : "",
59852 + (mode & GR_WRITE) ? " writing" : "");
59853 + err = -EACCES;
59854 + } else if ((err & mode) != mode) {
59855 + err = -EACCES;
59856 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
59857 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
59858 + path, (mode & GR_READ) ? " reading" : "",
59859 + (mode & GR_WRITE) ? " writing" : "");
59860 + err = 0;
59861 + } else
59862 + err = 0;
59863 +
59864 + out:
59865 + preempt_enable();
59866 +
59867 + return err;
59868 +}
59869 +#endif
59870 +
59871 +int
59872 +gr_handle_proc_ptrace(struct task_struct *task)
59873 +{
59874 + struct file *filp;
59875 + struct task_struct *tmp = task;
59876 + struct task_struct *curtemp = current;
59877 + __u32 retmode;
59878 +
59879 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
59880 + if (unlikely(!(gr_status & GR_READY)))
59881 + return 0;
59882 +#endif
59883 +
59884 + read_lock(&tasklist_lock);
59885 + read_lock(&grsec_exec_file_lock);
59886 + filp = task->exec_file;
59887 +
59888 + while (tmp->pid > 0) {
59889 + if (tmp == curtemp)
59890 + break;
59891 + tmp = tmp->real_parent;
59892 + }
59893 +
59894 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
59895 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
59896 + read_unlock(&grsec_exec_file_lock);
59897 + read_unlock(&tasklist_lock);
59898 + return 1;
59899 + }
59900 +
59901 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
59902 + if (!(gr_status & GR_READY)) {
59903 + read_unlock(&grsec_exec_file_lock);
59904 + read_unlock(&tasklist_lock);
59905 + return 0;
59906 + }
59907 +#endif
59908 +
59909 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
59910 + read_unlock(&grsec_exec_file_lock);
59911 + read_unlock(&tasklist_lock);
59912 +
59913 + if (retmode & GR_NOPTRACE)
59914 + return 1;
59915 +
59916 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
59917 + && (current->acl != task->acl || (current->acl != current->role->root_label
59918 + && current->pid != task->pid)))
59919 + return 1;
59920 +
59921 + return 0;
59922 +}
59923 +
59924 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
59925 +{
59926 + if (unlikely(!(gr_status & GR_READY)))
59927 + return;
59928 +
59929 + if (!(current->role->roletype & GR_ROLE_GOD))
59930 + return;
59931 +
59932 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
59933 + p->role->rolename, gr_task_roletype_to_char(p),
59934 + p->acl->filename);
59935 +}
59936 +
59937 +int
59938 +gr_handle_ptrace(struct task_struct *task, const long request)
59939 +{
59940 + struct task_struct *tmp = task;
59941 + struct task_struct *curtemp = current;
59942 + __u32 retmode;
59943 +
59944 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
59945 + if (unlikely(!(gr_status & GR_READY)))
59946 + return 0;
59947 +#endif
59948 +
59949 + read_lock(&tasklist_lock);
59950 + while (tmp->pid > 0) {
59951 + if (tmp == curtemp)
59952 + break;
59953 + tmp = tmp->real_parent;
59954 + }
59955 +
59956 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
59957 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
59958 + read_unlock(&tasklist_lock);
59959 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
59960 + return 1;
59961 + }
59962 + read_unlock(&tasklist_lock);
59963 +
59964 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
59965 + if (!(gr_status & GR_READY))
59966 + return 0;
59967 +#endif
59968 +
59969 + read_lock(&grsec_exec_file_lock);
59970 + if (unlikely(!task->exec_file)) {
59971 + read_unlock(&grsec_exec_file_lock);
59972 + return 0;
59973 + }
59974 +
59975 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
59976 + read_unlock(&grsec_exec_file_lock);
59977 +
59978 + if (retmode & GR_NOPTRACE) {
59979 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
59980 + return 1;
59981 + }
59982 +
59983 + if (retmode & GR_PTRACERD) {
59984 + switch (request) {
59985 + case PTRACE_POKETEXT:
59986 + case PTRACE_POKEDATA:
59987 + case PTRACE_POKEUSR:
59988 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
59989 + case PTRACE_SETREGS:
59990 + case PTRACE_SETFPREGS:
59991 +#endif
59992 +#ifdef CONFIG_X86
59993 + case PTRACE_SETFPXREGS:
59994 +#endif
59995 +#ifdef CONFIG_ALTIVEC
59996 + case PTRACE_SETVRREGS:
59997 +#endif
59998 + return 1;
59999 + default:
60000 + return 0;
60001 + }
60002 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
60003 + !(current->role->roletype & GR_ROLE_GOD) &&
60004 + (current->acl != task->acl)) {
60005 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
60006 + return 1;
60007 + }
60008 +
60009 + return 0;
60010 +}
60011 +
60012 +static int is_writable_mmap(const struct file *filp)
60013 +{
60014 + struct task_struct *task = current;
60015 + struct acl_object_label *obj, *obj2;
60016 +
60017 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
60018 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
60019 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
60020 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
60021 + task->role->root_label);
60022 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
60023 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
60024 + return 1;
60025 + }
60026 + }
60027 + return 0;
60028 +}
60029 +
60030 +int
60031 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
60032 +{
60033 + __u32 mode;
60034 +
60035 + if (unlikely(!file || !(prot & PROT_EXEC)))
60036 + return 1;
60037 +
60038 + if (is_writable_mmap(file))
60039 + return 0;
60040 +
60041 + mode =
60042 + gr_search_file(file->f_path.dentry,
60043 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
60044 + file->f_path.mnt);
60045 +
60046 + if (!gr_tpe_allow(file))
60047 + return 0;
60048 +
60049 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
60050 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60051 + return 0;
60052 + } else if (unlikely(!(mode & GR_EXEC))) {
60053 + return 0;
60054 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
60055 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60056 + return 1;
60057 + }
60058 +
60059 + return 1;
60060 +}
60061 +
60062 +int
60063 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
60064 +{
60065 + __u32 mode;
60066 +
60067 + if (unlikely(!file || !(prot & PROT_EXEC)))
60068 + return 1;
60069 +
60070 + if (is_writable_mmap(file))
60071 + return 0;
60072 +
60073 + mode =
60074 + gr_search_file(file->f_path.dentry,
60075 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
60076 + file->f_path.mnt);
60077 +
60078 + if (!gr_tpe_allow(file))
60079 + return 0;
60080 +
60081 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
60082 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60083 + return 0;
60084 + } else if (unlikely(!(mode & GR_EXEC))) {
60085 + return 0;
60086 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
60087 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60088 + return 1;
60089 + }
60090 +
60091 + return 1;
60092 +}
60093 +
60094 +void
60095 +gr_acl_handle_psacct(struct task_struct *task, const long code)
60096 +{
60097 + unsigned long runtime;
60098 + unsigned long cputime;
60099 + unsigned int wday, cday;
60100 + __u8 whr, chr;
60101 + __u8 wmin, cmin;
60102 + __u8 wsec, csec;
60103 + struct timespec timeval;
60104 +
60105 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
60106 + !(task->acl->mode & GR_PROCACCT)))
60107 + return;
60108 +
60109 + do_posix_clock_monotonic_gettime(&timeval);
60110 + runtime = timeval.tv_sec - task->start_time.tv_sec;
60111 + wday = runtime / (3600 * 24);
60112 + runtime -= wday * (3600 * 24);
60113 + whr = runtime / 3600;
60114 + runtime -= whr * 3600;
60115 + wmin = runtime / 60;
60116 + runtime -= wmin * 60;
60117 + wsec = runtime;
60118 +
60119 + cputime = (task->utime + task->stime) / HZ;
60120 + cday = cputime / (3600 * 24);
60121 + cputime -= cday * (3600 * 24);
60122 + chr = cputime / 3600;
60123 + cputime -= chr * 3600;
60124 + cmin = cputime / 60;
60125 + cputime -= cmin * 60;
60126 + csec = cputime;
60127 +
60128 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
60129 +
60130 + return;
60131 +}
60132 +
60133 +void gr_set_kernel_label(struct task_struct *task)
60134 +{
60135 + if (gr_status & GR_READY) {
60136 + task->role = kernel_role;
60137 + task->acl = kernel_role->root_label;
60138 + }
60139 + return;
60140 +}
60141 +
60142 +#ifdef CONFIG_TASKSTATS
60143 +int gr_is_taskstats_denied(int pid)
60144 +{
60145 + struct task_struct *task;
60146 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60147 + const struct cred *cred;
60148 +#endif
60149 + int ret = 0;
60150 +
60151 + /* restrict taskstats viewing to un-chrooted root users
60152 + who have the 'view' subject flag if the RBAC system is enabled
60153 + */
60154 +
60155 + rcu_read_lock();
60156 + read_lock(&tasklist_lock);
60157 + task = find_task_by_vpid(pid);
60158 + if (task) {
60159 +#ifdef CONFIG_GRKERNSEC_CHROOT
60160 + if (proc_is_chrooted(task))
60161 + ret = -EACCES;
60162 +#endif
60163 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60164 + cred = __task_cred(task);
60165 +#ifdef CONFIG_GRKERNSEC_PROC_USER
60166 + if (cred->uid != 0)
60167 + ret = -EACCES;
60168 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60169 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
60170 + ret = -EACCES;
60171 +#endif
60172 +#endif
60173 + if (gr_status & GR_READY) {
60174 + if (!(task->acl->mode & GR_VIEW))
60175 + ret = -EACCES;
60176 + }
60177 + } else
60178 + ret = -ENOENT;
60179 +
60180 + read_unlock(&tasklist_lock);
60181 + rcu_read_unlock();
60182 +
60183 + return ret;
60184 +}
60185 +#endif
60186 +
60187 +/* AUXV entries are filled via a descendant of search_binary_handler
60188 + after we've already applied the subject for the target
60189 +*/
60190 +int gr_acl_enable_at_secure(void)
60191 +{
60192 + if (unlikely(!(gr_status & GR_READY)))
60193 + return 0;
60194 +
60195 + if (current->acl->mode & GR_ATSECURE)
60196 + return 1;
60197 +
60198 + return 0;
60199 +}
60200 +
60201 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
60202 +{
60203 + struct task_struct *task = current;
60204 + struct dentry *dentry = file->f_path.dentry;
60205 + struct vfsmount *mnt = file->f_path.mnt;
60206 + struct acl_object_label *obj, *tmp;
60207 + struct acl_subject_label *subj;
60208 + unsigned int bufsize;
60209 + int is_not_root;
60210 + char *path;
60211 + dev_t dev = __get_dev(dentry);
60212 +
60213 + if (unlikely(!(gr_status & GR_READY)))
60214 + return 1;
60215 +
60216 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
60217 + return 1;
60218 +
60219 + /* ignore Eric Biederman */
60220 + if (IS_PRIVATE(dentry->d_inode))
60221 + return 1;
60222 +
60223 + subj = task->acl;
60224 + do {
60225 + obj = lookup_acl_obj_label(ino, dev, subj);
60226 + if (obj != NULL)
60227 + return (obj->mode & GR_FIND) ? 1 : 0;
60228 + } while ((subj = subj->parent_subject));
60229 +
60230 + /* this is purely an optimization since we're looking for an object
60231 + for the directory we're doing a readdir on
60232 + if it's possible for any globbed object to match the entry we're
60233 + filling into the directory, then the object we find here will be
60234 + an anchor point with attached globbed objects
60235 + */
60236 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
60237 + if (obj->globbed == NULL)
60238 + return (obj->mode & GR_FIND) ? 1 : 0;
60239 +
60240 + is_not_root = ((obj->filename[0] == '/') &&
60241 + (obj->filename[1] == '\0')) ? 0 : 1;
60242 + bufsize = PAGE_SIZE - namelen - is_not_root;
60243 +
60244 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
60245 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
60246 + return 1;
60247 +
60248 + preempt_disable();
60249 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
60250 + bufsize);
60251 +
60252 + bufsize = strlen(path);
60253 +
60254 + /* if base is "/", don't append an additional slash */
60255 + if (is_not_root)
60256 + *(path + bufsize) = '/';
60257 + memcpy(path + bufsize + is_not_root, name, namelen);
60258 + *(path + bufsize + namelen + is_not_root) = '\0';
60259 +
60260 + tmp = obj->globbed;
60261 + while (tmp) {
60262 + if (!glob_match(tmp->filename, path)) {
60263 + preempt_enable();
60264 + return (tmp->mode & GR_FIND) ? 1 : 0;
60265 + }
60266 + tmp = tmp->next;
60267 + }
60268 + preempt_enable();
60269 + return (obj->mode & GR_FIND) ? 1 : 0;
60270 +}
60271 +
60272 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
60273 +EXPORT_SYMBOL(gr_acl_is_enabled);
60274 +#endif
60275 +EXPORT_SYMBOL(gr_learn_resource);
60276 +EXPORT_SYMBOL(gr_set_kernel_label);
60277 +#ifdef CONFIG_SECURITY
60278 +EXPORT_SYMBOL(gr_check_user_change);
60279 +EXPORT_SYMBOL(gr_check_group_change);
60280 +#endif
60281 +
60282 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
60283 new file mode 100644
60284 index 0000000..34fefda
60285 --- /dev/null
60286 +++ b/grsecurity/gracl_alloc.c
60287 @@ -0,0 +1,105 @@
60288 +#include <linux/kernel.h>
60289 +#include <linux/mm.h>
60290 +#include <linux/slab.h>
60291 +#include <linux/vmalloc.h>
60292 +#include <linux/gracl.h>
60293 +#include <linux/grsecurity.h>
60294 +
60295 +static unsigned long alloc_stack_next = 1;
60296 +static unsigned long alloc_stack_size = 1;
60297 +static void **alloc_stack;
60298 +
60299 +static __inline__ int
60300 +alloc_pop(void)
60301 +{
60302 + if (alloc_stack_next == 1)
60303 + return 0;
60304 +
60305 + kfree(alloc_stack[alloc_stack_next - 2]);
60306 +
60307 + alloc_stack_next--;
60308 +
60309 + return 1;
60310 +}
60311 +
60312 +static __inline__ int
60313 +alloc_push(void *buf)
60314 +{
60315 + if (alloc_stack_next >= alloc_stack_size)
60316 + return 1;
60317 +
60318 + alloc_stack[alloc_stack_next - 1] = buf;
60319 +
60320 + alloc_stack_next++;
60321 +
60322 + return 0;
60323 +}
60324 +
60325 +void *
60326 +acl_alloc(unsigned long len)
60327 +{
60328 + void *ret = NULL;
60329 +
60330 + if (!len || len > PAGE_SIZE)
60331 + goto out;
60332 +
60333 + ret = kmalloc(len, GFP_KERNEL);
60334 +
60335 + if (ret) {
60336 + if (alloc_push(ret)) {
60337 + kfree(ret);
60338 + ret = NULL;
60339 + }
60340 + }
60341 +
60342 +out:
60343 + return ret;
60344 +}
60345 +
60346 +void *
60347 +acl_alloc_num(unsigned long num, unsigned long len)
60348 +{
60349 + if (!len || (num > (PAGE_SIZE / len)))
60350 + return NULL;
60351 +
60352 + return acl_alloc(num * len);
60353 +}
60354 +
60355 +void
60356 +acl_free_all(void)
60357 +{
60358 + if (gr_acl_is_enabled() || !alloc_stack)
60359 + return;
60360 +
60361 + while (alloc_pop()) ;
60362 +
60363 + if (alloc_stack) {
60364 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
60365 + kfree(alloc_stack);
60366 + else
60367 + vfree(alloc_stack);
60368 + }
60369 +
60370 + alloc_stack = NULL;
60371 + alloc_stack_size = 1;
60372 + alloc_stack_next = 1;
60373 +
60374 + return;
60375 +}
60376 +
60377 +int
60378 +acl_alloc_stack_init(unsigned long size)
60379 +{
60380 + if ((size * sizeof (void *)) <= PAGE_SIZE)
60381 + alloc_stack =
60382 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
60383 + else
60384 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
60385 +
60386 + alloc_stack_size = size;
60387 +
60388 + if (!alloc_stack)
60389 + return 0;
60390 + else
60391 + return 1;
60392 +}
60393 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
60394 new file mode 100644
60395 index 0000000..955ddfb
60396 --- /dev/null
60397 +++ b/grsecurity/gracl_cap.c
60398 @@ -0,0 +1,101 @@
60399 +#include <linux/kernel.h>
60400 +#include <linux/module.h>
60401 +#include <linux/sched.h>
60402 +#include <linux/gracl.h>
60403 +#include <linux/grsecurity.h>
60404 +#include <linux/grinternal.h>
60405 +
60406 +extern const char *captab_log[];
60407 +extern int captab_log_entries;
60408 +
60409 +int
60410 +gr_acl_is_capable(const int cap)
60411 +{
60412 + struct task_struct *task = current;
60413 + const struct cred *cred = current_cred();
60414 + struct acl_subject_label *curracl;
60415 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
60416 + kernel_cap_t cap_audit = __cap_empty_set;
60417 +
60418 + if (!gr_acl_is_enabled())
60419 + return 1;
60420 +
60421 + curracl = task->acl;
60422 +
60423 + cap_drop = curracl->cap_lower;
60424 + cap_mask = curracl->cap_mask;
60425 + cap_audit = curracl->cap_invert_audit;
60426 +
60427 + while ((curracl = curracl->parent_subject)) {
60428 + /* if the cap isn't specified in the current computed mask but is specified in the
60429 + current level subject, and is lowered in the current level subject, then add
60430 + it to the set of dropped capabilities
60431 + otherwise, add the current level subject's mask to the current computed mask
60432 + */
60433 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
60434 + cap_raise(cap_mask, cap);
60435 + if (cap_raised(curracl->cap_lower, cap))
60436 + cap_raise(cap_drop, cap);
60437 + if (cap_raised(curracl->cap_invert_audit, cap))
60438 + cap_raise(cap_audit, cap);
60439 + }
60440 + }
60441 +
60442 + if (!cap_raised(cap_drop, cap)) {
60443 + if (cap_raised(cap_audit, cap))
60444 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
60445 + return 1;
60446 + }
60447 +
60448 + curracl = task->acl;
60449 +
60450 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
60451 + && cap_raised(cred->cap_effective, cap)) {
60452 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
60453 + task->role->roletype, cred->uid,
60454 + cred->gid, task->exec_file ?
60455 + gr_to_filename(task->exec_file->f_path.dentry,
60456 + task->exec_file->f_path.mnt) : curracl->filename,
60457 + curracl->filename, 0UL,
60458 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
60459 + return 1;
60460 + }
60461 +
60462 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
60463 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
60464 + return 0;
60465 +}
60466 +
60467 +int
60468 +gr_acl_is_capable_nolog(const int cap)
60469 +{
60470 + struct acl_subject_label *curracl;
60471 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
60472 +
60473 + if (!gr_acl_is_enabled())
60474 + return 1;
60475 +
60476 + curracl = current->acl;
60477 +
60478 + cap_drop = curracl->cap_lower;
60479 + cap_mask = curracl->cap_mask;
60480 +
60481 + while ((curracl = curracl->parent_subject)) {
60482 + /* if the cap isn't specified in the current computed mask but is specified in the
60483 + current level subject, and is lowered in the current level subject, then add
60484 + it to the set of dropped capabilities
60485 + otherwise, add the current level subject's mask to the current computed mask
60486 + */
60487 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
60488 + cap_raise(cap_mask, cap);
60489 + if (cap_raised(curracl->cap_lower, cap))
60490 + cap_raise(cap_drop, cap);
60491 + }
60492 + }
60493 +
60494 + if (!cap_raised(cap_drop, cap))
60495 + return 1;
60496 +
60497 + return 0;
60498 +}
60499 +
60500 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
60501 new file mode 100644
60502 index 0000000..d5f210c
60503 --- /dev/null
60504 +++ b/grsecurity/gracl_fs.c
60505 @@ -0,0 +1,433 @@
60506 +#include <linux/kernel.h>
60507 +#include <linux/sched.h>
60508 +#include <linux/types.h>
60509 +#include <linux/fs.h>
60510 +#include <linux/file.h>
60511 +#include <linux/stat.h>
60512 +#include <linux/grsecurity.h>
60513 +#include <linux/grinternal.h>
60514 +#include <linux/gracl.h>
60515 +
60516 +__u32
60517 +gr_acl_handle_hidden_file(const struct dentry * dentry,
60518 + const struct vfsmount * mnt)
60519 +{
60520 + __u32 mode;
60521 +
60522 + if (unlikely(!dentry->d_inode))
60523 + return GR_FIND;
60524 +
60525 + mode =
60526 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
60527 +
60528 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
60529 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
60530 + return mode;
60531 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
60532 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
60533 + return 0;
60534 + } else if (unlikely(!(mode & GR_FIND)))
60535 + return 0;
60536 +
60537 + return GR_FIND;
60538 +}
60539 +
60540 +__u32
60541 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
60542 + int acc_mode)
60543 +{
60544 + __u32 reqmode = GR_FIND;
60545 + __u32 mode;
60546 +
60547 + if (unlikely(!dentry->d_inode))
60548 + return reqmode;
60549 +
60550 + if (acc_mode & MAY_APPEND)
60551 + reqmode |= GR_APPEND;
60552 + else if (acc_mode & MAY_WRITE)
60553 + reqmode |= GR_WRITE;
60554 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
60555 + reqmode |= GR_READ;
60556 +
60557 + mode =
60558 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
60559 + mnt);
60560 +
60561 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
60562 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
60563 + reqmode & GR_READ ? " reading" : "",
60564 + reqmode & GR_WRITE ? " writing" : reqmode &
60565 + GR_APPEND ? " appending" : "");
60566 + return reqmode;
60567 + } else
60568 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
60569 + {
60570 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
60571 + reqmode & GR_READ ? " reading" : "",
60572 + reqmode & GR_WRITE ? " writing" : reqmode &
60573 + GR_APPEND ? " appending" : "");
60574 + return 0;
60575 + } else if (unlikely((mode & reqmode) != reqmode))
60576 + return 0;
60577 +
60578 + return reqmode;
60579 +}
60580 +
60581 +__u32
60582 +gr_acl_handle_creat(const struct dentry * dentry,
60583 + const struct dentry * p_dentry,
60584 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
60585 + const int imode)
60586 +{
60587 + __u32 reqmode = GR_WRITE | GR_CREATE;
60588 + __u32 mode;
60589 +
60590 + if (acc_mode & MAY_APPEND)
60591 + reqmode |= GR_APPEND;
60592 + // if a directory was required or the directory already exists, then
60593 + // don't count this open as a read
60594 + if ((acc_mode & MAY_READ) &&
60595 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
60596 + reqmode |= GR_READ;
60597 + if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
60598 + reqmode |= GR_SETID;
60599 +
60600 + mode =
60601 + gr_check_create(dentry, p_dentry, p_mnt,
60602 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
60603 +
60604 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
60605 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
60606 + reqmode & GR_READ ? " reading" : "",
60607 + reqmode & GR_WRITE ? " writing" : reqmode &
60608 + GR_APPEND ? " appending" : "");
60609 + return reqmode;
60610 + } else
60611 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
60612 + {
60613 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
60614 + reqmode & GR_READ ? " reading" : "",
60615 + reqmode & GR_WRITE ? " writing" : reqmode &
60616 + GR_APPEND ? " appending" : "");
60617 + return 0;
60618 + } else if (unlikely((mode & reqmode) != reqmode))
60619 + return 0;
60620 +
60621 + return reqmode;
60622 +}
60623 +
60624 +__u32
60625 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
60626 + const int fmode)
60627 +{
60628 + __u32 mode, reqmode = GR_FIND;
60629 +
60630 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
60631 + reqmode |= GR_EXEC;
60632 + if (fmode & S_IWOTH)
60633 + reqmode |= GR_WRITE;
60634 + if (fmode & S_IROTH)
60635 + reqmode |= GR_READ;
60636 +
60637 + mode =
60638 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
60639 + mnt);
60640 +
60641 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
60642 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
60643 + reqmode & GR_READ ? " reading" : "",
60644 + reqmode & GR_WRITE ? " writing" : "",
60645 + reqmode & GR_EXEC ? " executing" : "");
60646 + return reqmode;
60647 + } else
60648 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
60649 + {
60650 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
60651 + reqmode & GR_READ ? " reading" : "",
60652 + reqmode & GR_WRITE ? " writing" : "",
60653 + reqmode & GR_EXEC ? " executing" : "");
60654 + return 0;
60655 + } else if (unlikely((mode & reqmode) != reqmode))
60656 + return 0;
60657 +
60658 + return reqmode;
60659 +}
60660 +
60661 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
60662 +{
60663 + __u32 mode;
60664 +
60665 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
60666 +
60667 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
60668 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
60669 + return mode;
60670 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
60671 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
60672 + return 0;
60673 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
60674 + return 0;
60675 +
60676 + return (reqmode);
60677 +}
60678 +
60679 +__u32
60680 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
60681 +{
60682 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
60683 +}
60684 +
60685 +__u32
60686 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
60687 +{
60688 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
60689 +}
60690 +
60691 +__u32
60692 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
60693 +{
60694 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
60695 +}
60696 +
60697 +__u32
60698 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
60699 +{
60700 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
60701 +}
60702 +
60703 +__u32
60704 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
60705 + mode_t mode)
60706 +{
60707 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
60708 + return 1;
60709 +
60710 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
60711 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
60712 + GR_FCHMOD_ACL_MSG);
60713 + } else {
60714 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
60715 + }
60716 +}
60717 +
60718 +__u32
60719 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
60720 + mode_t mode)
60721 +{
60722 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
60723 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
60724 + GR_CHMOD_ACL_MSG);
60725 + } else {
60726 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
60727 + }
60728 +}
60729 +
60730 +__u32
60731 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
60732 +{
60733 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
60734 +}
60735 +
60736 +__u32
60737 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
60738 +{
60739 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
60740 +}
60741 +
60742 +__u32
60743 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
60744 +{
60745 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
60746 +}
60747 +
60748 +__u32
60749 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
60750 +{
60751 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
60752 + GR_UNIXCONNECT_ACL_MSG);
60753 +}
60754 +
60755 +/* hardlinks require at minimum create and link permission,
60756 + any additional privilege required is based on the
60757 + privilege of the file being linked to
60758 +*/
60759 +__u32
60760 +gr_acl_handle_link(const struct dentry * new_dentry,
60761 + const struct dentry * parent_dentry,
60762 + const struct vfsmount * parent_mnt,
60763 + const struct dentry * old_dentry,
60764 + const struct vfsmount * old_mnt, const char *to)
60765 +{
60766 + __u32 mode;
60767 + __u32 needmode = GR_CREATE | GR_LINK;
60768 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
60769 +
60770 + mode =
60771 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
60772 + old_mnt);
60773 +
60774 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
60775 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
60776 + return mode;
60777 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
60778 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
60779 + return 0;
60780 + } else if (unlikely((mode & needmode) != needmode))
60781 + return 0;
60782 +
60783 + return 1;
60784 +}
60785 +
60786 +__u32
60787 +gr_acl_handle_symlink(const struct dentry * new_dentry,
60788 + const struct dentry * parent_dentry,
60789 + const struct vfsmount * parent_mnt, const char *from)
60790 +{
60791 + __u32 needmode = GR_WRITE | GR_CREATE;
60792 + __u32 mode;
60793 +
60794 + mode =
60795 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
60796 + GR_CREATE | GR_AUDIT_CREATE |
60797 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
60798 +
60799 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
60800 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
60801 + return mode;
60802 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
60803 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
60804 + return 0;
60805 + } else if (unlikely((mode & needmode) != needmode))
60806 + return 0;
60807 +
60808 + return (GR_WRITE | GR_CREATE);
60809 +}
60810 +
60811 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
60812 +{
60813 + __u32 mode;
60814 +
60815 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
60816 +
60817 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
60818 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
60819 + return mode;
60820 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
60821 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
60822 + return 0;
60823 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
60824 + return 0;
60825 +
60826 + return (reqmode);
60827 +}
60828 +
60829 +__u32
60830 +gr_acl_handle_mknod(const struct dentry * new_dentry,
60831 + const struct dentry * parent_dentry,
60832 + const struct vfsmount * parent_mnt,
60833 + const int mode)
60834 +{
60835 + __u32 reqmode = GR_WRITE | GR_CREATE;
60836 + if (unlikely(mode & (S_ISUID | S_ISGID)))
60837 + reqmode |= GR_SETID;
60838 +
60839 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
60840 + reqmode, GR_MKNOD_ACL_MSG);
60841 +}
60842 +
60843 +__u32
60844 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
60845 + const struct dentry *parent_dentry,
60846 + const struct vfsmount *parent_mnt)
60847 +{
60848 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
60849 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
60850 +}
60851 +
60852 +#define RENAME_CHECK_SUCCESS(old, new) \
60853 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
60854 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
60855 +
60856 +int
60857 +gr_acl_handle_rename(struct dentry *new_dentry,
60858 + struct dentry *parent_dentry,
60859 + const struct vfsmount *parent_mnt,
60860 + struct dentry *old_dentry,
60861 + struct inode *old_parent_inode,
60862 + struct vfsmount *old_mnt, const char *newname)
60863 +{
60864 + __u32 comp1, comp2;
60865 + int error = 0;
60866 +
60867 + if (unlikely(!gr_acl_is_enabled()))
60868 + return 0;
60869 +
60870 + if (!new_dentry->d_inode) {
60871 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
60872 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
60873 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
60874 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
60875 + GR_DELETE | GR_AUDIT_DELETE |
60876 + GR_AUDIT_READ | GR_AUDIT_WRITE |
60877 + GR_SUPPRESS, old_mnt);
60878 + } else {
60879 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
60880 + GR_CREATE | GR_DELETE |
60881 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
60882 + GR_AUDIT_READ | GR_AUDIT_WRITE |
60883 + GR_SUPPRESS, parent_mnt);
60884 + comp2 =
60885 + gr_search_file(old_dentry,
60886 + GR_READ | GR_WRITE | GR_AUDIT_READ |
60887 + GR_DELETE | GR_AUDIT_DELETE |
60888 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
60889 + }
60890 +
60891 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
60892 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
60893 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
60894 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
60895 + && !(comp2 & GR_SUPPRESS)) {
60896 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
60897 + error = -EACCES;
60898 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
60899 + error = -EACCES;
60900 +
60901 + return error;
60902 +}
60903 +
60904 +void
60905 +gr_acl_handle_exit(void)
60906 +{
60907 + u16 id;
60908 + char *rolename;
60909 + struct file *exec_file;
60910 +
60911 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
60912 + !(current->role->roletype & GR_ROLE_PERSIST))) {
60913 + id = current->acl_role_id;
60914 + rolename = current->role->rolename;
60915 + gr_set_acls(1);
60916 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
60917 + }
60918 +
60919 + write_lock(&grsec_exec_file_lock);
60920 + exec_file = current->exec_file;
60921 + current->exec_file = NULL;
60922 + write_unlock(&grsec_exec_file_lock);
60923 +
60924 + if (exec_file)
60925 + fput(exec_file);
60926 +}
60927 +
60928 +int
60929 +gr_acl_handle_procpidmem(const struct task_struct *task)
60930 +{
60931 + if (unlikely(!gr_acl_is_enabled()))
60932 + return 0;
60933 +
60934 + if (task != current && task->acl->mode & GR_PROTPROCFD)
60935 + return -EACCES;
60936 +
60937 + return 0;
60938 +}
60939 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
60940 new file mode 100644
60941 index 0000000..cd07b96
60942 --- /dev/null
60943 +++ b/grsecurity/gracl_ip.c
60944 @@ -0,0 +1,382 @@
60945 +#include <linux/kernel.h>
60946 +#include <asm/uaccess.h>
60947 +#include <asm/errno.h>
60948 +#include <net/sock.h>
60949 +#include <linux/file.h>
60950 +#include <linux/fs.h>
60951 +#include <linux/net.h>
60952 +#include <linux/in.h>
60953 +#include <linux/skbuff.h>
60954 +#include <linux/ip.h>
60955 +#include <linux/udp.h>
60956 +#include <linux/smp_lock.h>
60957 +#include <linux/types.h>
60958 +#include <linux/sched.h>
60959 +#include <linux/netdevice.h>
60960 +#include <linux/inetdevice.h>
60961 +#include <linux/gracl.h>
60962 +#include <linux/grsecurity.h>
60963 +#include <linux/grinternal.h>
60964 +
60965 +#define GR_BIND 0x01
60966 +#define GR_CONNECT 0x02
60967 +#define GR_INVERT 0x04
60968 +#define GR_BINDOVERRIDE 0x08
60969 +#define GR_CONNECTOVERRIDE 0x10
60970 +#define GR_SOCK_FAMILY 0x20
60971 +
60972 +static const char * gr_protocols[IPPROTO_MAX] = {
60973 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
60974 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
60975 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
60976 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
60977 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
60978 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
60979 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
60980 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
60981 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
60982 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
60983 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
60984 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
60985 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
60986 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
60987 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
60988 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
60989 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
60990 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
60991 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
60992 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
60993 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
60994 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
60995 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
60996 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
60997 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
60998 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
60999 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
61000 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
61001 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
61002 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
61003 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
61004 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
61005 + };
61006 +
61007 +static const char * gr_socktypes[SOCK_MAX] = {
61008 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
61009 + "unknown:7", "unknown:8", "unknown:9", "packet"
61010 + };
61011 +
61012 +static const char * gr_sockfamilies[AF_MAX+1] = {
61013 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
61014 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
61015 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
61016 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
61017 + };
61018 +
61019 +const char *
61020 +gr_proto_to_name(unsigned char proto)
61021 +{
61022 + return gr_protocols[proto];
61023 +}
61024 +
61025 +const char *
61026 +gr_socktype_to_name(unsigned char type)
61027 +{
61028 + return gr_socktypes[type];
61029 +}
61030 +
61031 +const char *
61032 +gr_sockfamily_to_name(unsigned char family)
61033 +{
61034 + return gr_sockfamilies[family];
61035 +}
61036 +
61037 +int
61038 +gr_search_socket(const int domain, const int type, const int protocol)
61039 +{
61040 + struct acl_subject_label *curr;
61041 + const struct cred *cred = current_cred();
61042 +
61043 + if (unlikely(!gr_acl_is_enabled()))
61044 + goto exit;
61045 +
61046 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
61047 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
61048 + goto exit; // let the kernel handle it
61049 +
61050 + curr = current->acl;
61051 +
61052 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
61053 + /* the family is allowed, if this is PF_INET allow it only if
61054 + the extra sock type/protocol checks pass */
61055 + if (domain == PF_INET)
61056 + goto inet_check;
61057 + goto exit;
61058 + } else {
61059 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
61060 + __u32 fakeip = 0;
61061 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61062 + current->role->roletype, cred->uid,
61063 + cred->gid, current->exec_file ?
61064 + gr_to_filename(current->exec_file->f_path.dentry,
61065 + current->exec_file->f_path.mnt) :
61066 + curr->filename, curr->filename,
61067 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
61068 + &current->signal->saved_ip);
61069 + goto exit;
61070 + }
61071 + goto exit_fail;
61072 + }
61073 +
61074 +inet_check:
61075 + /* the rest of this checking is for IPv4 only */
61076 + if (!curr->ips)
61077 + goto exit;
61078 +
61079 + if ((curr->ip_type & (1 << type)) &&
61080 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
61081 + goto exit;
61082 +
61083 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
61084 + /* we don't place acls on raw sockets , and sometimes
61085 + dgram/ip sockets are opened for ioctl and not
61086 + bind/connect, so we'll fake a bind learn log */
61087 + if (type == SOCK_RAW || type == SOCK_PACKET) {
61088 + __u32 fakeip = 0;
61089 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61090 + current->role->roletype, cred->uid,
61091 + cred->gid, current->exec_file ?
61092 + gr_to_filename(current->exec_file->f_path.dentry,
61093 + current->exec_file->f_path.mnt) :
61094 + curr->filename, curr->filename,
61095 + &fakeip, 0, type,
61096 + protocol, GR_CONNECT, &current->signal->saved_ip);
61097 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
61098 + __u32 fakeip = 0;
61099 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61100 + current->role->roletype, cred->uid,
61101 + cred->gid, current->exec_file ?
61102 + gr_to_filename(current->exec_file->f_path.dentry,
61103 + current->exec_file->f_path.mnt) :
61104 + curr->filename, curr->filename,
61105 + &fakeip, 0, type,
61106 + protocol, GR_BIND, &current->signal->saved_ip);
61107 + }
61108 + /* we'll log when they use connect or bind */
61109 + goto exit;
61110 + }
61111 +
61112 +exit_fail:
61113 + if (domain == PF_INET)
61114 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
61115 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
61116 + else
61117 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
61118 + gr_socktype_to_name(type), protocol);
61119 +
61120 + return 0;
61121 +exit:
61122 + return 1;
61123 +}
61124 +
61125 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
61126 +{
61127 + if ((ip->mode & mode) &&
61128 + (ip_port >= ip->low) &&
61129 + (ip_port <= ip->high) &&
61130 + ((ntohl(ip_addr) & our_netmask) ==
61131 + (ntohl(our_addr) & our_netmask))
61132 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
61133 + && (ip->type & (1 << type))) {
61134 + if (ip->mode & GR_INVERT)
61135 + return 2; // specifically denied
61136 + else
61137 + return 1; // allowed
61138 + }
61139 +
61140 + return 0; // not specifically allowed, may continue parsing
61141 +}
61142 +
61143 +static int
61144 +gr_search_connectbind(const int full_mode, struct sock *sk,
61145 + struct sockaddr_in *addr, const int type)
61146 +{
61147 + char iface[IFNAMSIZ] = {0};
61148 + struct acl_subject_label *curr;
61149 + struct acl_ip_label *ip;
61150 + struct inet_sock *isk;
61151 + struct net_device *dev;
61152 + struct in_device *idev;
61153 + unsigned long i;
61154 + int ret;
61155 + int mode = full_mode & (GR_BIND | GR_CONNECT);
61156 + __u32 ip_addr = 0;
61157 + __u32 our_addr;
61158 + __u32 our_netmask;
61159 + char *p;
61160 + __u16 ip_port = 0;
61161 + const struct cred *cred = current_cred();
61162 +
61163 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
61164 + return 0;
61165 +
61166 + curr = current->acl;
61167 + isk = inet_sk(sk);
61168 +
61169 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
61170 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
61171 + addr->sin_addr.s_addr = curr->inaddr_any_override;
61172 + if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
61173 + struct sockaddr_in saddr;
61174 + int err;
61175 +
61176 + saddr.sin_family = AF_INET;
61177 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
61178 + saddr.sin_port = isk->sport;
61179 +
61180 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
61181 + if (err)
61182 + return err;
61183 +
61184 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
61185 + if (err)
61186 + return err;
61187 + }
61188 +
61189 + if (!curr->ips)
61190 + return 0;
61191 +
61192 + ip_addr = addr->sin_addr.s_addr;
61193 + ip_port = ntohs(addr->sin_port);
61194 +
61195 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
61196 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61197 + current->role->roletype, cred->uid,
61198 + cred->gid, current->exec_file ?
61199 + gr_to_filename(current->exec_file->f_path.dentry,
61200 + current->exec_file->f_path.mnt) :
61201 + curr->filename, curr->filename,
61202 + &ip_addr, ip_port, type,
61203 + sk->sk_protocol, mode, &current->signal->saved_ip);
61204 + return 0;
61205 + }
61206 +
61207 + for (i = 0; i < curr->ip_num; i++) {
61208 + ip = *(curr->ips + i);
61209 + if (ip->iface != NULL) {
61210 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
61211 + p = strchr(iface, ':');
61212 + if (p != NULL)
61213 + *p = '\0';
61214 + dev = dev_get_by_name(sock_net(sk), iface);
61215 + if (dev == NULL)
61216 + continue;
61217 + idev = in_dev_get(dev);
61218 + if (idev == NULL) {
61219 + dev_put(dev);
61220 + continue;
61221 + }
61222 + rcu_read_lock();
61223 + for_ifa(idev) {
61224 + if (!strcmp(ip->iface, ifa->ifa_label)) {
61225 + our_addr = ifa->ifa_address;
61226 + our_netmask = 0xffffffff;
61227 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
61228 + if (ret == 1) {
61229 + rcu_read_unlock();
61230 + in_dev_put(idev);
61231 + dev_put(dev);
61232 + return 0;
61233 + } else if (ret == 2) {
61234 + rcu_read_unlock();
61235 + in_dev_put(idev);
61236 + dev_put(dev);
61237 + goto denied;
61238 + }
61239 + }
61240 + } endfor_ifa(idev);
61241 + rcu_read_unlock();
61242 + in_dev_put(idev);
61243 + dev_put(dev);
61244 + } else {
61245 + our_addr = ip->addr;
61246 + our_netmask = ip->netmask;
61247 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
61248 + if (ret == 1)
61249 + return 0;
61250 + else if (ret == 2)
61251 + goto denied;
61252 + }
61253 + }
61254 +
61255 +denied:
61256 + if (mode == GR_BIND)
61257 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
61258 + else if (mode == GR_CONNECT)
61259 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
61260 +
61261 + return -EACCES;
61262 +}
61263 +
61264 +int
61265 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
61266 +{
61267 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
61268 +}
61269 +
61270 +int
61271 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
61272 +{
61273 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
61274 +}
61275 +
61276 +int gr_search_listen(struct socket *sock)
61277 +{
61278 + struct sock *sk = sock->sk;
61279 + struct sockaddr_in addr;
61280 +
61281 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
61282 + addr.sin_port = inet_sk(sk)->sport;
61283 +
61284 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
61285 +}
61286 +
61287 +int gr_search_accept(struct socket *sock)
61288 +{
61289 + struct sock *sk = sock->sk;
61290 + struct sockaddr_in addr;
61291 +
61292 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
61293 + addr.sin_port = inet_sk(sk)->sport;
61294 +
61295 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
61296 +}
61297 +
61298 +int
61299 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
61300 +{
61301 + if (addr)
61302 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
61303 + else {
61304 + struct sockaddr_in sin;
61305 + const struct inet_sock *inet = inet_sk(sk);
61306 +
61307 + sin.sin_addr.s_addr = inet->daddr;
61308 + sin.sin_port = inet->dport;
61309 +
61310 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
61311 + }
61312 +}
61313 +
61314 +int
61315 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
61316 +{
61317 + struct sockaddr_in sin;
61318 +
61319 + if (unlikely(skb->len < sizeof (struct udphdr)))
61320 + return 0; // skip this packet
61321 +
61322 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
61323 + sin.sin_port = udp_hdr(skb)->source;
61324 +
61325 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
61326 +}
61327 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
61328 new file mode 100644
61329 index 0000000..34bdd46
61330 --- /dev/null
61331 +++ b/grsecurity/gracl_learn.c
61332 @@ -0,0 +1,208 @@
61333 +#include <linux/kernel.h>
61334 +#include <linux/mm.h>
61335 +#include <linux/sched.h>
61336 +#include <linux/poll.h>
61337 +#include <linux/smp_lock.h>
61338 +#include <linux/string.h>
61339 +#include <linux/file.h>
61340 +#include <linux/types.h>
61341 +#include <linux/vmalloc.h>
61342 +#include <linux/grinternal.h>
61343 +
61344 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
61345 + size_t count, loff_t *ppos);
61346 +extern int gr_acl_is_enabled(void);
61347 +
61348 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
61349 +static int gr_learn_attached;
61350 +
61351 +/* use a 512k buffer */
61352 +#define LEARN_BUFFER_SIZE (512 * 1024)
61353 +
61354 +static DEFINE_SPINLOCK(gr_learn_lock);
61355 +static DEFINE_MUTEX(gr_learn_user_mutex);
61356 +
61357 +/* we need to maintain two buffers, so that the kernel context of grlearn
61358 + uses a semaphore around the userspace copying, and the other kernel contexts
61359 + use a spinlock when copying into the buffer, since they cannot sleep
61360 +*/
61361 +static char *learn_buffer;
61362 +static char *learn_buffer_user;
61363 +static int learn_buffer_len;
61364 +static int learn_buffer_user_len;
61365 +
61366 +static ssize_t
61367 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
61368 +{
61369 + DECLARE_WAITQUEUE(wait, current);
61370 + ssize_t retval = 0;
61371 +
61372 + add_wait_queue(&learn_wait, &wait);
61373 + set_current_state(TASK_INTERRUPTIBLE);
61374 + do {
61375 + mutex_lock(&gr_learn_user_mutex);
61376 + spin_lock(&gr_learn_lock);
61377 + if (learn_buffer_len)
61378 + break;
61379 + spin_unlock(&gr_learn_lock);
61380 + mutex_unlock(&gr_learn_user_mutex);
61381 + if (file->f_flags & O_NONBLOCK) {
61382 + retval = -EAGAIN;
61383 + goto out;
61384 + }
61385 + if (signal_pending(current)) {
61386 + retval = -ERESTARTSYS;
61387 + goto out;
61388 + }
61389 +
61390 + schedule();
61391 + } while (1);
61392 +
61393 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
61394 + learn_buffer_user_len = learn_buffer_len;
61395 + retval = learn_buffer_len;
61396 + learn_buffer_len = 0;
61397 +
61398 + spin_unlock(&gr_learn_lock);
61399 +
61400 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
61401 + retval = -EFAULT;
61402 +
61403 + mutex_unlock(&gr_learn_user_mutex);
61404 +out:
61405 + set_current_state(TASK_RUNNING);
61406 + remove_wait_queue(&learn_wait, &wait);
61407 + return retval;
61408 +}
61409 +
61410 +static unsigned int
61411 +poll_learn(struct file * file, poll_table * wait)
61412 +{
61413 + poll_wait(file, &learn_wait, wait);
61414 +
61415 + if (learn_buffer_len)
61416 + return (POLLIN | POLLRDNORM);
61417 +
61418 + return 0;
61419 +}
61420 +
61421 +void
61422 +gr_clear_learn_entries(void)
61423 +{
61424 + char *tmp;
61425 +
61426 + mutex_lock(&gr_learn_user_mutex);
61427 + spin_lock(&gr_learn_lock);
61428 + tmp = learn_buffer;
61429 + learn_buffer = NULL;
61430 + spin_unlock(&gr_learn_lock);
61431 + if (tmp)
61432 + vfree(tmp);
61433 + if (learn_buffer_user != NULL) {
61434 + vfree(learn_buffer_user);
61435 + learn_buffer_user = NULL;
61436 + }
61437 + learn_buffer_len = 0;
61438 + mutex_unlock(&gr_learn_user_mutex);
61439 +
61440 + return;
61441 +}
61442 +
61443 +void
61444 +gr_add_learn_entry(const char *fmt, ...)
61445 +{
61446 + va_list args;
61447 + unsigned int len;
61448 +
61449 + if (!gr_learn_attached)
61450 + return;
61451 +
61452 + spin_lock(&gr_learn_lock);
61453 +
61454 + /* leave a gap at the end so we know when it's "full" but don't have to
61455 + compute the exact length of the string we're trying to append
61456 + */
61457 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
61458 + spin_unlock(&gr_learn_lock);
61459 + wake_up_interruptible(&learn_wait);
61460 + return;
61461 + }
61462 + if (learn_buffer == NULL) {
61463 + spin_unlock(&gr_learn_lock);
61464 + return;
61465 + }
61466 +
61467 + va_start(args, fmt);
61468 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
61469 + va_end(args);
61470 +
61471 + learn_buffer_len += len + 1;
61472 +
61473 + spin_unlock(&gr_learn_lock);
61474 + wake_up_interruptible(&learn_wait);
61475 +
61476 + return;
61477 +}
61478 +
61479 +static int
61480 +open_learn(struct inode *inode, struct file *file)
61481 +{
61482 + if (file->f_mode & FMODE_READ && gr_learn_attached)
61483 + return -EBUSY;
61484 + if (file->f_mode & FMODE_READ) {
61485 + int retval = 0;
61486 + mutex_lock(&gr_learn_user_mutex);
61487 + if (learn_buffer == NULL)
61488 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
61489 + if (learn_buffer_user == NULL)
61490 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
61491 + if (learn_buffer == NULL) {
61492 + retval = -ENOMEM;
61493 + goto out_error;
61494 + }
61495 + if (learn_buffer_user == NULL) {
61496 + retval = -ENOMEM;
61497 + goto out_error;
61498 + }
61499 + learn_buffer_len = 0;
61500 + learn_buffer_user_len = 0;
61501 + gr_learn_attached = 1;
61502 +out_error:
61503 + mutex_unlock(&gr_learn_user_mutex);
61504 + return retval;
61505 + }
61506 + return 0;
61507 +}
61508 +
61509 +static int
61510 +close_learn(struct inode *inode, struct file *file)
61511 +{
61512 + if (file->f_mode & FMODE_READ) {
61513 + char *tmp = NULL;
61514 + mutex_lock(&gr_learn_user_mutex);
61515 + spin_lock(&gr_learn_lock);
61516 + tmp = learn_buffer;
61517 + learn_buffer = NULL;
61518 + spin_unlock(&gr_learn_lock);
61519 + if (tmp)
61520 + vfree(tmp);
61521 + if (learn_buffer_user != NULL) {
61522 + vfree(learn_buffer_user);
61523 + learn_buffer_user = NULL;
61524 + }
61525 + learn_buffer_len = 0;
61526 + learn_buffer_user_len = 0;
61527 + gr_learn_attached = 0;
61528 + mutex_unlock(&gr_learn_user_mutex);
61529 + }
61530 +
61531 + return 0;
61532 +}
61533 +
61534 +const struct file_operations grsec_fops = {
61535 + .read = read_learn,
61536 + .write = write_grsec_handler,
61537 + .open = open_learn,
61538 + .release = close_learn,
61539 + .poll = poll_learn,
61540 +};
61541 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
61542 new file mode 100644
61543 index 0000000..70b2179
61544 --- /dev/null
61545 +++ b/grsecurity/gracl_res.c
61546 @@ -0,0 +1,67 @@
61547 +#include <linux/kernel.h>
61548 +#include <linux/sched.h>
61549 +#include <linux/gracl.h>
61550 +#include <linux/grinternal.h>
61551 +
61552 +static const char *restab_log[] = {
61553 + [RLIMIT_CPU] = "RLIMIT_CPU",
61554 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
61555 + [RLIMIT_DATA] = "RLIMIT_DATA",
61556 + [RLIMIT_STACK] = "RLIMIT_STACK",
61557 + [RLIMIT_CORE] = "RLIMIT_CORE",
61558 + [RLIMIT_RSS] = "RLIMIT_RSS",
61559 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
61560 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
61561 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
61562 + [RLIMIT_AS] = "RLIMIT_AS",
61563 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
61564 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
61565 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
61566 + [RLIMIT_NICE] = "RLIMIT_NICE",
61567 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
61568 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
61569 + [GR_CRASH_RES] = "RLIMIT_CRASH"
61570 +};
61571 +
61572 +void
61573 +gr_log_resource(const struct task_struct *task,
61574 + const int res, const unsigned long wanted, const int gt)
61575 +{
61576 + const struct cred *cred;
61577 + unsigned long rlim;
61578 +
61579 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
61580 + return;
61581 +
61582 + // not yet supported resource
61583 + if (unlikely(!restab_log[res]))
61584 + return;
61585 +
61586 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
61587 + rlim = task->signal->rlim[res].rlim_max;
61588 + else
61589 + rlim = task->signal->rlim[res].rlim_cur;
61590 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
61591 + return;
61592 +
61593 + rcu_read_lock();
61594 + cred = __task_cred(task);
61595 +
61596 + if (res == RLIMIT_NPROC &&
61597 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
61598 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
61599 + goto out_rcu_unlock;
61600 + else if (res == RLIMIT_MEMLOCK &&
61601 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
61602 + goto out_rcu_unlock;
61603 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
61604 + goto out_rcu_unlock;
61605 + rcu_read_unlock();
61606 +
61607 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
61608 +
61609 + return;
61610 +out_rcu_unlock:
61611 + rcu_read_unlock();
61612 + return;
61613 +}
61614 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
61615 new file mode 100644
61616 index 0000000..1d1b734
61617 --- /dev/null
61618 +++ b/grsecurity/gracl_segv.c
61619 @@ -0,0 +1,284 @@
61620 +#include <linux/kernel.h>
61621 +#include <linux/mm.h>
61622 +#include <asm/uaccess.h>
61623 +#include <asm/errno.h>
61624 +#include <asm/mman.h>
61625 +#include <net/sock.h>
61626 +#include <linux/file.h>
61627 +#include <linux/fs.h>
61628 +#include <linux/net.h>
61629 +#include <linux/in.h>
61630 +#include <linux/smp_lock.h>
61631 +#include <linux/slab.h>
61632 +#include <linux/types.h>
61633 +#include <linux/sched.h>
61634 +#include <linux/timer.h>
61635 +#include <linux/gracl.h>
61636 +#include <linux/grsecurity.h>
61637 +#include <linux/grinternal.h>
61638 +
61639 +static struct crash_uid *uid_set;
61640 +static unsigned short uid_used;
61641 +static DEFINE_SPINLOCK(gr_uid_lock);
61642 +extern rwlock_t gr_inode_lock;
61643 +extern struct acl_subject_label *
61644 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
61645 + struct acl_role_label *role);
61646 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
61647 +
61648 +int
61649 +gr_init_uidset(void)
61650 +{
61651 + uid_set =
61652 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
61653 + uid_used = 0;
61654 +
61655 + return uid_set ? 1 : 0;
61656 +}
61657 +
61658 +void
61659 +gr_free_uidset(void)
61660 +{
61661 + if (uid_set)
61662 + kfree(uid_set);
61663 +
61664 + return;
61665 +}
61666 +
61667 +int
61668 +gr_find_uid(const uid_t uid)
61669 +{
61670 + struct crash_uid *tmp = uid_set;
61671 + uid_t buid;
61672 + int low = 0, high = uid_used - 1, mid;
61673 +
61674 + while (high >= low) {
61675 + mid = (low + high) >> 1;
61676 + buid = tmp[mid].uid;
61677 + if (buid == uid)
61678 + return mid;
61679 + if (buid > uid)
61680 + high = mid - 1;
61681 + if (buid < uid)
61682 + low = mid + 1;
61683 + }
61684 +
61685 + return -1;
61686 +}
61687 +
61688 +static __inline__ void
61689 +gr_insertsort(void)
61690 +{
61691 + unsigned short i, j;
61692 + struct crash_uid index;
61693 +
61694 + for (i = 1; i < uid_used; i++) {
61695 + index = uid_set[i];
61696 + j = i;
61697 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
61698 + uid_set[j] = uid_set[j - 1];
61699 + j--;
61700 + }
61701 + uid_set[j] = index;
61702 + }
61703 +
61704 + return;
61705 +}
61706 +
61707 +static __inline__ void
61708 +gr_insert_uid(const uid_t uid, const unsigned long expires)
61709 +{
61710 + int loc;
61711 +
61712 + if (uid_used == GR_UIDTABLE_MAX)
61713 + return;
61714 +
61715 + loc = gr_find_uid(uid);
61716 +
61717 + if (loc >= 0) {
61718 + uid_set[loc].expires = expires;
61719 + return;
61720 + }
61721 +
61722 + uid_set[uid_used].uid = uid;
61723 + uid_set[uid_used].expires = expires;
61724 + uid_used++;
61725 +
61726 + gr_insertsort();
61727 +
61728 + return;
61729 +}
61730 +
61731 +void
61732 +gr_remove_uid(const unsigned short loc)
61733 +{
61734 + unsigned short i;
61735 +
61736 + for (i = loc + 1; i < uid_used; i++)
61737 + uid_set[i - 1] = uid_set[i];
61738 +
61739 + uid_used--;
61740 +
61741 + return;
61742 +}
61743 +
61744 +int
61745 +gr_check_crash_uid(const uid_t uid)
61746 +{
61747 + int loc;
61748 + int ret = 0;
61749 +
61750 + if (unlikely(!gr_acl_is_enabled()))
61751 + return 0;
61752 +
61753 + spin_lock(&gr_uid_lock);
61754 + loc = gr_find_uid(uid);
61755 +
61756 + if (loc < 0)
61757 + goto out_unlock;
61758 +
61759 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
61760 + gr_remove_uid(loc);
61761 + else
61762 + ret = 1;
61763 +
61764 +out_unlock:
61765 + spin_unlock(&gr_uid_lock);
61766 + return ret;
61767 +}
61768 +
61769 +static __inline__ int
61770 +proc_is_setxid(const struct cred *cred)
61771 +{
61772 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
61773 + cred->uid != cred->fsuid)
61774 + return 1;
61775 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
61776 + cred->gid != cred->fsgid)
61777 + return 1;
61778 +
61779 + return 0;
61780 +}
61781 +
61782 +void
61783 +gr_handle_crash(struct task_struct *task, const int sig)
61784 +{
61785 + struct acl_subject_label *curr;
61786 + struct task_struct *tsk, *tsk2;
61787 + const struct cred *cred;
61788 + const struct cred *cred2;
61789 +
61790 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
61791 + return;
61792 +
61793 + if (unlikely(!gr_acl_is_enabled()))
61794 + return;
61795 +
61796 + curr = task->acl;
61797 +
61798 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
61799 + return;
61800 +
61801 + if (time_before_eq(curr->expires, get_seconds())) {
61802 + curr->expires = 0;
61803 + curr->crashes = 0;
61804 + }
61805 +
61806 + curr->crashes++;
61807 +
61808 + if (!curr->expires)
61809 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
61810 +
61811 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
61812 + time_after(curr->expires, get_seconds())) {
61813 + rcu_read_lock();
61814 + cred = __task_cred(task);
61815 + if (cred->uid && proc_is_setxid(cred)) {
61816 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
61817 + spin_lock(&gr_uid_lock);
61818 + gr_insert_uid(cred->uid, curr->expires);
61819 + spin_unlock(&gr_uid_lock);
61820 + curr->expires = 0;
61821 + curr->crashes = 0;
61822 + read_lock(&tasklist_lock);
61823 + do_each_thread(tsk2, tsk) {
61824 + cred2 = __task_cred(tsk);
61825 + if (tsk != task && cred2->uid == cred->uid)
61826 + gr_fake_force_sig(SIGKILL, tsk);
61827 + } while_each_thread(tsk2, tsk);
61828 + read_unlock(&tasklist_lock);
61829 + } else {
61830 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
61831 + read_lock(&tasklist_lock);
61832 + read_lock(&grsec_exec_file_lock);
61833 + do_each_thread(tsk2, tsk) {
61834 + if (likely(tsk != task)) {
61835 + // if this thread has the same subject as the one that triggered
61836 + // RES_CRASH and it's the same binary, kill it
61837 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
61838 + gr_fake_force_sig(SIGKILL, tsk);
61839 + }
61840 + } while_each_thread(tsk2, tsk);
61841 + read_unlock(&grsec_exec_file_lock);
61842 + read_unlock(&tasklist_lock);
61843 + }
61844 + rcu_read_unlock();
61845 + }
61846 +
61847 + return;
61848 +}
61849 +
61850 +int
61851 +gr_check_crash_exec(const struct file *filp)
61852 +{
61853 + struct acl_subject_label *curr;
61854 +
61855 + if (unlikely(!gr_acl_is_enabled()))
61856 + return 0;
61857 +
61858 + read_lock(&gr_inode_lock);
61859 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
61860 + filp->f_path.dentry->d_inode->i_sb->s_dev,
61861 + current->role);
61862 + read_unlock(&gr_inode_lock);
61863 +
61864 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
61865 + (!curr->crashes && !curr->expires))
61866 + return 0;
61867 +
61868 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
61869 + time_after(curr->expires, get_seconds()))
61870 + return 1;
61871 + else if (time_before_eq(curr->expires, get_seconds())) {
61872 + curr->crashes = 0;
61873 + curr->expires = 0;
61874 + }
61875 +
61876 + return 0;
61877 +}
61878 +
61879 +void
61880 +gr_handle_alertkill(struct task_struct *task)
61881 +{
61882 + struct acl_subject_label *curracl;
61883 + __u32 curr_ip;
61884 + struct task_struct *p, *p2;
61885 +
61886 + if (unlikely(!gr_acl_is_enabled()))
61887 + return;
61888 +
61889 + curracl = task->acl;
61890 + curr_ip = task->signal->curr_ip;
61891 +
61892 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
61893 + read_lock(&tasklist_lock);
61894 + do_each_thread(p2, p) {
61895 + if (p->signal->curr_ip == curr_ip)
61896 + gr_fake_force_sig(SIGKILL, p);
61897 + } while_each_thread(p2, p);
61898 + read_unlock(&tasklist_lock);
61899 + } else if (curracl->mode & GR_KILLPROC)
61900 + gr_fake_force_sig(SIGKILL, task);
61901 +
61902 + return;
61903 +}
61904 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
61905 new file mode 100644
61906 index 0000000..9d83a69
61907 --- /dev/null
61908 +++ b/grsecurity/gracl_shm.c
61909 @@ -0,0 +1,40 @@
61910 +#include <linux/kernel.h>
61911 +#include <linux/mm.h>
61912 +#include <linux/sched.h>
61913 +#include <linux/file.h>
61914 +#include <linux/ipc.h>
61915 +#include <linux/gracl.h>
61916 +#include <linux/grsecurity.h>
61917 +#include <linux/grinternal.h>
61918 +
61919 +int
61920 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
61921 + const time_t shm_createtime, const uid_t cuid, const int shmid)
61922 +{
61923 + struct task_struct *task;
61924 +
61925 + if (!gr_acl_is_enabled())
61926 + return 1;
61927 +
61928 + rcu_read_lock();
61929 + read_lock(&tasklist_lock);
61930 +
61931 + task = find_task_by_vpid(shm_cprid);
61932 +
61933 + if (unlikely(!task))
61934 + task = find_task_by_vpid(shm_lapid);
61935 +
61936 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
61937 + (task->pid == shm_lapid)) &&
61938 + (task->acl->mode & GR_PROTSHM) &&
61939 + (task->acl != current->acl))) {
61940 + read_unlock(&tasklist_lock);
61941 + rcu_read_unlock();
61942 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
61943 + return 0;
61944 + }
61945 + read_unlock(&tasklist_lock);
61946 + rcu_read_unlock();
61947 +
61948 + return 1;
61949 +}
61950 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
61951 new file mode 100644
61952 index 0000000..bc0be01
61953 --- /dev/null
61954 +++ b/grsecurity/grsec_chdir.c
61955 @@ -0,0 +1,19 @@
61956 +#include <linux/kernel.h>
61957 +#include <linux/sched.h>
61958 +#include <linux/fs.h>
61959 +#include <linux/file.h>
61960 +#include <linux/grsecurity.h>
61961 +#include <linux/grinternal.h>
61962 +
61963 +void
61964 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
61965 +{
61966 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
61967 + if ((grsec_enable_chdir && grsec_enable_group &&
61968 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
61969 + !grsec_enable_group)) {
61970 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
61971 + }
61972 +#endif
61973 + return;
61974 +}
61975 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
61976 new file mode 100644
61977 index 0000000..197bdd5
61978 --- /dev/null
61979 +++ b/grsecurity/grsec_chroot.c
61980 @@ -0,0 +1,386 @@
61981 +#include <linux/kernel.h>
61982 +#include <linux/module.h>
61983 +#include <linux/sched.h>
61984 +#include <linux/file.h>
61985 +#include <linux/fs.h>
61986 +#include <linux/mount.h>
61987 +#include <linux/types.h>
61988 +#include <linux/pid_namespace.h>
61989 +#include <linux/grsecurity.h>
61990 +#include <linux/grinternal.h>
61991 +
61992 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
61993 +{
61994 +#ifdef CONFIG_GRKERNSEC
61995 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
61996 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
61997 + task->gr_is_chrooted = 1;
61998 + else
61999 + task->gr_is_chrooted = 0;
62000 +
62001 + task->gr_chroot_dentry = path->dentry;
62002 +#endif
62003 + return;
62004 +}
62005 +
62006 +void gr_clear_chroot_entries(struct task_struct *task)
62007 +{
62008 +#ifdef CONFIG_GRKERNSEC
62009 + task->gr_is_chrooted = 0;
62010 + task->gr_chroot_dentry = NULL;
62011 +#endif
62012 + return;
62013 +}
62014 +
62015 +int
62016 +gr_handle_chroot_unix(const pid_t pid)
62017 +{
62018 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
62019 + struct task_struct *p;
62020 +
62021 + if (unlikely(!grsec_enable_chroot_unix))
62022 + return 1;
62023 +
62024 + if (likely(!proc_is_chrooted(current)))
62025 + return 1;
62026 +
62027 + rcu_read_lock();
62028 + read_lock(&tasklist_lock);
62029 +
62030 + p = find_task_by_vpid_unrestricted(pid);
62031 + if (unlikely(p && !have_same_root(current, p))) {
62032 + read_unlock(&tasklist_lock);
62033 + rcu_read_unlock();
62034 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
62035 + return 0;
62036 + }
62037 + read_unlock(&tasklist_lock);
62038 + rcu_read_unlock();
62039 +#endif
62040 + return 1;
62041 +}
62042 +
62043 +int
62044 +gr_handle_chroot_nice(void)
62045 +{
62046 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
62047 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
62048 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
62049 + return -EPERM;
62050 + }
62051 +#endif
62052 + return 0;
62053 +}
62054 +
62055 +int
62056 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
62057 +{
62058 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
62059 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
62060 + && proc_is_chrooted(current)) {
62061 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
62062 + return -EACCES;
62063 + }
62064 +#endif
62065 + return 0;
62066 +}
62067 +
62068 +int
62069 +gr_handle_chroot_rawio(const struct inode *inode)
62070 +{
62071 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62072 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
62073 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
62074 + return 1;
62075 +#endif
62076 + return 0;
62077 +}
62078 +
62079 +int
62080 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
62081 +{
62082 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62083 + struct task_struct *p;
62084 + int ret = 0;
62085 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
62086 + return ret;
62087 +
62088 + read_lock(&tasklist_lock);
62089 + do_each_pid_task(pid, type, p) {
62090 + if (!have_same_root(current, p)) {
62091 + ret = 1;
62092 + goto out;
62093 + }
62094 + } while_each_pid_task(pid, type, p);
62095 +out:
62096 + read_unlock(&tasklist_lock);
62097 + return ret;
62098 +#endif
62099 + return 0;
62100 +}
62101 +
62102 +int
62103 +gr_pid_is_chrooted(struct task_struct *p)
62104 +{
62105 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62106 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
62107 + return 0;
62108 +
62109 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
62110 + !have_same_root(current, p)) {
62111 + return 1;
62112 + }
62113 +#endif
62114 + return 0;
62115 +}
62116 +
62117 +EXPORT_SYMBOL(gr_pid_is_chrooted);
62118 +
62119 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
62120 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
62121 +{
62122 + struct dentry *dentry = (struct dentry *)u_dentry;
62123 + struct vfsmount *mnt = (struct vfsmount *)u_mnt;
62124 + struct dentry *realroot;
62125 + struct vfsmount *realrootmnt;
62126 + struct dentry *currentroot;
62127 + struct vfsmount *currentmnt;
62128 + struct task_struct *reaper = &init_task;
62129 + int ret = 1;
62130 +
62131 + read_lock(&reaper->fs->lock);
62132 + realrootmnt = mntget(reaper->fs->root.mnt);
62133 + realroot = dget(reaper->fs->root.dentry);
62134 + read_unlock(&reaper->fs->lock);
62135 +
62136 + read_lock(&current->fs->lock);
62137 + currentmnt = mntget(current->fs->root.mnt);
62138 + currentroot = dget(current->fs->root.dentry);
62139 + read_unlock(&current->fs->lock);
62140 +
62141 + spin_lock(&dcache_lock);
62142 + for (;;) {
62143 + if (unlikely((dentry == realroot && mnt == realrootmnt)
62144 + || (dentry == currentroot && mnt == currentmnt)))
62145 + break;
62146 + if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
62147 + if (mnt->mnt_parent == mnt)
62148 + break;
62149 + dentry = mnt->mnt_mountpoint;
62150 + mnt = mnt->mnt_parent;
62151 + continue;
62152 + }
62153 + dentry = dentry->d_parent;
62154 + }
62155 + spin_unlock(&dcache_lock);
62156 +
62157 + dput(currentroot);
62158 + mntput(currentmnt);
62159 +
62160 + /* access is outside of chroot */
62161 + if (dentry == realroot && mnt == realrootmnt)
62162 + ret = 0;
62163 +
62164 + dput(realroot);
62165 + mntput(realrootmnt);
62166 + return ret;
62167 +}
62168 +#endif
62169 +
62170 +int
62171 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
62172 +{
62173 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
62174 + if (!grsec_enable_chroot_fchdir)
62175 + return 1;
62176 +
62177 + if (!proc_is_chrooted(current))
62178 + return 1;
62179 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
62180 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
62181 + return 0;
62182 + }
62183 +#endif
62184 + return 1;
62185 +}
62186 +
62187 +int
62188 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62189 + const time_t shm_createtime)
62190 +{
62191 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
62192 + struct task_struct *p;
62193 + time_t starttime;
62194 +
62195 + if (unlikely(!grsec_enable_chroot_shmat))
62196 + return 1;
62197 +
62198 + if (likely(!proc_is_chrooted(current)))
62199 + return 1;
62200 +
62201 + rcu_read_lock();
62202 + read_lock(&tasklist_lock);
62203 +
62204 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
62205 + starttime = p->start_time.tv_sec;
62206 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
62207 + if (have_same_root(current, p)) {
62208 + goto allow;
62209 + } else {
62210 + read_unlock(&tasklist_lock);
62211 + rcu_read_unlock();
62212 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
62213 + return 0;
62214 + }
62215 + }
62216 + /* creator exited, pid reuse, fall through to next check */
62217 + }
62218 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
62219 + if (unlikely(!have_same_root(current, p))) {
62220 + read_unlock(&tasklist_lock);
62221 + rcu_read_unlock();
62222 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
62223 + return 0;
62224 + }
62225 + }
62226 +
62227 +allow:
62228 + read_unlock(&tasklist_lock);
62229 + rcu_read_unlock();
62230 +#endif
62231 + return 1;
62232 +}
62233 +
62234 +void
62235 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
62236 +{
62237 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
62238 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
62239 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
62240 +#endif
62241 + return;
62242 +}
62243 +
62244 +int
62245 +gr_handle_chroot_mknod(const struct dentry *dentry,
62246 + const struct vfsmount *mnt, const int mode)
62247 +{
62248 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
62249 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
62250 + proc_is_chrooted(current)) {
62251 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
62252 + return -EPERM;
62253 + }
62254 +#endif
62255 + return 0;
62256 +}
62257 +
62258 +int
62259 +gr_handle_chroot_mount(const struct dentry *dentry,
62260 + const struct vfsmount *mnt, const char *dev_name)
62261 +{
62262 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
62263 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
62264 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
62265 + return -EPERM;
62266 + }
62267 +#endif
62268 + return 0;
62269 +}
62270 +
62271 +int
62272 +gr_handle_chroot_pivot(void)
62273 +{
62274 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
62275 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
62276 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
62277 + return -EPERM;
62278 + }
62279 +#endif
62280 + return 0;
62281 +}
62282 +
62283 +int
62284 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
62285 +{
62286 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
62287 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
62288 + !gr_is_outside_chroot(dentry, mnt)) {
62289 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
62290 + return -EPERM;
62291 + }
62292 +#endif
62293 + return 0;
62294 +}
62295 +
62296 +extern const char *captab_log[];
62297 +extern int captab_log_entries;
62298 +
62299 +int
62300 +gr_chroot_is_capable(const int cap)
62301 +{
62302 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62303 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
62304 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
62305 + if (cap_raised(chroot_caps, cap)) {
62306 + const struct cred *creds = current_cred();
62307 + if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
62308 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
62309 + }
62310 + return 0;
62311 + }
62312 + }
62313 +#endif
62314 + return 1;
62315 +}
62316 +
62317 +int
62318 +gr_chroot_is_capable_nolog(const int cap)
62319 +{
62320 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62321 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
62322 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
62323 + if (cap_raised(chroot_caps, cap)) {
62324 + return 0;
62325 + }
62326 + }
62327 +#endif
62328 + return 1;
62329 +}
62330 +
62331 +int
62332 +gr_handle_chroot_sysctl(const int op)
62333 +{
62334 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
62335 + if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
62336 + && (op & MAY_WRITE))
62337 + return -EACCES;
62338 +#endif
62339 + return 0;
62340 +}
62341 +
62342 +void
62343 +gr_handle_chroot_chdir(struct path *path)
62344 +{
62345 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
62346 + if (grsec_enable_chroot_chdir)
62347 + set_fs_pwd(current->fs, path);
62348 +#endif
62349 + return;
62350 +}
62351 +
62352 +int
62353 +gr_handle_chroot_chmod(const struct dentry *dentry,
62354 + const struct vfsmount *mnt, const int mode)
62355 +{
62356 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
62357 + /* allow chmod +s on directories, but not on files */
62358 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
62359 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
62360 + proc_is_chrooted(current)) {
62361 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
62362 + return -EPERM;
62363 + }
62364 +#endif
62365 + return 0;
62366 +}
62367 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
62368 new file mode 100644
62369 index 0000000..b81db5b
62370 --- /dev/null
62371 +++ b/grsecurity/grsec_disabled.c
62372 @@ -0,0 +1,439 @@
62373 +#include <linux/kernel.h>
62374 +#include <linux/module.h>
62375 +#include <linux/sched.h>
62376 +#include <linux/file.h>
62377 +#include <linux/fs.h>
62378 +#include <linux/kdev_t.h>
62379 +#include <linux/net.h>
62380 +#include <linux/in.h>
62381 +#include <linux/ip.h>
62382 +#include <linux/skbuff.h>
62383 +#include <linux/sysctl.h>
62384 +
62385 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
62386 +void
62387 +pax_set_initial_flags(struct linux_binprm *bprm)
62388 +{
62389 + return;
62390 +}
62391 +#endif
62392 +
62393 +#ifdef CONFIG_SYSCTL
62394 +__u32
62395 +gr_handle_sysctl(const struct ctl_table * table, const int op)
62396 +{
62397 + return 0;
62398 +}
62399 +#endif
62400 +
62401 +#ifdef CONFIG_TASKSTATS
62402 +int gr_is_taskstats_denied(int pid)
62403 +{
62404 + return 0;
62405 +}
62406 +#endif
62407 +
62408 +int
62409 +gr_acl_is_enabled(void)
62410 +{
62411 + return 0;
62412 +}
62413 +
62414 +void
62415 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
62416 +{
62417 + return;
62418 +}
62419 +
62420 +int
62421 +gr_handle_rawio(const struct inode *inode)
62422 +{
62423 + return 0;
62424 +}
62425 +
62426 +void
62427 +gr_acl_handle_psacct(struct task_struct *task, const long code)
62428 +{
62429 + return;
62430 +}
62431 +
62432 +int
62433 +gr_handle_ptrace(struct task_struct *task, const long request)
62434 +{
62435 + return 0;
62436 +}
62437 +
62438 +int
62439 +gr_handle_proc_ptrace(struct task_struct *task)
62440 +{
62441 + return 0;
62442 +}
62443 +
62444 +void
62445 +gr_learn_resource(const struct task_struct *task,
62446 + const int res, const unsigned long wanted, const int gt)
62447 +{
62448 + return;
62449 +}
62450 +
62451 +int
62452 +gr_set_acls(const int type)
62453 +{
62454 + return 0;
62455 +}
62456 +
62457 +int
62458 +gr_check_hidden_task(const struct task_struct *tsk)
62459 +{
62460 + return 0;
62461 +}
62462 +
62463 +int
62464 +gr_check_protected_task(const struct task_struct *task)
62465 +{
62466 + return 0;
62467 +}
62468 +
62469 +int
62470 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
62471 +{
62472 + return 0;
62473 +}
62474 +
62475 +void
62476 +gr_copy_label(struct task_struct *tsk)
62477 +{
62478 + return;
62479 +}
62480 +
62481 +void
62482 +gr_set_pax_flags(struct task_struct *task)
62483 +{
62484 + return;
62485 +}
62486 +
62487 +int
62488 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
62489 + const int unsafe_share)
62490 +{
62491 + return 0;
62492 +}
62493 +
62494 +void
62495 +gr_handle_delete(const ino_t ino, const dev_t dev)
62496 +{
62497 + return;
62498 +}
62499 +
62500 +void
62501 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
62502 +{
62503 + return;
62504 +}
62505 +
62506 +void
62507 +gr_handle_crash(struct task_struct *task, const int sig)
62508 +{
62509 + return;
62510 +}
62511 +
62512 +int
62513 +gr_check_crash_exec(const struct file *filp)
62514 +{
62515 + return 0;
62516 +}
62517 +
62518 +int
62519 +gr_check_crash_uid(const uid_t uid)
62520 +{
62521 + return 0;
62522 +}
62523 +
62524 +void
62525 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
62526 + struct dentry *old_dentry,
62527 + struct dentry *new_dentry,
62528 + struct vfsmount *mnt, const __u8 replace)
62529 +{
62530 + return;
62531 +}
62532 +
62533 +int
62534 +gr_search_socket(const int family, const int type, const int protocol)
62535 +{
62536 + return 1;
62537 +}
62538 +
62539 +int
62540 +gr_search_connectbind(const int mode, const struct socket *sock,
62541 + const struct sockaddr_in *addr)
62542 +{
62543 + return 0;
62544 +}
62545 +
62546 +void
62547 +gr_handle_alertkill(struct task_struct *task)
62548 +{
62549 + return;
62550 +}
62551 +
62552 +__u32
62553 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
62554 +{
62555 + return 1;
62556 +}
62557 +
62558 +__u32
62559 +gr_acl_handle_hidden_file(const struct dentry * dentry,
62560 + const struct vfsmount * mnt)
62561 +{
62562 + return 1;
62563 +}
62564 +
62565 +__u32
62566 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
62567 + int acc_mode)
62568 +{
62569 + return 1;
62570 +}
62571 +
62572 +__u32
62573 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
62574 +{
62575 + return 1;
62576 +}
62577 +
62578 +__u32
62579 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
62580 +{
62581 + return 1;
62582 +}
62583 +
62584 +int
62585 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
62586 + unsigned int *vm_flags)
62587 +{
62588 + return 1;
62589 +}
62590 +
62591 +__u32
62592 +gr_acl_handle_truncate(const struct dentry * dentry,
62593 + const struct vfsmount * mnt)
62594 +{
62595 + return 1;
62596 +}
62597 +
62598 +__u32
62599 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
62600 +{
62601 + return 1;
62602 +}
62603 +
62604 +__u32
62605 +gr_acl_handle_access(const struct dentry * dentry,
62606 + const struct vfsmount * mnt, const int fmode)
62607 +{
62608 + return 1;
62609 +}
62610 +
62611 +__u32
62612 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
62613 + mode_t mode)
62614 +{
62615 + return 1;
62616 +}
62617 +
62618 +__u32
62619 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
62620 + mode_t mode)
62621 +{
62622 + return 1;
62623 +}
62624 +
62625 +__u32
62626 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
62627 +{
62628 + return 1;
62629 +}
62630 +
62631 +__u32
62632 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
62633 +{
62634 + return 1;
62635 +}
62636 +
62637 +void
62638 +grsecurity_init(void)
62639 +{
62640 + return;
62641 +}
62642 +
62643 +__u32
62644 +gr_acl_handle_mknod(const struct dentry * new_dentry,
62645 + const struct dentry * parent_dentry,
62646 + const struct vfsmount * parent_mnt,
62647 + const int mode)
62648 +{
62649 + return 1;
62650 +}
62651 +
62652 +__u32
62653 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
62654 + const struct dentry * parent_dentry,
62655 + const struct vfsmount * parent_mnt)
62656 +{
62657 + return 1;
62658 +}
62659 +
62660 +__u32
62661 +gr_acl_handle_symlink(const struct dentry * new_dentry,
62662 + const struct dentry * parent_dentry,
62663 + const struct vfsmount * parent_mnt, const char *from)
62664 +{
62665 + return 1;
62666 +}
62667 +
62668 +__u32
62669 +gr_acl_handle_link(const struct dentry * new_dentry,
62670 + const struct dentry * parent_dentry,
62671 + const struct vfsmount * parent_mnt,
62672 + const struct dentry * old_dentry,
62673 + const struct vfsmount * old_mnt, const char *to)
62674 +{
62675 + return 1;
62676 +}
62677 +
62678 +int
62679 +gr_acl_handle_rename(const struct dentry *new_dentry,
62680 + const struct dentry *parent_dentry,
62681 + const struct vfsmount *parent_mnt,
62682 + const struct dentry *old_dentry,
62683 + const struct inode *old_parent_inode,
62684 + const struct vfsmount *old_mnt, const char *newname)
62685 +{
62686 + return 0;
62687 +}
62688 +
62689 +int
62690 +gr_acl_handle_filldir(const struct file *file, const char *name,
62691 + const int namelen, const ino_t ino)
62692 +{
62693 + return 1;
62694 +}
62695 +
62696 +int
62697 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62698 + const time_t shm_createtime, const uid_t cuid, const int shmid)
62699 +{
62700 + return 1;
62701 +}
62702 +
62703 +int
62704 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
62705 +{
62706 + return 0;
62707 +}
62708 +
62709 +int
62710 +gr_search_accept(const struct socket *sock)
62711 +{
62712 + return 0;
62713 +}
62714 +
62715 +int
62716 +gr_search_listen(const struct socket *sock)
62717 +{
62718 + return 0;
62719 +}
62720 +
62721 +int
62722 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
62723 +{
62724 + return 0;
62725 +}
62726 +
62727 +__u32
62728 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
62729 +{
62730 + return 1;
62731 +}
62732 +
62733 +__u32
62734 +gr_acl_handle_creat(const struct dentry * dentry,
62735 + const struct dentry * p_dentry,
62736 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
62737 + const int imode)
62738 +{
62739 + return 1;
62740 +}
62741 +
62742 +void
62743 +gr_acl_handle_exit(void)
62744 +{
62745 + return;
62746 +}
62747 +
62748 +int
62749 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
62750 +{
62751 + return 1;
62752 +}
62753 +
62754 +void
62755 +gr_set_role_label(const uid_t uid, const gid_t gid)
62756 +{
62757 + return;
62758 +}
62759 +
62760 +int
62761 +gr_acl_handle_procpidmem(const struct task_struct *task)
62762 +{
62763 + return 0;
62764 +}
62765 +
62766 +int
62767 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
62768 +{
62769 + return 0;
62770 +}
62771 +
62772 +int
62773 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
62774 +{
62775 + return 0;
62776 +}
62777 +
62778 +void
62779 +gr_set_kernel_label(struct task_struct *task)
62780 +{
62781 + return;
62782 +}
62783 +
62784 +int
62785 +gr_check_user_change(int real, int effective, int fs)
62786 +{
62787 + return 0;
62788 +}
62789 +
62790 +int
62791 +gr_check_group_change(int real, int effective, int fs)
62792 +{
62793 + return 0;
62794 +}
62795 +
62796 +int gr_acl_enable_at_secure(void)
62797 +{
62798 + return 0;
62799 +}
62800 +
62801 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
62802 +{
62803 + return dentry->d_inode->i_sb->s_dev;
62804 +}
62805 +
62806 +EXPORT_SYMBOL(gr_learn_resource);
62807 +EXPORT_SYMBOL(gr_set_kernel_label);
62808 +#ifdef CONFIG_SECURITY
62809 +EXPORT_SYMBOL(gr_check_user_change);
62810 +EXPORT_SYMBOL(gr_check_group_change);
62811 +#endif
62812 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
62813 new file mode 100644
62814 index 0000000..a96e155
62815 --- /dev/null
62816 +++ b/grsecurity/grsec_exec.c
62817 @@ -0,0 +1,204 @@
62818 +#include <linux/kernel.h>
62819 +#include <linux/sched.h>
62820 +#include <linux/file.h>
62821 +#include <linux/binfmts.h>
62822 +#include <linux/smp_lock.h>
62823 +#include <linux/fs.h>
62824 +#include <linux/types.h>
62825 +#include <linux/grdefs.h>
62826 +#include <linux/grinternal.h>
62827 +#include <linux/capability.h>
62828 +#include <linux/compat.h>
62829 +#include <linux/module.h>
62830 +
62831 +#include <asm/uaccess.h>
62832 +
62833 +#ifdef CONFIG_GRKERNSEC_EXECLOG
62834 +static char gr_exec_arg_buf[132];
62835 +static DEFINE_MUTEX(gr_exec_arg_mutex);
62836 +#endif
62837 +
62838 +void
62839 +gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
62840 +{
62841 +#ifdef CONFIG_GRKERNSEC_EXECLOG
62842 + char *grarg = gr_exec_arg_buf;
62843 + unsigned int i, x, execlen = 0;
62844 + char c;
62845 +
62846 + if (!((grsec_enable_execlog && grsec_enable_group &&
62847 + in_group_p(grsec_audit_gid))
62848 + || (grsec_enable_execlog && !grsec_enable_group)))
62849 + return;
62850 +
62851 + mutex_lock(&gr_exec_arg_mutex);
62852 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
62853 +
62854 + if (unlikely(argv == NULL))
62855 + goto log;
62856 +
62857 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
62858 + const char __user *p;
62859 + unsigned int len;
62860 +
62861 + if (copy_from_user(&p, argv + i, sizeof(p)))
62862 + goto log;
62863 + if (!p)
62864 + goto log;
62865 + len = strnlen_user(p, 128 - execlen);
62866 + if (len > 128 - execlen)
62867 + len = 128 - execlen;
62868 + else if (len > 0)
62869 + len--;
62870 + if (copy_from_user(grarg + execlen, p, len))
62871 + goto log;
62872 +
62873 + /* rewrite unprintable characters */
62874 + for (x = 0; x < len; x++) {
62875 + c = *(grarg + execlen + x);
62876 + if (c < 32 || c > 126)
62877 + *(grarg + execlen + x) = ' ';
62878 + }
62879 +
62880 + execlen += len;
62881 + *(grarg + execlen) = ' ';
62882 + *(grarg + execlen + 1) = '\0';
62883 + execlen++;
62884 + }
62885 +
62886 + log:
62887 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
62888 + bprm->file->f_path.mnt, grarg);
62889 + mutex_unlock(&gr_exec_arg_mutex);
62890 +#endif
62891 + return;
62892 +}
62893 +
62894 +#ifdef CONFIG_COMPAT
62895 +void
62896 +gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
62897 +{
62898 +#ifdef CONFIG_GRKERNSEC_EXECLOG
62899 + char *grarg = gr_exec_arg_buf;
62900 + unsigned int i, x, execlen = 0;
62901 + char c;
62902 +
62903 + if (!((grsec_enable_execlog && grsec_enable_group &&
62904 + in_group_p(grsec_audit_gid))
62905 + || (grsec_enable_execlog && !grsec_enable_group)))
62906 + return;
62907 +
62908 + mutex_lock(&gr_exec_arg_mutex);
62909 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
62910 +
62911 + if (unlikely(argv == NULL))
62912 + goto log;
62913 +
62914 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
62915 + compat_uptr_t p;
62916 + unsigned int len;
62917 +
62918 + if (get_user(p, argv + i))
62919 + goto log;
62920 + len = strnlen_user(compat_ptr(p), 128 - execlen);
62921 + if (len > 128 - execlen)
62922 + len = 128 - execlen;
62923 + else if (len > 0)
62924 + len--;
62925 + else
62926 + goto log;
62927 + if (copy_from_user(grarg + execlen, compat_ptr(p), len))
62928 + goto log;
62929 +
62930 + /* rewrite unprintable characters */
62931 + for (x = 0; x < len; x++) {
62932 + c = *(grarg + execlen + x);
62933 + if (c < 32 || c > 126)
62934 + *(grarg + execlen + x) = ' ';
62935 + }
62936 +
62937 + execlen += len;
62938 + *(grarg + execlen) = ' ';
62939 + *(grarg + execlen + 1) = '\0';
62940 + execlen++;
62941 + }
62942 +
62943 + log:
62944 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
62945 + bprm->file->f_path.mnt, grarg);
62946 + mutex_unlock(&gr_exec_arg_mutex);
62947 +#endif
62948 + return;
62949 +}
62950 +#endif
62951 +
62952 +#ifdef CONFIG_GRKERNSEC
62953 +extern int gr_acl_is_capable(const int cap);
62954 +extern int gr_acl_is_capable_nolog(const int cap);
62955 +extern int gr_chroot_is_capable(const int cap);
62956 +extern int gr_chroot_is_capable_nolog(const int cap);
62957 +#endif
62958 +
62959 +const char *captab_log[] = {
62960 + "CAP_CHOWN",
62961 + "CAP_DAC_OVERRIDE",
62962 + "CAP_DAC_READ_SEARCH",
62963 + "CAP_FOWNER",
62964 + "CAP_FSETID",
62965 + "CAP_KILL",
62966 + "CAP_SETGID",
62967 + "CAP_SETUID",
62968 + "CAP_SETPCAP",
62969 + "CAP_LINUX_IMMUTABLE",
62970 + "CAP_NET_BIND_SERVICE",
62971 + "CAP_NET_BROADCAST",
62972 + "CAP_NET_ADMIN",
62973 + "CAP_NET_RAW",
62974 + "CAP_IPC_LOCK",
62975 + "CAP_IPC_OWNER",
62976 + "CAP_SYS_MODULE",
62977 + "CAP_SYS_RAWIO",
62978 + "CAP_SYS_CHROOT",
62979 + "CAP_SYS_PTRACE",
62980 + "CAP_SYS_PACCT",
62981 + "CAP_SYS_ADMIN",
62982 + "CAP_SYS_BOOT",
62983 + "CAP_SYS_NICE",
62984 + "CAP_SYS_RESOURCE",
62985 + "CAP_SYS_TIME",
62986 + "CAP_SYS_TTY_CONFIG",
62987 + "CAP_MKNOD",
62988 + "CAP_LEASE",
62989 + "CAP_AUDIT_WRITE",
62990 + "CAP_AUDIT_CONTROL",
62991 + "CAP_SETFCAP",
62992 + "CAP_MAC_OVERRIDE",
62993 + "CAP_MAC_ADMIN"
62994 +};
62995 +
62996 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
62997 +
62998 +int gr_is_capable(const int cap)
62999 +{
63000 +#ifdef CONFIG_GRKERNSEC
63001 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
63002 + return 1;
63003 + return 0;
63004 +#else
63005 + return 1;
63006 +#endif
63007 +}
63008 +
63009 +int gr_is_capable_nolog(const int cap)
63010 +{
63011 +#ifdef CONFIG_GRKERNSEC
63012 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
63013 + return 1;
63014 + return 0;
63015 +#else
63016 + return 1;
63017 +#endif
63018 +}
63019 +
63020 +EXPORT_SYMBOL(gr_is_capable);
63021 +EXPORT_SYMBOL(gr_is_capable_nolog);
63022 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
63023 new file mode 100644
63024 index 0000000..d3ee748
63025 --- /dev/null
63026 +++ b/grsecurity/grsec_fifo.c
63027 @@ -0,0 +1,24 @@
63028 +#include <linux/kernel.h>
63029 +#include <linux/sched.h>
63030 +#include <linux/fs.h>
63031 +#include <linux/file.h>
63032 +#include <linux/grinternal.h>
63033 +
63034 +int
63035 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
63036 + const struct dentry *dir, const int flag, const int acc_mode)
63037 +{
63038 +#ifdef CONFIG_GRKERNSEC_FIFO
63039 + const struct cred *cred = current_cred();
63040 +
63041 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
63042 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
63043 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
63044 + (cred->fsuid != dentry->d_inode->i_uid)) {
63045 + if (!inode_permission(dentry->d_inode, acc_mode))
63046 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
63047 + return -EACCES;
63048 + }
63049 +#endif
63050 + return 0;
63051 +}
63052 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
63053 new file mode 100644
63054 index 0000000..8ca18bf
63055 --- /dev/null
63056 +++ b/grsecurity/grsec_fork.c
63057 @@ -0,0 +1,23 @@
63058 +#include <linux/kernel.h>
63059 +#include <linux/sched.h>
63060 +#include <linux/grsecurity.h>
63061 +#include <linux/grinternal.h>
63062 +#include <linux/errno.h>
63063 +
63064 +void
63065 +gr_log_forkfail(const int retval)
63066 +{
63067 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
63068 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
63069 + switch (retval) {
63070 + case -EAGAIN:
63071 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
63072 + break;
63073 + case -ENOMEM:
63074 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
63075 + break;
63076 + }
63077 + }
63078 +#endif
63079 + return;
63080 +}
63081 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
63082 new file mode 100644
63083 index 0000000..1e995d3
63084 --- /dev/null
63085 +++ b/grsecurity/grsec_init.c
63086 @@ -0,0 +1,278 @@
63087 +#include <linux/kernel.h>
63088 +#include <linux/sched.h>
63089 +#include <linux/mm.h>
63090 +#include <linux/smp_lock.h>
63091 +#include <linux/gracl.h>
63092 +#include <linux/slab.h>
63093 +#include <linux/vmalloc.h>
63094 +#include <linux/percpu.h>
63095 +#include <linux/module.h>
63096 +
63097 +int grsec_enable_ptrace_readexec;
63098 +int grsec_enable_setxid;
63099 +int grsec_enable_brute;
63100 +int grsec_enable_link;
63101 +int grsec_enable_dmesg;
63102 +int grsec_enable_harden_ptrace;
63103 +int grsec_enable_fifo;
63104 +int grsec_enable_execlog;
63105 +int grsec_enable_signal;
63106 +int grsec_enable_forkfail;
63107 +int grsec_enable_audit_ptrace;
63108 +int grsec_enable_time;
63109 +int grsec_enable_audit_textrel;
63110 +int grsec_enable_group;
63111 +int grsec_audit_gid;
63112 +int grsec_enable_chdir;
63113 +int grsec_enable_mount;
63114 +int grsec_enable_rofs;
63115 +int grsec_enable_chroot_findtask;
63116 +int grsec_enable_chroot_mount;
63117 +int grsec_enable_chroot_shmat;
63118 +int grsec_enable_chroot_fchdir;
63119 +int grsec_enable_chroot_double;
63120 +int grsec_enable_chroot_pivot;
63121 +int grsec_enable_chroot_chdir;
63122 +int grsec_enable_chroot_chmod;
63123 +int grsec_enable_chroot_mknod;
63124 +int grsec_enable_chroot_nice;
63125 +int grsec_enable_chroot_execlog;
63126 +int grsec_enable_chroot_caps;
63127 +int grsec_enable_chroot_sysctl;
63128 +int grsec_enable_chroot_unix;
63129 +int grsec_enable_tpe;
63130 +int grsec_tpe_gid;
63131 +int grsec_enable_blackhole;
63132 +#ifdef CONFIG_IPV6_MODULE
63133 +EXPORT_SYMBOL(grsec_enable_blackhole);
63134 +#endif
63135 +int grsec_lastack_retries;
63136 +int grsec_enable_tpe_all;
63137 +int grsec_enable_tpe_invert;
63138 +int grsec_enable_socket_all;
63139 +int grsec_socket_all_gid;
63140 +int grsec_enable_socket_client;
63141 +int grsec_socket_client_gid;
63142 +int grsec_enable_socket_server;
63143 +int grsec_socket_server_gid;
63144 +int grsec_resource_logging;
63145 +int grsec_disable_privio;
63146 +int grsec_enable_log_rwxmaps;
63147 +int grsec_lock;
63148 +
63149 +DEFINE_SPINLOCK(grsec_alert_lock);
63150 +unsigned long grsec_alert_wtime = 0;
63151 +unsigned long grsec_alert_fyet = 0;
63152 +
63153 +DEFINE_SPINLOCK(grsec_audit_lock);
63154 +
63155 +DEFINE_RWLOCK(grsec_exec_file_lock);
63156 +
63157 +char *gr_shared_page[4];
63158 +
63159 +char *gr_alert_log_fmt;
63160 +char *gr_audit_log_fmt;
63161 +char *gr_alert_log_buf;
63162 +char *gr_audit_log_buf;
63163 +
63164 +extern struct gr_arg *gr_usermode;
63165 +extern unsigned char *gr_system_salt;
63166 +extern unsigned char *gr_system_sum;
63167 +
63168 +void __init
63169 +grsecurity_init(void)
63170 +{
63171 + int j;
63172 + /* create the per-cpu shared pages */
63173 +
63174 +#ifdef CONFIG_X86
63175 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
63176 +#endif
63177 +
63178 + for (j = 0; j < 4; j++) {
63179 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
63180 + if (gr_shared_page[j] == NULL) {
63181 + panic("Unable to allocate grsecurity shared page");
63182 + return;
63183 + }
63184 + }
63185 +
63186 + /* allocate log buffers */
63187 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
63188 + if (!gr_alert_log_fmt) {
63189 + panic("Unable to allocate grsecurity alert log format buffer");
63190 + return;
63191 + }
63192 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
63193 + if (!gr_audit_log_fmt) {
63194 + panic("Unable to allocate grsecurity audit log format buffer");
63195 + return;
63196 + }
63197 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
63198 + if (!gr_alert_log_buf) {
63199 + panic("Unable to allocate grsecurity alert log buffer");
63200 + return;
63201 + }
63202 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
63203 + if (!gr_audit_log_buf) {
63204 + panic("Unable to allocate grsecurity audit log buffer");
63205 + return;
63206 + }
63207 +
63208 + /* allocate memory for authentication structure */
63209 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
63210 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
63211 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
63212 +
63213 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
63214 + panic("Unable to allocate grsecurity authentication structure");
63215 + return;
63216 + }
63217 +
63218 +
63219 +#ifdef CONFIG_GRKERNSEC_IO
63220 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
63221 + grsec_disable_privio = 1;
63222 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
63223 + grsec_disable_privio = 1;
63224 +#else
63225 + grsec_disable_privio = 0;
63226 +#endif
63227 +#endif
63228 +
63229 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
63230 + /* for backward compatibility, tpe_invert always defaults to on if
63231 + enabled in the kernel
63232 + */
63233 + grsec_enable_tpe_invert = 1;
63234 +#endif
63235 +
63236 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
63237 +#ifndef CONFIG_GRKERNSEC_SYSCTL
63238 + grsec_lock = 1;
63239 +#endif
63240 +
63241 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
63242 + grsec_enable_audit_textrel = 1;
63243 +#endif
63244 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
63245 + grsec_enable_log_rwxmaps = 1;
63246 +#endif
63247 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
63248 + grsec_enable_group = 1;
63249 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
63250 +#endif
63251 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
63252 + grsec_enable_chdir = 1;
63253 +#endif
63254 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
63255 + grsec_enable_harden_ptrace = 1;
63256 +#endif
63257 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
63258 + grsec_enable_mount = 1;
63259 +#endif
63260 +#ifdef CONFIG_GRKERNSEC_LINK
63261 + grsec_enable_link = 1;
63262 +#endif
63263 +#ifdef CONFIG_GRKERNSEC_BRUTE
63264 + grsec_enable_brute = 1;
63265 +#endif
63266 +#ifdef CONFIG_GRKERNSEC_DMESG
63267 + grsec_enable_dmesg = 1;
63268 +#endif
63269 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
63270 + grsec_enable_blackhole = 1;
63271 + grsec_lastack_retries = 4;
63272 +#endif
63273 +#ifdef CONFIG_GRKERNSEC_FIFO
63274 + grsec_enable_fifo = 1;
63275 +#endif
63276 +#ifdef CONFIG_GRKERNSEC_EXECLOG
63277 + grsec_enable_execlog = 1;
63278 +#endif
63279 +#ifdef CONFIG_GRKERNSEC_SETXID
63280 + grsec_enable_setxid = 1;
63281 +#endif
63282 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
63283 + grsec_enable_ptrace_readexec = 1;
63284 +#endif
63285 +#ifdef CONFIG_GRKERNSEC_SIGNAL
63286 + grsec_enable_signal = 1;
63287 +#endif
63288 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
63289 + grsec_enable_forkfail = 1;
63290 +#endif
63291 +#ifdef CONFIG_GRKERNSEC_TIME
63292 + grsec_enable_time = 1;
63293 +#endif
63294 +#ifdef CONFIG_GRKERNSEC_RESLOG
63295 + grsec_resource_logging = 1;
63296 +#endif
63297 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
63298 + grsec_enable_chroot_findtask = 1;
63299 +#endif
63300 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
63301 + grsec_enable_chroot_unix = 1;
63302 +#endif
63303 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
63304 + grsec_enable_chroot_mount = 1;
63305 +#endif
63306 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
63307 + grsec_enable_chroot_fchdir = 1;
63308 +#endif
63309 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
63310 + grsec_enable_chroot_shmat = 1;
63311 +#endif
63312 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
63313 + grsec_enable_audit_ptrace = 1;
63314 +#endif
63315 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
63316 + grsec_enable_chroot_double = 1;
63317 +#endif
63318 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
63319 + grsec_enable_chroot_pivot = 1;
63320 +#endif
63321 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
63322 + grsec_enable_chroot_chdir = 1;
63323 +#endif
63324 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
63325 + grsec_enable_chroot_chmod = 1;
63326 +#endif
63327 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
63328 + grsec_enable_chroot_mknod = 1;
63329 +#endif
63330 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
63331 + grsec_enable_chroot_nice = 1;
63332 +#endif
63333 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
63334 + grsec_enable_chroot_execlog = 1;
63335 +#endif
63336 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63337 + grsec_enable_chroot_caps = 1;
63338 +#endif
63339 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
63340 + grsec_enable_chroot_sysctl = 1;
63341 +#endif
63342 +#ifdef CONFIG_GRKERNSEC_TPE
63343 + grsec_enable_tpe = 1;
63344 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
63345 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
63346 + grsec_enable_tpe_all = 1;
63347 +#endif
63348 +#endif
63349 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
63350 + grsec_enable_socket_all = 1;
63351 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
63352 +#endif
63353 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
63354 + grsec_enable_socket_client = 1;
63355 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
63356 +#endif
63357 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
63358 + grsec_enable_socket_server = 1;
63359 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
63360 +#endif
63361 +#endif
63362 +
63363 + return;
63364 +}
63365 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
63366 new file mode 100644
63367 index 0000000..3efe141
63368 --- /dev/null
63369 +++ b/grsecurity/grsec_link.c
63370 @@ -0,0 +1,43 @@
63371 +#include <linux/kernel.h>
63372 +#include <linux/sched.h>
63373 +#include <linux/fs.h>
63374 +#include <linux/file.h>
63375 +#include <linux/grinternal.h>
63376 +
63377 +int
63378 +gr_handle_follow_link(const struct inode *parent,
63379 + const struct inode *inode,
63380 + const struct dentry *dentry, const struct vfsmount *mnt)
63381 +{
63382 +#ifdef CONFIG_GRKERNSEC_LINK
63383 + const struct cred *cred = current_cred();
63384 +
63385 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
63386 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
63387 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
63388 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
63389 + return -EACCES;
63390 + }
63391 +#endif
63392 + return 0;
63393 +}
63394 +
63395 +int
63396 +gr_handle_hardlink(const struct dentry *dentry,
63397 + const struct vfsmount *mnt,
63398 + struct inode *inode, const int mode, const char *to)
63399 +{
63400 +#ifdef CONFIG_GRKERNSEC_LINK
63401 + const struct cred *cred = current_cred();
63402 +
63403 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
63404 + (!S_ISREG(mode) || (mode & S_ISUID) ||
63405 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
63406 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
63407 + !capable(CAP_FOWNER) && cred->uid) {
63408 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
63409 + return -EPERM;
63410 + }
63411 +#endif
63412 + return 0;
63413 +}
63414 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
63415 new file mode 100644
63416 index 0000000..a45d2e9
63417 --- /dev/null
63418 +++ b/grsecurity/grsec_log.c
63419 @@ -0,0 +1,322 @@
63420 +#include <linux/kernel.h>
63421 +#include <linux/sched.h>
63422 +#include <linux/file.h>
63423 +#include <linux/tty.h>
63424 +#include <linux/fs.h>
63425 +#include <linux/grinternal.h>
63426 +
63427 +#ifdef CONFIG_TREE_PREEMPT_RCU
63428 +#define DISABLE_PREEMPT() preempt_disable()
63429 +#define ENABLE_PREEMPT() preempt_enable()
63430 +#else
63431 +#define DISABLE_PREEMPT()
63432 +#define ENABLE_PREEMPT()
63433 +#endif
63434 +
63435 +#define BEGIN_LOCKS(x) \
63436 + DISABLE_PREEMPT(); \
63437 + rcu_read_lock(); \
63438 + read_lock(&tasklist_lock); \
63439 + read_lock(&grsec_exec_file_lock); \
63440 + if (x != GR_DO_AUDIT) \
63441 + spin_lock(&grsec_alert_lock); \
63442 + else \
63443 + spin_lock(&grsec_audit_lock)
63444 +
63445 +#define END_LOCKS(x) \
63446 + if (x != GR_DO_AUDIT) \
63447 + spin_unlock(&grsec_alert_lock); \
63448 + else \
63449 + spin_unlock(&grsec_audit_lock); \
63450 + read_unlock(&grsec_exec_file_lock); \
63451 + read_unlock(&tasklist_lock); \
63452 + rcu_read_unlock(); \
63453 + ENABLE_PREEMPT(); \
63454 + if (x == GR_DONT_AUDIT) \
63455 + gr_handle_alertkill(current)
63456 +
63457 +enum {
63458 + FLOODING,
63459 + NO_FLOODING
63460 +};
63461 +
63462 +extern char *gr_alert_log_fmt;
63463 +extern char *gr_audit_log_fmt;
63464 +extern char *gr_alert_log_buf;
63465 +extern char *gr_audit_log_buf;
63466 +
63467 +static int gr_log_start(int audit)
63468 +{
63469 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
63470 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
63471 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63472 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
63473 + unsigned long curr_secs = get_seconds();
63474 +
63475 + if (audit == GR_DO_AUDIT)
63476 + goto set_fmt;
63477 +
63478 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
63479 + grsec_alert_wtime = curr_secs;
63480 + grsec_alert_fyet = 0;
63481 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
63482 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
63483 + grsec_alert_fyet++;
63484 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
63485 + grsec_alert_wtime = curr_secs;
63486 + grsec_alert_fyet++;
63487 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
63488 + return FLOODING;
63489 + }
63490 + else return FLOODING;
63491 +
63492 +set_fmt:
63493 +#endif
63494 + memset(buf, 0, PAGE_SIZE);
63495 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
63496 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
63497 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
63498 + } else if (current->signal->curr_ip) {
63499 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
63500 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
63501 + } else if (gr_acl_is_enabled()) {
63502 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
63503 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
63504 + } else {
63505 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
63506 + strcpy(buf, fmt);
63507 + }
63508 +
63509 + return NO_FLOODING;
63510 +}
63511 +
63512 +static void gr_log_middle(int audit, const char *msg, va_list ap)
63513 + __attribute__ ((format (printf, 2, 0)));
63514 +
63515 +static void gr_log_middle(int audit, const char *msg, va_list ap)
63516 +{
63517 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63518 + unsigned int len = strlen(buf);
63519 +
63520 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
63521 +
63522 + return;
63523 +}
63524 +
63525 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
63526 + __attribute__ ((format (printf, 2, 3)));
63527 +
63528 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
63529 +{
63530 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63531 + unsigned int len = strlen(buf);
63532 + va_list ap;
63533 +
63534 + va_start(ap, msg);
63535 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
63536 + va_end(ap);
63537 +
63538 + return;
63539 +}
63540 +
63541 +static void gr_log_end(int audit, int append_default)
63542 +{
63543 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63544 +
63545 + if (append_default) {
63546 + unsigned int len = strlen(buf);
63547 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
63548 + }
63549 +
63550 + printk("%s\n", buf);
63551 +
63552 + return;
63553 +}
63554 +
63555 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
63556 +{
63557 + int logtype;
63558 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
63559 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
63560 + void *voidptr = NULL;
63561 + int num1 = 0, num2 = 0;
63562 + unsigned long ulong1 = 0, ulong2 = 0;
63563 + struct dentry *dentry = NULL;
63564 + struct vfsmount *mnt = NULL;
63565 + struct file *file = NULL;
63566 + struct task_struct *task = NULL;
63567 + const struct cred *cred, *pcred;
63568 + va_list ap;
63569 +
63570 + BEGIN_LOCKS(audit);
63571 + logtype = gr_log_start(audit);
63572 + if (logtype == FLOODING) {
63573 + END_LOCKS(audit);
63574 + return;
63575 + }
63576 + va_start(ap, argtypes);
63577 + switch (argtypes) {
63578 + case GR_TTYSNIFF:
63579 + task = va_arg(ap, struct task_struct *);
63580 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
63581 + break;
63582 + case GR_SYSCTL_HIDDEN:
63583 + str1 = va_arg(ap, char *);
63584 + gr_log_middle_varargs(audit, msg, result, str1);
63585 + break;
63586 + case GR_RBAC:
63587 + dentry = va_arg(ap, struct dentry *);
63588 + mnt = va_arg(ap, struct vfsmount *);
63589 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
63590 + break;
63591 + case GR_RBAC_STR:
63592 + dentry = va_arg(ap, struct dentry *);
63593 + mnt = va_arg(ap, struct vfsmount *);
63594 + str1 = va_arg(ap, char *);
63595 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
63596 + break;
63597 + case GR_STR_RBAC:
63598 + str1 = va_arg(ap, char *);
63599 + dentry = va_arg(ap, struct dentry *);
63600 + mnt = va_arg(ap, struct vfsmount *);
63601 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
63602 + break;
63603 + case GR_RBAC_MODE2:
63604 + dentry = va_arg(ap, struct dentry *);
63605 + mnt = va_arg(ap, struct vfsmount *);
63606 + str1 = va_arg(ap, char *);
63607 + str2 = va_arg(ap, char *);
63608 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
63609 + break;
63610 + case GR_RBAC_MODE3:
63611 + dentry = va_arg(ap, struct dentry *);
63612 + mnt = va_arg(ap, struct vfsmount *);
63613 + str1 = va_arg(ap, char *);
63614 + str2 = va_arg(ap, char *);
63615 + str3 = va_arg(ap, char *);
63616 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
63617 + break;
63618 + case GR_FILENAME:
63619 + dentry = va_arg(ap, struct dentry *);
63620 + mnt = va_arg(ap, struct vfsmount *);
63621 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
63622 + break;
63623 + case GR_STR_FILENAME:
63624 + str1 = va_arg(ap, char *);
63625 + dentry = va_arg(ap, struct dentry *);
63626 + mnt = va_arg(ap, struct vfsmount *);
63627 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
63628 + break;
63629 + case GR_FILENAME_STR:
63630 + dentry = va_arg(ap, struct dentry *);
63631 + mnt = va_arg(ap, struct vfsmount *);
63632 + str1 = va_arg(ap, char *);
63633 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
63634 + break;
63635 + case GR_FILENAME_TWO_INT:
63636 + dentry = va_arg(ap, struct dentry *);
63637 + mnt = va_arg(ap, struct vfsmount *);
63638 + num1 = va_arg(ap, int);
63639 + num2 = va_arg(ap, int);
63640 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
63641 + break;
63642 + case GR_FILENAME_TWO_INT_STR:
63643 + dentry = va_arg(ap, struct dentry *);
63644 + mnt = va_arg(ap, struct vfsmount *);
63645 + num1 = va_arg(ap, int);
63646 + num2 = va_arg(ap, int);
63647 + str1 = va_arg(ap, char *);
63648 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
63649 + break;
63650 + case GR_TEXTREL:
63651 + file = va_arg(ap, struct file *);
63652 + ulong1 = va_arg(ap, unsigned long);
63653 + ulong2 = va_arg(ap, unsigned long);
63654 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
63655 + break;
63656 + case GR_PTRACE:
63657 + task = va_arg(ap, struct task_struct *);
63658 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
63659 + break;
63660 + case GR_RESOURCE:
63661 + task = va_arg(ap, struct task_struct *);
63662 + cred = __task_cred(task);
63663 + pcred = __task_cred(task->real_parent);
63664 + ulong1 = va_arg(ap, unsigned long);
63665 + str1 = va_arg(ap, char *);
63666 + ulong2 = va_arg(ap, unsigned long);
63667 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
63668 + break;
63669 + case GR_CAP:
63670 + task = va_arg(ap, struct task_struct *);
63671 + cred = __task_cred(task);
63672 + pcred = __task_cred(task->real_parent);
63673 + str1 = va_arg(ap, char *);
63674 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
63675 + break;
63676 + case GR_SIG:
63677 + str1 = va_arg(ap, char *);
63678 + voidptr = va_arg(ap, void *);
63679 + gr_log_middle_varargs(audit, msg, str1, voidptr);
63680 + break;
63681 + case GR_SIG2:
63682 + task = va_arg(ap, struct task_struct *);
63683 + cred = __task_cred(task);
63684 + pcred = __task_cred(task->real_parent);
63685 + num1 = va_arg(ap, int);
63686 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
63687 + break;
63688 + case GR_CRASH1:
63689 + task = va_arg(ap, struct task_struct *);
63690 + cred = __task_cred(task);
63691 + pcred = __task_cred(task->real_parent);
63692 + ulong1 = va_arg(ap, unsigned long);
63693 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
63694 + break;
63695 + case GR_CRASH2:
63696 + task = va_arg(ap, struct task_struct *);
63697 + cred = __task_cred(task);
63698 + pcred = __task_cred(task->real_parent);
63699 + ulong1 = va_arg(ap, unsigned long);
63700 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
63701 + break;
63702 + case GR_RWXMAP:
63703 + file = va_arg(ap, struct file *);
63704 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
63705 + break;
63706 + case GR_PSACCT:
63707 + {
63708 + unsigned int wday, cday;
63709 + __u8 whr, chr;
63710 + __u8 wmin, cmin;
63711 + __u8 wsec, csec;
63712 + char cur_tty[64] = { 0 };
63713 + char parent_tty[64] = { 0 };
63714 +
63715 + task = va_arg(ap, struct task_struct *);
63716 + wday = va_arg(ap, unsigned int);
63717 + cday = va_arg(ap, unsigned int);
63718 + whr = va_arg(ap, int);
63719 + chr = va_arg(ap, int);
63720 + wmin = va_arg(ap, int);
63721 + cmin = va_arg(ap, int);
63722 + wsec = va_arg(ap, int);
63723 + csec = va_arg(ap, int);
63724 + ulong1 = va_arg(ap, unsigned long);
63725 + cred = __task_cred(task);
63726 + pcred = __task_cred(task->real_parent);
63727 +
63728 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
63729 + }
63730 + break;
63731 + default:
63732 + gr_log_middle(audit, msg, ap);
63733 + }
63734 + va_end(ap);
63735 + // these don't need DEFAULTSECARGS printed on the end
63736 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
63737 + gr_log_end(audit, 0);
63738 + else
63739 + gr_log_end(audit, 1);
63740 + END_LOCKS(audit);
63741 +}
63742 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
63743 new file mode 100644
63744 index 0000000..6c0416b
63745 --- /dev/null
63746 +++ b/grsecurity/grsec_mem.c
63747 @@ -0,0 +1,33 @@
63748 +#include <linux/kernel.h>
63749 +#include <linux/sched.h>
63750 +#include <linux/mm.h>
63751 +#include <linux/mman.h>
63752 +#include <linux/grinternal.h>
63753 +
63754 +void
63755 +gr_handle_ioperm(void)
63756 +{
63757 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
63758 + return;
63759 +}
63760 +
63761 +void
63762 +gr_handle_iopl(void)
63763 +{
63764 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
63765 + return;
63766 +}
63767 +
63768 +void
63769 +gr_handle_mem_readwrite(u64 from, u64 to)
63770 +{
63771 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
63772 + return;
63773 +}
63774 +
63775 +void
63776 +gr_handle_vm86(void)
63777 +{
63778 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
63779 + return;
63780 +}
63781 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
63782 new file mode 100644
63783 index 0000000..2131422
63784 --- /dev/null
63785 +++ b/grsecurity/grsec_mount.c
63786 @@ -0,0 +1,62 @@
63787 +#include <linux/kernel.h>
63788 +#include <linux/sched.h>
63789 +#include <linux/mount.h>
63790 +#include <linux/grsecurity.h>
63791 +#include <linux/grinternal.h>
63792 +
63793 +void
63794 +gr_log_remount(const char *devname, const int retval)
63795 +{
63796 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
63797 + if (grsec_enable_mount && (retval >= 0))
63798 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
63799 +#endif
63800 + return;
63801 +}
63802 +
63803 +void
63804 +gr_log_unmount(const char *devname, const int retval)
63805 +{
63806 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
63807 + if (grsec_enable_mount && (retval >= 0))
63808 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
63809 +#endif
63810 + return;
63811 +}
63812 +
63813 +void
63814 +gr_log_mount(const char *from, const char *to, const int retval)
63815 +{
63816 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
63817 + if (grsec_enable_mount && (retval >= 0))
63818 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
63819 +#endif
63820 + return;
63821 +}
63822 +
63823 +int
63824 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
63825 +{
63826 +#ifdef CONFIG_GRKERNSEC_ROFS
63827 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
63828 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
63829 + return -EPERM;
63830 + } else
63831 + return 0;
63832 +#endif
63833 + return 0;
63834 +}
63835 +
63836 +int
63837 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
63838 +{
63839 +#ifdef CONFIG_GRKERNSEC_ROFS
63840 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
63841 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
63842 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
63843 + return -EPERM;
63844 + } else
63845 + return 0;
63846 +#endif
63847 + return 0;
63848 +}
63849 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
63850 new file mode 100644
63851 index 0000000..a3b12a0
63852 --- /dev/null
63853 +++ b/grsecurity/grsec_pax.c
63854 @@ -0,0 +1,36 @@
63855 +#include <linux/kernel.h>
63856 +#include <linux/sched.h>
63857 +#include <linux/mm.h>
63858 +#include <linux/file.h>
63859 +#include <linux/grinternal.h>
63860 +#include <linux/grsecurity.h>
63861 +
63862 +void
63863 +gr_log_textrel(struct vm_area_struct * vma)
63864 +{
63865 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
63866 + if (grsec_enable_audit_textrel)
63867 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
63868 +#endif
63869 + return;
63870 +}
63871 +
63872 +void
63873 +gr_log_rwxmmap(struct file *file)
63874 +{
63875 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
63876 + if (grsec_enable_log_rwxmaps)
63877 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
63878 +#endif
63879 + return;
63880 +}
63881 +
63882 +void
63883 +gr_log_rwxmprotect(struct file *file)
63884 +{
63885 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
63886 + if (grsec_enable_log_rwxmaps)
63887 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
63888 +#endif
63889 + return;
63890 +}
63891 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
63892 new file mode 100644
63893 index 0000000..78f8733
63894 --- /dev/null
63895 +++ b/grsecurity/grsec_ptrace.c
63896 @@ -0,0 +1,30 @@
63897 +#include <linux/kernel.h>
63898 +#include <linux/sched.h>
63899 +#include <linux/grinternal.h>
63900 +#include <linux/security.h>
63901 +
63902 +void
63903 +gr_audit_ptrace(struct task_struct *task)
63904 +{
63905 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
63906 + if (grsec_enable_audit_ptrace)
63907 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
63908 +#endif
63909 + return;
63910 +}
63911 +
63912 +int
63913 +gr_ptrace_readexec(struct file *file, int unsafe_flags)
63914 +{
63915 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
63916 + const struct dentry *dentry = file->f_path.dentry;
63917 + const struct vfsmount *mnt = file->f_path.mnt;
63918 +
63919 + if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
63920 + (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
63921 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
63922 + return -EACCES;
63923 + }
63924 +#endif
63925 + return 0;
63926 +}
63927 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
63928 new file mode 100644
63929 index 0000000..c648492
63930 --- /dev/null
63931 +++ b/grsecurity/grsec_sig.c
63932 @@ -0,0 +1,206 @@
63933 +#include <linux/kernel.h>
63934 +#include <linux/sched.h>
63935 +#include <linux/delay.h>
63936 +#include <linux/grsecurity.h>
63937 +#include <linux/grinternal.h>
63938 +#include <linux/hardirq.h>
63939 +
63940 +char *signames[] = {
63941 + [SIGSEGV] = "Segmentation fault",
63942 + [SIGILL] = "Illegal instruction",
63943 + [SIGABRT] = "Abort",
63944 + [SIGBUS] = "Invalid alignment/Bus error"
63945 +};
63946 +
63947 +void
63948 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
63949 +{
63950 +#ifdef CONFIG_GRKERNSEC_SIGNAL
63951 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
63952 + (sig == SIGABRT) || (sig == SIGBUS))) {
63953 + if (t->pid == current->pid) {
63954 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
63955 + } else {
63956 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
63957 + }
63958 + }
63959 +#endif
63960 + return;
63961 +}
63962 +
63963 +int
63964 +gr_handle_signal(const struct task_struct *p, const int sig)
63965 +{
63966 +#ifdef CONFIG_GRKERNSEC
63967 + /* ignore the 0 signal for protected task checks */
63968 + if (current->pid > 1 && sig && gr_check_protected_task(p)) {
63969 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
63970 + return -EPERM;
63971 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
63972 + return -EPERM;
63973 + }
63974 +#endif
63975 + return 0;
63976 +}
63977 +
63978 +#ifdef CONFIG_GRKERNSEC
63979 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
63980 +
63981 +int gr_fake_force_sig(int sig, struct task_struct *t)
63982 +{
63983 + unsigned long int flags;
63984 + int ret, blocked, ignored;
63985 + struct k_sigaction *action;
63986 +
63987 + spin_lock_irqsave(&t->sighand->siglock, flags);
63988 + action = &t->sighand->action[sig-1];
63989 + ignored = action->sa.sa_handler == SIG_IGN;
63990 + blocked = sigismember(&t->blocked, sig);
63991 + if (blocked || ignored) {
63992 + action->sa.sa_handler = SIG_DFL;
63993 + if (blocked) {
63994 + sigdelset(&t->blocked, sig);
63995 + recalc_sigpending_and_wake(t);
63996 + }
63997 + }
63998 + if (action->sa.sa_handler == SIG_DFL)
63999 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
64000 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
64001 +
64002 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
64003 +
64004 + return ret;
64005 +}
64006 +#endif
64007 +
64008 +#ifdef CONFIG_GRKERNSEC_BRUTE
64009 +#define GR_USER_BAN_TIME (15 * 60)
64010 +
64011 +static int __get_dumpable(unsigned long mm_flags)
64012 +{
64013 + int ret;
64014 +
64015 + ret = mm_flags & MMF_DUMPABLE_MASK;
64016 + return (ret >= 2) ? 2 : ret;
64017 +}
64018 +#endif
64019 +
64020 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
64021 +{
64022 +#ifdef CONFIG_GRKERNSEC_BRUTE
64023 + uid_t uid = 0;
64024 +
64025 + if (!grsec_enable_brute)
64026 + return;
64027 +
64028 + rcu_read_lock();
64029 + read_lock(&tasklist_lock);
64030 + read_lock(&grsec_exec_file_lock);
64031 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
64032 + p->real_parent->brute = 1;
64033 + else {
64034 + const struct cred *cred = __task_cred(p), *cred2;
64035 + struct task_struct *tsk, *tsk2;
64036 +
64037 + if (!__get_dumpable(mm_flags) && cred->uid) {
64038 + struct user_struct *user;
64039 +
64040 + uid = cred->uid;
64041 +
64042 + /* this is put upon execution past expiration */
64043 + user = find_user(uid);
64044 + if (user == NULL)
64045 + goto unlock;
64046 + user->banned = 1;
64047 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
64048 + if (user->ban_expires == ~0UL)
64049 + user->ban_expires--;
64050 +
64051 + do_each_thread(tsk2, tsk) {
64052 + cred2 = __task_cred(tsk);
64053 + if (tsk != p && cred2->uid == uid)
64054 + gr_fake_force_sig(SIGKILL, tsk);
64055 + } while_each_thread(tsk2, tsk);
64056 + }
64057 + }
64058 +unlock:
64059 + read_unlock(&grsec_exec_file_lock);
64060 + read_unlock(&tasklist_lock);
64061 + rcu_read_unlock();
64062 +
64063 + if (uid)
64064 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
64065 +#endif
64066 + return;
64067 +}
64068 +
64069 +void gr_handle_brute_check(void)
64070 +{
64071 +#ifdef CONFIG_GRKERNSEC_BRUTE
64072 + if (current->brute)
64073 + msleep(30 * 1000);
64074 +#endif
64075 + return;
64076 +}
64077 +
64078 +void gr_handle_kernel_exploit(void)
64079 +{
64080 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
64081 + const struct cred *cred;
64082 + struct task_struct *tsk, *tsk2;
64083 + struct user_struct *user;
64084 + uid_t uid;
64085 +
64086 + if (in_irq() || in_serving_softirq() || in_nmi())
64087 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
64088 +
64089 + uid = current_uid();
64090 +
64091 + if (uid == 0)
64092 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
64093 + else {
64094 + /* kill all the processes of this user, hold a reference
64095 + to their creds struct, and prevent them from creating
64096 + another process until system reset
64097 + */
64098 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
64099 + /* we intentionally leak this ref */
64100 + user = get_uid(current->cred->user);
64101 + if (user) {
64102 + user->banned = 1;
64103 + user->ban_expires = ~0UL;
64104 + }
64105 +
64106 + read_lock(&tasklist_lock);
64107 + do_each_thread(tsk2, tsk) {
64108 + cred = __task_cred(tsk);
64109 + if (cred->uid == uid)
64110 + gr_fake_force_sig(SIGKILL, tsk);
64111 + } while_each_thread(tsk2, tsk);
64112 + read_unlock(&tasklist_lock);
64113 + }
64114 +#endif
64115 +}
64116 +
64117 +int __gr_process_user_ban(struct user_struct *user)
64118 +{
64119 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
64120 + if (unlikely(user->banned)) {
64121 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
64122 + user->banned = 0;
64123 + user->ban_expires = 0;
64124 + free_uid(user);
64125 + } else
64126 + return -EPERM;
64127 + }
64128 +#endif
64129 + return 0;
64130 +}
64131 +
64132 +int gr_process_user_ban(void)
64133 +{
64134 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
64135 + return __gr_process_user_ban(current->cred->user);
64136 +#endif
64137 + return 0;
64138 +}
64139 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
64140 new file mode 100644
64141 index 0000000..7512ea9
64142 --- /dev/null
64143 +++ b/grsecurity/grsec_sock.c
64144 @@ -0,0 +1,275 @@
64145 +#include <linux/kernel.h>
64146 +#include <linux/module.h>
64147 +#include <linux/sched.h>
64148 +#include <linux/file.h>
64149 +#include <linux/net.h>
64150 +#include <linux/in.h>
64151 +#include <linux/ip.h>
64152 +#include <net/sock.h>
64153 +#include <net/inet_sock.h>
64154 +#include <linux/grsecurity.h>
64155 +#include <linux/grinternal.h>
64156 +#include <linux/gracl.h>
64157 +
64158 +kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
64159 +EXPORT_SYMBOL(gr_cap_rtnetlink);
64160 +
64161 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
64162 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
64163 +
64164 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
64165 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
64166 +
64167 +#ifdef CONFIG_UNIX_MODULE
64168 +EXPORT_SYMBOL(gr_acl_handle_unix);
64169 +EXPORT_SYMBOL(gr_acl_handle_mknod);
64170 +EXPORT_SYMBOL(gr_handle_chroot_unix);
64171 +EXPORT_SYMBOL(gr_handle_create);
64172 +#endif
64173 +
64174 +#ifdef CONFIG_GRKERNSEC
64175 +#define gr_conn_table_size 32749
64176 +struct conn_table_entry {
64177 + struct conn_table_entry *next;
64178 + struct signal_struct *sig;
64179 +};
64180 +
64181 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
64182 +DEFINE_SPINLOCK(gr_conn_table_lock);
64183 +
64184 +extern const char * gr_socktype_to_name(unsigned char type);
64185 +extern const char * gr_proto_to_name(unsigned char proto);
64186 +extern const char * gr_sockfamily_to_name(unsigned char family);
64187 +
64188 +static __inline__ int
64189 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
64190 +{
64191 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
64192 +}
64193 +
64194 +static __inline__ int
64195 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
64196 + __u16 sport, __u16 dport)
64197 +{
64198 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
64199 + sig->gr_sport == sport && sig->gr_dport == dport))
64200 + return 1;
64201 + else
64202 + return 0;
64203 +}
64204 +
64205 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
64206 +{
64207 + struct conn_table_entry **match;
64208 + unsigned int index;
64209 +
64210 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
64211 + sig->gr_sport, sig->gr_dport,
64212 + gr_conn_table_size);
64213 +
64214 + newent->sig = sig;
64215 +
64216 + match = &gr_conn_table[index];
64217 + newent->next = *match;
64218 + *match = newent;
64219 +
64220 + return;
64221 +}
64222 +
64223 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
64224 +{
64225 + struct conn_table_entry *match, *last = NULL;
64226 + unsigned int index;
64227 +
64228 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
64229 + sig->gr_sport, sig->gr_dport,
64230 + gr_conn_table_size);
64231 +
64232 + match = gr_conn_table[index];
64233 + while (match && !conn_match(match->sig,
64234 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
64235 + sig->gr_dport)) {
64236 + last = match;
64237 + match = match->next;
64238 + }
64239 +
64240 + if (match) {
64241 + if (last)
64242 + last->next = match->next;
64243 + else
64244 + gr_conn_table[index] = NULL;
64245 + kfree(match);
64246 + }
64247 +
64248 + return;
64249 +}
64250 +
64251 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
64252 + __u16 sport, __u16 dport)
64253 +{
64254 + struct conn_table_entry *match;
64255 + unsigned int index;
64256 +
64257 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
64258 +
64259 + match = gr_conn_table[index];
64260 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
64261 + match = match->next;
64262 +
64263 + if (match)
64264 + return match->sig;
64265 + else
64266 + return NULL;
64267 +}
64268 +
64269 +#endif
64270 +
64271 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
64272 +{
64273 +#ifdef CONFIG_GRKERNSEC
64274 + struct signal_struct *sig = task->signal;
64275 + struct conn_table_entry *newent;
64276 +
64277 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
64278 + if (newent == NULL)
64279 + return;
64280 + /* no bh lock needed since we are called with bh disabled */
64281 + spin_lock(&gr_conn_table_lock);
64282 + gr_del_task_from_ip_table_nolock(sig);
64283 + sig->gr_saddr = inet->rcv_saddr;
64284 + sig->gr_daddr = inet->daddr;
64285 + sig->gr_sport = inet->sport;
64286 + sig->gr_dport = inet->dport;
64287 + gr_add_to_task_ip_table_nolock(sig, newent);
64288 + spin_unlock(&gr_conn_table_lock);
64289 +#endif
64290 + return;
64291 +}
64292 +
64293 +void gr_del_task_from_ip_table(struct task_struct *task)
64294 +{
64295 +#ifdef CONFIG_GRKERNSEC
64296 + spin_lock_bh(&gr_conn_table_lock);
64297 + gr_del_task_from_ip_table_nolock(task->signal);
64298 + spin_unlock_bh(&gr_conn_table_lock);
64299 +#endif
64300 + return;
64301 +}
64302 +
64303 +void
64304 +gr_attach_curr_ip(const struct sock *sk)
64305 +{
64306 +#ifdef CONFIG_GRKERNSEC
64307 + struct signal_struct *p, *set;
64308 + const struct inet_sock *inet = inet_sk(sk);
64309 +
64310 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
64311 + return;
64312 +
64313 + set = current->signal;
64314 +
64315 + spin_lock_bh(&gr_conn_table_lock);
64316 + p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
64317 + inet->dport, inet->sport);
64318 + if (unlikely(p != NULL)) {
64319 + set->curr_ip = p->curr_ip;
64320 + set->used_accept = 1;
64321 + gr_del_task_from_ip_table_nolock(p);
64322 + spin_unlock_bh(&gr_conn_table_lock);
64323 + return;
64324 + }
64325 + spin_unlock_bh(&gr_conn_table_lock);
64326 +
64327 + set->curr_ip = inet->daddr;
64328 + set->used_accept = 1;
64329 +#endif
64330 + return;
64331 +}
64332 +
64333 +int
64334 +gr_handle_sock_all(const int family, const int type, const int protocol)
64335 +{
64336 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
64337 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
64338 + (family != AF_UNIX)) {
64339 + if (family == AF_INET)
64340 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
64341 + else
64342 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
64343 + return -EACCES;
64344 + }
64345 +#endif
64346 + return 0;
64347 +}
64348 +
64349 +int
64350 +gr_handle_sock_server(const struct sockaddr *sck)
64351 +{
64352 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64353 + if (grsec_enable_socket_server &&
64354 + in_group_p(grsec_socket_server_gid) &&
64355 + sck && (sck->sa_family != AF_UNIX) &&
64356 + (sck->sa_family != AF_LOCAL)) {
64357 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
64358 + return -EACCES;
64359 + }
64360 +#endif
64361 + return 0;
64362 +}
64363 +
64364 +int
64365 +gr_handle_sock_server_other(const struct sock *sck)
64366 +{
64367 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64368 + if (grsec_enable_socket_server &&
64369 + in_group_p(grsec_socket_server_gid) &&
64370 + sck && (sck->sk_family != AF_UNIX) &&
64371 + (sck->sk_family != AF_LOCAL)) {
64372 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
64373 + return -EACCES;
64374 + }
64375 +#endif
64376 + return 0;
64377 +}
64378 +
64379 +int
64380 +gr_handle_sock_client(const struct sockaddr *sck)
64381 +{
64382 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
64383 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
64384 + sck && (sck->sa_family != AF_UNIX) &&
64385 + (sck->sa_family != AF_LOCAL)) {
64386 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
64387 + return -EACCES;
64388 + }
64389 +#endif
64390 + return 0;
64391 +}
64392 +
64393 +kernel_cap_t
64394 +gr_cap_rtnetlink(struct sock *sock)
64395 +{
64396 +#ifdef CONFIG_GRKERNSEC
64397 + if (!gr_acl_is_enabled())
64398 + return current_cap();
64399 + else if (sock->sk_protocol == NETLINK_ISCSI &&
64400 + cap_raised(current_cap(), CAP_SYS_ADMIN) &&
64401 + gr_is_capable(CAP_SYS_ADMIN))
64402 + return current_cap();
64403 + else if (sock->sk_protocol == NETLINK_AUDIT &&
64404 + cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
64405 + gr_is_capable(CAP_AUDIT_WRITE) &&
64406 + cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
64407 + gr_is_capable(CAP_AUDIT_CONTROL))
64408 + return current_cap();
64409 + else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
64410 + ((sock->sk_protocol == NETLINK_ROUTE) ?
64411 + gr_is_capable_nolog(CAP_NET_ADMIN) :
64412 + gr_is_capable(CAP_NET_ADMIN)))
64413 + return current_cap();
64414 + else
64415 + return __cap_empty_set;
64416 +#else
64417 + return current_cap();
64418 +#endif
64419 +}
64420 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
64421 new file mode 100644
64422 index 0000000..31f3258
64423 --- /dev/null
64424 +++ b/grsecurity/grsec_sysctl.c
64425 @@ -0,0 +1,499 @@
64426 +#include <linux/kernel.h>
64427 +#include <linux/sched.h>
64428 +#include <linux/sysctl.h>
64429 +#include <linux/grsecurity.h>
64430 +#include <linux/grinternal.h>
64431 +
64432 +int
64433 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
64434 +{
64435 +#ifdef CONFIG_GRKERNSEC_SYSCTL
64436 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
64437 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
64438 + return -EACCES;
64439 + }
64440 +#endif
64441 + return 0;
64442 +}
64443 +
64444 +#ifdef CONFIG_GRKERNSEC_ROFS
64445 +static int __maybe_unused one = 1;
64446 +#endif
64447 +
64448 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
64449 +ctl_table grsecurity_table[] = {
64450 +#ifdef CONFIG_GRKERNSEC_SYSCTL
64451 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
64452 +#ifdef CONFIG_GRKERNSEC_IO
64453 + {
64454 + .ctl_name = CTL_UNNUMBERED,
64455 + .procname = "disable_priv_io",
64456 + .data = &grsec_disable_privio,
64457 + .maxlen = sizeof(int),
64458 + .mode = 0600,
64459 + .proc_handler = &proc_dointvec,
64460 + },
64461 +#endif
64462 +#endif
64463 +#ifdef CONFIG_GRKERNSEC_LINK
64464 + {
64465 + .ctl_name = CTL_UNNUMBERED,
64466 + .procname = "linking_restrictions",
64467 + .data = &grsec_enable_link,
64468 + .maxlen = sizeof(int),
64469 + .mode = 0600,
64470 + .proc_handler = &proc_dointvec,
64471 + },
64472 +#endif
64473 +#ifdef CONFIG_GRKERNSEC_BRUTE
64474 + {
64475 + .ctl_name = CTL_UNNUMBERED,
64476 + .procname = "deter_bruteforce",
64477 + .data = &grsec_enable_brute,
64478 + .maxlen = sizeof(int),
64479 + .mode = 0600,
64480 + .proc_handler = &proc_dointvec,
64481 + },
64482 +#endif
64483 +#ifdef CONFIG_GRKERNSEC_FIFO
64484 + {
64485 + .ctl_name = CTL_UNNUMBERED,
64486 + .procname = "fifo_restrictions",
64487 + .data = &grsec_enable_fifo,
64488 + .maxlen = sizeof(int),
64489 + .mode = 0600,
64490 + .proc_handler = &proc_dointvec,
64491 + },
64492 +#endif
64493 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
64494 + {
64495 + .ctl_name = CTL_UNNUMBERED,
64496 + .procname = "ptrace_readexec",
64497 + .data = &grsec_enable_ptrace_readexec,
64498 + .maxlen = sizeof(int),
64499 + .mode = 0600,
64500 + .proc_handler = &proc_dointvec,
64501 + },
64502 +#endif
64503 +#ifdef CONFIG_GRKERNSEC_SETXID
64504 + {
64505 + .ctl_name = CTL_UNNUMBERED,
64506 + .procname = "consistent_setxid",
64507 + .data = &grsec_enable_setxid,
64508 + .maxlen = sizeof(int),
64509 + .mode = 0600,
64510 + .proc_handler = &proc_dointvec,
64511 + },
64512 +#endif
64513 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
64514 + {
64515 + .ctl_name = CTL_UNNUMBERED,
64516 + .procname = "ip_blackhole",
64517 + .data = &grsec_enable_blackhole,
64518 + .maxlen = sizeof(int),
64519 + .mode = 0600,
64520 + .proc_handler = &proc_dointvec,
64521 + },
64522 + {
64523 + .ctl_name = CTL_UNNUMBERED,
64524 + .procname = "lastack_retries",
64525 + .data = &grsec_lastack_retries,
64526 + .maxlen = sizeof(int),
64527 + .mode = 0600,
64528 + .proc_handler = &proc_dointvec,
64529 + },
64530 +#endif
64531 +#ifdef CONFIG_GRKERNSEC_EXECLOG
64532 + {
64533 + .ctl_name = CTL_UNNUMBERED,
64534 + .procname = "exec_logging",
64535 + .data = &grsec_enable_execlog,
64536 + .maxlen = sizeof(int),
64537 + .mode = 0600,
64538 + .proc_handler = &proc_dointvec,
64539 + },
64540 +#endif
64541 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
64542 + {
64543 + .ctl_name = CTL_UNNUMBERED,
64544 + .procname = "rwxmap_logging",
64545 + .data = &grsec_enable_log_rwxmaps,
64546 + .maxlen = sizeof(int),
64547 + .mode = 0600,
64548 + .proc_handler = &proc_dointvec,
64549 + },
64550 +#endif
64551 +#ifdef CONFIG_GRKERNSEC_SIGNAL
64552 + {
64553 + .ctl_name = CTL_UNNUMBERED,
64554 + .procname = "signal_logging",
64555 + .data = &grsec_enable_signal,
64556 + .maxlen = sizeof(int),
64557 + .mode = 0600,
64558 + .proc_handler = &proc_dointvec,
64559 + },
64560 +#endif
64561 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
64562 + {
64563 + .ctl_name = CTL_UNNUMBERED,
64564 + .procname = "forkfail_logging",
64565 + .data = &grsec_enable_forkfail,
64566 + .maxlen = sizeof(int),
64567 + .mode = 0600,
64568 + .proc_handler = &proc_dointvec,
64569 + },
64570 +#endif
64571 +#ifdef CONFIG_GRKERNSEC_TIME
64572 + {
64573 + .ctl_name = CTL_UNNUMBERED,
64574 + .procname = "timechange_logging",
64575 + .data = &grsec_enable_time,
64576 + .maxlen = sizeof(int),
64577 + .mode = 0600,
64578 + .proc_handler = &proc_dointvec,
64579 + },
64580 +#endif
64581 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
64582 + {
64583 + .ctl_name = CTL_UNNUMBERED,
64584 + .procname = "chroot_deny_shmat",
64585 + .data = &grsec_enable_chroot_shmat,
64586 + .maxlen = sizeof(int),
64587 + .mode = 0600,
64588 + .proc_handler = &proc_dointvec,
64589 + },
64590 +#endif
64591 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
64592 + {
64593 + .ctl_name = CTL_UNNUMBERED,
64594 + .procname = "chroot_deny_unix",
64595 + .data = &grsec_enable_chroot_unix,
64596 + .maxlen = sizeof(int),
64597 + .mode = 0600,
64598 + .proc_handler = &proc_dointvec,
64599 + },
64600 +#endif
64601 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
64602 + {
64603 + .ctl_name = CTL_UNNUMBERED,
64604 + .procname = "chroot_deny_mount",
64605 + .data = &grsec_enable_chroot_mount,
64606 + .maxlen = sizeof(int),
64607 + .mode = 0600,
64608 + .proc_handler = &proc_dointvec,
64609 + },
64610 +#endif
64611 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
64612 + {
64613 + .ctl_name = CTL_UNNUMBERED,
64614 + .procname = "chroot_deny_fchdir",
64615 + .data = &grsec_enable_chroot_fchdir,
64616 + .maxlen = sizeof(int),
64617 + .mode = 0600,
64618 + .proc_handler = &proc_dointvec,
64619 + },
64620 +#endif
64621 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
64622 + {
64623 + .ctl_name = CTL_UNNUMBERED,
64624 + .procname = "chroot_deny_chroot",
64625 + .data = &grsec_enable_chroot_double,
64626 + .maxlen = sizeof(int),
64627 + .mode = 0600,
64628 + .proc_handler = &proc_dointvec,
64629 + },
64630 +#endif
64631 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
64632 + {
64633 + .ctl_name = CTL_UNNUMBERED,
64634 + .procname = "chroot_deny_pivot",
64635 + .data = &grsec_enable_chroot_pivot,
64636 + .maxlen = sizeof(int),
64637 + .mode = 0600,
64638 + .proc_handler = &proc_dointvec,
64639 + },
64640 +#endif
64641 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
64642 + {
64643 + .ctl_name = CTL_UNNUMBERED,
64644 + .procname = "chroot_enforce_chdir",
64645 + .data = &grsec_enable_chroot_chdir,
64646 + .maxlen = sizeof(int),
64647 + .mode = 0600,
64648 + .proc_handler = &proc_dointvec,
64649 + },
64650 +#endif
64651 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
64652 + {
64653 + .ctl_name = CTL_UNNUMBERED,
64654 + .procname = "chroot_deny_chmod",
64655 + .data = &grsec_enable_chroot_chmod,
64656 + .maxlen = sizeof(int),
64657 + .mode = 0600,
64658 + .proc_handler = &proc_dointvec,
64659 + },
64660 +#endif
64661 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
64662 + {
64663 + .ctl_name = CTL_UNNUMBERED,
64664 + .procname = "chroot_deny_mknod",
64665 + .data = &grsec_enable_chroot_mknod,
64666 + .maxlen = sizeof(int),
64667 + .mode = 0600,
64668 + .proc_handler = &proc_dointvec,
64669 + },
64670 +#endif
64671 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
64672 + {
64673 + .ctl_name = CTL_UNNUMBERED,
64674 + .procname = "chroot_restrict_nice",
64675 + .data = &grsec_enable_chroot_nice,
64676 + .maxlen = sizeof(int),
64677 + .mode = 0600,
64678 + .proc_handler = &proc_dointvec,
64679 + },
64680 +#endif
64681 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
64682 + {
64683 + .ctl_name = CTL_UNNUMBERED,
64684 + .procname = "chroot_execlog",
64685 + .data = &grsec_enable_chroot_execlog,
64686 + .maxlen = sizeof(int),
64687 + .mode = 0600,
64688 + .proc_handler = &proc_dointvec,
64689 + },
64690 +#endif
64691 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
64692 + {
64693 + .ctl_name = CTL_UNNUMBERED,
64694 + .procname = "chroot_caps",
64695 + .data = &grsec_enable_chroot_caps,
64696 + .maxlen = sizeof(int),
64697 + .mode = 0600,
64698 + .proc_handler = &proc_dointvec,
64699 + },
64700 +#endif
64701 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
64702 + {
64703 + .ctl_name = CTL_UNNUMBERED,
64704 + .procname = "chroot_deny_sysctl",
64705 + .data = &grsec_enable_chroot_sysctl,
64706 + .maxlen = sizeof(int),
64707 + .mode = 0600,
64708 + .proc_handler = &proc_dointvec,
64709 + },
64710 +#endif
64711 +#ifdef CONFIG_GRKERNSEC_TPE
64712 + {
64713 + .ctl_name = CTL_UNNUMBERED,
64714 + .procname = "tpe",
64715 + .data = &grsec_enable_tpe,
64716 + .maxlen = sizeof(int),
64717 + .mode = 0600,
64718 + .proc_handler = &proc_dointvec,
64719 + },
64720 + {
64721 + .ctl_name = CTL_UNNUMBERED,
64722 + .procname = "tpe_gid",
64723 + .data = &grsec_tpe_gid,
64724 + .maxlen = sizeof(int),
64725 + .mode = 0600,
64726 + .proc_handler = &proc_dointvec,
64727 + },
64728 +#endif
64729 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
64730 + {
64731 + .ctl_name = CTL_UNNUMBERED,
64732 + .procname = "tpe_invert",
64733 + .data = &grsec_enable_tpe_invert,
64734 + .maxlen = sizeof(int),
64735 + .mode = 0600,
64736 + .proc_handler = &proc_dointvec,
64737 + },
64738 +#endif
64739 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
64740 + {
64741 + .ctl_name = CTL_UNNUMBERED,
64742 + .procname = "tpe_restrict_all",
64743 + .data = &grsec_enable_tpe_all,
64744 + .maxlen = sizeof(int),
64745 + .mode = 0600,
64746 + .proc_handler = &proc_dointvec,
64747 + },
64748 +#endif
64749 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
64750 + {
64751 + .ctl_name = CTL_UNNUMBERED,
64752 + .procname = "socket_all",
64753 + .data = &grsec_enable_socket_all,
64754 + .maxlen = sizeof(int),
64755 + .mode = 0600,
64756 + .proc_handler = &proc_dointvec,
64757 + },
64758 + {
64759 + .ctl_name = CTL_UNNUMBERED,
64760 + .procname = "socket_all_gid",
64761 + .data = &grsec_socket_all_gid,
64762 + .maxlen = sizeof(int),
64763 + .mode = 0600,
64764 + .proc_handler = &proc_dointvec,
64765 + },
64766 +#endif
64767 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
64768 + {
64769 + .ctl_name = CTL_UNNUMBERED,
64770 + .procname = "socket_client",
64771 + .data = &grsec_enable_socket_client,
64772 + .maxlen = sizeof(int),
64773 + .mode = 0600,
64774 + .proc_handler = &proc_dointvec,
64775 + },
64776 + {
64777 + .ctl_name = CTL_UNNUMBERED,
64778 + .procname = "socket_client_gid",
64779 + .data = &grsec_socket_client_gid,
64780 + .maxlen = sizeof(int),
64781 + .mode = 0600,
64782 + .proc_handler = &proc_dointvec,
64783 + },
64784 +#endif
64785 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64786 + {
64787 + .ctl_name = CTL_UNNUMBERED,
64788 + .procname = "socket_server",
64789 + .data = &grsec_enable_socket_server,
64790 + .maxlen = sizeof(int),
64791 + .mode = 0600,
64792 + .proc_handler = &proc_dointvec,
64793 + },
64794 + {
64795 + .ctl_name = CTL_UNNUMBERED,
64796 + .procname = "socket_server_gid",
64797 + .data = &grsec_socket_server_gid,
64798 + .maxlen = sizeof(int),
64799 + .mode = 0600,
64800 + .proc_handler = &proc_dointvec,
64801 + },
64802 +#endif
64803 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
64804 + {
64805 + .ctl_name = CTL_UNNUMBERED,
64806 + .procname = "audit_group",
64807 + .data = &grsec_enable_group,
64808 + .maxlen = sizeof(int),
64809 + .mode = 0600,
64810 + .proc_handler = &proc_dointvec,
64811 + },
64812 + {
64813 + .ctl_name = CTL_UNNUMBERED,
64814 + .procname = "audit_gid",
64815 + .data = &grsec_audit_gid,
64816 + .maxlen = sizeof(int),
64817 + .mode = 0600,
64818 + .proc_handler = &proc_dointvec,
64819 + },
64820 +#endif
64821 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
64822 + {
64823 + .ctl_name = CTL_UNNUMBERED,
64824 + .procname = "audit_chdir",
64825 + .data = &grsec_enable_chdir,
64826 + .maxlen = sizeof(int),
64827 + .mode = 0600,
64828 + .proc_handler = &proc_dointvec,
64829 + },
64830 +#endif
64831 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64832 + {
64833 + .ctl_name = CTL_UNNUMBERED,
64834 + .procname = "audit_mount",
64835 + .data = &grsec_enable_mount,
64836 + .maxlen = sizeof(int),
64837 + .mode = 0600,
64838 + .proc_handler = &proc_dointvec,
64839 + },
64840 +#endif
64841 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
64842 + {
64843 + .ctl_name = CTL_UNNUMBERED,
64844 + .procname = "audit_textrel",
64845 + .data = &grsec_enable_audit_textrel,
64846 + .maxlen = sizeof(int),
64847 + .mode = 0600,
64848 + .proc_handler = &proc_dointvec,
64849 + },
64850 +#endif
64851 +#ifdef CONFIG_GRKERNSEC_DMESG
64852 + {
64853 + .ctl_name = CTL_UNNUMBERED,
64854 + .procname = "dmesg",
64855 + .data = &grsec_enable_dmesg,
64856 + .maxlen = sizeof(int),
64857 + .mode = 0600,
64858 + .proc_handler = &proc_dointvec,
64859 + },
64860 +#endif
64861 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
64862 + {
64863 + .ctl_name = CTL_UNNUMBERED,
64864 + .procname = "chroot_findtask",
64865 + .data = &grsec_enable_chroot_findtask,
64866 + .maxlen = sizeof(int),
64867 + .mode = 0600,
64868 + .proc_handler = &proc_dointvec,
64869 + },
64870 +#endif
64871 +#ifdef CONFIG_GRKERNSEC_RESLOG
64872 + {
64873 + .ctl_name = CTL_UNNUMBERED,
64874 + .procname = "resource_logging",
64875 + .data = &grsec_resource_logging,
64876 + .maxlen = sizeof(int),
64877 + .mode = 0600,
64878 + .proc_handler = &proc_dointvec,
64879 + },
64880 +#endif
64881 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
64882 + {
64883 + .ctl_name = CTL_UNNUMBERED,
64884 + .procname = "audit_ptrace",
64885 + .data = &grsec_enable_audit_ptrace,
64886 + .maxlen = sizeof(int),
64887 + .mode = 0600,
64888 + .proc_handler = &proc_dointvec,
64889 + },
64890 +#endif
64891 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
64892 + {
64893 + .ctl_name = CTL_UNNUMBERED,
64894 + .procname = "harden_ptrace",
64895 + .data = &grsec_enable_harden_ptrace,
64896 + .maxlen = sizeof(int),
64897 + .mode = 0600,
64898 + .proc_handler = &proc_dointvec,
64899 + },
64900 +#endif
64901 + {
64902 + .ctl_name = CTL_UNNUMBERED,
64903 + .procname = "grsec_lock",
64904 + .data = &grsec_lock,
64905 + .maxlen = sizeof(int),
64906 + .mode = 0600,
64907 + .proc_handler = &proc_dointvec,
64908 + },
64909 +#endif
64910 +#ifdef CONFIG_GRKERNSEC_ROFS
64911 + {
64912 + .ctl_name = CTL_UNNUMBERED,
64913 + .procname = "romount_protect",
64914 + .data = &grsec_enable_rofs,
64915 + .maxlen = sizeof(int),
64916 + .mode = 0600,
64917 + .proc_handler = &proc_dointvec_minmax,
64918 + .extra1 = &one,
64919 + .extra2 = &one,
64920 + },
64921 +#endif
64922 + { .ctl_name = 0 }
64923 +};
64924 +#endif
64925 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
64926 new file mode 100644
64927 index 0000000..0dc13c3
64928 --- /dev/null
64929 +++ b/grsecurity/grsec_time.c
64930 @@ -0,0 +1,16 @@
64931 +#include <linux/kernel.h>
64932 +#include <linux/sched.h>
64933 +#include <linux/grinternal.h>
64934 +#include <linux/module.h>
64935 +
64936 +void
64937 +gr_log_timechange(void)
64938 +{
64939 +#ifdef CONFIG_GRKERNSEC_TIME
64940 + if (grsec_enable_time)
64941 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
64942 +#endif
64943 + return;
64944 +}
64945 +
64946 +EXPORT_SYMBOL(gr_log_timechange);
64947 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
64948 new file mode 100644
64949 index 0000000..a35ba33
64950 --- /dev/null
64951 +++ b/grsecurity/grsec_tpe.c
64952 @@ -0,0 +1,73 @@
64953 +#include <linux/kernel.h>
64954 +#include <linux/sched.h>
64955 +#include <linux/file.h>
64956 +#include <linux/fs.h>
64957 +#include <linux/grinternal.h>
64958 +
64959 +extern int gr_acl_tpe_check(void);
64960 +
64961 +int
64962 +gr_tpe_allow(const struct file *file)
64963 +{
64964 +#ifdef CONFIG_GRKERNSEC
64965 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
64966 + const struct cred *cred = current_cred();
64967 + char *msg = NULL;
64968 + char *msg2 = NULL;
64969 +
64970 + // never restrict root
64971 + if (!cred->uid)
64972 + return 1;
64973 +
64974 + if (grsec_enable_tpe) {
64975 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
64976 + if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
64977 + msg = "not being in trusted group";
64978 + else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
64979 + msg = "being in untrusted group";
64980 +#else
64981 + if (in_group_p(grsec_tpe_gid))
64982 + msg = "being in untrusted group";
64983 +#endif
64984 + }
64985 + if (!msg && gr_acl_tpe_check())
64986 + msg = "being in untrusted role";
64987 +
64988 + // not in any affected group/role
64989 + if (!msg)
64990 + goto next_check;
64991 +
64992 + if (inode->i_uid)
64993 + msg2 = "file in non-root-owned directory";
64994 + else if (inode->i_mode & S_IWOTH)
64995 + msg2 = "file in world-writable directory";
64996 + else if (inode->i_mode & S_IWGRP)
64997 + msg2 = "file in group-writable directory";
64998 +
64999 + if (msg && msg2) {
65000 + char fullmsg[64] = {0};
65001 + snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
65002 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
65003 + return 0;
65004 + }
65005 + msg = NULL;
65006 +next_check:
65007 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
65008 + if (!grsec_enable_tpe || !grsec_enable_tpe_all)
65009 + return 1;
65010 +
65011 + if (inode->i_uid && (inode->i_uid != cred->uid))
65012 + msg = "directory not owned by user";
65013 + else if (inode->i_mode & S_IWOTH)
65014 + msg = "file in world-writable directory";
65015 + else if (inode->i_mode & S_IWGRP)
65016 + msg = "file in group-writable directory";
65017 +
65018 + if (msg) {
65019 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
65020 + return 0;
65021 + }
65022 +#endif
65023 +#endif
65024 + return 1;
65025 +}
65026 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
65027 new file mode 100644
65028 index 0000000..9f7b1ac
65029 --- /dev/null
65030 +++ b/grsecurity/grsum.c
65031 @@ -0,0 +1,61 @@
65032 +#include <linux/err.h>
65033 +#include <linux/kernel.h>
65034 +#include <linux/sched.h>
65035 +#include <linux/mm.h>
65036 +#include <linux/scatterlist.h>
65037 +#include <linux/crypto.h>
65038 +#include <linux/gracl.h>
65039 +
65040 +
65041 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
65042 +#error "crypto and sha256 must be built into the kernel"
65043 +#endif
65044 +
65045 +int
65046 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
65047 +{
65048 + char *p;
65049 + struct crypto_hash *tfm;
65050 + struct hash_desc desc;
65051 + struct scatterlist sg;
65052 + unsigned char temp_sum[GR_SHA_LEN];
65053 + volatile int retval = 0;
65054 + volatile int dummy = 0;
65055 + unsigned int i;
65056 +
65057 + sg_init_table(&sg, 1);
65058 +
65059 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
65060 + if (IS_ERR(tfm)) {
65061 + /* should never happen, since sha256 should be built in */
65062 + return 1;
65063 + }
65064 +
65065 + desc.tfm = tfm;
65066 + desc.flags = 0;
65067 +
65068 + crypto_hash_init(&desc);
65069 +
65070 + p = salt;
65071 + sg_set_buf(&sg, p, GR_SALT_LEN);
65072 + crypto_hash_update(&desc, &sg, sg.length);
65073 +
65074 + p = entry->pw;
65075 + sg_set_buf(&sg, p, strlen(p));
65076 +
65077 + crypto_hash_update(&desc, &sg, sg.length);
65078 +
65079 + crypto_hash_final(&desc, temp_sum);
65080 +
65081 + memset(entry->pw, 0, GR_PW_LEN);
65082 +
65083 + for (i = 0; i < GR_SHA_LEN; i++)
65084 + if (sum[i] != temp_sum[i])
65085 + retval = 1;
65086 + else
65087 + dummy = 1; // waste a cycle
65088 +
65089 + crypto_free_hash(tfm);
65090 +
65091 + return retval;
65092 +}
65093 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
65094 index 3cd9ccd..fe16d47 100644
65095 --- a/include/acpi/acpi_bus.h
65096 +++ b/include/acpi/acpi_bus.h
65097 @@ -107,7 +107,7 @@ struct acpi_device_ops {
65098 acpi_op_bind bind;
65099 acpi_op_unbind unbind;
65100 acpi_op_notify notify;
65101 -};
65102 +} __no_const;
65103
65104 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
65105
65106 diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
65107 index f4906f6..71feb73 100644
65108 --- a/include/acpi/acpi_drivers.h
65109 +++ b/include/acpi/acpi_drivers.h
65110 @@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acpi_handle handle, int type);
65111 Dock Station
65112 -------------------------------------------------------------------------- */
65113 struct acpi_dock_ops {
65114 - acpi_notify_handler handler;
65115 - acpi_notify_handler uevent;
65116 + const acpi_notify_handler handler;
65117 + const acpi_notify_handler uevent;
65118 };
65119
65120 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
65121 @@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle handle);
65122 extern int register_dock_notifier(struct notifier_block *nb);
65123 extern void unregister_dock_notifier(struct notifier_block *nb);
65124 extern int register_hotplug_dock_device(acpi_handle handle,
65125 - struct acpi_dock_ops *ops,
65126 + const struct acpi_dock_ops *ops,
65127 void *context);
65128 extern void unregister_hotplug_dock_device(acpi_handle handle);
65129 #else
65130 @@ -144,7 +144,7 @@ static inline void unregister_dock_notifier(struct notifier_block *nb)
65131 {
65132 }
65133 static inline int register_hotplug_dock_device(acpi_handle handle,
65134 - struct acpi_dock_ops *ops,
65135 + const struct acpi_dock_ops *ops,
65136 void *context)
65137 {
65138 return -ENODEV;
65139 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
65140 index b7babf0..a9ac9fc 100644
65141 --- a/include/asm-generic/atomic-long.h
65142 +++ b/include/asm-generic/atomic-long.h
65143 @@ -22,6 +22,12 @@
65144
65145 typedef atomic64_t atomic_long_t;
65146
65147 +#ifdef CONFIG_PAX_REFCOUNT
65148 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
65149 +#else
65150 +typedef atomic64_t atomic_long_unchecked_t;
65151 +#endif
65152 +
65153 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
65154
65155 static inline long atomic_long_read(atomic_long_t *l)
65156 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
65157 return (long)atomic64_read(v);
65158 }
65159
65160 +#ifdef CONFIG_PAX_REFCOUNT
65161 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
65162 +{
65163 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65164 +
65165 + return (long)atomic64_read_unchecked(v);
65166 +}
65167 +#endif
65168 +
65169 static inline void atomic_long_set(atomic_long_t *l, long i)
65170 {
65171 atomic64_t *v = (atomic64_t *)l;
65172 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
65173 atomic64_set(v, i);
65174 }
65175
65176 +#ifdef CONFIG_PAX_REFCOUNT
65177 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
65178 +{
65179 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65180 +
65181 + atomic64_set_unchecked(v, i);
65182 +}
65183 +#endif
65184 +
65185 static inline void atomic_long_inc(atomic_long_t *l)
65186 {
65187 atomic64_t *v = (atomic64_t *)l;
65188 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
65189 atomic64_inc(v);
65190 }
65191
65192 +#ifdef CONFIG_PAX_REFCOUNT
65193 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
65194 +{
65195 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65196 +
65197 + atomic64_inc_unchecked(v);
65198 +}
65199 +#endif
65200 +
65201 static inline void atomic_long_dec(atomic_long_t *l)
65202 {
65203 atomic64_t *v = (atomic64_t *)l;
65204 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
65205 atomic64_dec(v);
65206 }
65207
65208 +#ifdef CONFIG_PAX_REFCOUNT
65209 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
65210 +{
65211 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65212 +
65213 + atomic64_dec_unchecked(v);
65214 +}
65215 +#endif
65216 +
65217 static inline void atomic_long_add(long i, atomic_long_t *l)
65218 {
65219 atomic64_t *v = (atomic64_t *)l;
65220 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
65221 atomic64_add(i, v);
65222 }
65223
65224 +#ifdef CONFIG_PAX_REFCOUNT
65225 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
65226 +{
65227 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65228 +
65229 + atomic64_add_unchecked(i, v);
65230 +}
65231 +#endif
65232 +
65233 static inline void atomic_long_sub(long i, atomic_long_t *l)
65234 {
65235 atomic64_t *v = (atomic64_t *)l;
65236 @@ -115,6 +166,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
65237 return (long)atomic64_inc_return(v);
65238 }
65239
65240 +#ifdef CONFIG_PAX_REFCOUNT
65241 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
65242 +{
65243 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65244 +
65245 + return (long)atomic64_inc_return_unchecked(v);
65246 +}
65247 +#endif
65248 +
65249 static inline long atomic_long_dec_return(atomic_long_t *l)
65250 {
65251 atomic64_t *v = (atomic64_t *)l;
65252 @@ -140,6 +200,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
65253
65254 typedef atomic_t atomic_long_t;
65255
65256 +#ifdef CONFIG_PAX_REFCOUNT
65257 +typedef atomic_unchecked_t atomic_long_unchecked_t;
65258 +#else
65259 +typedef atomic_t atomic_long_unchecked_t;
65260 +#endif
65261 +
65262 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
65263 static inline long atomic_long_read(atomic_long_t *l)
65264 {
65265 @@ -148,6 +214,15 @@ static inline long atomic_long_read(atomic_long_t *l)
65266 return (long)atomic_read(v);
65267 }
65268
65269 +#ifdef CONFIG_PAX_REFCOUNT
65270 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
65271 +{
65272 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65273 +
65274 + return (long)atomic_read_unchecked(v);
65275 +}
65276 +#endif
65277 +
65278 static inline void atomic_long_set(atomic_long_t *l, long i)
65279 {
65280 atomic_t *v = (atomic_t *)l;
65281 @@ -155,6 +230,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
65282 atomic_set(v, i);
65283 }
65284
65285 +#ifdef CONFIG_PAX_REFCOUNT
65286 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
65287 +{
65288 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65289 +
65290 + atomic_set_unchecked(v, i);
65291 +}
65292 +#endif
65293 +
65294 static inline void atomic_long_inc(atomic_long_t *l)
65295 {
65296 atomic_t *v = (atomic_t *)l;
65297 @@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
65298 atomic_inc(v);
65299 }
65300
65301 +#ifdef CONFIG_PAX_REFCOUNT
65302 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
65303 +{
65304 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65305 +
65306 + atomic_inc_unchecked(v);
65307 +}
65308 +#endif
65309 +
65310 static inline void atomic_long_dec(atomic_long_t *l)
65311 {
65312 atomic_t *v = (atomic_t *)l;
65313 @@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
65314 atomic_dec(v);
65315 }
65316
65317 +#ifdef CONFIG_PAX_REFCOUNT
65318 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
65319 +{
65320 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65321 +
65322 + atomic_dec_unchecked(v);
65323 +}
65324 +#endif
65325 +
65326 static inline void atomic_long_add(long i, atomic_long_t *l)
65327 {
65328 atomic_t *v = (atomic_t *)l;
65329 @@ -176,6 +278,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
65330 atomic_add(i, v);
65331 }
65332
65333 +#ifdef CONFIG_PAX_REFCOUNT
65334 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
65335 +{
65336 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65337 +
65338 + atomic_add_unchecked(i, v);
65339 +}
65340 +#endif
65341 +
65342 static inline void atomic_long_sub(long i, atomic_long_t *l)
65343 {
65344 atomic_t *v = (atomic_t *)l;
65345 @@ -232,6 +343,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
65346 return (long)atomic_inc_return(v);
65347 }
65348
65349 +#ifdef CONFIG_PAX_REFCOUNT
65350 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
65351 +{
65352 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65353 +
65354 + return (long)atomic_inc_return_unchecked(v);
65355 +}
65356 +#endif
65357 +
65358 static inline long atomic_long_dec_return(atomic_long_t *l)
65359 {
65360 atomic_t *v = (atomic_t *)l;
65361 @@ -255,4 +375,47 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
65362
65363 #endif /* BITS_PER_LONG == 64 */
65364
65365 +#ifdef CONFIG_PAX_REFCOUNT
65366 +static inline void pax_refcount_needs_these_functions(void)
65367 +{
65368 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
65369 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
65370 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
65371 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
65372 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
65373 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
65374 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
65375 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
65376 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
65377 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
65378 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
65379 +
65380 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
65381 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
65382 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
65383 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
65384 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
65385 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
65386 +}
65387 +#else
65388 +#define atomic_read_unchecked(v) atomic_read(v)
65389 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
65390 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
65391 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
65392 +#define atomic_inc_unchecked(v) atomic_inc(v)
65393 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
65394 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
65395 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
65396 +#define atomic_dec_unchecked(v) atomic_dec(v)
65397 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
65398 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
65399 +
65400 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
65401 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
65402 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
65403 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
65404 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
65405 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
65406 +#endif
65407 +
65408 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
65409 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
65410 index b18ce4f..2ee2843 100644
65411 --- a/include/asm-generic/atomic64.h
65412 +++ b/include/asm-generic/atomic64.h
65413 @@ -16,6 +16,8 @@ typedef struct {
65414 long long counter;
65415 } atomic64_t;
65416
65417 +typedef atomic64_t atomic64_unchecked_t;
65418 +
65419 #define ATOMIC64_INIT(i) { (i) }
65420
65421 extern long long atomic64_read(const atomic64_t *v);
65422 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
65423 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
65424 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
65425
65426 +#define atomic64_read_unchecked(v) atomic64_read(v)
65427 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
65428 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
65429 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
65430 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
65431 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
65432 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
65433 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
65434 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
65435 +
65436 #endif /* _ASM_GENERIC_ATOMIC64_H */
65437 diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
65438 index d48ddf0..656a0ac 100644
65439 --- a/include/asm-generic/bug.h
65440 +++ b/include/asm-generic/bug.h
65441 @@ -105,11 +105,11 @@ extern void warn_slowpath_null(const char *file, const int line);
65442
65443 #else /* !CONFIG_BUG */
65444 #ifndef HAVE_ARCH_BUG
65445 -#define BUG() do {} while(0)
65446 +#define BUG() do { for (;;) ; } while(0)
65447 #endif
65448
65449 #ifndef HAVE_ARCH_BUG_ON
65450 -#define BUG_ON(condition) do { if (condition) ; } while(0)
65451 +#define BUG_ON(condition) do { if (condition) for (;;) ; } while(0)
65452 #endif
65453
65454 #ifndef HAVE_ARCH_WARN_ON
65455 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
65456 index 1bfcfe5..e04c5c9 100644
65457 --- a/include/asm-generic/cache.h
65458 +++ b/include/asm-generic/cache.h
65459 @@ -6,7 +6,7 @@
65460 * cache lines need to provide their own cache.h.
65461 */
65462
65463 -#define L1_CACHE_SHIFT 5
65464 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
65465 +#define L1_CACHE_SHIFT 5UL
65466 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
65467
65468 #endif /* __ASM_GENERIC_CACHE_H */
65469 diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
65470 index 6920695..41038bc 100644
65471 --- a/include/asm-generic/dma-mapping-common.h
65472 +++ b/include/asm-generic/dma-mapping-common.h
65473 @@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
65474 enum dma_data_direction dir,
65475 struct dma_attrs *attrs)
65476 {
65477 - struct dma_map_ops *ops = get_dma_ops(dev);
65478 + const struct dma_map_ops *ops = get_dma_ops(dev);
65479 dma_addr_t addr;
65480
65481 kmemcheck_mark_initialized(ptr, size);
65482 @@ -30,7 +30,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
65483 enum dma_data_direction dir,
65484 struct dma_attrs *attrs)
65485 {
65486 - struct dma_map_ops *ops = get_dma_ops(dev);
65487 + const struct dma_map_ops *ops = get_dma_ops(dev);
65488
65489 BUG_ON(!valid_dma_direction(dir));
65490 if (ops->unmap_page)
65491 @@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
65492 int nents, enum dma_data_direction dir,
65493 struct dma_attrs *attrs)
65494 {
65495 - struct dma_map_ops *ops = get_dma_ops(dev);
65496 + const struct dma_map_ops *ops = get_dma_ops(dev);
65497 int i, ents;
65498 struct scatterlist *s;
65499
65500 @@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
65501 int nents, enum dma_data_direction dir,
65502 struct dma_attrs *attrs)
65503 {
65504 - struct dma_map_ops *ops = get_dma_ops(dev);
65505 + const struct dma_map_ops *ops = get_dma_ops(dev);
65506
65507 BUG_ON(!valid_dma_direction(dir));
65508 debug_dma_unmap_sg(dev, sg, nents, dir);
65509 @@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
65510 size_t offset, size_t size,
65511 enum dma_data_direction dir)
65512 {
65513 - struct dma_map_ops *ops = get_dma_ops(dev);
65514 + const struct dma_map_ops *ops = get_dma_ops(dev);
65515 dma_addr_t addr;
65516
65517 kmemcheck_mark_initialized(page_address(page) + offset, size);
65518 @@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
65519 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
65520 size_t size, enum dma_data_direction dir)
65521 {
65522 - struct dma_map_ops *ops = get_dma_ops(dev);
65523 + const struct dma_map_ops *ops = get_dma_ops(dev);
65524
65525 BUG_ON(!valid_dma_direction(dir));
65526 if (ops->unmap_page)
65527 @@ -97,7 +97,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
65528 size_t size,
65529 enum dma_data_direction dir)
65530 {
65531 - struct dma_map_ops *ops = get_dma_ops(dev);
65532 + const struct dma_map_ops *ops = get_dma_ops(dev);
65533
65534 BUG_ON(!valid_dma_direction(dir));
65535 if (ops->sync_single_for_cpu)
65536 @@ -109,7 +109,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
65537 dma_addr_t addr, size_t size,
65538 enum dma_data_direction dir)
65539 {
65540 - struct dma_map_ops *ops = get_dma_ops(dev);
65541 + const struct dma_map_ops *ops = get_dma_ops(dev);
65542
65543 BUG_ON(!valid_dma_direction(dir));
65544 if (ops->sync_single_for_device)
65545 @@ -123,7 +123,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
65546 size_t size,
65547 enum dma_data_direction dir)
65548 {
65549 - struct dma_map_ops *ops = get_dma_ops(dev);
65550 + const struct dma_map_ops *ops = get_dma_ops(dev);
65551
65552 BUG_ON(!valid_dma_direction(dir));
65553 if (ops->sync_single_range_for_cpu) {
65554 @@ -140,7 +140,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
65555 size_t size,
65556 enum dma_data_direction dir)
65557 {
65558 - struct dma_map_ops *ops = get_dma_ops(dev);
65559 + const struct dma_map_ops *ops = get_dma_ops(dev);
65560
65561 BUG_ON(!valid_dma_direction(dir));
65562 if (ops->sync_single_range_for_device) {
65563 @@ -155,7 +155,7 @@ static inline void
65564 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
65565 int nelems, enum dma_data_direction dir)
65566 {
65567 - struct dma_map_ops *ops = get_dma_ops(dev);
65568 + const struct dma_map_ops *ops = get_dma_ops(dev);
65569
65570 BUG_ON(!valid_dma_direction(dir));
65571 if (ops->sync_sg_for_cpu)
65572 @@ -167,7 +167,7 @@ static inline void
65573 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
65574 int nelems, enum dma_data_direction dir)
65575 {
65576 - struct dma_map_ops *ops = get_dma_ops(dev);
65577 + const struct dma_map_ops *ops = get_dma_ops(dev);
65578
65579 BUG_ON(!valid_dma_direction(dir));
65580 if (ops->sync_sg_for_device)
65581 diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
65582 index 0d68a1e..b74a761 100644
65583 --- a/include/asm-generic/emergency-restart.h
65584 +++ b/include/asm-generic/emergency-restart.h
65585 @@ -1,7 +1,7 @@
65586 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
65587 #define _ASM_GENERIC_EMERGENCY_RESTART_H
65588
65589 -static inline void machine_emergency_restart(void)
65590 +static inline __noreturn void machine_emergency_restart(void)
65591 {
65592 machine_restart(NULL);
65593 }
65594 diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
65595 index 3c2344f..4590a7d 100644
65596 --- a/include/asm-generic/futex.h
65597 +++ b/include/asm-generic/futex.h
65598 @@ -6,7 +6,7 @@
65599 #include <asm/errno.h>
65600
65601 static inline int
65602 -futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
65603 +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
65604 {
65605 int op = (encoded_op >> 28) & 7;
65606 int cmp = (encoded_op >> 24) & 15;
65607 @@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
65608 }
65609
65610 static inline int
65611 -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
65612 +futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
65613 {
65614 return -ENOSYS;
65615 }
65616 diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
65617 index 1ca3efc..e3dc852 100644
65618 --- a/include/asm-generic/int-l64.h
65619 +++ b/include/asm-generic/int-l64.h
65620 @@ -46,6 +46,8 @@ typedef unsigned int u32;
65621 typedef signed long s64;
65622 typedef unsigned long u64;
65623
65624 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
65625 +
65626 #define S8_C(x) x
65627 #define U8_C(x) x ## U
65628 #define S16_C(x) x
65629 diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
65630 index f394147..b6152b9 100644
65631 --- a/include/asm-generic/int-ll64.h
65632 +++ b/include/asm-generic/int-ll64.h
65633 @@ -51,6 +51,8 @@ typedef unsigned int u32;
65634 typedef signed long long s64;
65635 typedef unsigned long long u64;
65636
65637 +typedef unsigned long long intoverflow_t;
65638 +
65639 #define S8_C(x) x
65640 #define U8_C(x) x ## U
65641 #define S16_C(x) x
65642 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
65643 index e5f234a..cdb16b3 100644
65644 --- a/include/asm-generic/kmap_types.h
65645 +++ b/include/asm-generic/kmap_types.h
65646 @@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
65647 KMAP_D(16) KM_IRQ_PTE,
65648 KMAP_D(17) KM_NMI,
65649 KMAP_D(18) KM_NMI_PTE,
65650 -KMAP_D(19) KM_TYPE_NR
65651 +KMAP_D(19) KM_CLEARPAGE,
65652 +KMAP_D(20) KM_TYPE_NR
65653 };
65654
65655 #undef KMAP_D
65656 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
65657 index 725612b..9cc513a 100644
65658 --- a/include/asm-generic/pgtable-nopmd.h
65659 +++ b/include/asm-generic/pgtable-nopmd.h
65660 @@ -1,14 +1,19 @@
65661 #ifndef _PGTABLE_NOPMD_H
65662 #define _PGTABLE_NOPMD_H
65663
65664 -#ifndef __ASSEMBLY__
65665 -
65666 #include <asm-generic/pgtable-nopud.h>
65667
65668 -struct mm_struct;
65669 -
65670 #define __PAGETABLE_PMD_FOLDED
65671
65672 +#define PMD_SHIFT PUD_SHIFT
65673 +#define PTRS_PER_PMD 1
65674 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
65675 +#define PMD_MASK (~(PMD_SIZE-1))
65676 +
65677 +#ifndef __ASSEMBLY__
65678 +
65679 +struct mm_struct;
65680 +
65681 /*
65682 * Having the pmd type consist of a pud gets the size right, and allows
65683 * us to conceptually access the pud entry that this pmd is folded into
65684 @@ -16,11 +21,6 @@ struct mm_struct;
65685 */
65686 typedef struct { pud_t pud; } pmd_t;
65687
65688 -#define PMD_SHIFT PUD_SHIFT
65689 -#define PTRS_PER_PMD 1
65690 -#define PMD_SIZE (1UL << PMD_SHIFT)
65691 -#define PMD_MASK (~(PMD_SIZE-1))
65692 -
65693 /*
65694 * The "pud_xxx()" functions here are trivial for a folded two-level
65695 * setup: the pmd is never bad, and a pmd always exists (as it's folded
65696 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
65697 index 810431d..ccc3638 100644
65698 --- a/include/asm-generic/pgtable-nopud.h
65699 +++ b/include/asm-generic/pgtable-nopud.h
65700 @@ -1,10 +1,15 @@
65701 #ifndef _PGTABLE_NOPUD_H
65702 #define _PGTABLE_NOPUD_H
65703
65704 -#ifndef __ASSEMBLY__
65705 -
65706 #define __PAGETABLE_PUD_FOLDED
65707
65708 +#define PUD_SHIFT PGDIR_SHIFT
65709 +#define PTRS_PER_PUD 1
65710 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
65711 +#define PUD_MASK (~(PUD_SIZE-1))
65712 +
65713 +#ifndef __ASSEMBLY__
65714 +
65715 /*
65716 * Having the pud type consist of a pgd gets the size right, and allows
65717 * us to conceptually access the pgd entry that this pud is folded into
65718 @@ -12,11 +17,6 @@
65719 */
65720 typedef struct { pgd_t pgd; } pud_t;
65721
65722 -#define PUD_SHIFT PGDIR_SHIFT
65723 -#define PTRS_PER_PUD 1
65724 -#define PUD_SIZE (1UL << PUD_SHIFT)
65725 -#define PUD_MASK (~(PUD_SIZE-1))
65726 -
65727 /*
65728 * The "pgd_xxx()" functions here are trivial for a folded two-level
65729 * setup: the pud is never bad, and a pud always exists (as it's folded
65730 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
65731 index e2bd73e..fea8ed3 100644
65732 --- a/include/asm-generic/pgtable.h
65733 +++ b/include/asm-generic/pgtable.h
65734 @@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
65735 unsigned long size);
65736 #endif
65737
65738 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
65739 +static inline unsigned long pax_open_kernel(void) { return 0; }
65740 +#endif
65741 +
65742 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
65743 +static inline unsigned long pax_close_kernel(void) { return 0; }
65744 +#endif
65745 +
65746 #endif /* !__ASSEMBLY__ */
65747
65748 #endif /* _ASM_GENERIC_PGTABLE_H */
65749 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
65750 index b6e818f..21aa58a 100644
65751 --- a/include/asm-generic/vmlinux.lds.h
65752 +++ b/include/asm-generic/vmlinux.lds.h
65753 @@ -199,6 +199,7 @@
65754 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
65755 VMLINUX_SYMBOL(__start_rodata) = .; \
65756 *(.rodata) *(.rodata.*) \
65757 + *(.data.read_only) \
65758 *(__vermagic) /* Kernel version magic */ \
65759 *(__markers_strings) /* Markers: strings */ \
65760 *(__tracepoints_strings)/* Tracepoints: strings */ \
65761 @@ -656,22 +657,24 @@
65762 * section in the linker script will go there too. @phdr should have
65763 * a leading colon.
65764 *
65765 - * Note that this macros defines __per_cpu_load as an absolute symbol.
65766 + * Note that this macros defines per_cpu_load as an absolute symbol.
65767 * If there is no need to put the percpu section at a predetermined
65768 * address, use PERCPU().
65769 */
65770 #define PERCPU_VADDR(vaddr, phdr) \
65771 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
65772 - .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
65773 + per_cpu_load = .; \
65774 + .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
65775 - LOAD_OFFSET) { \
65776 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
65777 VMLINUX_SYMBOL(__per_cpu_start) = .; \
65778 *(.data.percpu.first) \
65779 - *(.data.percpu.page_aligned) \
65780 *(.data.percpu) \
65781 + . = ALIGN(PAGE_SIZE); \
65782 + *(.data.percpu.page_aligned) \
65783 *(.data.percpu.shared_aligned) \
65784 VMLINUX_SYMBOL(__per_cpu_end) = .; \
65785 } phdr \
65786 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
65787 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
65788
65789 /**
65790 * PERCPU - define output section for percpu area, simple version
65791 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
65792 index 66713c6..98c0460 100644
65793 --- a/include/drm/drmP.h
65794 +++ b/include/drm/drmP.h
65795 @@ -71,6 +71,7 @@
65796 #include <linux/workqueue.h>
65797 #include <linux/poll.h>
65798 #include <asm/pgalloc.h>
65799 +#include <asm/local.h>
65800 #include "drm.h"
65801
65802 #include <linux/idr.h>
65803 @@ -814,7 +815,7 @@ struct drm_driver {
65804 void (*vgaarb_irq)(struct drm_device *dev, bool state);
65805
65806 /* Driver private ops for this object */
65807 - struct vm_operations_struct *gem_vm_ops;
65808 + const struct vm_operations_struct *gem_vm_ops;
65809
65810 int major;
65811 int minor;
65812 @@ -917,7 +918,7 @@ struct drm_device {
65813
65814 /** \name Usage Counters */
65815 /*@{ */
65816 - int open_count; /**< Outstanding files open */
65817 + local_t open_count; /**< Outstanding files open */
65818 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
65819 atomic_t vma_count; /**< Outstanding vma areas open */
65820 int buf_use; /**< Buffers in use -- cannot alloc */
65821 @@ -928,7 +929,7 @@ struct drm_device {
65822 /*@{ */
65823 unsigned long counters;
65824 enum drm_stat_type types[15];
65825 - atomic_t counts[15];
65826 + atomic_unchecked_t counts[15];
65827 /*@} */
65828
65829 struct list_head filelist;
65830 @@ -1016,7 +1017,7 @@ struct drm_device {
65831 struct pci_controller *hose;
65832 #endif
65833 struct drm_sg_mem *sg; /**< Scatter gather memory */
65834 - unsigned int num_crtcs; /**< Number of CRTCs on this device */
65835 + unsigned int num_crtcs; /**< Number of CRTCs on this device */
65836 void *dev_private; /**< device private data */
65837 void *mm_private;
65838 struct address_space *dev_mapping;
65839 @@ -1042,11 +1043,11 @@ struct drm_device {
65840 spinlock_t object_name_lock;
65841 struct idr object_name_idr;
65842 atomic_t object_count;
65843 - atomic_t object_memory;
65844 + atomic_unchecked_t object_memory;
65845 atomic_t pin_count;
65846 - atomic_t pin_memory;
65847 + atomic_unchecked_t pin_memory;
65848 atomic_t gtt_count;
65849 - atomic_t gtt_memory;
65850 + atomic_unchecked_t gtt_memory;
65851 uint32_t gtt_total;
65852 uint32_t invalidate_domains; /* domains pending invalidation */
65853 uint32_t flush_domains; /* domains pending flush */
65854 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
65855 index b29e201..3413cc9 100644
65856 --- a/include/drm/drm_crtc_helper.h
65857 +++ b/include/drm/drm_crtc_helper.h
65858 @@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs {
65859
65860 /* reload the current crtc LUT */
65861 void (*load_lut)(struct drm_crtc *crtc);
65862 -};
65863 +} __no_const;
65864
65865 struct drm_encoder_helper_funcs {
65866 void (*dpms)(struct drm_encoder *encoder, int mode);
65867 @@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs {
65868 struct drm_connector *connector);
65869 /* disable encoder when not in use - more explicit than dpms off */
65870 void (*disable)(struct drm_encoder *encoder);
65871 -};
65872 +} __no_const;
65873
65874 struct drm_connector_helper_funcs {
65875 int (*get_modes)(struct drm_connector *connector);
65876 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
65877 index b199170..6f9e64c 100644
65878 --- a/include/drm/ttm/ttm_memory.h
65879 +++ b/include/drm/ttm/ttm_memory.h
65880 @@ -47,7 +47,7 @@
65881
65882 struct ttm_mem_shrink {
65883 int (*do_shrink) (struct ttm_mem_shrink *);
65884 -};
65885 +} __no_const;
65886
65887 /**
65888 * struct ttm_mem_global - Global memory accounting structure.
65889 diff --git a/include/linux/a.out.h b/include/linux/a.out.h
65890 index e86dfca..40cc55f 100644
65891 --- a/include/linux/a.out.h
65892 +++ b/include/linux/a.out.h
65893 @@ -39,6 +39,14 @@ enum machine_type {
65894 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
65895 };
65896
65897 +/* Constants for the N_FLAGS field */
65898 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
65899 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
65900 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
65901 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
65902 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
65903 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
65904 +
65905 #if !defined (N_MAGIC)
65906 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
65907 #endif
65908 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
65909 index 817b237..62c10bc 100644
65910 --- a/include/linux/atmdev.h
65911 +++ b/include/linux/atmdev.h
65912 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
65913 #endif
65914
65915 struct k_atm_aal_stats {
65916 -#define __HANDLE_ITEM(i) atomic_t i
65917 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
65918 __AAL_STAT_ITEMS
65919 #undef __HANDLE_ITEM
65920 };
65921 diff --git a/include/linux/backlight.h b/include/linux/backlight.h
65922 index 0f5f578..8c4f884 100644
65923 --- a/include/linux/backlight.h
65924 +++ b/include/linux/backlight.h
65925 @@ -36,18 +36,18 @@ struct backlight_device;
65926 struct fb_info;
65927
65928 struct backlight_ops {
65929 - unsigned int options;
65930 + const unsigned int options;
65931
65932 #define BL_CORE_SUSPENDRESUME (1 << 0)
65933
65934 /* Notify the backlight driver some property has changed */
65935 - int (*update_status)(struct backlight_device *);
65936 + int (* const update_status)(struct backlight_device *);
65937 /* Return the current backlight brightness (accounting for power,
65938 fb_blank etc.) */
65939 - int (*get_brightness)(struct backlight_device *);
65940 + int (* const get_brightness)(struct backlight_device *);
65941 /* Check if given framebuffer device is the one bound to this backlight;
65942 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
65943 - int (*check_fb)(struct fb_info *);
65944 + int (* const check_fb)(struct fb_info *);
65945 };
65946
65947 /* This structure defines all the properties of a backlight */
65948 @@ -86,7 +86,7 @@ struct backlight_device {
65949 registered this device has been unloaded, and if class_get_devdata()
65950 points to something in the body of that driver, it is also invalid. */
65951 struct mutex ops_lock;
65952 - struct backlight_ops *ops;
65953 + const struct backlight_ops *ops;
65954
65955 /* The framebuffer notifier block */
65956 struct notifier_block fb_notif;
65957 @@ -103,7 +103,7 @@ static inline void backlight_update_status(struct backlight_device *bd)
65958 }
65959
65960 extern struct backlight_device *backlight_device_register(const char *name,
65961 - struct device *dev, void *devdata, struct backlight_ops *ops);
65962 + struct device *dev, void *devdata, const struct backlight_ops *ops);
65963 extern void backlight_device_unregister(struct backlight_device *bd);
65964 extern void backlight_force_update(struct backlight_device *bd,
65965 enum backlight_update_reason reason);
65966 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
65967 index a3d802e..482f69c 100644
65968 --- a/include/linux/binfmts.h
65969 +++ b/include/linux/binfmts.h
65970 @@ -83,6 +83,7 @@ struct linux_binfmt {
65971 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
65972 int (*load_shlib)(struct file *);
65973 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
65974 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
65975 unsigned long min_coredump; /* minimal dump size */
65976 int hasvdso;
65977 };
65978 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
65979 index 63070ad..a2906d2 100644
65980 --- a/include/linux/blkdev.h
65981 +++ b/include/linux/blkdev.h
65982 @@ -777,6 +777,7 @@ extern void blk_plug_device(struct request_queue *);
65983 extern void blk_plug_device_unlocked(struct request_queue *);
65984 extern int blk_remove_plug(struct request_queue *);
65985 extern void blk_recount_segments(struct request_queue *, struct bio *);
65986 +extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
65987 extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
65988 unsigned int, void __user *);
65989 extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
65990 @@ -1280,7 +1281,7 @@ struct block_device_operations {
65991 int (*revalidate_disk) (struct gendisk *);
65992 int (*getgeo)(struct block_device *, struct hd_geometry *);
65993 struct module *owner;
65994 -};
65995 +} __do_const;
65996
65997 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
65998 unsigned long);
65999 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
66000 index 3b73b99..629d21b 100644
66001 --- a/include/linux/blktrace_api.h
66002 +++ b/include/linux/blktrace_api.h
66003 @@ -160,7 +160,7 @@ struct blk_trace {
66004 struct dentry *dir;
66005 struct dentry *dropped_file;
66006 struct dentry *msg_file;
66007 - atomic_t dropped;
66008 + atomic_unchecked_t dropped;
66009 };
66010
66011 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
66012 diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
66013 index 83195fb..0b0f77d 100644
66014 --- a/include/linux/byteorder/little_endian.h
66015 +++ b/include/linux/byteorder/little_endian.h
66016 @@ -42,51 +42,51 @@
66017
66018 static inline __le64 __cpu_to_le64p(const __u64 *p)
66019 {
66020 - return (__force __le64)*p;
66021 + return (__force const __le64)*p;
66022 }
66023 static inline __u64 __le64_to_cpup(const __le64 *p)
66024 {
66025 - return (__force __u64)*p;
66026 + return (__force const __u64)*p;
66027 }
66028 static inline __le32 __cpu_to_le32p(const __u32 *p)
66029 {
66030 - return (__force __le32)*p;
66031 + return (__force const __le32)*p;
66032 }
66033 static inline __u32 __le32_to_cpup(const __le32 *p)
66034 {
66035 - return (__force __u32)*p;
66036 + return (__force const __u32)*p;
66037 }
66038 static inline __le16 __cpu_to_le16p(const __u16 *p)
66039 {
66040 - return (__force __le16)*p;
66041 + return (__force const __le16)*p;
66042 }
66043 static inline __u16 __le16_to_cpup(const __le16 *p)
66044 {
66045 - return (__force __u16)*p;
66046 + return (__force const __u16)*p;
66047 }
66048 static inline __be64 __cpu_to_be64p(const __u64 *p)
66049 {
66050 - return (__force __be64)__swab64p(p);
66051 + return (__force const __be64)__swab64p(p);
66052 }
66053 static inline __u64 __be64_to_cpup(const __be64 *p)
66054 {
66055 - return __swab64p((__u64 *)p);
66056 + return __swab64p((const __u64 *)p);
66057 }
66058 static inline __be32 __cpu_to_be32p(const __u32 *p)
66059 {
66060 - return (__force __be32)__swab32p(p);
66061 + return (__force const __be32)__swab32p(p);
66062 }
66063 static inline __u32 __be32_to_cpup(const __be32 *p)
66064 {
66065 - return __swab32p((__u32 *)p);
66066 + return __swab32p((const __u32 *)p);
66067 }
66068 static inline __be16 __cpu_to_be16p(const __u16 *p)
66069 {
66070 - return (__force __be16)__swab16p(p);
66071 + return (__force const __be16)__swab16p(p);
66072 }
66073 static inline __u16 __be16_to_cpup(const __be16 *p)
66074 {
66075 - return __swab16p((__u16 *)p);
66076 + return __swab16p((const __u16 *)p);
66077 }
66078 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
66079 #define __le64_to_cpus(x) do { (void)(x); } while (0)
66080 diff --git a/include/linux/cache.h b/include/linux/cache.h
66081 index 97e2488..e7576b9 100644
66082 --- a/include/linux/cache.h
66083 +++ b/include/linux/cache.h
66084 @@ -16,6 +16,10 @@
66085 #define __read_mostly
66086 #endif
66087
66088 +#ifndef __read_only
66089 +#define __read_only __read_mostly
66090 +#endif
66091 +
66092 #ifndef ____cacheline_aligned
66093 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
66094 #endif
66095 diff --git a/include/linux/capability.h b/include/linux/capability.h
66096 index c8f2a5f7..1618a5c 100644
66097 --- a/include/linux/capability.h
66098 +++ b/include/linux/capability.h
66099 @@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff_set;
66100 (security_real_capable_noaudit((t), (cap)) == 0)
66101
66102 extern int capable(int cap);
66103 +int capable_nolog(int cap);
66104
66105 /* audit system wants to get cap info from files as well */
66106 struct dentry;
66107 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
66108 index 450fa59..86019fb 100644
66109 --- a/include/linux/compiler-gcc4.h
66110 +++ b/include/linux/compiler-gcc4.h
66111 @@ -36,4 +36,16 @@
66112 the kernel context */
66113 #define __cold __attribute__((__cold__))
66114
66115 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
66116 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
66117 +#define __bos0(ptr) __bos((ptr), 0)
66118 +#define __bos1(ptr) __bos((ptr), 1)
66119 +
66120 +#if __GNUC_MINOR__ >= 5
66121 +#ifdef CONSTIFY_PLUGIN
66122 +#define __no_const __attribute__((no_const))
66123 +#define __do_const __attribute__((do_const))
66124 +#endif
66125 +#endif
66126 +
66127 #endif
66128 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
66129 index 04fb513..fd6477b 100644
66130 --- a/include/linux/compiler.h
66131 +++ b/include/linux/compiler.h
66132 @@ -5,11 +5,14 @@
66133
66134 #ifdef __CHECKER__
66135 # define __user __attribute__((noderef, address_space(1)))
66136 +# define __force_user __force __user
66137 # define __kernel /* default address space */
66138 +# define __force_kernel __force __kernel
66139 # define __safe __attribute__((safe))
66140 # define __force __attribute__((force))
66141 # define __nocast __attribute__((nocast))
66142 # define __iomem __attribute__((noderef, address_space(2)))
66143 +# define __force_iomem __force __iomem
66144 # define __acquires(x) __attribute__((context(x,0,1)))
66145 # define __releases(x) __attribute__((context(x,1,0)))
66146 # define __acquire(x) __context__(x,1)
66147 @@ -17,13 +20,34 @@
66148 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
66149 extern void __chk_user_ptr(const volatile void __user *);
66150 extern void __chk_io_ptr(const volatile void __iomem *);
66151 +#elif defined(CHECKER_PLUGIN)
66152 +//# define __user
66153 +//# define __force_user
66154 +//# define __kernel
66155 +//# define __force_kernel
66156 +# define __safe
66157 +# define __force
66158 +# define __nocast
66159 +# define __iomem
66160 +# define __force_iomem
66161 +# define __chk_user_ptr(x) (void)0
66162 +# define __chk_io_ptr(x) (void)0
66163 +# define __builtin_warning(x, y...) (1)
66164 +# define __acquires(x)
66165 +# define __releases(x)
66166 +# define __acquire(x) (void)0
66167 +# define __release(x) (void)0
66168 +# define __cond_lock(x,c) (c)
66169 #else
66170 # define __user
66171 +# define __force_user
66172 # define __kernel
66173 +# define __force_kernel
66174 # define __safe
66175 # define __force
66176 # define __nocast
66177 # define __iomem
66178 +# define __force_iomem
66179 # define __chk_user_ptr(x) (void)0
66180 # define __chk_io_ptr(x) (void)0
66181 # define __builtin_warning(x, y...) (1)
66182 @@ -247,6 +271,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
66183 # define __attribute_const__ /* unimplemented */
66184 #endif
66185
66186 +#ifndef __no_const
66187 +# define __no_const
66188 +#endif
66189 +
66190 +#ifndef __do_const
66191 +# define __do_const
66192 +#endif
66193 +
66194 /*
66195 * Tell gcc if a function is cold. The compiler will assume any path
66196 * directly leading to the call is unlikely.
66197 @@ -256,6 +288,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
66198 #define __cold
66199 #endif
66200
66201 +#ifndef __alloc_size
66202 +#define __alloc_size(...)
66203 +#endif
66204 +
66205 +#ifndef __bos
66206 +#define __bos(ptr, arg)
66207 +#endif
66208 +
66209 +#ifndef __bos0
66210 +#define __bos0(ptr)
66211 +#endif
66212 +
66213 +#ifndef __bos1
66214 +#define __bos1(ptr)
66215 +#endif
66216 +
66217 /* Simple shorthand for a section definition */
66218 #ifndef __section
66219 # define __section(S) __attribute__ ((__section__(#S)))
66220 @@ -278,6 +326,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
66221 * use is to mediate communication between process-level code and irq/NMI
66222 * handlers, all running on the same CPU.
66223 */
66224 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
66225 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
66226 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
66227
66228 #endif /* __LINUX_COMPILER_H */
66229 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
66230 index fd92988..a3164bd 100644
66231 --- a/include/linux/crypto.h
66232 +++ b/include/linux/crypto.h
66233 @@ -394,7 +394,7 @@ struct cipher_tfm {
66234 const u8 *key, unsigned int keylen);
66235 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
66236 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
66237 -};
66238 +} __no_const;
66239
66240 struct hash_tfm {
66241 int (*init)(struct hash_desc *desc);
66242 @@ -415,13 +415,13 @@ struct compress_tfm {
66243 int (*cot_decompress)(struct crypto_tfm *tfm,
66244 const u8 *src, unsigned int slen,
66245 u8 *dst, unsigned int *dlen);
66246 -};
66247 +} __no_const;
66248
66249 struct rng_tfm {
66250 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
66251 unsigned int dlen);
66252 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
66253 -};
66254 +} __no_const;
66255
66256 #define crt_ablkcipher crt_u.ablkcipher
66257 #define crt_aead crt_u.aead
66258 diff --git a/include/linux/dcache.h b/include/linux/dcache.h
66259 index 30b93b2..cd7a8db 100644
66260 --- a/include/linux/dcache.h
66261 +++ b/include/linux/dcache.h
66262 @@ -119,6 +119,8 @@ struct dentry {
66263 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
66264 };
66265
66266 +#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
66267 +
66268 /*
66269 * dentry->d_lock spinlock nesting subclasses:
66270 *
66271 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
66272 index 3e9bd6a..f4e1aa0 100644
66273 --- a/include/linux/decompress/mm.h
66274 +++ b/include/linux/decompress/mm.h
66275 @@ -78,7 +78,7 @@ static void free(void *where)
66276 * warnings when not needed (indeed large_malloc / large_free are not
66277 * needed by inflate */
66278
66279 -#define malloc(a) kmalloc(a, GFP_KERNEL)
66280 +#define malloc(a) kmalloc((a), GFP_KERNEL)
66281 #define free(a) kfree(a)
66282
66283 #define large_malloc(a) vmalloc(a)
66284 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
66285 index 91b7618..92a93d32 100644
66286 --- a/include/linux/dma-mapping.h
66287 +++ b/include/linux/dma-mapping.h
66288 @@ -16,51 +16,51 @@ enum dma_data_direction {
66289 };
66290
66291 struct dma_map_ops {
66292 - void* (*alloc_coherent)(struct device *dev, size_t size,
66293 + void* (* const alloc_coherent)(struct device *dev, size_t size,
66294 dma_addr_t *dma_handle, gfp_t gfp);
66295 - void (*free_coherent)(struct device *dev, size_t size,
66296 + void (* const free_coherent)(struct device *dev, size_t size,
66297 void *vaddr, dma_addr_t dma_handle);
66298 - dma_addr_t (*map_page)(struct device *dev, struct page *page,
66299 + dma_addr_t (* const map_page)(struct device *dev, struct page *page,
66300 unsigned long offset, size_t size,
66301 enum dma_data_direction dir,
66302 struct dma_attrs *attrs);
66303 - void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
66304 + void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
66305 size_t size, enum dma_data_direction dir,
66306 struct dma_attrs *attrs);
66307 - int (*map_sg)(struct device *dev, struct scatterlist *sg,
66308 + int (* const map_sg)(struct device *dev, struct scatterlist *sg,
66309 int nents, enum dma_data_direction dir,
66310 struct dma_attrs *attrs);
66311 - void (*unmap_sg)(struct device *dev,
66312 + void (* const unmap_sg)(struct device *dev,
66313 struct scatterlist *sg, int nents,
66314 enum dma_data_direction dir,
66315 struct dma_attrs *attrs);
66316 - void (*sync_single_for_cpu)(struct device *dev,
66317 + void (* const sync_single_for_cpu)(struct device *dev,
66318 dma_addr_t dma_handle, size_t size,
66319 enum dma_data_direction dir);
66320 - void (*sync_single_for_device)(struct device *dev,
66321 + void (* const sync_single_for_device)(struct device *dev,
66322 dma_addr_t dma_handle, size_t size,
66323 enum dma_data_direction dir);
66324 - void (*sync_single_range_for_cpu)(struct device *dev,
66325 + void (* const sync_single_range_for_cpu)(struct device *dev,
66326 dma_addr_t dma_handle,
66327 unsigned long offset,
66328 size_t size,
66329 enum dma_data_direction dir);
66330 - void (*sync_single_range_for_device)(struct device *dev,
66331 + void (* const sync_single_range_for_device)(struct device *dev,
66332 dma_addr_t dma_handle,
66333 unsigned long offset,
66334 size_t size,
66335 enum dma_data_direction dir);
66336 - void (*sync_sg_for_cpu)(struct device *dev,
66337 + void (* const sync_sg_for_cpu)(struct device *dev,
66338 struct scatterlist *sg, int nents,
66339 enum dma_data_direction dir);
66340 - void (*sync_sg_for_device)(struct device *dev,
66341 + void (* const sync_sg_for_device)(struct device *dev,
66342 struct scatterlist *sg, int nents,
66343 enum dma_data_direction dir);
66344 - int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
66345 - int (*dma_supported)(struct device *dev, u64 mask);
66346 + int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
66347 + int (* const dma_supported)(struct device *dev, u64 mask);
66348 int (*set_dma_mask)(struct device *dev, u64 mask);
66349 int is_phys;
66350 -};
66351 +} __do_const;
66352
66353 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
66354
66355 diff --git a/include/linux/dst.h b/include/linux/dst.h
66356 index e26fed8..b976d9f 100644
66357 --- a/include/linux/dst.h
66358 +++ b/include/linux/dst.h
66359 @@ -380,7 +380,7 @@ struct dst_node
66360 struct thread_pool *pool;
66361
66362 /* Transaction IDs live here */
66363 - atomic_long_t gen;
66364 + atomic_long_unchecked_t gen;
66365
66366 /*
66367 * How frequently and how many times transaction
66368 diff --git a/include/linux/elf.h b/include/linux/elf.h
66369 index 90a4ed0..d652617 100644
66370 --- a/include/linux/elf.h
66371 +++ b/include/linux/elf.h
66372 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
66373 #define PT_GNU_EH_FRAME 0x6474e550
66374
66375 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
66376 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
66377 +
66378 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
66379 +
66380 +/* Constants for the e_flags field */
66381 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
66382 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
66383 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
66384 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
66385 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
66386 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
66387
66388 /* These constants define the different elf file types */
66389 #define ET_NONE 0
66390 @@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
66391 #define DT_DEBUG 21
66392 #define DT_TEXTREL 22
66393 #define DT_JMPREL 23
66394 +#define DT_FLAGS 30
66395 + #define DF_TEXTREL 0x00000004
66396 #define DT_ENCODING 32
66397 #define OLD_DT_LOOS 0x60000000
66398 #define DT_LOOS 0x6000000d
66399 @@ -230,6 +243,19 @@ typedef struct elf64_hdr {
66400 #define PF_W 0x2
66401 #define PF_X 0x1
66402
66403 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
66404 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
66405 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
66406 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
66407 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
66408 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
66409 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
66410 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
66411 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
66412 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
66413 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
66414 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
66415 +
66416 typedef struct elf32_phdr{
66417 Elf32_Word p_type;
66418 Elf32_Off p_offset;
66419 @@ -322,6 +348,8 @@ typedef struct elf64_shdr {
66420 #define EI_OSABI 7
66421 #define EI_PAD 8
66422
66423 +#define EI_PAX 14
66424 +
66425 #define ELFMAG0 0x7f /* EI_MAG */
66426 #define ELFMAG1 'E'
66427 #define ELFMAG2 'L'
66428 @@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
66429 #define elf_phdr elf32_phdr
66430 #define elf_note elf32_note
66431 #define elf_addr_t Elf32_Off
66432 +#define elf_dyn Elf32_Dyn
66433
66434 #else
66435
66436 @@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
66437 #define elf_phdr elf64_phdr
66438 #define elf_note elf64_note
66439 #define elf_addr_t Elf64_Off
66440 +#define elf_dyn Elf64_Dyn
66441
66442 #endif
66443
66444 diff --git a/include/linux/fs.h b/include/linux/fs.h
66445 index 1b9a47a..6fe2934 100644
66446 --- a/include/linux/fs.h
66447 +++ b/include/linux/fs.h
66448 @@ -568,41 +568,41 @@ typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
66449 unsigned long, unsigned long);
66450
66451 struct address_space_operations {
66452 - int (*writepage)(struct page *page, struct writeback_control *wbc);
66453 - int (*readpage)(struct file *, struct page *);
66454 - void (*sync_page)(struct page *);
66455 + int (* const writepage)(struct page *page, struct writeback_control *wbc);
66456 + int (* const readpage)(struct file *, struct page *);
66457 + void (* const sync_page)(struct page *);
66458
66459 /* Write back some dirty pages from this mapping. */
66460 - int (*writepages)(struct address_space *, struct writeback_control *);
66461 + int (* const writepages)(struct address_space *, struct writeback_control *);
66462
66463 /* Set a page dirty. Return true if this dirtied it */
66464 - int (*set_page_dirty)(struct page *page);
66465 + int (* const set_page_dirty)(struct page *page);
66466
66467 - int (*readpages)(struct file *filp, struct address_space *mapping,
66468 + int (* const readpages)(struct file *filp, struct address_space *mapping,
66469 struct list_head *pages, unsigned nr_pages);
66470
66471 - int (*write_begin)(struct file *, struct address_space *mapping,
66472 + int (* const write_begin)(struct file *, struct address_space *mapping,
66473 loff_t pos, unsigned len, unsigned flags,
66474 struct page **pagep, void **fsdata);
66475 - int (*write_end)(struct file *, struct address_space *mapping,
66476 + int (* const write_end)(struct file *, struct address_space *mapping,
66477 loff_t pos, unsigned len, unsigned copied,
66478 struct page *page, void *fsdata);
66479
66480 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
66481 - sector_t (*bmap)(struct address_space *, sector_t);
66482 - void (*invalidatepage) (struct page *, unsigned long);
66483 - int (*releasepage) (struct page *, gfp_t);
66484 - ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
66485 + sector_t (* const bmap)(struct address_space *, sector_t);
66486 + void (* const invalidatepage) (struct page *, unsigned long);
66487 + int (* const releasepage) (struct page *, gfp_t);
66488 + ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
66489 loff_t offset, unsigned long nr_segs);
66490 - int (*get_xip_mem)(struct address_space *, pgoff_t, int,
66491 + int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
66492 void **, unsigned long *);
66493 /* migrate the contents of a page to the specified target */
66494 - int (*migratepage) (struct address_space *,
66495 + int (* const migratepage) (struct address_space *,
66496 struct page *, struct page *);
66497 - int (*launder_page) (struct page *);
66498 - int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
66499 + int (* const launder_page) (struct page *);
66500 + int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
66501 unsigned long);
66502 - int (*error_remove_page)(struct address_space *, struct page *);
66503 + int (* const error_remove_page)(struct address_space *, struct page *);
66504 };
66505
66506 /*
66507 @@ -1031,19 +1031,19 @@ static inline int file_check_writeable(struct file *filp)
66508 typedef struct files_struct *fl_owner_t;
66509
66510 struct file_lock_operations {
66511 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
66512 - void (*fl_release_private)(struct file_lock *);
66513 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
66514 + void (* const fl_release_private)(struct file_lock *);
66515 };
66516
66517 struct lock_manager_operations {
66518 - int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
66519 - void (*fl_notify)(struct file_lock *); /* unblock callback */
66520 - int (*fl_grant)(struct file_lock *, struct file_lock *, int);
66521 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
66522 - void (*fl_release_private)(struct file_lock *);
66523 - void (*fl_break)(struct file_lock *);
66524 - int (*fl_mylease)(struct file_lock *, struct file_lock *);
66525 - int (*fl_change)(struct file_lock **, int);
66526 + int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
66527 + void (* const fl_notify)(struct file_lock *); /* unblock callback */
66528 + int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
66529 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
66530 + void (* const fl_release_private)(struct file_lock *);
66531 + void (* const fl_break)(struct file_lock *);
66532 + int (* const fl_mylease)(struct file_lock *, struct file_lock *);
66533 + int (* const fl_change)(struct file_lock **, int);
66534 };
66535
66536 struct lock_manager {
66537 @@ -1442,7 +1442,7 @@ struct fiemap_extent_info {
66538 unsigned int fi_flags; /* Flags as passed from user */
66539 unsigned int fi_extents_mapped; /* Number of mapped extents */
66540 unsigned int fi_extents_max; /* Size of fiemap_extent array */
66541 - struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
66542 + struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
66543 * array */
66544 };
66545 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
66546 @@ -1512,7 +1512,8 @@ struct file_operations {
66547 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
66548 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
66549 int (*setlease)(struct file *, long, struct file_lock **);
66550 -};
66551 +} __do_const;
66552 +typedef struct file_operations __no_const file_operations_no_const;
66553
66554 struct inode_operations {
66555 int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
66556 @@ -1559,30 +1560,30 @@ extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
66557 unsigned long, loff_t *);
66558
66559 struct super_operations {
66560 - struct inode *(*alloc_inode)(struct super_block *sb);
66561 - void (*destroy_inode)(struct inode *);
66562 + struct inode *(* const alloc_inode)(struct super_block *sb);
66563 + void (* const destroy_inode)(struct inode *);
66564
66565 - void (*dirty_inode) (struct inode *);
66566 - int (*write_inode) (struct inode *, int);
66567 - void (*drop_inode) (struct inode *);
66568 - void (*delete_inode) (struct inode *);
66569 - void (*put_super) (struct super_block *);
66570 - void (*write_super) (struct super_block *);
66571 - int (*sync_fs)(struct super_block *sb, int wait);
66572 - int (*freeze_fs) (struct super_block *);
66573 - int (*unfreeze_fs) (struct super_block *);
66574 - int (*statfs) (struct dentry *, struct kstatfs *);
66575 - int (*remount_fs) (struct super_block *, int *, char *);
66576 - void (*clear_inode) (struct inode *);
66577 - void (*umount_begin) (struct super_block *);
66578 + void (* const dirty_inode) (struct inode *);
66579 + int (* const write_inode) (struct inode *, int);
66580 + void (* const drop_inode) (struct inode *);
66581 + void (* const delete_inode) (struct inode *);
66582 + void (* const put_super) (struct super_block *);
66583 + void (* const write_super) (struct super_block *);
66584 + int (* const sync_fs)(struct super_block *sb, int wait);
66585 + int (* const freeze_fs) (struct super_block *);
66586 + int (* const unfreeze_fs) (struct super_block *);
66587 + int (* const statfs) (struct dentry *, struct kstatfs *);
66588 + int (* const remount_fs) (struct super_block *, int *, char *);
66589 + void (* const clear_inode) (struct inode *);
66590 + void (* const umount_begin) (struct super_block *);
66591
66592 - int (*show_options)(struct seq_file *, struct vfsmount *);
66593 - int (*show_stats)(struct seq_file *, struct vfsmount *);
66594 + int (* const show_options)(struct seq_file *, struct vfsmount *);
66595 + int (* const show_stats)(struct seq_file *, struct vfsmount *);
66596 #ifdef CONFIG_QUOTA
66597 - ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
66598 - ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
66599 + ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
66600 + ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
66601 #endif
66602 - int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
66603 + int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
66604 };
66605
66606 /*
66607 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
66608 index 78a05bf..2a7d3e1 100644
66609 --- a/include/linux/fs_struct.h
66610 +++ b/include/linux/fs_struct.h
66611 @@ -4,7 +4,7 @@
66612 #include <linux/path.h>
66613
66614 struct fs_struct {
66615 - int users;
66616 + atomic_t users;
66617 rwlock_t lock;
66618 int umask;
66619 int in_exec;
66620 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
66621 index 7be0c6f..2f63a2b 100644
66622 --- a/include/linux/fscache-cache.h
66623 +++ b/include/linux/fscache-cache.h
66624 @@ -116,7 +116,7 @@ struct fscache_operation {
66625 #endif
66626 };
66627
66628 -extern atomic_t fscache_op_debug_id;
66629 +extern atomic_unchecked_t fscache_op_debug_id;
66630 extern const struct slow_work_ops fscache_op_slow_work_ops;
66631
66632 extern void fscache_enqueue_operation(struct fscache_operation *);
66633 @@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
66634 fscache_operation_release_t release)
66635 {
66636 atomic_set(&op->usage, 1);
66637 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
66638 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
66639 op->release = release;
66640 INIT_LIST_HEAD(&op->pend_link);
66641 fscache_set_op_state(op, "Init");
66642 diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
66643 index 4d6f47b..00bcedb 100644
66644 --- a/include/linux/fsnotify_backend.h
66645 +++ b/include/linux/fsnotify_backend.h
66646 @@ -86,6 +86,7 @@ struct fsnotify_ops {
66647 void (*freeing_mark)(struct fsnotify_mark_entry *entry, struct fsnotify_group *group);
66648 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
66649 };
66650 +typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
66651
66652 /*
66653 * A group is a "thing" that wants to receive notification about filesystem
66654 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
66655 index 4ec5e67..42f1eb9 100644
66656 --- a/include/linux/ftrace_event.h
66657 +++ b/include/linux/ftrace_event.h
66658 @@ -163,7 +163,7 @@ extern int trace_define_field(struct ftrace_event_call *call,
66659 int filter_type);
66660 extern int trace_define_common_fields(struct ftrace_event_call *call);
66661
66662 -#define is_signed_type(type) (((type)(-1)) < 0)
66663 +#define is_signed_type(type) (((type)(-1)) < (type)1)
66664
66665 int trace_set_clr_event(const char *system, const char *event, int set);
66666
66667 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
66668 index 297df45..b6a74ff 100644
66669 --- a/include/linux/genhd.h
66670 +++ b/include/linux/genhd.h
66671 @@ -161,7 +161,7 @@ struct gendisk {
66672
66673 struct timer_rand_state *random;
66674
66675 - atomic_t sync_io; /* RAID */
66676 + atomic_unchecked_t sync_io; /* RAID */
66677 struct work_struct async_notify;
66678 #ifdef CONFIG_BLK_DEV_INTEGRITY
66679 struct blk_integrity *integrity;
66680 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
66681 new file mode 100644
66682 index 0000000..0dc3943
66683 --- /dev/null
66684 +++ b/include/linux/gracl.h
66685 @@ -0,0 +1,317 @@
66686 +#ifndef GR_ACL_H
66687 +#define GR_ACL_H
66688 +
66689 +#include <linux/grdefs.h>
66690 +#include <linux/resource.h>
66691 +#include <linux/capability.h>
66692 +#include <linux/dcache.h>
66693 +#include <asm/resource.h>
66694 +
66695 +/* Major status information */
66696 +
66697 +#define GR_VERSION "grsecurity 2.2.2"
66698 +#define GRSECURITY_VERSION 0x2202
66699 +
66700 +enum {
66701 + GR_SHUTDOWN = 0,
66702 + GR_ENABLE = 1,
66703 + GR_SPROLE = 2,
66704 + GR_RELOAD = 3,
66705 + GR_SEGVMOD = 4,
66706 + GR_STATUS = 5,
66707 + GR_UNSPROLE = 6,
66708 + GR_PASSSET = 7,
66709 + GR_SPROLEPAM = 8,
66710 +};
66711 +
66712 +/* Password setup definitions
66713 + * kernel/grhash.c */
66714 +enum {
66715 + GR_PW_LEN = 128,
66716 + GR_SALT_LEN = 16,
66717 + GR_SHA_LEN = 32,
66718 +};
66719 +
66720 +enum {
66721 + GR_SPROLE_LEN = 64,
66722 +};
66723 +
66724 +enum {
66725 + GR_NO_GLOB = 0,
66726 + GR_REG_GLOB,
66727 + GR_CREATE_GLOB
66728 +};
66729 +
66730 +#define GR_NLIMITS 32
66731 +
66732 +/* Begin Data Structures */
66733 +
66734 +struct sprole_pw {
66735 + unsigned char *rolename;
66736 + unsigned char salt[GR_SALT_LEN];
66737 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
66738 +};
66739 +
66740 +struct name_entry {
66741 + __u32 key;
66742 + ino_t inode;
66743 + dev_t device;
66744 + char *name;
66745 + __u16 len;
66746 + __u8 deleted;
66747 + struct name_entry *prev;
66748 + struct name_entry *next;
66749 +};
66750 +
66751 +struct inodev_entry {
66752 + struct name_entry *nentry;
66753 + struct inodev_entry *prev;
66754 + struct inodev_entry *next;
66755 +};
66756 +
66757 +struct acl_role_db {
66758 + struct acl_role_label **r_hash;
66759 + __u32 r_size;
66760 +};
66761 +
66762 +struct inodev_db {
66763 + struct inodev_entry **i_hash;
66764 + __u32 i_size;
66765 +};
66766 +
66767 +struct name_db {
66768 + struct name_entry **n_hash;
66769 + __u32 n_size;
66770 +};
66771 +
66772 +struct crash_uid {
66773 + uid_t uid;
66774 + unsigned long expires;
66775 +};
66776 +
66777 +struct gr_hash_struct {
66778 + void **table;
66779 + void **nametable;
66780 + void *first;
66781 + __u32 table_size;
66782 + __u32 used_size;
66783 + int type;
66784 +};
66785 +
66786 +/* Userspace Grsecurity ACL data structures */
66787 +
66788 +struct acl_subject_label {
66789 + char *filename;
66790 + ino_t inode;
66791 + dev_t device;
66792 + __u32 mode;
66793 + kernel_cap_t cap_mask;
66794 + kernel_cap_t cap_lower;
66795 + kernel_cap_t cap_invert_audit;
66796 +
66797 + struct rlimit res[GR_NLIMITS];
66798 + __u32 resmask;
66799 +
66800 + __u8 user_trans_type;
66801 + __u8 group_trans_type;
66802 + uid_t *user_transitions;
66803 + gid_t *group_transitions;
66804 + __u16 user_trans_num;
66805 + __u16 group_trans_num;
66806 +
66807 + __u32 sock_families[2];
66808 + __u32 ip_proto[8];
66809 + __u32 ip_type;
66810 + struct acl_ip_label **ips;
66811 + __u32 ip_num;
66812 + __u32 inaddr_any_override;
66813 +
66814 + __u32 crashes;
66815 + unsigned long expires;
66816 +
66817 + struct acl_subject_label *parent_subject;
66818 + struct gr_hash_struct *hash;
66819 + struct acl_subject_label *prev;
66820 + struct acl_subject_label *next;
66821 +
66822 + struct acl_object_label **obj_hash;
66823 + __u32 obj_hash_size;
66824 + __u16 pax_flags;
66825 +};
66826 +
66827 +struct role_allowed_ip {
66828 + __u32 addr;
66829 + __u32 netmask;
66830 +
66831 + struct role_allowed_ip *prev;
66832 + struct role_allowed_ip *next;
66833 +};
66834 +
66835 +struct role_transition {
66836 + char *rolename;
66837 +
66838 + struct role_transition *prev;
66839 + struct role_transition *next;
66840 +};
66841 +
66842 +struct acl_role_label {
66843 + char *rolename;
66844 + uid_t uidgid;
66845 + __u16 roletype;
66846 +
66847 + __u16 auth_attempts;
66848 + unsigned long expires;
66849 +
66850 + struct acl_subject_label *root_label;
66851 + struct gr_hash_struct *hash;
66852 +
66853 + struct acl_role_label *prev;
66854 + struct acl_role_label *next;
66855 +
66856 + struct role_transition *transitions;
66857 + struct role_allowed_ip *allowed_ips;
66858 + uid_t *domain_children;
66859 + __u16 domain_child_num;
66860 +
66861 + struct acl_subject_label **subj_hash;
66862 + __u32 subj_hash_size;
66863 +};
66864 +
66865 +struct user_acl_role_db {
66866 + struct acl_role_label **r_table;
66867 + __u32 num_pointers; /* Number of allocations to track */
66868 + __u32 num_roles; /* Number of roles */
66869 + __u32 num_domain_children; /* Number of domain children */
66870 + __u32 num_subjects; /* Number of subjects */
66871 + __u32 num_objects; /* Number of objects */
66872 +};
66873 +
66874 +struct acl_object_label {
66875 + char *filename;
66876 + ino_t inode;
66877 + dev_t device;
66878 + __u32 mode;
66879 +
66880 + struct acl_subject_label *nested;
66881 + struct acl_object_label *globbed;
66882 +
66883 + /* next two structures not used */
66884 +
66885 + struct acl_object_label *prev;
66886 + struct acl_object_label *next;
66887 +};
66888 +
66889 +struct acl_ip_label {
66890 + char *iface;
66891 + __u32 addr;
66892 + __u32 netmask;
66893 + __u16 low, high;
66894 + __u8 mode;
66895 + __u32 type;
66896 + __u32 proto[8];
66897 +
66898 + /* next two structures not used */
66899 +
66900 + struct acl_ip_label *prev;
66901 + struct acl_ip_label *next;
66902 +};
66903 +
66904 +struct gr_arg {
66905 + struct user_acl_role_db role_db;
66906 + unsigned char pw[GR_PW_LEN];
66907 + unsigned char salt[GR_SALT_LEN];
66908 + unsigned char sum[GR_SHA_LEN];
66909 + unsigned char sp_role[GR_SPROLE_LEN];
66910 + struct sprole_pw *sprole_pws;
66911 + dev_t segv_device;
66912 + ino_t segv_inode;
66913 + uid_t segv_uid;
66914 + __u16 num_sprole_pws;
66915 + __u16 mode;
66916 +};
66917 +
66918 +struct gr_arg_wrapper {
66919 + struct gr_arg *arg;
66920 + __u32 version;
66921 + __u32 size;
66922 +};
66923 +
66924 +struct subject_map {
66925 + struct acl_subject_label *user;
66926 + struct acl_subject_label *kernel;
66927 + struct subject_map *prev;
66928 + struct subject_map *next;
66929 +};
66930 +
66931 +struct acl_subj_map_db {
66932 + struct subject_map **s_hash;
66933 + __u32 s_size;
66934 +};
66935 +
66936 +/* End Data Structures Section */
66937 +
66938 +/* Hash functions generated by empirical testing by Brad Spengler
66939 + Makes good use of the low bits of the inode. Generally 0-1 times
66940 + in loop for successful match. 0-3 for unsuccessful match.
66941 + Shift/add algorithm with modulus of table size and an XOR*/
66942 +
66943 +static __inline__ unsigned int
66944 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
66945 +{
66946 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
66947 +}
66948 +
66949 + static __inline__ unsigned int
66950 +shash(const struct acl_subject_label *userp, const unsigned int sz)
66951 +{
66952 + return ((const unsigned long)userp % sz);
66953 +}
66954 +
66955 +static __inline__ unsigned int
66956 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
66957 +{
66958 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
66959 +}
66960 +
66961 +static __inline__ unsigned int
66962 +nhash(const char *name, const __u16 len, const unsigned int sz)
66963 +{
66964 + return full_name_hash((const unsigned char *)name, len) % sz;
66965 +}
66966 +
66967 +#define FOR_EACH_ROLE_START(role) \
66968 + role = role_list; \
66969 + while (role) {
66970 +
66971 +#define FOR_EACH_ROLE_END(role) \
66972 + role = role->prev; \
66973 + }
66974 +
66975 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
66976 + subj = NULL; \
66977 + iter = 0; \
66978 + while (iter < role->subj_hash_size) { \
66979 + if (subj == NULL) \
66980 + subj = role->subj_hash[iter]; \
66981 + if (subj == NULL) { \
66982 + iter++; \
66983 + continue; \
66984 + }
66985 +
66986 +#define FOR_EACH_SUBJECT_END(subj,iter) \
66987 + subj = subj->next; \
66988 + if (subj == NULL) \
66989 + iter++; \
66990 + }
66991 +
66992 +
66993 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
66994 + subj = role->hash->first; \
66995 + while (subj != NULL) {
66996 +
66997 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
66998 + subj = subj->next; \
66999 + }
67000 +
67001 +#endif
67002 +
67003 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
67004 new file mode 100644
67005 index 0000000..323ecf2
67006 --- /dev/null
67007 +++ b/include/linux/gralloc.h
67008 @@ -0,0 +1,9 @@
67009 +#ifndef __GRALLOC_H
67010 +#define __GRALLOC_H
67011 +
67012 +void acl_free_all(void);
67013 +int acl_alloc_stack_init(unsigned long size);
67014 +void *acl_alloc(unsigned long len);
67015 +void *acl_alloc_num(unsigned long num, unsigned long len);
67016 +
67017 +#endif
67018 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
67019 new file mode 100644
67020 index 0000000..70d6cd5
67021 --- /dev/null
67022 +++ b/include/linux/grdefs.h
67023 @@ -0,0 +1,140 @@
67024 +#ifndef GRDEFS_H
67025 +#define GRDEFS_H
67026 +
67027 +/* Begin grsecurity status declarations */
67028 +
67029 +enum {
67030 + GR_READY = 0x01,
67031 + GR_STATUS_INIT = 0x00 // disabled state
67032 +};
67033 +
67034 +/* Begin ACL declarations */
67035 +
67036 +/* Role flags */
67037 +
67038 +enum {
67039 + GR_ROLE_USER = 0x0001,
67040 + GR_ROLE_GROUP = 0x0002,
67041 + GR_ROLE_DEFAULT = 0x0004,
67042 + GR_ROLE_SPECIAL = 0x0008,
67043 + GR_ROLE_AUTH = 0x0010,
67044 + GR_ROLE_NOPW = 0x0020,
67045 + GR_ROLE_GOD = 0x0040,
67046 + GR_ROLE_LEARN = 0x0080,
67047 + GR_ROLE_TPE = 0x0100,
67048 + GR_ROLE_DOMAIN = 0x0200,
67049 + GR_ROLE_PAM = 0x0400,
67050 + GR_ROLE_PERSIST = 0x800
67051 +};
67052 +
67053 +/* ACL Subject and Object mode flags */
67054 +enum {
67055 + GR_DELETED = 0x80000000
67056 +};
67057 +
67058 +/* ACL Object-only mode flags */
67059 +enum {
67060 + GR_READ = 0x00000001,
67061 + GR_APPEND = 0x00000002,
67062 + GR_WRITE = 0x00000004,
67063 + GR_EXEC = 0x00000008,
67064 + GR_FIND = 0x00000010,
67065 + GR_INHERIT = 0x00000020,
67066 + GR_SETID = 0x00000040,
67067 + GR_CREATE = 0x00000080,
67068 + GR_DELETE = 0x00000100,
67069 + GR_LINK = 0x00000200,
67070 + GR_AUDIT_READ = 0x00000400,
67071 + GR_AUDIT_APPEND = 0x00000800,
67072 + GR_AUDIT_WRITE = 0x00001000,
67073 + GR_AUDIT_EXEC = 0x00002000,
67074 + GR_AUDIT_FIND = 0x00004000,
67075 + GR_AUDIT_INHERIT= 0x00008000,
67076 + GR_AUDIT_SETID = 0x00010000,
67077 + GR_AUDIT_CREATE = 0x00020000,
67078 + GR_AUDIT_DELETE = 0x00040000,
67079 + GR_AUDIT_LINK = 0x00080000,
67080 + GR_PTRACERD = 0x00100000,
67081 + GR_NOPTRACE = 0x00200000,
67082 + GR_SUPPRESS = 0x00400000,
67083 + GR_NOLEARN = 0x00800000,
67084 + GR_INIT_TRANSFER= 0x01000000
67085 +};
67086 +
67087 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
67088 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
67089 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
67090 +
67091 +/* ACL subject-only mode flags */
67092 +enum {
67093 + GR_KILL = 0x00000001,
67094 + GR_VIEW = 0x00000002,
67095 + GR_PROTECTED = 0x00000004,
67096 + GR_LEARN = 0x00000008,
67097 + GR_OVERRIDE = 0x00000010,
67098 + /* just a placeholder, this mode is only used in userspace */
67099 + GR_DUMMY = 0x00000020,
67100 + GR_PROTSHM = 0x00000040,
67101 + GR_KILLPROC = 0x00000080,
67102 + GR_KILLIPPROC = 0x00000100,
67103 + /* just a placeholder, this mode is only used in userspace */
67104 + GR_NOTROJAN = 0x00000200,
67105 + GR_PROTPROCFD = 0x00000400,
67106 + GR_PROCACCT = 0x00000800,
67107 + GR_RELAXPTRACE = 0x00001000,
67108 + GR_NESTED = 0x00002000,
67109 + GR_INHERITLEARN = 0x00004000,
67110 + GR_PROCFIND = 0x00008000,
67111 + GR_POVERRIDE = 0x00010000,
67112 + GR_KERNELAUTH = 0x00020000,
67113 + GR_ATSECURE = 0x00040000,
67114 + GR_SHMEXEC = 0x00080000
67115 +};
67116 +
67117 +enum {
67118 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
67119 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
67120 + GR_PAX_ENABLE_MPROTECT = 0x0004,
67121 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
67122 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
67123 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
67124 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
67125 + GR_PAX_DISABLE_MPROTECT = 0x0400,
67126 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
67127 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
67128 +};
67129 +
67130 +enum {
67131 + GR_ID_USER = 0x01,
67132 + GR_ID_GROUP = 0x02,
67133 +};
67134 +
67135 +enum {
67136 + GR_ID_ALLOW = 0x01,
67137 + GR_ID_DENY = 0x02,
67138 +};
67139 +
67140 +#define GR_CRASH_RES 31
67141 +#define GR_UIDTABLE_MAX 500
67142 +
67143 +/* begin resource learning section */
67144 +enum {
67145 + GR_RLIM_CPU_BUMP = 60,
67146 + GR_RLIM_FSIZE_BUMP = 50000,
67147 + GR_RLIM_DATA_BUMP = 10000,
67148 + GR_RLIM_STACK_BUMP = 1000,
67149 + GR_RLIM_CORE_BUMP = 10000,
67150 + GR_RLIM_RSS_BUMP = 500000,
67151 + GR_RLIM_NPROC_BUMP = 1,
67152 + GR_RLIM_NOFILE_BUMP = 5,
67153 + GR_RLIM_MEMLOCK_BUMP = 50000,
67154 + GR_RLIM_AS_BUMP = 500000,
67155 + GR_RLIM_LOCKS_BUMP = 2,
67156 + GR_RLIM_SIGPENDING_BUMP = 5,
67157 + GR_RLIM_MSGQUEUE_BUMP = 10000,
67158 + GR_RLIM_NICE_BUMP = 1,
67159 + GR_RLIM_RTPRIO_BUMP = 1,
67160 + GR_RLIM_RTTIME_BUMP = 1000000
67161 +};
67162 +
67163 +#endif
67164 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
67165 new file mode 100644
67166 index 0000000..3826b91
67167 --- /dev/null
67168 +++ b/include/linux/grinternal.h
67169 @@ -0,0 +1,219 @@
67170 +#ifndef __GRINTERNAL_H
67171 +#define __GRINTERNAL_H
67172 +
67173 +#ifdef CONFIG_GRKERNSEC
67174 +
67175 +#include <linux/fs.h>
67176 +#include <linux/mnt_namespace.h>
67177 +#include <linux/nsproxy.h>
67178 +#include <linux/gracl.h>
67179 +#include <linux/grdefs.h>
67180 +#include <linux/grmsg.h>
67181 +
67182 +void gr_add_learn_entry(const char *fmt, ...)
67183 + __attribute__ ((format (printf, 1, 2)));
67184 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
67185 + const struct vfsmount *mnt);
67186 +__u32 gr_check_create(const struct dentry *new_dentry,
67187 + const struct dentry *parent,
67188 + const struct vfsmount *mnt, const __u32 mode);
67189 +int gr_check_protected_task(const struct task_struct *task);
67190 +__u32 to_gr_audit(const __u32 reqmode);
67191 +int gr_set_acls(const int type);
67192 +int gr_apply_subject_to_task(struct task_struct *task);
67193 +int gr_acl_is_enabled(void);
67194 +char gr_roletype_to_char(void);
67195 +
67196 +void gr_handle_alertkill(struct task_struct *task);
67197 +char *gr_to_filename(const struct dentry *dentry,
67198 + const struct vfsmount *mnt);
67199 +char *gr_to_filename1(const struct dentry *dentry,
67200 + const struct vfsmount *mnt);
67201 +char *gr_to_filename2(const struct dentry *dentry,
67202 + const struct vfsmount *mnt);
67203 +char *gr_to_filename3(const struct dentry *dentry,
67204 + const struct vfsmount *mnt);
67205 +
67206 +extern int grsec_enable_ptrace_readexec;
67207 +extern int grsec_enable_harden_ptrace;
67208 +extern int grsec_enable_link;
67209 +extern int grsec_enable_fifo;
67210 +extern int grsec_enable_shm;
67211 +extern int grsec_enable_execlog;
67212 +extern int grsec_enable_signal;
67213 +extern int grsec_enable_audit_ptrace;
67214 +extern int grsec_enable_forkfail;
67215 +extern int grsec_enable_time;
67216 +extern int grsec_enable_rofs;
67217 +extern int grsec_enable_chroot_shmat;
67218 +extern int grsec_enable_chroot_mount;
67219 +extern int grsec_enable_chroot_double;
67220 +extern int grsec_enable_chroot_pivot;
67221 +extern int grsec_enable_chroot_chdir;
67222 +extern int grsec_enable_chroot_chmod;
67223 +extern int grsec_enable_chroot_mknod;
67224 +extern int grsec_enable_chroot_fchdir;
67225 +extern int grsec_enable_chroot_nice;
67226 +extern int grsec_enable_chroot_execlog;
67227 +extern int grsec_enable_chroot_caps;
67228 +extern int grsec_enable_chroot_sysctl;
67229 +extern int grsec_enable_chroot_unix;
67230 +extern int grsec_enable_tpe;
67231 +extern int grsec_tpe_gid;
67232 +extern int grsec_enable_tpe_all;
67233 +extern int grsec_enable_tpe_invert;
67234 +extern int grsec_enable_socket_all;
67235 +extern int grsec_socket_all_gid;
67236 +extern int grsec_enable_socket_client;
67237 +extern int grsec_socket_client_gid;
67238 +extern int grsec_enable_socket_server;
67239 +extern int grsec_socket_server_gid;
67240 +extern int grsec_audit_gid;
67241 +extern int grsec_enable_group;
67242 +extern int grsec_enable_audit_textrel;
67243 +extern int grsec_enable_log_rwxmaps;
67244 +extern int grsec_enable_mount;
67245 +extern int grsec_enable_chdir;
67246 +extern int grsec_resource_logging;
67247 +extern int grsec_enable_blackhole;
67248 +extern int grsec_lastack_retries;
67249 +extern int grsec_enable_brute;
67250 +extern int grsec_lock;
67251 +
67252 +extern spinlock_t grsec_alert_lock;
67253 +extern unsigned long grsec_alert_wtime;
67254 +extern unsigned long grsec_alert_fyet;
67255 +
67256 +extern spinlock_t grsec_audit_lock;
67257 +
67258 +extern rwlock_t grsec_exec_file_lock;
67259 +
67260 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
67261 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
67262 + (tsk)->exec_file->f_vfsmnt) : "/")
67263 +
67264 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
67265 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
67266 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
67267 +
67268 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
67269 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
67270 + (tsk)->exec_file->f_vfsmnt) : "/")
67271 +
67272 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
67273 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
67274 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
67275 +
67276 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
67277 +
67278 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
67279 +
67280 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
67281 + (task)->pid, (cred)->uid, \
67282 + (cred)->euid, (cred)->gid, (cred)->egid, \
67283 + gr_parent_task_fullpath(task), \
67284 + (task)->real_parent->comm, (task)->real_parent->pid, \
67285 + (pcred)->uid, (pcred)->euid, \
67286 + (pcred)->gid, (pcred)->egid
67287 +
67288 +#define GR_CHROOT_CAPS {{ \
67289 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
67290 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
67291 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
67292 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
67293 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
67294 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
67295 + CAP_TO_MASK(CAP_MAC_ADMIN) }}
67296 +
67297 +#define security_learn(normal_msg,args...) \
67298 +({ \
67299 + read_lock(&grsec_exec_file_lock); \
67300 + gr_add_learn_entry(normal_msg "\n", ## args); \
67301 + read_unlock(&grsec_exec_file_lock); \
67302 +})
67303 +
67304 +enum {
67305 + GR_DO_AUDIT,
67306 + GR_DONT_AUDIT,
67307 + GR_DONT_AUDIT_GOOD
67308 +};
67309 +
67310 +enum {
67311 + GR_TTYSNIFF,
67312 + GR_RBAC,
67313 + GR_RBAC_STR,
67314 + GR_STR_RBAC,
67315 + GR_RBAC_MODE2,
67316 + GR_RBAC_MODE3,
67317 + GR_FILENAME,
67318 + GR_SYSCTL_HIDDEN,
67319 + GR_NOARGS,
67320 + GR_ONE_INT,
67321 + GR_ONE_INT_TWO_STR,
67322 + GR_ONE_STR,
67323 + GR_STR_INT,
67324 + GR_TWO_STR_INT,
67325 + GR_TWO_INT,
67326 + GR_TWO_U64,
67327 + GR_THREE_INT,
67328 + GR_FIVE_INT_TWO_STR,
67329 + GR_TWO_STR,
67330 + GR_THREE_STR,
67331 + GR_FOUR_STR,
67332 + GR_STR_FILENAME,
67333 + GR_FILENAME_STR,
67334 + GR_FILENAME_TWO_INT,
67335 + GR_FILENAME_TWO_INT_STR,
67336 + GR_TEXTREL,
67337 + GR_PTRACE,
67338 + GR_RESOURCE,
67339 + GR_CAP,
67340 + GR_SIG,
67341 + GR_SIG2,
67342 + GR_CRASH1,
67343 + GR_CRASH2,
67344 + GR_PSACCT,
67345 + GR_RWXMAP
67346 +};
67347 +
67348 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
67349 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
67350 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
67351 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
67352 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
67353 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
67354 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
67355 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
67356 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
67357 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
67358 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
67359 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
67360 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
67361 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
67362 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
67363 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
67364 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
67365 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
67366 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
67367 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
67368 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
67369 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
67370 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
67371 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
67372 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
67373 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
67374 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
67375 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
67376 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
67377 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
67378 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
67379 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
67380 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
67381 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
67382 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
67383 +
67384 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
67385 +
67386 +#endif
67387 +
67388 +#endif
67389 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
67390 new file mode 100644
67391 index 0000000..b3347e2
67392 --- /dev/null
67393 +++ b/include/linux/grmsg.h
67394 @@ -0,0 +1,109 @@
67395 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
67396 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
67397 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
67398 +#define GR_STOPMOD_MSG "denied modification of module state by "
67399 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
67400 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
67401 +#define GR_IOPERM_MSG "denied use of ioperm() by "
67402 +#define GR_IOPL_MSG "denied use of iopl() by "
67403 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
67404 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
67405 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
67406 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
67407 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
67408 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
67409 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
67410 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
67411 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
67412 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
67413 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
67414 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
67415 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
67416 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
67417 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
67418 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
67419 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
67420 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
67421 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
67422 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
67423 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
67424 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
67425 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
67426 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
67427 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
67428 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
67429 +#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.64s) of %.950s by "
67430 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
67431 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
67432 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
67433 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
67434 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
67435 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
67436 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
67437 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
67438 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
67439 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
67440 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
67441 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
67442 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
67443 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
67444 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
67445 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
67446 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
67447 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
67448 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
67449 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
67450 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
67451 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
67452 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
67453 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
67454 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
67455 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
67456 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
67457 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
67458 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
67459 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
67460 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
67461 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
67462 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
67463 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
67464 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
67465 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
67466 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
67467 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
67468 +#define GR_NICE_CHROOT_MSG "denied priority change by "
67469 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
67470 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
67471 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
67472 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
67473 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
67474 +#define GR_TIME_MSG "time set by "
67475 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
67476 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
67477 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
67478 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
67479 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
67480 +#define GR_BIND_MSG "denied bind() by "
67481 +#define GR_CONNECT_MSG "denied connect() by "
67482 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
67483 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
67484 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
67485 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
67486 +#define GR_CAP_ACL_MSG "use of %s denied for "
67487 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
67488 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
67489 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
67490 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
67491 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
67492 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
67493 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
67494 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
67495 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
67496 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
67497 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
67498 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
67499 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
67500 +#define GR_VM86_MSG "denied use of vm86 by "
67501 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
67502 +#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
67503 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
67504 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
67505 new file mode 100644
67506 index 0000000..ebba836
67507 --- /dev/null
67508 +++ b/include/linux/grsecurity.h
67509 @@ -0,0 +1,223 @@
67510 +#ifndef GR_SECURITY_H
67511 +#define GR_SECURITY_H
67512 +#include <linux/fs.h>
67513 +#include <linux/fs_struct.h>
67514 +#include <linux/binfmts.h>
67515 +#include <linux/gracl.h>
67516 +#include <linux/compat.h>
67517 +
67518 +/* notify of brain-dead configs */
67519 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67520 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
67521 +#endif
67522 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
67523 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
67524 +#endif
67525 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
67526 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
67527 +#endif
67528 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
67529 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
67530 +#endif
67531 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
67532 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
67533 +#endif
67534 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
67535 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
67536 +#endif
67537 +
67538 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
67539 +void gr_handle_brute_check(void);
67540 +void gr_handle_kernel_exploit(void);
67541 +int gr_process_user_ban(void);
67542 +
67543 +char gr_roletype_to_char(void);
67544 +
67545 +int gr_acl_enable_at_secure(void);
67546 +
67547 +int gr_check_user_change(int real, int effective, int fs);
67548 +int gr_check_group_change(int real, int effective, int fs);
67549 +
67550 +void gr_del_task_from_ip_table(struct task_struct *p);
67551 +
67552 +int gr_pid_is_chrooted(struct task_struct *p);
67553 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
67554 +int gr_handle_chroot_nice(void);
67555 +int gr_handle_chroot_sysctl(const int op);
67556 +int gr_handle_chroot_setpriority(struct task_struct *p,
67557 + const int niceval);
67558 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
67559 +int gr_handle_chroot_chroot(const struct dentry *dentry,
67560 + const struct vfsmount *mnt);
67561 +void gr_handle_chroot_chdir(struct path *path);
67562 +int gr_handle_chroot_chmod(const struct dentry *dentry,
67563 + const struct vfsmount *mnt, const int mode);
67564 +int gr_handle_chroot_mknod(const struct dentry *dentry,
67565 + const struct vfsmount *mnt, const int mode);
67566 +int gr_handle_chroot_mount(const struct dentry *dentry,
67567 + const struct vfsmount *mnt,
67568 + const char *dev_name);
67569 +int gr_handle_chroot_pivot(void);
67570 +int gr_handle_chroot_unix(const pid_t pid);
67571 +
67572 +int gr_handle_rawio(const struct inode *inode);
67573 +
67574 +void gr_handle_ioperm(void);
67575 +void gr_handle_iopl(void);
67576 +
67577 +int gr_tpe_allow(const struct file *file);
67578 +
67579 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
67580 +void gr_clear_chroot_entries(struct task_struct *task);
67581 +
67582 +void gr_log_forkfail(const int retval);
67583 +void gr_log_timechange(void);
67584 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
67585 +void gr_log_chdir(const struct dentry *dentry,
67586 + const struct vfsmount *mnt);
67587 +void gr_log_chroot_exec(const struct dentry *dentry,
67588 + const struct vfsmount *mnt);
67589 +void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
67590 +#ifdef CONFIG_COMPAT
67591 +void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
67592 +#endif
67593 +void gr_log_remount(const char *devname, const int retval);
67594 +void gr_log_unmount(const char *devname, const int retval);
67595 +void gr_log_mount(const char *from, const char *to, const int retval);
67596 +void gr_log_textrel(struct vm_area_struct *vma);
67597 +void gr_log_rwxmmap(struct file *file);
67598 +void gr_log_rwxmprotect(struct file *file);
67599 +
67600 +int gr_handle_follow_link(const struct inode *parent,
67601 + const struct inode *inode,
67602 + const struct dentry *dentry,
67603 + const struct vfsmount *mnt);
67604 +int gr_handle_fifo(const struct dentry *dentry,
67605 + const struct vfsmount *mnt,
67606 + const struct dentry *dir, const int flag,
67607 + const int acc_mode);
67608 +int gr_handle_hardlink(const struct dentry *dentry,
67609 + const struct vfsmount *mnt,
67610 + struct inode *inode,
67611 + const int mode, const char *to);
67612 +
67613 +int gr_is_capable(const int cap);
67614 +int gr_is_capable_nolog(const int cap);
67615 +void gr_learn_resource(const struct task_struct *task, const int limit,
67616 + const unsigned long wanted, const int gt);
67617 +void gr_copy_label(struct task_struct *tsk);
67618 +void gr_handle_crash(struct task_struct *task, const int sig);
67619 +int gr_handle_signal(const struct task_struct *p, const int sig);
67620 +int gr_check_crash_uid(const uid_t uid);
67621 +int gr_check_protected_task(const struct task_struct *task);
67622 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
67623 +int gr_acl_handle_mmap(const struct file *file,
67624 + const unsigned long prot);
67625 +int gr_acl_handle_mprotect(const struct file *file,
67626 + const unsigned long prot);
67627 +int gr_check_hidden_task(const struct task_struct *tsk);
67628 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
67629 + const struct vfsmount *mnt);
67630 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
67631 + const struct vfsmount *mnt);
67632 +__u32 gr_acl_handle_access(const struct dentry *dentry,
67633 + const struct vfsmount *mnt, const int fmode);
67634 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
67635 + const struct vfsmount *mnt, mode_t mode);
67636 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
67637 + const struct vfsmount *mnt, mode_t mode);
67638 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
67639 + const struct vfsmount *mnt);
67640 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
67641 + const struct vfsmount *mnt);
67642 +int gr_handle_ptrace(struct task_struct *task, const long request);
67643 +int gr_handle_proc_ptrace(struct task_struct *task);
67644 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
67645 + const struct vfsmount *mnt);
67646 +int gr_check_crash_exec(const struct file *filp);
67647 +int gr_acl_is_enabled(void);
67648 +void gr_set_kernel_label(struct task_struct *task);
67649 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
67650 + const gid_t gid);
67651 +int gr_set_proc_label(const struct dentry *dentry,
67652 + const struct vfsmount *mnt,
67653 + const int unsafe_flags);
67654 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
67655 + const struct vfsmount *mnt);
67656 +__u32 gr_acl_handle_open(const struct dentry *dentry,
67657 + const struct vfsmount *mnt, int acc_mode);
67658 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
67659 + const struct dentry *p_dentry,
67660 + const struct vfsmount *p_mnt,
67661 + int open_flags, int acc_mode, const int imode);
67662 +void gr_handle_create(const struct dentry *dentry,
67663 + const struct vfsmount *mnt);
67664 +void gr_handle_proc_create(const struct dentry *dentry,
67665 + const struct inode *inode);
67666 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
67667 + const struct dentry *parent_dentry,
67668 + const struct vfsmount *parent_mnt,
67669 + const int mode);
67670 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
67671 + const struct dentry *parent_dentry,
67672 + const struct vfsmount *parent_mnt);
67673 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
67674 + const struct vfsmount *mnt);
67675 +void gr_handle_delete(const ino_t ino, const dev_t dev);
67676 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
67677 + const struct vfsmount *mnt);
67678 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
67679 + const struct dentry *parent_dentry,
67680 + const struct vfsmount *parent_mnt,
67681 + const char *from);
67682 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
67683 + const struct dentry *parent_dentry,
67684 + const struct vfsmount *parent_mnt,
67685 + const struct dentry *old_dentry,
67686 + const struct vfsmount *old_mnt, const char *to);
67687 +int gr_acl_handle_rename(struct dentry *new_dentry,
67688 + struct dentry *parent_dentry,
67689 + const struct vfsmount *parent_mnt,
67690 + struct dentry *old_dentry,
67691 + struct inode *old_parent_inode,
67692 + struct vfsmount *old_mnt, const char *newname);
67693 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
67694 + struct dentry *old_dentry,
67695 + struct dentry *new_dentry,
67696 + struct vfsmount *mnt, const __u8 replace);
67697 +__u32 gr_check_link(const struct dentry *new_dentry,
67698 + const struct dentry *parent_dentry,
67699 + const struct vfsmount *parent_mnt,
67700 + const struct dentry *old_dentry,
67701 + const struct vfsmount *old_mnt);
67702 +int gr_acl_handle_filldir(const struct file *file, const char *name,
67703 + const unsigned int namelen, const ino_t ino);
67704 +
67705 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
67706 + const struct vfsmount *mnt);
67707 +void gr_acl_handle_exit(void);
67708 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
67709 +int gr_acl_handle_procpidmem(const struct task_struct *task);
67710 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
67711 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
67712 +void gr_audit_ptrace(struct task_struct *task);
67713 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
67714 +
67715 +int gr_ptrace_readexec(struct file *file, int unsafe_flags);
67716 +
67717 +#ifdef CONFIG_GRKERNSEC
67718 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
67719 +void gr_handle_vm86(void);
67720 +void gr_handle_mem_readwrite(u64 from, u64 to);
67721 +
67722 +extern int grsec_enable_dmesg;
67723 +extern int grsec_disable_privio;
67724 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
67725 +extern int grsec_enable_chroot_findtask;
67726 +#endif
67727 +#ifdef CONFIG_GRKERNSEC_SETXID
67728 +extern int grsec_enable_setxid;
67729 +#endif
67730 +#endif
67731 +
67732 +#endif
67733 diff --git a/include/linux/hdpu_features.h b/include/linux/hdpu_features.h
67734 index 6a87154..a3ce57b 100644
67735 --- a/include/linux/hdpu_features.h
67736 +++ b/include/linux/hdpu_features.h
67737 @@ -3,7 +3,7 @@
67738 struct cpustate_t {
67739 spinlock_t lock;
67740 int excl;
67741 - int open_count;
67742 + atomic_t open_count;
67743 unsigned char cached_val;
67744 int inited;
67745 unsigned long *set_addr;
67746 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
67747 index 211ff44..00ab6d7 100644
67748 --- a/include/linux/highmem.h
67749 +++ b/include/linux/highmem.h
67750 @@ -137,6 +137,18 @@ static inline void clear_highpage(struct page *page)
67751 kunmap_atomic(kaddr, KM_USER0);
67752 }
67753
67754 +static inline void sanitize_highpage(struct page *page)
67755 +{
67756 + void *kaddr;
67757 + unsigned long flags;
67758 +
67759 + local_irq_save(flags);
67760 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
67761 + clear_page(kaddr);
67762 + kunmap_atomic(kaddr, KM_CLEARPAGE);
67763 + local_irq_restore(flags);
67764 +}
67765 +
67766 static inline void zero_user_segments(struct page *page,
67767 unsigned start1, unsigned end1,
67768 unsigned start2, unsigned end2)
67769 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
67770 index 7b40cda..24eb44e 100644
67771 --- a/include/linux/i2c.h
67772 +++ b/include/linux/i2c.h
67773 @@ -325,6 +325,7 @@ struct i2c_algorithm {
67774 /* To determine what the adapter supports */
67775 u32 (*functionality) (struct i2c_adapter *);
67776 };
67777 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
67778
67779 /*
67780 * i2c_adapter is the structure used to identify a physical i2c bus along
67781 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
67782 index 4c4e57d..f3c5303 100644
67783 --- a/include/linux/i2o.h
67784 +++ b/include/linux/i2o.h
67785 @@ -564,7 +564,7 @@ struct i2o_controller {
67786 struct i2o_device *exec; /* Executive */
67787 #if BITS_PER_LONG == 64
67788 spinlock_t context_list_lock; /* lock for context_list */
67789 - atomic_t context_list_counter; /* needed for unique contexts */
67790 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
67791 struct list_head context_list; /* list of context id's
67792 and pointers */
67793 #endif
67794 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
67795 index 21a6f5d..dc42eab 100644
67796 --- a/include/linux/init_task.h
67797 +++ b/include/linux/init_task.h
67798 @@ -83,6 +83,12 @@ extern struct group_info init_groups;
67799 #define INIT_IDS
67800 #endif
67801
67802 +#ifdef CONFIG_X86
67803 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
67804 +#else
67805 +#define INIT_TASK_THREAD_INFO
67806 +#endif
67807 +
67808 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
67809 /*
67810 * Because of the reduced scope of CAP_SETPCAP when filesystem
67811 @@ -156,6 +162,7 @@ extern struct cred init_cred;
67812 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
67813 .comm = "swapper", \
67814 .thread = INIT_THREAD, \
67815 + INIT_TASK_THREAD_INFO \
67816 .fs = &init_fs, \
67817 .files = &init_files, \
67818 .signal = &init_signals, \
67819 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
67820 index 4f0a72a..a849599 100644
67821 --- a/include/linux/intel-iommu.h
67822 +++ b/include/linux/intel-iommu.h
67823 @@ -296,7 +296,7 @@ struct iommu_flush {
67824 u8 fm, u64 type);
67825 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
67826 unsigned int size_order, u64 type);
67827 -};
67828 +} __no_const;
67829
67830 enum {
67831 SR_DMAR_FECTL_REG,
67832 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
67833 index c739150..be577b5 100644
67834 --- a/include/linux/interrupt.h
67835 +++ b/include/linux/interrupt.h
67836 @@ -369,7 +369,7 @@ enum
67837 /* map softirq index to softirq name. update 'softirq_to_name' in
67838 * kernel/softirq.c when adding a new softirq.
67839 */
67840 -extern char *softirq_to_name[NR_SOFTIRQS];
67841 +extern const char * const softirq_to_name[NR_SOFTIRQS];
67842
67843 /* softirq mask and active fields moved to irq_cpustat_t in
67844 * asm/hardirq.h to get better cache usage. KAO
67845 @@ -377,12 +377,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
67846
67847 struct softirq_action
67848 {
67849 - void (*action)(struct softirq_action *);
67850 + void (*action)(void);
67851 };
67852
67853 asmlinkage void do_softirq(void);
67854 asmlinkage void __do_softirq(void);
67855 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
67856 +extern void open_softirq(int nr, void (*action)(void));
67857 extern void softirq_init(void);
67858 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
67859 extern void raise_softirq_irqoff(unsigned int nr);
67860 diff --git a/include/linux/irq.h b/include/linux/irq.h
67861 index 9e5f45a..025865b 100644
67862 --- a/include/linux/irq.h
67863 +++ b/include/linux/irq.h
67864 @@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
67865 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
67866 bool boot)
67867 {
67868 +#ifdef CONFIG_CPUMASK_OFFSTACK
67869 gfp_t gfp = GFP_ATOMIC;
67870
67871 if (boot)
67872 gfp = GFP_NOWAIT;
67873
67874 -#ifdef CONFIG_CPUMASK_OFFSTACK
67875 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
67876 return false;
67877
67878 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
67879 index 7922742..27306a2 100644
67880 --- a/include/linux/kallsyms.h
67881 +++ b/include/linux/kallsyms.h
67882 @@ -15,7 +15,8 @@
67883
67884 struct module;
67885
67886 -#ifdef CONFIG_KALLSYMS
67887 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
67888 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67889 /* Lookup the address for a symbol. Returns 0 if not found. */
67890 unsigned long kallsyms_lookup_name(const char *name);
67891
67892 @@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
67893 /* Stupid that this does nothing, but I didn't create this mess. */
67894 #define __print_symbol(fmt, addr)
67895 #endif /*CONFIG_KALLSYMS*/
67896 +#else /* when included by kallsyms.c, vsnprintf.c, or
67897 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
67898 +extern void __print_symbol(const char *fmt, unsigned long address);
67899 +extern int sprint_symbol(char *buffer, unsigned long address);
67900 +const char *kallsyms_lookup(unsigned long addr,
67901 + unsigned long *symbolsize,
67902 + unsigned long *offset,
67903 + char **modname, char *namebuf);
67904 +#endif
67905
67906 /* This macro allows us to keep printk typechecking */
67907 static void __check_printsym_format(const char *fmt, ...)
67908 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
67909 index 6adcc29..13369e8 100644
67910 --- a/include/linux/kgdb.h
67911 +++ b/include/linux/kgdb.h
67912 @@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
67913
67914 extern int kgdb_connected;
67915
67916 -extern atomic_t kgdb_setting_breakpoint;
67917 -extern atomic_t kgdb_cpu_doing_single_step;
67918 +extern atomic_unchecked_t kgdb_setting_breakpoint;
67919 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
67920
67921 extern struct task_struct *kgdb_usethread;
67922 extern struct task_struct *kgdb_contthread;
67923 @@ -235,7 +235,7 @@ struct kgdb_arch {
67924 int (*remove_hw_breakpoint)(unsigned long, int, enum kgdb_bptype);
67925 void (*remove_all_hw_break)(void);
67926 void (*correct_hw_break)(void);
67927 -};
67928 +} __do_const;
67929
67930 /**
67931 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
67932 @@ -257,14 +257,14 @@ struct kgdb_io {
67933 int (*init) (void);
67934 void (*pre_exception) (void);
67935 void (*post_exception) (void);
67936 -};
67937 +} __do_const;
67938
67939 -extern struct kgdb_arch arch_kgdb_ops;
67940 +extern const struct kgdb_arch arch_kgdb_ops;
67941
67942 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
67943
67944 -extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
67945 -extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
67946 +extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
67947 +extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
67948
67949 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
67950 extern int kgdb_mem2hex(char *mem, char *buf, int count);
67951 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
67952 index 384ca8b..83dd97d 100644
67953 --- a/include/linux/kmod.h
67954 +++ b/include/linux/kmod.h
67955 @@ -31,6 +31,8 @@
67956 * usually useless though. */
67957 extern int __request_module(bool wait, const char *name, ...) \
67958 __attribute__((format(printf, 2, 3)));
67959 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
67960 + __attribute__((format(printf, 3, 4)));
67961 #define request_module(mod...) __request_module(true, mod)
67962 #define request_module_nowait(mod...) __request_module(false, mod)
67963 #define try_then_request_module(x, mod...) \
67964 diff --git a/include/linux/kobject.h b/include/linux/kobject.h
67965 index 58ae8e0..3950d3c 100644
67966 --- a/include/linux/kobject.h
67967 +++ b/include/linux/kobject.h
67968 @@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
67969
67970 struct kobj_type {
67971 void (*release)(struct kobject *kobj);
67972 - struct sysfs_ops *sysfs_ops;
67973 + const struct sysfs_ops *sysfs_ops;
67974 struct attribute **default_attrs;
67975 };
67976
67977 @@ -118,9 +118,9 @@ struct kobj_uevent_env {
67978 };
67979
67980 struct kset_uevent_ops {
67981 - int (*filter)(struct kset *kset, struct kobject *kobj);
67982 - const char *(*name)(struct kset *kset, struct kobject *kobj);
67983 - int (*uevent)(struct kset *kset, struct kobject *kobj,
67984 + int (* const filter)(struct kset *kset, struct kobject *kobj);
67985 + const char *(* const name)(struct kset *kset, struct kobject *kobj);
67986 + int (* const uevent)(struct kset *kset, struct kobject *kobj,
67987 struct kobj_uevent_env *env);
67988 };
67989
67990 @@ -132,7 +132,7 @@ struct kobj_attribute {
67991 const char *buf, size_t count);
67992 };
67993
67994 -extern struct sysfs_ops kobj_sysfs_ops;
67995 +extern const struct sysfs_ops kobj_sysfs_ops;
67996
67997 /**
67998 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
67999 @@ -155,14 +155,14 @@ struct kset {
68000 struct list_head list;
68001 spinlock_t list_lock;
68002 struct kobject kobj;
68003 - struct kset_uevent_ops *uevent_ops;
68004 + const struct kset_uevent_ops *uevent_ops;
68005 };
68006
68007 extern void kset_init(struct kset *kset);
68008 extern int __must_check kset_register(struct kset *kset);
68009 extern void kset_unregister(struct kset *kset);
68010 extern struct kset * __must_check kset_create_and_add(const char *name,
68011 - struct kset_uevent_ops *u,
68012 + const struct kset_uevent_ops *u,
68013 struct kobject *parent_kobj);
68014
68015 static inline struct kset *to_kset(struct kobject *kobj)
68016 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
68017 index c728a50..752d821 100644
68018 --- a/include/linux/kvm_host.h
68019 +++ b/include/linux/kvm_host.h
68020 @@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
68021 void vcpu_load(struct kvm_vcpu *vcpu);
68022 void vcpu_put(struct kvm_vcpu *vcpu);
68023
68024 -int kvm_init(void *opaque, unsigned int vcpu_size,
68025 +int kvm_init(const void *opaque, unsigned int vcpu_size,
68026 struct module *module);
68027 void kvm_exit(void);
68028
68029 @@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
68030 struct kvm_guest_debug *dbg);
68031 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
68032
68033 -int kvm_arch_init(void *opaque);
68034 +int kvm_arch_init(const void *opaque);
68035 void kvm_arch_exit(void);
68036
68037 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
68038 diff --git a/include/linux/libata.h b/include/linux/libata.h
68039 index a069916..223edde 100644
68040 --- a/include/linux/libata.h
68041 +++ b/include/linux/libata.h
68042 @@ -525,11 +525,11 @@ struct ata_ioports {
68043
68044 struct ata_host {
68045 spinlock_t lock;
68046 - struct device *dev;
68047 + struct device *dev;
68048 void __iomem * const *iomap;
68049 unsigned int n_ports;
68050 void *private_data;
68051 - struct ata_port_operations *ops;
68052 + const struct ata_port_operations *ops;
68053 unsigned long flags;
68054 #ifdef CONFIG_ATA_ACPI
68055 acpi_handle acpi_handle;
68056 @@ -710,7 +710,7 @@ struct ata_link {
68057
68058 struct ata_port {
68059 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
68060 - struct ata_port_operations *ops;
68061 + const struct ata_port_operations *ops;
68062 spinlock_t *lock;
68063 /* Flags owned by the EH context. Only EH should touch these once the
68064 port is active */
68065 @@ -884,7 +884,7 @@ struct ata_port_operations {
68066 * fields must be pointers.
68067 */
68068 const struct ata_port_operations *inherits;
68069 -};
68070 +} __do_const;
68071
68072 struct ata_port_info {
68073 unsigned long flags;
68074 @@ -892,7 +892,7 @@ struct ata_port_info {
68075 unsigned long pio_mask;
68076 unsigned long mwdma_mask;
68077 unsigned long udma_mask;
68078 - struct ata_port_operations *port_ops;
68079 + const struct ata_port_operations *port_ops;
68080 void *private_data;
68081 };
68082
68083 @@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timing_normal[];
68084 extern const unsigned long sata_deb_timing_hotplug[];
68085 extern const unsigned long sata_deb_timing_long[];
68086
68087 -extern struct ata_port_operations ata_dummy_port_ops;
68088 +extern const struct ata_port_operations ata_dummy_port_ops;
68089 extern const struct ata_port_info ata_dummy_port_info;
68090
68091 static inline const unsigned long *
68092 @@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_host *host, int irq,
68093 struct scsi_host_template *sht);
68094 extern void ata_host_detach(struct ata_host *host);
68095 extern void ata_host_init(struct ata_host *, struct device *,
68096 - unsigned long, struct ata_port_operations *);
68097 + unsigned long, const struct ata_port_operations *);
68098 extern int ata_scsi_detect(struct scsi_host_template *sht);
68099 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
68100 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
68101 diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
68102 index fbc48f8..0886e57 100644
68103 --- a/include/linux/lockd/bind.h
68104 +++ b/include/linux/lockd/bind.h
68105 @@ -23,13 +23,13 @@ struct svc_rqst;
68106 * This is the set of functions for lockd->nfsd communication
68107 */
68108 struct nlmsvc_binding {
68109 - __be32 (*fopen)(struct svc_rqst *,
68110 + __be32 (* const fopen)(struct svc_rqst *,
68111 struct nfs_fh *,
68112 struct file **);
68113 - void (*fclose)(struct file *);
68114 + void (* const fclose)(struct file *);
68115 };
68116
68117 -extern struct nlmsvc_binding * nlmsvc_ops;
68118 +extern const struct nlmsvc_binding * nlmsvc_ops;
68119
68120 /*
68121 * Similar to nfs_client_initdata, but without the NFS-specific
68122 diff --git a/include/linux/mca.h b/include/linux/mca.h
68123 index 3797270..7765ede 100644
68124 --- a/include/linux/mca.h
68125 +++ b/include/linux/mca.h
68126 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
68127 int region);
68128 void * (*mca_transform_memory)(struct mca_device *,
68129 void *memory);
68130 -};
68131 +} __no_const;
68132
68133 struct mca_bus {
68134 u64 default_dma_mask;
68135 diff --git a/include/linux/memory.h b/include/linux/memory.h
68136 index 37fa19b..b597c85 100644
68137 --- a/include/linux/memory.h
68138 +++ b/include/linux/memory.h
68139 @@ -108,7 +108,7 @@ struct memory_accessor {
68140 size_t count);
68141 ssize_t (*write)(struct memory_accessor *, const char *buf,
68142 off_t offset, size_t count);
68143 -};
68144 +} __no_const;
68145
68146 /*
68147 * Kernel text modification mutex, used for code patching. Users of this lock
68148 diff --git a/include/linux/mm.h b/include/linux/mm.h
68149 index 11e5be6..1ff2423 100644
68150 --- a/include/linux/mm.h
68151 +++ b/include/linux/mm.h
68152 @@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void *objp);
68153
68154 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
68155 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
68156 +
68157 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68158 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
68159 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
68160 +#else
68161 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
68162 +#endif
68163 +
68164 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
68165 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
68166
68167 @@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
68168 int set_page_dirty_lock(struct page *page);
68169 int clear_page_dirty_for_io(struct page *page);
68170
68171 -/* Is the vma a continuation of the stack vma above it? */
68172 -static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
68173 -{
68174 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
68175 -}
68176 -
68177 extern unsigned long move_page_tables(struct vm_area_struct *vma,
68178 unsigned long old_addr, struct vm_area_struct *new_vma,
68179 unsigned long new_addr, unsigned long len);
68180 @@ -890,6 +891,8 @@ struct shrinker {
68181 extern void register_shrinker(struct shrinker *);
68182 extern void unregister_shrinker(struct shrinker *);
68183
68184 +pgprot_t vm_get_page_prot(unsigned long vm_flags);
68185 +
68186 int vma_wants_writenotify(struct vm_area_struct *vma);
68187
68188 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
68189 @@ -1162,6 +1165,7 @@ out:
68190 }
68191
68192 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
68193 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
68194
68195 extern unsigned long do_brk(unsigned long, unsigned long);
68196
68197 @@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
68198 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
68199 struct vm_area_struct **pprev);
68200
68201 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
68202 +extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
68203 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
68204 +
68205 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
68206 NULL if none. Assume start_addr < end_addr. */
68207 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
68208 @@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
68209 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
68210 }
68211
68212 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
68213 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
68214 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
68215 unsigned long pfn, unsigned long size, pgprot_t);
68216 @@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long pfn, int trapno);
68217 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
68218 extern int sysctl_memory_failure_early_kill;
68219 extern int sysctl_memory_failure_recovery;
68220 -extern atomic_long_t mce_bad_pages;
68221 +extern atomic_long_unchecked_t mce_bad_pages;
68222 +
68223 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
68224 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
68225 +#else
68226 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
68227 +#endif
68228
68229 #endif /* __KERNEL__ */
68230 #endif /* _LINUX_MM_H */
68231 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
68232 index 9d12ed5..6d9707a 100644
68233 --- a/include/linux/mm_types.h
68234 +++ b/include/linux/mm_types.h
68235 @@ -186,6 +186,8 @@ struct vm_area_struct {
68236 #ifdef CONFIG_NUMA
68237 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
68238 #endif
68239 +
68240 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
68241 };
68242
68243 struct core_thread {
68244 @@ -287,6 +289,24 @@ struct mm_struct {
68245 #ifdef CONFIG_MMU_NOTIFIER
68246 struct mmu_notifier_mm *mmu_notifier_mm;
68247 #endif
68248 +
68249 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
68250 + unsigned long pax_flags;
68251 +#endif
68252 +
68253 +#ifdef CONFIG_PAX_DLRESOLVE
68254 + unsigned long call_dl_resolve;
68255 +#endif
68256 +
68257 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
68258 + unsigned long call_syscall;
68259 +#endif
68260 +
68261 +#ifdef CONFIG_PAX_ASLR
68262 + unsigned long delta_mmap; /* randomized offset */
68263 + unsigned long delta_stack; /* randomized offset */
68264 +#endif
68265 +
68266 };
68267
68268 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
68269 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
68270 index 4e02ee2..afb159e 100644
68271 --- a/include/linux/mmu_notifier.h
68272 +++ b/include/linux/mmu_notifier.h
68273 @@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
68274 */
68275 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
68276 ({ \
68277 - pte_t __pte; \
68278 + pte_t ___pte; \
68279 struct vm_area_struct *___vma = __vma; \
68280 unsigned long ___address = __address; \
68281 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
68282 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
68283 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
68284 - __pte; \
68285 + ___pte; \
68286 })
68287
68288 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
68289 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
68290 index 6c31a2a..4b0e930 100644
68291 --- a/include/linux/mmzone.h
68292 +++ b/include/linux/mmzone.h
68293 @@ -350,7 +350,7 @@ struct zone {
68294 unsigned long flags; /* zone flags, see below */
68295
68296 /* Zone statistics */
68297 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
68298 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
68299
68300 /*
68301 * prev_priority holds the scanning priority for this zone. It is
68302 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
68303 index f58e9d8..3503935 100644
68304 --- a/include/linux/mod_devicetable.h
68305 +++ b/include/linux/mod_devicetable.h
68306 @@ -12,7 +12,7 @@
68307 typedef unsigned long kernel_ulong_t;
68308 #endif
68309
68310 -#define PCI_ANY_ID (~0)
68311 +#define PCI_ANY_ID ((__u16)~0)
68312
68313 struct pci_device_id {
68314 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
68315 @@ -131,7 +131,7 @@ struct usb_device_id {
68316 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
68317 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
68318
68319 -#define HID_ANY_ID (~0)
68320 +#define HID_ANY_ID (~0U)
68321
68322 struct hid_device_id {
68323 __u16 bus;
68324 diff --git a/include/linux/module.h b/include/linux/module.h
68325 index 482efc8..642032b 100644
68326 --- a/include/linux/module.h
68327 +++ b/include/linux/module.h
68328 @@ -16,6 +16,7 @@
68329 #include <linux/kobject.h>
68330 #include <linux/moduleparam.h>
68331 #include <linux/tracepoint.h>
68332 +#include <linux/fs.h>
68333
68334 #include <asm/local.h>
68335 #include <asm/module.h>
68336 @@ -287,16 +288,16 @@ struct module
68337 int (*init)(void);
68338
68339 /* If this is non-NULL, vfree after init() returns */
68340 - void *module_init;
68341 + void *module_init_rx, *module_init_rw;
68342
68343 /* Here is the actual code + data, vfree'd on unload. */
68344 - void *module_core;
68345 + void *module_core_rx, *module_core_rw;
68346
68347 /* Here are the sizes of the init and core sections */
68348 - unsigned int init_size, core_size;
68349 + unsigned int init_size_rw, core_size_rw;
68350
68351 /* The size of the executable code in each section. */
68352 - unsigned int init_text_size, core_text_size;
68353 + unsigned int init_size_rx, core_size_rx;
68354
68355 /* Arch-specific module values */
68356 struct mod_arch_specific arch;
68357 @@ -345,6 +346,10 @@ struct module
68358 #ifdef CONFIG_EVENT_TRACING
68359 struct ftrace_event_call *trace_events;
68360 unsigned int num_trace_events;
68361 + struct file_operations trace_id;
68362 + struct file_operations trace_enable;
68363 + struct file_operations trace_format;
68364 + struct file_operations trace_filter;
68365 #endif
68366 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
68367 unsigned long *ftrace_callsites;
68368 @@ -393,16 +398,46 @@ struct module *__module_address(unsigned long addr);
68369 bool is_module_address(unsigned long addr);
68370 bool is_module_text_address(unsigned long addr);
68371
68372 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
68373 +{
68374 +
68375 +#ifdef CONFIG_PAX_KERNEXEC
68376 + if (ktla_ktva(addr) >= (unsigned long)start &&
68377 + ktla_ktva(addr) < (unsigned long)start + size)
68378 + return 1;
68379 +#endif
68380 +
68381 + return ((void *)addr >= start && (void *)addr < start + size);
68382 +}
68383 +
68384 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
68385 +{
68386 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
68387 +}
68388 +
68389 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
68390 +{
68391 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
68392 +}
68393 +
68394 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
68395 +{
68396 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
68397 +}
68398 +
68399 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
68400 +{
68401 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
68402 +}
68403 +
68404 static inline int within_module_core(unsigned long addr, struct module *mod)
68405 {
68406 - return (unsigned long)mod->module_core <= addr &&
68407 - addr < (unsigned long)mod->module_core + mod->core_size;
68408 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
68409 }
68410
68411 static inline int within_module_init(unsigned long addr, struct module *mod)
68412 {
68413 - return (unsigned long)mod->module_init <= addr &&
68414 - addr < (unsigned long)mod->module_init + mod->init_size;
68415 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
68416 }
68417
68418 /* Search for module by name: must hold module_mutex. */
68419 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
68420 index c1f40c2..682ca53 100644
68421 --- a/include/linux/moduleloader.h
68422 +++ b/include/linux/moduleloader.h
68423 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
68424 sections. Returns NULL on failure. */
68425 void *module_alloc(unsigned long size);
68426
68427 +#ifdef CONFIG_PAX_KERNEXEC
68428 +void *module_alloc_exec(unsigned long size);
68429 +#else
68430 +#define module_alloc_exec(x) module_alloc(x)
68431 +#endif
68432 +
68433 /* Free memory returned from module_alloc. */
68434 void module_free(struct module *mod, void *module_region);
68435
68436 +#ifdef CONFIG_PAX_KERNEXEC
68437 +void module_free_exec(struct module *mod, void *module_region);
68438 +#else
68439 +#define module_free_exec(x, y) module_free((x), (y))
68440 +#endif
68441 +
68442 /* Apply the given relocation to the (simplified) ELF. Return -error
68443 or 0. */
68444 int apply_relocate(Elf_Shdr *sechdrs,
68445 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
68446 index 82a9124..8a5f622 100644
68447 --- a/include/linux/moduleparam.h
68448 +++ b/include/linux/moduleparam.h
68449 @@ -132,7 +132,7 @@ struct kparam_array
68450
68451 /* Actually copy string: maxlen param is usually sizeof(string). */
68452 #define module_param_string(name, string, len, perm) \
68453 - static const struct kparam_string __param_string_##name \
68454 + static const struct kparam_string __param_string_##name __used \
68455 = { len, string }; \
68456 __module_param_call(MODULE_PARAM_PREFIX, name, \
68457 param_set_copystring, param_get_string, \
68458 @@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffer, struct kernel_param *kp);
68459
68460 /* Comma-separated array: *nump is set to number they actually specified. */
68461 #define module_param_array_named(name, array, type, nump, perm) \
68462 - static const struct kparam_array __param_arr_##name \
68463 + static const struct kparam_array __param_arr_##name __used \
68464 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
68465 sizeof(array[0]), array }; \
68466 __module_param_call(MODULE_PARAM_PREFIX, name, \
68467 diff --git a/include/linux/mutex.h b/include/linux/mutex.h
68468 index 878cab4..c92cb3e 100644
68469 --- a/include/linux/mutex.h
68470 +++ b/include/linux/mutex.h
68471 @@ -51,7 +51,7 @@ struct mutex {
68472 spinlock_t wait_lock;
68473 struct list_head wait_list;
68474 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
68475 - struct thread_info *owner;
68476 + struct task_struct *owner;
68477 #endif
68478 #ifdef CONFIG_DEBUG_MUTEXES
68479 const char *name;
68480 diff --git a/include/linux/namei.h b/include/linux/namei.h
68481 index ec0f607..d19e675 100644
68482 --- a/include/linux/namei.h
68483 +++ b/include/linux/namei.h
68484 @@ -22,7 +22,7 @@ struct nameidata {
68485 unsigned int flags;
68486 int last_type;
68487 unsigned depth;
68488 - char *saved_names[MAX_NESTED_LINKS + 1];
68489 + const char *saved_names[MAX_NESTED_LINKS + 1];
68490
68491 /* Intent data */
68492 union {
68493 @@ -84,12 +84,12 @@ extern int follow_up(struct path *);
68494 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
68495 extern void unlock_rename(struct dentry *, struct dentry *);
68496
68497 -static inline void nd_set_link(struct nameidata *nd, char *path)
68498 +static inline void nd_set_link(struct nameidata *nd, const char *path)
68499 {
68500 nd->saved_names[nd->depth] = path;
68501 }
68502
68503 -static inline char *nd_get_link(struct nameidata *nd)
68504 +static inline const char *nd_get_link(const struct nameidata *nd)
68505 {
68506 return nd->saved_names[nd->depth];
68507 }
68508 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
68509 index 9d7e8f7..04428c5 100644
68510 --- a/include/linux/netdevice.h
68511 +++ b/include/linux/netdevice.h
68512 @@ -637,6 +637,7 @@ struct net_device_ops {
68513 u16 xid);
68514 #endif
68515 };
68516 +typedef struct net_device_ops __no_const net_device_ops_no_const;
68517
68518 /*
68519 * The DEVICE structure.
68520 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
68521 new file mode 100644
68522 index 0000000..33f4af8
68523 --- /dev/null
68524 +++ b/include/linux/netfilter/xt_gradm.h
68525 @@ -0,0 +1,9 @@
68526 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
68527 +#define _LINUX_NETFILTER_XT_GRADM_H 1
68528 +
68529 +struct xt_gradm_mtinfo {
68530 + __u16 flags;
68531 + __u16 invflags;
68532 +};
68533 +
68534 +#endif
68535 diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
68536 index b359c4a..c08b334 100644
68537 --- a/include/linux/nodemask.h
68538 +++ b/include/linux/nodemask.h
68539 @@ -464,11 +464,11 @@ static inline int num_node_state(enum node_states state)
68540
68541 #define any_online_node(mask) \
68542 ({ \
68543 - int node; \
68544 - for_each_node_mask(node, (mask)) \
68545 - if (node_online(node)) \
68546 + int __node; \
68547 + for_each_node_mask(__node, (mask)) \
68548 + if (node_online(__node)) \
68549 break; \
68550 - node; \
68551 + __node; \
68552 })
68553
68554 #define num_online_nodes() num_node_state(N_ONLINE)
68555 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
68556 index 5171639..7cf4235 100644
68557 --- a/include/linux/oprofile.h
68558 +++ b/include/linux/oprofile.h
68559 @@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
68560 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
68561 char const * name, ulong * val);
68562
68563 -/** Create a file for read-only access to an atomic_t. */
68564 +/** Create a file for read-only access to an atomic_unchecked_t. */
68565 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
68566 - char const * name, atomic_t * val);
68567 + char const * name, atomic_unchecked_t * val);
68568
68569 /** create a directory */
68570 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
68571 diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
68572 index 3c62ed4..8924c7c 100644
68573 --- a/include/linux/pagemap.h
68574 +++ b/include/linux/pagemap.h
68575 @@ -425,7 +425,9 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
68576 if (((unsigned long)uaddr & PAGE_MASK) !=
68577 ((unsigned long)end & PAGE_MASK))
68578 ret = __get_user(c, end);
68579 + (void)c;
68580 }
68581 + (void)c;
68582 return ret;
68583 }
68584
68585 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
68586 index 81c9689..a567a55 100644
68587 --- a/include/linux/perf_event.h
68588 +++ b/include/linux/perf_event.h
68589 @@ -476,7 +476,7 @@ struct hw_perf_event {
68590 struct hrtimer hrtimer;
68591 };
68592 };
68593 - atomic64_t prev_count;
68594 + atomic64_unchecked_t prev_count;
68595 u64 sample_period;
68596 u64 last_period;
68597 atomic64_t period_left;
68598 @@ -557,7 +557,7 @@ struct perf_event {
68599 const struct pmu *pmu;
68600
68601 enum perf_event_active_state state;
68602 - atomic64_t count;
68603 + atomic64_unchecked_t count;
68604
68605 /*
68606 * These are the total time in nanoseconds that the event
68607 @@ -595,8 +595,8 @@ struct perf_event {
68608 * These accumulate total time (in nanoseconds) that children
68609 * events have been enabled and running, respectively.
68610 */
68611 - atomic64_t child_total_time_enabled;
68612 - atomic64_t child_total_time_running;
68613 + atomic64_unchecked_t child_total_time_enabled;
68614 + atomic64_unchecked_t child_total_time_running;
68615
68616 /*
68617 * Protect attach/detach and child_list:
68618 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
68619 index b43a9e0..b77d869 100644
68620 --- a/include/linux/pipe_fs_i.h
68621 +++ b/include/linux/pipe_fs_i.h
68622 @@ -46,9 +46,9 @@ struct pipe_inode_info {
68623 wait_queue_head_t wait;
68624 unsigned int nrbufs, curbuf;
68625 struct page *tmp_page;
68626 - unsigned int readers;
68627 - unsigned int writers;
68628 - unsigned int waiting_writers;
68629 + atomic_t readers;
68630 + atomic_t writers;
68631 + atomic_t waiting_writers;
68632 unsigned int r_counter;
68633 unsigned int w_counter;
68634 struct fasync_struct *fasync_readers;
68635 diff --git a/include/linux/poison.h b/include/linux/poison.h
68636 index 34066ff..e95d744 100644
68637 --- a/include/linux/poison.h
68638 +++ b/include/linux/poison.h
68639 @@ -19,8 +19,8 @@
68640 * under normal circumstances, used to verify that nobody uses
68641 * non-initialized list entries.
68642 */
68643 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
68644 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
68645 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
68646 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
68647
68648 /********** include/linux/timer.h **********/
68649 /*
68650 diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
68651 index 4f71bf4..77ffa64 100644
68652 --- a/include/linux/posix-timers.h
68653 +++ b/include/linux/posix-timers.h
68654 @@ -67,7 +67,7 @@ struct k_itimer {
68655 };
68656
68657 struct k_clock {
68658 - int res; /* in nanoseconds */
68659 + const int res; /* in nanoseconds */
68660 int (*clock_getres) (const clockid_t which_clock, struct timespec *tp);
68661 int (*clock_set) (const clockid_t which_clock, struct timespec * tp);
68662 int (*clock_get) (const clockid_t which_clock, struct timespec * tp);
68663 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
68664 index 72b1a10..13303a9 100644
68665 --- a/include/linux/preempt.h
68666 +++ b/include/linux/preempt.h
68667 @@ -110,7 +110,7 @@ struct preempt_ops {
68668 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
68669 void (*sched_out)(struct preempt_notifier *notifier,
68670 struct task_struct *next);
68671 -};
68672 +} __no_const;
68673
68674 /**
68675 * preempt_notifier - key for installing preemption notifiers
68676 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
68677 index 379eaed..1bf73e3 100644
68678 --- a/include/linux/proc_fs.h
68679 +++ b/include/linux/proc_fs.h
68680 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
68681 return proc_create_data(name, mode, parent, proc_fops, NULL);
68682 }
68683
68684 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
68685 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
68686 +{
68687 +#ifdef CONFIG_GRKERNSEC_PROC_USER
68688 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
68689 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68690 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
68691 +#else
68692 + return proc_create_data(name, mode, parent, proc_fops, NULL);
68693 +#endif
68694 +}
68695 +
68696 +
68697 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
68698 mode_t mode, struct proc_dir_entry *base,
68699 read_proc_t *read_proc, void * data)
68700 @@ -256,7 +269,7 @@ union proc_op {
68701 int (*proc_show)(struct seq_file *m,
68702 struct pid_namespace *ns, struct pid *pid,
68703 struct task_struct *task);
68704 -};
68705 +} __no_const;
68706
68707 struct ctl_table_header;
68708 struct ctl_table;
68709 diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
68710 index 7456d7d..6c1cfc9 100644
68711 --- a/include/linux/ptrace.h
68712 +++ b/include/linux/ptrace.h
68713 @@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_struct *child);
68714 extern void exit_ptrace(struct task_struct *tracer);
68715 #define PTRACE_MODE_READ 1
68716 #define PTRACE_MODE_ATTACH 2
68717 -/* Returns 0 on success, -errno on denial. */
68718 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
68719 /* Returns true on success, false on denial. */
68720 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
68721 +/* Returns true on success, false on denial. */
68722 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
68723
68724 static inline int ptrace_reparented(struct task_struct *child)
68725 {
68726 diff --git a/include/linux/random.h b/include/linux/random.h
68727 index 2948046..3262567 100644
68728 --- a/include/linux/random.h
68729 +++ b/include/linux/random.h
68730 @@ -63,6 +63,11 @@ unsigned long randomize_range(unsigned long start, unsigned long end, unsigned l
68731 u32 random32(void);
68732 void srandom32(u32 seed);
68733
68734 +static inline unsigned long pax_get_random_long(void)
68735 +{
68736 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
68737 +}
68738 +
68739 #endif /* __KERNEL___ */
68740
68741 #endif /* _LINUX_RANDOM_H */
68742 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
68743 index 988e55f..17cb4ef 100644
68744 --- a/include/linux/reboot.h
68745 +++ b/include/linux/reboot.h
68746 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
68747 * Architecture-specific implementations of sys_reboot commands.
68748 */
68749
68750 -extern void machine_restart(char *cmd);
68751 -extern void machine_halt(void);
68752 -extern void machine_power_off(void);
68753 +extern void machine_restart(char *cmd) __noreturn;
68754 +extern void machine_halt(void) __noreturn;
68755 +extern void machine_power_off(void) __noreturn;
68756
68757 extern void machine_shutdown(void);
68758 struct pt_regs;
68759 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
68760 */
68761
68762 extern void kernel_restart_prepare(char *cmd);
68763 -extern void kernel_restart(char *cmd);
68764 -extern void kernel_halt(void);
68765 -extern void kernel_power_off(void);
68766 +extern void kernel_restart(char *cmd) __noreturn;
68767 +extern void kernel_halt(void) __noreturn;
68768 +extern void kernel_power_off(void) __noreturn;
68769
68770 void ctrl_alt_del(void);
68771
68772 @@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
68773 * Emergency restart, callable from an interrupt handler.
68774 */
68775
68776 -extern void emergency_restart(void);
68777 +extern void emergency_restart(void) __noreturn;
68778 #include <asm/emergency-restart.h>
68779
68780 #endif
68781 diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
68782 index dd31e7b..5b03c5c 100644
68783 --- a/include/linux/reiserfs_fs.h
68784 +++ b/include/linux/reiserfs_fs.h
68785 @@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
68786 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
68787
68788 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
68789 -#define get_generation(s) atomic_read (&fs_generation(s))
68790 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
68791 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
68792 #define __fs_changed(gen,s) (gen != get_generation (s))
68793 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
68794 @@ -1534,24 +1534,24 @@ static inline struct super_block *sb_from_bi(struct buffer_info *bi)
68795 */
68796
68797 struct item_operations {
68798 - int (*bytes_number) (struct item_head * ih, int block_size);
68799 - void (*decrement_key) (struct cpu_key *);
68800 - int (*is_left_mergeable) (struct reiserfs_key * ih,
68801 + int (* const bytes_number) (struct item_head * ih, int block_size);
68802 + void (* const decrement_key) (struct cpu_key *);
68803 + int (* const is_left_mergeable) (struct reiserfs_key * ih,
68804 unsigned long bsize);
68805 - void (*print_item) (struct item_head *, char *item);
68806 - void (*check_item) (struct item_head *, char *item);
68807 + void (* const print_item) (struct item_head *, char *item);
68808 + void (* const check_item) (struct item_head *, char *item);
68809
68810 - int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
68811 + int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
68812 int is_affected, int insert_size);
68813 - int (*check_left) (struct virtual_item * vi, int free,
68814 + int (* const check_left) (struct virtual_item * vi, int free,
68815 int start_skip, int end_skip);
68816 - int (*check_right) (struct virtual_item * vi, int free);
68817 - int (*part_size) (struct virtual_item * vi, int from, int to);
68818 - int (*unit_num) (struct virtual_item * vi);
68819 - void (*print_vi) (struct virtual_item * vi);
68820 + int (* const check_right) (struct virtual_item * vi, int free);
68821 + int (* const part_size) (struct virtual_item * vi, int from, int to);
68822 + int (* const unit_num) (struct virtual_item * vi);
68823 + void (* const print_vi) (struct virtual_item * vi);
68824 };
68825
68826 -extern struct item_operations *item_ops[TYPE_ANY + 1];
68827 +extern const struct item_operations * const item_ops[TYPE_ANY + 1];
68828
68829 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
68830 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
68831 diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
68832 index dab68bb..0688727 100644
68833 --- a/include/linux/reiserfs_fs_sb.h
68834 +++ b/include/linux/reiserfs_fs_sb.h
68835 @@ -377,7 +377,7 @@ struct reiserfs_sb_info {
68836 /* Comment? -Hans */
68837 wait_queue_head_t s_wait;
68838 /* To be obsoleted soon by per buffer seals.. -Hans */
68839 - atomic_t s_generation_counter; // increased by one every time the
68840 + atomic_unchecked_t s_generation_counter; // increased by one every time the
68841 // tree gets re-balanced
68842 unsigned long s_properties; /* File system properties. Currently holds
68843 on-disk FS format */
68844 diff --git a/include/linux/relay.h b/include/linux/relay.h
68845 index 14a86bc..17d0700 100644
68846 --- a/include/linux/relay.h
68847 +++ b/include/linux/relay.h
68848 @@ -159,7 +159,7 @@ struct rchan_callbacks
68849 * The callback should return 0 if successful, negative if not.
68850 */
68851 int (*remove_buf_file)(struct dentry *dentry);
68852 -};
68853 +} __no_const;
68854
68855 /*
68856 * CONFIG_RELAY kernel API, kernel/relay.c
68857 diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
68858 index 3392c59..a746428 100644
68859 --- a/include/linux/rfkill.h
68860 +++ b/include/linux/rfkill.h
68861 @@ -144,6 +144,7 @@ struct rfkill_ops {
68862 void (*query)(struct rfkill *rfkill, void *data);
68863 int (*set_block)(void *data, bool blocked);
68864 };
68865 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
68866
68867 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
68868 /**
68869 diff --git a/include/linux/sched.h b/include/linux/sched.h
68870 index 71849bf..0ad2f74 100644
68871 --- a/include/linux/sched.h
68872 +++ b/include/linux/sched.h
68873 @@ -101,6 +101,7 @@ struct bio;
68874 struct fs_struct;
68875 struct bts_context;
68876 struct perf_event_context;
68877 +struct linux_binprm;
68878
68879 /*
68880 * List of flags we want to share for kernel threads,
68881 @@ -350,7 +351,7 @@ extern signed long schedule_timeout_killable(signed long timeout);
68882 extern signed long schedule_timeout_uninterruptible(signed long timeout);
68883 asmlinkage void __schedule(void);
68884 asmlinkage void schedule(void);
68885 -extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
68886 +extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
68887
68888 struct nsproxy;
68889 struct user_namespace;
68890 @@ -371,9 +372,12 @@ struct user_namespace;
68891 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
68892
68893 extern int sysctl_max_map_count;
68894 +extern unsigned long sysctl_heap_stack_gap;
68895
68896 #include <linux/aio.h>
68897
68898 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
68899 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
68900 extern unsigned long
68901 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
68902 unsigned long, unsigned long);
68903 @@ -666,6 +670,16 @@ struct signal_struct {
68904 struct tty_audit_buf *tty_audit_buf;
68905 #endif
68906
68907 +#ifdef CONFIG_GRKERNSEC
68908 + u32 curr_ip;
68909 + u32 saved_ip;
68910 + u32 gr_saddr;
68911 + u32 gr_daddr;
68912 + u16 gr_sport;
68913 + u16 gr_dport;
68914 + u8 used_accept:1;
68915 +#endif
68916 +
68917 int oom_adj; /* OOM kill score adjustment (bit shift) */
68918 };
68919
68920 @@ -723,6 +737,11 @@ struct user_struct {
68921 struct key *session_keyring; /* UID's default session keyring */
68922 #endif
68923
68924 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
68925 + unsigned int banned;
68926 + unsigned long ban_expires;
68927 +#endif
68928 +
68929 /* Hash table maintenance information */
68930 struct hlist_node uidhash_node;
68931 uid_t uid;
68932 @@ -1328,8 +1347,8 @@ struct task_struct {
68933 struct list_head thread_group;
68934
68935 struct completion *vfork_done; /* for vfork() */
68936 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
68937 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
68938 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
68939 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
68940
68941 cputime_t utime, stime, utimescaled, stimescaled;
68942 cputime_t gtime;
68943 @@ -1343,16 +1362,6 @@ struct task_struct {
68944 struct task_cputime cputime_expires;
68945 struct list_head cpu_timers[3];
68946
68947 -/* process credentials */
68948 - const struct cred *real_cred; /* objective and real subjective task
68949 - * credentials (COW) */
68950 - const struct cred *cred; /* effective (overridable) subjective task
68951 - * credentials (COW) */
68952 - struct mutex cred_guard_mutex; /* guard against foreign influences on
68953 - * credential calculations
68954 - * (notably. ptrace) */
68955 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
68956 -
68957 char comm[TASK_COMM_LEN]; /* executable name excluding path
68958 - access with [gs]et_task_comm (which lock
68959 it with task_lock())
68960 @@ -1369,6 +1378,10 @@ struct task_struct {
68961 #endif
68962 /* CPU-specific state of this task */
68963 struct thread_struct thread;
68964 +/* thread_info moved to task_struct */
68965 +#ifdef CONFIG_X86
68966 + struct thread_info tinfo;
68967 +#endif
68968 /* filesystem information */
68969 struct fs_struct *fs;
68970 /* open file information */
68971 @@ -1436,6 +1449,15 @@ struct task_struct {
68972 int hardirq_context;
68973 int softirq_context;
68974 #endif
68975 +
68976 +/* process credentials */
68977 + const struct cred *real_cred; /* objective and real subjective task
68978 + * credentials (COW) */
68979 + struct mutex cred_guard_mutex; /* guard against foreign influences on
68980 + * credential calculations
68981 + * (notably. ptrace) */
68982 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
68983 +
68984 #ifdef CONFIG_LOCKDEP
68985 # define MAX_LOCK_DEPTH 48UL
68986 u64 curr_chain_key;
68987 @@ -1456,6 +1478,9 @@ struct task_struct {
68988
68989 struct backing_dev_info *backing_dev_info;
68990
68991 + const struct cred *cred; /* effective (overridable) subjective task
68992 + * credentials (COW) */
68993 +
68994 struct io_context *io_context;
68995
68996 unsigned long ptrace_message;
68997 @@ -1519,6 +1544,24 @@ struct task_struct {
68998 unsigned long default_timer_slack_ns;
68999
69000 struct list_head *scm_work_list;
69001 +
69002 +#ifdef CONFIG_GRKERNSEC
69003 + /* grsecurity */
69004 +#ifdef CONFIG_GRKERNSEC_SETXID
69005 + const struct cred *delayed_cred;
69006 +#endif
69007 + struct dentry *gr_chroot_dentry;
69008 + struct acl_subject_label *acl;
69009 + struct acl_role_label *role;
69010 + struct file *exec_file;
69011 + u16 acl_role_id;
69012 + /* is this the task that authenticated to the special role */
69013 + u8 acl_sp_role;
69014 + u8 is_writable;
69015 + u8 brute;
69016 + u8 gr_is_chrooted;
69017 +#endif
69018 +
69019 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
69020 /* Index of current stored adress in ret_stack */
69021 int curr_ret_stack;
69022 @@ -1542,6 +1585,57 @@ struct task_struct {
69023 #endif /* CONFIG_TRACING */
69024 };
69025
69026 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
69027 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
69028 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
69029 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
69030 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
69031 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
69032 +
69033 +#ifdef CONFIG_PAX_SOFTMODE
69034 +extern int pax_softmode;
69035 +#endif
69036 +
69037 +extern int pax_check_flags(unsigned long *);
69038 +
69039 +/* if tsk != current then task_lock must be held on it */
69040 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
69041 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
69042 +{
69043 + if (likely(tsk->mm))
69044 + return tsk->mm->pax_flags;
69045 + else
69046 + return 0UL;
69047 +}
69048 +
69049 +/* if tsk != current then task_lock must be held on it */
69050 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
69051 +{
69052 + if (likely(tsk->mm)) {
69053 + tsk->mm->pax_flags = flags;
69054 + return 0;
69055 + }
69056 + return -EINVAL;
69057 +}
69058 +#endif
69059 +
69060 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
69061 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
69062 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
69063 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
69064 +#endif
69065 +
69066 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
69067 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
69068 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
69069 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
69070 +
69071 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
69072 +extern void pax_track_stack(void);
69073 +#else
69074 +static inline void pax_track_stack(void) {}
69075 +#endif
69076 +
69077 /* Future-safe accessor for struct task_struct's cpus_allowed. */
69078 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
69079
69080 @@ -1740,7 +1834,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
69081 #define PF_DUMPCORE 0x00000200 /* dumped core */
69082 #define PF_SIGNALED 0x00000400 /* killed by a signal */
69083 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
69084 -#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
69085 +#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
69086 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
69087 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
69088 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
69089 @@ -1978,7 +2072,9 @@ void yield(void);
69090 extern struct exec_domain default_exec_domain;
69091
69092 union thread_union {
69093 +#ifndef CONFIG_X86
69094 struct thread_info thread_info;
69095 +#endif
69096 unsigned long stack[THREAD_SIZE/sizeof(long)];
69097 };
69098
69099 @@ -2011,6 +2107,7 @@ extern struct pid_namespace init_pid_ns;
69100 */
69101
69102 extern struct task_struct *find_task_by_vpid(pid_t nr);
69103 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
69104 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
69105 struct pid_namespace *ns);
69106
69107 @@ -2155,7 +2252,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
69108 extern void exit_itimers(struct signal_struct *);
69109 extern void flush_itimer_signals(void);
69110
69111 -extern NORET_TYPE void do_group_exit(int);
69112 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
69113
69114 extern void daemonize(const char *, ...);
69115 extern int allow_signal(int);
69116 @@ -2284,13 +2381,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
69117
69118 #endif
69119
69120 -static inline int object_is_on_stack(void *obj)
69121 +static inline int object_starts_on_stack(void *obj)
69122 {
69123 - void *stack = task_stack_page(current);
69124 + const void *stack = task_stack_page(current);
69125
69126 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
69127 }
69128
69129 +#ifdef CONFIG_PAX_USERCOPY
69130 +extern int object_is_on_stack(const void *obj, unsigned long len);
69131 +#endif
69132 +
69133 extern void thread_info_cache_init(void);
69134
69135 #ifdef CONFIG_DEBUG_STACK_USAGE
69136 diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
69137 index 1ee2c05..81b7ec4 100644
69138 --- a/include/linux/screen_info.h
69139 +++ b/include/linux/screen_info.h
69140 @@ -42,7 +42,8 @@ struct screen_info {
69141 __u16 pages; /* 0x32 */
69142 __u16 vesa_attributes; /* 0x34 */
69143 __u32 capabilities; /* 0x36 */
69144 - __u8 _reserved[6]; /* 0x3a */
69145 + __u16 vesapm_size; /* 0x3a */
69146 + __u8 _reserved[4]; /* 0x3c */
69147 } __attribute__((packed));
69148
69149 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
69150 diff --git a/include/linux/security.h b/include/linux/security.h
69151 index d40d23f..d739b08 100644
69152 --- a/include/linux/security.h
69153 +++ b/include/linux/security.h
69154 @@ -34,6 +34,7 @@
69155 #include <linux/key.h>
69156 #include <linux/xfrm.h>
69157 #include <linux/gfp.h>
69158 +#include <linux/grsecurity.h>
69159 #include <net/flow.h>
69160
69161 /* Maximum number of letters for an LSM name string */
69162 @@ -76,7 +77,7 @@ extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
69163 extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp);
69164 extern int cap_task_setioprio(struct task_struct *p, int ioprio);
69165 extern int cap_task_setnice(struct task_struct *p, int nice);
69166 -extern int cap_syslog(int type);
69167 +extern int cap_syslog(int type, bool from_file);
69168 extern int cap_vm_enough_memory(struct mm_struct *mm, long pages);
69169
69170 struct msghdr;
69171 @@ -1331,6 +1332,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
69172 * logging to the console.
69173 * See the syslog(2) manual page for an explanation of the @type values.
69174 * @type contains the type of action.
69175 + * @from_file indicates the context of action (if it came from /proc).
69176 * Return 0 if permission is granted.
69177 * @settime:
69178 * Check permission to change the system time.
69179 @@ -1445,7 +1447,7 @@ struct security_operations {
69180 int (*sysctl) (struct ctl_table *table, int op);
69181 int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
69182 int (*quota_on) (struct dentry *dentry);
69183 - int (*syslog) (int type);
69184 + int (*syslog) (int type, bool from_file);
69185 int (*settime) (struct timespec *ts, struct timezone *tz);
69186 int (*vm_enough_memory) (struct mm_struct *mm, long pages);
69187
69188 @@ -1740,7 +1742,7 @@ int security_acct(struct file *file);
69189 int security_sysctl(struct ctl_table *table, int op);
69190 int security_quotactl(int cmds, int type, int id, struct super_block *sb);
69191 int security_quota_on(struct dentry *dentry);
69192 -int security_syslog(int type);
69193 +int security_syslog(int type, bool from_file);
69194 int security_settime(struct timespec *ts, struct timezone *tz);
69195 int security_vm_enough_memory(long pages);
69196 int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
69197 @@ -1986,9 +1988,9 @@ static inline int security_quota_on(struct dentry *dentry)
69198 return 0;
69199 }
69200
69201 -static inline int security_syslog(int type)
69202 +static inline int security_syslog(int type, bool from_file)
69203 {
69204 - return cap_syslog(type);
69205 + return cap_syslog(type, from_file);
69206 }
69207
69208 static inline int security_settime(struct timespec *ts, struct timezone *tz)
69209 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
69210 index 8366d8f..2307490 100644
69211 --- a/include/linux/seq_file.h
69212 +++ b/include/linux/seq_file.h
69213 @@ -32,6 +32,7 @@ struct seq_operations {
69214 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
69215 int (*show) (struct seq_file *m, void *v);
69216 };
69217 +typedef struct seq_operations __no_const seq_operations_no_const;
69218
69219 #define SEQ_SKIP 1
69220
69221 diff --git a/include/linux/shm.h b/include/linux/shm.h
69222 index eca6235..c7417ed 100644
69223 --- a/include/linux/shm.h
69224 +++ b/include/linux/shm.h
69225 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the kernel */
69226 pid_t shm_cprid;
69227 pid_t shm_lprid;
69228 struct user_struct *mlock_user;
69229 +#ifdef CONFIG_GRKERNSEC
69230 + time_t shm_createtime;
69231 + pid_t shm_lapid;
69232 +#endif
69233 };
69234
69235 /* shm_mode upper byte flags */
69236 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
69237 index bcdd660..6e12e11 100644
69238 --- a/include/linux/skbuff.h
69239 +++ b/include/linux/skbuff.h
69240 @@ -14,6 +14,7 @@
69241 #ifndef _LINUX_SKBUFF_H
69242 #define _LINUX_SKBUFF_H
69243
69244 +#include <linux/const.h>
69245 #include <linux/kernel.h>
69246 #include <linux/kmemcheck.h>
69247 #include <linux/compiler.h>
69248 @@ -544,7 +545,7 @@ static inline union skb_shared_tx *skb_tx(struct sk_buff *skb)
69249 */
69250 static inline int skb_queue_empty(const struct sk_buff_head *list)
69251 {
69252 - return list->next == (struct sk_buff *)list;
69253 + return list->next == (const struct sk_buff *)list;
69254 }
69255
69256 /**
69257 @@ -557,7 +558,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
69258 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
69259 const struct sk_buff *skb)
69260 {
69261 - return (skb->next == (struct sk_buff *) list);
69262 + return (skb->next == (const struct sk_buff *) list);
69263 }
69264
69265 /**
69266 @@ -570,7 +571,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
69267 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
69268 const struct sk_buff *skb)
69269 {
69270 - return (skb->prev == (struct sk_buff *) list);
69271 + return (skb->prev == (const struct sk_buff *) list);
69272 }
69273
69274 /**
69275 @@ -1367,7 +1368,7 @@ static inline int skb_network_offset(const struct sk_buff *skb)
69276 * headroom, you should not reduce this.
69277 */
69278 #ifndef NET_SKB_PAD
69279 -#define NET_SKB_PAD 32
69280 +#define NET_SKB_PAD (_AC(32,UL))
69281 #endif
69282
69283 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
69284 diff --git a/include/linux/slab.h b/include/linux/slab.h
69285 index 2da8372..a3be824 100644
69286 --- a/include/linux/slab.h
69287 +++ b/include/linux/slab.h
69288 @@ -11,12 +11,20 @@
69289
69290 #include <linux/gfp.h>
69291 #include <linux/types.h>
69292 +#include <linux/err.h>
69293
69294 /*
69295 * Flags to pass to kmem_cache_create().
69296 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
69297 */
69298 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
69299 +
69300 +#ifdef CONFIG_PAX_USERCOPY
69301 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
69302 +#else
69303 +#define SLAB_USERCOPY 0x00000000UL
69304 +#endif
69305 +
69306 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
69307 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
69308 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
69309 @@ -82,10 +90,13 @@
69310 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
69311 * Both make kfree a no-op.
69312 */
69313 -#define ZERO_SIZE_PTR ((void *)16)
69314 +#define ZERO_SIZE_PTR \
69315 +({ \
69316 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
69317 + (void *)(-MAX_ERRNO-1L); \
69318 +})
69319
69320 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
69321 - (unsigned long)ZERO_SIZE_PTR)
69322 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
69323
69324 /*
69325 * struct kmem_cache related prototypes
69326 @@ -138,6 +149,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
69327 void kfree(const void *);
69328 void kzfree(const void *);
69329 size_t ksize(const void *);
69330 +void check_object_size(const void *ptr, unsigned long n, bool to);
69331
69332 /*
69333 * Allocator specific definitions. These are mainly used to establish optimized
69334 @@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
69335
69336 void __init kmem_cache_init_late(void);
69337
69338 +#define kmalloc(x, y) \
69339 +({ \
69340 + void *___retval; \
69341 + intoverflow_t ___x = (intoverflow_t)x; \
69342 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
69343 + ___retval = NULL; \
69344 + else \
69345 + ___retval = kmalloc((size_t)___x, (y)); \
69346 + ___retval; \
69347 +})
69348 +
69349 +#define kmalloc_node(x, y, z) \
69350 +({ \
69351 + void *___retval; \
69352 + intoverflow_t ___x = (intoverflow_t)x; \
69353 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
69354 + ___retval = NULL; \
69355 + else \
69356 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
69357 + ___retval; \
69358 +})
69359 +
69360 +#define kzalloc(x, y) \
69361 +({ \
69362 + void *___retval; \
69363 + intoverflow_t ___x = (intoverflow_t)x; \
69364 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
69365 + ___retval = NULL; \
69366 + else \
69367 + ___retval = kzalloc((size_t)___x, (y)); \
69368 + ___retval; \
69369 +})
69370 +
69371 #endif /* _LINUX_SLAB_H */
69372 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
69373 index 850d057..d9dfe3c 100644
69374 --- a/include/linux/slab_def.h
69375 +++ b/include/linux/slab_def.h
69376 @@ -69,10 +69,10 @@ struct kmem_cache {
69377 unsigned long node_allocs;
69378 unsigned long node_frees;
69379 unsigned long node_overflow;
69380 - atomic_t allochit;
69381 - atomic_t allocmiss;
69382 - atomic_t freehit;
69383 - atomic_t freemiss;
69384 + atomic_unchecked_t allochit;
69385 + atomic_unchecked_t allocmiss;
69386 + atomic_unchecked_t freehit;
69387 + atomic_unchecked_t freemiss;
69388
69389 /*
69390 * If debugging is enabled, then the allocator can add additional
69391 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
69392 index 5ad70a6..57f9f65 100644
69393 --- a/include/linux/slub_def.h
69394 +++ b/include/linux/slub_def.h
69395 @@ -86,7 +86,7 @@ struct kmem_cache {
69396 struct kmem_cache_order_objects max;
69397 struct kmem_cache_order_objects min;
69398 gfp_t allocflags; /* gfp flags to use on each alloc */
69399 - int refcount; /* Refcount for slab cache destroy */
69400 + atomic_t refcount; /* Refcount for slab cache destroy */
69401 void (*ctor)(void *);
69402 int inuse; /* Offset to metadata */
69403 int align; /* Alignment */
69404 @@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
69405 #endif
69406
69407 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
69408 -void *__kmalloc(size_t size, gfp_t flags);
69409 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
69410
69411 #ifdef CONFIG_KMEMTRACE
69412 extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
69413 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
69414 index 67ad11f..0bbd8af 100644
69415 --- a/include/linux/sonet.h
69416 +++ b/include/linux/sonet.h
69417 @@ -61,7 +61,7 @@ struct sonet_stats {
69418 #include <asm/atomic.h>
69419
69420 struct k_sonet_stats {
69421 -#define __HANDLE_ITEM(i) atomic_t i
69422 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
69423 __SONET_ITEMS
69424 #undef __HANDLE_ITEM
69425 };
69426 diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
69427 index 6f52b4d..5500323 100644
69428 --- a/include/linux/sunrpc/cache.h
69429 +++ b/include/linux/sunrpc/cache.h
69430 @@ -125,7 +125,7 @@ struct cache_detail {
69431 */
69432 struct cache_req {
69433 struct cache_deferred_req *(*defer)(struct cache_req *req);
69434 -};
69435 +} __no_const;
69436 /* this must be embedded in a deferred_request that is being
69437 * delayed awaiting cache-fill
69438 */
69439 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
69440 index 8ed9642..101ceab 100644
69441 --- a/include/linux/sunrpc/clnt.h
69442 +++ b/include/linux/sunrpc/clnt.h
69443 @@ -167,9 +167,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
69444 {
69445 switch (sap->sa_family) {
69446 case AF_INET:
69447 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
69448 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
69449 case AF_INET6:
69450 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
69451 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
69452 }
69453 return 0;
69454 }
69455 @@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
69456 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
69457 const struct sockaddr *src)
69458 {
69459 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
69460 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
69461 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
69462
69463 dsin->sin_family = ssin->sin_family;
69464 @@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
69465 if (sa->sa_family != AF_INET6)
69466 return 0;
69467
69468 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
69469 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
69470 }
69471
69472 #endif /* __KERNEL__ */
69473 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
69474 index c14fe86..393245e 100644
69475 --- a/include/linux/sunrpc/svc_rdma.h
69476 +++ b/include/linux/sunrpc/svc_rdma.h
69477 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
69478 extern unsigned int svcrdma_max_requests;
69479 extern unsigned int svcrdma_max_req_size;
69480
69481 -extern atomic_t rdma_stat_recv;
69482 -extern atomic_t rdma_stat_read;
69483 -extern atomic_t rdma_stat_write;
69484 -extern atomic_t rdma_stat_sq_starve;
69485 -extern atomic_t rdma_stat_rq_starve;
69486 -extern atomic_t rdma_stat_rq_poll;
69487 -extern atomic_t rdma_stat_rq_prod;
69488 -extern atomic_t rdma_stat_sq_poll;
69489 -extern atomic_t rdma_stat_sq_prod;
69490 +extern atomic_unchecked_t rdma_stat_recv;
69491 +extern atomic_unchecked_t rdma_stat_read;
69492 +extern atomic_unchecked_t rdma_stat_write;
69493 +extern atomic_unchecked_t rdma_stat_sq_starve;
69494 +extern atomic_unchecked_t rdma_stat_rq_starve;
69495 +extern atomic_unchecked_t rdma_stat_rq_poll;
69496 +extern atomic_unchecked_t rdma_stat_rq_prod;
69497 +extern atomic_unchecked_t rdma_stat_sq_poll;
69498 +extern atomic_unchecked_t rdma_stat_sq_prod;
69499
69500 #define RPCRDMA_VERSION 1
69501
69502 diff --git a/include/linux/suspend.h b/include/linux/suspend.h
69503 index 5e781d8..1e62818 100644
69504 --- a/include/linux/suspend.h
69505 +++ b/include/linux/suspend.h
69506 @@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
69507 * which require special recovery actions in that situation.
69508 */
69509 struct platform_suspend_ops {
69510 - int (*valid)(suspend_state_t state);
69511 - int (*begin)(suspend_state_t state);
69512 - int (*prepare)(void);
69513 - int (*prepare_late)(void);
69514 - int (*enter)(suspend_state_t state);
69515 - void (*wake)(void);
69516 - void (*finish)(void);
69517 - void (*end)(void);
69518 - void (*recover)(void);
69519 + int (* const valid)(suspend_state_t state);
69520 + int (* const begin)(suspend_state_t state);
69521 + int (* const prepare)(void);
69522 + int (* const prepare_late)(void);
69523 + int (* const enter)(suspend_state_t state);
69524 + void (* const wake)(void);
69525 + void (* const finish)(void);
69526 + void (* const end)(void);
69527 + void (* const recover)(void);
69528 };
69529
69530 #ifdef CONFIG_SUSPEND
69531 @@ -120,7 +120,7 @@ struct platform_suspend_ops {
69532 * suspend_set_ops - set platform dependent suspend operations
69533 * @ops: The new suspend operations to set.
69534 */
69535 -extern void suspend_set_ops(struct platform_suspend_ops *ops);
69536 +extern void suspend_set_ops(const struct platform_suspend_ops *ops);
69537 extern int suspend_valid_only_mem(suspend_state_t state);
69538
69539 /**
69540 @@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t state);
69541 #else /* !CONFIG_SUSPEND */
69542 #define suspend_valid_only_mem NULL
69543
69544 -static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
69545 +static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
69546 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
69547 #endif /* !CONFIG_SUSPEND */
69548
69549 @@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone *zone);
69550 * platforms which require special recovery actions in that situation.
69551 */
69552 struct platform_hibernation_ops {
69553 - int (*begin)(void);
69554 - void (*end)(void);
69555 - int (*pre_snapshot)(void);
69556 - void (*finish)(void);
69557 - int (*prepare)(void);
69558 - int (*enter)(void);
69559 - void (*leave)(void);
69560 - int (*pre_restore)(void);
69561 - void (*restore_cleanup)(void);
69562 - void (*recover)(void);
69563 + int (* const begin)(void);
69564 + void (* const end)(void);
69565 + int (* const pre_snapshot)(void);
69566 + void (* const finish)(void);
69567 + int (* const prepare)(void);
69568 + int (* const enter)(void);
69569 + void (* const leave)(void);
69570 + int (* const pre_restore)(void);
69571 + void (* const restore_cleanup)(void);
69572 + void (* const recover)(void);
69573 };
69574
69575 #ifdef CONFIG_HIBERNATION
69576 @@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct page *);
69577 extern void swsusp_unset_page_free(struct page *);
69578 extern unsigned long get_safe_page(gfp_t gfp_mask);
69579
69580 -extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
69581 +extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
69582 extern int hibernate(void);
69583 extern bool system_entering_hibernation(void);
69584 #else /* CONFIG_HIBERNATION */
69585 @@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
69586 static inline void swsusp_set_page_free(struct page *p) {}
69587 static inline void swsusp_unset_page_free(struct page *p) {}
69588
69589 -static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
69590 +static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
69591 static inline int hibernate(void) { return -ENOSYS; }
69592 static inline bool system_entering_hibernation(void) { return false; }
69593 #endif /* CONFIG_HIBERNATION */
69594 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
69595 index 0eb6942..a805cb6 100644
69596 --- a/include/linux/sysctl.h
69597 +++ b/include/linux/sysctl.h
69598 @@ -164,7 +164,11 @@ enum
69599 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
69600 };
69601
69602 -
69603 +#ifdef CONFIG_PAX_SOFTMODE
69604 +enum {
69605 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
69606 +};
69607 +#endif
69608
69609 /* CTL_VM names: */
69610 enum
69611 @@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
69612
69613 extern int proc_dostring(struct ctl_table *, int,
69614 void __user *, size_t *, loff_t *);
69615 +extern int proc_dostring_modpriv(struct ctl_table *, int,
69616 + void __user *, size_t *, loff_t *);
69617 extern int proc_dointvec(struct ctl_table *, int,
69618 void __user *, size_t *, loff_t *);
69619 extern int proc_dointvec_minmax(struct ctl_table *, int,
69620 @@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name, int nlen,
69621
69622 extern ctl_handler sysctl_data;
69623 extern ctl_handler sysctl_string;
69624 +extern ctl_handler sysctl_string_modpriv;
69625 extern ctl_handler sysctl_intvec;
69626 extern ctl_handler sysctl_jiffies;
69627 extern ctl_handler sysctl_ms_jiffies;
69628 diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
69629 index 9d68fed..71f02cc 100644
69630 --- a/include/linux/sysfs.h
69631 +++ b/include/linux/sysfs.h
69632 @@ -75,8 +75,8 @@ struct bin_attribute {
69633 };
69634
69635 struct sysfs_ops {
69636 - ssize_t (*show)(struct kobject *, struct attribute *,char *);
69637 - ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
69638 + ssize_t (* const show)(struct kobject *, struct attribute *,char *);
69639 + ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
69640 };
69641
69642 struct sysfs_dirent;
69643 diff --git a/include/linux/syslog.h b/include/linux/syslog.h
69644 new file mode 100644
69645 index 0000000..3891139
69646 --- /dev/null
69647 +++ b/include/linux/syslog.h
69648 @@ -0,0 +1,52 @@
69649 +/* Syslog internals
69650 + *
69651 + * Copyright 2010 Canonical, Ltd.
69652 + * Author: Kees Cook <kees.cook@canonical.com>
69653 + *
69654 + * This program is free software; you can redistribute it and/or modify
69655 + * it under the terms of the GNU General Public License as published by
69656 + * the Free Software Foundation; either version 2, or (at your option)
69657 + * any later version.
69658 + *
69659 + * This program is distributed in the hope that it will be useful,
69660 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
69661 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
69662 + * GNU General Public License for more details.
69663 + *
69664 + * You should have received a copy of the GNU General Public License
69665 + * along with this program; see the file COPYING. If not, write to
69666 + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
69667 + */
69668 +
69669 +#ifndef _LINUX_SYSLOG_H
69670 +#define _LINUX_SYSLOG_H
69671 +
69672 +/* Close the log. Currently a NOP. */
69673 +#define SYSLOG_ACTION_CLOSE 0
69674 +/* Open the log. Currently a NOP. */
69675 +#define SYSLOG_ACTION_OPEN 1
69676 +/* Read from the log. */
69677 +#define SYSLOG_ACTION_READ 2
69678 +/* Read all messages remaining in the ring buffer. */
69679 +#define SYSLOG_ACTION_READ_ALL 3
69680 +/* Read and clear all messages remaining in the ring buffer */
69681 +#define SYSLOG_ACTION_READ_CLEAR 4
69682 +/* Clear ring buffer. */
69683 +#define SYSLOG_ACTION_CLEAR 5
69684 +/* Disable printk's to console */
69685 +#define SYSLOG_ACTION_CONSOLE_OFF 6
69686 +/* Enable printk's to console */
69687 +#define SYSLOG_ACTION_CONSOLE_ON 7
69688 +/* Set level of messages printed to console */
69689 +#define SYSLOG_ACTION_CONSOLE_LEVEL 8
69690 +/* Return number of unread characters in the log buffer */
69691 +#define SYSLOG_ACTION_SIZE_UNREAD 9
69692 +/* Return size of the log buffer */
69693 +#define SYSLOG_ACTION_SIZE_BUFFER 10
69694 +
69695 +#define SYSLOG_FROM_CALL 0
69696 +#define SYSLOG_FROM_FILE 1
69697 +
69698 +int do_syslog(int type, char __user *buf, int count, bool from_file);
69699 +
69700 +#endif /* _LINUX_SYSLOG_H */
69701 diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
69702 index a8cc4e1..98d3b85 100644
69703 --- a/include/linux/thread_info.h
69704 +++ b/include/linux/thread_info.h
69705 @@ -23,7 +23,7 @@ struct restart_block {
69706 };
69707 /* For futex_wait and futex_wait_requeue_pi */
69708 struct {
69709 - u32 *uaddr;
69710 + u32 __user *uaddr;
69711 u32 val;
69712 u32 flags;
69713 u32 bitset;
69714 diff --git a/include/linux/tty.h b/include/linux/tty.h
69715 index e9c57e9..ee6d489 100644
69716 --- a/include/linux/tty.h
69717 +++ b/include/linux/tty.h
69718 @@ -493,7 +493,6 @@ extern void tty_ldisc_begin(void);
69719 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
69720 extern void tty_ldisc_enable(struct tty_struct *tty);
69721
69722 -
69723 /* n_tty.c */
69724 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
69725
69726 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
69727 index 0c4ee9b..9f7c426 100644
69728 --- a/include/linux/tty_ldisc.h
69729 +++ b/include/linux/tty_ldisc.h
69730 @@ -139,7 +139,7 @@ struct tty_ldisc_ops {
69731
69732 struct module *owner;
69733
69734 - int refcount;
69735 + atomic_t refcount;
69736 };
69737
69738 struct tty_ldisc {
69739 diff --git a/include/linux/types.h b/include/linux/types.h
69740 index c42724f..d190eee 100644
69741 --- a/include/linux/types.h
69742 +++ b/include/linux/types.h
69743 @@ -191,10 +191,26 @@ typedef struct {
69744 volatile int counter;
69745 } atomic_t;
69746
69747 +#ifdef CONFIG_PAX_REFCOUNT
69748 +typedef struct {
69749 + volatile int counter;
69750 +} atomic_unchecked_t;
69751 +#else
69752 +typedef atomic_t atomic_unchecked_t;
69753 +#endif
69754 +
69755 #ifdef CONFIG_64BIT
69756 typedef struct {
69757 volatile long counter;
69758 } atomic64_t;
69759 +
69760 +#ifdef CONFIG_PAX_REFCOUNT
69761 +typedef struct {
69762 + volatile long counter;
69763 +} atomic64_unchecked_t;
69764 +#else
69765 +typedef atomic64_t atomic64_unchecked_t;
69766 +#endif
69767 #endif
69768
69769 struct ustat {
69770 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
69771 index 6b58367..53a3e8e 100644
69772 --- a/include/linux/uaccess.h
69773 +++ b/include/linux/uaccess.h
69774 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
69775 long ret; \
69776 mm_segment_t old_fs = get_fs(); \
69777 \
69778 - set_fs(KERNEL_DS); \
69779 pagefault_disable(); \
69780 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
69781 - pagefault_enable(); \
69782 + set_fs(KERNEL_DS); \
69783 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
69784 set_fs(old_fs); \
69785 + pagefault_enable(); \
69786 ret; \
69787 })
69788
69789 @@ -93,7 +93,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
69790 * Safely read from address @src to the buffer at @dst. If a kernel fault
69791 * happens, handle that and return -EFAULT.
69792 */
69793 -extern long probe_kernel_read(void *dst, void *src, size_t size);
69794 +extern long probe_kernel_read(void *dst, const void *src, size_t size);
69795
69796 /*
69797 * probe_kernel_write(): safely attempt to write to a location
69798 @@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst, void *src, size_t size);
69799 * Safely write to address @dst from the buffer at @src. If a kernel fault
69800 * happens, handle that and return -EFAULT.
69801 */
69802 -extern long probe_kernel_write(void *dst, void *src, size_t size);
69803 +extern long probe_kernel_write(void *dst, const void *src, size_t size);
69804
69805 #endif /* __LINUX_UACCESS_H__ */
69806 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
69807 index 99c1b4d..bb94261 100644
69808 --- a/include/linux/unaligned/access_ok.h
69809 +++ b/include/linux/unaligned/access_ok.h
69810 @@ -6,32 +6,32 @@
69811
69812 static inline u16 get_unaligned_le16(const void *p)
69813 {
69814 - return le16_to_cpup((__le16 *)p);
69815 + return le16_to_cpup((const __le16 *)p);
69816 }
69817
69818 static inline u32 get_unaligned_le32(const void *p)
69819 {
69820 - return le32_to_cpup((__le32 *)p);
69821 + return le32_to_cpup((const __le32 *)p);
69822 }
69823
69824 static inline u64 get_unaligned_le64(const void *p)
69825 {
69826 - return le64_to_cpup((__le64 *)p);
69827 + return le64_to_cpup((const __le64 *)p);
69828 }
69829
69830 static inline u16 get_unaligned_be16(const void *p)
69831 {
69832 - return be16_to_cpup((__be16 *)p);
69833 + return be16_to_cpup((const __be16 *)p);
69834 }
69835
69836 static inline u32 get_unaligned_be32(const void *p)
69837 {
69838 - return be32_to_cpup((__be32 *)p);
69839 + return be32_to_cpup((const __be32 *)p);
69840 }
69841
69842 static inline u64 get_unaligned_be64(const void *p)
69843 {
69844 - return be64_to_cpup((__be64 *)p);
69845 + return be64_to_cpup((const __be64 *)p);
69846 }
69847
69848 static inline void put_unaligned_le16(u16 val, void *p)
69849 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
69850 index 79b9837..b5a56f9 100644
69851 --- a/include/linux/vermagic.h
69852 +++ b/include/linux/vermagic.h
69853 @@ -26,9 +26,35 @@
69854 #define MODULE_ARCH_VERMAGIC ""
69855 #endif
69856
69857 +#ifdef CONFIG_PAX_REFCOUNT
69858 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
69859 +#else
69860 +#define MODULE_PAX_REFCOUNT ""
69861 +#endif
69862 +
69863 +#ifdef CONSTIFY_PLUGIN
69864 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
69865 +#else
69866 +#define MODULE_CONSTIFY_PLUGIN ""
69867 +#endif
69868 +
69869 +#ifdef STACKLEAK_PLUGIN
69870 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
69871 +#else
69872 +#define MODULE_STACKLEAK_PLUGIN ""
69873 +#endif
69874 +
69875 +#ifdef CONFIG_GRKERNSEC
69876 +#define MODULE_GRSEC "GRSEC "
69877 +#else
69878 +#define MODULE_GRSEC ""
69879 +#endif
69880 +
69881 #define VERMAGIC_STRING \
69882 UTS_RELEASE " " \
69883 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
69884 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
69885 - MODULE_ARCH_VERMAGIC
69886 + MODULE_ARCH_VERMAGIC \
69887 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
69888 + MODULE_GRSEC
69889
69890 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
69891 index 819a634..462ac12 100644
69892 --- a/include/linux/vmalloc.h
69893 +++ b/include/linux/vmalloc.h
69894 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
69895 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
69896 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
69897 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
69898 +
69899 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
69900 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
69901 +#endif
69902 +
69903 /* bits [20..32] reserved for arch specific ioremap internals */
69904
69905 /*
69906 @@ -124,4 +129,81 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
69907
69908 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
69909
69910 +#define vmalloc(x) \
69911 +({ \
69912 + void *___retval; \
69913 + intoverflow_t ___x = (intoverflow_t)x; \
69914 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
69915 + ___retval = NULL; \
69916 + else \
69917 + ___retval = vmalloc((unsigned long)___x); \
69918 + ___retval; \
69919 +})
69920 +
69921 +#define __vmalloc(x, y, z) \
69922 +({ \
69923 + void *___retval; \
69924 + intoverflow_t ___x = (intoverflow_t)x; \
69925 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
69926 + ___retval = NULL; \
69927 + else \
69928 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
69929 + ___retval; \
69930 +})
69931 +
69932 +#define vmalloc_user(x) \
69933 +({ \
69934 + void *___retval; \
69935 + intoverflow_t ___x = (intoverflow_t)x; \
69936 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
69937 + ___retval = NULL; \
69938 + else \
69939 + ___retval = vmalloc_user((unsigned long)___x); \
69940 + ___retval; \
69941 +})
69942 +
69943 +#define vmalloc_exec(x) \
69944 +({ \
69945 + void *___retval; \
69946 + intoverflow_t ___x = (intoverflow_t)x; \
69947 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
69948 + ___retval = NULL; \
69949 + else \
69950 + ___retval = vmalloc_exec((unsigned long)___x); \
69951 + ___retval; \
69952 +})
69953 +
69954 +#define vmalloc_node(x, y) \
69955 +({ \
69956 + void *___retval; \
69957 + intoverflow_t ___x = (intoverflow_t)x; \
69958 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
69959 + ___retval = NULL; \
69960 + else \
69961 + ___retval = vmalloc_node((unsigned long)___x, (y));\
69962 + ___retval; \
69963 +})
69964 +
69965 +#define vmalloc_32(x) \
69966 +({ \
69967 + void *___retval; \
69968 + intoverflow_t ___x = (intoverflow_t)x; \
69969 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
69970 + ___retval = NULL; \
69971 + else \
69972 + ___retval = vmalloc_32((unsigned long)___x); \
69973 + ___retval; \
69974 +})
69975 +
69976 +#define vmalloc_32_user(x) \
69977 +({ \
69978 + void *___retval; \
69979 + intoverflow_t ___x = (intoverflow_t)x; \
69980 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
69981 + ___retval = NULL; \
69982 + else \
69983 + ___retval = vmalloc_32_user((unsigned long)___x);\
69984 + ___retval; \
69985 +})
69986 +
69987 #endif /* _LINUX_VMALLOC_H */
69988 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
69989 index 13070d6..aa4159a 100644
69990 --- a/include/linux/vmstat.h
69991 +++ b/include/linux/vmstat.h
69992 @@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(int cpu)
69993 /*
69994 * Zone based page accounting with per cpu differentials.
69995 */
69996 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
69997 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
69998
69999 static inline void zone_page_state_add(long x, struct zone *zone,
70000 enum zone_stat_item item)
70001 {
70002 - atomic_long_add(x, &zone->vm_stat[item]);
70003 - atomic_long_add(x, &vm_stat[item]);
70004 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
70005 + atomic_long_add_unchecked(x, &vm_stat[item]);
70006 }
70007
70008 static inline unsigned long global_page_state(enum zone_stat_item item)
70009 {
70010 - long x = atomic_long_read(&vm_stat[item]);
70011 + long x = atomic_long_read_unchecked(&vm_stat[item]);
70012 #ifdef CONFIG_SMP
70013 if (x < 0)
70014 x = 0;
70015 @@ -158,7 +158,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
70016 static inline unsigned long zone_page_state(struct zone *zone,
70017 enum zone_stat_item item)
70018 {
70019 - long x = atomic_long_read(&zone->vm_stat[item]);
70020 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
70021 #ifdef CONFIG_SMP
70022 if (x < 0)
70023 x = 0;
70024 @@ -175,7 +175,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
70025 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
70026 enum zone_stat_item item)
70027 {
70028 - long x = atomic_long_read(&zone->vm_stat[item]);
70029 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
70030
70031 #ifdef CONFIG_SMP
70032 int cpu;
70033 @@ -264,8 +264,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
70034
70035 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
70036 {
70037 - atomic_long_inc(&zone->vm_stat[item]);
70038 - atomic_long_inc(&vm_stat[item]);
70039 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
70040 + atomic_long_inc_unchecked(&vm_stat[item]);
70041 }
70042
70043 static inline void __inc_zone_page_state(struct page *page,
70044 @@ -276,8 +276,8 @@ static inline void __inc_zone_page_state(struct page *page,
70045
70046 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
70047 {
70048 - atomic_long_dec(&zone->vm_stat[item]);
70049 - atomic_long_dec(&vm_stat[item]);
70050 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
70051 + atomic_long_dec_unchecked(&vm_stat[item]);
70052 }
70053
70054 static inline void __dec_zone_page_state(struct page *page,
70055 diff --git a/include/linux/xattr.h b/include/linux/xattr.h
70056 index 5c84af8..1a3b6e2 100644
70057 --- a/include/linux/xattr.h
70058 +++ b/include/linux/xattr.h
70059 @@ -33,6 +33,11 @@
70060 #define XATTR_USER_PREFIX "user."
70061 #define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1)
70062
70063 +/* User namespace */
70064 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
70065 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
70066 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
70067 +
70068 struct inode;
70069 struct dentry;
70070
70071 diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
70072 index eed5fcc..5080d24 100644
70073 --- a/include/media/saa7146_vv.h
70074 +++ b/include/media/saa7146_vv.h
70075 @@ -167,7 +167,7 @@ struct saa7146_ext_vv
70076 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
70077
70078 /* the extension can override this */
70079 - struct v4l2_ioctl_ops ops;
70080 + v4l2_ioctl_ops_no_const ops;
70081 /* pointer to the saa7146 core ops */
70082 const struct v4l2_ioctl_ops *core_ops;
70083
70084 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
70085 index 73c9867..2da8837 100644
70086 --- a/include/media/v4l2-dev.h
70087 +++ b/include/media/v4l2-dev.h
70088 @@ -34,7 +34,7 @@ struct v4l2_device;
70089 #define V4L2_FL_UNREGISTERED (0)
70090
70091 struct v4l2_file_operations {
70092 - struct module *owner;
70093 + struct module * const owner;
70094 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
70095 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
70096 unsigned int (*poll) (struct file *, struct poll_table_struct *);
70097 @@ -46,6 +46,7 @@ struct v4l2_file_operations {
70098 int (*open) (struct file *);
70099 int (*release) (struct file *);
70100 };
70101 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
70102
70103 /*
70104 * Newer version of video_device, handled by videodev2.c
70105 diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
70106 index 5d5d550..f559ef1 100644
70107 --- a/include/media/v4l2-device.h
70108 +++ b/include/media/v4l2-device.h
70109 @@ -71,7 +71,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
70110 this function returns 0. If the name ends with a digit (e.g. cx18),
70111 then the name will be set to cx18-0 since cx180 looks really odd. */
70112 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
70113 - atomic_t *instance);
70114 + atomic_unchecked_t *instance);
70115
70116 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
70117 Since the parent disappears this ensures that v4l2_dev doesn't have an
70118 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
70119 index 7a4529d..7244290 100644
70120 --- a/include/media/v4l2-ioctl.h
70121 +++ b/include/media/v4l2-ioctl.h
70122 @@ -243,6 +243,7 @@ struct v4l2_ioctl_ops {
70123 long (*vidioc_default) (struct file *file, void *fh,
70124 int cmd, void *arg);
70125 };
70126 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
70127
70128
70129 /* v4l debugging and diagnostics */
70130 diff --git a/include/net/flow.h b/include/net/flow.h
70131 index 809970b..c3df4f3 100644
70132 --- a/include/net/flow.h
70133 +++ b/include/net/flow.h
70134 @@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net *net, struct flowi *key, u16 family,
70135 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
70136 u8 dir, flow_resolve_t resolver);
70137 extern void flow_cache_flush(void);
70138 -extern atomic_t flow_cache_genid;
70139 +extern atomic_unchecked_t flow_cache_genid;
70140
70141 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
70142 {
70143 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
70144 index 15e1f8fe..668837c 100644
70145 --- a/include/net/inetpeer.h
70146 +++ b/include/net/inetpeer.h
70147 @@ -24,7 +24,7 @@ struct inet_peer
70148 __u32 dtime; /* the time of last use of not
70149 * referenced entries */
70150 atomic_t refcnt;
70151 - atomic_t rid; /* Frag reception counter */
70152 + atomic_unchecked_t rid; /* Frag reception counter */
70153 __u32 tcp_ts;
70154 unsigned long tcp_ts_stamp;
70155 };
70156 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
70157 index 98978e7..2243a3d 100644
70158 --- a/include/net/ip_vs.h
70159 +++ b/include/net/ip_vs.h
70160 @@ -365,7 +365,7 @@ struct ip_vs_conn {
70161 struct ip_vs_conn *control; /* Master control connection */
70162 atomic_t n_control; /* Number of controlled ones */
70163 struct ip_vs_dest *dest; /* real server */
70164 - atomic_t in_pkts; /* incoming packet counter */
70165 + atomic_unchecked_t in_pkts; /* incoming packet counter */
70166
70167 /* packet transmitter for different forwarding methods. If it
70168 mangles the packet, it must return NF_DROP or better NF_STOLEN,
70169 @@ -466,7 +466,7 @@ struct ip_vs_dest {
70170 union nf_inet_addr addr; /* IP address of the server */
70171 __be16 port; /* port number of the server */
70172 volatile unsigned flags; /* dest status flags */
70173 - atomic_t conn_flags; /* flags to copy to conn */
70174 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
70175 atomic_t weight; /* server weight */
70176
70177 atomic_t refcnt; /* reference counter */
70178 diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
70179 index 69b610a..fe3962c 100644
70180 --- a/include/net/irda/ircomm_core.h
70181 +++ b/include/net/irda/ircomm_core.h
70182 @@ -51,7 +51,7 @@ typedef struct {
70183 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
70184 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
70185 struct ircomm_info *);
70186 -} call_t;
70187 +} __no_const call_t;
70188
70189 struct ircomm_cb {
70190 irda_queue_t queue;
70191 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
70192 index eea2e61..08c692d 100644
70193 --- a/include/net/irda/ircomm_tty.h
70194 +++ b/include/net/irda/ircomm_tty.h
70195 @@ -35,6 +35,7 @@
70196 #include <linux/termios.h>
70197 #include <linux/timer.h>
70198 #include <linux/tty.h> /* struct tty_struct */
70199 +#include <asm/local.h>
70200
70201 #include <net/irda/irias_object.h>
70202 #include <net/irda/ircomm_core.h>
70203 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
70204 unsigned short close_delay;
70205 unsigned short closing_wait; /* time to wait before closing */
70206
70207 - int open_count;
70208 - int blocked_open; /* # of blocked opens */
70209 + local_t open_count;
70210 + local_t blocked_open; /* # of blocked opens */
70211
70212 /* Protect concurent access to :
70213 * o self->open_count
70214 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
70215 index f82a1e8..82d81e8 100644
70216 --- a/include/net/iucv/af_iucv.h
70217 +++ b/include/net/iucv/af_iucv.h
70218 @@ -87,7 +87,7 @@ struct iucv_sock {
70219 struct iucv_sock_list {
70220 struct hlist_head head;
70221 rwlock_t lock;
70222 - atomic_t autobind_name;
70223 + atomic_unchecked_t autobind_name;
70224 };
70225
70226 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
70227 diff --git a/include/net/lapb.h b/include/net/lapb.h
70228 index 96cb5dd..25e8d4f 100644
70229 --- a/include/net/lapb.h
70230 +++ b/include/net/lapb.h
70231 @@ -95,7 +95,7 @@ struct lapb_cb {
70232 struct sk_buff_head write_queue;
70233 struct sk_buff_head ack_queue;
70234 unsigned char window;
70235 - struct lapb_register_struct callbacks;
70236 + struct lapb_register_struct *callbacks;
70237
70238 /* FRMR control information */
70239 struct lapb_frame frmr_data;
70240 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
70241 index 3817fda..cdb2343 100644
70242 --- a/include/net/neighbour.h
70243 +++ b/include/net/neighbour.h
70244 @@ -131,7 +131,7 @@ struct neigh_ops
70245 int (*connected_output)(struct sk_buff*);
70246 int (*hh_output)(struct sk_buff*);
70247 int (*queue_xmit)(struct sk_buff*);
70248 -};
70249 +} __do_const;
70250
70251 struct pneigh_entry
70252 {
70253 diff --git a/include/net/netlink.h b/include/net/netlink.h
70254 index c344646..4778c71 100644
70255 --- a/include/net/netlink.h
70256 +++ b/include/net/netlink.h
70257 @@ -335,7 +335,7 @@ static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining)
70258 {
70259 return (remaining >= (int) sizeof(struct nlmsghdr) &&
70260 nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
70261 - nlh->nlmsg_len <= remaining);
70262 + nlh->nlmsg_len <= (unsigned int)remaining);
70263 }
70264
70265 /**
70266 @@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
70267 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
70268 {
70269 if (mark)
70270 - skb_trim(skb, (unsigned char *) mark - skb->data);
70271 + skb_trim(skb, (const unsigned char *) mark - skb->data);
70272 }
70273
70274 /**
70275 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
70276 index 9a4b8b7..e49e077 100644
70277 --- a/include/net/netns/ipv4.h
70278 +++ b/include/net/netns/ipv4.h
70279 @@ -54,7 +54,7 @@ struct netns_ipv4 {
70280 int current_rt_cache_rebuild_count;
70281
70282 struct timer_list rt_secret_timer;
70283 - atomic_t rt_genid;
70284 + atomic_unchecked_t rt_genid;
70285
70286 #ifdef CONFIG_IP_MROUTE
70287 struct sock *mroute_sk;
70288 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
70289 index 8a6d529..171f401 100644
70290 --- a/include/net/sctp/sctp.h
70291 +++ b/include/net/sctp/sctp.h
70292 @@ -305,8 +305,8 @@ extern int sctp_debug_flag;
70293
70294 #else /* SCTP_DEBUG */
70295
70296 -#define SCTP_DEBUG_PRINTK(whatever...)
70297 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
70298 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
70299 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
70300 #define SCTP_ENABLE_DEBUG
70301 #define SCTP_DISABLE_DEBUG
70302 #define SCTP_ASSERT(expr, str, func)
70303 diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
70304 index d97f689..f3b90ab 100644
70305 --- a/include/net/secure_seq.h
70306 +++ b/include/net/secure_seq.h
70307 @@ -7,14 +7,14 @@ extern __u32 secure_ip_id(__be32 daddr);
70308 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
70309 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
70310 extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
70311 - __be16 dport);
70312 + __be16 dport);
70313 extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
70314 __be16 sport, __be16 dport);
70315 extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
70316 - __be16 sport, __be16 dport);
70317 + __be16 sport, __be16 dport);
70318 extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
70319 - __be16 sport, __be16 dport);
70320 + __be16 sport, __be16 dport);
70321 extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
70322 - __be16 sport, __be16 dport);
70323 + __be16 sport, __be16 dport);
70324
70325 #endif /* _NET_SECURE_SEQ */
70326 diff --git a/include/net/sock.h b/include/net/sock.h
70327 index 9f96394..76fc9c7 100644
70328 --- a/include/net/sock.h
70329 +++ b/include/net/sock.h
70330 @@ -272,7 +272,7 @@ struct sock {
70331 rwlock_t sk_callback_lock;
70332 int sk_err,
70333 sk_err_soft;
70334 - atomic_t sk_drops;
70335 + atomic_unchecked_t sk_drops;
70336 unsigned short sk_ack_backlog;
70337 unsigned short sk_max_ack_backlog;
70338 __u32 sk_priority;
70339 @@ -737,7 +737,7 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
70340 extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
70341 extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
70342 #else
70343 -static void inline sock_prot_inuse_add(struct net *net, struct proto *prot,
70344 +static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
70345 int inc)
70346 {
70347 }
70348 diff --git a/include/net/tcp.h b/include/net/tcp.h
70349 index 6cfe18b..dd21acb 100644
70350 --- a/include/net/tcp.h
70351 +++ b/include/net/tcp.h
70352 @@ -1444,8 +1444,8 @@ enum tcp_seq_states {
70353 struct tcp_seq_afinfo {
70354 char *name;
70355 sa_family_t family;
70356 - struct file_operations seq_fops;
70357 - struct seq_operations seq_ops;
70358 + file_operations_no_const seq_fops;
70359 + seq_operations_no_const seq_ops;
70360 };
70361
70362 struct tcp_iter_state {
70363 diff --git a/include/net/udp.h b/include/net/udp.h
70364 index f98abd2..b4b042f 100644
70365 --- a/include/net/udp.h
70366 +++ b/include/net/udp.h
70367 @@ -187,8 +187,8 @@ struct udp_seq_afinfo {
70368 char *name;
70369 sa_family_t family;
70370 struct udp_table *udp_table;
70371 - struct file_operations seq_fops;
70372 - struct seq_operations seq_ops;
70373 + file_operations_no_const seq_fops;
70374 + seq_operations_no_const seq_ops;
70375 };
70376
70377 struct udp_iter_state {
70378 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
70379 index cbb822e..e9c1cbe 100644
70380 --- a/include/rdma/iw_cm.h
70381 +++ b/include/rdma/iw_cm.h
70382 @@ -129,7 +129,7 @@ struct iw_cm_verbs {
70383 int backlog);
70384
70385 int (*destroy_listen)(struct iw_cm_id *cm_id);
70386 -};
70387 +} __no_const;
70388
70389 /**
70390 * iw_create_cm_id - Create an IW CM identifier.
70391 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
70392 index 09a124b..caa8ca8 100644
70393 --- a/include/scsi/libfc.h
70394 +++ b/include/scsi/libfc.h
70395 @@ -675,6 +675,7 @@ struct libfc_function_template {
70396 */
70397 void (*disc_stop_final) (struct fc_lport *);
70398 };
70399 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
70400
70401 /* information used by the discovery layer */
70402 struct fc_disc {
70403 @@ -707,7 +708,7 @@ struct fc_lport {
70404 struct fc_disc disc;
70405
70406 /* Operational Information */
70407 - struct libfc_function_template tt;
70408 + libfc_function_template_no_const tt;
70409 u8 link_up;
70410 u8 qfull;
70411 enum fc_lport_state state;
70412 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
70413 index de8e180..f15e0d7 100644
70414 --- a/include/scsi/scsi_device.h
70415 +++ b/include/scsi/scsi_device.h
70416 @@ -156,9 +156,9 @@ struct scsi_device {
70417 unsigned int max_device_blocked; /* what device_blocked counts down from */
70418 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
70419
70420 - atomic_t iorequest_cnt;
70421 - atomic_t iodone_cnt;
70422 - atomic_t ioerr_cnt;
70423 + atomic_unchecked_t iorequest_cnt;
70424 + atomic_unchecked_t iodone_cnt;
70425 + atomic_unchecked_t ioerr_cnt;
70426
70427 struct device sdev_gendev,
70428 sdev_dev;
70429 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
70430 index fc50bd6..81ba9cb 100644
70431 --- a/include/scsi/scsi_transport_fc.h
70432 +++ b/include/scsi/scsi_transport_fc.h
70433 @@ -708,7 +708,7 @@ struct fc_function_template {
70434 unsigned long show_host_system_hostname:1;
70435
70436 unsigned long disable_target_scan:1;
70437 -};
70438 +} __do_const;
70439
70440
70441 /**
70442 diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
70443 index 3dae3f7..8440d6f 100644
70444 --- a/include/sound/ac97_codec.h
70445 +++ b/include/sound/ac97_codec.h
70446 @@ -419,15 +419,15 @@
70447 struct snd_ac97;
70448
70449 struct snd_ac97_build_ops {
70450 - int (*build_3d) (struct snd_ac97 *ac97);
70451 - int (*build_specific) (struct snd_ac97 *ac97);
70452 - int (*build_spdif) (struct snd_ac97 *ac97);
70453 - int (*build_post_spdif) (struct snd_ac97 *ac97);
70454 + int (* const build_3d) (struct snd_ac97 *ac97);
70455 + int (* const build_specific) (struct snd_ac97 *ac97);
70456 + int (* const build_spdif) (struct snd_ac97 *ac97);
70457 + int (* const build_post_spdif) (struct snd_ac97 *ac97);
70458 #ifdef CONFIG_PM
70459 - void (*suspend) (struct snd_ac97 *ac97);
70460 - void (*resume) (struct snd_ac97 *ac97);
70461 + void (* const suspend) (struct snd_ac97 *ac97);
70462 + void (* const resume) (struct snd_ac97 *ac97);
70463 #endif
70464 - void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
70465 + void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
70466 };
70467
70468 struct snd_ac97_bus_ops {
70469 @@ -477,7 +477,7 @@ struct snd_ac97_template {
70470
70471 struct snd_ac97 {
70472 /* -- lowlevel (hardware) driver specific -- */
70473 - struct snd_ac97_build_ops * build_ops;
70474 + const struct snd_ac97_build_ops * build_ops;
70475 void *private_data;
70476 void (*private_free) (struct snd_ac97 *ac97);
70477 /* --- */
70478 diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
70479 index 891cf1a..a94ba2b 100644
70480 --- a/include/sound/ak4xxx-adda.h
70481 +++ b/include/sound/ak4xxx-adda.h
70482 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
70483 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
70484 unsigned char val);
70485 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
70486 -};
70487 +} __no_const;
70488
70489 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
70490
70491 diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
70492 index 8c05e47..2b5df97 100644
70493 --- a/include/sound/hwdep.h
70494 +++ b/include/sound/hwdep.h
70495 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
70496 struct snd_hwdep_dsp_status *status);
70497 int (*dsp_load)(struct snd_hwdep *hw,
70498 struct snd_hwdep_dsp_image *image);
70499 -};
70500 +} __no_const;
70501
70502 struct snd_hwdep {
70503 struct snd_card *card;
70504 diff --git a/include/sound/info.h b/include/sound/info.h
70505 index 112e894..6fda5b5 100644
70506 --- a/include/sound/info.h
70507 +++ b/include/sound/info.h
70508 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
70509 struct snd_info_buffer *buffer);
70510 void (*write)(struct snd_info_entry *entry,
70511 struct snd_info_buffer *buffer);
70512 -};
70513 +} __no_const;
70514
70515 struct snd_info_entry_ops {
70516 int (*open)(struct snd_info_entry *entry,
70517 diff --git a/include/sound/pcm.h b/include/sound/pcm.h
70518 index de6d981..590a550 100644
70519 --- a/include/sound/pcm.h
70520 +++ b/include/sound/pcm.h
70521 @@ -80,6 +80,7 @@ struct snd_pcm_ops {
70522 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
70523 int (*ack)(struct snd_pcm_substream *substream);
70524 };
70525 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
70526
70527 /*
70528 *
70529 diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
70530 index 736eac7..fe8a80f 100644
70531 --- a/include/sound/sb16_csp.h
70532 +++ b/include/sound/sb16_csp.h
70533 @@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
70534 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
70535 int (*csp_stop) (struct snd_sb_csp * p);
70536 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
70537 -};
70538 +} __no_const;
70539
70540 /*
70541 * CSP private data
70542 diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
70543 index 444cd6b..3327cc5 100644
70544 --- a/include/sound/ymfpci.h
70545 +++ b/include/sound/ymfpci.h
70546 @@ -358,7 +358,7 @@ struct snd_ymfpci {
70547 spinlock_t reg_lock;
70548 spinlock_t voice_lock;
70549 wait_queue_head_t interrupt_sleep;
70550 - atomic_t interrupt_sleep_count;
70551 + atomic_unchecked_t interrupt_sleep_count;
70552 struct snd_info_entry *proc_entry;
70553 const struct firmware *dsp_microcode;
70554 const struct firmware *controller_microcode;
70555 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
70556 index b89f9db..f097b38 100644
70557 --- a/include/trace/events/irq.h
70558 +++ b/include/trace/events/irq.h
70559 @@ -34,7 +34,7 @@
70560 */
70561 TRACE_EVENT(irq_handler_entry,
70562
70563 - TP_PROTO(int irq, struct irqaction *action),
70564 + TP_PROTO(int irq, const struct irqaction *action),
70565
70566 TP_ARGS(irq, action),
70567
70568 @@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
70569 */
70570 TRACE_EVENT(irq_handler_exit,
70571
70572 - TP_PROTO(int irq, struct irqaction *action, int ret),
70573 + TP_PROTO(int irq, const struct irqaction *action, int ret),
70574
70575 TP_ARGS(irq, action, ret),
70576
70577 @@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
70578 */
70579 TRACE_EVENT(softirq_entry,
70580
70581 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
70582 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
70583
70584 TP_ARGS(h, vec),
70585
70586 @@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
70587 */
70588 TRACE_EVENT(softirq_exit,
70589
70590 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
70591 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
70592
70593 TP_ARGS(h, vec),
70594
70595 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
70596 index 0993a22..32ba2fe 100644
70597 --- a/include/video/uvesafb.h
70598 +++ b/include/video/uvesafb.h
70599 @@ -177,6 +177,7 @@ struct uvesafb_par {
70600 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
70601 u8 pmi_setpal; /* PMI for palette changes */
70602 u16 *pmi_base; /* protected mode interface location */
70603 + u8 *pmi_code; /* protected mode code location */
70604 void *pmi_start;
70605 void *pmi_pal;
70606 u8 *vbe_state_orig; /*
70607 diff --git a/init/Kconfig b/init/Kconfig
70608 index d72691b..3996e54 100644
70609 --- a/init/Kconfig
70610 +++ b/init/Kconfig
70611 @@ -1004,7 +1004,7 @@ config SLUB_DEBUG
70612
70613 config COMPAT_BRK
70614 bool "Disable heap randomization"
70615 - default y
70616 + default n
70617 help
70618 Randomizing heap placement makes heap exploits harder, but it
70619 also breaks ancient binaries (including anything libc5 based).
70620 diff --git a/init/do_mounts.c b/init/do_mounts.c
70621 index bb008d0..4fa3933 100644
70622 --- a/init/do_mounts.c
70623 +++ b/init/do_mounts.c
70624 @@ -216,11 +216,11 @@ static void __init get_fs_names(char *page)
70625
70626 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
70627 {
70628 - int err = sys_mount(name, "/root", fs, flags, data);
70629 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
70630 if (err)
70631 return err;
70632
70633 - sys_chdir("/root");
70634 + sys_chdir((__force const char __user *)"/root");
70635 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
70636 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
70637 current->fs->pwd.mnt->mnt_sb->s_type->name,
70638 @@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...)
70639 va_start(args, fmt);
70640 vsprintf(buf, fmt, args);
70641 va_end(args);
70642 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
70643 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
70644 if (fd >= 0) {
70645 sys_ioctl(fd, FDEJECT, 0);
70646 sys_close(fd);
70647 }
70648 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
70649 - fd = sys_open("/dev/console", O_RDWR, 0);
70650 + fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
70651 if (fd >= 0) {
70652 sys_ioctl(fd, TCGETS, (long)&termios);
70653 termios.c_lflag &= ~ICANON;
70654 sys_ioctl(fd, TCSETSF, (long)&termios);
70655 - sys_read(fd, &c, 1);
70656 + sys_read(fd, (char __user *)&c, 1);
70657 termios.c_lflag |= ICANON;
70658 sys_ioctl(fd, TCSETSF, (long)&termios);
70659 sys_close(fd);
70660 @@ -416,6 +416,6 @@ void __init prepare_namespace(void)
70661 mount_root();
70662 out:
70663 devtmpfs_mount("dev");
70664 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
70665 - sys_chroot(".");
70666 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
70667 + sys_chroot((__force char __user *)".");
70668 }
70669 diff --git a/init/do_mounts.h b/init/do_mounts.h
70670 index f5b978a..69dbfe8 100644
70671 --- a/init/do_mounts.h
70672 +++ b/init/do_mounts.h
70673 @@ -15,15 +15,15 @@ extern int root_mountflags;
70674
70675 static inline int create_dev(char *name, dev_t dev)
70676 {
70677 - sys_unlink(name);
70678 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
70679 + sys_unlink((char __force_user *)name);
70680 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
70681 }
70682
70683 #if BITS_PER_LONG == 32
70684 static inline u32 bstat(char *name)
70685 {
70686 struct stat64 stat;
70687 - if (sys_stat64(name, &stat) != 0)
70688 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
70689 return 0;
70690 if (!S_ISBLK(stat.st_mode))
70691 return 0;
70692 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
70693 static inline u32 bstat(char *name)
70694 {
70695 struct stat stat;
70696 - if (sys_newstat(name, &stat) != 0)
70697 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
70698 return 0;
70699 if (!S_ISBLK(stat.st_mode))
70700 return 0;
70701 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
70702 index 614241b..4da046b 100644
70703 --- a/init/do_mounts_initrd.c
70704 +++ b/init/do_mounts_initrd.c
70705 @@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shell)
70706 sys_close(old_fd);sys_close(root_fd);
70707 sys_close(0);sys_close(1);sys_close(2);
70708 sys_setsid();
70709 - (void) sys_open("/dev/console",O_RDWR,0);
70710 + (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
70711 (void) sys_dup(0);
70712 (void) sys_dup(0);
70713 return kernel_execve(shell, argv, envp_init);
70714 @@ -47,13 +47,13 @@ static void __init handle_initrd(void)
70715 create_dev("/dev/root.old", Root_RAM0);
70716 /* mount initrd on rootfs' /root */
70717 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
70718 - sys_mkdir("/old", 0700);
70719 - root_fd = sys_open("/", 0, 0);
70720 - old_fd = sys_open("/old", 0, 0);
70721 + sys_mkdir((const char __force_user *)"/old", 0700);
70722 + root_fd = sys_open((const char __force_user *)"/", 0, 0);
70723 + old_fd = sys_open((const char __force_user *)"/old", 0, 0);
70724 /* move initrd over / and chdir/chroot in initrd root */
70725 - sys_chdir("/root");
70726 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
70727 - sys_chroot(".");
70728 + sys_chdir((const char __force_user *)"/root");
70729 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
70730 + sys_chroot((const char __force_user *)".");
70731
70732 /*
70733 * In case that a resume from disk is carried out by linuxrc or one of
70734 @@ -70,15 +70,15 @@ static void __init handle_initrd(void)
70735
70736 /* move initrd to rootfs' /old */
70737 sys_fchdir(old_fd);
70738 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
70739 + sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
70740 /* switch root and cwd back to / of rootfs */
70741 sys_fchdir(root_fd);
70742 - sys_chroot(".");
70743 + sys_chroot((const char __force_user *)".");
70744 sys_close(old_fd);
70745 sys_close(root_fd);
70746
70747 if (new_decode_dev(real_root_dev) == Root_RAM0) {
70748 - sys_chdir("/old");
70749 + sys_chdir((const char __force_user *)"/old");
70750 return;
70751 }
70752
70753 @@ -86,17 +86,17 @@ static void __init handle_initrd(void)
70754 mount_root();
70755
70756 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
70757 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
70758 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
70759 if (!error)
70760 printk("okay\n");
70761 else {
70762 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
70763 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
70764 if (error == -ENOENT)
70765 printk("/initrd does not exist. Ignored.\n");
70766 else
70767 printk("failed\n");
70768 printk(KERN_NOTICE "Unmounting old root\n");
70769 - sys_umount("/old", MNT_DETACH);
70770 + sys_umount((char __force_user *)"/old", MNT_DETACH);
70771 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
70772 if (fd < 0) {
70773 error = fd;
70774 @@ -119,11 +119,11 @@ int __init initrd_load(void)
70775 * mounted in the normal path.
70776 */
70777 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
70778 - sys_unlink("/initrd.image");
70779 + sys_unlink((const char __force_user *)"/initrd.image");
70780 handle_initrd();
70781 return 1;
70782 }
70783 }
70784 - sys_unlink("/initrd.image");
70785 + sys_unlink((const char __force_user *)"/initrd.image");
70786 return 0;
70787 }
70788 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
70789 index 69aebbf..c0bf6a7 100644
70790 --- a/init/do_mounts_md.c
70791 +++ b/init/do_mounts_md.c
70792 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
70793 partitioned ? "_d" : "", minor,
70794 md_setup_args[ent].device_names);
70795
70796 - fd = sys_open(name, 0, 0);
70797 + fd = sys_open((char __force_user *)name, 0, 0);
70798 if (fd < 0) {
70799 printk(KERN_ERR "md: open failed - cannot start "
70800 "array %s\n", name);
70801 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
70802 * array without it
70803 */
70804 sys_close(fd);
70805 - fd = sys_open(name, 0, 0);
70806 + fd = sys_open((char __force_user *)name, 0, 0);
70807 sys_ioctl(fd, BLKRRPART, 0);
70808 }
70809 sys_close(fd);
70810 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
70811
70812 wait_for_device_probe();
70813
70814 - fd = sys_open("/dev/md0", 0, 0);
70815 + fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
70816 if (fd >= 0) {
70817 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
70818 sys_close(fd);
70819 diff --git a/init/initramfs.c b/init/initramfs.c
70820 index 1fd59b8..a01b079 100644
70821 --- a/init/initramfs.c
70822 +++ b/init/initramfs.c
70823 @@ -74,7 +74,7 @@ static void __init free_hash(void)
70824 }
70825 }
70826
70827 -static long __init do_utime(char __user *filename, time_t mtime)
70828 +static long __init do_utime(__force char __user *filename, time_t mtime)
70829 {
70830 struct timespec t[2];
70831
70832 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
70833 struct dir_entry *de, *tmp;
70834 list_for_each_entry_safe(de, tmp, &dir_list, list) {
70835 list_del(&de->list);
70836 - do_utime(de->name, de->mtime);
70837 + do_utime((char __force_user *)de->name, de->mtime);
70838 kfree(de->name);
70839 kfree(de);
70840 }
70841 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
70842 if (nlink >= 2) {
70843 char *old = find_link(major, minor, ino, mode, collected);
70844 if (old)
70845 - return (sys_link(old, collected) < 0) ? -1 : 1;
70846 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
70847 }
70848 return 0;
70849 }
70850 @@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
70851 {
70852 struct stat st;
70853
70854 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
70855 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
70856 if (S_ISDIR(st.st_mode))
70857 - sys_rmdir(path);
70858 + sys_rmdir((char __force_user *)path);
70859 else
70860 - sys_unlink(path);
70861 + sys_unlink((char __force_user *)path);
70862 }
70863 }
70864
70865 @@ -305,7 +305,7 @@ static int __init do_name(void)
70866 int openflags = O_WRONLY|O_CREAT;
70867 if (ml != 1)
70868 openflags |= O_TRUNC;
70869 - wfd = sys_open(collected, openflags, mode);
70870 + wfd = sys_open((char __force_user *)collected, openflags, mode);
70871
70872 if (wfd >= 0) {
70873 sys_fchown(wfd, uid, gid);
70874 @@ -317,17 +317,17 @@ static int __init do_name(void)
70875 }
70876 }
70877 } else if (S_ISDIR(mode)) {
70878 - sys_mkdir(collected, mode);
70879 - sys_chown(collected, uid, gid);
70880 - sys_chmod(collected, mode);
70881 + sys_mkdir((char __force_user *)collected, mode);
70882 + sys_chown((char __force_user *)collected, uid, gid);
70883 + sys_chmod((char __force_user *)collected, mode);
70884 dir_add(collected, mtime);
70885 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
70886 S_ISFIFO(mode) || S_ISSOCK(mode)) {
70887 if (maybe_link() == 0) {
70888 - sys_mknod(collected, mode, rdev);
70889 - sys_chown(collected, uid, gid);
70890 - sys_chmod(collected, mode);
70891 - do_utime(collected, mtime);
70892 + sys_mknod((char __force_user *)collected, mode, rdev);
70893 + sys_chown((char __force_user *)collected, uid, gid);
70894 + sys_chmod((char __force_user *)collected, mode);
70895 + do_utime((char __force_user *)collected, mtime);
70896 }
70897 }
70898 return 0;
70899 @@ -336,15 +336,15 @@ static int __init do_name(void)
70900 static int __init do_copy(void)
70901 {
70902 if (count >= body_len) {
70903 - sys_write(wfd, victim, body_len);
70904 + sys_write(wfd, (char __force_user *)victim, body_len);
70905 sys_close(wfd);
70906 - do_utime(vcollected, mtime);
70907 + do_utime((char __force_user *)vcollected, mtime);
70908 kfree(vcollected);
70909 eat(body_len);
70910 state = SkipIt;
70911 return 0;
70912 } else {
70913 - sys_write(wfd, victim, count);
70914 + sys_write(wfd, (char __force_user *)victim, count);
70915 body_len -= count;
70916 eat(count);
70917 return 1;
70918 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
70919 {
70920 collected[N_ALIGN(name_len) + body_len] = '\0';
70921 clean_path(collected, 0);
70922 - sys_symlink(collected + N_ALIGN(name_len), collected);
70923 - sys_lchown(collected, uid, gid);
70924 - do_utime(collected, mtime);
70925 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
70926 + sys_lchown((char __force_user *)collected, uid, gid);
70927 + do_utime((char __force_user *)collected, mtime);
70928 state = SkipIt;
70929 next_state = Reset;
70930 return 0;
70931 diff --git a/init/main.c b/init/main.c
70932 index 1eb4bd5..da8c6f5 100644
70933 --- a/init/main.c
70934 +++ b/init/main.c
70935 @@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void) { }
70936 #ifdef CONFIG_TC
70937 extern void tc_init(void);
70938 #endif
70939 +extern void grsecurity_init(void);
70940
70941 enum system_states system_state __read_mostly;
70942 EXPORT_SYMBOL(system_state);
70943 @@ -183,6 +184,49 @@ static int __init set_reset_devices(char *str)
70944
70945 __setup("reset_devices", set_reset_devices);
70946
70947 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
70948 +extern char pax_enter_kernel_user[];
70949 +extern char pax_exit_kernel_user[];
70950 +extern pgdval_t clone_pgd_mask;
70951 +#endif
70952 +
70953 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
70954 +static int __init setup_pax_nouderef(char *str)
70955 +{
70956 +#ifdef CONFIG_X86_32
70957 + unsigned int cpu;
70958 + struct desc_struct *gdt;
70959 +
70960 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
70961 + gdt = get_cpu_gdt_table(cpu);
70962 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
70963 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
70964 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
70965 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
70966 + }
70967 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
70968 +#else
70969 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
70970 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
70971 + clone_pgd_mask = ~(pgdval_t)0UL;
70972 +#endif
70973 +
70974 + return 0;
70975 +}
70976 +early_param("pax_nouderef", setup_pax_nouderef);
70977 +#endif
70978 +
70979 +#ifdef CONFIG_PAX_SOFTMODE
70980 +int pax_softmode;
70981 +
70982 +static int __init setup_pax_softmode(char *str)
70983 +{
70984 + get_option(&str, &pax_softmode);
70985 + return 1;
70986 +}
70987 +__setup("pax_softmode=", setup_pax_softmode);
70988 +#endif
70989 +
70990 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
70991 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
70992 static const char *panic_later, *panic_param;
70993 @@ -705,52 +749,53 @@ int initcall_debug;
70994 core_param(initcall_debug, initcall_debug, bool, 0644);
70995
70996 static char msgbuf[64];
70997 -static struct boot_trace_call call;
70998 -static struct boot_trace_ret ret;
70999 +static struct boot_trace_call trace_call;
71000 +static struct boot_trace_ret trace_ret;
71001
71002 int do_one_initcall(initcall_t fn)
71003 {
71004 int count = preempt_count();
71005 ktime_t calltime, delta, rettime;
71006 + const char *msg1 = "", *msg2 = "";
71007
71008 if (initcall_debug) {
71009 - call.caller = task_pid_nr(current);
71010 - printk("calling %pF @ %i\n", fn, call.caller);
71011 + trace_call.caller = task_pid_nr(current);
71012 + printk("calling %pF @ %i\n", fn, trace_call.caller);
71013 calltime = ktime_get();
71014 - trace_boot_call(&call, fn);
71015 + trace_boot_call(&trace_call, fn);
71016 enable_boot_trace();
71017 }
71018
71019 - ret.result = fn();
71020 + trace_ret.result = fn();
71021
71022 if (initcall_debug) {
71023 disable_boot_trace();
71024 rettime = ktime_get();
71025 delta = ktime_sub(rettime, calltime);
71026 - ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
71027 - trace_boot_ret(&ret, fn);
71028 + trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
71029 + trace_boot_ret(&trace_ret, fn);
71030 printk("initcall %pF returned %d after %Ld usecs\n", fn,
71031 - ret.result, ret.duration);
71032 + trace_ret.result, trace_ret.duration);
71033 }
71034
71035 msgbuf[0] = 0;
71036
71037 - if (ret.result && ret.result != -ENODEV && initcall_debug)
71038 - sprintf(msgbuf, "error code %d ", ret.result);
71039 + if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
71040 + sprintf(msgbuf, "error code %d ", trace_ret.result);
71041
71042 if (preempt_count() != count) {
71043 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
71044 + msg1 = " preemption imbalance";
71045 preempt_count() = count;
71046 }
71047 if (irqs_disabled()) {
71048 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
71049 + msg2 = " disabled interrupts";
71050 local_irq_enable();
71051 }
71052 - if (msgbuf[0]) {
71053 - printk("initcall %pF returned with %s\n", fn, msgbuf);
71054 + if (msgbuf[0] || *msg1 || *msg2) {
71055 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
71056 }
71057
71058 - return ret.result;
71059 + return trace_ret.result;
71060 }
71061
71062
71063 @@ -893,11 +938,13 @@ static int __init kernel_init(void * unused)
71064 if (!ramdisk_execute_command)
71065 ramdisk_execute_command = "/init";
71066
71067 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
71068 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
71069 ramdisk_execute_command = NULL;
71070 prepare_namespace();
71071 }
71072
71073 + grsecurity_init();
71074 +
71075 /*
71076 * Ok, we have completed the initial bootup, and
71077 * we're essentially up and running. Get rid of the
71078 diff --git a/init/noinitramfs.c b/init/noinitramfs.c
71079 index f4c1a3a..96c19bd 100644
71080 --- a/init/noinitramfs.c
71081 +++ b/init/noinitramfs.c
71082 @@ -29,7 +29,7 @@ static int __init default_rootfs(void)
71083 {
71084 int err;
71085
71086 - err = sys_mkdir("/dev", 0755);
71087 + err = sys_mkdir((const char __user *)"/dev", 0755);
71088 if (err < 0)
71089 goto out;
71090
71091 @@ -39,7 +39,7 @@ static int __init default_rootfs(void)
71092 if (err < 0)
71093 goto out;
71094
71095 - err = sys_mkdir("/root", 0700);
71096 + err = sys_mkdir((const char __user *)"/root", 0700);
71097 if (err < 0)
71098 goto out;
71099
71100 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
71101 index d01bc14..8df81db 100644
71102 --- a/ipc/mqueue.c
71103 +++ b/ipc/mqueue.c
71104 @@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
71105 mq_bytes = (mq_msg_tblsz +
71106 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
71107
71108 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
71109 spin_lock(&mq_lock);
71110 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
71111 u->mq_bytes + mq_bytes >
71112 diff --git a/ipc/msg.c b/ipc/msg.c
71113 index 779f762..4af9e36 100644
71114 --- a/ipc/msg.c
71115 +++ b/ipc/msg.c
71116 @@ -310,18 +310,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
71117 return security_msg_queue_associate(msq, msgflg);
71118 }
71119
71120 +static struct ipc_ops msg_ops = {
71121 + .getnew = newque,
71122 + .associate = msg_security,
71123 + .more_checks = NULL
71124 +};
71125 +
71126 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
71127 {
71128 struct ipc_namespace *ns;
71129 - struct ipc_ops msg_ops;
71130 struct ipc_params msg_params;
71131
71132 ns = current->nsproxy->ipc_ns;
71133
71134 - msg_ops.getnew = newque;
71135 - msg_ops.associate = msg_security;
71136 - msg_ops.more_checks = NULL;
71137 -
71138 msg_params.key = key;
71139 msg_params.flg = msgflg;
71140
71141 diff --git a/ipc/sem.c b/ipc/sem.c
71142 index b781007..f738b04 100644
71143 --- a/ipc/sem.c
71144 +++ b/ipc/sem.c
71145 @@ -309,10 +309,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
71146 return 0;
71147 }
71148
71149 +static struct ipc_ops sem_ops = {
71150 + .getnew = newary,
71151 + .associate = sem_security,
71152 + .more_checks = sem_more_checks
71153 +};
71154 +
71155 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
71156 {
71157 struct ipc_namespace *ns;
71158 - struct ipc_ops sem_ops;
71159 struct ipc_params sem_params;
71160
71161 ns = current->nsproxy->ipc_ns;
71162 @@ -320,10 +325,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
71163 if (nsems < 0 || nsems > ns->sc_semmsl)
71164 return -EINVAL;
71165
71166 - sem_ops.getnew = newary;
71167 - sem_ops.associate = sem_security;
71168 - sem_ops.more_checks = sem_more_checks;
71169 -
71170 sem_params.key = key;
71171 sem_params.flg = semflg;
71172 sem_params.u.nsems = nsems;
71173 @@ -671,6 +672,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
71174 ushort* sem_io = fast_sem_io;
71175 int nsems;
71176
71177 + pax_track_stack();
71178 +
71179 sma = sem_lock_check(ns, semid);
71180 if (IS_ERR(sma))
71181 return PTR_ERR(sma);
71182 @@ -1071,6 +1074,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
71183 unsigned long jiffies_left = 0;
71184 struct ipc_namespace *ns;
71185
71186 + pax_track_stack();
71187 +
71188 ns = current->nsproxy->ipc_ns;
71189
71190 if (nsops < 1 || semid < 0)
71191 diff --git a/ipc/shm.c b/ipc/shm.c
71192 index d30732c..e4992cd 100644
71193 --- a/ipc/shm.c
71194 +++ b/ipc/shm.c
71195 @@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
71196 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
71197 #endif
71198
71199 +#ifdef CONFIG_GRKERNSEC
71200 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
71201 + const time_t shm_createtime, const uid_t cuid,
71202 + const int shmid);
71203 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
71204 + const time_t shm_createtime);
71205 +#endif
71206 +
71207 void shm_init_ns(struct ipc_namespace *ns)
71208 {
71209 ns->shm_ctlmax = SHMMAX;
71210 @@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
71211 shp->shm_lprid = 0;
71212 shp->shm_atim = shp->shm_dtim = 0;
71213 shp->shm_ctim = get_seconds();
71214 +#ifdef CONFIG_GRKERNSEC
71215 + {
71216 + struct timespec timeval;
71217 + do_posix_clock_monotonic_gettime(&timeval);
71218 +
71219 + shp->shm_createtime = timeval.tv_sec;
71220 + }
71221 +#endif
71222 shp->shm_segsz = size;
71223 shp->shm_nattch = 0;
71224 shp->shm_file = file;
71225 @@ -446,18 +462,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
71226 return 0;
71227 }
71228
71229 +static struct ipc_ops shm_ops = {
71230 + .getnew = newseg,
71231 + .associate = shm_security,
71232 + .more_checks = shm_more_checks
71233 +};
71234 +
71235 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
71236 {
71237 struct ipc_namespace *ns;
71238 - struct ipc_ops shm_ops;
71239 struct ipc_params shm_params;
71240
71241 ns = current->nsproxy->ipc_ns;
71242
71243 - shm_ops.getnew = newseg;
71244 - shm_ops.associate = shm_security;
71245 - shm_ops.more_checks = shm_more_checks;
71246 -
71247 shm_params.key = key;
71248 shm_params.flg = shmflg;
71249 shm_params.u.size = size;
71250 @@ -857,6 +874,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
71251 f_mode = FMODE_READ | FMODE_WRITE;
71252 }
71253 if (shmflg & SHM_EXEC) {
71254 +
71255 +#ifdef CONFIG_PAX_MPROTECT
71256 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
71257 + goto out;
71258 +#endif
71259 +
71260 prot |= PROT_EXEC;
71261 acc_mode |= S_IXUGO;
71262 }
71263 @@ -880,9 +903,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
71264 if (err)
71265 goto out_unlock;
71266
71267 +#ifdef CONFIG_GRKERNSEC
71268 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
71269 + shp->shm_perm.cuid, shmid) ||
71270 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
71271 + err = -EACCES;
71272 + goto out_unlock;
71273 + }
71274 +#endif
71275 +
71276 path.dentry = dget(shp->shm_file->f_path.dentry);
71277 path.mnt = shp->shm_file->f_path.mnt;
71278 shp->shm_nattch++;
71279 +#ifdef CONFIG_GRKERNSEC
71280 + shp->shm_lapid = current->pid;
71281 +#endif
71282 size = i_size_read(path.dentry->d_inode);
71283 shm_unlock(shp);
71284
71285 diff --git a/kernel/acct.c b/kernel/acct.c
71286 index a6605ca..ca91111 100644
71287 --- a/kernel/acct.c
71288 +++ b/kernel/acct.c
71289 @@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
71290 */
71291 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
71292 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
71293 - file->f_op->write(file, (char *)&ac,
71294 + file->f_op->write(file, (char __force_user *)&ac,
71295 sizeof(acct_t), &file->f_pos);
71296 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
71297 set_fs(fs);
71298 diff --git a/kernel/audit.c b/kernel/audit.c
71299 index 5feed23..48415fd 100644
71300 --- a/kernel/audit.c
71301 +++ b/kernel/audit.c
71302 @@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
71303 3) suppressed due to audit_rate_limit
71304 4) suppressed due to audit_backlog_limit
71305 */
71306 -static atomic_t audit_lost = ATOMIC_INIT(0);
71307 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
71308
71309 /* The netlink socket. */
71310 static struct sock *audit_sock;
71311 @@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
71312 unsigned long now;
71313 int print;
71314
71315 - atomic_inc(&audit_lost);
71316 + atomic_inc_unchecked(&audit_lost);
71317
71318 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
71319
71320 @@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
71321 printk(KERN_WARNING
71322 "audit: audit_lost=%d audit_rate_limit=%d "
71323 "audit_backlog_limit=%d\n",
71324 - atomic_read(&audit_lost),
71325 + atomic_read_unchecked(&audit_lost),
71326 audit_rate_limit,
71327 audit_backlog_limit);
71328 audit_panic(message);
71329 @@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
71330 status_set.pid = audit_pid;
71331 status_set.rate_limit = audit_rate_limit;
71332 status_set.backlog_limit = audit_backlog_limit;
71333 - status_set.lost = atomic_read(&audit_lost);
71334 + status_set.lost = atomic_read_unchecked(&audit_lost);
71335 status_set.backlog = skb_queue_len(&audit_skb_queue);
71336 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
71337 &status_set, sizeof(status_set));
71338 @@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
71339 spin_unlock_irq(&tsk->sighand->siglock);
71340 }
71341 read_unlock(&tasklist_lock);
71342 - audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
71343 - &s, sizeof(s));
71344 +
71345 + if (!err)
71346 + audit_send_reply(NETLINK_CB(skb).pid, seq,
71347 + AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
71348 break;
71349 }
71350 case AUDIT_TTY_SET: {
71351 @@ -1262,12 +1264,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
71352 avail = audit_expand(ab,
71353 max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
71354 if (!avail)
71355 - goto out;
71356 + goto out_va_end;
71357 len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
71358 }
71359 - va_end(args2);
71360 if (len > 0)
71361 skb_put(skb, len);
71362 +out_va_end:
71363 + va_end(args2);
71364 out:
71365 return;
71366 }
71367 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
71368 index 267e484..ac41bc3 100644
71369 --- a/kernel/auditsc.c
71370 +++ b/kernel/auditsc.c
71371 @@ -1157,8 +1157,8 @@ static void audit_log_execve_info(struct audit_context *context,
71372 struct audit_buffer **ab,
71373 struct audit_aux_data_execve *axi)
71374 {
71375 - int i;
71376 - size_t len, len_sent = 0;
71377 + int i, len;
71378 + size_t len_sent = 0;
71379 const char __user *p;
71380 char *buf;
71381
71382 @@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
71383 }
71384
71385 /* global counter which is incremented every time something logs in */
71386 -static atomic_t session_id = ATOMIC_INIT(0);
71387 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
71388
71389 /**
71390 * audit_set_loginuid - set a task's audit_context loginuid
71391 @@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
71392 */
71393 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
71394 {
71395 - unsigned int sessionid = atomic_inc_return(&session_id);
71396 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
71397 struct audit_context *context = task->audit_context;
71398
71399 if (context && context->in_syscall) {
71400 diff --git a/kernel/capability.c b/kernel/capability.c
71401 index 8a944f5..db5001e 100644
71402 --- a/kernel/capability.c
71403 +++ b/kernel/capability.c
71404 @@ -305,10 +305,26 @@ int capable(int cap)
71405 BUG();
71406 }
71407
71408 - if (security_capable(cap) == 0) {
71409 + if (security_capable(cap) == 0 && gr_is_capable(cap)) {
71410 current->flags |= PF_SUPERPRIV;
71411 return 1;
71412 }
71413 return 0;
71414 }
71415 +
71416 +int capable_nolog(int cap)
71417 +{
71418 + if (unlikely(!cap_valid(cap))) {
71419 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
71420 + BUG();
71421 + }
71422 +
71423 + if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
71424 + current->flags |= PF_SUPERPRIV;
71425 + return 1;
71426 + }
71427 + return 0;
71428 +}
71429 +
71430 EXPORT_SYMBOL(capable);
71431 +EXPORT_SYMBOL(capable_nolog);
71432 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
71433 index 1fbcc74..7000012 100644
71434 --- a/kernel/cgroup.c
71435 +++ b/kernel/cgroup.c
71436 @@ -536,6 +536,8 @@ static struct css_set *find_css_set(
71437 struct hlist_head *hhead;
71438 struct cg_cgroup_link *link;
71439
71440 + pax_track_stack();
71441 +
71442 /* First see if we already have a cgroup group that matches
71443 * the desired set */
71444 read_lock(&css_set_lock);
71445 diff --git a/kernel/compat.c b/kernel/compat.c
71446 index 8bc5578..186e44a 100644
71447 --- a/kernel/compat.c
71448 +++ b/kernel/compat.c
71449 @@ -108,7 +108,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
71450 mm_segment_t oldfs;
71451 long ret;
71452
71453 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
71454 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
71455 oldfs = get_fs();
71456 set_fs(KERNEL_DS);
71457 ret = hrtimer_nanosleep_restart(restart);
71458 @@ -140,7 +140,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
71459 oldfs = get_fs();
71460 set_fs(KERNEL_DS);
71461 ret = hrtimer_nanosleep(&tu,
71462 - rmtp ? (struct timespec __user *)&rmt : NULL,
71463 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
71464 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
71465 set_fs(oldfs);
71466
71467 @@ -247,7 +247,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
71468 mm_segment_t old_fs = get_fs();
71469
71470 set_fs(KERNEL_DS);
71471 - ret = sys_sigpending((old_sigset_t __user *) &s);
71472 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
71473 set_fs(old_fs);
71474 if (ret == 0)
71475 ret = put_user(s, set);
71476 @@ -266,8 +266,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
71477 old_fs = get_fs();
71478 set_fs(KERNEL_DS);
71479 ret = sys_sigprocmask(how,
71480 - set ? (old_sigset_t __user *) &s : NULL,
71481 - oset ? (old_sigset_t __user *) &s : NULL);
71482 + set ? (old_sigset_t __force_user *) &s : NULL,
71483 + oset ? (old_sigset_t __force_user *) &s : NULL);
71484 set_fs(old_fs);
71485 if (ret == 0)
71486 if (oset)
71487 @@ -310,7 +310,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
71488 mm_segment_t old_fs = get_fs();
71489
71490 set_fs(KERNEL_DS);
71491 - ret = sys_old_getrlimit(resource, &r);
71492 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
71493 set_fs(old_fs);
71494
71495 if (!ret) {
71496 @@ -385,7 +385,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
71497 mm_segment_t old_fs = get_fs();
71498
71499 set_fs(KERNEL_DS);
71500 - ret = sys_getrusage(who, (struct rusage __user *) &r);
71501 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
71502 set_fs(old_fs);
71503
71504 if (ret)
71505 @@ -412,8 +412,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
71506 set_fs (KERNEL_DS);
71507 ret = sys_wait4(pid,
71508 (stat_addr ?
71509 - (unsigned int __user *) &status : NULL),
71510 - options, (struct rusage __user *) &r);
71511 + (unsigned int __force_user *) &status : NULL),
71512 + options, (struct rusage __force_user *) &r);
71513 set_fs (old_fs);
71514
71515 if (ret > 0) {
71516 @@ -438,8 +438,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
71517 memset(&info, 0, sizeof(info));
71518
71519 set_fs(KERNEL_DS);
71520 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
71521 - uru ? (struct rusage __user *)&ru : NULL);
71522 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
71523 + uru ? (struct rusage __force_user *)&ru : NULL);
71524 set_fs(old_fs);
71525
71526 if ((ret < 0) || (info.si_signo == 0))
71527 @@ -569,8 +569,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
71528 oldfs = get_fs();
71529 set_fs(KERNEL_DS);
71530 err = sys_timer_settime(timer_id, flags,
71531 - (struct itimerspec __user *) &newts,
71532 - (struct itimerspec __user *) &oldts);
71533 + (struct itimerspec __force_user *) &newts,
71534 + (struct itimerspec __force_user *) &oldts);
71535 set_fs(oldfs);
71536 if (!err && old && put_compat_itimerspec(old, &oldts))
71537 return -EFAULT;
71538 @@ -587,7 +587,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
71539 oldfs = get_fs();
71540 set_fs(KERNEL_DS);
71541 err = sys_timer_gettime(timer_id,
71542 - (struct itimerspec __user *) &ts);
71543 + (struct itimerspec __force_user *) &ts);
71544 set_fs(oldfs);
71545 if (!err && put_compat_itimerspec(setting, &ts))
71546 return -EFAULT;
71547 @@ -606,7 +606,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
71548 oldfs = get_fs();
71549 set_fs(KERNEL_DS);
71550 err = sys_clock_settime(which_clock,
71551 - (struct timespec __user *) &ts);
71552 + (struct timespec __force_user *) &ts);
71553 set_fs(oldfs);
71554 return err;
71555 }
71556 @@ -621,7 +621,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
71557 oldfs = get_fs();
71558 set_fs(KERNEL_DS);
71559 err = sys_clock_gettime(which_clock,
71560 - (struct timespec __user *) &ts);
71561 + (struct timespec __force_user *) &ts);
71562 set_fs(oldfs);
71563 if (!err && put_compat_timespec(&ts, tp))
71564 return -EFAULT;
71565 @@ -638,7 +638,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
71566 oldfs = get_fs();
71567 set_fs(KERNEL_DS);
71568 err = sys_clock_getres(which_clock,
71569 - (struct timespec __user *) &ts);
71570 + (struct timespec __force_user *) &ts);
71571 set_fs(oldfs);
71572 if (!err && tp && put_compat_timespec(&ts, tp))
71573 return -EFAULT;
71574 @@ -650,9 +650,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
71575 long err;
71576 mm_segment_t oldfs;
71577 struct timespec tu;
71578 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
71579 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
71580
71581 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
71582 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
71583 oldfs = get_fs();
71584 set_fs(KERNEL_DS);
71585 err = clock_nanosleep_restart(restart);
71586 @@ -684,8 +684,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
71587 oldfs = get_fs();
71588 set_fs(KERNEL_DS);
71589 err = sys_clock_nanosleep(which_clock, flags,
71590 - (struct timespec __user *) &in,
71591 - (struct timespec __user *) &out);
71592 + (struct timespec __force_user *) &in,
71593 + (struct timespec __force_user *) &out);
71594 set_fs(oldfs);
71595
71596 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
71597 diff --git a/kernel/configs.c b/kernel/configs.c
71598 index abaee68..047facd 100644
71599 --- a/kernel/configs.c
71600 +++ b/kernel/configs.c
71601 @@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
71602 struct proc_dir_entry *entry;
71603
71604 /* create the current config file */
71605 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
71606 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
71607 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
71608 + &ikconfig_file_ops);
71609 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71610 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
71611 + &ikconfig_file_ops);
71612 +#endif
71613 +#else
71614 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
71615 &ikconfig_file_ops);
71616 +#endif
71617 +
71618 if (!entry)
71619 return -ENOMEM;
71620
71621 diff --git a/kernel/cpu.c b/kernel/cpu.c
71622 index 3f2f04f..4e53ded 100644
71623 --- a/kernel/cpu.c
71624 +++ b/kernel/cpu.c
71625 @@ -20,7 +20,7 @@
71626 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
71627 static DEFINE_MUTEX(cpu_add_remove_lock);
71628
71629 -static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
71630 +static RAW_NOTIFIER_HEAD(cpu_chain);
71631
71632 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
71633 * Should always be manipulated under cpu_add_remove_lock
71634 diff --git a/kernel/cred.c b/kernel/cred.c
71635 index 0b5b5fc..f7fe51a 100644
71636 --- a/kernel/cred.c
71637 +++ b/kernel/cred.c
71638 @@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head *rcu)
71639 */
71640 void __put_cred(struct cred *cred)
71641 {
71642 + pax_track_stack();
71643 +
71644 kdebug("__put_cred(%p{%d,%d})", cred,
71645 atomic_read(&cred->usage),
71646 read_cred_subscribers(cred));
71647 @@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
71648 {
71649 struct cred *cred;
71650
71651 + pax_track_stack();
71652 +
71653 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
71654 atomic_read(&tsk->cred->usage),
71655 read_cred_subscribers(tsk->cred));
71656 @@ -206,6 +210,15 @@ void exit_creds(struct task_struct *tsk)
71657 validate_creds(cred);
71658 put_cred(cred);
71659 }
71660 +
71661 +#ifdef CONFIG_GRKERNSEC_SETXID
71662 + cred = (struct cred *) tsk->delayed_cred;
71663 + if (cred) {
71664 + tsk->delayed_cred = NULL;
71665 + validate_creds(cred);
71666 + put_cred(cred);
71667 + }
71668 +#endif
71669 }
71670
71671 /**
71672 @@ -222,6 +235,8 @@ const struct cred *get_task_cred(struct task_struct *task)
71673 {
71674 const struct cred *cred;
71675
71676 + pax_track_stack();
71677 +
71678 rcu_read_lock();
71679
71680 do {
71681 @@ -241,6 +256,8 @@ struct cred *cred_alloc_blank(void)
71682 {
71683 struct cred *new;
71684
71685 + pax_track_stack();
71686 +
71687 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
71688 if (!new)
71689 return NULL;
71690 @@ -289,6 +306,8 @@ struct cred *prepare_creds(void)
71691 const struct cred *old;
71692 struct cred *new;
71693
71694 + pax_track_stack();
71695 +
71696 validate_process_creds();
71697
71698 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
71699 @@ -335,6 +354,8 @@ struct cred *prepare_exec_creds(void)
71700 struct thread_group_cred *tgcred = NULL;
71701 struct cred *new;
71702
71703 + pax_track_stack();
71704 +
71705 #ifdef CONFIG_KEYS
71706 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
71707 if (!tgcred)
71708 @@ -441,6 +462,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
71709 struct cred *new;
71710 int ret;
71711
71712 + pax_track_stack();
71713 +
71714 mutex_init(&p->cred_guard_mutex);
71715
71716 if (
71717 @@ -523,11 +546,13 @@ error_put:
71718 * Always returns 0 thus allowing this function to be tail-called at the end
71719 * of, say, sys_setgid().
71720 */
71721 -int commit_creds(struct cred *new)
71722 +static int __commit_creds(struct cred *new)
71723 {
71724 struct task_struct *task = current;
71725 const struct cred *old = task->real_cred;
71726
71727 + pax_track_stack();
71728 +
71729 kdebug("commit_creds(%p{%d,%d})", new,
71730 atomic_read(&new->usage),
71731 read_cred_subscribers(new));
71732 @@ -544,6 +569,8 @@ int commit_creds(struct cred *new)
71733
71734 get_cred(new); /* we will require a ref for the subj creds too */
71735
71736 + gr_set_role_label(task, new->uid, new->gid);
71737 +
71738 /* dumpability changes */
71739 if (old->euid != new->euid ||
71740 old->egid != new->egid ||
71741 @@ -563,10 +590,8 @@ int commit_creds(struct cred *new)
71742 key_fsgid_changed(task);
71743
71744 /* do it
71745 - * - What if a process setreuid()'s and this brings the
71746 - * new uid over his NPROC rlimit? We can check this now
71747 - * cheaply with the new uid cache, so if it matters
71748 - * we should be checking for it. -DaveM
71749 + * RLIMIT_NPROC limits on user->processes have already been checked
71750 + * in set_user().
71751 */
71752 alter_cred_subscribers(new, 2);
71753 if (new->user != old->user)
71754 @@ -595,8 +620,96 @@ int commit_creds(struct cred *new)
71755 put_cred(old);
71756 return 0;
71757 }
71758 +
71759 +#ifdef CONFIG_GRKERNSEC_SETXID
71760 +extern int set_user(struct cred *new);
71761 +
71762 +void gr_delayed_cred_worker(void)
71763 +{
71764 + const struct cred *new = current->delayed_cred;
71765 + struct cred *ncred;
71766 +
71767 + current->delayed_cred = NULL;
71768 +
71769 + if (current_uid() && new != NULL) {
71770 + // from doing get_cred on it when queueing this
71771 + put_cred(new);
71772 + return;
71773 + } else if (new == NULL)
71774 + return;
71775 +
71776 + ncred = prepare_creds();
71777 + if (!ncred)
71778 + goto die;
71779 + // uids
71780 + ncred->uid = new->uid;
71781 + ncred->euid = new->euid;
71782 + ncred->suid = new->suid;
71783 + ncred->fsuid = new->fsuid;
71784 + // gids
71785 + ncred->gid = new->gid;
71786 + ncred->egid = new->egid;
71787 + ncred->sgid = new->sgid;
71788 + ncred->fsgid = new->fsgid;
71789 + // groups
71790 + if (set_groups(ncred, new->group_info) < 0) {
71791 + abort_creds(ncred);
71792 + goto die;
71793 + }
71794 + // caps
71795 + ncred->securebits = new->securebits;
71796 + ncred->cap_inheritable = new->cap_inheritable;
71797 + ncred->cap_permitted = new->cap_permitted;
71798 + ncred->cap_effective = new->cap_effective;
71799 + ncred->cap_bset = new->cap_bset;
71800 +
71801 + if (set_user(ncred)) {
71802 + abort_creds(ncred);
71803 + goto die;
71804 + }
71805 +
71806 + // from doing get_cred on it when queueing this
71807 + put_cred(new);
71808 +
71809 + __commit_creds(ncred);
71810 + return;
71811 +die:
71812 + // from doing get_cred on it when queueing this
71813 + put_cred(new);
71814 + do_group_exit(SIGKILL);
71815 +}
71816 +#endif
71817 +
71818 +int commit_creds(struct cred *new)
71819 +{
71820 +#ifdef CONFIG_GRKERNSEC_SETXID
71821 + struct task_struct *t;
71822 +
71823 + /* we won't get called with tasklist_lock held for writing
71824 + and interrupts disabled as the cred struct in that case is
71825 + init_cred
71826 + */
71827 + if (grsec_enable_setxid && !current_is_single_threaded() &&
71828 + !current_uid() && new->uid) {
71829 + rcu_read_lock();
71830 + read_lock(&tasklist_lock);
71831 + for (t = next_thread(current); t != current;
71832 + t = next_thread(t)) {
71833 + if (t->delayed_cred == NULL) {
71834 + t->delayed_cred = get_cred(new);
71835 + set_tsk_need_resched(t);
71836 + }
71837 + }
71838 + read_unlock(&tasklist_lock);
71839 + rcu_read_unlock();
71840 + }
71841 +#endif
71842 + return __commit_creds(new);
71843 +}
71844 +
71845 EXPORT_SYMBOL(commit_creds);
71846
71847 +
71848 /**
71849 * abort_creds - Discard a set of credentials and unlock the current task
71850 * @new: The credentials that were going to be applied
71851 @@ -606,6 +719,8 @@ EXPORT_SYMBOL(commit_creds);
71852 */
71853 void abort_creds(struct cred *new)
71854 {
71855 + pax_track_stack();
71856 +
71857 kdebug("abort_creds(%p{%d,%d})", new,
71858 atomic_read(&new->usage),
71859 read_cred_subscribers(new));
71860 @@ -629,6 +744,8 @@ const struct cred *override_creds(const struct cred *new)
71861 {
71862 const struct cred *old = current->cred;
71863
71864 + pax_track_stack();
71865 +
71866 kdebug("override_creds(%p{%d,%d})", new,
71867 atomic_read(&new->usage),
71868 read_cred_subscribers(new));
71869 @@ -658,6 +775,8 @@ void revert_creds(const struct cred *old)
71870 {
71871 const struct cred *override = current->cred;
71872
71873 + pax_track_stack();
71874 +
71875 kdebug("revert_creds(%p{%d,%d})", old,
71876 atomic_read(&old->usage),
71877 read_cred_subscribers(old));
71878 @@ -704,6 +823,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
71879 const struct cred *old;
71880 struct cred *new;
71881
71882 + pax_track_stack();
71883 +
71884 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
71885 if (!new)
71886 return NULL;
71887 @@ -758,6 +879,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
71888 */
71889 int set_security_override(struct cred *new, u32 secid)
71890 {
71891 + pax_track_stack();
71892 +
71893 return security_kernel_act_as(new, secid);
71894 }
71895 EXPORT_SYMBOL(set_security_override);
71896 @@ -777,6 +900,8 @@ int set_security_override_from_ctx(struct cred *new, const char *secctx)
71897 u32 secid;
71898 int ret;
71899
71900 + pax_track_stack();
71901 +
71902 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
71903 if (ret < 0)
71904 return ret;
71905 diff --git a/kernel/exit.c b/kernel/exit.c
71906 index 0f8fae3..9344a56 100644
71907 --- a/kernel/exit.c
71908 +++ b/kernel/exit.c
71909 @@ -55,6 +55,10 @@
71910 #include <asm/pgtable.h>
71911 #include <asm/mmu_context.h>
71912
71913 +#ifdef CONFIG_GRKERNSEC
71914 +extern rwlock_t grsec_exec_file_lock;
71915 +#endif
71916 +
71917 static void exit_mm(struct task_struct * tsk);
71918
71919 static void __unhash_process(struct task_struct *p)
71920 @@ -174,6 +178,10 @@ void release_task(struct task_struct * p)
71921 struct task_struct *leader;
71922 int zap_leader;
71923 repeat:
71924 +#ifdef CONFIG_NET
71925 + gr_del_task_from_ip_table(p);
71926 +#endif
71927 +
71928 tracehook_prepare_release_task(p);
71929 /* don't need to get the RCU readlock here - the process is dead and
71930 * can't be modifying its own credentials */
71931 @@ -397,7 +405,7 @@ int allow_signal(int sig)
71932 * know it'll be handled, so that they don't get converted to
71933 * SIGKILL or just silently dropped.
71934 */
71935 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
71936 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
71937 recalc_sigpending();
71938 spin_unlock_irq(&current->sighand->siglock);
71939 return 0;
71940 @@ -433,6 +441,17 @@ void daemonize(const char *name, ...)
71941 vsnprintf(current->comm, sizeof(current->comm), name, args);
71942 va_end(args);
71943
71944 +#ifdef CONFIG_GRKERNSEC
71945 + write_lock(&grsec_exec_file_lock);
71946 + if (current->exec_file) {
71947 + fput(current->exec_file);
71948 + current->exec_file = NULL;
71949 + }
71950 + write_unlock(&grsec_exec_file_lock);
71951 +#endif
71952 +
71953 + gr_set_kernel_label(current);
71954 +
71955 /*
71956 * If we were started as result of loading a module, close all of the
71957 * user space pages. We don't need them, and if we didn't close them
71958 @@ -897,17 +916,17 @@ NORET_TYPE void do_exit(long code)
71959 struct task_struct *tsk = current;
71960 int group_dead;
71961
71962 - profile_task_exit(tsk);
71963 -
71964 - WARN_ON(atomic_read(&tsk->fs_excl));
71965 -
71966 + /*
71967 + * Check this first since set_fs() below depends on
71968 + * current_thread_info(), which we better not access when we're in
71969 + * interrupt context. Other than that, we want to do the set_fs()
71970 + * as early as possible.
71971 + */
71972 if (unlikely(in_interrupt()))
71973 panic("Aiee, killing interrupt handler!");
71974 - if (unlikely(!tsk->pid))
71975 - panic("Attempted to kill the idle task!");
71976
71977 /*
71978 - * If do_exit is called because this processes oopsed, it's possible
71979 + * If do_exit is called because this processes Oops'ed, it's possible
71980 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
71981 * continuing. Amongst other possible reasons, this is to prevent
71982 * mm_release()->clear_child_tid() from writing to a user-controlled
71983 @@ -915,6 +934,13 @@ NORET_TYPE void do_exit(long code)
71984 */
71985 set_fs(USER_DS);
71986
71987 + profile_task_exit(tsk);
71988 +
71989 + WARN_ON(atomic_read(&tsk->fs_excl));
71990 +
71991 + if (unlikely(!tsk->pid))
71992 + panic("Attempted to kill the idle task!");
71993 +
71994 tracehook_report_exit(&code);
71995
71996 validate_creds_for_do_exit(tsk);
71997 @@ -973,6 +999,9 @@ NORET_TYPE void do_exit(long code)
71998 tsk->exit_code = code;
71999 taskstats_exit(tsk, group_dead);
72000
72001 + gr_acl_handle_psacct(tsk, code);
72002 + gr_acl_handle_exit();
72003 +
72004 exit_mm(tsk);
72005
72006 if (group_dead)
72007 @@ -1188,7 +1217,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
72008
72009 if (unlikely(wo->wo_flags & WNOWAIT)) {
72010 int exit_code = p->exit_code;
72011 - int why, status;
72012 + int why;
72013
72014 get_task_struct(p);
72015 read_unlock(&tasklist_lock);
72016 diff --git a/kernel/fork.c b/kernel/fork.c
72017 index 4bde56f..29a9bab 100644
72018 --- a/kernel/fork.c
72019 +++ b/kernel/fork.c
72020 @@ -253,7 +253,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
72021 *stackend = STACK_END_MAGIC; /* for overflow detection */
72022
72023 #ifdef CONFIG_CC_STACKPROTECTOR
72024 - tsk->stack_canary = get_random_int();
72025 + tsk->stack_canary = pax_get_random_long();
72026 #endif
72027
72028 /* One for us, one for whoever does the "release_task()" (usually parent) */
72029 @@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
72030 mm->locked_vm = 0;
72031 mm->mmap = NULL;
72032 mm->mmap_cache = NULL;
72033 - mm->free_area_cache = oldmm->mmap_base;
72034 - mm->cached_hole_size = ~0UL;
72035 + mm->free_area_cache = oldmm->free_area_cache;
72036 + mm->cached_hole_size = oldmm->cached_hole_size;
72037 mm->map_count = 0;
72038 cpumask_clear(mm_cpumask(mm));
72039 mm->mm_rb = RB_ROOT;
72040 @@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
72041 tmp->vm_flags &= ~VM_LOCKED;
72042 tmp->vm_mm = mm;
72043 tmp->vm_next = tmp->vm_prev = NULL;
72044 + tmp->vm_mirror = NULL;
72045 anon_vma_link(tmp);
72046 file = tmp->vm_file;
72047 if (file) {
72048 @@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
72049 if (retval)
72050 goto out;
72051 }
72052 +
72053 +#ifdef CONFIG_PAX_SEGMEXEC
72054 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
72055 + struct vm_area_struct *mpnt_m;
72056 +
72057 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
72058 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
72059 +
72060 + if (!mpnt->vm_mirror)
72061 + continue;
72062 +
72063 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
72064 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
72065 + mpnt->vm_mirror = mpnt_m;
72066 + } else {
72067 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
72068 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
72069 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
72070 + mpnt->vm_mirror->vm_mirror = mpnt;
72071 + }
72072 + }
72073 + BUG_ON(mpnt_m);
72074 + }
72075 +#endif
72076 +
72077 /* a new mm has just been created */
72078 arch_dup_mmap(oldmm, mm);
72079 retval = 0;
72080 @@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
72081 write_unlock(&fs->lock);
72082 return -EAGAIN;
72083 }
72084 - fs->users++;
72085 + atomic_inc(&fs->users);
72086 write_unlock(&fs->lock);
72087 return 0;
72088 }
72089 tsk->fs = copy_fs_struct(fs);
72090 if (!tsk->fs)
72091 return -ENOMEM;
72092 + gr_set_chroot_entries(tsk, &tsk->fs->root);
72093 return 0;
72094 }
72095
72096 @@ -1033,12 +1060,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
72097 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
72098 #endif
72099 retval = -EAGAIN;
72100 +
72101 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
72102 +
72103 if (atomic_read(&p->real_cred->user->processes) >=
72104 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
72105 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
72106 - p->real_cred->user != INIT_USER)
72107 + if (p->real_cred->user != INIT_USER &&
72108 + !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
72109 goto bad_fork_free;
72110 }
72111 + current->flags &= ~PF_NPROC_EXCEEDED;
72112
72113 retval = copy_creds(p, clone_flags);
72114 if (retval < 0)
72115 @@ -1183,6 +1214,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
72116 goto bad_fork_free_pid;
72117 }
72118
72119 + gr_copy_label(p);
72120 +
72121 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
72122 /*
72123 * Clear TID on mm_release()?
72124 @@ -1333,6 +1366,8 @@ bad_fork_cleanup_count:
72125 bad_fork_free:
72126 free_task(p);
72127 fork_out:
72128 + gr_log_forkfail(retval);
72129 +
72130 return ERR_PTR(retval);
72131 }
72132
72133 @@ -1426,6 +1461,8 @@ long do_fork(unsigned long clone_flags,
72134 if (clone_flags & CLONE_PARENT_SETTID)
72135 put_user(nr, parent_tidptr);
72136
72137 + gr_handle_brute_check();
72138 +
72139 if (clone_flags & CLONE_VFORK) {
72140 p->vfork_done = &vfork;
72141 init_completion(&vfork);
72142 @@ -1558,7 +1595,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
72143 return 0;
72144
72145 /* don't need lock here; in the worst case we'll do useless copy */
72146 - if (fs->users == 1)
72147 + if (atomic_read(&fs->users) == 1)
72148 return 0;
72149
72150 *new_fsp = copy_fs_struct(fs);
72151 @@ -1681,7 +1718,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
72152 fs = current->fs;
72153 write_lock(&fs->lock);
72154 current->fs = new_fs;
72155 - if (--fs->users)
72156 + gr_set_chroot_entries(current, &current->fs->root);
72157 + if (atomic_dec_return(&fs->users))
72158 new_fs = NULL;
72159 else
72160 new_fs = fs;
72161 diff --git a/kernel/futex.c b/kernel/futex.c
72162 index fb98c9f..333faec 100644
72163 --- a/kernel/futex.c
72164 +++ b/kernel/futex.c
72165 @@ -54,6 +54,7 @@
72166 #include <linux/mount.h>
72167 #include <linux/pagemap.h>
72168 #include <linux/syscalls.h>
72169 +#include <linux/ptrace.h>
72170 #include <linux/signal.h>
72171 #include <linux/module.h>
72172 #include <linux/magic.h>
72173 @@ -223,6 +224,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
72174 struct page *page;
72175 int err, ro = 0;
72176
72177 +#ifdef CONFIG_PAX_SEGMEXEC
72178 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
72179 + return -EFAULT;
72180 +#endif
72181 +
72182 /*
72183 * The futex address must be "naturally" aligned.
72184 */
72185 @@ -1819,6 +1825,8 @@ static int futex_wait(u32 __user *uaddr, int fshared,
72186 struct futex_q q;
72187 int ret;
72188
72189 + pax_track_stack();
72190 +
72191 if (!bitset)
72192 return -EINVAL;
72193
72194 @@ -1871,7 +1879,7 @@ retry:
72195
72196 restart = &current_thread_info()->restart_block;
72197 restart->fn = futex_wait_restart;
72198 - restart->futex.uaddr = (u32 *)uaddr;
72199 + restart->futex.uaddr = uaddr;
72200 restart->futex.val = val;
72201 restart->futex.time = abs_time->tv64;
72202 restart->futex.bitset = bitset;
72203 @@ -2233,6 +2241,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
72204 struct futex_q q;
72205 int res, ret;
72206
72207 + pax_track_stack();
72208 +
72209 if (!bitset)
72210 return -EINVAL;
72211
72212 @@ -2423,6 +2433,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
72213 if (!p)
72214 goto err_unlock;
72215 ret = -EPERM;
72216 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72217 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
72218 + goto err_unlock;
72219 +#endif
72220 pcred = __task_cred(p);
72221 if (cred->euid != pcred->euid &&
72222 cred->euid != pcred->uid &&
72223 @@ -2489,7 +2503,7 @@ retry:
72224 */
72225 static inline int fetch_robust_entry(struct robust_list __user **entry,
72226 struct robust_list __user * __user *head,
72227 - int *pi)
72228 + unsigned int *pi)
72229 {
72230 unsigned long uentry;
72231
72232 @@ -2670,6 +2684,7 @@ static int __init futex_init(void)
72233 {
72234 u32 curval;
72235 int i;
72236 + mm_segment_t oldfs;
72237
72238 /*
72239 * This will fail and we want it. Some arch implementations do
72240 @@ -2681,7 +2696,10 @@ static int __init futex_init(void)
72241 * implementation, the non functional ones will return
72242 * -ENOSYS.
72243 */
72244 + oldfs = get_fs();
72245 + set_fs(USER_DS);
72246 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
72247 + set_fs(oldfs);
72248 if (curval == -EFAULT)
72249 futex_cmpxchg_enabled = 1;
72250
72251 diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
72252 index 2357165..eb25501 100644
72253 --- a/kernel/futex_compat.c
72254 +++ b/kernel/futex_compat.c
72255 @@ -10,6 +10,7 @@
72256 #include <linux/compat.h>
72257 #include <linux/nsproxy.h>
72258 #include <linux/futex.h>
72259 +#include <linux/ptrace.h>
72260
72261 #include <asm/uaccess.h>
72262
72263 @@ -135,7 +136,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
72264 {
72265 struct compat_robust_list_head __user *head;
72266 unsigned long ret;
72267 - const struct cred *cred = current_cred(), *pcred;
72268 + const struct cred *cred = current_cred();
72269 + const struct cred *pcred;
72270
72271 if (!futex_cmpxchg_enabled)
72272 return -ENOSYS;
72273 @@ -151,6 +153,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
72274 if (!p)
72275 goto err_unlock;
72276 ret = -EPERM;
72277 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72278 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
72279 + goto err_unlock;
72280 +#endif
72281 pcred = __task_cred(p);
72282 if (cred->euid != pcred->euid &&
72283 cred->euid != pcred->uid &&
72284 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
72285 index 9b22d03..6295b62 100644
72286 --- a/kernel/gcov/base.c
72287 +++ b/kernel/gcov/base.c
72288 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
72289 }
72290
72291 #ifdef CONFIG_MODULES
72292 -static inline int within(void *addr, void *start, unsigned long size)
72293 -{
72294 - return ((addr >= start) && (addr < start + size));
72295 -}
72296 -
72297 /* Update list and generate events when modules are unloaded. */
72298 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
72299 void *data)
72300 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
72301 prev = NULL;
72302 /* Remove entries located in module from linked list. */
72303 for (info = gcov_info_head; info; info = info->next) {
72304 - if (within(info, mod->module_core, mod->core_size)) {
72305 + if (within_module_core_rw((unsigned long)info, mod)) {
72306 if (prev)
72307 prev->next = info->next;
72308 else
72309 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
72310 index a6e9d00..a0da4f9 100644
72311 --- a/kernel/hrtimer.c
72312 +++ b/kernel/hrtimer.c
72313 @@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
72314 local_irq_restore(flags);
72315 }
72316
72317 -static void run_hrtimer_softirq(struct softirq_action *h)
72318 +static void run_hrtimer_softirq(void)
72319 {
72320 hrtimer_peek_ahead_timers();
72321 }
72322 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
72323 index 8b6b8b6..6bc87df 100644
72324 --- a/kernel/kallsyms.c
72325 +++ b/kernel/kallsyms.c
72326 @@ -11,6 +11,9 @@
72327 * Changed the compression method from stem compression to "table lookup"
72328 * compression (see scripts/kallsyms.c for a more complete description)
72329 */
72330 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72331 +#define __INCLUDED_BY_HIDESYM 1
72332 +#endif
72333 #include <linux/kallsyms.h>
72334 #include <linux/module.h>
72335 #include <linux/init.h>
72336 @@ -51,12 +54,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
72337
72338 static inline int is_kernel_inittext(unsigned long addr)
72339 {
72340 + if (system_state != SYSTEM_BOOTING)
72341 + return 0;
72342 +
72343 if (addr >= (unsigned long)_sinittext
72344 && addr <= (unsigned long)_einittext)
72345 return 1;
72346 return 0;
72347 }
72348
72349 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72350 +#ifdef CONFIG_MODULES
72351 +static inline int is_module_text(unsigned long addr)
72352 +{
72353 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
72354 + return 1;
72355 +
72356 + addr = ktla_ktva(addr);
72357 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
72358 +}
72359 +#else
72360 +static inline int is_module_text(unsigned long addr)
72361 +{
72362 + return 0;
72363 +}
72364 +#endif
72365 +#endif
72366 +
72367 static inline int is_kernel_text(unsigned long addr)
72368 {
72369 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
72370 @@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigned long addr)
72371
72372 static inline int is_kernel(unsigned long addr)
72373 {
72374 +
72375 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72376 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
72377 + return 1;
72378 +
72379 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
72380 +#else
72381 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
72382 +#endif
72383 +
72384 return 1;
72385 return in_gate_area_no_task(addr);
72386 }
72387
72388 static int is_ksym_addr(unsigned long addr)
72389 {
72390 +
72391 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72392 + if (is_module_text(addr))
72393 + return 0;
72394 +#endif
72395 +
72396 if (all_var)
72397 return is_kernel(addr);
72398
72399 @@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
72400
72401 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
72402 {
72403 - iter->name[0] = '\0';
72404 iter->nameoff = get_symbol_offset(new_pos);
72405 iter->pos = new_pos;
72406 }
72407 @@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, void *p)
72408 {
72409 struct kallsym_iter *iter = m->private;
72410
72411 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72412 + if (current_uid())
72413 + return 0;
72414 +#endif
72415 +
72416 /* Some debugging symbols have no name. Ignore them. */
72417 if (!iter->name[0])
72418 return 0;
72419 @@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
72420 struct kallsym_iter *iter;
72421 int ret;
72422
72423 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
72424 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
72425 if (!iter)
72426 return -ENOMEM;
72427 reset_iter(iter, 0);
72428 diff --git a/kernel/kexec.c b/kernel/kexec.c
72429 index f336e21..9c1c20b 100644
72430 --- a/kernel/kexec.c
72431 +++ b/kernel/kexec.c
72432 @@ -1028,7 +1028,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
72433 unsigned long flags)
72434 {
72435 struct compat_kexec_segment in;
72436 - struct kexec_segment out, __user *ksegments;
72437 + struct kexec_segment out;
72438 + struct kexec_segment __user *ksegments;
72439 unsigned long i, result;
72440
72441 /* Don't allow clients that don't understand the native
72442 diff --git a/kernel/kgdb.c b/kernel/kgdb.c
72443 index 53dae4b..9ba3743 100644
72444 --- a/kernel/kgdb.c
72445 +++ b/kernel/kgdb.c
72446 @@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
72447 /* Guard for recursive entry */
72448 static int exception_level;
72449
72450 -static struct kgdb_io *kgdb_io_ops;
72451 +static const struct kgdb_io *kgdb_io_ops;
72452 static DEFINE_SPINLOCK(kgdb_registration_lock);
72453
72454 /* kgdb console driver is loaded */
72455 @@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1);
72456 */
72457 static atomic_t passive_cpu_wait[NR_CPUS];
72458 static atomic_t cpu_in_kgdb[NR_CPUS];
72459 -atomic_t kgdb_setting_breakpoint;
72460 +atomic_unchecked_t kgdb_setting_breakpoint;
72461
72462 struct task_struct *kgdb_usethread;
72463 struct task_struct *kgdb_contthread;
72464 @@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBYTES +
72465 sizeof(unsigned long)];
72466
72467 /* to keep track of the CPU which is doing the single stepping*/
72468 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
72469 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
72470
72471 /*
72472 * If you are debugging a problem where roundup (the collection of
72473 @@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
72474 return 0;
72475 if (kgdb_connected)
72476 return 1;
72477 - if (atomic_read(&kgdb_setting_breakpoint))
72478 + if (atomic_read_unchecked(&kgdb_setting_breakpoint))
72479 return 1;
72480 if (print_wait)
72481 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
72482 @@ -1426,8 +1426,8 @@ acquirelock:
72483 * instance of the exception handler wanted to come into the
72484 * debugger on a different CPU via a single step
72485 */
72486 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
72487 - atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
72488 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
72489 + atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
72490
72491 atomic_set(&kgdb_active, -1);
72492 touch_softlockup_watchdog();
72493 @@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void)
72494 *
72495 * Register it with the KGDB core.
72496 */
72497 -int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
72498 +int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
72499 {
72500 int err;
72501
72502 @@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_module);
72503 *
72504 * Unregister it with the KGDB core.
72505 */
72506 -void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
72507 +void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
72508 {
72509 BUG_ON(kgdb_connected);
72510
72511 @@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
72512 */
72513 void kgdb_breakpoint(void)
72514 {
72515 - atomic_set(&kgdb_setting_breakpoint, 1);
72516 + atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
72517 wmb(); /* Sync point before breakpoint */
72518 arch_kgdb_breakpoint();
72519 wmb(); /* Sync point after breakpoint */
72520 - atomic_set(&kgdb_setting_breakpoint, 0);
72521 + atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
72522 }
72523 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
72524
72525 diff --git a/kernel/kmod.c b/kernel/kmod.c
72526 index d206078..e27ba6a 100644
72527 --- a/kernel/kmod.c
72528 +++ b/kernel/kmod.c
72529 @@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
72530 * If module auto-loading support is disabled then this function
72531 * becomes a no-operation.
72532 */
72533 -int __request_module(bool wait, const char *fmt, ...)
72534 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
72535 {
72536 - va_list args;
72537 char module_name[MODULE_NAME_LEN];
72538 unsigned int max_modprobes;
72539 int ret;
72540 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
72541 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
72542 static char *envp[] = { "HOME=/",
72543 "TERM=linux",
72544 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
72545 @@ -84,12 +83,24 @@ int __request_module(bool wait, const char *fmt, ...)
72546 if (ret)
72547 return ret;
72548
72549 - va_start(args, fmt);
72550 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
72551 - va_end(args);
72552 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
72553 if (ret >= MODULE_NAME_LEN)
72554 return -ENAMETOOLONG;
72555
72556 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
72557 + if (!current_uid()) {
72558 + /* hack to workaround consolekit/udisks stupidity */
72559 + read_lock(&tasklist_lock);
72560 + if (!strcmp(current->comm, "mount") &&
72561 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
72562 + read_unlock(&tasklist_lock);
72563 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
72564 + return -EPERM;
72565 + }
72566 + read_unlock(&tasklist_lock);
72567 + }
72568 +#endif
72569 +
72570 /* If modprobe needs a service that is in a module, we get a recursive
72571 * loop. Limit the number of running kmod threads to max_threads/2 or
72572 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
72573 @@ -123,6 +134,48 @@ int __request_module(bool wait, const char *fmt, ...)
72574 atomic_dec(&kmod_concurrent);
72575 return ret;
72576 }
72577 +
72578 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
72579 +{
72580 + va_list args;
72581 + int ret;
72582 +
72583 + va_start(args, fmt);
72584 + ret = ____request_module(wait, module_param, fmt, args);
72585 + va_end(args);
72586 +
72587 + return ret;
72588 +}
72589 +
72590 +int __request_module(bool wait, const char *fmt, ...)
72591 +{
72592 + va_list args;
72593 + int ret;
72594 +
72595 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
72596 + if (current_uid()) {
72597 + char module_param[MODULE_NAME_LEN];
72598 +
72599 + memset(module_param, 0, sizeof(module_param));
72600 +
72601 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
72602 +
72603 + va_start(args, fmt);
72604 + ret = ____request_module(wait, module_param, fmt, args);
72605 + va_end(args);
72606 +
72607 + return ret;
72608 + }
72609 +#endif
72610 +
72611 + va_start(args, fmt);
72612 + ret = ____request_module(wait, NULL, fmt, args);
72613 + va_end(args);
72614 +
72615 + return ret;
72616 +}
72617 +
72618 +
72619 EXPORT_SYMBOL(__request_module);
72620 #endif /* CONFIG_MODULES */
72621
72622 @@ -228,7 +281,7 @@ static int wait_for_helper(void *data)
72623 *
72624 * Thus the __user pointer cast is valid here.
72625 */
72626 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
72627 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
72628
72629 /*
72630 * If ret is 0, either ____call_usermodehelper failed and the
72631 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
72632 index 176d825..77fa8ea 100644
72633 --- a/kernel/kprobes.c
72634 +++ b/kernel/kprobes.c
72635 @@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void)
72636 * kernel image and loaded module images reside. This is required
72637 * so x86_64 can correctly handle the %rip-relative fixups.
72638 */
72639 - kip->insns = module_alloc(PAGE_SIZE);
72640 + kip->insns = module_alloc_exec(PAGE_SIZE);
72641 if (!kip->insns) {
72642 kfree(kip);
72643 return NULL;
72644 @@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
72645 */
72646 if (!list_is_singular(&kprobe_insn_pages)) {
72647 list_del(&kip->list);
72648 - module_free(NULL, kip->insns);
72649 + module_free_exec(NULL, kip->insns);
72650 kfree(kip);
72651 }
72652 return 1;
72653 @@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
72654 {
72655 int i, err = 0;
72656 unsigned long offset = 0, size = 0;
72657 - char *modname, namebuf[128];
72658 + char *modname, namebuf[KSYM_NAME_LEN];
72659 const char *symbol_name;
72660 void *addr;
72661 struct kprobe_blackpoint *kb;
72662 @@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
72663 const char *sym = NULL;
72664 unsigned int i = *(loff_t *) v;
72665 unsigned long offset = 0;
72666 - char *modname, namebuf[128];
72667 + char *modname, namebuf[KSYM_NAME_LEN];
72668
72669 head = &kprobe_table[i];
72670 preempt_disable();
72671 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
72672 index d86fe89..d12fc66 100644
72673 --- a/kernel/lockdep.c
72674 +++ b/kernel/lockdep.c
72675 @@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_trace = {
72676 /*
72677 * Various lockdep statistics:
72678 */
72679 -atomic_t chain_lookup_hits;
72680 -atomic_t chain_lookup_misses;
72681 -atomic_t hardirqs_on_events;
72682 -atomic_t hardirqs_off_events;
72683 -atomic_t redundant_hardirqs_on;
72684 -atomic_t redundant_hardirqs_off;
72685 -atomic_t softirqs_on_events;
72686 -atomic_t softirqs_off_events;
72687 -atomic_t redundant_softirqs_on;
72688 -atomic_t redundant_softirqs_off;
72689 -atomic_t nr_unused_locks;
72690 -atomic_t nr_cyclic_checks;
72691 -atomic_t nr_find_usage_forwards_checks;
72692 -atomic_t nr_find_usage_backwards_checks;
72693 +atomic_unchecked_t chain_lookup_hits;
72694 +atomic_unchecked_t chain_lookup_misses;
72695 +atomic_unchecked_t hardirqs_on_events;
72696 +atomic_unchecked_t hardirqs_off_events;
72697 +atomic_unchecked_t redundant_hardirqs_on;
72698 +atomic_unchecked_t redundant_hardirqs_off;
72699 +atomic_unchecked_t softirqs_on_events;
72700 +atomic_unchecked_t softirqs_off_events;
72701 +atomic_unchecked_t redundant_softirqs_on;
72702 +atomic_unchecked_t redundant_softirqs_off;
72703 +atomic_unchecked_t nr_unused_locks;
72704 +atomic_unchecked_t nr_cyclic_checks;
72705 +atomic_unchecked_t nr_find_usage_forwards_checks;
72706 +atomic_unchecked_t nr_find_usage_backwards_checks;
72707 #endif
72708
72709 /*
72710 @@ -577,6 +577,10 @@ static int static_obj(void *obj)
72711 int i;
72712 #endif
72713
72714 +#ifdef CONFIG_PAX_KERNEXEC
72715 + start = ktla_ktva(start);
72716 +#endif
72717 +
72718 /*
72719 * static variable?
72720 */
72721 @@ -592,8 +596,7 @@ static int static_obj(void *obj)
72722 */
72723 for_each_possible_cpu(i) {
72724 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
72725 - end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
72726 - + per_cpu_offset(i);
72727 + end = start + PERCPU_ENOUGH_ROOM;
72728
72729 if ((addr >= start) && (addr < end))
72730 return 1;
72731 @@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
72732 if (!static_obj(lock->key)) {
72733 debug_locks_off();
72734 printk("INFO: trying to register non-static key.\n");
72735 + printk("lock:%pS key:%pS.\n", lock, lock->key);
72736 printk("the code is fine but needs lockdep annotation.\n");
72737 printk("turning off the locking correctness validator.\n");
72738 dump_stack();
72739 @@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
72740 if (!class)
72741 return 0;
72742 }
72743 - debug_atomic_inc((atomic_t *)&class->ops);
72744 + debug_atomic_inc((atomic_unchecked_t *)&class->ops);
72745 if (very_verbose(class)) {
72746 printk("\nacquire class [%p] %s", class->key, class->name);
72747 if (class->name_version > 1)
72748 diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
72749 index a2ee95a..092f0f2 100644
72750 --- a/kernel/lockdep_internals.h
72751 +++ b/kernel/lockdep_internals.h
72752 @@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_class *class)
72753 /*
72754 * Various lockdep statistics:
72755 */
72756 -extern atomic_t chain_lookup_hits;
72757 -extern atomic_t chain_lookup_misses;
72758 -extern atomic_t hardirqs_on_events;
72759 -extern atomic_t hardirqs_off_events;
72760 -extern atomic_t redundant_hardirqs_on;
72761 -extern atomic_t redundant_hardirqs_off;
72762 -extern atomic_t softirqs_on_events;
72763 -extern atomic_t softirqs_off_events;
72764 -extern atomic_t redundant_softirqs_on;
72765 -extern atomic_t redundant_softirqs_off;
72766 -extern atomic_t nr_unused_locks;
72767 -extern atomic_t nr_cyclic_checks;
72768 -extern atomic_t nr_cyclic_check_recursions;
72769 -extern atomic_t nr_find_usage_forwards_checks;
72770 -extern atomic_t nr_find_usage_forwards_recursions;
72771 -extern atomic_t nr_find_usage_backwards_checks;
72772 -extern atomic_t nr_find_usage_backwards_recursions;
72773 -# define debug_atomic_inc(ptr) atomic_inc(ptr)
72774 -# define debug_atomic_dec(ptr) atomic_dec(ptr)
72775 -# define debug_atomic_read(ptr) atomic_read(ptr)
72776 +extern atomic_unchecked_t chain_lookup_hits;
72777 +extern atomic_unchecked_t chain_lookup_misses;
72778 +extern atomic_unchecked_t hardirqs_on_events;
72779 +extern atomic_unchecked_t hardirqs_off_events;
72780 +extern atomic_unchecked_t redundant_hardirqs_on;
72781 +extern atomic_unchecked_t redundant_hardirqs_off;
72782 +extern atomic_unchecked_t softirqs_on_events;
72783 +extern atomic_unchecked_t softirqs_off_events;
72784 +extern atomic_unchecked_t redundant_softirqs_on;
72785 +extern atomic_unchecked_t redundant_softirqs_off;
72786 +extern atomic_unchecked_t nr_unused_locks;
72787 +extern atomic_unchecked_t nr_cyclic_checks;
72788 +extern atomic_unchecked_t nr_cyclic_check_recursions;
72789 +extern atomic_unchecked_t nr_find_usage_forwards_checks;
72790 +extern atomic_unchecked_t nr_find_usage_forwards_recursions;
72791 +extern atomic_unchecked_t nr_find_usage_backwards_checks;
72792 +extern atomic_unchecked_t nr_find_usage_backwards_recursions;
72793 +# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
72794 +# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
72795 +# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
72796 #else
72797 # define debug_atomic_inc(ptr) do { } while (0)
72798 # define debug_atomic_dec(ptr) do { } while (0)
72799 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
72800 index d4aba4f..02a353f 100644
72801 --- a/kernel/lockdep_proc.c
72802 +++ b/kernel/lockdep_proc.c
72803 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
72804
72805 static void print_name(struct seq_file *m, struct lock_class *class)
72806 {
72807 - char str[128];
72808 + char str[KSYM_NAME_LEN];
72809 const char *name = class->name;
72810
72811 if (!name) {
72812 diff --git a/kernel/module.c b/kernel/module.c
72813 index 4b270e6..2226274 100644
72814 --- a/kernel/module.c
72815 +++ b/kernel/module.c
72816 @@ -55,6 +55,7 @@
72817 #include <linux/async.h>
72818 #include <linux/percpu.h>
72819 #include <linux/kmemleak.h>
72820 +#include <linux/grsecurity.h>
72821
72822 #define CREATE_TRACE_POINTS
72823 #include <trace/events/module.h>
72824 @@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq);
72825 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
72826
72827 /* Bounds of module allocation, for speeding __module_address */
72828 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
72829 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
72830 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
72831
72832 int register_module_notifier(struct notifier_block * nb)
72833 {
72834 @@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
72835 return true;
72836
72837 list_for_each_entry_rcu(mod, &modules, list) {
72838 - struct symsearch arr[] = {
72839 + struct symsearch modarr[] = {
72840 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
72841 NOT_GPL_ONLY, false },
72842 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
72843 @@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
72844 #endif
72845 };
72846
72847 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
72848 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
72849 return true;
72850 }
72851 return false;
72852 @@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned long size, unsigned long align,
72853 void *ptr;
72854 int cpu;
72855
72856 - if (align > PAGE_SIZE) {
72857 + if (align-1 >= PAGE_SIZE) {
72858 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
72859 name, align, PAGE_SIZE);
72860 align = PAGE_SIZE;
72861 @@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
72862 * /sys/module/foo/sections stuff
72863 * J. Corbet <corbet@lwn.net>
72864 */
72865 -#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
72866 +#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
72867
72868 static inline bool sect_empty(const Elf_Shdr *sect)
72869 {
72870 @@ -1545,7 +1547,8 @@ static void free_module(struct module *mod)
72871 destroy_params(mod->kp, mod->num_kp);
72872
72873 /* This may be NULL, but that's OK */
72874 - module_free(mod, mod->module_init);
72875 + module_free(mod, mod->module_init_rw);
72876 + module_free_exec(mod, mod->module_init_rx);
72877 kfree(mod->args);
72878 if (mod->percpu)
72879 percpu_modfree(mod->percpu);
72880 @@ -1554,10 +1557,12 @@ static void free_module(struct module *mod)
72881 percpu_modfree(mod->refptr);
72882 #endif
72883 /* Free lock-classes: */
72884 - lockdep_free_key_range(mod->module_core, mod->core_size);
72885 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
72886 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
72887
72888 /* Finally, free the core (containing the module structure) */
72889 - module_free(mod, mod->module_core);
72890 + module_free_exec(mod, mod->module_core_rx);
72891 + module_free(mod, mod->module_core_rw);
72892
72893 #ifdef CONFIG_MPU
72894 update_protections(current->mm);
72895 @@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
72896 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
72897 int ret = 0;
72898 const struct kernel_symbol *ksym;
72899 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
72900 + int is_fs_load = 0;
72901 + int register_filesystem_found = 0;
72902 + char *p;
72903 +
72904 + p = strstr(mod->args, "grsec_modharden_fs");
72905 +
72906 + if (p) {
72907 + char *endptr = p + strlen("grsec_modharden_fs");
72908 + /* copy \0 as well */
72909 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
72910 + is_fs_load = 1;
72911 + }
72912 +#endif
72913 +
72914
72915 for (i = 1; i < n; i++) {
72916 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
72917 + const char *name = strtab + sym[i].st_name;
72918 +
72919 + /* it's a real shame this will never get ripped and copied
72920 + upstream! ;(
72921 + */
72922 + if (is_fs_load && !strcmp(name, "register_filesystem"))
72923 + register_filesystem_found = 1;
72924 +#endif
72925 switch (sym[i].st_shndx) {
72926 case SHN_COMMON:
72927 /* We compiled with -fno-common. These are not
72928 @@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
72929 strtab + sym[i].st_name, mod);
72930 /* Ok if resolved. */
72931 if (ksym) {
72932 + pax_open_kernel();
72933 sym[i].st_value = ksym->value;
72934 + pax_close_kernel();
72935 break;
72936 }
72937
72938 @@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
72939 secbase = (unsigned long)mod->percpu;
72940 else
72941 secbase = sechdrs[sym[i].st_shndx].sh_addr;
72942 + pax_open_kernel();
72943 sym[i].st_value += secbase;
72944 + pax_close_kernel();
72945 break;
72946 }
72947 }
72948
72949 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
72950 + if (is_fs_load && !register_filesystem_found) {
72951 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
72952 + ret = -EPERM;
72953 + }
72954 +#endif
72955 +
72956 return ret;
72957 }
72958
72959 @@ -1731,11 +1771,12 @@ static void layout_sections(struct module *mod,
72960 || s->sh_entsize != ~0UL
72961 || strstarts(secstrings + s->sh_name, ".init"))
72962 continue;
72963 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
72964 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
72965 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
72966 + else
72967 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
72968 DEBUGP("\t%s\n", secstrings + s->sh_name);
72969 }
72970 - if (m == 0)
72971 - mod->core_text_size = mod->core_size;
72972 }
72973
72974 DEBUGP("Init section allocation order:\n");
72975 @@ -1748,12 +1789,13 @@ static void layout_sections(struct module *mod,
72976 || s->sh_entsize != ~0UL
72977 || !strstarts(secstrings + s->sh_name, ".init"))
72978 continue;
72979 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
72980 - | INIT_OFFSET_MASK);
72981 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
72982 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
72983 + else
72984 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
72985 + s->sh_entsize |= INIT_OFFSET_MASK;
72986 DEBUGP("\t%s\n", secstrings + s->sh_name);
72987 }
72988 - if (m == 0)
72989 - mod->init_text_size = mod->init_size;
72990 }
72991 }
72992
72993 @@ -1857,9 +1899,8 @@ static int is_exported(const char *name, unsigned long value,
72994
72995 /* As per nm */
72996 static char elf_type(const Elf_Sym *sym,
72997 - Elf_Shdr *sechdrs,
72998 - const char *secstrings,
72999 - struct module *mod)
73000 + const Elf_Shdr *sechdrs,
73001 + const char *secstrings)
73002 {
73003 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
73004 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
73005 @@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struct module *mod,
73006
73007 /* Put symbol section at end of init part of module. */
73008 symsect->sh_flags |= SHF_ALLOC;
73009 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
73010 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
73011 symindex) | INIT_OFFSET_MASK;
73012 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
73013
73014 @@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struct module *mod,
73015 }
73016
73017 /* Append room for core symbols at end of core part. */
73018 - symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
73019 - mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
73020 + symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
73021 + mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
73022
73023 /* Put string table section at end of init part of module. */
73024 strsect->sh_flags |= SHF_ALLOC;
73025 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
73026 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
73027 strindex) | INIT_OFFSET_MASK;
73028 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
73029
73030 /* Append room for core symbols' strings at end of core part. */
73031 - *pstroffs = mod->core_size;
73032 + *pstroffs = mod->core_size_rx;
73033 __set_bit(0, strmap);
73034 - mod->core_size += bitmap_weight(strmap, strsect->sh_size);
73035 + mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
73036
73037 return symoffs;
73038 }
73039 @@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *mod,
73040 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
73041 mod->strtab = (void *)sechdrs[strindex].sh_addr;
73042
73043 + pax_open_kernel();
73044 +
73045 /* Set types up while we still have access to sections. */
73046 for (i = 0; i < mod->num_symtab; i++)
73047 mod->symtab[i].st_info
73048 - = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
73049 + = elf_type(&mod->symtab[i], sechdrs, secstrings);
73050
73051 - mod->core_symtab = dst = mod->module_core + symoffs;
73052 + mod->core_symtab = dst = mod->module_core_rx + symoffs;
73053 src = mod->symtab;
73054 *dst = *src;
73055 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
73056 @@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *mod,
73057 }
73058 mod->core_num_syms = ndst;
73059
73060 - mod->core_strtab = s = mod->module_core + stroffs;
73061 + mod->core_strtab = s = mod->module_core_rx + stroffs;
73062 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
73063 if (test_bit(i, strmap))
73064 *++s = mod->strtab[i];
73065 +
73066 + pax_close_kernel();
73067 }
73068 #else
73069 static inline unsigned long layout_symtab(struct module *mod,
73070 @@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
73071 #endif
73072 }
73073
73074 -static void *module_alloc_update_bounds(unsigned long size)
73075 +static void *module_alloc_update_bounds_rw(unsigned long size)
73076 {
73077 void *ret = module_alloc(size);
73078
73079 if (ret) {
73080 /* Update module bounds. */
73081 - if ((unsigned long)ret < module_addr_min)
73082 - module_addr_min = (unsigned long)ret;
73083 - if ((unsigned long)ret + size > module_addr_max)
73084 - module_addr_max = (unsigned long)ret + size;
73085 + if ((unsigned long)ret < module_addr_min_rw)
73086 + module_addr_min_rw = (unsigned long)ret;
73087 + if ((unsigned long)ret + size > module_addr_max_rw)
73088 + module_addr_max_rw = (unsigned long)ret + size;
73089 + }
73090 + return ret;
73091 +}
73092 +
73093 +static void *module_alloc_update_bounds_rx(unsigned long size)
73094 +{
73095 + void *ret = module_alloc_exec(size);
73096 +
73097 + if (ret) {
73098 + /* Update module bounds. */
73099 + if ((unsigned long)ret < module_addr_min_rx)
73100 + module_addr_min_rx = (unsigned long)ret;
73101 + if ((unsigned long)ret + size > module_addr_max_rx)
73102 + module_addr_max_rx = (unsigned long)ret + size;
73103 }
73104 return ret;
73105 }
73106 @@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
73107 unsigned int i;
73108
73109 /* only scan the sections containing data */
73110 - kmemleak_scan_area(mod->module_core, (unsigned long)mod -
73111 - (unsigned long)mod->module_core,
73112 + kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
73113 + (unsigned long)mod->module_core_rw,
73114 sizeof(struct module), GFP_KERNEL);
73115
73116 for (i = 1; i < hdr->e_shnum; i++) {
73117 @@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
73118 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
73119 continue;
73120
73121 - kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
73122 - (unsigned long)mod->module_core,
73123 + kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
73124 + (unsigned long)mod->module_core_rw,
73125 sechdrs[i].sh_size, GFP_KERNEL);
73126 }
73127 }
73128 @@ -2097,7 +2156,7 @@ static noinline struct module *load_module(void __user *umod,
73129 Elf_Ehdr *hdr;
73130 Elf_Shdr *sechdrs;
73131 char *secstrings, *args, *modmagic, *strtab = NULL;
73132 - char *staging;
73133 + char *staging, *license;
73134 unsigned int i;
73135 unsigned int symindex = 0;
73136 unsigned int strindex = 0;
73137 @@ -2195,6 +2254,14 @@ static noinline struct module *load_module(void __user *umod,
73138 goto free_hdr;
73139 }
73140
73141 + license = get_modinfo(sechdrs, infoindex, "license");
73142 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
73143 + if (!license || !license_is_gpl_compatible(license)) {
73144 + err = -ENOEXEC;
73145 + goto free_hdr;
73146 + }
73147 +#endif
73148 +
73149 modmagic = get_modinfo(sechdrs, infoindex, "vermagic");
73150 /* This is allowed: modprobe --force will invalidate it. */
73151 if (!modmagic) {
73152 @@ -2263,7 +2330,7 @@ static noinline struct module *load_module(void __user *umod,
73153 secstrings, &stroffs, strmap);
73154
73155 /* Do the allocs. */
73156 - ptr = module_alloc_update_bounds(mod->core_size);
73157 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
73158 /*
73159 * The pointer to this block is stored in the module structure
73160 * which is inside the block. Just mark it as not being a
73161 @@ -2274,23 +2341,47 @@ static noinline struct module *load_module(void __user *umod,
73162 err = -ENOMEM;
73163 goto free_percpu;
73164 }
73165 - memset(ptr, 0, mod->core_size);
73166 - mod->module_core = ptr;
73167 + memset(ptr, 0, mod->core_size_rw);
73168 + mod->module_core_rw = ptr;
73169
73170 - ptr = module_alloc_update_bounds(mod->init_size);
73171 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
73172 /*
73173 * The pointer to this block is stored in the module structure
73174 * which is inside the block. This block doesn't need to be
73175 * scanned as it contains data and code that will be freed
73176 * after the module is initialized.
73177 */
73178 - kmemleak_ignore(ptr);
73179 - if (!ptr && mod->init_size) {
73180 + kmemleak_not_leak(ptr);
73181 + if (!ptr && mod->init_size_rw) {
73182 err = -ENOMEM;
73183 - goto free_core;
73184 + goto free_core_rw;
73185 }
73186 - memset(ptr, 0, mod->init_size);
73187 - mod->module_init = ptr;
73188 + memset(ptr, 0, mod->init_size_rw);
73189 + mod->module_init_rw = ptr;
73190 +
73191 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
73192 + kmemleak_not_leak(ptr);
73193 + if (!ptr) {
73194 + err = -ENOMEM;
73195 + goto free_init_rw;
73196 + }
73197 +
73198 + pax_open_kernel();
73199 + memset(ptr, 0, mod->core_size_rx);
73200 + pax_close_kernel();
73201 + mod->module_core_rx = ptr;
73202 +
73203 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
73204 + kmemleak_not_leak(ptr);
73205 + if (!ptr && mod->init_size_rx) {
73206 + err = -ENOMEM;
73207 + goto free_core_rx;
73208 + }
73209 +
73210 + pax_open_kernel();
73211 + memset(ptr, 0, mod->init_size_rx);
73212 + pax_close_kernel();
73213 + mod->module_init_rx = ptr;
73214
73215 /* Transfer each section which specifies SHF_ALLOC */
73216 DEBUGP("final section addresses:\n");
73217 @@ -2300,17 +2391,45 @@ static noinline struct module *load_module(void __user *umod,
73218 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
73219 continue;
73220
73221 - if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
73222 - dest = mod->module_init
73223 - + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
73224 - else
73225 - dest = mod->module_core + sechdrs[i].sh_entsize;
73226 + if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
73227 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
73228 + dest = mod->module_init_rw
73229 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
73230 + else
73231 + dest = mod->module_init_rx
73232 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
73233 + } else {
73234 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
73235 + dest = mod->module_core_rw + sechdrs[i].sh_entsize;
73236 + else
73237 + dest = mod->module_core_rx + sechdrs[i].sh_entsize;
73238 + }
73239
73240 - if (sechdrs[i].sh_type != SHT_NOBITS)
73241 - memcpy(dest, (void *)sechdrs[i].sh_addr,
73242 - sechdrs[i].sh_size);
73243 + if (sechdrs[i].sh_type != SHT_NOBITS) {
73244 +
73245 +#ifdef CONFIG_PAX_KERNEXEC
73246 +#ifdef CONFIG_X86_64
73247 + if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
73248 + set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
73249 +#endif
73250 + if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
73251 + pax_open_kernel();
73252 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
73253 + pax_close_kernel();
73254 + } else
73255 +#endif
73256 +
73257 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
73258 + }
73259 /* Update sh_addr to point to copy in image. */
73260 - sechdrs[i].sh_addr = (unsigned long)dest;
73261 +
73262 +#ifdef CONFIG_PAX_KERNEXEC
73263 + if (sechdrs[i].sh_flags & SHF_EXECINSTR)
73264 + sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
73265 + else
73266 +#endif
73267 +
73268 + sechdrs[i].sh_addr = (unsigned long)dest;
73269 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
73270 }
73271 /* Module has been moved. */
73272 @@ -2322,7 +2441,7 @@ static noinline struct module *load_module(void __user *umod,
73273 mod->name);
73274 if (!mod->refptr) {
73275 err = -ENOMEM;
73276 - goto free_init;
73277 + goto free_init_rx;
73278 }
73279 #endif
73280 /* Now we've moved module, initialize linked lists, etc. */
73281 @@ -2334,7 +2453,7 @@ static noinline struct module *load_module(void __user *umod,
73282 goto free_unload;
73283
73284 /* Set up license info based on the info section */
73285 - set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
73286 + set_license(mod, license);
73287
73288 /*
73289 * ndiswrapper is under GPL by itself, but loads proprietary modules.
73290 @@ -2351,6 +2470,31 @@ static noinline struct module *load_module(void __user *umod,
73291 /* Set up MODINFO_ATTR fields */
73292 setup_modinfo(mod, sechdrs, infoindex);
73293
73294 + mod->args = args;
73295 +
73296 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
73297 + {
73298 + char *p, *p2;
73299 +
73300 + if (strstr(mod->args, "grsec_modharden_netdev")) {
73301 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
73302 + err = -EPERM;
73303 + goto cleanup;
73304 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
73305 + p += strlen("grsec_modharden_normal");
73306 + p2 = strstr(p, "_");
73307 + if (p2) {
73308 + *p2 = '\0';
73309 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
73310 + *p2 = '_';
73311 + }
73312 + err = -EPERM;
73313 + goto cleanup;
73314 + }
73315 + }
73316 +#endif
73317 +
73318 +
73319 /* Fix up syms, so that st_value is a pointer to location. */
73320 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
73321 mod);
73322 @@ -2431,8 +2575,8 @@ static noinline struct module *load_module(void __user *umod,
73323
73324 /* Now do relocations. */
73325 for (i = 1; i < hdr->e_shnum; i++) {
73326 - const char *strtab = (char *)sechdrs[strindex].sh_addr;
73327 unsigned int info = sechdrs[i].sh_info;
73328 + strtab = (char *)sechdrs[strindex].sh_addr;
73329
73330 /* Not a valid relocation section? */
73331 if (info >= hdr->e_shnum)
73332 @@ -2493,16 +2637,15 @@ static noinline struct module *load_module(void __user *umod,
73333 * Do it before processing of module parameters, so the module
73334 * can provide parameter accessor functions of its own.
73335 */
73336 - if (mod->module_init)
73337 - flush_icache_range((unsigned long)mod->module_init,
73338 - (unsigned long)mod->module_init
73339 - + mod->init_size);
73340 - flush_icache_range((unsigned long)mod->module_core,
73341 - (unsigned long)mod->module_core + mod->core_size);
73342 + if (mod->module_init_rx)
73343 + flush_icache_range((unsigned long)mod->module_init_rx,
73344 + (unsigned long)mod->module_init_rx
73345 + + mod->init_size_rx);
73346 + flush_icache_range((unsigned long)mod->module_core_rx,
73347 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
73348
73349 set_fs(old_fs);
73350
73351 - mod->args = args;
73352 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
73353 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
73354 mod->name);
73355 @@ -2546,12 +2689,16 @@ static noinline struct module *load_module(void __user *umod,
73356 free_unload:
73357 module_unload_free(mod);
73358 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
73359 + free_init_rx:
73360 percpu_modfree(mod->refptr);
73361 - free_init:
73362 #endif
73363 - module_free(mod, mod->module_init);
73364 - free_core:
73365 - module_free(mod, mod->module_core);
73366 + module_free_exec(mod, mod->module_init_rx);
73367 + free_core_rx:
73368 + module_free_exec(mod, mod->module_core_rx);
73369 + free_init_rw:
73370 + module_free(mod, mod->module_init_rw);
73371 + free_core_rw:
73372 + module_free(mod, mod->module_core_rw);
73373 /* mod will be freed with core. Don't access it beyond this line! */
73374 free_percpu:
73375 if (percpu)
73376 @@ -2653,10 +2800,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
73377 mod->symtab = mod->core_symtab;
73378 mod->strtab = mod->core_strtab;
73379 #endif
73380 - module_free(mod, mod->module_init);
73381 - mod->module_init = NULL;
73382 - mod->init_size = 0;
73383 - mod->init_text_size = 0;
73384 + module_free(mod, mod->module_init_rw);
73385 + module_free_exec(mod, mod->module_init_rx);
73386 + mod->module_init_rw = NULL;
73387 + mod->module_init_rx = NULL;
73388 + mod->init_size_rw = 0;
73389 + mod->init_size_rx = 0;
73390 mutex_unlock(&module_mutex);
73391
73392 return 0;
73393 @@ -2687,10 +2836,16 @@ static const char *get_ksymbol(struct module *mod,
73394 unsigned long nextval;
73395
73396 /* At worse, next value is at end of module */
73397 - if (within_module_init(addr, mod))
73398 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
73399 + if (within_module_init_rx(addr, mod))
73400 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
73401 + else if (within_module_init_rw(addr, mod))
73402 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
73403 + else if (within_module_core_rx(addr, mod))
73404 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
73405 + else if (within_module_core_rw(addr, mod))
73406 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
73407 else
73408 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
73409 + return NULL;
73410
73411 /* Scan for closest preceeding symbol, and next symbol. (ELF
73412 starts real symbols at 1). */
73413 @@ -2936,7 +3091,7 @@ static int m_show(struct seq_file *m, void *p)
73414 char buf[8];
73415
73416 seq_printf(m, "%s %u",
73417 - mod->name, mod->init_size + mod->core_size);
73418 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
73419 print_unload_info(m, mod);
73420
73421 /* Informative for users. */
73422 @@ -2945,7 +3100,7 @@ static int m_show(struct seq_file *m, void *p)
73423 mod->state == MODULE_STATE_COMING ? "Loading":
73424 "Live");
73425 /* Used by oprofile and other similar tools. */
73426 - seq_printf(m, " 0x%p", mod->module_core);
73427 + seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
73428
73429 /* Taints info */
73430 if (mod->taints)
73431 @@ -2981,7 +3136,17 @@ static const struct file_operations proc_modules_operations = {
73432
73433 static int __init proc_modules_init(void)
73434 {
73435 +#ifndef CONFIG_GRKERNSEC_HIDESYM
73436 +#ifdef CONFIG_GRKERNSEC_PROC_USER
73437 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
73438 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73439 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
73440 +#else
73441 proc_create("modules", 0, NULL, &proc_modules_operations);
73442 +#endif
73443 +#else
73444 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
73445 +#endif
73446 return 0;
73447 }
73448 module_init(proc_modules_init);
73449 @@ -3040,12 +3205,12 @@ struct module *__module_address(unsigned long addr)
73450 {
73451 struct module *mod;
73452
73453 - if (addr < module_addr_min || addr > module_addr_max)
73454 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
73455 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
73456 return NULL;
73457
73458 list_for_each_entry_rcu(mod, &modules, list)
73459 - if (within_module_core(addr, mod)
73460 - || within_module_init(addr, mod))
73461 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
73462 return mod;
73463 return NULL;
73464 }
73465 @@ -3079,11 +3244,20 @@ bool is_module_text_address(unsigned long addr)
73466 */
73467 struct module *__module_text_address(unsigned long addr)
73468 {
73469 - struct module *mod = __module_address(addr);
73470 + struct module *mod;
73471 +
73472 +#ifdef CONFIG_X86_32
73473 + addr = ktla_ktva(addr);
73474 +#endif
73475 +
73476 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
73477 + return NULL;
73478 +
73479 + mod = __module_address(addr);
73480 +
73481 if (mod) {
73482 /* Make sure it's within the text section. */
73483 - if (!within(addr, mod->module_init, mod->init_text_size)
73484 - && !within(addr, mod->module_core, mod->core_text_size))
73485 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
73486 mod = NULL;
73487 }
73488 return mod;
73489 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
73490 index ec815a9..fe46e99 100644
73491 --- a/kernel/mutex-debug.c
73492 +++ b/kernel/mutex-debug.c
73493 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
73494 }
73495
73496 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
73497 - struct thread_info *ti)
73498 + struct task_struct *task)
73499 {
73500 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
73501
73502 /* Mark the current thread as blocked on the lock: */
73503 - ti->task->blocked_on = waiter;
73504 + task->blocked_on = waiter;
73505 }
73506
73507 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
73508 - struct thread_info *ti)
73509 + struct task_struct *task)
73510 {
73511 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
73512 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
73513 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
73514 - ti->task->blocked_on = NULL;
73515 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
73516 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
73517 + task->blocked_on = NULL;
73518
73519 list_del_init(&waiter->list);
73520 waiter->task = NULL;
73521 @@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lock)
73522 return;
73523
73524 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
73525 - DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
73526 + DEBUG_LOCKS_WARN_ON(lock->owner != current);
73527 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
73528 mutex_clear_owner(lock);
73529 }
73530 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
73531 index 6b2d735..372d3c4 100644
73532 --- a/kernel/mutex-debug.h
73533 +++ b/kernel/mutex-debug.h
73534 @@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
73535 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
73536 extern void debug_mutex_add_waiter(struct mutex *lock,
73537 struct mutex_waiter *waiter,
73538 - struct thread_info *ti);
73539 + struct task_struct *task);
73540 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
73541 - struct thread_info *ti);
73542 + struct task_struct *task);
73543 extern void debug_mutex_unlock(struct mutex *lock);
73544 extern void debug_mutex_init(struct mutex *lock, const char *name,
73545 struct lock_class_key *key);
73546
73547 static inline void mutex_set_owner(struct mutex *lock)
73548 {
73549 - lock->owner = current_thread_info();
73550 + lock->owner = current;
73551 }
73552
73553 static inline void mutex_clear_owner(struct mutex *lock)
73554 diff --git a/kernel/mutex.c b/kernel/mutex.c
73555 index f85644c..5ee9f77 100644
73556 --- a/kernel/mutex.c
73557 +++ b/kernel/mutex.c
73558 @@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
73559 */
73560
73561 for (;;) {
73562 - struct thread_info *owner;
73563 + struct task_struct *owner;
73564
73565 /*
73566 * If we own the BKL, then don't spin. The owner of
73567 @@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
73568 spin_lock_mutex(&lock->wait_lock, flags);
73569
73570 debug_mutex_lock_common(lock, &waiter);
73571 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
73572 + debug_mutex_add_waiter(lock, &waiter, task);
73573
73574 /* add waiting tasks to the end of the waitqueue (FIFO): */
73575 list_add_tail(&waiter.list, &lock->wait_list);
73576 @@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
73577 * TASK_UNINTERRUPTIBLE case.)
73578 */
73579 if (unlikely(signal_pending_state(state, task))) {
73580 - mutex_remove_waiter(lock, &waiter,
73581 - task_thread_info(task));
73582 + mutex_remove_waiter(lock, &waiter, task);
73583 mutex_release(&lock->dep_map, 1, ip);
73584 spin_unlock_mutex(&lock->wait_lock, flags);
73585
73586 @@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
73587 done:
73588 lock_acquired(&lock->dep_map, ip);
73589 /* got the lock - rejoice! */
73590 - mutex_remove_waiter(lock, &waiter, current_thread_info());
73591 + mutex_remove_waiter(lock, &waiter, task);
73592 mutex_set_owner(lock);
73593
73594 /* set it to 0 if there are no waiters left: */
73595 diff --git a/kernel/mutex.h b/kernel/mutex.h
73596 index 67578ca..4115fbf 100644
73597 --- a/kernel/mutex.h
73598 +++ b/kernel/mutex.h
73599 @@ -19,7 +19,7 @@
73600 #ifdef CONFIG_SMP
73601 static inline void mutex_set_owner(struct mutex *lock)
73602 {
73603 - lock->owner = current_thread_info();
73604 + lock->owner = current;
73605 }
73606
73607 static inline void mutex_clear_owner(struct mutex *lock)
73608 diff --git a/kernel/panic.c b/kernel/panic.c
73609 index 96b45d0..ff70a46 100644
73610 --- a/kernel/panic.c
73611 +++ b/kernel/panic.c
73612 @@ -71,7 +71,11 @@ NORET_TYPE void panic(const char * fmt, ...)
73613 va_end(args);
73614 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
73615 #ifdef CONFIG_DEBUG_BUGVERBOSE
73616 - dump_stack();
73617 + /*
73618 + * Avoid nested stack-dumping if a panic occurs during oops processing
73619 + */
73620 + if (!oops_in_progress)
73621 + dump_stack();
73622 #endif
73623
73624 /*
73625 @@ -352,7 +356,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller, struc
73626 const char *board;
73627
73628 printk(KERN_WARNING "------------[ cut here ]------------\n");
73629 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
73630 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
73631 board = dmi_get_system_info(DMI_PRODUCT_NAME);
73632 if (board)
73633 printk(KERN_WARNING "Hardware name: %s\n", board);
73634 @@ -392,7 +396,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
73635 */
73636 void __stack_chk_fail(void)
73637 {
73638 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
73639 + dump_stack();
73640 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
73641 __builtin_return_address(0));
73642 }
73643 EXPORT_SYMBOL(__stack_chk_fail);
73644 diff --git a/kernel/params.c b/kernel/params.c
73645 index d656c27..21e452c 100644
73646 --- a/kernel/params.c
73647 +++ b/kernel/params.c
73648 @@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct kobject *kobj,
73649 return ret;
73650 }
73651
73652 -static struct sysfs_ops module_sysfs_ops = {
73653 +static const struct sysfs_ops module_sysfs_ops = {
73654 .show = module_attr_show,
73655 .store = module_attr_store,
73656 };
73657 @@ -739,7 +739,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
73658 return 0;
73659 }
73660
73661 -static struct kset_uevent_ops module_uevent_ops = {
73662 +static const struct kset_uevent_ops module_uevent_ops = {
73663 .filter = uevent_filter,
73664 };
73665
73666 diff --git a/kernel/perf_event.c b/kernel/perf_event.c
73667 index 37ebc14..9c121d9 100644
73668 --- a/kernel/perf_event.c
73669 +++ b/kernel/perf_event.c
73670 @@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostly = 516; /* 'free' kb per user */
73671 */
73672 int sysctl_perf_event_sample_rate __read_mostly = 100000;
73673
73674 -static atomic64_t perf_event_id;
73675 +static atomic64_unchecked_t perf_event_id;
73676
73677 /*
73678 * Lock for (sysadmin-configurable) event reservations:
73679 @@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struct perf_event *event,
73680 * In order to keep per-task stats reliable we need to flip the event
73681 * values when we flip the contexts.
73682 */
73683 - value = atomic64_read(&next_event->count);
73684 - value = atomic64_xchg(&event->count, value);
73685 - atomic64_set(&next_event->count, value);
73686 + value = atomic64_read_unchecked(&next_event->count);
73687 + value = atomic64_xchg_unchecked(&event->count, value);
73688 + atomic64_set_unchecked(&next_event->count, value);
73689
73690 swap(event->total_time_enabled, next_event->total_time_enabled);
73691 swap(event->total_time_running, next_event->total_time_running);
73692 @@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_event *event)
73693 update_event_times(event);
73694 }
73695
73696 - return atomic64_read(&event->count);
73697 + return atomic64_read_unchecked(&event->count);
73698 }
73699
73700 /*
73701 @@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct perf_event *event,
73702 values[n++] = 1 + leader->nr_siblings;
73703 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
73704 values[n++] = leader->total_time_enabled +
73705 - atomic64_read(&leader->child_total_time_enabled);
73706 + atomic64_read_unchecked(&leader->child_total_time_enabled);
73707 }
73708 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
73709 values[n++] = leader->total_time_running +
73710 - atomic64_read(&leader->child_total_time_running);
73711 + atomic64_read_unchecked(&leader->child_total_time_running);
73712 }
73713
73714 size = n * sizeof(u64);
73715 @@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct perf_event *event,
73716 values[n++] = perf_event_read_value(event);
73717 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
73718 values[n++] = event->total_time_enabled +
73719 - atomic64_read(&event->child_total_time_enabled);
73720 + atomic64_read_unchecked(&event->child_total_time_enabled);
73721 }
73722 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
73723 values[n++] = event->total_time_running +
73724 - atomic64_read(&event->child_total_time_running);
73725 + atomic64_read_unchecked(&event->child_total_time_running);
73726 }
73727 if (read_format & PERF_FORMAT_ID)
73728 values[n++] = primary_event_id(event);
73729 @@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
73730 static void perf_event_reset(struct perf_event *event)
73731 {
73732 (void)perf_event_read(event);
73733 - atomic64_set(&event->count, 0);
73734 + atomic64_set_unchecked(&event->count, 0);
73735 perf_event_update_userpage(event);
73736 }
73737
73738 @@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct perf_event *event)
73739 ++userpg->lock;
73740 barrier();
73741 userpg->index = perf_event_index(event);
73742 - userpg->offset = atomic64_read(&event->count);
73743 + userpg->offset = atomic64_read_unchecked(&event->count);
73744 if (event->state == PERF_EVENT_STATE_ACTIVE)
73745 - userpg->offset -= atomic64_read(&event->hw.prev_count);
73746 + userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
73747
73748 userpg->time_enabled = event->total_time_enabled +
73749 - atomic64_read(&event->child_total_time_enabled);
73750 + atomic64_read_unchecked(&event->child_total_time_enabled);
73751
73752 userpg->time_running = event->total_time_running +
73753 - atomic64_read(&event->child_total_time_running);
73754 + atomic64_read_unchecked(&event->child_total_time_running);
73755
73756 barrier();
73757 ++userpg->lock;
73758 @@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct perf_output_handle *handle,
73759 u64 values[4];
73760 int n = 0;
73761
73762 - values[n++] = atomic64_read(&event->count);
73763 + values[n++] = atomic64_read_unchecked(&event->count);
73764 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
73765 values[n++] = event->total_time_enabled +
73766 - atomic64_read(&event->child_total_time_enabled);
73767 + atomic64_read_unchecked(&event->child_total_time_enabled);
73768 }
73769 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
73770 values[n++] = event->total_time_running +
73771 - atomic64_read(&event->child_total_time_running);
73772 + atomic64_read_unchecked(&event->child_total_time_running);
73773 }
73774 if (read_format & PERF_FORMAT_ID)
73775 values[n++] = primary_event_id(event);
73776 @@ -2940,7 +2940,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
73777 if (leader != event)
73778 leader->pmu->read(leader);
73779
73780 - values[n++] = atomic64_read(&leader->count);
73781 + values[n++] = atomic64_read_unchecked(&leader->count);
73782 if (read_format & PERF_FORMAT_ID)
73783 values[n++] = primary_event_id(leader);
73784
73785 @@ -2952,7 +2952,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
73786 if (sub != event)
73787 sub->pmu->read(sub);
73788
73789 - values[n++] = atomic64_read(&sub->count);
73790 + values[n++] = atomic64_read_unchecked(&sub->count);
73791 if (read_format & PERF_FORMAT_ID)
73792 values[n++] = primary_event_id(sub);
73793
73794 @@ -3525,12 +3525,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
73795 * need to add enough zero bytes after the string to handle
73796 * the 64bit alignment we do later.
73797 */
73798 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
73799 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
73800 if (!buf) {
73801 name = strncpy(tmp, "//enomem", sizeof(tmp));
73802 goto got_name;
73803 }
73804 - name = d_path(&file->f_path, buf, PATH_MAX);
73805 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
73806 if (IS_ERR(name)) {
73807 name = strncpy(tmp, "//toolong", sizeof(tmp));
73808 goto got_name;
73809 @@ -3783,7 +3783,7 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
73810 {
73811 struct hw_perf_event *hwc = &event->hw;
73812
73813 - atomic64_add(nr, &event->count);
73814 + atomic64_add_unchecked(nr, &event->count);
73815
73816 if (!hwc->sample_period)
73817 return;
73818 @@ -4040,9 +4040,9 @@ static void cpu_clock_perf_event_update(struct perf_event *event)
73819 u64 now;
73820
73821 now = cpu_clock(cpu);
73822 - prev = atomic64_read(&event->hw.prev_count);
73823 - atomic64_set(&event->hw.prev_count, now);
73824 - atomic64_add(now - prev, &event->count);
73825 + prev = atomic64_read_unchecked(&event->hw.prev_count);
73826 + atomic64_set_unchecked(&event->hw.prev_count, now);
73827 + atomic64_add_unchecked(now - prev, &event->count);
73828 }
73829
73830 static int cpu_clock_perf_event_enable(struct perf_event *event)
73831 @@ -4050,7 +4050,7 @@ static int cpu_clock_perf_event_enable(struct perf_event *event)
73832 struct hw_perf_event *hwc = &event->hw;
73833 int cpu = raw_smp_processor_id();
73834
73835 - atomic64_set(&hwc->prev_count, cpu_clock(cpu));
73836 + atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
73837 perf_swevent_start_hrtimer(event);
73838
73839 return 0;
73840 @@ -4082,9 +4082,9 @@ static void task_clock_perf_event_update(struct perf_event *event, u64 now)
73841 u64 prev;
73842 s64 delta;
73843
73844 - prev = atomic64_xchg(&event->hw.prev_count, now);
73845 + prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
73846 delta = now - prev;
73847 - atomic64_add(delta, &event->count);
73848 + atomic64_add_unchecked(delta, &event->count);
73849 }
73850
73851 static int task_clock_perf_event_enable(struct perf_event *event)
73852 @@ -4094,7 +4094,7 @@ static int task_clock_perf_event_enable(struct perf_event *event)
73853
73854 now = event->ctx->time;
73855
73856 - atomic64_set(&hwc->prev_count, now);
73857 + atomic64_set_unchecked(&hwc->prev_count, now);
73858
73859 perf_swevent_start_hrtimer(event);
73860
73861 @@ -4289,7 +4289,7 @@ perf_event_alloc(struct perf_event_attr *attr,
73862 event->parent = parent_event;
73863
73864 event->ns = get_pid_ns(current->nsproxy->pid_ns);
73865 - event->id = atomic64_inc_return(&perf_event_id);
73866 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
73867
73868 event->state = PERF_EVENT_STATE_INACTIVE;
73869
73870 @@ -4720,15 +4720,15 @@ static void sync_child_event(struct perf_event *child_event,
73871 if (child_event->attr.inherit_stat)
73872 perf_event_read_event(child_event, child);
73873
73874 - child_val = atomic64_read(&child_event->count);
73875 + child_val = atomic64_read_unchecked(&child_event->count);
73876
73877 /*
73878 * Add back the child's count to the parent's count:
73879 */
73880 - atomic64_add(child_val, &parent_event->count);
73881 - atomic64_add(child_event->total_time_enabled,
73882 + atomic64_add_unchecked(child_val, &parent_event->count);
73883 + atomic64_add_unchecked(child_event->total_time_enabled,
73884 &parent_event->child_total_time_enabled);
73885 - atomic64_add(child_event->total_time_running,
73886 + atomic64_add_unchecked(child_event->total_time_running,
73887 &parent_event->child_total_time_running);
73888
73889 /*
73890 diff --git a/kernel/pid.c b/kernel/pid.c
73891 index fce7198..4f23a7e 100644
73892 --- a/kernel/pid.c
73893 +++ b/kernel/pid.c
73894 @@ -33,6 +33,7 @@
73895 #include <linux/rculist.h>
73896 #include <linux/bootmem.h>
73897 #include <linux/hash.h>
73898 +#include <linux/security.h>
73899 #include <linux/pid_namespace.h>
73900 #include <linux/init_task.h>
73901 #include <linux/syscalls.h>
73902 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
73903
73904 int pid_max = PID_MAX_DEFAULT;
73905
73906 -#define RESERVED_PIDS 300
73907 +#define RESERVED_PIDS 500
73908
73909 int pid_max_min = RESERVED_PIDS + 1;
73910 int pid_max_max = PID_MAX_LIMIT;
73911 @@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
73912 */
73913 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
73914 {
73915 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
73916 + struct task_struct *task;
73917 +
73918 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
73919 +
73920 + if (gr_pid_is_chrooted(task))
73921 + return NULL;
73922 +
73923 + return task;
73924 }
73925
73926 struct task_struct *find_task_by_vpid(pid_t vnr)
73927 @@ -391,6 +399,11 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
73928 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
73929 }
73930
73931 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
73932 +{
73933 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
73934 +}
73935 +
73936 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
73937 {
73938 struct pid *pid;
73939 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
73940 index 5c9dc22..d271117 100644
73941 --- a/kernel/posix-cpu-timers.c
73942 +++ b/kernel/posix-cpu-timers.c
73943 @@ -6,6 +6,7 @@
73944 #include <linux/posix-timers.h>
73945 #include <linux/errno.h>
73946 #include <linux/math64.h>
73947 +#include <linux/security.h>
73948 #include <asm/uaccess.h>
73949 #include <linux/kernel_stat.h>
73950 #include <trace/events/timer.h>
73951 @@ -1697,7 +1698,7 @@ static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
73952
73953 static __init int init_posix_cpu_timers(void)
73954 {
73955 - struct k_clock process = {
73956 + static struct k_clock process = {
73957 .clock_getres = process_cpu_clock_getres,
73958 .clock_get = process_cpu_clock_get,
73959 .clock_set = do_posix_clock_nosettime,
73960 @@ -1705,7 +1706,7 @@ static __init int init_posix_cpu_timers(void)
73961 .nsleep = process_cpu_nsleep,
73962 .nsleep_restart = process_cpu_nsleep_restart,
73963 };
73964 - struct k_clock thread = {
73965 + static struct k_clock thread = {
73966 .clock_getres = thread_cpu_clock_getres,
73967 .clock_get = thread_cpu_clock_get,
73968 .clock_set = do_posix_clock_nosettime,
73969 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
73970 index 5e76d22..cf1baeb 100644
73971 --- a/kernel/posix-timers.c
73972 +++ b/kernel/posix-timers.c
73973 @@ -42,6 +42,7 @@
73974 #include <linux/compiler.h>
73975 #include <linux/idr.h>
73976 #include <linux/posix-timers.h>
73977 +#include <linux/grsecurity.h>
73978 #include <linux/syscalls.h>
73979 #include <linux/wait.h>
73980 #include <linux/workqueue.h>
73981 @@ -131,7 +132,7 @@ static DEFINE_SPINLOCK(idr_lock);
73982 * which we beg off on and pass to do_sys_settimeofday().
73983 */
73984
73985 -static struct k_clock posix_clocks[MAX_CLOCKS];
73986 +static struct k_clock *posix_clocks[MAX_CLOCKS];
73987
73988 /*
73989 * These ones are defined below.
73990 @@ -157,8 +158,8 @@ static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
73991 */
73992 #define CLOCK_DISPATCH(clock, call, arglist) \
73993 ((clock) < 0 ? posix_cpu_##call arglist : \
73994 - (posix_clocks[clock].call != NULL \
73995 - ? (*posix_clocks[clock].call) arglist : common_##call arglist))
73996 + (posix_clocks[clock]->call != NULL \
73997 + ? (*posix_clocks[clock]->call) arglist : common_##call arglist))
73998
73999 /*
74000 * Default clock hook functions when the struct k_clock passed
74001 @@ -172,7 +173,7 @@ static inline int common_clock_getres(const clockid_t which_clock,
74002 struct timespec *tp)
74003 {
74004 tp->tv_sec = 0;
74005 - tp->tv_nsec = posix_clocks[which_clock].res;
74006 + tp->tv_nsec = posix_clocks[which_clock]->res;
74007 return 0;
74008 }
74009
74010 @@ -217,9 +218,11 @@ static inline int invalid_clockid(const clockid_t which_clock)
74011 return 0;
74012 if ((unsigned) which_clock >= MAX_CLOCKS)
74013 return 1;
74014 - if (posix_clocks[which_clock].clock_getres != NULL)
74015 + if (posix_clocks[which_clock] == NULL)
74016 return 0;
74017 - if (posix_clocks[which_clock].res != 0)
74018 + if (posix_clocks[which_clock]->clock_getres != NULL)
74019 + return 0;
74020 + if (posix_clocks[which_clock]->res != 0)
74021 return 0;
74022 return 1;
74023 }
74024 @@ -266,29 +269,29 @@ int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp)
74025 */
74026 static __init int init_posix_timers(void)
74027 {
74028 - struct k_clock clock_realtime = {
74029 + static struct k_clock clock_realtime = {
74030 .clock_getres = hrtimer_get_res,
74031 };
74032 - struct k_clock clock_monotonic = {
74033 + static struct k_clock clock_monotonic = {
74034 .clock_getres = hrtimer_get_res,
74035 .clock_get = posix_ktime_get_ts,
74036 .clock_set = do_posix_clock_nosettime,
74037 };
74038 - struct k_clock clock_monotonic_raw = {
74039 + static struct k_clock clock_monotonic_raw = {
74040 .clock_getres = hrtimer_get_res,
74041 .clock_get = posix_get_monotonic_raw,
74042 .clock_set = do_posix_clock_nosettime,
74043 .timer_create = no_timer_create,
74044 .nsleep = no_nsleep,
74045 };
74046 - struct k_clock clock_realtime_coarse = {
74047 + static struct k_clock clock_realtime_coarse = {
74048 .clock_getres = posix_get_coarse_res,
74049 .clock_get = posix_get_realtime_coarse,
74050 .clock_set = do_posix_clock_nosettime,
74051 .timer_create = no_timer_create,
74052 .nsleep = no_nsleep,
74053 };
74054 - struct k_clock clock_monotonic_coarse = {
74055 + static struct k_clock clock_monotonic_coarse = {
74056 .clock_getres = posix_get_coarse_res,
74057 .clock_get = posix_get_monotonic_coarse,
74058 .clock_set = do_posix_clock_nosettime,
74059 @@ -296,6 +299,8 @@ static __init int init_posix_timers(void)
74060 .nsleep = no_nsleep,
74061 };
74062
74063 + pax_track_stack();
74064 +
74065 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
74066 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
74067 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
74068 @@ -484,7 +489,7 @@ void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
74069 return;
74070 }
74071
74072 - posix_clocks[clock_id] = *new_clock;
74073 + posix_clocks[clock_id] = new_clock;
74074 }
74075 EXPORT_SYMBOL_GPL(register_posix_clock);
74076
74077 @@ -948,6 +953,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
74078 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
74079 return -EFAULT;
74080
74081 + /* only the CLOCK_REALTIME clock can be set, all other clocks
74082 + have their clock_set fptr set to a nosettime dummy function
74083 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
74084 + call common_clock_set, which calls do_sys_settimeofday, which
74085 + we hook
74086 + */
74087 +
74088 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
74089 }
74090
74091 diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
74092 index 04a9e90..bc355aa 100644
74093 --- a/kernel/power/hibernate.c
74094 +++ b/kernel/power/hibernate.c
74095 @@ -48,14 +48,14 @@ enum {
74096
74097 static int hibernation_mode = HIBERNATION_SHUTDOWN;
74098
74099 -static struct platform_hibernation_ops *hibernation_ops;
74100 +static const struct platform_hibernation_ops *hibernation_ops;
74101
74102 /**
74103 * hibernation_set_ops - set the global hibernate operations
74104 * @ops: the hibernation operations to use in subsequent hibernation transitions
74105 */
74106
74107 -void hibernation_set_ops(struct platform_hibernation_ops *ops)
74108 +void hibernation_set_ops(const struct platform_hibernation_ops *ops)
74109 {
74110 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
74111 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
74112 diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
74113 index e8b3370..484c2e4 100644
74114 --- a/kernel/power/poweroff.c
74115 +++ b/kernel/power/poweroff.c
74116 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
74117 .enable_mask = SYSRQ_ENABLE_BOOT,
74118 };
74119
74120 -static int pm_sysrq_init(void)
74121 +static int __init pm_sysrq_init(void)
74122 {
74123 register_sysrq_key('o', &sysrq_poweroff_op);
74124 return 0;
74125 diff --git a/kernel/power/process.c b/kernel/power/process.c
74126 index e7cd671..56d5f459 100644
74127 --- a/kernel/power/process.c
74128 +++ b/kernel/power/process.c
74129 @@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_only)
74130 struct timeval start, end;
74131 u64 elapsed_csecs64;
74132 unsigned int elapsed_csecs;
74133 + bool timedout = false;
74134
74135 do_gettimeofday(&start);
74136
74137 end_time = jiffies + TIMEOUT;
74138 do {
74139 todo = 0;
74140 + if (time_after(jiffies, end_time))
74141 + timedout = true;
74142 read_lock(&tasklist_lock);
74143 do_each_thread(g, p) {
74144 if (frozen(p) || !freezeable(p))
74145 @@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_only)
74146 * It is "frozen enough". If the task does wake
74147 * up, it will immediately call try_to_freeze.
74148 */
74149 - if (!task_is_stopped_or_traced(p) &&
74150 - !freezer_should_skip(p))
74151 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
74152 todo++;
74153 + if (timedout) {
74154 + printk(KERN_ERR "Task refusing to freeze:\n");
74155 + sched_show_task(p);
74156 + }
74157 + }
74158 } while_each_thread(g, p);
74159 read_unlock(&tasklist_lock);
74160 yield(); /* Yield is okay here */
74161 - if (time_after(jiffies, end_time))
74162 - break;
74163 - } while (todo);
74164 + } while (todo && !timedout);
74165
74166 do_gettimeofday(&end);
74167 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
74168 diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
74169 index 40dd021..fb30ceb 100644
74170 --- a/kernel/power/suspend.c
74171 +++ b/kernel/power/suspend.c
74172 @@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_MAX] = {
74173 [PM_SUSPEND_MEM] = "mem",
74174 };
74175
74176 -static struct platform_suspend_ops *suspend_ops;
74177 +static const struct platform_suspend_ops *suspend_ops;
74178
74179 /**
74180 * suspend_set_ops - Set the global suspend method table.
74181 * @ops: Pointer to ops structure.
74182 */
74183 -void suspend_set_ops(struct platform_suspend_ops *ops)
74184 +void suspend_set_ops(const struct platform_suspend_ops *ops)
74185 {
74186 mutex_lock(&pm_mutex);
74187 suspend_ops = ops;
74188 diff --git a/kernel/printk.c b/kernel/printk.c
74189 index 4cade47..4d17900 100644
74190 --- a/kernel/printk.c
74191 +++ b/kernel/printk.c
74192 @@ -33,6 +33,7 @@
74193 #include <linux/bootmem.h>
74194 #include <linux/syscalls.h>
74195 #include <linux/kexec.h>
74196 +#include <linux/syslog.h>
74197
74198 #include <asm/uaccess.h>
74199
74200 @@ -256,38 +257,30 @@ static inline void boot_delay_msec(void)
74201 }
74202 #endif
74203
74204 -/*
74205 - * Commands to do_syslog:
74206 - *
74207 - * 0 -- Close the log. Currently a NOP.
74208 - * 1 -- Open the log. Currently a NOP.
74209 - * 2 -- Read from the log.
74210 - * 3 -- Read all messages remaining in the ring buffer.
74211 - * 4 -- Read and clear all messages remaining in the ring buffer
74212 - * 5 -- Clear ring buffer.
74213 - * 6 -- Disable printk's to console
74214 - * 7 -- Enable printk's to console
74215 - * 8 -- Set level of messages printed to console
74216 - * 9 -- Return number of unread characters in the log buffer
74217 - * 10 -- Return size of the log buffer
74218 - */
74219 -int do_syslog(int type, char __user *buf, int len)
74220 +int do_syslog(int type, char __user *buf, int len, bool from_file)
74221 {
74222 unsigned i, j, limit, count;
74223 int do_clear = 0;
74224 char c;
74225 int error = 0;
74226
74227 - error = security_syslog(type);
74228 +#ifdef CONFIG_GRKERNSEC_DMESG
74229 + if (grsec_enable_dmesg &&
74230 + (!from_file || (from_file && type == SYSLOG_ACTION_OPEN)) &&
74231 + !capable(CAP_SYS_ADMIN))
74232 + return -EPERM;
74233 +#endif
74234 +
74235 + error = security_syslog(type, from_file);
74236 if (error)
74237 return error;
74238
74239 switch (type) {
74240 - case 0: /* Close log */
74241 + case SYSLOG_ACTION_CLOSE: /* Close log */
74242 break;
74243 - case 1: /* Open log */
74244 + case SYSLOG_ACTION_OPEN: /* Open log */
74245 break;
74246 - case 2: /* Read from log */
74247 + case SYSLOG_ACTION_READ: /* Read from log */
74248 error = -EINVAL;
74249 if (!buf || len < 0)
74250 goto out;
74251 @@ -318,10 +311,12 @@ int do_syslog(int type, char __user *buf, int len)
74252 if (!error)
74253 error = i;
74254 break;
74255 - case 4: /* Read/clear last kernel messages */
74256 + /* Read/clear last kernel messages */
74257 + case SYSLOG_ACTION_READ_CLEAR:
74258 do_clear = 1;
74259 /* FALL THRU */
74260 - case 3: /* Read last kernel messages */
74261 + /* Read last kernel messages */
74262 + case SYSLOG_ACTION_READ_ALL:
74263 error = -EINVAL;
74264 if (!buf || len < 0)
74265 goto out;
74266 @@ -374,21 +369,25 @@ int do_syslog(int type, char __user *buf, int len)
74267 }
74268 }
74269 break;
74270 - case 5: /* Clear ring buffer */
74271 + /* Clear ring buffer */
74272 + case SYSLOG_ACTION_CLEAR:
74273 logged_chars = 0;
74274 break;
74275 - case 6: /* Disable logging to console */
74276 + /* Disable logging to console */
74277 + case SYSLOG_ACTION_CONSOLE_OFF:
74278 if (saved_console_loglevel == -1)
74279 saved_console_loglevel = console_loglevel;
74280 console_loglevel = minimum_console_loglevel;
74281 break;
74282 - case 7: /* Enable logging to console */
74283 + /* Enable logging to console */
74284 + case SYSLOG_ACTION_CONSOLE_ON:
74285 if (saved_console_loglevel != -1) {
74286 console_loglevel = saved_console_loglevel;
74287 saved_console_loglevel = -1;
74288 }
74289 break;
74290 - case 8: /* Set level of messages printed to console */
74291 + /* Set level of messages printed to console */
74292 + case SYSLOG_ACTION_CONSOLE_LEVEL:
74293 error = -EINVAL;
74294 if (len < 1 || len > 8)
74295 goto out;
74296 @@ -399,10 +398,12 @@ int do_syslog(int type, char __user *buf, int len)
74297 saved_console_loglevel = -1;
74298 error = 0;
74299 break;
74300 - case 9: /* Number of chars in the log buffer */
74301 + /* Number of chars in the log buffer */
74302 + case SYSLOG_ACTION_SIZE_UNREAD:
74303 error = log_end - log_start;
74304 break;
74305 - case 10: /* Size of the log buffer */
74306 + /* Size of the log buffer */
74307 + case SYSLOG_ACTION_SIZE_BUFFER:
74308 error = log_buf_len;
74309 break;
74310 default:
74311 @@ -415,7 +416,7 @@ out:
74312
74313 SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
74314 {
74315 - return do_syslog(type, buf, len);
74316 + return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
74317 }
74318
74319 /*
74320 diff --git a/kernel/profile.c b/kernel/profile.c
74321 index dfadc5b..7f59404 100644
74322 --- a/kernel/profile.c
74323 +++ b/kernel/profile.c
74324 @@ -39,7 +39,7 @@ struct profile_hit {
74325 /* Oprofile timer tick hook */
74326 static int (*timer_hook)(struct pt_regs *) __read_mostly;
74327
74328 -static atomic_t *prof_buffer;
74329 +static atomic_unchecked_t *prof_buffer;
74330 static unsigned long prof_len, prof_shift;
74331
74332 int prof_on __read_mostly;
74333 @@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
74334 hits[i].pc = 0;
74335 continue;
74336 }
74337 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
74338 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
74339 hits[i].hits = hits[i].pc = 0;
74340 }
74341 }
74342 @@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
74343 * Add the current hit(s) and flush the write-queue out
74344 * to the global buffer:
74345 */
74346 - atomic_add(nr_hits, &prof_buffer[pc]);
74347 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
74348 for (i = 0; i < NR_PROFILE_HIT; ++i) {
74349 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
74350 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
74351 hits[i].pc = hits[i].hits = 0;
74352 }
74353 out:
74354 @@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
74355 if (prof_on != type || !prof_buffer)
74356 return;
74357 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
74358 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
74359 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
74360 }
74361 #endif /* !CONFIG_SMP */
74362 EXPORT_SYMBOL_GPL(profile_hits);
74363 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
74364 return -EFAULT;
74365 buf++; p++; count--; read++;
74366 }
74367 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
74368 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
74369 if (copy_to_user(buf, (void *)pnt, count))
74370 return -EFAULT;
74371 read += count;
74372 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
74373 }
74374 #endif
74375 profile_discard_flip_buffers();
74376 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
74377 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
74378 return count;
74379 }
74380
74381 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
74382 index 05625f6..733bf70 100644
74383 --- a/kernel/ptrace.c
74384 +++ b/kernel/ptrace.c
74385 @@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_struct *child, int kill)
74386 return ret;
74387 }
74388
74389 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
74390 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
74391 + unsigned int log)
74392 {
74393 const struct cred *cred = current_cred(), *tcred;
74394
74395 @@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
74396 cred->gid != tcred->egid ||
74397 cred->gid != tcred->sgid ||
74398 cred->gid != tcred->gid) &&
74399 - !capable(CAP_SYS_PTRACE)) {
74400 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
74401 + (log && !capable(CAP_SYS_PTRACE)))
74402 + ) {
74403 rcu_read_unlock();
74404 return -EPERM;
74405 }
74406 @@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
74407 smp_rmb();
74408 if (task->mm)
74409 dumpable = get_dumpable(task->mm);
74410 - if (!dumpable && !capable(CAP_SYS_PTRACE))
74411 + if (!dumpable &&
74412 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
74413 + (log && !capable(CAP_SYS_PTRACE))))
74414 return -EPERM;
74415
74416 return security_ptrace_access_check(task, mode);
74417 @@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
74418 {
74419 int err;
74420 task_lock(task);
74421 - err = __ptrace_may_access(task, mode);
74422 + err = __ptrace_may_access(task, mode, 0);
74423 + task_unlock(task);
74424 + return !err;
74425 +}
74426 +
74427 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
74428 +{
74429 + int err;
74430 + task_lock(task);
74431 + err = __ptrace_may_access(task, mode, 1);
74432 task_unlock(task);
74433 return !err;
74434 }
74435 @@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *task)
74436 goto out;
74437
74438 task_lock(task);
74439 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
74440 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
74441 task_unlock(task);
74442 if (retval)
74443 goto unlock_creds;
74444 @@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *task)
74445 goto unlock_tasklist;
74446
74447 task->ptrace = PT_PTRACED;
74448 - if (capable(CAP_SYS_PTRACE))
74449 + if (capable_nolog(CAP_SYS_PTRACE))
74450 task->ptrace |= PT_PTRACE_CAP;
74451
74452 __ptrace_link(task, current);
74453 @@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
74454 {
74455 int copied = 0;
74456
74457 + pax_track_stack();
74458 +
74459 while (len > 0) {
74460 char buf[128];
74461 int this_len, retval;
74462 @@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
74463 {
74464 int copied = 0;
74465
74466 + pax_track_stack();
74467 +
74468 while (len > 0) {
74469 char buf[128];
74470 int this_len, retval;
74471 @@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *child, long request,
74472 int ret = -EIO;
74473 siginfo_t siginfo;
74474
74475 + pax_track_stack();
74476 +
74477 switch (request) {
74478 case PTRACE_PEEKTEXT:
74479 case PTRACE_PEEKDATA:
74480 @@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *child, long request,
74481 ret = ptrace_setoptions(child, data);
74482 break;
74483 case PTRACE_GETEVENTMSG:
74484 - ret = put_user(child->ptrace_message, (unsigned long __user *) data);
74485 + ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
74486 break;
74487
74488 case PTRACE_GETSIGINFO:
74489 ret = ptrace_getsiginfo(child, &siginfo);
74490 if (!ret)
74491 - ret = copy_siginfo_to_user((siginfo_t __user *) data,
74492 + ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
74493 &siginfo);
74494 break;
74495
74496 case PTRACE_SETSIGINFO:
74497 - if (copy_from_user(&siginfo, (siginfo_t __user *) data,
74498 + if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
74499 sizeof siginfo))
74500 ret = -EFAULT;
74501 else
74502 @@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
74503 goto out;
74504 }
74505
74506 + if (gr_handle_ptrace(child, request)) {
74507 + ret = -EPERM;
74508 + goto out_put_task_struct;
74509 + }
74510 +
74511 if (request == PTRACE_ATTACH) {
74512 ret = ptrace_attach(child);
74513 /*
74514 * Some architectures need to do book-keeping after
74515 * a ptrace attach.
74516 */
74517 - if (!ret)
74518 + if (!ret) {
74519 arch_ptrace_attach(child);
74520 + gr_audit_ptrace(child);
74521 + }
74522 goto out_put_task_struct;
74523 }
74524
74525 @@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
74526 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
74527 if (copied != sizeof(tmp))
74528 return -EIO;
74529 - return put_user(tmp, (unsigned long __user *)data);
74530 + return put_user(tmp, (__force unsigned long __user *)data);
74531 }
74532
74533 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
74534 @@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
74535 siginfo_t siginfo;
74536 int ret;
74537
74538 + pax_track_stack();
74539 +
74540 switch (request) {
74541 case PTRACE_PEEKTEXT:
74542 case PTRACE_PEEKDATA:
74543 @@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
74544 goto out;
74545 }
74546
74547 + if (gr_handle_ptrace(child, request)) {
74548 + ret = -EPERM;
74549 + goto out_put_task_struct;
74550 + }
74551 +
74552 if (request == PTRACE_ATTACH) {
74553 ret = ptrace_attach(child);
74554 /*
74555 * Some architectures need to do book-keeping after
74556 * a ptrace attach.
74557 */
74558 - if (!ret)
74559 + if (!ret) {
74560 arch_ptrace_attach(child);
74561 + gr_audit_ptrace(child);
74562 + }
74563 goto out_put_task_struct;
74564 }
74565
74566 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
74567 index 697c0a0..2402696 100644
74568 --- a/kernel/rcutorture.c
74569 +++ b/kernel/rcutorture.c
74570 @@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
74571 { 0 };
74572 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
74573 { 0 };
74574 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
74575 -static atomic_t n_rcu_torture_alloc;
74576 -static atomic_t n_rcu_torture_alloc_fail;
74577 -static atomic_t n_rcu_torture_free;
74578 -static atomic_t n_rcu_torture_mberror;
74579 -static atomic_t n_rcu_torture_error;
74580 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
74581 +static atomic_unchecked_t n_rcu_torture_alloc;
74582 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
74583 +static atomic_unchecked_t n_rcu_torture_free;
74584 +static atomic_unchecked_t n_rcu_torture_mberror;
74585 +static atomic_unchecked_t n_rcu_torture_error;
74586 static long n_rcu_torture_timers;
74587 static struct list_head rcu_torture_removed;
74588 static cpumask_var_t shuffle_tmp_mask;
74589 @@ -187,11 +187,11 @@ rcu_torture_alloc(void)
74590
74591 spin_lock_bh(&rcu_torture_lock);
74592 if (list_empty(&rcu_torture_freelist)) {
74593 - atomic_inc(&n_rcu_torture_alloc_fail);
74594 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
74595 spin_unlock_bh(&rcu_torture_lock);
74596 return NULL;
74597 }
74598 - atomic_inc(&n_rcu_torture_alloc);
74599 + atomic_inc_unchecked(&n_rcu_torture_alloc);
74600 p = rcu_torture_freelist.next;
74601 list_del_init(p);
74602 spin_unlock_bh(&rcu_torture_lock);
74603 @@ -204,7 +204,7 @@ rcu_torture_alloc(void)
74604 static void
74605 rcu_torture_free(struct rcu_torture *p)
74606 {
74607 - atomic_inc(&n_rcu_torture_free);
74608 + atomic_inc_unchecked(&n_rcu_torture_free);
74609 spin_lock_bh(&rcu_torture_lock);
74610 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
74611 spin_unlock_bh(&rcu_torture_lock);
74612 @@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
74613 i = rp->rtort_pipe_count;
74614 if (i > RCU_TORTURE_PIPE_LEN)
74615 i = RCU_TORTURE_PIPE_LEN;
74616 - atomic_inc(&rcu_torture_wcount[i]);
74617 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
74618 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
74619 rp->rtort_mbtest = 0;
74620 rcu_torture_free(rp);
74621 @@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
74622 i = rp->rtort_pipe_count;
74623 if (i > RCU_TORTURE_PIPE_LEN)
74624 i = RCU_TORTURE_PIPE_LEN;
74625 - atomic_inc(&rcu_torture_wcount[i]);
74626 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
74627 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
74628 rp->rtort_mbtest = 0;
74629 list_del(&rp->rtort_free);
74630 @@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
74631 i = old_rp->rtort_pipe_count;
74632 if (i > RCU_TORTURE_PIPE_LEN)
74633 i = RCU_TORTURE_PIPE_LEN;
74634 - atomic_inc(&rcu_torture_wcount[i]);
74635 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
74636 old_rp->rtort_pipe_count++;
74637 cur_ops->deferred_free(old_rp);
74638 }
74639 @@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned long unused)
74640 return;
74641 }
74642 if (p->rtort_mbtest == 0)
74643 - atomic_inc(&n_rcu_torture_mberror);
74644 + atomic_inc_unchecked(&n_rcu_torture_mberror);
74645 spin_lock(&rand_lock);
74646 cur_ops->read_delay(&rand);
74647 n_rcu_torture_timers++;
74648 @@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
74649 continue;
74650 }
74651 if (p->rtort_mbtest == 0)
74652 - atomic_inc(&n_rcu_torture_mberror);
74653 + atomic_inc_unchecked(&n_rcu_torture_mberror);
74654 cur_ops->read_delay(&rand);
74655 preempt_disable();
74656 pipe_count = p->rtort_pipe_count;
74657 @@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
74658 rcu_torture_current,
74659 rcu_torture_current_version,
74660 list_empty(&rcu_torture_freelist),
74661 - atomic_read(&n_rcu_torture_alloc),
74662 - atomic_read(&n_rcu_torture_alloc_fail),
74663 - atomic_read(&n_rcu_torture_free),
74664 - atomic_read(&n_rcu_torture_mberror),
74665 + atomic_read_unchecked(&n_rcu_torture_alloc),
74666 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
74667 + atomic_read_unchecked(&n_rcu_torture_free),
74668 + atomic_read_unchecked(&n_rcu_torture_mberror),
74669 n_rcu_torture_timers);
74670 - if (atomic_read(&n_rcu_torture_mberror) != 0)
74671 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
74672 cnt += sprintf(&page[cnt], " !!!");
74673 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
74674 if (i > 1) {
74675 cnt += sprintf(&page[cnt], "!!! ");
74676 - atomic_inc(&n_rcu_torture_error);
74677 + atomic_inc_unchecked(&n_rcu_torture_error);
74678 WARN_ON_ONCE(1);
74679 }
74680 cnt += sprintf(&page[cnt], "Reader Pipe: ");
74681 @@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
74682 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
74683 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
74684 cnt += sprintf(&page[cnt], " %d",
74685 - atomic_read(&rcu_torture_wcount[i]));
74686 + atomic_read_unchecked(&rcu_torture_wcount[i]));
74687 }
74688 cnt += sprintf(&page[cnt], "\n");
74689 if (cur_ops->stats)
74690 @@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
74691
74692 if (cur_ops->cleanup)
74693 cur_ops->cleanup();
74694 - if (atomic_read(&n_rcu_torture_error))
74695 + if (atomic_read_unchecked(&n_rcu_torture_error))
74696 rcu_torture_print_module_parms("End of test: FAILURE");
74697 else
74698 rcu_torture_print_module_parms("End of test: SUCCESS");
74699 @@ -1138,13 +1138,13 @@ rcu_torture_init(void)
74700
74701 rcu_torture_current = NULL;
74702 rcu_torture_current_version = 0;
74703 - atomic_set(&n_rcu_torture_alloc, 0);
74704 - atomic_set(&n_rcu_torture_alloc_fail, 0);
74705 - atomic_set(&n_rcu_torture_free, 0);
74706 - atomic_set(&n_rcu_torture_mberror, 0);
74707 - atomic_set(&n_rcu_torture_error, 0);
74708 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
74709 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
74710 + atomic_set_unchecked(&n_rcu_torture_free, 0);
74711 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
74712 + atomic_set_unchecked(&n_rcu_torture_error, 0);
74713 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
74714 - atomic_set(&rcu_torture_wcount[i], 0);
74715 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
74716 for_each_possible_cpu(cpu) {
74717 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
74718 per_cpu(rcu_torture_count, cpu)[i] = 0;
74719 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
74720 index 683c4f3..97f54c6 100644
74721 --- a/kernel/rcutree.c
74722 +++ b/kernel/rcutree.c
74723 @@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
74724 /*
74725 * Do softirq processing for the current CPU.
74726 */
74727 -static void rcu_process_callbacks(struct softirq_action *unused)
74728 +static void rcu_process_callbacks(void)
74729 {
74730 /*
74731 * Memory references from any prior RCU read-side critical sections
74732 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
74733 index c03edf7..ac1b341 100644
74734 --- a/kernel/rcutree_plugin.h
74735 +++ b/kernel/rcutree_plugin.h
74736 @@ -145,7 +145,7 @@ static void rcu_preempt_note_context_switch(int cpu)
74737 */
74738 void __rcu_read_lock(void)
74739 {
74740 - ACCESS_ONCE(current->rcu_read_lock_nesting)++;
74741 + ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
74742 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
74743 }
74744 EXPORT_SYMBOL_GPL(__rcu_read_lock);
74745 @@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
74746 struct task_struct *t = current;
74747
74748 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
74749 - if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
74750 + if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
74751 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
74752 rcu_read_unlock_special(t);
74753 }
74754 diff --git a/kernel/relay.c b/kernel/relay.c
74755 index 760c262..a9fd241 100644
74756 --- a/kernel/relay.c
74757 +++ b/kernel/relay.c
74758 @@ -1222,7 +1222,7 @@ static int subbuf_splice_actor(struct file *in,
74759 unsigned int flags,
74760 int *nonpad_ret)
74761 {
74762 - unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
74763 + unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
74764 struct rchan_buf *rbuf = in->private_data;
74765 unsigned int subbuf_size = rbuf->chan->subbuf_size;
74766 uint64_t pos = (uint64_t) *ppos;
74767 @@ -1241,6 +1241,9 @@ static int subbuf_splice_actor(struct file *in,
74768 .ops = &relay_pipe_buf_ops,
74769 .spd_release = relay_page_release,
74770 };
74771 + ssize_t ret;
74772 +
74773 + pax_track_stack();
74774
74775 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
74776 return 0;
74777 diff --git a/kernel/resource.c b/kernel/resource.c
74778 index fb11a58..4e61ae1 100644
74779 --- a/kernel/resource.c
74780 +++ b/kernel/resource.c
74781 @@ -132,8 +132,18 @@ static const struct file_operations proc_iomem_operations = {
74782
74783 static int __init ioresources_init(void)
74784 {
74785 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
74786 +#ifdef CONFIG_GRKERNSEC_PROC_USER
74787 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
74788 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
74789 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
74790 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
74791 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
74792 +#endif
74793 +#else
74794 proc_create("ioports", 0, NULL, &proc_ioports_operations);
74795 proc_create("iomem", 0, NULL, &proc_iomem_operations);
74796 +#endif
74797 return 0;
74798 }
74799 __initcall(ioresources_init);
74800 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
74801 index a56f629..1fc4989 100644
74802 --- a/kernel/rtmutex-tester.c
74803 +++ b/kernel/rtmutex-tester.c
74804 @@ -21,7 +21,7 @@
74805 #define MAX_RT_TEST_MUTEXES 8
74806
74807 static spinlock_t rttest_lock;
74808 -static atomic_t rttest_event;
74809 +static atomic_unchecked_t rttest_event;
74810
74811 struct test_thread_data {
74812 int opcode;
74813 @@ -64,7 +64,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
74814
74815 case RTTEST_LOCKCONT:
74816 td->mutexes[td->opdata] = 1;
74817 - td->event = atomic_add_return(1, &rttest_event);
74818 + td->event = atomic_add_return_unchecked(1, &rttest_event);
74819 return 0;
74820
74821 case RTTEST_RESET:
74822 @@ -82,7 +82,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
74823 return 0;
74824
74825 case RTTEST_RESETEVENT:
74826 - atomic_set(&rttest_event, 0);
74827 + atomic_set_unchecked(&rttest_event, 0);
74828 return 0;
74829
74830 default:
74831 @@ -99,9 +99,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
74832 return ret;
74833
74834 td->mutexes[id] = 1;
74835 - td->event = atomic_add_return(1, &rttest_event);
74836 + td->event = atomic_add_return_unchecked(1, &rttest_event);
74837 rt_mutex_lock(&mutexes[id]);
74838 - td->event = atomic_add_return(1, &rttest_event);
74839 + td->event = atomic_add_return_unchecked(1, &rttest_event);
74840 td->mutexes[id] = 4;
74841 return 0;
74842
74843 @@ -112,9 +112,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
74844 return ret;
74845
74846 td->mutexes[id] = 1;
74847 - td->event = atomic_add_return(1, &rttest_event);
74848 + td->event = atomic_add_return_unchecked(1, &rttest_event);
74849 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
74850 - td->event = atomic_add_return(1, &rttest_event);
74851 + td->event = atomic_add_return_unchecked(1, &rttest_event);
74852 td->mutexes[id] = ret ? 0 : 4;
74853 return ret ? -EINTR : 0;
74854
74855 @@ -123,9 +123,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
74856 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
74857 return ret;
74858
74859 - td->event = atomic_add_return(1, &rttest_event);
74860 + td->event = atomic_add_return_unchecked(1, &rttest_event);
74861 rt_mutex_unlock(&mutexes[id]);
74862 - td->event = atomic_add_return(1, &rttest_event);
74863 + td->event = atomic_add_return_unchecked(1, &rttest_event);
74864 td->mutexes[id] = 0;
74865 return 0;
74866
74867 @@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
74868 break;
74869
74870 td->mutexes[dat] = 2;
74871 - td->event = atomic_add_return(1, &rttest_event);
74872 + td->event = atomic_add_return_unchecked(1, &rttest_event);
74873 break;
74874
74875 case RTTEST_LOCKBKL:
74876 @@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
74877 return;
74878
74879 td->mutexes[dat] = 3;
74880 - td->event = atomic_add_return(1, &rttest_event);
74881 + td->event = atomic_add_return_unchecked(1, &rttest_event);
74882 break;
74883
74884 case RTTEST_LOCKNOWAIT:
74885 @@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
74886 return;
74887
74888 td->mutexes[dat] = 1;
74889 - td->event = atomic_add_return(1, &rttest_event);
74890 + td->event = atomic_add_return_unchecked(1, &rttest_event);
74891 return;
74892
74893 case RTTEST_LOCKBKL:
74894 diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
74895 index 29bd4ba..8c5de90 100644
74896 --- a/kernel/rtmutex.c
74897 +++ b/kernel/rtmutex.c
74898 @@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
74899 */
74900 spin_lock_irqsave(&pendowner->pi_lock, flags);
74901
74902 - WARN_ON(!pendowner->pi_blocked_on);
74903 + BUG_ON(!pendowner->pi_blocked_on);
74904 WARN_ON(pendowner->pi_blocked_on != waiter);
74905 WARN_ON(pendowner->pi_blocked_on->lock != lock);
74906
74907 diff --git a/kernel/sched.c b/kernel/sched.c
74908 index 0591df8..e3af3a4 100644
74909 --- a/kernel/sched.c
74910 +++ b/kernel/sched.c
74911 @@ -5043,7 +5043,7 @@ out:
74912 * In CONFIG_NO_HZ case, the idle load balance owner will do the
74913 * rebalancing for all the cpus for whom scheduler ticks are stopped.
74914 */
74915 -static void run_rebalance_domains(struct softirq_action *h)
74916 +static void run_rebalance_domains(void)
74917 {
74918 int this_cpu = smp_processor_id();
74919 struct rq *this_rq = cpu_rq(this_cpu);
74920 @@ -5690,6 +5690,19 @@ pick_next_task(struct rq *rq)
74921 }
74922 }
74923
74924 +#ifdef CONFIG_GRKERNSEC_SETXID
74925 +extern void gr_delayed_cred_worker(void);
74926 +static inline void gr_cred_schedule(void)
74927 +{
74928 + if (unlikely(current->delayed_cred))
74929 + gr_delayed_cred_worker();
74930 +}
74931 +#else
74932 +static inline void gr_cred_schedule(void)
74933 +{
74934 +}
74935 +#endif
74936 +
74937 /*
74938 * schedule() is the main scheduler function.
74939 */
74940 @@ -5700,6 +5713,8 @@ asmlinkage void __sched schedule(void)
74941 struct rq *rq;
74942 int cpu;
74943
74944 + pax_track_stack();
74945 +
74946 need_resched:
74947 preempt_disable();
74948 cpu = smp_processor_id();
74949 @@ -5713,6 +5728,8 @@ need_resched_nonpreemptible:
74950
74951 schedule_debug(prev);
74952
74953 + gr_cred_schedule();
74954 +
74955 if (sched_feat(HRTICK))
74956 hrtick_clear(rq);
74957
74958 @@ -5770,7 +5787,7 @@ EXPORT_SYMBOL(schedule);
74959 * Look out! "owner" is an entirely speculative pointer
74960 * access and not reliable.
74961 */
74962 -int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
74963 +int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
74964 {
74965 unsigned int cpu;
74966 struct rq *rq;
74967 @@ -5784,10 +5801,10 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
74968 * DEBUG_PAGEALLOC could have unmapped it if
74969 * the mutex owner just released it and exited.
74970 */
74971 - if (probe_kernel_address(&owner->cpu, cpu))
74972 + if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
74973 return 0;
74974 #else
74975 - cpu = owner->cpu;
74976 + cpu = task_thread_info(owner)->cpu;
74977 #endif
74978
74979 /*
74980 @@ -5816,7 +5833,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
74981 /*
74982 * Is that owner really running on that cpu?
74983 */
74984 - if (task_thread_info(rq->curr) != owner || need_resched())
74985 + if (rq->curr != owner || need_resched())
74986 return 0;
74987
74988 cpu_relax();
74989 @@ -6359,6 +6376,8 @@ int can_nice(const struct task_struct *p, const int nice)
74990 /* convert nice value [19,-20] to rlimit style value [1,40] */
74991 int nice_rlim = 20 - nice;
74992
74993 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
74994 +
74995 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
74996 capable(CAP_SYS_NICE));
74997 }
74998 @@ -6392,7 +6411,8 @@ SYSCALL_DEFINE1(nice, int, increment)
74999 if (nice > 19)
75000 nice = 19;
75001
75002 - if (increment < 0 && !can_nice(current, nice))
75003 + if (increment < 0 && (!can_nice(current, nice) ||
75004 + gr_handle_chroot_nice()))
75005 return -EPERM;
75006
75007 retval = security_task_setnice(current, nice);
75008 @@ -8774,7 +8794,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
75009 long power;
75010 int weight;
75011
75012 - WARN_ON(!sd || !sd->groups);
75013 + BUG_ON(!sd || !sd->groups);
75014
75015 if (cpu != group_first_cpu(sd->groups))
75016 return;
75017 diff --git a/kernel/signal.c b/kernel/signal.c
75018 index 2494827..cda80a0 100644
75019 --- a/kernel/signal.c
75020 +++ b/kernel/signal.c
75021 @@ -41,12 +41,12 @@
75022
75023 static struct kmem_cache *sigqueue_cachep;
75024
75025 -static void __user *sig_handler(struct task_struct *t, int sig)
75026 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
75027 {
75028 return t->sighand->action[sig - 1].sa.sa_handler;
75029 }
75030
75031 -static int sig_handler_ignored(void __user *handler, int sig)
75032 +static int sig_handler_ignored(__sighandler_t handler, int sig)
75033 {
75034 /* Is it explicitly or implicitly ignored? */
75035 return handler == SIG_IGN ||
75036 @@ -56,7 +56,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
75037 static int sig_task_ignored(struct task_struct *t, int sig,
75038 int from_ancestor_ns)
75039 {
75040 - void __user *handler;
75041 + __sighandler_t handler;
75042
75043 handler = sig_handler(t, sig);
75044
75045 @@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
75046 */
75047 user = get_uid(__task_cred(t)->user);
75048 atomic_inc(&user->sigpending);
75049 +
75050 + if (!override_rlimit)
75051 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
75052 if (override_rlimit ||
75053 atomic_read(&user->sigpending) <=
75054 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
75055 @@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
75056
75057 int unhandled_signal(struct task_struct *tsk, int sig)
75058 {
75059 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
75060 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
75061 if (is_global_init(tsk))
75062 return 1;
75063 if (handler != SIG_IGN && handler != SIG_DFL)
75064 @@ -627,6 +630,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
75065 }
75066 }
75067
75068 + /* allow glibc communication via tgkill to other threads in our
75069 + thread group */
75070 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
75071 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
75072 + && gr_handle_signal(t, sig))
75073 + return -EPERM;
75074 +
75075 return security_task_kill(t, info, sig, 0);
75076 }
75077
75078 @@ -968,7 +978,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
75079 return send_signal(sig, info, p, 1);
75080 }
75081
75082 -static int
75083 +int
75084 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
75085 {
75086 return send_signal(sig, info, t, 0);
75087 @@ -1005,6 +1015,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
75088 unsigned long int flags;
75089 int ret, blocked, ignored;
75090 struct k_sigaction *action;
75091 + int is_unhandled = 0;
75092
75093 spin_lock_irqsave(&t->sighand->siglock, flags);
75094 action = &t->sighand->action[sig-1];
75095 @@ -1019,9 +1030,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
75096 }
75097 if (action->sa.sa_handler == SIG_DFL)
75098 t->signal->flags &= ~SIGNAL_UNKILLABLE;
75099 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
75100 + is_unhandled = 1;
75101 ret = specific_send_sig_info(sig, info, t);
75102 spin_unlock_irqrestore(&t->sighand->siglock, flags);
75103
75104 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
75105 + normal operation */
75106 + if (is_unhandled) {
75107 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
75108 + gr_handle_crash(t, sig);
75109 + }
75110 +
75111 return ret;
75112 }
75113
75114 @@ -1081,8 +1101,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
75115 {
75116 int ret = check_kill_permission(sig, info, p);
75117
75118 - if (!ret && sig)
75119 + if (!ret && sig) {
75120 ret = do_send_sig_info(sig, info, p, true);
75121 + if (!ret)
75122 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
75123 + }
75124
75125 return ret;
75126 }
75127 @@ -1644,6 +1667,8 @@ void ptrace_notify(int exit_code)
75128 {
75129 siginfo_t info;
75130
75131 + pax_track_stack();
75132 +
75133 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
75134
75135 memset(&info, 0, sizeof info);
75136 @@ -2275,7 +2300,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
75137 int error = -ESRCH;
75138
75139 rcu_read_lock();
75140 - p = find_task_by_vpid(pid);
75141 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
75142 + /* allow glibc communication via tgkill to other threads in our
75143 + thread group */
75144 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
75145 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
75146 + p = find_task_by_vpid_unrestricted(pid);
75147 + else
75148 +#endif
75149 + p = find_task_by_vpid(pid);
75150 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
75151 error = check_kill_permission(sig, info, p);
75152 /*
75153 diff --git a/kernel/smp.c b/kernel/smp.c
75154 index aa9cff3..631a0de 100644
75155 --- a/kernel/smp.c
75156 +++ b/kernel/smp.c
75157 @@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void *), void *info, int wait)
75158 }
75159 EXPORT_SYMBOL(smp_call_function);
75160
75161 -void ipi_call_lock(void)
75162 +void ipi_call_lock(void) __acquires(call_function.lock)
75163 {
75164 spin_lock(&call_function.lock);
75165 }
75166
75167 -void ipi_call_unlock(void)
75168 +void ipi_call_unlock(void) __releases(call_function.lock)
75169 {
75170 spin_unlock(&call_function.lock);
75171 }
75172
75173 -void ipi_call_lock_irq(void)
75174 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
75175 {
75176 spin_lock_irq(&call_function.lock);
75177 }
75178
75179 -void ipi_call_unlock_irq(void)
75180 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
75181 {
75182 spin_unlock_irq(&call_function.lock);
75183 }
75184 diff --git a/kernel/softirq.c b/kernel/softirq.c
75185 index 04a0252..580c512 100644
75186 --- a/kernel/softirq.c
75187 +++ b/kernel/softirq.c
75188 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
75189
75190 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
75191
75192 -char *softirq_to_name[NR_SOFTIRQS] = {
75193 +const char * const softirq_to_name[NR_SOFTIRQS] = {
75194 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
75195 "TASKLET", "SCHED", "HRTIMER", "RCU"
75196 };
75197 @@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
75198
75199 asmlinkage void __do_softirq(void)
75200 {
75201 - struct softirq_action *h;
75202 + const struct softirq_action *h;
75203 __u32 pending;
75204 int max_restart = MAX_SOFTIRQ_RESTART;
75205 int cpu;
75206 @@ -233,7 +233,7 @@ restart:
75207 kstat_incr_softirqs_this_cpu(h - softirq_vec);
75208
75209 trace_softirq_entry(h, softirq_vec);
75210 - h->action(h);
75211 + h->action();
75212 trace_softirq_exit(h, softirq_vec);
75213 if (unlikely(prev_count != preempt_count())) {
75214 printk(KERN_ERR "huh, entered softirq %td %s %p"
75215 @@ -363,9 +363,11 @@ void raise_softirq(unsigned int nr)
75216 local_irq_restore(flags);
75217 }
75218
75219 -void open_softirq(int nr, void (*action)(struct softirq_action *))
75220 +void open_softirq(int nr, void (*action)(void))
75221 {
75222 - softirq_vec[nr].action = action;
75223 + pax_open_kernel();
75224 + *(void **)&softirq_vec[nr].action = action;
75225 + pax_close_kernel();
75226 }
75227
75228 /*
75229 @@ -419,7 +421,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
75230
75231 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
75232
75233 -static void tasklet_action(struct softirq_action *a)
75234 +static void tasklet_action(void)
75235 {
75236 struct tasklet_struct *list;
75237
75238 @@ -454,7 +456,7 @@ static void tasklet_action(struct softirq_action *a)
75239 }
75240 }
75241
75242 -static void tasklet_hi_action(struct softirq_action *a)
75243 +static void tasklet_hi_action(void)
75244 {
75245 struct tasklet_struct *list;
75246
75247 diff --git a/kernel/sys.c b/kernel/sys.c
75248 index e9512b1..f07185f 100644
75249 --- a/kernel/sys.c
75250 +++ b/kernel/sys.c
75251 @@ -133,6 +133,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
75252 error = -EACCES;
75253 goto out;
75254 }
75255 +
75256 + if (gr_handle_chroot_setpriority(p, niceval)) {
75257 + error = -EACCES;
75258 + goto out;
75259 + }
75260 +
75261 no_nice = security_task_setnice(p, niceval);
75262 if (no_nice) {
75263 error = no_nice;
75264 @@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
75265 !(user = find_user(who)))
75266 goto out_unlock; /* No processes for this user */
75267
75268 - do_each_thread(g, p)
75269 + do_each_thread(g, p) {
75270 if (__task_cred(p)->uid == who)
75271 error = set_one_prio(p, niceval, error);
75272 - while_each_thread(g, p);
75273 + } while_each_thread(g, p);
75274 if (who != cred->uid)
75275 free_uid(user); /* For find_user() */
75276 break;
75277 @@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
75278 !(user = find_user(who)))
75279 goto out_unlock; /* No processes for this user */
75280
75281 - do_each_thread(g, p)
75282 + do_each_thread(g, p) {
75283 if (__task_cred(p)->uid == who) {
75284 niceval = 20 - task_nice(p);
75285 if (niceval > retval)
75286 retval = niceval;
75287 }
75288 - while_each_thread(g, p);
75289 + } while_each_thread(g, p);
75290 if (who != cred->uid)
75291 free_uid(user); /* for find_user() */
75292 break;
75293 @@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
75294 goto error;
75295 }
75296
75297 + if (gr_check_group_change(new->gid, new->egid, -1))
75298 + goto error;
75299 +
75300 if (rgid != (gid_t) -1 ||
75301 (egid != (gid_t) -1 && egid != old->gid))
75302 new->sgid = new->egid;
75303 @@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
75304 goto error;
75305
75306 retval = -EPERM;
75307 +
75308 + if (gr_check_group_change(gid, gid, gid))
75309 + goto error;
75310 +
75311 if (capable(CAP_SETGID))
75312 new->gid = new->egid = new->sgid = new->fsgid = gid;
75313 else if (gid == old->gid || gid == old->sgid)
75314 @@ -559,7 +572,7 @@ error:
75315 /*
75316 * change the user struct in a credentials set to match the new UID
75317 */
75318 -static int set_user(struct cred *new)
75319 +int set_user(struct cred *new)
75320 {
75321 struct user_struct *new_user;
75322
75323 @@ -567,12 +580,19 @@ static int set_user(struct cred *new)
75324 if (!new_user)
75325 return -EAGAIN;
75326
75327 + /*
75328 + * We don't fail in case of NPROC limit excess here because too many
75329 + * poorly written programs don't check set*uid() return code, assuming
75330 + * it never fails if called by root. We may still enforce NPROC limit
75331 + * for programs doing set*uid()+execve() by harmlessly deferring the
75332 + * failure to the execve() stage.
75333 + */
75334 if (atomic_read(&new_user->processes) >=
75335 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
75336 - new_user != INIT_USER) {
75337 - free_uid(new_user);
75338 - return -EAGAIN;
75339 - }
75340 + new_user != INIT_USER)
75341 + current->flags |= PF_NPROC_EXCEEDED;
75342 + else
75343 + current->flags &= ~PF_NPROC_EXCEEDED;
75344
75345 free_uid(new->user);
75346 new->user = new_user;
75347 @@ -627,6 +647,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
75348 goto error;
75349 }
75350
75351 + if (gr_check_user_change(new->uid, new->euid, -1))
75352 + goto error;
75353 +
75354 if (new->uid != old->uid) {
75355 retval = set_user(new);
75356 if (retval < 0)
75357 @@ -675,6 +698,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
75358 goto error;
75359
75360 retval = -EPERM;
75361 +
75362 + if (gr_check_crash_uid(uid))
75363 + goto error;
75364 + if (gr_check_user_change(uid, uid, uid))
75365 + goto error;
75366 +
75367 if (capable(CAP_SETUID)) {
75368 new->suid = new->uid = uid;
75369 if (uid != old->uid) {
75370 @@ -732,6 +761,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
75371 goto error;
75372 }
75373
75374 + if (gr_check_user_change(ruid, euid, -1))
75375 + goto error;
75376 +
75377 if (ruid != (uid_t) -1) {
75378 new->uid = ruid;
75379 if (ruid != old->uid) {
75380 @@ -800,6 +832,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
75381 goto error;
75382 }
75383
75384 + if (gr_check_group_change(rgid, egid, -1))
75385 + goto error;
75386 +
75387 if (rgid != (gid_t) -1)
75388 new->gid = rgid;
75389 if (egid != (gid_t) -1)
75390 @@ -849,6 +884,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
75391 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
75392 goto error;
75393
75394 + if (gr_check_user_change(-1, -1, uid))
75395 + goto error;
75396 +
75397 if (uid == old->uid || uid == old->euid ||
75398 uid == old->suid || uid == old->fsuid ||
75399 capable(CAP_SETUID)) {
75400 @@ -889,6 +927,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
75401 if (gid == old->gid || gid == old->egid ||
75402 gid == old->sgid || gid == old->fsgid ||
75403 capable(CAP_SETGID)) {
75404 + if (gr_check_group_change(-1, -1, gid))
75405 + goto error;
75406 +
75407 if (gid != old_fsgid) {
75408 new->fsgid = gid;
75409 goto change_okay;
75410 @@ -1454,7 +1495,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
75411 error = get_dumpable(me->mm);
75412 break;
75413 case PR_SET_DUMPABLE:
75414 - if (arg2 < 0 || arg2 > 1) {
75415 + if (arg2 > 1) {
75416 error = -EINVAL;
75417 break;
75418 }
75419 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
75420 index b8bd058..ab6a76be 100644
75421 --- a/kernel/sysctl.c
75422 +++ b/kernel/sysctl.c
75423 @@ -63,6 +63,13 @@
75424 static int deprecated_sysctl_warning(struct __sysctl_args *args);
75425
75426 #if defined(CONFIG_SYSCTL)
75427 +#include <linux/grsecurity.h>
75428 +#include <linux/grinternal.h>
75429 +
75430 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
75431 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
75432 + const int op);
75433 +extern int gr_handle_chroot_sysctl(const int op);
75434
75435 /* External variables not in a header file. */
75436 extern int C_A_D;
75437 @@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_table *table, int write,
75438 static int proc_taint(struct ctl_table *table, int write,
75439 void __user *buffer, size_t *lenp, loff_t *ppos);
75440 #endif
75441 +extern ctl_table grsecurity_table[];
75442
75443 static struct ctl_table root_table[];
75444 static struct ctl_table_root sysctl_table_root;
75445 @@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
75446 int sysctl_legacy_va_layout;
75447 #endif
75448
75449 +#ifdef CONFIG_PAX_SOFTMODE
75450 +static ctl_table pax_table[] = {
75451 + {
75452 + .ctl_name = CTL_UNNUMBERED,
75453 + .procname = "softmode",
75454 + .data = &pax_softmode,
75455 + .maxlen = sizeof(unsigned int),
75456 + .mode = 0600,
75457 + .proc_handler = &proc_dointvec,
75458 + },
75459 +
75460 + { .ctl_name = 0 }
75461 +};
75462 +#endif
75463 +
75464 extern int prove_locking;
75465 extern int lock_stat;
75466
75467 @@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
75468 #endif
75469
75470 static struct ctl_table kern_table[] = {
75471 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
75472 + {
75473 + .ctl_name = CTL_UNNUMBERED,
75474 + .procname = "grsecurity",
75475 + .mode = 0500,
75476 + .child = grsecurity_table,
75477 + },
75478 +#endif
75479 +
75480 +#ifdef CONFIG_PAX_SOFTMODE
75481 + {
75482 + .ctl_name = CTL_UNNUMBERED,
75483 + .procname = "pax",
75484 + .mode = 0500,
75485 + .child = pax_table,
75486 + },
75487 +#endif
75488 +
75489 {
75490 .ctl_name = CTL_UNNUMBERED,
75491 .procname = "sched_child_runs_first",
75492 @@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
75493 .data = &modprobe_path,
75494 .maxlen = KMOD_PATH_LEN,
75495 .mode = 0644,
75496 - .proc_handler = &proc_dostring,
75497 - .strategy = &sysctl_string,
75498 + .proc_handler = &proc_dostring_modpriv,
75499 + .strategy = &sysctl_string_modpriv,
75500 },
75501 {
75502 .ctl_name = CTL_UNNUMBERED,
75503 @@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
75504 .mode = 0644,
75505 .proc_handler = &proc_dointvec
75506 },
75507 + {
75508 + .procname = "heap_stack_gap",
75509 + .data = &sysctl_heap_stack_gap,
75510 + .maxlen = sizeof(sysctl_heap_stack_gap),
75511 + .mode = 0644,
75512 + .proc_handler = proc_doulongvec_minmax,
75513 + },
75514 #else
75515 {
75516 .ctl_name = CTL_UNNUMBERED,
75517 @@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl_table_root *root,
75518 return 0;
75519 }
75520
75521 +static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
75522 +
75523 static int parse_table(int __user *name, int nlen,
75524 void __user *oldval, size_t __user *oldlenp,
75525 void __user *newval, size_t newlen,
75526 @@ -1821,7 +1871,7 @@ repeat:
75527 if (n == table->ctl_name) {
75528 int error;
75529 if (table->child) {
75530 - if (sysctl_perm(root, table, MAY_EXEC))
75531 + if (sysctl_perm_nochk(root, table, MAY_EXEC))
75532 return -EPERM;
75533 name++;
75534 nlen--;
75535 @@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
75536 int error;
75537 int mode;
75538
75539 + if (table->parent != NULL && table->parent->procname != NULL &&
75540 + table->procname != NULL &&
75541 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
75542 + return -EACCES;
75543 + if (gr_handle_chroot_sysctl(op))
75544 + return -EACCES;
75545 + error = gr_handle_sysctl(table, op);
75546 + if (error)
75547 + return error;
75548 +
75549 + error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
75550 + if (error)
75551 + return error;
75552 +
75553 + if (root->permissions)
75554 + mode = root->permissions(root, current->nsproxy, table);
75555 + else
75556 + mode = table->mode;
75557 +
75558 + return test_perm(mode, op);
75559 +}
75560 +
75561 +int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
75562 +{
75563 + int error;
75564 + int mode;
75565 +
75566 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
75567 if (error)
75568 return error;
75569 @@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *table, int write,
75570 buffer, lenp, ppos);
75571 }
75572
75573 +int proc_dostring_modpriv(struct ctl_table *table, int write,
75574 + void __user *buffer, size_t *lenp, loff_t *ppos)
75575 +{
75576 + if (write && !capable(CAP_SYS_MODULE))
75577 + return -EPERM;
75578 +
75579 + return _proc_do_string(table->data, table->maxlen, write,
75580 + buffer, lenp, ppos);
75581 +}
75582 +
75583
75584 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
75585 int *valp,
75586 @@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
75587 vleft = table->maxlen / sizeof(unsigned long);
75588 left = *lenp;
75589
75590 - for (; left && vleft--; i++, min++, max++, first=0) {
75591 + for (; left && vleft--; i++, first=0) {
75592 if (write) {
75593 while (left) {
75594 char c;
75595 @@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *table, int write,
75596 return -ENOSYS;
75597 }
75598
75599 +int proc_dostring_modpriv(struct ctl_table *table, int write,
75600 + void __user *buffer, size_t *lenp, loff_t *ppos)
75601 +{
75602 + return -ENOSYS;
75603 +}
75604 +
75605 int proc_dointvec(struct ctl_table *table, int write,
75606 void __user *buffer, size_t *lenp, loff_t *ppos)
75607 {
75608 @@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *table,
75609 return 1;
75610 }
75611
75612 +int sysctl_string_modpriv(struct ctl_table *table,
75613 + void __user *oldval, size_t __user *oldlenp,
75614 + void __user *newval, size_t newlen)
75615 +{
75616 + if (newval && newlen && !capable(CAP_SYS_MODULE))
75617 + return -EPERM;
75618 +
75619 + return sysctl_string(table, oldval, oldlenp, newval, newlen);
75620 +}
75621 +
75622 /*
75623 * This function makes sure that all of the integers in the vector
75624 * are between the minimum and maximum values given in the arrays
75625 @@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *table,
75626 return -ENOSYS;
75627 }
75628
75629 +int sysctl_string_modpriv(struct ctl_table *table,
75630 + void __user *oldval, size_t __user *oldlenp,
75631 + void __user *newval, size_t newlen)
75632 +{
75633 + return -ENOSYS;
75634 +}
75635 +
75636 int sysctl_intvec(struct ctl_table *table,
75637 void __user *oldval, size_t __user *oldlenp,
75638 void __user *newval, size_t newlen)
75639 @@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
75640 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
75641 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
75642 EXPORT_SYMBOL(proc_dostring);
75643 +EXPORT_SYMBOL(proc_dostring_modpriv);
75644 EXPORT_SYMBOL(proc_doulongvec_minmax);
75645 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
75646 EXPORT_SYMBOL(register_sysctl_table);
75647 @@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
75648 EXPORT_SYMBOL(sysctl_jiffies);
75649 EXPORT_SYMBOL(sysctl_ms_jiffies);
75650 EXPORT_SYMBOL(sysctl_string);
75651 +EXPORT_SYMBOL(sysctl_string_modpriv);
75652 EXPORT_SYMBOL(sysctl_data);
75653 EXPORT_SYMBOL(unregister_sysctl_table);
75654 diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
75655 index 469193c..ea3ecb2 100644
75656 --- a/kernel/sysctl_check.c
75657 +++ b/kernel/sysctl_check.c
75658 @@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
75659 } else {
75660 if ((table->strategy == sysctl_data) ||
75661 (table->strategy == sysctl_string) ||
75662 + (table->strategy == sysctl_string_modpriv) ||
75663 (table->strategy == sysctl_intvec) ||
75664 (table->strategy == sysctl_jiffies) ||
75665 (table->strategy == sysctl_ms_jiffies) ||
75666 (table->proc_handler == proc_dostring) ||
75667 + (table->proc_handler == proc_dostring_modpriv) ||
75668 (table->proc_handler == proc_dointvec) ||
75669 (table->proc_handler == proc_dointvec_minmax) ||
75670 (table->proc_handler == proc_dointvec_jiffies) ||
75671 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
75672 index a4ef542..798bcd7 100644
75673 --- a/kernel/taskstats.c
75674 +++ b/kernel/taskstats.c
75675 @@ -26,9 +26,12 @@
75676 #include <linux/cgroup.h>
75677 #include <linux/fs.h>
75678 #include <linux/file.h>
75679 +#include <linux/grsecurity.h>
75680 #include <net/genetlink.h>
75681 #include <asm/atomic.h>
75682
75683 +extern int gr_is_taskstats_denied(int pid);
75684 +
75685 /*
75686 * Maximum length of a cpumask that can be specified in
75687 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
75688 @@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
75689 size_t size;
75690 cpumask_var_t mask;
75691
75692 + if (gr_is_taskstats_denied(current->pid))
75693 + return -EACCES;
75694 +
75695 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
75696 return -ENOMEM;
75697
75698 diff --git a/kernel/time.c b/kernel/time.c
75699 index 33df60e..ca768bd 100644
75700 --- a/kernel/time.c
75701 +++ b/kernel/time.c
75702 @@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
75703 return error;
75704
75705 if (tz) {
75706 + /* we log in do_settimeofday called below, so don't log twice
75707 + */
75708 + if (!tv)
75709 + gr_log_timechange();
75710 +
75711 /* SMP safe, global irq locking makes it work. */
75712 sys_tz = *tz;
75713 update_vsyscall_tz();
75714 @@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
75715 * Avoid unnecessary multiplications/divisions in the
75716 * two most common HZ cases:
75717 */
75718 -unsigned int inline jiffies_to_msecs(const unsigned long j)
75719 +inline unsigned int jiffies_to_msecs(const unsigned long j)
75720 {
75721 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
75722 return (MSEC_PER_SEC / HZ) * j;
75723 @@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(const unsigned long j)
75724 }
75725 EXPORT_SYMBOL(jiffies_to_msecs);
75726
75727 -unsigned int inline jiffies_to_usecs(const unsigned long j)
75728 +inline unsigned int jiffies_to_usecs(const unsigned long j)
75729 {
75730 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
75731 return (USEC_PER_SEC / HZ) * j;
75732 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
75733 index 57b953f..06f149f 100644
75734 --- a/kernel/time/tick-broadcast.c
75735 +++ b/kernel/time/tick-broadcast.c
75736 @@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
75737 * then clear the broadcast bit.
75738 */
75739 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
75740 - int cpu = smp_processor_id();
75741 + cpu = smp_processor_id();
75742
75743 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
75744 tick_broadcast_clear_oneshot(cpu);
75745 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
75746 index 4a71cff..ffb5548 100644
75747 --- a/kernel/time/timekeeping.c
75748 +++ b/kernel/time/timekeeping.c
75749 @@ -14,6 +14,7 @@
75750 #include <linux/init.h>
75751 #include <linux/mm.h>
75752 #include <linux/sched.h>
75753 +#include <linux/grsecurity.h>
75754 #include <linux/sysdev.h>
75755 #include <linux/clocksource.h>
75756 #include <linux/jiffies.h>
75757 @@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
75758 */
75759 struct timespec ts = xtime;
75760 timespec_add_ns(&ts, nsec);
75761 - ACCESS_ONCE(xtime_cache) = ts;
75762 + ACCESS_ONCE_RW(xtime_cache) = ts;
75763 }
75764
75765 /* must hold xtime_lock */
75766 @@ -337,6 +338,8 @@ int do_settimeofday(struct timespec *tv)
75767 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
75768 return -EINVAL;
75769
75770 + gr_log_timechange();
75771 +
75772 write_seqlock_irqsave(&xtime_lock, flags);
75773
75774 timekeeping_forward_now();
75775 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
75776 index 54c0dda..e9095d9 100644
75777 --- a/kernel/time/timer_list.c
75778 +++ b/kernel/time/timer_list.c
75779 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
75780
75781 static void print_name_offset(struct seq_file *m, void *sym)
75782 {
75783 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75784 + SEQ_printf(m, "<%p>", NULL);
75785 +#else
75786 char symname[KSYM_NAME_LEN];
75787
75788 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
75789 SEQ_printf(m, "<%p>", sym);
75790 else
75791 SEQ_printf(m, "%s", symname);
75792 +#endif
75793 }
75794
75795 static void
75796 @@ -112,7 +116,11 @@ next_one:
75797 static void
75798 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
75799 {
75800 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75801 + SEQ_printf(m, " .base: %p\n", NULL);
75802 +#else
75803 SEQ_printf(m, " .base: %p\n", base);
75804 +#endif
75805 SEQ_printf(m, " .index: %d\n",
75806 base->index);
75807 SEQ_printf(m, " .resolution: %Lu nsecs\n",
75808 @@ -289,7 +297,11 @@ static int __init init_timer_list_procfs(void)
75809 {
75810 struct proc_dir_entry *pe;
75811
75812 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
75813 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
75814 +#else
75815 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
75816 +#endif
75817 if (!pe)
75818 return -ENOMEM;
75819 return 0;
75820 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
75821 index ee5681f..634089b 100644
75822 --- a/kernel/time/timer_stats.c
75823 +++ b/kernel/time/timer_stats.c
75824 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
75825 static unsigned long nr_entries;
75826 static struct entry entries[MAX_ENTRIES];
75827
75828 -static atomic_t overflow_count;
75829 +static atomic_unchecked_t overflow_count;
75830
75831 /*
75832 * The entries are in a hash-table, for fast lookup:
75833 @@ -140,7 +140,7 @@ static void reset_entries(void)
75834 nr_entries = 0;
75835 memset(entries, 0, sizeof(entries));
75836 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
75837 - atomic_set(&overflow_count, 0);
75838 + atomic_set_unchecked(&overflow_count, 0);
75839 }
75840
75841 static struct entry *alloc_entry(void)
75842 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
75843 if (likely(entry))
75844 entry->count++;
75845 else
75846 - atomic_inc(&overflow_count);
75847 + atomic_inc_unchecked(&overflow_count);
75848
75849 out_unlock:
75850 spin_unlock_irqrestore(lock, flags);
75851 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
75852
75853 static void print_name_offset(struct seq_file *m, unsigned long addr)
75854 {
75855 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75856 + seq_printf(m, "<%p>", NULL);
75857 +#else
75858 char symname[KSYM_NAME_LEN];
75859
75860 if (lookup_symbol_name(addr, symname) < 0)
75861 seq_printf(m, "<%p>", (void *)addr);
75862 else
75863 seq_printf(m, "%s", symname);
75864 +#endif
75865 }
75866
75867 static int tstats_show(struct seq_file *m, void *v)
75868 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
75869
75870 seq_puts(m, "Timer Stats Version: v0.2\n");
75871 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
75872 - if (atomic_read(&overflow_count))
75873 + if (atomic_read_unchecked(&overflow_count))
75874 seq_printf(m, "Overflow: %d entries\n",
75875 - atomic_read(&overflow_count));
75876 + atomic_read_unchecked(&overflow_count));
75877
75878 for (i = 0; i < nr_entries; i++) {
75879 entry = entries + i;
75880 @@ -415,7 +419,11 @@ static int __init init_tstats_procfs(void)
75881 {
75882 struct proc_dir_entry *pe;
75883
75884 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
75885 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
75886 +#else
75887 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
75888 +#endif
75889 if (!pe)
75890 return -ENOMEM;
75891 return 0;
75892 diff --git a/kernel/timer.c b/kernel/timer.c
75893 index cb3c1f1..8bf5526 100644
75894 --- a/kernel/timer.c
75895 +++ b/kernel/timer.c
75896 @@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
75897 /*
75898 * This function runs timers and the timer-tq in bottom half context.
75899 */
75900 -static void run_timer_softirq(struct softirq_action *h)
75901 +static void run_timer_softirq(void)
75902 {
75903 struct tvec_base *base = __get_cpu_var(tvec_bases);
75904
75905 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
75906 index d9d6206..f19467e 100644
75907 --- a/kernel/trace/blktrace.c
75908 +++ b/kernel/trace/blktrace.c
75909 @@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
75910 struct blk_trace *bt = filp->private_data;
75911 char buf[16];
75912
75913 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
75914 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
75915
75916 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
75917 }
75918 @@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
75919 return 1;
75920
75921 bt = buf->chan->private_data;
75922 - atomic_inc(&bt->dropped);
75923 + atomic_inc_unchecked(&bt->dropped);
75924 return 0;
75925 }
75926
75927 @@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
75928
75929 bt->dir = dir;
75930 bt->dev = dev;
75931 - atomic_set(&bt->dropped, 0);
75932 + atomic_set_unchecked(&bt->dropped, 0);
75933
75934 ret = -EIO;
75935 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
75936 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
75937 index 4872937..c794d40 100644
75938 --- a/kernel/trace/ftrace.c
75939 +++ b/kernel/trace/ftrace.c
75940 @@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
75941
75942 ip = rec->ip;
75943
75944 + ret = ftrace_arch_code_modify_prepare();
75945 + FTRACE_WARN_ON(ret);
75946 + if (ret)
75947 + return 0;
75948 +
75949 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
75950 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
75951 if (ret) {
75952 ftrace_bug(ret, ip);
75953 rec->flags |= FTRACE_FL_FAILED;
75954 - return 0;
75955 }
75956 - return 1;
75957 + return ret ? 0 : 1;
75958 }
75959
75960 /*
75961 diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
75962 index e749a05..19c6e94 100644
75963 --- a/kernel/trace/ring_buffer.c
75964 +++ b/kernel/trace/ring_buffer.c
75965 @@ -606,7 +606,7 @@ static struct list_head *rb_list_head(struct list_head *list)
75966 * the reader page). But if the next page is a header page,
75967 * its flags will be non zero.
75968 */
75969 -static int inline
75970 +static inline int
75971 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
75972 struct buffer_page *page, struct list_head *list)
75973 {
75974 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
75975 index a2a2d1f..7f32b09 100644
75976 --- a/kernel/trace/trace.c
75977 +++ b/kernel/trace/trace.c
75978 @@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
75979 size_t rem;
75980 unsigned int i;
75981
75982 + pax_track_stack();
75983 +
75984 /* copy the tracer to avoid using a global lock all around */
75985 mutex_lock(&trace_types_lock);
75986 if (unlikely(old_tracer != current_trace && current_trace)) {
75987 @@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
75988 int entries, size, i;
75989 size_t ret;
75990
75991 + pax_track_stack();
75992 +
75993 if (*ppos & (PAGE_SIZE - 1)) {
75994 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
75995 return -EINVAL;
75996 @@ -3816,10 +3820,9 @@ static const struct file_operations tracing_dyn_info_fops = {
75997 };
75998 #endif
75999
76000 -static struct dentry *d_tracer;
76001 -
76002 struct dentry *tracing_init_dentry(void)
76003 {
76004 + static struct dentry *d_tracer;
76005 static int once;
76006
76007 if (d_tracer)
76008 @@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
76009 return d_tracer;
76010 }
76011
76012 -static struct dentry *d_percpu;
76013 -
76014 struct dentry *tracing_dentry_percpu(void)
76015 {
76016 + static struct dentry *d_percpu;
76017 static int once;
76018 struct dentry *d_tracer;
76019
76020 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
76021 index d128f65..f37b4af 100644
76022 --- a/kernel/trace/trace_events.c
76023 +++ b/kernel/trace/trace_events.c
76024 @@ -951,13 +951,10 @@ static LIST_HEAD(ftrace_module_file_list);
76025 * Modules must own their file_operations to keep up with
76026 * reference counting.
76027 */
76028 +
76029 struct ftrace_module_file_ops {
76030 struct list_head list;
76031 struct module *mod;
76032 - struct file_operations id;
76033 - struct file_operations enable;
76034 - struct file_operations format;
76035 - struct file_operations filter;
76036 };
76037
76038 static void remove_subsystem_dir(const char *name)
76039 @@ -1004,17 +1001,12 @@ trace_create_file_ops(struct module *mod)
76040
76041 file_ops->mod = mod;
76042
76043 - file_ops->id = ftrace_event_id_fops;
76044 - file_ops->id.owner = mod;
76045 -
76046 - file_ops->enable = ftrace_enable_fops;
76047 - file_ops->enable.owner = mod;
76048 -
76049 - file_ops->filter = ftrace_event_filter_fops;
76050 - file_ops->filter.owner = mod;
76051 -
76052 - file_ops->format = ftrace_event_format_fops;
76053 - file_ops->format.owner = mod;
76054 + pax_open_kernel();
76055 + *(void **)&mod->trace_id.owner = mod;
76056 + *(void **)&mod->trace_enable.owner = mod;
76057 + *(void **)&mod->trace_filter.owner = mod;
76058 + *(void **)&mod->trace_format.owner = mod;
76059 + pax_close_kernel();
76060
76061 list_add(&file_ops->list, &ftrace_module_file_list);
76062
76063 @@ -1063,8 +1055,8 @@ static void trace_module_add_events(struct module *mod)
76064 call->mod = mod;
76065 list_add(&call->list, &ftrace_events);
76066 event_create_dir(call, d_events,
76067 - &file_ops->id, &file_ops->enable,
76068 - &file_ops->filter, &file_ops->format);
76069 + &mod->trace_id, &mod->trace_enable,
76070 + &mod->trace_filter, &mod->trace_format);
76071 }
76072 }
76073
76074 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
76075 index 0acd834..b800b56 100644
76076 --- a/kernel/trace/trace_mmiotrace.c
76077 +++ b/kernel/trace/trace_mmiotrace.c
76078 @@ -23,7 +23,7 @@ struct header_iter {
76079 static struct trace_array *mmio_trace_array;
76080 static bool overrun_detected;
76081 static unsigned long prev_overruns;
76082 -static atomic_t dropped_count;
76083 +static atomic_unchecked_t dropped_count;
76084
76085 static void mmio_reset_data(struct trace_array *tr)
76086 {
76087 @@ -126,7 +126,7 @@ static void mmio_close(struct trace_iterator *iter)
76088
76089 static unsigned long count_overruns(struct trace_iterator *iter)
76090 {
76091 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
76092 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
76093 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
76094
76095 if (over > prev_overruns)
76096 @@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
76097 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
76098 sizeof(*entry), 0, pc);
76099 if (!event) {
76100 - atomic_inc(&dropped_count);
76101 + atomic_inc_unchecked(&dropped_count);
76102 return;
76103 }
76104 entry = ring_buffer_event_data(event);
76105 @@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
76106 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
76107 sizeof(*entry), 0, pc);
76108 if (!event) {
76109 - atomic_inc(&dropped_count);
76110 + atomic_inc_unchecked(&dropped_count);
76111 return;
76112 }
76113 entry = ring_buffer_event_data(event);
76114 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
76115 index b6c12c6..41fdc53 100644
76116 --- a/kernel/trace/trace_output.c
76117 +++ b/kernel/trace/trace_output.c
76118 @@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
76119 return 0;
76120 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
76121 if (!IS_ERR(p)) {
76122 - p = mangle_path(s->buffer + s->len, p, "\n");
76123 + p = mangle_path(s->buffer + s->len, p, "\n\\");
76124 if (p) {
76125 s->len = p - s->buffer;
76126 return 1;
76127 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
76128 index 8504ac7..ecf0adb 100644
76129 --- a/kernel/trace/trace_stack.c
76130 +++ b/kernel/trace/trace_stack.c
76131 @@ -50,7 +50,7 @@ static inline void check_stack(void)
76132 return;
76133
76134 /* we do not handle interrupt stacks yet */
76135 - if (!object_is_on_stack(&this_size))
76136 + if (!object_starts_on_stack(&this_size))
76137 return;
76138
76139 local_irq_save(flags);
76140 diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
76141 index 40cafb0..d5ead43 100644
76142 --- a/kernel/trace/trace_workqueue.c
76143 +++ b/kernel/trace/trace_workqueue.c
76144 @@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
76145 int cpu;
76146 pid_t pid;
76147 /* Can be inserted from interrupt or user context, need to be atomic */
76148 - atomic_t inserted;
76149 + atomic_unchecked_t inserted;
76150 /*
76151 * Don't need to be atomic, works are serialized in a single workqueue thread
76152 * on a single CPU.
76153 @@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
76154 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
76155 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
76156 if (node->pid == wq_thread->pid) {
76157 - atomic_inc(&node->inserted);
76158 + atomic_inc_unchecked(&node->inserted);
76159 goto found;
76160 }
76161 }
76162 @@ -205,7 +205,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
76163 tsk = get_pid_task(pid, PIDTYPE_PID);
76164 if (tsk) {
76165 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
76166 - atomic_read(&cws->inserted), cws->executed,
76167 + atomic_read_unchecked(&cws->inserted), cws->executed,
76168 tsk->comm);
76169 put_task_struct(tsk);
76170 }
76171 diff --git a/kernel/user.c b/kernel/user.c
76172 index 1b91701..8795237 100644
76173 --- a/kernel/user.c
76174 +++ b/kernel/user.c
76175 @@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
76176 spin_lock_irq(&uidhash_lock);
76177 up = uid_hash_find(uid, hashent);
76178 if (up) {
76179 + put_user_ns(ns);
76180 key_put(new->uid_keyring);
76181 key_put(new->session_keyring);
76182 kmem_cache_free(uid_cachep, new);
76183 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
76184 index 234ceb1..ad74049 100644
76185 --- a/lib/Kconfig.debug
76186 +++ b/lib/Kconfig.debug
76187 @@ -905,7 +905,7 @@ config LATENCYTOP
76188 select STACKTRACE
76189 select SCHEDSTATS
76190 select SCHED_DEBUG
76191 - depends on HAVE_LATENCYTOP_SUPPORT
76192 + depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
76193 help
76194 Enable this option if you want to use the LatencyTOP tool
76195 to find out which userspace is blocking on what kernel operations.
76196 diff --git a/lib/bitmap.c b/lib/bitmap.c
76197 index 7025658..8d14cab 100644
76198 --- a/lib/bitmap.c
76199 +++ b/lib/bitmap.c
76200 @@ -341,7 +341,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
76201 {
76202 int c, old_c, totaldigits, ndigits, nchunks, nbits;
76203 u32 chunk;
76204 - const char __user *ubuf = buf;
76205 + const char __user *ubuf = (const char __force_user *)buf;
76206
76207 bitmap_zero(maskp, nmaskbits);
76208
76209 @@ -426,7 +426,7 @@ int bitmap_parse_user(const char __user *ubuf,
76210 {
76211 if (!access_ok(VERIFY_READ, ubuf, ulen))
76212 return -EFAULT;
76213 - return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
76214 + return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
76215 }
76216 EXPORT_SYMBOL(bitmap_parse_user);
76217
76218 diff --git a/lib/bug.c b/lib/bug.c
76219 index 300e41a..2779eb0 100644
76220 --- a/lib/bug.c
76221 +++ b/lib/bug.c
76222 @@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
76223 return BUG_TRAP_TYPE_NONE;
76224
76225 bug = find_bug(bugaddr);
76226 + if (!bug)
76227 + return BUG_TRAP_TYPE_NONE;
76228
76229 printk(KERN_EMERG "------------[ cut here ]------------\n");
76230
76231 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
76232 index 2b413db..e21d207 100644
76233 --- a/lib/debugobjects.c
76234 +++ b/lib/debugobjects.c
76235 @@ -277,7 +277,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
76236 if (limit > 4)
76237 return;
76238
76239 - is_on_stack = object_is_on_stack(addr);
76240 + is_on_stack = object_starts_on_stack(addr);
76241 if (is_on_stack == onstack)
76242 return;
76243
76244 diff --git a/lib/devres.c b/lib/devres.c
76245 index 72c8909..7543868 100644
76246 --- a/lib/devres.c
76247 +++ b/lib/devres.c
76248 @@ -80,7 +80,7 @@ void devm_iounmap(struct device *dev, void __iomem *addr)
76249 {
76250 iounmap(addr);
76251 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
76252 - (void *)addr));
76253 + (void __force *)addr));
76254 }
76255 EXPORT_SYMBOL(devm_iounmap);
76256
76257 @@ -140,7 +140,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
76258 {
76259 ioport_unmap(addr);
76260 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
76261 - devm_ioport_map_match, (void *)addr));
76262 + devm_ioport_map_match, (void __force *)addr));
76263 }
76264 EXPORT_SYMBOL(devm_ioport_unmap);
76265
76266 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
76267 index 084e879..0674448 100644
76268 --- a/lib/dma-debug.c
76269 +++ b/lib/dma-debug.c
76270 @@ -861,7 +861,7 @@ out:
76271
76272 static void check_for_stack(struct device *dev, void *addr)
76273 {
76274 - if (object_is_on_stack(addr))
76275 + if (object_starts_on_stack(addr))
76276 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
76277 "stack [addr=%p]\n", addr);
76278 }
76279 diff --git a/lib/idr.c b/lib/idr.c
76280 index eda7ba3..915dfae 100644
76281 --- a/lib/idr.c
76282 +++ b/lib/idr.c
76283 @@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
76284 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
76285
76286 /* if already at the top layer, we need to grow */
76287 - if (id >= 1 << (idp->layers * IDR_BITS)) {
76288 + if (id >= (1 << (idp->layers * IDR_BITS))) {
76289 *starting_id = id;
76290 return IDR_NEED_TO_GROW;
76291 }
76292 diff --git a/lib/inflate.c b/lib/inflate.c
76293 index d102559..4215f31 100644
76294 --- a/lib/inflate.c
76295 +++ b/lib/inflate.c
76296 @@ -266,7 +266,7 @@ static void free(void *where)
76297 malloc_ptr = free_mem_ptr;
76298 }
76299 #else
76300 -#define malloc(a) kmalloc(a, GFP_KERNEL)
76301 +#define malloc(a) kmalloc((a), GFP_KERNEL)
76302 #define free(a) kfree(a)
76303 #endif
76304
76305 diff --git a/lib/kobject.c b/lib/kobject.c
76306 index b512b74..8115eb1 100644
76307 --- a/lib/kobject.c
76308 +++ b/lib/kobject.c
76309 @@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
76310 return ret;
76311 }
76312
76313 -struct sysfs_ops kobj_sysfs_ops = {
76314 +const struct sysfs_ops kobj_sysfs_ops = {
76315 .show = kobj_attr_show,
76316 .store = kobj_attr_store,
76317 };
76318 @@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
76319 * If the kset was not able to be created, NULL will be returned.
76320 */
76321 static struct kset *kset_create(const char *name,
76322 - struct kset_uevent_ops *uevent_ops,
76323 + const struct kset_uevent_ops *uevent_ops,
76324 struct kobject *parent_kobj)
76325 {
76326 struct kset *kset;
76327 @@ -832,7 +832,7 @@ static struct kset *kset_create(const char *name,
76328 * If the kset was not able to be created, NULL will be returned.
76329 */
76330 struct kset *kset_create_and_add(const char *name,
76331 - struct kset_uevent_ops *uevent_ops,
76332 + const struct kset_uevent_ops *uevent_ops,
76333 struct kobject *parent_kobj)
76334 {
76335 struct kset *kset;
76336 diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
76337 index 507b821..0bf8ed0 100644
76338 --- a/lib/kobject_uevent.c
76339 +++ b/lib/kobject_uevent.c
76340 @@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
76341 const char *subsystem;
76342 struct kobject *top_kobj;
76343 struct kset *kset;
76344 - struct kset_uevent_ops *uevent_ops;
76345 + const struct kset_uevent_ops *uevent_ops;
76346 u64 seq;
76347 int i = 0;
76348 int retval = 0;
76349 diff --git a/lib/kref.c b/lib/kref.c
76350 index 9ecd6e8..12c94c1 100644
76351 --- a/lib/kref.c
76352 +++ b/lib/kref.c
76353 @@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
76354 */
76355 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
76356 {
76357 - WARN_ON(release == NULL);
76358 + BUG_ON(release == NULL);
76359 WARN_ON(release == (void (*)(struct kref *))kfree);
76360
76361 if (atomic_dec_and_test(&kref->refcount)) {
76362 diff --git a/lib/parser.c b/lib/parser.c
76363 index b00d020..1b34325 100644
76364 --- a/lib/parser.c
76365 +++ b/lib/parser.c
76366 @@ -126,7 +126,7 @@ static int match_number(substring_t *s, int *result, int base)
76367 char *buf;
76368 int ret;
76369
76370 - buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
76371 + buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
76372 if (!buf)
76373 return -ENOMEM;
76374 memcpy(buf, s->from, s->to - s->from);
76375 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
76376 index 92cdd99..a8149d7 100644
76377 --- a/lib/radix-tree.c
76378 +++ b/lib/radix-tree.c
76379 @@ -81,7 +81,7 @@ struct radix_tree_preload {
76380 int nr;
76381 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
76382 };
76383 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
76384 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
76385
76386 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
76387 {
76388 diff --git a/lib/random32.c b/lib/random32.c
76389 index 217d5c4..45aba8a 100644
76390 --- a/lib/random32.c
76391 +++ b/lib/random32.c
76392 @@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *state)
76393 */
76394 static inline u32 __seed(u32 x, u32 m)
76395 {
76396 - return (x < m) ? x + m : x;
76397 + return (x <= m) ? x + m + 1 : x;
76398 }
76399
76400 /**
76401 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
76402 index 33bed5e..1477e46 100644
76403 --- a/lib/vsprintf.c
76404 +++ b/lib/vsprintf.c
76405 @@ -16,6 +16,9 @@
76406 * - scnprintf and vscnprintf
76407 */
76408
76409 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76410 +#define __INCLUDED_BY_HIDESYM 1
76411 +#endif
76412 #include <stdarg.h>
76413 #include <linux/module.h>
76414 #include <linux/types.h>
76415 @@ -546,12 +549,12 @@ static char *number(char *buf, char *end, unsigned long long num,
76416 return buf;
76417 }
76418
76419 -static char *string(char *buf, char *end, char *s, struct printf_spec spec)
76420 +static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
76421 {
76422 int len, i;
76423
76424 if ((unsigned long)s < PAGE_SIZE)
76425 - s = "<NULL>";
76426 + s = "(null)";
76427
76428 len = strnlen(s, spec.precision);
76429
76430 @@ -581,7 +584,7 @@ static char *symbol_string(char *buf, char *end, void *ptr,
76431 unsigned long value = (unsigned long) ptr;
76432 #ifdef CONFIG_KALLSYMS
76433 char sym[KSYM_SYMBOL_LEN];
76434 - if (ext != 'f' && ext != 's')
76435 + if (ext != 'f' && ext != 's' && ext != 'a')
76436 sprint_symbol(sym, value);
76437 else
76438 kallsyms_lookup(value, NULL, NULL, NULL, sym);
76439 @@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
76440 * - 'f' For simple symbolic function names without offset
76441 * - 'S' For symbolic direct pointers with offset
76442 * - 's' For symbolic direct pointers without offset
76443 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
76444 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
76445 * - 'R' For a struct resource pointer, it prints the range of
76446 * addresses (not the name nor the flags)
76447 * - 'M' For a 6-byte MAC address, it prints the address in the
76448 @@ -822,7 +827,7 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
76449 struct printf_spec spec)
76450 {
76451 if (!ptr)
76452 - return string(buf, end, "(null)", spec);
76453 + return string(buf, end, "(nil)", spec);
76454
76455 switch (*fmt) {
76456 case 'F':
76457 @@ -831,6 +836,14 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
76458 case 's':
76459 /* Fallthrough */
76460 case 'S':
76461 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76462 + break;
76463 +#else
76464 + return symbol_string(buf, end, ptr, spec, *fmt);
76465 +#endif
76466 + case 'a':
76467 + /* Fallthrough */
76468 + case 'A':
76469 return symbol_string(buf, end, ptr, spec, *fmt);
76470 case 'R':
76471 return resource_string(buf, end, ptr, spec);
76472 @@ -1445,7 +1458,7 @@ do { \
76473 size_t len;
76474 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
76475 || (unsigned long)save_str < PAGE_SIZE)
76476 - save_str = "<NULL>";
76477 + save_str = "(null)";
76478 len = strlen(save_str);
76479 if (str + len + 1 < end)
76480 memcpy(str, save_str, len + 1);
76481 @@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
76482 typeof(type) value; \
76483 if (sizeof(type) == 8) { \
76484 args = PTR_ALIGN(args, sizeof(u32)); \
76485 - *(u32 *)&value = *(u32 *)args; \
76486 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
76487 + *(u32 *)&value = *(const u32 *)args; \
76488 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
76489 } else { \
76490 args = PTR_ALIGN(args, sizeof(type)); \
76491 - value = *(typeof(type) *)args; \
76492 + value = *(const typeof(type) *)args; \
76493 } \
76494 args += sizeof(type); \
76495 value; \
76496 @@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
76497 const char *str_arg = args;
76498 size_t len = strlen(str_arg);
76499 args += len + 1;
76500 - str = string(str, end, (char *)str_arg, spec);
76501 + str = string(str, end, str_arg, spec);
76502 break;
76503 }
76504
76505 diff --git a/localversion-grsec b/localversion-grsec
76506 new file mode 100644
76507 index 0000000..7cd6065
76508 --- /dev/null
76509 +++ b/localversion-grsec
76510 @@ -0,0 +1 @@
76511 +-grsec
76512 diff --git a/mm/Kconfig b/mm/Kconfig
76513 index 2c19c0b..f3c3f83 100644
76514 --- a/mm/Kconfig
76515 +++ b/mm/Kconfig
76516 @@ -228,7 +228,7 @@ config KSM
76517 config DEFAULT_MMAP_MIN_ADDR
76518 int "Low address space to protect from user allocation"
76519 depends on MMU
76520 - default 4096
76521 + default 65536
76522 help
76523 This is the portion of low virtual memory which should be protected
76524 from userspace allocation. Keeping a user from writing to low pages
76525 diff --git a/mm/backing-dev.c b/mm/backing-dev.c
76526 index 67a33a5..094dcf1 100644
76527 --- a/mm/backing-dev.c
76528 +++ b/mm/backing-dev.c
76529 @@ -272,7 +272,7 @@ static void bdi_task_init(struct backing_dev_info *bdi,
76530 list_add_tail_rcu(&wb->list, &bdi->wb_list);
76531 spin_unlock(&bdi->wb_lock);
76532
76533 - tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
76534 + tsk->flags |= PF_SWAPWRITE;
76535 set_freezable();
76536
76537 /*
76538 @@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rcu_head *head)
76539 * Add the default flusher task that gets created for any bdi
76540 * that has dirty data pending writeout
76541 */
76542 -void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
76543 +static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
76544 {
76545 if (!bdi_cap_writeback_dirty(bdi))
76546 return;
76547 diff --git a/mm/filemap.c b/mm/filemap.c
76548 index a1fe378..e26702f 100644
76549 --- a/mm/filemap.c
76550 +++ b/mm/filemap.c
76551 @@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
76552 struct address_space *mapping = file->f_mapping;
76553
76554 if (!mapping->a_ops->readpage)
76555 - return -ENOEXEC;
76556 + return -ENODEV;
76557 file_accessed(file);
76558 vma->vm_ops = &generic_file_vm_ops;
76559 vma->vm_flags |= VM_CAN_NONLINEAR;
76560 @@ -2024,6 +2024,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
76561 *pos = i_size_read(inode);
76562
76563 if (limit != RLIM_INFINITY) {
76564 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
76565 if (*pos >= limit) {
76566 send_sig(SIGXFSZ, current, 0);
76567 return -EFBIG;
76568 diff --git a/mm/fremap.c b/mm/fremap.c
76569 index b6ec85a..a24ac22 100644
76570 --- a/mm/fremap.c
76571 +++ b/mm/fremap.c
76572 @@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
76573 retry:
76574 vma = find_vma(mm, start);
76575
76576 +#ifdef CONFIG_PAX_SEGMEXEC
76577 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
76578 + goto out;
76579 +#endif
76580 +
76581 /*
76582 * Make sure the vma is shared, that it supports prefaulting,
76583 * and that the remapped range is valid and fully within
76584 @@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
76585 /*
76586 * drop PG_Mlocked flag for over-mapped range
76587 */
76588 - unsigned int saved_flags = vma->vm_flags;
76589 + unsigned long saved_flags = vma->vm_flags;
76590 munlock_vma_pages_range(vma, start, start + size);
76591 vma->vm_flags = saved_flags;
76592 }
76593 diff --git a/mm/highmem.c b/mm/highmem.c
76594 index 9c1e627..5ca9447 100644
76595 --- a/mm/highmem.c
76596 +++ b/mm/highmem.c
76597 @@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
76598 * So no dangers, even with speculative execution.
76599 */
76600 page = pte_page(pkmap_page_table[i]);
76601 + pax_open_kernel();
76602 pte_clear(&init_mm, (unsigned long)page_address(page),
76603 &pkmap_page_table[i]);
76604 -
76605 + pax_close_kernel();
76606 set_page_address(page, NULL);
76607 need_flush = 1;
76608 }
76609 @@ -177,9 +178,11 @@ start:
76610 }
76611 }
76612 vaddr = PKMAP_ADDR(last_pkmap_nr);
76613 +
76614 + pax_open_kernel();
76615 set_pte_at(&init_mm, vaddr,
76616 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
76617 -
76618 + pax_close_kernel();
76619 pkmap_count[last_pkmap_nr] = 1;
76620 set_page_address(page, (void *)vaddr);
76621
76622 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
76623 index 5e1e508..ac70275 100644
76624 --- a/mm/hugetlb.c
76625 +++ b/mm/hugetlb.c
76626 @@ -869,6 +869,7 @@ free:
76627 list_del(&page->lru);
76628 enqueue_huge_page(h, page);
76629 }
76630 + spin_unlock(&hugetlb_lock);
76631
76632 /* Free unnecessary surplus pages to the buddy allocator */
76633 if (!list_empty(&surplus_list)) {
76634 @@ -1933,6 +1934,26 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
76635 return 1;
76636 }
76637
76638 +#ifdef CONFIG_PAX_SEGMEXEC
76639 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
76640 +{
76641 + struct mm_struct *mm = vma->vm_mm;
76642 + struct vm_area_struct *vma_m;
76643 + unsigned long address_m;
76644 + pte_t *ptep_m;
76645 +
76646 + vma_m = pax_find_mirror_vma(vma);
76647 + if (!vma_m)
76648 + return;
76649 +
76650 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
76651 + address_m = address + SEGMEXEC_TASK_SIZE;
76652 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
76653 + get_page(page_m);
76654 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
76655 +}
76656 +#endif
76657 +
76658 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
76659 unsigned long address, pte_t *ptep, pte_t pte,
76660 struct page *pagecache_page)
76661 @@ -2004,6 +2025,11 @@ retry_avoidcopy:
76662 huge_ptep_clear_flush(vma, address, ptep);
76663 set_huge_pte_at(mm, address, ptep,
76664 make_huge_pte(vma, new_page, 1));
76665 +
76666 +#ifdef CONFIG_PAX_SEGMEXEC
76667 + pax_mirror_huge_pte(vma, address, new_page);
76668 +#endif
76669 +
76670 /* Make the old page be freed below */
76671 new_page = old_page;
76672 }
76673 @@ -2135,6 +2161,10 @@ retry:
76674 && (vma->vm_flags & VM_SHARED)));
76675 set_huge_pte_at(mm, address, ptep, new_pte);
76676
76677 +#ifdef CONFIG_PAX_SEGMEXEC
76678 + pax_mirror_huge_pte(vma, address, page);
76679 +#endif
76680 +
76681 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
76682 /* Optimization, do the COW without a second fault */
76683 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
76684 @@ -2163,6 +2193,28 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
76685 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
76686 struct hstate *h = hstate_vma(vma);
76687
76688 +#ifdef CONFIG_PAX_SEGMEXEC
76689 + struct vm_area_struct *vma_m;
76690 +
76691 + vma_m = pax_find_mirror_vma(vma);
76692 + if (vma_m) {
76693 + unsigned long address_m;
76694 +
76695 + if (vma->vm_start > vma_m->vm_start) {
76696 + address_m = address;
76697 + address -= SEGMEXEC_TASK_SIZE;
76698 + vma = vma_m;
76699 + h = hstate_vma(vma);
76700 + } else
76701 + address_m = address + SEGMEXEC_TASK_SIZE;
76702 +
76703 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
76704 + return VM_FAULT_OOM;
76705 + address_m &= HPAGE_MASK;
76706 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
76707 + }
76708 +#endif
76709 +
76710 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
76711 if (!ptep)
76712 return VM_FAULT_OOM;
76713 diff --git a/mm/internal.h b/mm/internal.h
76714 index f03e8e2..7354343 100644
76715 --- a/mm/internal.h
76716 +++ b/mm/internal.h
76717 @@ -49,6 +49,7 @@ extern void putback_lru_page(struct page *page);
76718 * in mm/page_alloc.c
76719 */
76720 extern void __free_pages_bootmem(struct page *page, unsigned int order);
76721 +extern void free_compound_page(struct page *page);
76722 extern void prep_compound_page(struct page *page, unsigned long order);
76723
76724
76725 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
76726 index c346660..b47382f 100644
76727 --- a/mm/kmemleak.c
76728 +++ b/mm/kmemleak.c
76729 @@ -358,7 +358,7 @@ static void print_unreferenced(struct seq_file *seq,
76730
76731 for (i = 0; i < object->trace_len; i++) {
76732 void *ptr = (void *)object->trace[i];
76733 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
76734 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
76735 }
76736 }
76737
76738 diff --git a/mm/maccess.c b/mm/maccess.c
76739 index 9073695..1127f348 100644
76740 --- a/mm/maccess.c
76741 +++ b/mm/maccess.c
76742 @@ -14,7 +14,7 @@
76743 * Safely read from address @src to the buffer at @dst. If a kernel fault
76744 * happens, handle that and return -EFAULT.
76745 */
76746 -long probe_kernel_read(void *dst, void *src, size_t size)
76747 +long probe_kernel_read(void *dst, const void *src, size_t size)
76748 {
76749 long ret;
76750 mm_segment_t old_fs = get_fs();
76751 @@ -22,7 +22,7 @@ long probe_kernel_read(void *dst, void *src, size_t size)
76752 set_fs(KERNEL_DS);
76753 pagefault_disable();
76754 ret = __copy_from_user_inatomic(dst,
76755 - (__force const void __user *)src, size);
76756 + (const void __force_user *)src, size);
76757 pagefault_enable();
76758 set_fs(old_fs);
76759
76760 @@ -39,14 +39,14 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
76761 * Safely write to address @dst from the buffer at @src. If a kernel fault
76762 * happens, handle that and return -EFAULT.
76763 */
76764 -long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
76765 +long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
76766 {
76767 long ret;
76768 mm_segment_t old_fs = get_fs();
76769
76770 set_fs(KERNEL_DS);
76771 pagefault_disable();
76772 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
76773 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
76774 pagefault_enable();
76775 set_fs(old_fs);
76776
76777 diff --git a/mm/madvise.c b/mm/madvise.c
76778 index 35b1479..499f7d4 100644
76779 --- a/mm/madvise.c
76780 +++ b/mm/madvise.c
76781 @@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
76782 pgoff_t pgoff;
76783 unsigned long new_flags = vma->vm_flags;
76784
76785 +#ifdef CONFIG_PAX_SEGMEXEC
76786 + struct vm_area_struct *vma_m;
76787 +#endif
76788 +
76789 switch (behavior) {
76790 case MADV_NORMAL:
76791 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
76792 @@ -103,6 +107,13 @@ success:
76793 /*
76794 * vm_flags is protected by the mmap_sem held in write mode.
76795 */
76796 +
76797 +#ifdef CONFIG_PAX_SEGMEXEC
76798 + vma_m = pax_find_mirror_vma(vma);
76799 + if (vma_m)
76800 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
76801 +#endif
76802 +
76803 vma->vm_flags = new_flags;
76804
76805 out:
76806 @@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
76807 struct vm_area_struct ** prev,
76808 unsigned long start, unsigned long end)
76809 {
76810 +
76811 +#ifdef CONFIG_PAX_SEGMEXEC
76812 + struct vm_area_struct *vma_m;
76813 +#endif
76814 +
76815 *prev = vma;
76816 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
76817 return -EINVAL;
76818 @@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
76819 zap_page_range(vma, start, end - start, &details);
76820 } else
76821 zap_page_range(vma, start, end - start, NULL);
76822 +
76823 +#ifdef CONFIG_PAX_SEGMEXEC
76824 + vma_m = pax_find_mirror_vma(vma);
76825 + if (vma_m) {
76826 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
76827 + struct zap_details details = {
76828 + .nonlinear_vma = vma_m,
76829 + .last_index = ULONG_MAX,
76830 + };
76831 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
76832 + } else
76833 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
76834 + }
76835 +#endif
76836 +
76837 return 0;
76838 }
76839
76840 @@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
76841 if (end < start)
76842 goto out;
76843
76844 +#ifdef CONFIG_PAX_SEGMEXEC
76845 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
76846 + if (end > SEGMEXEC_TASK_SIZE)
76847 + goto out;
76848 + } else
76849 +#endif
76850 +
76851 + if (end > TASK_SIZE)
76852 + goto out;
76853 +
76854 error = 0;
76855 if (end == start)
76856 goto out;
76857 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
76858 index 8aeba53..b4a4198 100644
76859 --- a/mm/memory-failure.c
76860 +++ b/mm/memory-failure.c
76861 @@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
76862
76863 int sysctl_memory_failure_recovery __read_mostly = 1;
76864
76865 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
76866 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
76867
76868 /*
76869 * Send all the processes who have the page mapped an ``action optional''
76870 @@ -64,7 +64,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
76871 si.si_signo = SIGBUS;
76872 si.si_errno = 0;
76873 si.si_code = BUS_MCEERR_AO;
76874 - si.si_addr = (void *)addr;
76875 + si.si_addr = (void __user *)addr;
76876 #ifdef __ARCH_SI_TRAPNO
76877 si.si_trapno = trapno;
76878 #endif
76879 @@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn, int trapno, int ref)
76880 return 0;
76881 }
76882
76883 - atomic_long_add(1, &mce_bad_pages);
76884 + atomic_long_add_unchecked(1, &mce_bad_pages);
76885
76886 /*
76887 * We need/can do nothing about count=0 pages.
76888 diff --git a/mm/memory.c b/mm/memory.c
76889 index 6c836d3..48f3264 100644
76890 --- a/mm/memory.c
76891 +++ b/mm/memory.c
76892 @@ -187,8 +187,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
76893 return;
76894
76895 pmd = pmd_offset(pud, start);
76896 +
76897 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
76898 pud_clear(pud);
76899 pmd_free_tlb(tlb, pmd, start);
76900 +#endif
76901 +
76902 }
76903
76904 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
76905 @@ -219,9 +223,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
76906 if (end - 1 > ceiling - 1)
76907 return;
76908
76909 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
76910 pud = pud_offset(pgd, start);
76911 pgd_clear(pgd);
76912 pud_free_tlb(tlb, pud, start);
76913 +#endif
76914 +
76915 }
76916
76917 /*
76918 @@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
76919 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
76920 i = 0;
76921
76922 - do {
76923 + while (nr_pages) {
76924 struct vm_area_struct *vma;
76925
76926 - vma = find_extend_vma(mm, start);
76927 + vma = find_vma(mm, start);
76928 if (!vma && in_gate_area(tsk, start)) {
76929 unsigned long pg = start & PAGE_MASK;
76930 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
76931 @@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
76932 continue;
76933 }
76934
76935 - if (!vma ||
76936 + if (!vma || start < vma->vm_start ||
76937 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
76938 !(vm_flags & vma->vm_flags))
76939 return i ? : -EFAULT;
76940 @@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
76941 start += PAGE_SIZE;
76942 nr_pages--;
76943 } while (nr_pages && start < vma->vm_end);
76944 - } while (nr_pages);
76945 + }
76946 return i;
76947 }
76948
76949 @@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
76950 page_add_file_rmap(page);
76951 set_pte_at(mm, addr, pte, mk_pte(page, prot));
76952
76953 +#ifdef CONFIG_PAX_SEGMEXEC
76954 + pax_mirror_file_pte(vma, addr, page, ptl);
76955 +#endif
76956 +
76957 retval = 0;
76958 pte_unmap_unlock(pte, ptl);
76959 return retval;
76960 @@ -1560,10 +1571,22 @@ out:
76961 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
76962 struct page *page)
76963 {
76964 +
76965 +#ifdef CONFIG_PAX_SEGMEXEC
76966 + struct vm_area_struct *vma_m;
76967 +#endif
76968 +
76969 if (addr < vma->vm_start || addr >= vma->vm_end)
76970 return -EFAULT;
76971 if (!page_count(page))
76972 return -EINVAL;
76973 +
76974 +#ifdef CONFIG_PAX_SEGMEXEC
76975 + vma_m = pax_find_mirror_vma(vma);
76976 + if (vma_m)
76977 + vma_m->vm_flags |= VM_INSERTPAGE;
76978 +#endif
76979 +
76980 vma->vm_flags |= VM_INSERTPAGE;
76981 return insert_page(vma, addr, page, vma->vm_page_prot);
76982 }
76983 @@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
76984 unsigned long pfn)
76985 {
76986 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
76987 + BUG_ON(vma->vm_mirror);
76988
76989 if (addr < vma->vm_start || addr >= vma->vm_end)
76990 return -EFAULT;
76991 @@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
76992 copy_user_highpage(dst, src, va, vma);
76993 }
76994
76995 +#ifdef CONFIG_PAX_SEGMEXEC
76996 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
76997 +{
76998 + struct mm_struct *mm = vma->vm_mm;
76999 + spinlock_t *ptl;
77000 + pte_t *pte, entry;
77001 +
77002 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
77003 + entry = *pte;
77004 + if (!pte_present(entry)) {
77005 + if (!pte_none(entry)) {
77006 + BUG_ON(pte_file(entry));
77007 + free_swap_and_cache(pte_to_swp_entry(entry));
77008 + pte_clear_not_present_full(mm, address, pte, 0);
77009 + }
77010 + } else {
77011 + struct page *page;
77012 +
77013 + flush_cache_page(vma, address, pte_pfn(entry));
77014 + entry = ptep_clear_flush(vma, address, pte);
77015 + BUG_ON(pte_dirty(entry));
77016 + page = vm_normal_page(vma, address, entry);
77017 + if (page) {
77018 + update_hiwater_rss(mm);
77019 + if (PageAnon(page))
77020 + dec_mm_counter(mm, anon_rss);
77021 + else
77022 + dec_mm_counter(mm, file_rss);
77023 + page_remove_rmap(page);
77024 + page_cache_release(page);
77025 + }
77026 + }
77027 + pte_unmap_unlock(pte, ptl);
77028 +}
77029 +
77030 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
77031 + *
77032 + * the ptl of the lower mapped page is held on entry and is not released on exit
77033 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
77034 + */
77035 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
77036 +{
77037 + struct mm_struct *mm = vma->vm_mm;
77038 + unsigned long address_m;
77039 + spinlock_t *ptl_m;
77040 + struct vm_area_struct *vma_m;
77041 + pmd_t *pmd_m;
77042 + pte_t *pte_m, entry_m;
77043 +
77044 + BUG_ON(!page_m || !PageAnon(page_m));
77045 +
77046 + vma_m = pax_find_mirror_vma(vma);
77047 + if (!vma_m)
77048 + return;
77049 +
77050 + BUG_ON(!PageLocked(page_m));
77051 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
77052 + address_m = address + SEGMEXEC_TASK_SIZE;
77053 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
77054 + pte_m = pte_offset_map_nested(pmd_m, address_m);
77055 + ptl_m = pte_lockptr(mm, pmd_m);
77056 + if (ptl != ptl_m) {
77057 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
77058 + if (!pte_none(*pte_m))
77059 + goto out;
77060 + }
77061 +
77062 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
77063 + page_cache_get(page_m);
77064 + page_add_anon_rmap(page_m, vma_m, address_m);
77065 + inc_mm_counter(mm, anon_rss);
77066 + set_pte_at(mm, address_m, pte_m, entry_m);
77067 + update_mmu_cache(vma_m, address_m, entry_m);
77068 +out:
77069 + if (ptl != ptl_m)
77070 + spin_unlock(ptl_m);
77071 + pte_unmap_nested(pte_m);
77072 + unlock_page(page_m);
77073 +}
77074 +
77075 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
77076 +{
77077 + struct mm_struct *mm = vma->vm_mm;
77078 + unsigned long address_m;
77079 + spinlock_t *ptl_m;
77080 + struct vm_area_struct *vma_m;
77081 + pmd_t *pmd_m;
77082 + pte_t *pte_m, entry_m;
77083 +
77084 + BUG_ON(!page_m || PageAnon(page_m));
77085 +
77086 + vma_m = pax_find_mirror_vma(vma);
77087 + if (!vma_m)
77088 + return;
77089 +
77090 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
77091 + address_m = address + SEGMEXEC_TASK_SIZE;
77092 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
77093 + pte_m = pte_offset_map_nested(pmd_m, address_m);
77094 + ptl_m = pte_lockptr(mm, pmd_m);
77095 + if (ptl != ptl_m) {
77096 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
77097 + if (!pte_none(*pte_m))
77098 + goto out;
77099 + }
77100 +
77101 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
77102 + page_cache_get(page_m);
77103 + page_add_file_rmap(page_m);
77104 + inc_mm_counter(mm, file_rss);
77105 + set_pte_at(mm, address_m, pte_m, entry_m);
77106 + update_mmu_cache(vma_m, address_m, entry_m);
77107 +out:
77108 + if (ptl != ptl_m)
77109 + spin_unlock(ptl_m);
77110 + pte_unmap_nested(pte_m);
77111 +}
77112 +
77113 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
77114 +{
77115 + struct mm_struct *mm = vma->vm_mm;
77116 + unsigned long address_m;
77117 + spinlock_t *ptl_m;
77118 + struct vm_area_struct *vma_m;
77119 + pmd_t *pmd_m;
77120 + pte_t *pte_m, entry_m;
77121 +
77122 + vma_m = pax_find_mirror_vma(vma);
77123 + if (!vma_m)
77124 + return;
77125 +
77126 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
77127 + address_m = address + SEGMEXEC_TASK_SIZE;
77128 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
77129 + pte_m = pte_offset_map_nested(pmd_m, address_m);
77130 + ptl_m = pte_lockptr(mm, pmd_m);
77131 + if (ptl != ptl_m) {
77132 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
77133 + if (!pte_none(*pte_m))
77134 + goto out;
77135 + }
77136 +
77137 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
77138 + set_pte_at(mm, address_m, pte_m, entry_m);
77139 +out:
77140 + if (ptl != ptl_m)
77141 + spin_unlock(ptl_m);
77142 + pte_unmap_nested(pte_m);
77143 +}
77144 +
77145 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
77146 +{
77147 + struct page *page_m;
77148 + pte_t entry;
77149 +
77150 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
77151 + goto out;
77152 +
77153 + entry = *pte;
77154 + page_m = vm_normal_page(vma, address, entry);
77155 + if (!page_m)
77156 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
77157 + else if (PageAnon(page_m)) {
77158 + if (pax_find_mirror_vma(vma)) {
77159 + pte_unmap_unlock(pte, ptl);
77160 + lock_page(page_m);
77161 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
77162 + if (pte_same(entry, *pte))
77163 + pax_mirror_anon_pte(vma, address, page_m, ptl);
77164 + else
77165 + unlock_page(page_m);
77166 + }
77167 + } else
77168 + pax_mirror_file_pte(vma, address, page_m, ptl);
77169 +
77170 +out:
77171 + pte_unmap_unlock(pte, ptl);
77172 +}
77173 +#endif
77174 +
77175 /*
77176 * This routine handles present pages, when users try to write
77177 * to a shared page. It is done by copying the page to a new address
77178 @@ -2156,6 +2360,12 @@ gotten:
77179 */
77180 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
77181 if (likely(pte_same(*page_table, orig_pte))) {
77182 +
77183 +#ifdef CONFIG_PAX_SEGMEXEC
77184 + if (pax_find_mirror_vma(vma))
77185 + BUG_ON(!trylock_page(new_page));
77186 +#endif
77187 +
77188 if (old_page) {
77189 if (!PageAnon(old_page)) {
77190 dec_mm_counter(mm, file_rss);
77191 @@ -2207,6 +2417,10 @@ gotten:
77192 page_remove_rmap(old_page);
77193 }
77194
77195 +#ifdef CONFIG_PAX_SEGMEXEC
77196 + pax_mirror_anon_pte(vma, address, new_page, ptl);
77197 +#endif
77198 +
77199 /* Free the old page.. */
77200 new_page = old_page;
77201 ret |= VM_FAULT_WRITE;
77202 @@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
77203 swap_free(entry);
77204 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
77205 try_to_free_swap(page);
77206 +
77207 +#ifdef CONFIG_PAX_SEGMEXEC
77208 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
77209 +#endif
77210 +
77211 unlock_page(page);
77212
77213 if (flags & FAULT_FLAG_WRITE) {
77214 @@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
77215
77216 /* No need to invalidate - it was non-present before */
77217 update_mmu_cache(vma, address, pte);
77218 +
77219 +#ifdef CONFIG_PAX_SEGMEXEC
77220 + pax_mirror_anon_pte(vma, address, page, ptl);
77221 +#endif
77222 +
77223 unlock:
77224 pte_unmap_unlock(page_table, ptl);
77225 out:
77226 @@ -2632,40 +2856,6 @@ out_release:
77227 }
77228
77229 /*
77230 - * This is like a special single-page "expand_{down|up}wards()",
77231 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
77232 - * doesn't hit another vma.
77233 - */
77234 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
77235 -{
77236 - address &= PAGE_MASK;
77237 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
77238 - struct vm_area_struct *prev = vma->vm_prev;
77239 -
77240 - /*
77241 - * Is there a mapping abutting this one below?
77242 - *
77243 - * That's only ok if it's the same stack mapping
77244 - * that has gotten split..
77245 - */
77246 - if (prev && prev->vm_end == address)
77247 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
77248 -
77249 - expand_stack(vma, address - PAGE_SIZE);
77250 - }
77251 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
77252 - struct vm_area_struct *next = vma->vm_next;
77253 -
77254 - /* As VM_GROWSDOWN but s/below/above/ */
77255 - if (next && next->vm_start == address + PAGE_SIZE)
77256 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
77257 -
77258 - expand_upwards(vma, address + PAGE_SIZE);
77259 - }
77260 - return 0;
77261 -}
77262 -
77263 -/*
77264 * We enter with non-exclusive mmap_sem (to exclude vma changes,
77265 * but allow concurrent faults), and pte mapped but not yet locked.
77266 * We return with mmap_sem still held, but pte unmapped and unlocked.
77267 @@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
77268 unsigned long address, pte_t *page_table, pmd_t *pmd,
77269 unsigned int flags)
77270 {
77271 - struct page *page;
77272 + struct page *page = NULL;
77273 spinlock_t *ptl;
77274 pte_t entry;
77275
77276 - pte_unmap(page_table);
77277 -
77278 - /* Check if we need to add a guard page to the stack */
77279 - if (check_stack_guard_page(vma, address) < 0)
77280 - return VM_FAULT_SIGBUS;
77281 -
77282 - /* Use the zero-page for reads */
77283 if (!(flags & FAULT_FLAG_WRITE)) {
77284 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
77285 vma->vm_page_prot));
77286 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
77287 + ptl = pte_lockptr(mm, pmd);
77288 + spin_lock(ptl);
77289 if (!pte_none(*page_table))
77290 goto unlock;
77291 goto setpte;
77292 }
77293
77294 /* Allocate our own private page. */
77295 + pte_unmap(page_table);
77296 +
77297 if (unlikely(anon_vma_prepare(vma)))
77298 goto oom;
77299 page = alloc_zeroed_user_highpage_movable(vma, address);
77300 @@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
77301 if (!pte_none(*page_table))
77302 goto release;
77303
77304 +#ifdef CONFIG_PAX_SEGMEXEC
77305 + if (pax_find_mirror_vma(vma))
77306 + BUG_ON(!trylock_page(page));
77307 +#endif
77308 +
77309 inc_mm_counter(mm, anon_rss);
77310 page_add_new_anon_rmap(page, vma, address);
77311 setpte:
77312 @@ -2720,6 +2911,12 @@ setpte:
77313
77314 /* No need to invalidate - it was non-present before */
77315 update_mmu_cache(vma, address, entry);
77316 +
77317 +#ifdef CONFIG_PAX_SEGMEXEC
77318 + if (page)
77319 + pax_mirror_anon_pte(vma, address, page, ptl);
77320 +#endif
77321 +
77322 unlock:
77323 pte_unmap_unlock(page_table, ptl);
77324 return 0;
77325 @@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77326 */
77327 /* Only go through if we didn't race with anybody else... */
77328 if (likely(pte_same(*page_table, orig_pte))) {
77329 +
77330 +#ifdef CONFIG_PAX_SEGMEXEC
77331 + if (anon && pax_find_mirror_vma(vma))
77332 + BUG_ON(!trylock_page(page));
77333 +#endif
77334 +
77335 flush_icache_page(vma, page);
77336 entry = mk_pte(page, vma->vm_page_prot);
77337 if (flags & FAULT_FLAG_WRITE)
77338 @@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77339
77340 /* no need to invalidate: a not-present page won't be cached */
77341 update_mmu_cache(vma, address, entry);
77342 +
77343 +#ifdef CONFIG_PAX_SEGMEXEC
77344 + if (anon)
77345 + pax_mirror_anon_pte(vma, address, page, ptl);
77346 + else
77347 + pax_mirror_file_pte(vma, address, page, ptl);
77348 +#endif
77349 +
77350 } else {
77351 if (charged)
77352 mem_cgroup_uncharge_page(page);
77353 @@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struct mm_struct *mm,
77354 if (flags & FAULT_FLAG_WRITE)
77355 flush_tlb_page(vma, address);
77356 }
77357 +
77358 +#ifdef CONFIG_PAX_SEGMEXEC
77359 + pax_mirror_pte(vma, address, pte, pmd, ptl);
77360 + return 0;
77361 +#endif
77362 +
77363 unlock:
77364 pte_unmap_unlock(pte, ptl);
77365 return 0;
77366 @@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77367 pmd_t *pmd;
77368 pte_t *pte;
77369
77370 +#ifdef CONFIG_PAX_SEGMEXEC
77371 + struct vm_area_struct *vma_m;
77372 +#endif
77373 +
77374 __set_current_state(TASK_RUNNING);
77375
77376 count_vm_event(PGFAULT);
77377 @@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77378 if (unlikely(is_vm_hugetlb_page(vma)))
77379 return hugetlb_fault(mm, vma, address, flags);
77380
77381 +#ifdef CONFIG_PAX_SEGMEXEC
77382 + vma_m = pax_find_mirror_vma(vma);
77383 + if (vma_m) {
77384 + unsigned long address_m;
77385 + pgd_t *pgd_m;
77386 + pud_t *pud_m;
77387 + pmd_t *pmd_m;
77388 +
77389 + if (vma->vm_start > vma_m->vm_start) {
77390 + address_m = address;
77391 + address -= SEGMEXEC_TASK_SIZE;
77392 + vma = vma_m;
77393 + } else
77394 + address_m = address + SEGMEXEC_TASK_SIZE;
77395 +
77396 + pgd_m = pgd_offset(mm, address_m);
77397 + pud_m = pud_alloc(mm, pgd_m, address_m);
77398 + if (!pud_m)
77399 + return VM_FAULT_OOM;
77400 + pmd_m = pmd_alloc(mm, pud_m, address_m);
77401 + if (!pmd_m)
77402 + return VM_FAULT_OOM;
77403 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
77404 + return VM_FAULT_OOM;
77405 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
77406 + }
77407 +#endif
77408 +
77409 pgd = pgd_offset(mm, address);
77410 pud = pud_alloc(mm, pgd, address);
77411 if (!pud)
77412 @@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
77413 gate_vma.vm_start = FIXADDR_USER_START;
77414 gate_vma.vm_end = FIXADDR_USER_END;
77415 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
77416 - gate_vma.vm_page_prot = __P101;
77417 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
77418 /*
77419 * Make sure the vDSO gets into every core dump.
77420 * Dumping its contents makes post-mortem fully interpretable later
77421 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
77422 index 3c6e3e2..ad9871c 100644
77423 --- a/mm/mempolicy.c
77424 +++ b/mm/mempolicy.c
77425 @@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
77426 struct vm_area_struct *next;
77427 int err;
77428
77429 +#ifdef CONFIG_PAX_SEGMEXEC
77430 + struct vm_area_struct *vma_m;
77431 +#endif
77432 +
77433 err = 0;
77434 for (; vma && vma->vm_start < end; vma = next) {
77435 next = vma->vm_next;
77436 @@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
77437 err = policy_vma(vma, new);
77438 if (err)
77439 break;
77440 +
77441 +#ifdef CONFIG_PAX_SEGMEXEC
77442 + vma_m = pax_find_mirror_vma(vma);
77443 + if (vma_m) {
77444 + err = policy_vma(vma_m, new);
77445 + if (err)
77446 + break;
77447 + }
77448 +#endif
77449 +
77450 }
77451 return err;
77452 }
77453 @@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start, unsigned long len,
77454
77455 if (end < start)
77456 return -EINVAL;
77457 +
77458 +#ifdef CONFIG_PAX_SEGMEXEC
77459 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
77460 + if (end > SEGMEXEC_TASK_SIZE)
77461 + return -EINVAL;
77462 + } else
77463 +#endif
77464 +
77465 + if (end > TASK_SIZE)
77466 + return -EINVAL;
77467 +
77468 if (end == start)
77469 return 0;
77470
77471 @@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
77472 if (!mm)
77473 return -EINVAL;
77474
77475 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
77476 + if (mm != current->mm &&
77477 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
77478 + err = -EPERM;
77479 + goto out;
77480 + }
77481 +#endif
77482 +
77483 /*
77484 * Check if this process has the right to modify the specified
77485 * process. The right exists if the process has administrative
77486 @@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
77487 rcu_read_lock();
77488 tcred = __task_cred(task);
77489 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
77490 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
77491 - !capable(CAP_SYS_NICE)) {
77492 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
77493 rcu_read_unlock();
77494 err = -EPERM;
77495 goto out;
77496 @@ -2396,7 +2428,7 @@ int show_numa_map(struct seq_file *m, void *v)
77497
77498 if (file) {
77499 seq_printf(m, " file=");
77500 - seq_path(m, &file->f_path, "\n\t= ");
77501 + seq_path(m, &file->f_path, "\n\t\\= ");
77502 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
77503 seq_printf(m, " heap");
77504 } else if (vma->vm_start <= mm->start_stack &&
77505 diff --git a/mm/migrate.c b/mm/migrate.c
77506 index aaca868..2ebecdc 100644
77507 --- a/mm/migrate.c
77508 +++ b/mm/migrate.c
77509 @@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
77510 unsigned long chunk_start;
77511 int err;
77512
77513 + pax_track_stack();
77514 +
77515 task_nodes = cpuset_mems_allowed(task);
77516
77517 err = -ENOMEM;
77518 @@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
77519 if (!mm)
77520 return -EINVAL;
77521
77522 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
77523 + if (mm != current->mm &&
77524 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
77525 + err = -EPERM;
77526 + goto out;
77527 + }
77528 +#endif
77529 +
77530 /*
77531 * Check if this process has the right to modify the specified
77532 * process. The right exists if the process has administrative
77533 @@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
77534 rcu_read_lock();
77535 tcred = __task_cred(task);
77536 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
77537 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
77538 - !capable(CAP_SYS_NICE)) {
77539 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
77540 rcu_read_unlock();
77541 err = -EPERM;
77542 goto out;
77543 diff --git a/mm/mlock.c b/mm/mlock.c
77544 index 2d846cf..98134d2 100644
77545 --- a/mm/mlock.c
77546 +++ b/mm/mlock.c
77547 @@ -13,6 +13,7 @@
77548 #include <linux/pagemap.h>
77549 #include <linux/mempolicy.h>
77550 #include <linux/syscalls.h>
77551 +#include <linux/security.h>
77552 #include <linux/sched.h>
77553 #include <linux/module.h>
77554 #include <linux/rmap.h>
77555 @@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
77556 }
77557 }
77558
77559 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
77560 -{
77561 - return (vma->vm_flags & VM_GROWSDOWN) &&
77562 - (vma->vm_start == addr) &&
77563 - !vma_stack_continue(vma->vm_prev, addr);
77564 -}
77565 -
77566 /**
77567 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
77568 * @vma: target vma
77569 @@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
77570 if (vma->vm_flags & VM_WRITE)
77571 gup_flags |= FOLL_WRITE;
77572
77573 - /* We don't try to access the guard page of a stack vma */
77574 - if (stack_guard_page(vma, start)) {
77575 - addr += PAGE_SIZE;
77576 - nr_pages--;
77577 - }
77578 -
77579 while (nr_pages > 0) {
77580 int i;
77581
77582 @@ -440,7 +428,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
77583 {
77584 unsigned long nstart, end, tmp;
77585 struct vm_area_struct * vma, * prev;
77586 - int error;
77587 + int error = -EINVAL;
77588
77589 len = PAGE_ALIGN(len);
77590 end = start + len;
77591 @@ -448,6 +436,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
77592 return -EINVAL;
77593 if (end == start)
77594 return 0;
77595 + if (end > TASK_SIZE)
77596 + return -EINVAL;
77597 +
77598 vma = find_vma_prev(current->mm, start, &prev);
77599 if (!vma || vma->vm_start > start)
77600 return -ENOMEM;
77601 @@ -458,6 +449,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
77602 for (nstart = start ; ; ) {
77603 unsigned int newflags;
77604
77605 +#ifdef CONFIG_PAX_SEGMEXEC
77606 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
77607 + break;
77608 +#endif
77609 +
77610 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
77611
77612 newflags = vma->vm_flags | VM_LOCKED;
77613 @@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
77614 lock_limit >>= PAGE_SHIFT;
77615
77616 /* check against resource limits */
77617 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
77618 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
77619 error = do_mlock(start, len, 1);
77620 up_write(&current->mm->mmap_sem);
77621 @@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
77622 static int do_mlockall(int flags)
77623 {
77624 struct vm_area_struct * vma, * prev = NULL;
77625 - unsigned int def_flags = 0;
77626
77627 if (flags & MCL_FUTURE)
77628 - def_flags = VM_LOCKED;
77629 - current->mm->def_flags = def_flags;
77630 + current->mm->def_flags |= VM_LOCKED;
77631 + else
77632 + current->mm->def_flags &= ~VM_LOCKED;
77633 if (flags == MCL_FUTURE)
77634 goto out;
77635
77636 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
77637 - unsigned int newflags;
77638 + unsigned long newflags;
77639
77640 +#ifdef CONFIG_PAX_SEGMEXEC
77641 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
77642 + break;
77643 +#endif
77644 +
77645 + BUG_ON(vma->vm_end > TASK_SIZE);
77646 newflags = vma->vm_flags | VM_LOCKED;
77647 if (!(flags & MCL_CURRENT))
77648 newflags &= ~VM_LOCKED;
77649 @@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
77650 lock_limit >>= PAGE_SHIFT;
77651
77652 ret = -ENOMEM;
77653 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
77654 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
77655 capable(CAP_IPC_LOCK))
77656 ret = do_mlockall(flags);
77657 diff --git a/mm/mmap.c b/mm/mmap.c
77658 index 4b80cbf..c5ce1df 100644
77659 --- a/mm/mmap.c
77660 +++ b/mm/mmap.c
77661 @@ -45,6 +45,16 @@
77662 #define arch_rebalance_pgtables(addr, len) (addr)
77663 #endif
77664
77665 +static inline void verify_mm_writelocked(struct mm_struct *mm)
77666 +{
77667 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
77668 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
77669 + up_read(&mm->mmap_sem);
77670 + BUG();
77671 + }
77672 +#endif
77673 +}
77674 +
77675 static void unmap_region(struct mm_struct *mm,
77676 struct vm_area_struct *vma, struct vm_area_struct *prev,
77677 unsigned long start, unsigned long end);
77678 @@ -70,22 +80,32 @@ static void unmap_region(struct mm_struct *mm,
77679 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
77680 *
77681 */
77682 -pgprot_t protection_map[16] = {
77683 +pgprot_t protection_map[16] __read_only = {
77684 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
77685 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
77686 };
77687
77688 pgprot_t vm_get_page_prot(unsigned long vm_flags)
77689 {
77690 - return __pgprot(pgprot_val(protection_map[vm_flags &
77691 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
77692 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
77693 pgprot_val(arch_vm_get_page_prot(vm_flags)));
77694 +
77695 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
77696 + if (!nx_enabled &&
77697 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
77698 + (vm_flags & (VM_READ | VM_WRITE)))
77699 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
77700 +#endif
77701 +
77702 + return prot;
77703 }
77704 EXPORT_SYMBOL(vm_get_page_prot);
77705
77706 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
77707 int sysctl_overcommit_ratio = 50; /* default is 50% */
77708 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
77709 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
77710 struct percpu_counter vm_committed_as;
77711
77712 /*
77713 @@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
77714 struct vm_area_struct *next = vma->vm_next;
77715
77716 might_sleep();
77717 + BUG_ON(vma->vm_mirror);
77718 if (vma->vm_ops && vma->vm_ops->close)
77719 vma->vm_ops->close(vma);
77720 if (vma->vm_file) {
77721 @@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
77722 * not page aligned -Ram Gupta
77723 */
77724 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
77725 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
77726 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
77727 (mm->end_data - mm->start_data) > rlim)
77728 goto out;
77729 @@ -704,6 +726,12 @@ static int
77730 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
77731 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
77732 {
77733 +
77734 +#ifdef CONFIG_PAX_SEGMEXEC
77735 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
77736 + return 0;
77737 +#endif
77738 +
77739 if (is_mergeable_vma(vma, file, vm_flags) &&
77740 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
77741 if (vma->vm_pgoff == vm_pgoff)
77742 @@ -723,6 +751,12 @@ static int
77743 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
77744 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
77745 {
77746 +
77747 +#ifdef CONFIG_PAX_SEGMEXEC
77748 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
77749 + return 0;
77750 +#endif
77751 +
77752 if (is_mergeable_vma(vma, file, vm_flags) &&
77753 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
77754 pgoff_t vm_pglen;
77755 @@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
77756 struct vm_area_struct *vma_merge(struct mm_struct *mm,
77757 struct vm_area_struct *prev, unsigned long addr,
77758 unsigned long end, unsigned long vm_flags,
77759 - struct anon_vma *anon_vma, struct file *file,
77760 + struct anon_vma *anon_vma, struct file *file,
77761 pgoff_t pgoff, struct mempolicy *policy)
77762 {
77763 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
77764 struct vm_area_struct *area, *next;
77765
77766 +#ifdef CONFIG_PAX_SEGMEXEC
77767 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
77768 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
77769 +
77770 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
77771 +#endif
77772 +
77773 /*
77774 * We later require that vma->vm_flags == vm_flags,
77775 * so this tests vma->vm_flags & VM_SPECIAL, too.
77776 @@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
77777 if (next && next->vm_end == end) /* cases 6, 7, 8 */
77778 next = next->vm_next;
77779
77780 +#ifdef CONFIG_PAX_SEGMEXEC
77781 + if (prev)
77782 + prev_m = pax_find_mirror_vma(prev);
77783 + if (area)
77784 + area_m = pax_find_mirror_vma(area);
77785 + if (next)
77786 + next_m = pax_find_mirror_vma(next);
77787 +#endif
77788 +
77789 /*
77790 * Can it merge with the predecessor?
77791 */
77792 @@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
77793 /* cases 1, 6 */
77794 vma_adjust(prev, prev->vm_start,
77795 next->vm_end, prev->vm_pgoff, NULL);
77796 - } else /* cases 2, 5, 7 */
77797 +
77798 +#ifdef CONFIG_PAX_SEGMEXEC
77799 + if (prev_m)
77800 + vma_adjust(prev_m, prev_m->vm_start,
77801 + next_m->vm_end, prev_m->vm_pgoff, NULL);
77802 +#endif
77803 +
77804 + } else { /* cases 2, 5, 7 */
77805 vma_adjust(prev, prev->vm_start,
77806 end, prev->vm_pgoff, NULL);
77807 +
77808 +#ifdef CONFIG_PAX_SEGMEXEC
77809 + if (prev_m)
77810 + vma_adjust(prev_m, prev_m->vm_start,
77811 + end_m, prev_m->vm_pgoff, NULL);
77812 +#endif
77813 +
77814 + }
77815 return prev;
77816 }
77817
77818 @@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
77819 mpol_equal(policy, vma_policy(next)) &&
77820 can_vma_merge_before(next, vm_flags,
77821 anon_vma, file, pgoff+pglen)) {
77822 - if (prev && addr < prev->vm_end) /* case 4 */
77823 + if (prev && addr < prev->vm_end) { /* case 4 */
77824 vma_adjust(prev, prev->vm_start,
77825 addr, prev->vm_pgoff, NULL);
77826 - else /* cases 3, 8 */
77827 +
77828 +#ifdef CONFIG_PAX_SEGMEXEC
77829 + if (prev_m)
77830 + vma_adjust(prev_m, prev_m->vm_start,
77831 + addr_m, prev_m->vm_pgoff, NULL);
77832 +#endif
77833 +
77834 + } else { /* cases 3, 8 */
77835 vma_adjust(area, addr, next->vm_end,
77836 next->vm_pgoff - pglen, NULL);
77837 +
77838 +#ifdef CONFIG_PAX_SEGMEXEC
77839 + if (area_m)
77840 + vma_adjust(area_m, addr_m, next_m->vm_end,
77841 + next_m->vm_pgoff - pglen, NULL);
77842 +#endif
77843 +
77844 + }
77845 return area;
77846 }
77847
77848 @@ -898,14 +978,11 @@ none:
77849 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
77850 struct file *file, long pages)
77851 {
77852 - const unsigned long stack_flags
77853 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
77854 -
77855 if (file) {
77856 mm->shared_vm += pages;
77857 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
77858 mm->exec_vm += pages;
77859 - } else if (flags & stack_flags)
77860 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
77861 mm->stack_vm += pages;
77862 if (flags & (VM_RESERVED|VM_IO))
77863 mm->reserved_vm += pages;
77864 @@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
77865 * (the exception is when the underlying filesystem is noexec
77866 * mounted, in which case we dont add PROT_EXEC.)
77867 */
77868 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
77869 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
77870 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
77871 prot |= PROT_EXEC;
77872
77873 @@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
77874 /* Obtain the address to map to. we verify (or select) it and ensure
77875 * that it represents a valid section of the address space.
77876 */
77877 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
77878 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
77879 if (addr & ~PAGE_MASK)
77880 return addr;
77881
77882 @@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
77883 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
77884 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
77885
77886 +#ifdef CONFIG_PAX_MPROTECT
77887 + if (mm->pax_flags & MF_PAX_MPROTECT) {
77888 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
77889 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
77890 + gr_log_rwxmmap(file);
77891 +
77892 +#ifdef CONFIG_PAX_EMUPLT
77893 + vm_flags &= ~VM_EXEC;
77894 +#else
77895 + return -EPERM;
77896 +#endif
77897 +
77898 + }
77899 +
77900 + if (!(vm_flags & VM_EXEC))
77901 + vm_flags &= ~VM_MAYEXEC;
77902 +#else
77903 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
77904 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
77905 +#endif
77906 + else
77907 + vm_flags &= ~VM_MAYWRITE;
77908 + }
77909 +#endif
77910 +
77911 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
77912 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
77913 + vm_flags &= ~VM_PAGEEXEC;
77914 +#endif
77915 +
77916 if (flags & MAP_LOCKED)
77917 if (!can_do_mlock())
77918 return -EPERM;
77919 @@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
77920 locked += mm->locked_vm;
77921 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
77922 lock_limit >>= PAGE_SHIFT;
77923 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
77924 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
77925 return -EAGAIN;
77926 }
77927 @@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
77928 if (error)
77929 return error;
77930
77931 + if (!gr_acl_handle_mmap(file, prot))
77932 + return -EACCES;
77933 +
77934 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
77935 }
77936 EXPORT_SYMBOL(do_mmap_pgoff);
77937 @@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
77938 */
77939 int vma_wants_writenotify(struct vm_area_struct *vma)
77940 {
77941 - unsigned int vm_flags = vma->vm_flags;
77942 + unsigned long vm_flags = vma->vm_flags;
77943
77944 /* If it was private or non-writable, the write bit is already clear */
77945 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
77946 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
77947 return 0;
77948
77949 /* The backer wishes to know when pages are first written to? */
77950 @@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
77951 unsigned long charged = 0;
77952 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
77953
77954 +#ifdef CONFIG_PAX_SEGMEXEC
77955 + struct vm_area_struct *vma_m = NULL;
77956 +#endif
77957 +
77958 + /*
77959 + * mm->mmap_sem is required to protect against another thread
77960 + * changing the mappings in case we sleep.
77961 + */
77962 + verify_mm_writelocked(mm);
77963 +
77964 /* Clear old maps */
77965 error = -ENOMEM;
77966 -munmap_back:
77967 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
77968 if (vma && vma->vm_start < addr + len) {
77969 if (do_munmap(mm, addr, len))
77970 return -ENOMEM;
77971 - goto munmap_back;
77972 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
77973 + BUG_ON(vma && vma->vm_start < addr + len);
77974 }
77975
77976 /* Check against address space limit. */
77977 @@ -1173,6 +1294,16 @@ munmap_back:
77978 goto unacct_error;
77979 }
77980
77981 +#ifdef CONFIG_PAX_SEGMEXEC
77982 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
77983 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
77984 + if (!vma_m) {
77985 + error = -ENOMEM;
77986 + goto free_vma;
77987 + }
77988 + }
77989 +#endif
77990 +
77991 vma->vm_mm = mm;
77992 vma->vm_start = addr;
77993 vma->vm_end = addr + len;
77994 @@ -1195,6 +1326,19 @@ munmap_back:
77995 error = file->f_op->mmap(file, vma);
77996 if (error)
77997 goto unmap_and_free_vma;
77998 +
77999 +#ifdef CONFIG_PAX_SEGMEXEC
78000 + if (vma_m && (vm_flags & VM_EXECUTABLE))
78001 + added_exe_file_vma(mm);
78002 +#endif
78003 +
78004 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
78005 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
78006 + vma->vm_flags |= VM_PAGEEXEC;
78007 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
78008 + }
78009 +#endif
78010 +
78011 if (vm_flags & VM_EXECUTABLE)
78012 added_exe_file_vma(mm);
78013
78014 @@ -1218,6 +1362,11 @@ munmap_back:
78015 vma_link(mm, vma, prev, rb_link, rb_parent);
78016 file = vma->vm_file;
78017
78018 +#ifdef CONFIG_PAX_SEGMEXEC
78019 + if (vma_m)
78020 + pax_mirror_vma(vma_m, vma);
78021 +#endif
78022 +
78023 /* Once vma denies write, undo our temporary denial count */
78024 if (correct_wcount)
78025 atomic_inc(&inode->i_writecount);
78026 @@ -1226,6 +1375,7 @@ out:
78027
78028 mm->total_vm += len >> PAGE_SHIFT;
78029 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
78030 + track_exec_limit(mm, addr, addr + len, vm_flags);
78031 if (vm_flags & VM_LOCKED) {
78032 /*
78033 * makes pages present; downgrades, drops, reacquires mmap_sem
78034 @@ -1248,6 +1398,12 @@ unmap_and_free_vma:
78035 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
78036 charged = 0;
78037 free_vma:
78038 +
78039 +#ifdef CONFIG_PAX_SEGMEXEC
78040 + if (vma_m)
78041 + kmem_cache_free(vm_area_cachep, vma_m);
78042 +#endif
78043 +
78044 kmem_cache_free(vm_area_cachep, vma);
78045 unacct_error:
78046 if (charged)
78047 @@ -1255,6 +1411,44 @@ unacct_error:
78048 return error;
78049 }
78050
78051 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
78052 +{
78053 + if (!vma) {
78054 +#ifdef CONFIG_STACK_GROWSUP
78055 + if (addr > sysctl_heap_stack_gap)
78056 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
78057 + else
78058 + vma = find_vma(current->mm, 0);
78059 + if (vma && (vma->vm_flags & VM_GROWSUP))
78060 + return false;
78061 +#endif
78062 + return true;
78063 + }
78064 +
78065 + if (addr + len > vma->vm_start)
78066 + return false;
78067 +
78068 + if (vma->vm_flags & VM_GROWSDOWN)
78069 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
78070 +#ifdef CONFIG_STACK_GROWSUP
78071 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
78072 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
78073 +#endif
78074 +
78075 + return true;
78076 +}
78077 +
78078 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
78079 +{
78080 + if (vma->vm_start < len)
78081 + return -ENOMEM;
78082 + if (!(vma->vm_flags & VM_GROWSDOWN))
78083 + return vma->vm_start - len;
78084 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
78085 + return vma->vm_start - len - sysctl_heap_stack_gap;
78086 + return -ENOMEM;
78087 +}
78088 +
78089 /* Get an address range which is currently unmapped.
78090 * For shmat() with addr=0.
78091 *
78092 @@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
78093 if (flags & MAP_FIXED)
78094 return addr;
78095
78096 +#ifdef CONFIG_PAX_RANDMMAP
78097 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
78098 +#endif
78099 +
78100 if (addr) {
78101 addr = PAGE_ALIGN(addr);
78102 - vma = find_vma(mm, addr);
78103 - if (TASK_SIZE - len >= addr &&
78104 - (!vma || addr + len <= vma->vm_start))
78105 - return addr;
78106 + if (TASK_SIZE - len >= addr) {
78107 + vma = find_vma(mm, addr);
78108 + if (check_heap_stack_gap(vma, addr, len))
78109 + return addr;
78110 + }
78111 }
78112 if (len > mm->cached_hole_size) {
78113 - start_addr = addr = mm->free_area_cache;
78114 + start_addr = addr = mm->free_area_cache;
78115 } else {
78116 - start_addr = addr = TASK_UNMAPPED_BASE;
78117 - mm->cached_hole_size = 0;
78118 + start_addr = addr = mm->mmap_base;
78119 + mm->cached_hole_size = 0;
78120 }
78121
78122 full_search:
78123 @@ -1303,34 +1502,40 @@ full_search:
78124 * Start a new search - just in case we missed
78125 * some holes.
78126 */
78127 - if (start_addr != TASK_UNMAPPED_BASE) {
78128 - addr = TASK_UNMAPPED_BASE;
78129 - start_addr = addr;
78130 + if (start_addr != mm->mmap_base) {
78131 + start_addr = addr = mm->mmap_base;
78132 mm->cached_hole_size = 0;
78133 goto full_search;
78134 }
78135 return -ENOMEM;
78136 }
78137 - if (!vma || addr + len <= vma->vm_start) {
78138 - /*
78139 - * Remember the place where we stopped the search:
78140 - */
78141 - mm->free_area_cache = addr + len;
78142 - return addr;
78143 - }
78144 + if (check_heap_stack_gap(vma, addr, len))
78145 + break;
78146 if (addr + mm->cached_hole_size < vma->vm_start)
78147 mm->cached_hole_size = vma->vm_start - addr;
78148 addr = vma->vm_end;
78149 }
78150 +
78151 + /*
78152 + * Remember the place where we stopped the search:
78153 + */
78154 + mm->free_area_cache = addr + len;
78155 + return addr;
78156 }
78157 #endif
78158
78159 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
78160 {
78161 +
78162 +#ifdef CONFIG_PAX_SEGMEXEC
78163 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
78164 + return;
78165 +#endif
78166 +
78167 /*
78168 * Is this a new hole at the lowest possible address?
78169 */
78170 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
78171 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
78172 mm->free_area_cache = addr;
78173 mm->cached_hole_size = ~0UL;
78174 }
78175 @@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78176 {
78177 struct vm_area_struct *vma;
78178 struct mm_struct *mm = current->mm;
78179 - unsigned long addr = addr0;
78180 + unsigned long base = mm->mmap_base, addr = addr0;
78181
78182 /* requested length too big for entire address space */
78183 if (len > TASK_SIZE)
78184 @@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78185 if (flags & MAP_FIXED)
78186 return addr;
78187
78188 +#ifdef CONFIG_PAX_RANDMMAP
78189 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
78190 +#endif
78191 +
78192 /* requesting a specific address */
78193 if (addr) {
78194 addr = PAGE_ALIGN(addr);
78195 - vma = find_vma(mm, addr);
78196 - if (TASK_SIZE - len >= addr &&
78197 - (!vma || addr + len <= vma->vm_start))
78198 - return addr;
78199 + if (TASK_SIZE - len >= addr) {
78200 + vma = find_vma(mm, addr);
78201 + if (check_heap_stack_gap(vma, addr, len))
78202 + return addr;
78203 + }
78204 }
78205
78206 /* check if free_area_cache is useful for us */
78207 @@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78208 /* make sure it can fit in the remaining address space */
78209 if (addr > len) {
78210 vma = find_vma(mm, addr-len);
78211 - if (!vma || addr <= vma->vm_start)
78212 + if (check_heap_stack_gap(vma, addr - len, len))
78213 /* remember the address as a hint for next time */
78214 return (mm->free_area_cache = addr-len);
78215 }
78216 @@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78217 * return with success:
78218 */
78219 vma = find_vma(mm, addr);
78220 - if (!vma || addr+len <= vma->vm_start)
78221 + if (check_heap_stack_gap(vma, addr, len))
78222 /* remember the address as a hint for next time */
78223 return (mm->free_area_cache = addr);
78224
78225 @@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78226 mm->cached_hole_size = vma->vm_start - addr;
78227
78228 /* try just below the current vma->vm_start */
78229 - addr = vma->vm_start-len;
78230 - } while (len < vma->vm_start);
78231 + addr = skip_heap_stack_gap(vma, len);
78232 + } while (!IS_ERR_VALUE(addr));
78233
78234 bottomup:
78235 /*
78236 @@ -1414,13 +1624,21 @@ bottomup:
78237 * can happen with large stack limits and large mmap()
78238 * allocations.
78239 */
78240 + mm->mmap_base = TASK_UNMAPPED_BASE;
78241 +
78242 +#ifdef CONFIG_PAX_RANDMMAP
78243 + if (mm->pax_flags & MF_PAX_RANDMMAP)
78244 + mm->mmap_base += mm->delta_mmap;
78245 +#endif
78246 +
78247 + mm->free_area_cache = mm->mmap_base;
78248 mm->cached_hole_size = ~0UL;
78249 - mm->free_area_cache = TASK_UNMAPPED_BASE;
78250 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
78251 /*
78252 * Restore the topdown base:
78253 */
78254 - mm->free_area_cache = mm->mmap_base;
78255 + mm->mmap_base = base;
78256 + mm->free_area_cache = base;
78257 mm->cached_hole_size = ~0UL;
78258
78259 return addr;
78260 @@ -1429,6 +1647,12 @@ bottomup:
78261
78262 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
78263 {
78264 +
78265 +#ifdef CONFIG_PAX_SEGMEXEC
78266 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
78267 + return;
78268 +#endif
78269 +
78270 /*
78271 * Is this a new hole at the highest possible address?
78272 */
78273 @@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
78274 mm->free_area_cache = addr;
78275
78276 /* dont allow allocations above current base */
78277 - if (mm->free_area_cache > mm->mmap_base)
78278 + if (mm->free_area_cache > mm->mmap_base) {
78279 mm->free_area_cache = mm->mmap_base;
78280 + mm->cached_hole_size = ~0UL;
78281 + }
78282 }
78283
78284 unsigned long
78285 @@ -1545,6 +1771,27 @@ out:
78286 return prev ? prev->vm_next : vma;
78287 }
78288
78289 +#ifdef CONFIG_PAX_SEGMEXEC
78290 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
78291 +{
78292 + struct vm_area_struct *vma_m;
78293 +
78294 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
78295 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
78296 + BUG_ON(vma->vm_mirror);
78297 + return NULL;
78298 + }
78299 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
78300 + vma_m = vma->vm_mirror;
78301 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
78302 + BUG_ON(vma->vm_file != vma_m->vm_file);
78303 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
78304 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
78305 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
78306 + return vma_m;
78307 +}
78308 +#endif
78309 +
78310 /*
78311 * Verify that the stack growth is acceptable and
78312 * update accounting. This is shared with both the
78313 @@ -1561,6 +1808,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
78314 return -ENOMEM;
78315
78316 /* Stack limit test */
78317 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
78318 if (size > rlim[RLIMIT_STACK].rlim_cur)
78319 return -ENOMEM;
78320
78321 @@ -1570,6 +1818,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
78322 unsigned long limit;
78323 locked = mm->locked_vm + grow;
78324 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
78325 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
78326 if (locked > limit && !capable(CAP_IPC_LOCK))
78327 return -ENOMEM;
78328 }
78329 @@ -1600,37 +1849,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
78330 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
78331 * vma is the last one with address > vma->vm_end. Have to extend vma.
78332 */
78333 +#ifndef CONFIG_IA64
78334 +static
78335 +#endif
78336 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
78337 {
78338 int error;
78339 + bool locknext;
78340
78341 if (!(vma->vm_flags & VM_GROWSUP))
78342 return -EFAULT;
78343
78344 + /* Also guard against wrapping around to address 0. */
78345 + if (address < PAGE_ALIGN(address+1))
78346 + address = PAGE_ALIGN(address+1);
78347 + else
78348 + return -ENOMEM;
78349 +
78350 /*
78351 * We must make sure the anon_vma is allocated
78352 * so that the anon_vma locking is not a noop.
78353 */
78354 if (unlikely(anon_vma_prepare(vma)))
78355 return -ENOMEM;
78356 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
78357 + if (locknext && anon_vma_prepare(vma->vm_next))
78358 + return -ENOMEM;
78359 anon_vma_lock(vma);
78360 + if (locknext)
78361 + anon_vma_lock(vma->vm_next);
78362
78363 /*
78364 * vma->vm_start/vm_end cannot change under us because the caller
78365 * is required to hold the mmap_sem in read mode. We need the
78366 - * anon_vma lock to serialize against concurrent expand_stacks.
78367 - * Also guard against wrapping around to address 0.
78368 + * anon_vma locks to serialize against concurrent expand_stacks
78369 + * and expand_upwards.
78370 */
78371 - if (address < PAGE_ALIGN(address+4))
78372 - address = PAGE_ALIGN(address+4);
78373 - else {
78374 - anon_vma_unlock(vma);
78375 - return -ENOMEM;
78376 - }
78377 error = 0;
78378
78379 /* Somebody else might have raced and expanded it already */
78380 - if (address > vma->vm_end) {
78381 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
78382 + error = -ENOMEM;
78383 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
78384 unsigned long size, grow;
78385
78386 size = address - vma->vm_start;
78387 @@ -1643,6 +1903,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
78388 vma->vm_end = address;
78389 }
78390 }
78391 + if (locknext)
78392 + anon_vma_unlock(vma->vm_next);
78393 anon_vma_unlock(vma);
78394 return error;
78395 }
78396 @@ -1655,6 +1917,8 @@ static int expand_downwards(struct vm_area_struct *vma,
78397 unsigned long address)
78398 {
78399 int error;
78400 + bool lockprev = false;
78401 + struct vm_area_struct *prev;
78402
78403 /*
78404 * We must make sure the anon_vma is allocated
78405 @@ -1668,6 +1932,15 @@ static int expand_downwards(struct vm_area_struct *vma,
78406 if (error)
78407 return error;
78408
78409 + prev = vma->vm_prev;
78410 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
78411 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
78412 +#endif
78413 + if (lockprev && anon_vma_prepare(prev))
78414 + return -ENOMEM;
78415 + if (lockprev)
78416 + anon_vma_lock(prev);
78417 +
78418 anon_vma_lock(vma);
78419
78420 /*
78421 @@ -1677,9 +1950,17 @@ static int expand_downwards(struct vm_area_struct *vma,
78422 */
78423
78424 /* Somebody else might have raced and expanded it already */
78425 - if (address < vma->vm_start) {
78426 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
78427 + error = -ENOMEM;
78428 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
78429 unsigned long size, grow;
78430
78431 +#ifdef CONFIG_PAX_SEGMEXEC
78432 + struct vm_area_struct *vma_m;
78433 +
78434 + vma_m = pax_find_mirror_vma(vma);
78435 +#endif
78436 +
78437 size = vma->vm_end - address;
78438 grow = (vma->vm_start - address) >> PAGE_SHIFT;
78439
78440 @@ -1689,10 +1970,22 @@ static int expand_downwards(struct vm_area_struct *vma,
78441 if (!error) {
78442 vma->vm_start = address;
78443 vma->vm_pgoff -= grow;
78444 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
78445 +
78446 +#ifdef CONFIG_PAX_SEGMEXEC
78447 + if (vma_m) {
78448 + vma_m->vm_start -= grow << PAGE_SHIFT;
78449 + vma_m->vm_pgoff -= grow;
78450 + }
78451 +#endif
78452 +
78453 +
78454 }
78455 }
78456 }
78457 anon_vma_unlock(vma);
78458 + if (lockprev)
78459 + anon_vma_unlock(prev);
78460 return error;
78461 }
78462
78463 @@ -1768,6 +2061,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
78464 do {
78465 long nrpages = vma_pages(vma);
78466
78467 +#ifdef CONFIG_PAX_SEGMEXEC
78468 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
78469 + vma = remove_vma(vma);
78470 + continue;
78471 + }
78472 +#endif
78473 +
78474 mm->total_vm -= nrpages;
78475 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
78476 vma = remove_vma(vma);
78477 @@ -1813,6 +2113,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
78478 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
78479 vma->vm_prev = NULL;
78480 do {
78481 +
78482 +#ifdef CONFIG_PAX_SEGMEXEC
78483 + if (vma->vm_mirror) {
78484 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
78485 + vma->vm_mirror->vm_mirror = NULL;
78486 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
78487 + vma->vm_mirror = NULL;
78488 + }
78489 +#endif
78490 +
78491 rb_erase(&vma->vm_rb, &mm->mm_rb);
78492 mm->map_count--;
78493 tail_vma = vma;
78494 @@ -1840,10 +2150,25 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
78495 struct mempolicy *pol;
78496 struct vm_area_struct *new;
78497
78498 +#ifdef CONFIG_PAX_SEGMEXEC
78499 + struct vm_area_struct *vma_m, *new_m = NULL;
78500 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
78501 +#endif
78502 +
78503 if (is_vm_hugetlb_page(vma) && (addr &
78504 ~(huge_page_mask(hstate_vma(vma)))))
78505 return -EINVAL;
78506
78507 +#ifdef CONFIG_PAX_SEGMEXEC
78508 + vma_m = pax_find_mirror_vma(vma);
78509 +
78510 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
78511 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
78512 + if (mm->map_count >= sysctl_max_map_count-1)
78513 + return -ENOMEM;
78514 + } else
78515 +#endif
78516 +
78517 if (mm->map_count >= sysctl_max_map_count)
78518 return -ENOMEM;
78519
78520 @@ -1851,6 +2176,16 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
78521 if (!new)
78522 return -ENOMEM;
78523
78524 +#ifdef CONFIG_PAX_SEGMEXEC
78525 + if (vma_m) {
78526 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
78527 + if (!new_m) {
78528 + kmem_cache_free(vm_area_cachep, new);
78529 + return -ENOMEM;
78530 + }
78531 + }
78532 +#endif
78533 +
78534 /* most fields are the same, copy all, and then fixup */
78535 *new = *vma;
78536
78537 @@ -1861,8 +2196,29 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
78538 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
78539 }
78540
78541 +#ifdef CONFIG_PAX_SEGMEXEC
78542 + if (vma_m) {
78543 + *new_m = *vma_m;
78544 + new_m->vm_mirror = new;
78545 + new->vm_mirror = new_m;
78546 +
78547 + if (new_below)
78548 + new_m->vm_end = addr_m;
78549 + else {
78550 + new_m->vm_start = addr_m;
78551 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
78552 + }
78553 + }
78554 +#endif
78555 +
78556 pol = mpol_dup(vma_policy(vma));
78557 if (IS_ERR(pol)) {
78558 +
78559 +#ifdef CONFIG_PAX_SEGMEXEC
78560 + if (new_m)
78561 + kmem_cache_free(vm_area_cachep, new_m);
78562 +#endif
78563 +
78564 kmem_cache_free(vm_area_cachep, new);
78565 return PTR_ERR(pol);
78566 }
78567 @@ -1883,6 +2239,28 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
78568 else
78569 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
78570
78571 +#ifdef CONFIG_PAX_SEGMEXEC
78572 + if (vma_m) {
78573 + mpol_get(pol);
78574 + vma_set_policy(new_m, pol);
78575 +
78576 + if (new_m->vm_file) {
78577 + get_file(new_m->vm_file);
78578 + if (vma_m->vm_flags & VM_EXECUTABLE)
78579 + added_exe_file_vma(mm);
78580 + }
78581 +
78582 + if (new_m->vm_ops && new_m->vm_ops->open)
78583 + new_m->vm_ops->open(new_m);
78584 +
78585 + if (new_below)
78586 + vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
78587 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
78588 + else
78589 + vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
78590 + }
78591 +#endif
78592 +
78593 return 0;
78594 }
78595
78596 @@ -1891,11 +2269,30 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
78597 * work. This now handles partial unmappings.
78598 * Jeremy Fitzhardinge <jeremy@goop.org>
78599 */
78600 +#ifdef CONFIG_PAX_SEGMEXEC
78601 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
78602 {
78603 + int ret = __do_munmap(mm, start, len);
78604 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
78605 + return ret;
78606 +
78607 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
78608 +}
78609 +
78610 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
78611 +#else
78612 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
78613 +#endif
78614 +{
78615 unsigned long end;
78616 struct vm_area_struct *vma, *prev, *last;
78617
78618 + /*
78619 + * mm->mmap_sem is required to protect against another thread
78620 + * changing the mappings in case we sleep.
78621 + */
78622 + verify_mm_writelocked(mm);
78623 +
78624 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
78625 return -EINVAL;
78626
78627 @@ -1959,6 +2356,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
78628 /* Fix up all other VM information */
78629 remove_vma_list(mm, vma);
78630
78631 + track_exec_limit(mm, start, end, 0UL);
78632 +
78633 return 0;
78634 }
78635
78636 @@ -1971,22 +2370,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
78637
78638 profile_munmap(addr);
78639
78640 +#ifdef CONFIG_PAX_SEGMEXEC
78641 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
78642 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
78643 + return -EINVAL;
78644 +#endif
78645 +
78646 down_write(&mm->mmap_sem);
78647 ret = do_munmap(mm, addr, len);
78648 up_write(&mm->mmap_sem);
78649 return ret;
78650 }
78651
78652 -static inline void verify_mm_writelocked(struct mm_struct *mm)
78653 -{
78654 -#ifdef CONFIG_DEBUG_VM
78655 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
78656 - WARN_ON(1);
78657 - up_read(&mm->mmap_sem);
78658 - }
78659 -#endif
78660 -}
78661 -
78662 /*
78663 * this is really a simplified "do_mmap". it only handles
78664 * anonymous maps. eventually we may be able to do some
78665 @@ -2000,6 +2395,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
78666 struct rb_node ** rb_link, * rb_parent;
78667 pgoff_t pgoff = addr >> PAGE_SHIFT;
78668 int error;
78669 + unsigned long charged;
78670
78671 len = PAGE_ALIGN(len);
78672 if (!len)
78673 @@ -2011,16 +2407,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
78674
78675 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
78676
78677 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
78678 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
78679 + flags &= ~VM_EXEC;
78680 +
78681 +#ifdef CONFIG_PAX_MPROTECT
78682 + if (mm->pax_flags & MF_PAX_MPROTECT)
78683 + flags &= ~VM_MAYEXEC;
78684 +#endif
78685 +
78686 + }
78687 +#endif
78688 +
78689 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
78690 if (error & ~PAGE_MASK)
78691 return error;
78692
78693 + charged = len >> PAGE_SHIFT;
78694 +
78695 /*
78696 * mlock MCL_FUTURE?
78697 */
78698 if (mm->def_flags & VM_LOCKED) {
78699 unsigned long locked, lock_limit;
78700 - locked = len >> PAGE_SHIFT;
78701 + locked = charged;
78702 locked += mm->locked_vm;
78703 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
78704 lock_limit >>= PAGE_SHIFT;
78705 @@ -2037,22 +2447,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
78706 /*
78707 * Clear old maps. this also does some error checking for us
78708 */
78709 - munmap_back:
78710 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
78711 if (vma && vma->vm_start < addr + len) {
78712 if (do_munmap(mm, addr, len))
78713 return -ENOMEM;
78714 - goto munmap_back;
78715 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
78716 + BUG_ON(vma && vma->vm_start < addr + len);
78717 }
78718
78719 /* Check against address space limits *after* clearing old maps... */
78720 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
78721 + if (!may_expand_vm(mm, charged))
78722 return -ENOMEM;
78723
78724 if (mm->map_count > sysctl_max_map_count)
78725 return -ENOMEM;
78726
78727 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
78728 + if (security_vm_enough_memory(charged))
78729 return -ENOMEM;
78730
78731 /* Can we just expand an old private anonymous mapping? */
78732 @@ -2066,7 +2476,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
78733 */
78734 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
78735 if (!vma) {
78736 - vm_unacct_memory(len >> PAGE_SHIFT);
78737 + vm_unacct_memory(charged);
78738 return -ENOMEM;
78739 }
78740
78741 @@ -2078,11 +2488,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
78742 vma->vm_page_prot = vm_get_page_prot(flags);
78743 vma_link(mm, vma, prev, rb_link, rb_parent);
78744 out:
78745 - mm->total_vm += len >> PAGE_SHIFT;
78746 + mm->total_vm += charged;
78747 if (flags & VM_LOCKED) {
78748 if (!mlock_vma_pages_range(vma, addr, addr + len))
78749 - mm->locked_vm += (len >> PAGE_SHIFT);
78750 + mm->locked_vm += charged;
78751 }
78752 + track_exec_limit(mm, addr, addr + len, flags);
78753 return addr;
78754 }
78755
78756 @@ -2129,8 +2540,10 @@ void exit_mmap(struct mm_struct *mm)
78757 * Walk the list again, actually closing and freeing it,
78758 * with preemption enabled, without holding any MM locks.
78759 */
78760 - while (vma)
78761 + while (vma) {
78762 + vma->vm_mirror = NULL;
78763 vma = remove_vma(vma);
78764 + }
78765
78766 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
78767 }
78768 @@ -2144,6 +2557,10 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
78769 struct vm_area_struct * __vma, * prev;
78770 struct rb_node ** rb_link, * rb_parent;
78771
78772 +#ifdef CONFIG_PAX_SEGMEXEC
78773 + struct vm_area_struct *vma_m = NULL;
78774 +#endif
78775 +
78776 /*
78777 * The vm_pgoff of a purely anonymous vma should be irrelevant
78778 * until its first write fault, when page's anon_vma and index
78779 @@ -2166,7 +2583,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
78780 if ((vma->vm_flags & VM_ACCOUNT) &&
78781 security_vm_enough_memory_mm(mm, vma_pages(vma)))
78782 return -ENOMEM;
78783 +
78784 +#ifdef CONFIG_PAX_SEGMEXEC
78785 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
78786 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
78787 + if (!vma_m)
78788 + return -ENOMEM;
78789 + }
78790 +#endif
78791 +
78792 vma_link(mm, vma, prev, rb_link, rb_parent);
78793 +
78794 +#ifdef CONFIG_PAX_SEGMEXEC
78795 + if (vma_m)
78796 + pax_mirror_vma(vma_m, vma);
78797 +#endif
78798 +
78799 return 0;
78800 }
78801
78802 @@ -2184,6 +2616,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
78803 struct rb_node **rb_link, *rb_parent;
78804 struct mempolicy *pol;
78805
78806 + BUG_ON(vma->vm_mirror);
78807 +
78808 /*
78809 * If anonymous vma has not yet been faulted, update new pgoff
78810 * to match new location, to increase its chance of merging.
78811 @@ -2227,6 +2661,35 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
78812 return new_vma;
78813 }
78814
78815 +#ifdef CONFIG_PAX_SEGMEXEC
78816 +void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
78817 +{
78818 + struct vm_area_struct *prev_m;
78819 + struct rb_node **rb_link_m, *rb_parent_m;
78820 + struct mempolicy *pol_m;
78821 +
78822 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
78823 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
78824 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
78825 + *vma_m = *vma;
78826 + pol_m = vma_policy(vma_m);
78827 + mpol_get(pol_m);
78828 + vma_set_policy(vma_m, pol_m);
78829 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
78830 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
78831 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
78832 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
78833 + if (vma_m->vm_file)
78834 + get_file(vma_m->vm_file);
78835 + if (vma_m->vm_ops && vma_m->vm_ops->open)
78836 + vma_m->vm_ops->open(vma_m);
78837 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
78838 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
78839 + vma_m->vm_mirror = vma;
78840 + vma->vm_mirror = vma_m;
78841 +}
78842 +#endif
78843 +
78844 /*
78845 * Return true if the calling process may expand its vm space by the passed
78846 * number of pages
78847 @@ -2237,7 +2700,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
78848 unsigned long lim;
78849
78850 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
78851 -
78852 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
78853 if (cur + npages > lim)
78854 return 0;
78855 return 1;
78856 @@ -2307,6 +2770,22 @@ int install_special_mapping(struct mm_struct *mm,
78857 vma->vm_start = addr;
78858 vma->vm_end = addr + len;
78859
78860 +#ifdef CONFIG_PAX_MPROTECT
78861 + if (mm->pax_flags & MF_PAX_MPROTECT) {
78862 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
78863 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
78864 + return -EPERM;
78865 + if (!(vm_flags & VM_EXEC))
78866 + vm_flags &= ~VM_MAYEXEC;
78867 +#else
78868 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
78869 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
78870 +#endif
78871 + else
78872 + vm_flags &= ~VM_MAYWRITE;
78873 + }
78874 +#endif
78875 +
78876 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
78877 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
78878
78879 diff --git a/mm/mprotect.c b/mm/mprotect.c
78880 index 1737c7e..c7faeb4 100644
78881 --- a/mm/mprotect.c
78882 +++ b/mm/mprotect.c
78883 @@ -24,10 +24,16 @@
78884 #include <linux/mmu_notifier.h>
78885 #include <linux/migrate.h>
78886 #include <linux/perf_event.h>
78887 +
78888 +#ifdef CONFIG_PAX_MPROTECT
78889 +#include <linux/elf.h>
78890 +#endif
78891 +
78892 #include <asm/uaccess.h>
78893 #include <asm/pgtable.h>
78894 #include <asm/cacheflush.h>
78895 #include <asm/tlbflush.h>
78896 +#include <asm/mmu_context.h>
78897
78898 #ifndef pgprot_modify
78899 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
78900 @@ -132,6 +138,48 @@ static void change_protection(struct vm_area_struct *vma,
78901 flush_tlb_range(vma, start, end);
78902 }
78903
78904 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
78905 +/* called while holding the mmap semaphor for writing except stack expansion */
78906 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
78907 +{
78908 + unsigned long oldlimit, newlimit = 0UL;
78909 +
78910 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
78911 + return;
78912 +
78913 + spin_lock(&mm->page_table_lock);
78914 + oldlimit = mm->context.user_cs_limit;
78915 + if ((prot & VM_EXEC) && oldlimit < end)
78916 + /* USER_CS limit moved up */
78917 + newlimit = end;
78918 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
78919 + /* USER_CS limit moved down */
78920 + newlimit = start;
78921 +
78922 + if (newlimit) {
78923 + mm->context.user_cs_limit = newlimit;
78924 +
78925 +#ifdef CONFIG_SMP
78926 + wmb();
78927 + cpus_clear(mm->context.cpu_user_cs_mask);
78928 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
78929 +#endif
78930 +
78931 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
78932 + }
78933 + spin_unlock(&mm->page_table_lock);
78934 + if (newlimit == end) {
78935 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
78936 +
78937 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
78938 + if (is_vm_hugetlb_page(vma))
78939 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
78940 + else
78941 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
78942 + }
78943 +}
78944 +#endif
78945 +
78946 int
78947 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
78948 unsigned long start, unsigned long end, unsigned long newflags)
78949 @@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
78950 int error;
78951 int dirty_accountable = 0;
78952
78953 +#ifdef CONFIG_PAX_SEGMEXEC
78954 + struct vm_area_struct *vma_m = NULL;
78955 + unsigned long start_m, end_m;
78956 +
78957 + start_m = start + SEGMEXEC_TASK_SIZE;
78958 + end_m = end + SEGMEXEC_TASK_SIZE;
78959 +#endif
78960 +
78961 if (newflags == oldflags) {
78962 *pprev = vma;
78963 return 0;
78964 }
78965
78966 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
78967 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
78968 +
78969 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
78970 + return -ENOMEM;
78971 +
78972 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
78973 + return -ENOMEM;
78974 + }
78975 +
78976 /*
78977 * If we make a private mapping writable we increase our commit;
78978 * but (without finer accounting) cannot reduce our commit if we
78979 @@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
78980 }
78981 }
78982
78983 +#ifdef CONFIG_PAX_SEGMEXEC
78984 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
78985 + if (start != vma->vm_start) {
78986 + error = split_vma(mm, vma, start, 1);
78987 + if (error)
78988 + goto fail;
78989 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
78990 + *pprev = (*pprev)->vm_next;
78991 + }
78992 +
78993 + if (end != vma->vm_end) {
78994 + error = split_vma(mm, vma, end, 0);
78995 + if (error)
78996 + goto fail;
78997 + }
78998 +
78999 + if (pax_find_mirror_vma(vma)) {
79000 + error = __do_munmap(mm, start_m, end_m - start_m);
79001 + if (error)
79002 + goto fail;
79003 + } else {
79004 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
79005 + if (!vma_m) {
79006 + error = -ENOMEM;
79007 + goto fail;
79008 + }
79009 + vma->vm_flags = newflags;
79010 + pax_mirror_vma(vma_m, vma);
79011 + }
79012 + }
79013 +#endif
79014 +
79015 /*
79016 * First try to merge with previous and/or next vma.
79017 */
79018 @@ -195,9 +293,21 @@ success:
79019 * vm_flags and vm_page_prot are protected by the mmap_sem
79020 * held in write mode.
79021 */
79022 +
79023 +#ifdef CONFIG_PAX_SEGMEXEC
79024 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
79025 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
79026 +#endif
79027 +
79028 vma->vm_flags = newflags;
79029 +
79030 +#ifdef CONFIG_PAX_MPROTECT
79031 + if (mm->binfmt && mm->binfmt->handle_mprotect)
79032 + mm->binfmt->handle_mprotect(vma, newflags);
79033 +#endif
79034 +
79035 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
79036 - vm_get_page_prot(newflags));
79037 + vm_get_page_prot(vma->vm_flags));
79038
79039 if (vma_wants_writenotify(vma)) {
79040 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
79041 @@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79042 end = start + len;
79043 if (end <= start)
79044 return -ENOMEM;
79045 +
79046 +#ifdef CONFIG_PAX_SEGMEXEC
79047 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
79048 + if (end > SEGMEXEC_TASK_SIZE)
79049 + return -EINVAL;
79050 + } else
79051 +#endif
79052 +
79053 + if (end > TASK_SIZE)
79054 + return -EINVAL;
79055 +
79056 if (!arch_validate_prot(prot))
79057 return -EINVAL;
79058
79059 @@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79060 /*
79061 * Does the application expect PROT_READ to imply PROT_EXEC:
79062 */
79063 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
79064 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
79065 prot |= PROT_EXEC;
79066
79067 vm_flags = calc_vm_prot_bits(prot);
79068 @@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79069 if (start > vma->vm_start)
79070 prev = vma;
79071
79072 +#ifdef CONFIG_PAX_MPROTECT
79073 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
79074 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
79075 +#endif
79076 +
79077 for (nstart = start ; ; ) {
79078 unsigned long newflags;
79079
79080 @@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79081
79082 /* newflags >> 4 shift VM_MAY% in place of VM_% */
79083 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
79084 + if (prot & (PROT_WRITE | PROT_EXEC))
79085 + gr_log_rwxmprotect(vma->vm_file);
79086 +
79087 + error = -EACCES;
79088 + goto out;
79089 + }
79090 +
79091 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
79092 error = -EACCES;
79093 goto out;
79094 }
79095 @@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79096 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
79097 if (error)
79098 goto out;
79099 +
79100 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
79101 +
79102 nstart = tmp;
79103
79104 if (nstart < prev->vm_end)
79105 diff --git a/mm/mremap.c b/mm/mremap.c
79106 index 3e98d79..1706cec 100644
79107 --- a/mm/mremap.c
79108 +++ b/mm/mremap.c
79109 @@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
79110 continue;
79111 pte = ptep_clear_flush(vma, old_addr, old_pte);
79112 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
79113 +
79114 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
79115 + if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
79116 + pte = pte_exprotect(pte);
79117 +#endif
79118 +
79119 set_pte_at(mm, new_addr, new_pte, pte);
79120 }
79121
79122 @@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
79123 if (is_vm_hugetlb_page(vma))
79124 goto Einval;
79125
79126 +#ifdef CONFIG_PAX_SEGMEXEC
79127 + if (pax_find_mirror_vma(vma))
79128 + goto Einval;
79129 +#endif
79130 +
79131 /* We can't remap across vm area boundaries */
79132 if (old_len > vma->vm_end - addr)
79133 goto Efault;
79134 @@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned long addr,
79135 unsigned long ret = -EINVAL;
79136 unsigned long charged = 0;
79137 unsigned long map_flags;
79138 + unsigned long pax_task_size = TASK_SIZE;
79139
79140 if (new_addr & ~PAGE_MASK)
79141 goto out;
79142
79143 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
79144 +#ifdef CONFIG_PAX_SEGMEXEC
79145 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
79146 + pax_task_size = SEGMEXEC_TASK_SIZE;
79147 +#endif
79148 +
79149 + pax_task_size -= PAGE_SIZE;
79150 +
79151 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
79152 goto out;
79153
79154 /* Check if the location we're moving into overlaps the
79155 * old location at all, and fail if it does.
79156 */
79157 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
79158 - goto out;
79159 -
79160 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
79161 + if (addr + old_len > new_addr && new_addr + new_len > addr)
79162 goto out;
79163
79164 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
79165 @@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long addr,
79166 struct vm_area_struct *vma;
79167 unsigned long ret = -EINVAL;
79168 unsigned long charged = 0;
79169 + unsigned long pax_task_size = TASK_SIZE;
79170
79171 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
79172 goto out;
79173 @@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long addr,
79174 if (!new_len)
79175 goto out;
79176
79177 +#ifdef CONFIG_PAX_SEGMEXEC
79178 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
79179 + pax_task_size = SEGMEXEC_TASK_SIZE;
79180 +#endif
79181 +
79182 + pax_task_size -= PAGE_SIZE;
79183 +
79184 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
79185 + old_len > pax_task_size || addr > pax_task_size-old_len)
79186 + goto out;
79187 +
79188 if (flags & MREMAP_FIXED) {
79189 if (flags & MREMAP_MAYMOVE)
79190 ret = mremap_to(addr, old_len, new_addr, new_len);
79191 @@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long addr,
79192 addr + new_len);
79193 }
79194 ret = addr;
79195 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
79196 goto out;
79197 }
79198 }
79199 @@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long addr,
79200 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
79201 if (ret)
79202 goto out;
79203 +
79204 + map_flags = vma->vm_flags;
79205 ret = move_vma(vma, addr, old_len, new_len, new_addr);
79206 + if (!(ret & ~PAGE_MASK)) {
79207 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
79208 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
79209 + }
79210 }
79211 out:
79212 if (ret & ~PAGE_MASK)
79213 diff --git a/mm/nommu.c b/mm/nommu.c
79214 index 406e8d4..53970d3 100644
79215 --- a/mm/nommu.c
79216 +++ b/mm/nommu.c
79217 @@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
79218 int sysctl_overcommit_ratio = 50; /* default is 50% */
79219 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
79220 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
79221 -int heap_stack_gap = 0;
79222
79223 atomic_long_t mmap_pages_allocated;
79224
79225 @@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
79226 EXPORT_SYMBOL(find_vma);
79227
79228 /*
79229 - * find a VMA
79230 - * - we don't extend stack VMAs under NOMMU conditions
79231 - */
79232 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
79233 -{
79234 - return find_vma(mm, addr);
79235 -}
79236 -
79237 -/*
79238 * expand a stack to a given address
79239 * - not supported under NOMMU conditions
79240 */
79241 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
79242 index 3ecab7e..594a471 100644
79243 --- a/mm/page_alloc.c
79244 +++ b/mm/page_alloc.c
79245 @@ -289,7 +289,7 @@ out:
79246 * This usage means that zero-order pages may not be compound.
79247 */
79248
79249 -static void free_compound_page(struct page *page)
79250 +void free_compound_page(struct page *page)
79251 {
79252 __free_pages_ok(page, compound_order(page));
79253 }
79254 @@ -587,6 +587,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
79255 int bad = 0;
79256 int wasMlocked = __TestClearPageMlocked(page);
79257
79258 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
79259 + unsigned long index = 1UL << order;
79260 +#endif
79261 +
79262 kmemcheck_free_shadow(page, order);
79263
79264 for (i = 0 ; i < (1 << order) ; ++i)
79265 @@ -599,6 +603,12 @@ static void __free_pages_ok(struct page *page, unsigned int order)
79266 debug_check_no_obj_freed(page_address(page),
79267 PAGE_SIZE << order);
79268 }
79269 +
79270 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
79271 + for (; index; --index)
79272 + sanitize_highpage(page + index - 1);
79273 +#endif
79274 +
79275 arch_free_page(page, order);
79276 kernel_map_pages(page, 1 << order, 0);
79277
79278 @@ -702,8 +712,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
79279 arch_alloc_page(page, order);
79280 kernel_map_pages(page, 1 << order, 1);
79281
79282 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
79283 if (gfp_flags & __GFP_ZERO)
79284 prep_zero_page(page, order, gfp_flags);
79285 +#endif
79286
79287 if (order && (gfp_flags & __GFP_COMP))
79288 prep_compound_page(page, order);
79289 @@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct page *page, int cold)
79290 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
79291 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
79292 }
79293 +
79294 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
79295 + sanitize_highpage(page);
79296 +#endif
79297 +
79298 arch_free_page(page, 0);
79299 kernel_map_pages(page, 1, 0);
79300
79301 @@ -2179,6 +2196,8 @@ void show_free_areas(void)
79302 int cpu;
79303 struct zone *zone;
79304
79305 + pax_track_stack();
79306 +
79307 for_each_populated_zone(zone) {
79308 show_node(zone);
79309 printk("%s per-cpu:\n", zone->name);
79310 @@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct pglist_data *pgdat,
79311 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
79312 }
79313 #else
79314 -static void inline setup_usemap(struct pglist_data *pgdat,
79315 +static inline void setup_usemap(struct pglist_data *pgdat,
79316 struct zone *zone, unsigned long zonesize) {}
79317 #endif /* CONFIG_SPARSEMEM */
79318
79319 diff --git a/mm/percpu.c b/mm/percpu.c
79320 index c90614a..5f7b7b8 100644
79321 --- a/mm/percpu.c
79322 +++ b/mm/percpu.c
79323 @@ -115,7 +115,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
79324 static unsigned int pcpu_high_unit_cpu __read_mostly;
79325
79326 /* the address of the first chunk which starts with the kernel static area */
79327 -void *pcpu_base_addr __read_mostly;
79328 +void *pcpu_base_addr __read_only;
79329 EXPORT_SYMBOL_GPL(pcpu_base_addr);
79330
79331 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
79332 diff --git a/mm/rmap.c b/mm/rmap.c
79333 index dd43373..d848cd7 100644
79334 --- a/mm/rmap.c
79335 +++ b/mm/rmap.c
79336 @@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_struct *vma)
79337 /* page_table_lock to protect against threads */
79338 spin_lock(&mm->page_table_lock);
79339 if (likely(!vma->anon_vma)) {
79340 +
79341 +#ifdef CONFIG_PAX_SEGMEXEC
79342 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
79343 +
79344 + if (vma_m) {
79345 + BUG_ON(vma_m->anon_vma);
79346 + vma_m->anon_vma = anon_vma;
79347 + list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
79348 + }
79349 +#endif
79350 +
79351 vma->anon_vma = anon_vma;
79352 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
79353 allocated = NULL;
79354 diff --git a/mm/shmem.c b/mm/shmem.c
79355 index 3e0005b..1d659a8 100644
79356 --- a/mm/shmem.c
79357 +++ b/mm/shmem.c
79358 @@ -31,7 +31,7 @@
79359 #include <linux/swap.h>
79360 #include <linux/ima.h>
79361
79362 -static struct vfsmount *shm_mnt;
79363 +struct vfsmount *shm_mnt;
79364
79365 #ifdef CONFIG_SHMEM
79366 /*
79367 @@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
79368 goto unlock;
79369 }
79370 entry = shmem_swp_entry(info, index, NULL);
79371 + if (!entry)
79372 + goto unlock;
79373 if (entry->val) {
79374 /*
79375 * The more uptodate page coming down from a stacked
79376 @@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
79377 struct vm_area_struct pvma;
79378 struct page *page;
79379
79380 + pax_track_stack();
79381 +
79382 spol = mpol_cond_copy(&mpol,
79383 mpol_shared_policy_lookup(&info->policy, idx));
79384
79385 @@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
79386
79387 info = SHMEM_I(inode);
79388 inode->i_size = len-1;
79389 - if (len <= (char *)inode - (char *)info) {
79390 + if (len <= (char *)inode - (char *)info && len <= 64) {
79391 /* do it inline */
79392 memcpy(info, symname, len);
79393 inode->i_op = &shmem_symlink_inline_operations;
79394 @@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
79395 int err = -ENOMEM;
79396
79397 /* Round up to L1_CACHE_BYTES to resist false sharing */
79398 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
79399 - L1_CACHE_BYTES), GFP_KERNEL);
79400 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
79401 if (!sbinfo)
79402 return -ENOMEM;
79403
79404 diff --git a/mm/slab.c b/mm/slab.c
79405 index c8d466a..909e01e 100644
79406 --- a/mm/slab.c
79407 +++ b/mm/slab.c
79408 @@ -174,7 +174,7 @@
79409
79410 /* Legal flag mask for kmem_cache_create(). */
79411 #if DEBUG
79412 -# define CREATE_MASK (SLAB_RED_ZONE | \
79413 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
79414 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
79415 SLAB_CACHE_DMA | \
79416 SLAB_STORE_USER | \
79417 @@ -182,7 +182,7 @@
79418 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
79419 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
79420 #else
79421 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
79422 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
79423 SLAB_CACHE_DMA | \
79424 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
79425 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
79426 @@ -308,7 +308,7 @@ struct kmem_list3 {
79427 * Need this for bootstrapping a per node allocator.
79428 */
79429 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
79430 -struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
79431 +struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
79432 #define CACHE_CACHE 0
79433 #define SIZE_AC MAX_NUMNODES
79434 #define SIZE_L3 (2 * MAX_NUMNODES)
79435 @@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
79436 if ((x)->max_freeable < i) \
79437 (x)->max_freeable = i; \
79438 } while (0)
79439 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
79440 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
79441 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
79442 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
79443 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
79444 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
79445 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
79446 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
79447 #else
79448 #define STATS_INC_ACTIVE(x) do { } while (0)
79449 #define STATS_DEC_ACTIVE(x) do { } while (0)
79450 @@ -558,7 +558,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
79451 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
79452 */
79453 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
79454 - const struct slab *slab, void *obj)
79455 + const struct slab *slab, const void *obj)
79456 {
79457 u32 offset = (obj - slab->s_mem);
79458 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
79459 @@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
79460 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
79461 sizes[INDEX_AC].cs_size,
79462 ARCH_KMALLOC_MINALIGN,
79463 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
79464 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
79465 NULL);
79466
79467 if (INDEX_AC != INDEX_L3) {
79468 @@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
79469 kmem_cache_create(names[INDEX_L3].name,
79470 sizes[INDEX_L3].cs_size,
79471 ARCH_KMALLOC_MINALIGN,
79472 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
79473 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
79474 NULL);
79475 }
79476
79477 @@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
79478 sizes->cs_cachep = kmem_cache_create(names->name,
79479 sizes->cs_size,
79480 ARCH_KMALLOC_MINALIGN,
79481 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
79482 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
79483 NULL);
79484 }
79485 #ifdef CONFIG_ZONE_DMA
79486 @@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, void *p)
79487 }
79488 /* cpu stats */
79489 {
79490 - unsigned long allochit = atomic_read(&cachep->allochit);
79491 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
79492 - unsigned long freehit = atomic_read(&cachep->freehit);
79493 - unsigned long freemiss = atomic_read(&cachep->freemiss);
79494 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
79495 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
79496 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
79497 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
79498
79499 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
79500 allochit, allocmiss, freehit, freemiss);
79501 @@ -4471,15 +4471,70 @@ static const struct file_operations proc_slabstats_operations = {
79502
79503 static int __init slab_proc_init(void)
79504 {
79505 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
79506 + mode_t gr_mode = S_IRUGO;
79507 +
79508 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
79509 + gr_mode = S_IRUSR;
79510 +#endif
79511 +
79512 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
79513 #ifdef CONFIG_DEBUG_SLAB_LEAK
79514 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
79515 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
79516 #endif
79517 return 0;
79518 }
79519 module_init(slab_proc_init);
79520 #endif
79521
79522 +void check_object_size(const void *ptr, unsigned long n, bool to)
79523 +{
79524 +
79525 +#ifdef CONFIG_PAX_USERCOPY
79526 + struct page *page;
79527 + struct kmem_cache *cachep = NULL;
79528 + struct slab *slabp;
79529 + unsigned int objnr;
79530 + unsigned long offset;
79531 + const char *type;
79532 +
79533 + if (!n)
79534 + return;
79535 +
79536 + type = "<null>";
79537 + if (ZERO_OR_NULL_PTR(ptr))
79538 + goto report;
79539 +
79540 + if (!virt_addr_valid(ptr))
79541 + return;
79542 +
79543 + page = virt_to_head_page(ptr);
79544 +
79545 + type = "<process stack>";
79546 + if (!PageSlab(page)) {
79547 + if (object_is_on_stack(ptr, n) == -1)
79548 + goto report;
79549 + return;
79550 + }
79551 +
79552 + cachep = page_get_cache(page);
79553 + type = cachep->name;
79554 + if (!(cachep->flags & SLAB_USERCOPY))
79555 + goto report;
79556 +
79557 + slabp = page_get_slab(page);
79558 + objnr = obj_to_index(cachep, slabp, ptr);
79559 + BUG_ON(objnr >= cachep->num);
79560 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
79561 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
79562 + return;
79563 +
79564 +report:
79565 + pax_report_usercopy(ptr, n, to, type);
79566 +#endif
79567 +
79568 +}
79569 +EXPORT_SYMBOL(check_object_size);
79570 +
79571 /**
79572 * ksize - get the actual amount of memory allocated for a given object
79573 * @objp: Pointer to the object
79574 diff --git a/mm/slob.c b/mm/slob.c
79575 index 837ebd6..4712174 100644
79576 --- a/mm/slob.c
79577 +++ b/mm/slob.c
79578 @@ -29,7 +29,7 @@
79579 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
79580 * alloc_pages() directly, allocating compound pages so the page order
79581 * does not have to be separately tracked, and also stores the exact
79582 - * allocation size in page->private so that it can be used to accurately
79583 + * allocation size in slob_page->size so that it can be used to accurately
79584 * provide ksize(). These objects are detected in kfree() because slob_page()
79585 * is false for them.
79586 *
79587 @@ -58,6 +58,7 @@
79588 */
79589
79590 #include <linux/kernel.h>
79591 +#include <linux/sched.h>
79592 #include <linux/slab.h>
79593 #include <linux/mm.h>
79594 #include <linux/swap.h> /* struct reclaim_state */
79595 @@ -100,7 +101,8 @@ struct slob_page {
79596 unsigned long flags; /* mandatory */
79597 atomic_t _count; /* mandatory */
79598 slobidx_t units; /* free units left in page */
79599 - unsigned long pad[2];
79600 + unsigned long pad[1];
79601 + unsigned long size; /* size when >=PAGE_SIZE */
79602 slob_t *free; /* first free slob_t in page */
79603 struct list_head list; /* linked list of free pages */
79604 };
79605 @@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
79606 */
79607 static inline int is_slob_page(struct slob_page *sp)
79608 {
79609 - return PageSlab((struct page *)sp);
79610 + return PageSlab((struct page *)sp) && !sp->size;
79611 }
79612
79613 static inline void set_slob_page(struct slob_page *sp)
79614 @@ -148,7 +150,7 @@ static inline void clear_slob_page(struct slob_page *sp)
79615
79616 static inline struct slob_page *slob_page(const void *addr)
79617 {
79618 - return (struct slob_page *)virt_to_page(addr);
79619 + return (struct slob_page *)virt_to_head_page(addr);
79620 }
79621
79622 /*
79623 @@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
79624 /*
79625 * Return the size of a slob block.
79626 */
79627 -static slobidx_t slob_units(slob_t *s)
79628 +static slobidx_t slob_units(const slob_t *s)
79629 {
79630 if (s->units > 0)
79631 return s->units;
79632 @@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
79633 /*
79634 * Return the next free slob block pointer after this one.
79635 */
79636 -static slob_t *slob_next(slob_t *s)
79637 +static slob_t *slob_next(const slob_t *s)
79638 {
79639 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
79640 slobidx_t next;
79641 @@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
79642 /*
79643 * Returns true if s is the last free block in its page.
79644 */
79645 -static int slob_last(slob_t *s)
79646 +static int slob_last(const slob_t *s)
79647 {
79648 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
79649 }
79650 @@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
79651 if (!page)
79652 return NULL;
79653
79654 + set_slob_page(page);
79655 return page_address(page);
79656 }
79657
79658 @@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
79659 if (!b)
79660 return NULL;
79661 sp = slob_page(b);
79662 - set_slob_page(sp);
79663
79664 spin_lock_irqsave(&slob_lock, flags);
79665 sp->units = SLOB_UNITS(PAGE_SIZE);
79666 sp->free = b;
79667 + sp->size = 0;
79668 INIT_LIST_HEAD(&sp->list);
79669 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
79670 set_slob_page_free(sp, slob_list);
79671 @@ -475,10 +478,9 @@ out:
79672 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
79673 #endif
79674
79675 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
79676 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
79677 {
79678 - unsigned int *m;
79679 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
79680 + slob_t *m;
79681 void *ret;
79682
79683 lockdep_trace_alloc(gfp);
79684 @@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
79685
79686 if (!m)
79687 return NULL;
79688 - *m = size;
79689 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
79690 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
79691 + m[0].units = size;
79692 + m[1].units = align;
79693 ret = (void *)m + align;
79694
79695 trace_kmalloc_node(_RET_IP_, ret,
79696 @@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
79697
79698 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
79699 if (ret) {
79700 - struct page *page;
79701 - page = virt_to_page(ret);
79702 - page->private = size;
79703 + struct slob_page *sp;
79704 + sp = slob_page(ret);
79705 + sp->size = size;
79706 }
79707
79708 trace_kmalloc_node(_RET_IP_, ret,
79709 size, PAGE_SIZE << order, gfp, node);
79710 }
79711
79712 - kmemleak_alloc(ret, size, 1, gfp);
79713 + return ret;
79714 +}
79715 +
79716 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
79717 +{
79718 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
79719 + void *ret = __kmalloc_node_align(size, gfp, node, align);
79720 +
79721 + if (!ZERO_OR_NULL_PTR(ret))
79722 + kmemleak_alloc(ret, size, 1, gfp);
79723 return ret;
79724 }
79725 EXPORT_SYMBOL(__kmalloc_node);
79726 @@ -528,13 +542,92 @@ void kfree(const void *block)
79727 sp = slob_page(block);
79728 if (is_slob_page(sp)) {
79729 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
79730 - unsigned int *m = (unsigned int *)(block - align);
79731 - slob_free(m, *m + align);
79732 - } else
79733 + slob_t *m = (slob_t *)(block - align);
79734 + slob_free(m, m[0].units + align);
79735 + } else {
79736 + clear_slob_page(sp);
79737 + free_slob_page(sp);
79738 + sp->size = 0;
79739 put_page(&sp->page);
79740 + }
79741 }
79742 EXPORT_SYMBOL(kfree);
79743
79744 +void check_object_size(const void *ptr, unsigned long n, bool to)
79745 +{
79746 +
79747 +#ifdef CONFIG_PAX_USERCOPY
79748 + struct slob_page *sp;
79749 + const slob_t *free;
79750 + const void *base;
79751 + unsigned long flags;
79752 + const char *type;
79753 +
79754 + if (!n)
79755 + return;
79756 +
79757 + type = "<null>";
79758 + if (ZERO_OR_NULL_PTR(ptr))
79759 + goto report;
79760 +
79761 + if (!virt_addr_valid(ptr))
79762 + return;
79763 +
79764 + type = "<process stack>";
79765 + sp = slob_page(ptr);
79766 + if (!PageSlab((struct page*)sp)) {
79767 + if (object_is_on_stack(ptr, n) == -1)
79768 + goto report;
79769 + return;
79770 + }
79771 +
79772 + type = "<slob>";
79773 + if (sp->size) {
79774 + base = page_address(&sp->page);
79775 + if (base <= ptr && n <= sp->size - (ptr - base))
79776 + return;
79777 + goto report;
79778 + }
79779 +
79780 + /* some tricky double walking to find the chunk */
79781 + spin_lock_irqsave(&slob_lock, flags);
79782 + base = (void *)((unsigned long)ptr & PAGE_MASK);
79783 + free = sp->free;
79784 +
79785 + while (!slob_last(free) && (void *)free <= ptr) {
79786 + base = free + slob_units(free);
79787 + free = slob_next(free);
79788 + }
79789 +
79790 + while (base < (void *)free) {
79791 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
79792 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
79793 + int offset;
79794 +
79795 + if (ptr < base + align)
79796 + break;
79797 +
79798 + offset = ptr - base - align;
79799 + if (offset >= m) {
79800 + base += size;
79801 + continue;
79802 + }
79803 +
79804 + if (n > m - offset)
79805 + break;
79806 +
79807 + spin_unlock_irqrestore(&slob_lock, flags);
79808 + return;
79809 + }
79810 +
79811 + spin_unlock_irqrestore(&slob_lock, flags);
79812 +report:
79813 + pax_report_usercopy(ptr, n, to, type);
79814 +#endif
79815 +
79816 +}
79817 +EXPORT_SYMBOL(check_object_size);
79818 +
79819 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
79820 size_t ksize(const void *block)
79821 {
79822 @@ -547,10 +640,10 @@ size_t ksize(const void *block)
79823 sp = slob_page(block);
79824 if (is_slob_page(sp)) {
79825 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
79826 - unsigned int *m = (unsigned int *)(block - align);
79827 - return SLOB_UNITS(*m) * SLOB_UNIT;
79828 + slob_t *m = (slob_t *)(block - align);
79829 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
79830 } else
79831 - return sp->page.private;
79832 + return sp->size;
79833 }
79834 EXPORT_SYMBOL(ksize);
79835
79836 @@ -566,8 +659,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
79837 {
79838 struct kmem_cache *c;
79839
79840 +#ifdef CONFIG_PAX_USERCOPY
79841 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
79842 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
79843 +#else
79844 c = slob_alloc(sizeof(struct kmem_cache),
79845 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
79846 +#endif
79847
79848 if (c) {
79849 c->name = name;
79850 @@ -605,17 +703,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
79851 {
79852 void *b;
79853
79854 +#ifdef CONFIG_PAX_USERCOPY
79855 + b = __kmalloc_node_align(c->size, flags, node, c->align);
79856 +#else
79857 if (c->size < PAGE_SIZE) {
79858 b = slob_alloc(c->size, flags, c->align, node);
79859 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
79860 SLOB_UNITS(c->size) * SLOB_UNIT,
79861 flags, node);
79862 } else {
79863 + struct slob_page *sp;
79864 +
79865 b = slob_new_pages(flags, get_order(c->size), node);
79866 + sp = slob_page(b);
79867 + sp->size = c->size;
79868 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
79869 PAGE_SIZE << get_order(c->size),
79870 flags, node);
79871 }
79872 +#endif
79873
79874 if (c->ctor)
79875 c->ctor(b);
79876 @@ -627,10 +733,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
79877
79878 static void __kmem_cache_free(void *b, int size)
79879 {
79880 - if (size < PAGE_SIZE)
79881 + struct slob_page *sp = slob_page(b);
79882 +
79883 + if (is_slob_page(sp))
79884 slob_free(b, size);
79885 - else
79886 + else {
79887 + clear_slob_page(sp);
79888 + free_slob_page(sp);
79889 + sp->size = 0;
79890 slob_free_pages(b, get_order(size));
79891 + }
79892 }
79893
79894 static void kmem_rcu_free(struct rcu_head *head)
79895 @@ -643,18 +755,32 @@ static void kmem_rcu_free(struct rcu_head *head)
79896
79897 void kmem_cache_free(struct kmem_cache *c, void *b)
79898 {
79899 + int size = c->size;
79900 +
79901 +#ifdef CONFIG_PAX_USERCOPY
79902 + if (size + c->align < PAGE_SIZE) {
79903 + size += c->align;
79904 + b -= c->align;
79905 + }
79906 +#endif
79907 +
79908 kmemleak_free_recursive(b, c->flags);
79909 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
79910 struct slob_rcu *slob_rcu;
79911 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
79912 + slob_rcu = b + (size - sizeof(struct slob_rcu));
79913 INIT_RCU_HEAD(&slob_rcu->head);
79914 - slob_rcu->size = c->size;
79915 + slob_rcu->size = size;
79916 call_rcu(&slob_rcu->head, kmem_rcu_free);
79917 } else {
79918 - __kmem_cache_free(b, c->size);
79919 + __kmem_cache_free(b, size);
79920 }
79921
79922 +#ifdef CONFIG_PAX_USERCOPY
79923 + trace_kfree(_RET_IP_, b);
79924 +#else
79925 trace_kmem_cache_free(_RET_IP_, b);
79926 +#endif
79927 +
79928 }
79929 EXPORT_SYMBOL(kmem_cache_free);
79930
79931 diff --git a/mm/slub.c b/mm/slub.c
79932 index 4996fc7..87e01d0 100644
79933 --- a/mm/slub.c
79934 +++ b/mm/slub.c
79935 @@ -201,7 +201,7 @@ struct track {
79936
79937 enum track_item { TRACK_ALLOC, TRACK_FREE };
79938
79939 -#ifdef CONFIG_SLUB_DEBUG
79940 +#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
79941 static int sysfs_slab_add(struct kmem_cache *);
79942 static int sysfs_slab_alias(struct kmem_cache *, const char *);
79943 static void sysfs_slab_remove(struct kmem_cache *);
79944 @@ -410,7 +410,7 @@ static void print_track(const char *s, struct track *t)
79945 if (!t->addr)
79946 return;
79947
79948 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
79949 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
79950 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
79951 }
79952
79953 @@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
79954
79955 page = virt_to_head_page(x);
79956
79957 + BUG_ON(!PageSlab(page));
79958 +
79959 slab_free(s, page, x, _RET_IP_);
79960
79961 trace_kmem_cache_free(_RET_IP_, x);
79962 @@ -1937,7 +1939,7 @@ static int slub_min_objects;
79963 * Merge control. If this is set then no merging of slab caches will occur.
79964 * (Could be removed. This was introduced to pacify the merge skeptics.)
79965 */
79966 -static int slub_nomerge;
79967 +static int slub_nomerge = 1;
79968
79969 /*
79970 * Calculate the order of allocation given an slab object size.
79971 @@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
79972 * list to avoid pounding the page allocator excessively.
79973 */
79974 set_min_partial(s, ilog2(s->size));
79975 - s->refcount = 1;
79976 + atomic_set(&s->refcount, 1);
79977 #ifdef CONFIG_NUMA
79978 s->remote_node_defrag_ratio = 1000;
79979 #endif
79980 @@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
79981 void kmem_cache_destroy(struct kmem_cache *s)
79982 {
79983 down_write(&slub_lock);
79984 - s->refcount--;
79985 - if (!s->refcount) {
79986 + if (atomic_dec_and_test(&s->refcount)) {
79987 list_del(&s->list);
79988 up_write(&slub_lock);
79989 if (kmem_cache_close(s)) {
79990 @@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(char *str)
79991 __setup("slub_nomerge", setup_slub_nomerge);
79992
79993 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
79994 - const char *name, int size, gfp_t gfp_flags)
79995 + const char *name, int size, gfp_t gfp_flags, unsigned int flags)
79996 {
79997 - unsigned int flags = 0;
79998 -
79999 if (gfp_flags & SLUB_DMA)
80000 - flags = SLAB_CACHE_DMA;
80001 + flags |= SLAB_CACHE_DMA;
80002
80003 /*
80004 * This function is called with IRQs disabled during early-boot on
80005 @@ -2915,6 +2914,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
80006 EXPORT_SYMBOL(__kmalloc_node);
80007 #endif
80008
80009 +void check_object_size(const void *ptr, unsigned long n, bool to)
80010 +{
80011 +
80012 +#ifdef CONFIG_PAX_USERCOPY
80013 + struct page *page;
80014 + struct kmem_cache *s = NULL;
80015 + unsigned long offset;
80016 + const char *type;
80017 +
80018 + if (!n)
80019 + return;
80020 +
80021 + type = "<null>";
80022 + if (ZERO_OR_NULL_PTR(ptr))
80023 + goto report;
80024 +
80025 + if (!virt_addr_valid(ptr))
80026 + return;
80027 +
80028 + page = get_object_page(ptr);
80029 +
80030 + type = "<process stack>";
80031 + if (!page) {
80032 + if (object_is_on_stack(ptr, n) == -1)
80033 + goto report;
80034 + return;
80035 + }
80036 +
80037 + s = page->slab;
80038 + type = s->name;
80039 + if (!(s->flags & SLAB_USERCOPY))
80040 + goto report;
80041 +
80042 + offset = (ptr - page_address(page)) % s->size;
80043 + if (offset <= s->objsize && n <= s->objsize - offset)
80044 + return;
80045 +
80046 +report:
80047 + pax_report_usercopy(ptr, n, to, type);
80048 +#endif
80049 +
80050 +}
80051 +EXPORT_SYMBOL(check_object_size);
80052 +
80053 size_t ksize(const void *object)
80054 {
80055 struct page *page;
80056 @@ -3185,8 +3228,8 @@ void __init kmem_cache_init(void)
80057 * kmem_cache_open for slab_state == DOWN.
80058 */
80059 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
80060 - sizeof(struct kmem_cache_node), GFP_NOWAIT);
80061 - kmalloc_caches[0].refcount = -1;
80062 + sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
80063 + atomic_set(&kmalloc_caches[0].refcount, -1);
80064 caches++;
80065
80066 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
80067 @@ -3198,18 +3241,18 @@ void __init kmem_cache_init(void)
80068 /* Caches that are not of the two-to-the-power-of size */
80069 if (KMALLOC_MIN_SIZE <= 32) {
80070 create_kmalloc_cache(&kmalloc_caches[1],
80071 - "kmalloc-96", 96, GFP_NOWAIT);
80072 + "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
80073 caches++;
80074 }
80075 if (KMALLOC_MIN_SIZE <= 64) {
80076 create_kmalloc_cache(&kmalloc_caches[2],
80077 - "kmalloc-192", 192, GFP_NOWAIT);
80078 + "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
80079 caches++;
80080 }
80081
80082 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
80083 create_kmalloc_cache(&kmalloc_caches[i],
80084 - "kmalloc", 1 << i, GFP_NOWAIT);
80085 + "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
80086 caches++;
80087 }
80088
80089 @@ -3293,7 +3336,7 @@ static int slab_unmergeable(struct kmem_cache *s)
80090 /*
80091 * We may have set a slab to be unmergeable during bootstrap.
80092 */
80093 - if (s->refcount < 0)
80094 + if (atomic_read(&s->refcount) < 0)
80095 return 1;
80096
80097 return 0;
80098 @@ -3353,7 +3396,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
80099 if (s) {
80100 int cpu;
80101
80102 - s->refcount++;
80103 + atomic_inc(&s->refcount);
80104 /*
80105 * Adjust the object sizes so that we clear
80106 * the complete object on kzalloc.
80107 @@ -3372,7 +3415,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
80108
80109 if (sysfs_slab_alias(s, name)) {
80110 down_write(&slub_lock);
80111 - s->refcount--;
80112 + atomic_dec(&s->refcount);
80113 up_write(&slub_lock);
80114 goto err;
80115 }
80116 @@ -4101,7 +4144,7 @@ SLAB_ATTR_RO(ctor);
80117
80118 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
80119 {
80120 - return sprintf(buf, "%d\n", s->refcount - 1);
80121 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
80122 }
80123 SLAB_ATTR_RO(aliases);
80124
80125 @@ -4503,7 +4546,7 @@ static void kmem_cache_release(struct kobject *kobj)
80126 kfree(s);
80127 }
80128
80129 -static struct sysfs_ops slab_sysfs_ops = {
80130 +static const struct sysfs_ops slab_sysfs_ops = {
80131 .show = slab_attr_show,
80132 .store = slab_attr_store,
80133 };
80134 @@ -4522,7 +4565,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
80135 return 0;
80136 }
80137
80138 -static struct kset_uevent_ops slab_uevent_ops = {
80139 +static const struct kset_uevent_ops slab_uevent_ops = {
80140 .filter = uevent_filter,
80141 };
80142
80143 @@ -4564,6 +4607,7 @@ static char *create_unique_id(struct kmem_cache *s)
80144 return name;
80145 }
80146
80147 +#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
80148 static int sysfs_slab_add(struct kmem_cache *s)
80149 {
80150 int err;
80151 @@ -4619,6 +4663,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
80152 kobject_del(&s->kobj);
80153 kobject_put(&s->kobj);
80154 }
80155 +#endif
80156
80157 /*
80158 * Need to buffer aliases during bootup until sysfs becomes
80159 @@ -4632,6 +4677,7 @@ struct saved_alias {
80160
80161 static struct saved_alias *alias_list;
80162
80163 +#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
80164 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
80165 {
80166 struct saved_alias *al;
80167 @@ -4654,6 +4700,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
80168 alias_list = al;
80169 return 0;
80170 }
80171 +#endif
80172
80173 static int __init slab_sysfs_init(void)
80174 {
80175 @@ -4785,7 +4832,13 @@ static const struct file_operations proc_slabinfo_operations = {
80176
80177 static int __init slab_proc_init(void)
80178 {
80179 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
80180 + mode_t gr_mode = S_IRUGO;
80181 +
80182 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
80183 + gr_mode = S_IRUSR;
80184 +#endif
80185 +
80186 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
80187 return 0;
80188 }
80189 module_init(slab_proc_init);
80190 diff --git a/mm/swap.c b/mm/swap.c
80191 index 308e57d..5de19c0 100644
80192 --- a/mm/swap.c
80193 +++ b/mm/swap.c
80194 @@ -30,6 +30,7 @@
80195 #include <linux/notifier.h>
80196 #include <linux/backing-dev.h>
80197 #include <linux/memcontrol.h>
80198 +#include <linux/hugetlb.h>
80199
80200 #include "internal.h"
80201
80202 @@ -65,6 +66,8 @@ static void put_compound_page(struct page *page)
80203 compound_page_dtor *dtor;
80204
80205 dtor = get_compound_page_dtor(page);
80206 + if (!PageHuge(page))
80207 + BUG_ON(dtor != free_compound_page);
80208 (*dtor)(page);
80209 }
80210 }
80211 diff --git a/mm/util.c b/mm/util.c
80212 index e48b493..24a601d 100644
80213 --- a/mm/util.c
80214 +++ b/mm/util.c
80215 @@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
80216 void arch_pick_mmap_layout(struct mm_struct *mm)
80217 {
80218 mm->mmap_base = TASK_UNMAPPED_BASE;
80219 +
80220 +#ifdef CONFIG_PAX_RANDMMAP
80221 + if (mm->pax_flags & MF_PAX_RANDMMAP)
80222 + mm->mmap_base += mm->delta_mmap;
80223 +#endif
80224 +
80225 mm->get_unmapped_area = arch_get_unmapped_area;
80226 mm->unmap_area = arch_unmap_area;
80227 }
80228 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
80229 index f34ffd0..e60c44f 100644
80230 --- a/mm/vmalloc.c
80231 +++ b/mm/vmalloc.c
80232 @@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
80233
80234 pte = pte_offset_kernel(pmd, addr);
80235 do {
80236 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
80237 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
80238 +
80239 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80240 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
80241 + BUG_ON(!pte_exec(*pte));
80242 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
80243 + continue;
80244 + }
80245 +#endif
80246 +
80247 + {
80248 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
80249 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
80250 + }
80251 } while (pte++, addr += PAGE_SIZE, addr != end);
80252 }
80253
80254 @@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
80255 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
80256 {
80257 pte_t *pte;
80258 + int ret = -ENOMEM;
80259
80260 /*
80261 * nr is a running index into the array which helps higher level
80262 @@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
80263 pte = pte_alloc_kernel(pmd, addr);
80264 if (!pte)
80265 return -ENOMEM;
80266 +
80267 + pax_open_kernel();
80268 do {
80269 struct page *page = pages[*nr];
80270
80271 - if (WARN_ON(!pte_none(*pte)))
80272 - return -EBUSY;
80273 - if (WARN_ON(!page))
80274 - return -ENOMEM;
80275 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80276 + if (!(pgprot_val(prot) & _PAGE_NX))
80277 + BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
80278 + else
80279 +#endif
80280 +
80281 + if (WARN_ON(!pte_none(*pte))) {
80282 + ret = -EBUSY;
80283 + goto out;
80284 + }
80285 + if (WARN_ON(!page)) {
80286 + ret = -ENOMEM;
80287 + goto out;
80288 + }
80289 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
80290 (*nr)++;
80291 } while (pte++, addr += PAGE_SIZE, addr != end);
80292 - return 0;
80293 + ret = 0;
80294 +out:
80295 + pax_close_kernel();
80296 + return ret;
80297 }
80298
80299 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
80300 @@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void *x)
80301 * and fall back on vmalloc() if that fails. Others
80302 * just put it in the vmalloc space.
80303 */
80304 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
80305 +#ifdef CONFIG_MODULES
80306 +#ifdef MODULES_VADDR
80307 unsigned long addr = (unsigned long)x;
80308 if (addr >= MODULES_VADDR && addr < MODULES_END)
80309 return 1;
80310 #endif
80311 +
80312 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80313 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
80314 + return 1;
80315 +#endif
80316 +
80317 +#endif
80318 +
80319 return is_vmalloc_addr(x);
80320 }
80321
80322 @@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
80323
80324 if (!pgd_none(*pgd)) {
80325 pud_t *pud = pud_offset(pgd, addr);
80326 +#ifdef CONFIG_X86
80327 + if (!pud_large(*pud))
80328 +#endif
80329 if (!pud_none(*pud)) {
80330 pmd_t *pmd = pmd_offset(pud, addr);
80331 +#ifdef CONFIG_X86
80332 + if (!pmd_large(*pmd))
80333 +#endif
80334 if (!pmd_none(*pmd)) {
80335 pte_t *ptep, pte;
80336
80337 @@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vmap_area *va)
80338 struct rb_node *tmp;
80339
80340 while (*p) {
80341 - struct vmap_area *tmp;
80342 + struct vmap_area *varea;
80343
80344 parent = *p;
80345 - tmp = rb_entry(parent, struct vmap_area, rb_node);
80346 - if (va->va_start < tmp->va_end)
80347 + varea = rb_entry(parent, struct vmap_area, rb_node);
80348 + if (va->va_start < varea->va_end)
80349 p = &(*p)->rb_left;
80350 - else if (va->va_end > tmp->va_start)
80351 + else if (va->va_end > varea->va_start)
80352 p = &(*p)->rb_right;
80353 else
80354 BUG();
80355 @@ -1245,6 +1287,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
80356 struct vm_struct *area;
80357
80358 BUG_ON(in_interrupt());
80359 +
80360 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
80361 + if (flags & VM_KERNEXEC) {
80362 + if (start != VMALLOC_START || end != VMALLOC_END)
80363 + return NULL;
80364 + start = (unsigned long)MODULES_EXEC_VADDR;
80365 + end = (unsigned long)MODULES_EXEC_END;
80366 + }
80367 +#endif
80368 +
80369 if (flags & VM_IOREMAP) {
80370 int bit = fls(size);
80371
80372 @@ -1484,6 +1536,11 @@ void *vmap(struct page **pages, unsigned int count,
80373 if (count > totalram_pages)
80374 return NULL;
80375
80376 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
80377 + if (!(pgprot_val(prot) & _PAGE_NX))
80378 + flags |= VM_KERNEXEC;
80379 +#endif
80380 +
80381 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
80382 __builtin_return_address(0));
80383 if (!area)
80384 @@ -1594,6 +1651,14 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
80385 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
80386 return NULL;
80387
80388 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
80389 + if (!(pgprot_val(prot) & _PAGE_NX))
80390 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
80391 + VMALLOC_START, VMALLOC_END, node,
80392 + gfp_mask, caller);
80393 + else
80394 +#endif
80395 +
80396 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
80397 VMALLOC_START, VMALLOC_END, node,
80398 gfp_mask, caller);
80399 @@ -1619,6 +1684,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
80400 return addr;
80401 }
80402
80403 +#undef __vmalloc
80404 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
80405 {
80406 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
80407 @@ -1635,6 +1701,7 @@ EXPORT_SYMBOL(__vmalloc);
80408 * For tight control over page level allocator and protection flags
80409 * use __vmalloc() instead.
80410 */
80411 +#undef vmalloc
80412 void *vmalloc(unsigned long size)
80413 {
80414 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
80415 @@ -1649,6 +1716,7 @@ EXPORT_SYMBOL(vmalloc);
80416 * The resulting memory area is zeroed so it can be mapped to userspace
80417 * without leaking data.
80418 */
80419 +#undef vmalloc_user
80420 void *vmalloc_user(unsigned long size)
80421 {
80422 struct vm_struct *area;
80423 @@ -1676,6 +1744,7 @@ EXPORT_SYMBOL(vmalloc_user);
80424 * For tight control over page level allocator and protection flags
80425 * use __vmalloc() instead.
80426 */
80427 +#undef vmalloc_node
80428 void *vmalloc_node(unsigned long size, int node)
80429 {
80430 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
80431 @@ -1698,10 +1767,10 @@ EXPORT_SYMBOL(vmalloc_node);
80432 * For tight control over page level allocator and protection flags
80433 * use __vmalloc() instead.
80434 */
80435 -
80436 +#undef vmalloc_exec
80437 void *vmalloc_exec(unsigned long size)
80438 {
80439 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
80440 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
80441 -1, __builtin_return_address(0));
80442 }
80443
80444 @@ -1720,6 +1789,7 @@ void *vmalloc_exec(unsigned long size)
80445 * Allocate enough 32bit PA addressable pages to cover @size from the
80446 * page level allocator and map them into contiguous kernel virtual space.
80447 */
80448 +#undef vmalloc_32
80449 void *vmalloc_32(unsigned long size)
80450 {
80451 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
80452 @@ -1734,6 +1804,7 @@ EXPORT_SYMBOL(vmalloc_32);
80453 * The resulting memory area is 32bit addressable and zeroed so it can be
80454 * mapped to userspace without leaking data.
80455 */
80456 +#undef vmalloc_32_user
80457 void *vmalloc_32_user(unsigned long size)
80458 {
80459 struct vm_struct *area;
80460 @@ -1998,6 +2069,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
80461 unsigned long uaddr = vma->vm_start;
80462 unsigned long usize = vma->vm_end - vma->vm_start;
80463
80464 + BUG_ON(vma->vm_mirror);
80465 +
80466 if ((PAGE_SIZE-1) & (unsigned long)addr)
80467 return -EINVAL;
80468
80469 diff --git a/mm/vmstat.c b/mm/vmstat.c
80470 index 42d76c6..5643dc4 100644
80471 --- a/mm/vmstat.c
80472 +++ b/mm/vmstat.c
80473 @@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
80474 *
80475 * vm_stat contains the global counters
80476 */
80477 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
80478 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
80479 EXPORT_SYMBOL(vm_stat);
80480
80481 #ifdef CONFIG_SMP
80482 @@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
80483 v = p->vm_stat_diff[i];
80484 p->vm_stat_diff[i] = 0;
80485 local_irq_restore(flags);
80486 - atomic_long_add(v, &zone->vm_stat[i]);
80487 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
80488 global_diff[i] += v;
80489 #ifdef CONFIG_NUMA
80490 /* 3 seconds idle till flush */
80491 @@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
80492
80493 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
80494 if (global_diff[i])
80495 - atomic_long_add(global_diff[i], &vm_stat[i]);
80496 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
80497 }
80498
80499 #endif
80500 @@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
80501 start_cpu_timer(cpu);
80502 #endif
80503 #ifdef CONFIG_PROC_FS
80504 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
80505 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
80506 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
80507 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
80508 + {
80509 + mode_t gr_mode = S_IRUGO;
80510 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
80511 + gr_mode = S_IRUSR;
80512 +#endif
80513 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
80514 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
80515 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
80516 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
80517 +#else
80518 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
80519 +#endif
80520 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
80521 + }
80522 #endif
80523 return 0;
80524 }
80525 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
80526 index a29c5ab..6143f20 100644
80527 --- a/net/8021q/vlan.c
80528 +++ b/net/8021q/vlan.c
80529 @@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
80530 err = -EPERM;
80531 if (!capable(CAP_NET_ADMIN))
80532 break;
80533 - if ((args.u.name_type >= 0) &&
80534 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
80535 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
80536 struct vlan_net *vn;
80537
80538 vn = net_generic(net, vlan_net_id);
80539 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
80540 index a2d2984..f9eb711 100644
80541 --- a/net/9p/trans_fd.c
80542 +++ b/net/9p/trans_fd.c
80543 @@ -419,7 +419,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
80544 oldfs = get_fs();
80545 set_fs(get_ds());
80546 /* The cast to a user pointer is valid due to the set_fs() */
80547 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
80548 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
80549 set_fs(oldfs);
80550
80551 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
80552 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
80553 index 02cc7e7..4514f1b 100644
80554 --- a/net/atm/atm_misc.c
80555 +++ b/net/atm/atm_misc.c
80556 @@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int truesize)
80557 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
80558 return 1;
80559 atm_return(vcc,truesize);
80560 - atomic_inc(&vcc->stats->rx_drop);
80561 + atomic_inc_unchecked(&vcc->stats->rx_drop);
80562 return 0;
80563 }
80564
80565 @@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
80566 }
80567 }
80568 atm_return(vcc,guess);
80569 - atomic_inc(&vcc->stats->rx_drop);
80570 + atomic_inc_unchecked(&vcc->stats->rx_drop);
80571 return NULL;
80572 }
80573
80574 @@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafprm *tp)
80575
80576 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
80577 {
80578 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
80579 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
80580 __SONET_ITEMS
80581 #undef __HANDLE_ITEM
80582 }
80583 @@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
80584
80585 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
80586 {
80587 -#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
80588 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
80589 __SONET_ITEMS
80590 #undef __HANDLE_ITEM
80591 }
80592 diff --git a/net/atm/lec.h b/net/atm/lec.h
80593 index 9d14d19..5c145f3 100644
80594 --- a/net/atm/lec.h
80595 +++ b/net/atm/lec.h
80596 @@ -48,7 +48,7 @@ struct lane2_ops {
80597 const u8 *tlvs, u32 sizeoftlvs);
80598 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
80599 const u8 *tlvs, u32 sizeoftlvs);
80600 -};
80601 +} __no_const;
80602
80603 /*
80604 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
80605 diff --git a/net/atm/mpc.h b/net/atm/mpc.h
80606 index 0919a88..a23d54e 100644
80607 --- a/net/atm/mpc.h
80608 +++ b/net/atm/mpc.h
80609 @@ -33,7 +33,7 @@ struct mpoa_client {
80610 struct mpc_parameters parameters; /* parameters for this client */
80611
80612 const struct net_device_ops *old_ops;
80613 - struct net_device_ops new_ops;
80614 + net_device_ops_no_const new_ops;
80615 };
80616
80617
80618 diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
80619 index 4504a4b..1733f1e 100644
80620 --- a/net/atm/mpoa_caches.c
80621 +++ b/net/atm/mpoa_caches.c
80622 @@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_client *client)
80623 struct timeval now;
80624 struct k_message msg;
80625
80626 + pax_track_stack();
80627 +
80628 do_gettimeofday(&now);
80629
80630 write_lock_irq(&client->egress_lock);
80631 diff --git a/net/atm/proc.c b/net/atm/proc.c
80632 index ab8419a..aa91497 100644
80633 --- a/net/atm/proc.c
80634 +++ b/net/atm/proc.c
80635 @@ -43,9 +43,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
80636 const struct k_atm_aal_stats *stats)
80637 {
80638 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
80639 - atomic_read(&stats->tx),atomic_read(&stats->tx_err),
80640 - atomic_read(&stats->rx),atomic_read(&stats->rx_err),
80641 - atomic_read(&stats->rx_drop));
80642 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
80643 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
80644 + atomic_read_unchecked(&stats->rx_drop));
80645 }
80646
80647 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
80648 @@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
80649 {
80650 struct sock *sk = sk_atm(vcc);
80651
80652 +#ifdef CONFIG_GRKERNSEC_HIDESYM
80653 + seq_printf(seq, "%p ", NULL);
80654 +#else
80655 seq_printf(seq, "%p ", vcc);
80656 +#endif
80657 +
80658 if (!vcc->dev)
80659 seq_printf(seq, "Unassigned ");
80660 else
80661 @@ -214,7 +219,11 @@ static void svc_info(struct seq_file *seq, struct atm_vcc *vcc)
80662 {
80663 if (!vcc->dev)
80664 seq_printf(seq, sizeof(void *) == 4 ?
80665 +#ifdef CONFIG_GRKERNSEC_HIDESYM
80666 + "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
80667 +#else
80668 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
80669 +#endif
80670 else
80671 seq_printf(seq, "%3d %3d %5d ",
80672 vcc->dev->number, vcc->vpi, vcc->vci);
80673 diff --git a/net/atm/resources.c b/net/atm/resources.c
80674 index 56b7322..c48b84e 100644
80675 --- a/net/atm/resources.c
80676 +++ b/net/atm/resources.c
80677 @@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *dev)
80678 static void copy_aal_stats(struct k_atm_aal_stats *from,
80679 struct atm_aal_stats *to)
80680 {
80681 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
80682 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
80683 __AAL_STAT_ITEMS
80684 #undef __HANDLE_ITEM
80685 }
80686 @@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
80687 static void subtract_aal_stats(struct k_atm_aal_stats *from,
80688 struct atm_aal_stats *to)
80689 {
80690 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
80691 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
80692 __AAL_STAT_ITEMS
80693 #undef __HANDLE_ITEM
80694 }
80695 diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
80696 index 8567d47..bba2292 100644
80697 --- a/net/bridge/br_private.h
80698 +++ b/net/bridge/br_private.h
80699 @@ -255,7 +255,7 @@ extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
80700
80701 #ifdef CONFIG_SYSFS
80702 /* br_sysfs_if.c */
80703 -extern struct sysfs_ops brport_sysfs_ops;
80704 +extern const struct sysfs_ops brport_sysfs_ops;
80705 extern int br_sysfs_addif(struct net_bridge_port *p);
80706
80707 /* br_sysfs_br.c */
80708 diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
80709 index 9a52ac5..c97538e 100644
80710 --- a/net/bridge/br_stp_if.c
80711 +++ b/net/bridge/br_stp_if.c
80712 @@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridge *br)
80713 char *envp[] = { NULL };
80714
80715 if (br->stp_enabled == BR_USER_STP) {
80716 - r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
80717 + r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
80718 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
80719 br->dev->name, r);
80720
80721 diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
80722 index 820643a..ce77fb3 100644
80723 --- a/net/bridge/br_sysfs_if.c
80724 +++ b/net/bridge/br_sysfs_if.c
80725 @@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobject * kobj,
80726 return ret;
80727 }
80728
80729 -struct sysfs_ops brport_sysfs_ops = {
80730 +const struct sysfs_ops brport_sysfs_ops = {
80731 .show = brport_show,
80732 .store = brport_store,
80733 };
80734 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
80735 index d73d47f..72df42a 100644
80736 --- a/net/bridge/netfilter/ebtables.c
80737 +++ b/net/bridge/netfilter/ebtables.c
80738 @@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
80739 unsigned int entries_size, nentries;
80740 char *entries;
80741
80742 + pax_track_stack();
80743 +
80744 if (cmd == EBT_SO_GET_ENTRIES) {
80745 entries_size = t->private->entries_size;
80746 nentries = t->private->nentries;
80747 diff --git a/net/can/bcm.c b/net/can/bcm.c
80748 index 2ffd2e0..72a7486 100644
80749 --- a/net/can/bcm.c
80750 +++ b/net/can/bcm.c
80751 @@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file *m, void *v)
80752 struct bcm_sock *bo = bcm_sk(sk);
80753 struct bcm_op *op;
80754
80755 +#ifdef CONFIG_GRKERNSEC_HIDESYM
80756 + seq_printf(m, ">>> socket %p", NULL);
80757 + seq_printf(m, " / sk %p", NULL);
80758 + seq_printf(m, " / bo %p", NULL);
80759 +#else
80760 seq_printf(m, ">>> socket %p", sk->sk_socket);
80761 seq_printf(m, " / sk %p", sk);
80762 seq_printf(m, " / bo %p", bo);
80763 +#endif
80764 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
80765 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
80766 seq_printf(m, " <<<\n");
80767 diff --git a/net/compat.c b/net/compat.c
80768 index 9559afc..ccd74e1 100644
80769 --- a/net/compat.c
80770 +++ b/net/compat.c
80771 @@ -69,9 +69,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
80772 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
80773 __get_user(kmsg->msg_flags, &umsg->msg_flags))
80774 return -EFAULT;
80775 - kmsg->msg_name = compat_ptr(tmp1);
80776 - kmsg->msg_iov = compat_ptr(tmp2);
80777 - kmsg->msg_control = compat_ptr(tmp3);
80778 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
80779 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
80780 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
80781 return 0;
80782 }
80783
80784 @@ -94,7 +94,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
80785 kern_msg->msg_name = NULL;
80786
80787 tot_len = iov_from_user_compat_to_kern(kern_iov,
80788 - (struct compat_iovec __user *)kern_msg->msg_iov,
80789 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
80790 kern_msg->msg_iovlen);
80791 if (tot_len >= 0)
80792 kern_msg->msg_iov = kern_iov;
80793 @@ -114,20 +114,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
80794
80795 #define CMSG_COMPAT_FIRSTHDR(msg) \
80796 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
80797 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
80798 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
80799 (struct compat_cmsghdr __user *)NULL)
80800
80801 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
80802 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
80803 (ucmlen) <= (unsigned long) \
80804 ((mhdr)->msg_controllen - \
80805 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
80806 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
80807
80808 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
80809 struct compat_cmsghdr __user *cmsg, int cmsg_len)
80810 {
80811 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
80812 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
80813 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
80814 msg->msg_controllen)
80815 return NULL;
80816 return (struct compat_cmsghdr __user *)ptr;
80817 @@ -219,7 +219,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
80818 {
80819 struct compat_timeval ctv;
80820 struct compat_timespec cts[3];
80821 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
80822 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
80823 struct compat_cmsghdr cmhdr;
80824 int cmlen;
80825
80826 @@ -271,7 +271,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
80827
80828 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
80829 {
80830 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
80831 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
80832 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
80833 int fdnum = scm->fp->count;
80834 struct file **fp = scm->fp->fp;
80835 @@ -433,7 +433,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
80836 len = sizeof(ktime);
80837 old_fs = get_fs();
80838 set_fs(KERNEL_DS);
80839 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
80840 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
80841 set_fs(old_fs);
80842
80843 if (!err) {
80844 @@ -570,7 +570,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
80845 case MCAST_JOIN_GROUP:
80846 case MCAST_LEAVE_GROUP:
80847 {
80848 - struct compat_group_req __user *gr32 = (void *)optval;
80849 + struct compat_group_req __user *gr32 = (void __user *)optval;
80850 struct group_req __user *kgr =
80851 compat_alloc_user_space(sizeof(struct group_req));
80852 u32 interface;
80853 @@ -591,7 +591,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
80854 case MCAST_BLOCK_SOURCE:
80855 case MCAST_UNBLOCK_SOURCE:
80856 {
80857 - struct compat_group_source_req __user *gsr32 = (void *)optval;
80858 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
80859 struct group_source_req __user *kgsr = compat_alloc_user_space(
80860 sizeof(struct group_source_req));
80861 u32 interface;
80862 @@ -612,7 +612,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
80863 }
80864 case MCAST_MSFILTER:
80865 {
80866 - struct compat_group_filter __user *gf32 = (void *)optval;
80867 + struct compat_group_filter __user *gf32 = (void __user *)optval;
80868 struct group_filter __user *kgf;
80869 u32 interface, fmode, numsrc;
80870
80871 diff --git a/net/core/dev.c b/net/core/dev.c
80872 index 84a0705..575db4c 100644
80873 --- a/net/core/dev.c
80874 +++ b/net/core/dev.c
80875 @@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const char *name)
80876 if (no_module && capable(CAP_NET_ADMIN))
80877 no_module = request_module("netdev-%s", name);
80878 if (no_module && capable(CAP_SYS_MODULE)) {
80879 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
80880 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
80881 +#else
80882 if (!request_module("%s", name))
80883 pr_err("Loading kernel module for a network device "
80884 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
80885 "instead\n", name);
80886 +#endif
80887 }
80888 }
80889 EXPORT_SYMBOL(dev_load);
80890 @@ -1654,7 +1658,7 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
80891
80892 struct dev_gso_cb {
80893 void (*destructor)(struct sk_buff *skb);
80894 -};
80895 +} __no_const;
80896
80897 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
80898
80899 @@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
80900 }
80901 EXPORT_SYMBOL(netif_rx_ni);
80902
80903 -static void net_tx_action(struct softirq_action *h)
80904 +static void net_tx_action(void)
80905 {
80906 struct softnet_data *sd = &__get_cpu_var(softnet_data);
80907
80908 @@ -2827,7 +2831,7 @@ void netif_napi_del(struct napi_struct *napi)
80909 EXPORT_SYMBOL(netif_napi_del);
80910
80911
80912 -static void net_rx_action(struct softirq_action *h)
80913 +static void net_rx_action(void)
80914 {
80915 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
80916 unsigned long time_limit = jiffies + 2;
80917 diff --git a/net/core/flow.c b/net/core/flow.c
80918 index 9601587..8c4824e 100644
80919 --- a/net/core/flow.c
80920 +++ b/net/core/flow.c
80921 @@ -35,11 +35,11 @@ struct flow_cache_entry {
80922 atomic_t *object_ref;
80923 };
80924
80925 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
80926 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
80927
80928 static u32 flow_hash_shift;
80929 #define flow_hash_size (1 << flow_hash_shift)
80930 -static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
80931 +static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
80932
80933 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
80934
80935 @@ -52,7 +52,7 @@ struct flow_percpu_info {
80936 u32 hash_rnd;
80937 int count;
80938 };
80939 -static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
80940 +static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
80941
80942 #define flow_hash_rnd_recalc(cpu) \
80943 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
80944 @@ -69,7 +69,7 @@ struct flow_flush_info {
80945 atomic_t cpuleft;
80946 struct completion completion;
80947 };
80948 -static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
80949 +static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
80950
80951 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
80952
80953 @@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
80954 if (fle->family == family &&
80955 fle->dir == dir &&
80956 flow_key_compare(key, &fle->key) == 0) {
80957 - if (fle->genid == atomic_read(&flow_cache_genid)) {
80958 + if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
80959 void *ret = fle->object;
80960
80961 if (ret)
80962 @@ -228,7 +228,7 @@ nocache:
80963 err = resolver(net, key, family, dir, &obj, &obj_ref);
80964
80965 if (fle && !err) {
80966 - fle->genid = atomic_read(&flow_cache_genid);
80967 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
80968
80969 if (fle->object)
80970 atomic_dec(fle->object_ref);
80971 @@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(unsigned long data)
80972
80973 fle = flow_table(cpu)[i];
80974 for (; fle; fle = fle->next) {
80975 - unsigned genid = atomic_read(&flow_cache_genid);
80976 + unsigned genid = atomic_read_unchecked(&flow_cache_genid);
80977
80978 if (!fle->object || fle->genid == genid)
80979 continue;
80980 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
80981 index d4fd895..ac9b1e6 100644
80982 --- a/net/core/rtnetlink.c
80983 +++ b/net/core/rtnetlink.c
80984 @@ -57,7 +57,7 @@ struct rtnl_link
80985 {
80986 rtnl_doit_func doit;
80987 rtnl_dumpit_func dumpit;
80988 -};
80989 +} __no_const;
80990
80991 static DEFINE_MUTEX(rtnl_mutex);
80992
80993 diff --git a/net/core/scm.c b/net/core/scm.c
80994 index d98eafc..1a190a9 100644
80995 --- a/net/core/scm.c
80996 +++ b/net/core/scm.c
80997 @@ -191,7 +191,7 @@ error:
80998 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
80999 {
81000 struct cmsghdr __user *cm
81001 - = (__force struct cmsghdr __user *)msg->msg_control;
81002 + = (struct cmsghdr __force_user *)msg->msg_control;
81003 struct cmsghdr cmhdr;
81004 int cmlen = CMSG_LEN(len);
81005 int err;
81006 @@ -214,7 +214,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
81007 err = -EFAULT;
81008 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
81009 goto out;
81010 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
81011 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
81012 goto out;
81013 cmlen = CMSG_SPACE(len);
81014 if (msg->msg_controllen < cmlen)
81015 @@ -229,7 +229,7 @@ out:
81016 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
81017 {
81018 struct cmsghdr __user *cm
81019 - = (__force struct cmsghdr __user*)msg->msg_control;
81020 + = (struct cmsghdr __force_user *)msg->msg_control;
81021
81022 int fdmax = 0;
81023 int fdnum = scm->fp->count;
81024 @@ -249,7 +249,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
81025 if (fdnum < fdmax)
81026 fdmax = fdnum;
81027
81028 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
81029 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
81030 i++, cmfptr++)
81031 {
81032 int new_fd;
81033 diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
81034 index 45329d7..626aaa6 100644
81035 --- a/net/core/secure_seq.c
81036 +++ b/net/core/secure_seq.c
81037 @@ -57,7 +57,7 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
81038 EXPORT_SYMBOL(secure_tcpv6_sequence_number);
81039
81040 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
81041 - __be16 dport)
81042 + __be16 dport)
81043 {
81044 u32 secret[MD5_MESSAGE_BYTES / 4];
81045 u32 hash[MD5_DIGEST_WORDS];
81046 @@ -71,7 +71,6 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
81047 secret[i] = net_secret[i];
81048
81049 md5_transform(hash, secret);
81050 -
81051 return hash[0];
81052 }
81053 #endif
81054 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
81055 index a807f8c..65f906f 100644
81056 --- a/net/core/skbuff.c
81057 +++ b/net/core/skbuff.c
81058 @@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
81059 struct sk_buff *frag_iter;
81060 struct sock *sk = skb->sk;
81061
81062 + pax_track_stack();
81063 +
81064 /*
81065 * __skb_splice_bits() only fails if the output has no room left,
81066 * so no point in going over the frag_list for the error case.
81067 diff --git a/net/core/sock.c b/net/core/sock.c
81068 index 6605e75..3acebda 100644
81069 --- a/net/core/sock.c
81070 +++ b/net/core/sock.c
81071 @@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
81072 break;
81073
81074 case SO_PEERCRED:
81075 + {
81076 + struct ucred peercred;
81077 if (len > sizeof(sk->sk_peercred))
81078 len = sizeof(sk->sk_peercred);
81079 - if (copy_to_user(optval, &sk->sk_peercred, len))
81080 + peercred = sk->sk_peercred;
81081 + if (copy_to_user(optval, &peercred, len))
81082 return -EFAULT;
81083 goto lenout;
81084 + }
81085
81086 case SO_PEERNAME:
81087 {
81088 @@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
81089 */
81090 smp_wmb();
81091 atomic_set(&sk->sk_refcnt, 1);
81092 - atomic_set(&sk->sk_drops, 0);
81093 + atomic_set_unchecked(&sk->sk_drops, 0);
81094 }
81095 EXPORT_SYMBOL(sock_init_data);
81096
81097 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
81098 index 2036568..c55883d 100644
81099 --- a/net/decnet/sysctl_net_decnet.c
81100 +++ b/net/decnet/sysctl_net_decnet.c
81101 @@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
81102
81103 if (len > *lenp) len = *lenp;
81104
81105 - if (copy_to_user(buffer, addr, len))
81106 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
81107 return -EFAULT;
81108
81109 *lenp = len;
81110 @@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
81111
81112 if (len > *lenp) len = *lenp;
81113
81114 - if (copy_to_user(buffer, devname, len))
81115 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
81116 return -EFAULT;
81117
81118 *lenp = len;
81119 diff --git a/net/econet/Kconfig b/net/econet/Kconfig
81120 index 39a2d29..f39c0fe 100644
81121 --- a/net/econet/Kconfig
81122 +++ b/net/econet/Kconfig
81123 @@ -4,7 +4,7 @@
81124
81125 config ECONET
81126 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
81127 - depends on EXPERIMENTAL && INET
81128 + depends on EXPERIMENTAL && INET && BROKEN
81129 ---help---
81130 Econet is a fairly old and slow networking protocol mainly used by
81131 Acorn computers to access file and print servers. It uses native
81132 diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
81133 index a413b1b..380849c 100644
81134 --- a/net/ieee802154/dgram.c
81135 +++ b/net/ieee802154/dgram.c
81136 @@ -318,7 +318,7 @@ out:
81137 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
81138 {
81139 if (sock_queue_rcv_skb(sk, skb) < 0) {
81140 - atomic_inc(&sk->sk_drops);
81141 + atomic_inc_unchecked(&sk->sk_drops);
81142 kfree_skb(skb);
81143 return NET_RX_DROP;
81144 }
81145 diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
81146 index 30e74ee..bfc6ee0 100644
81147 --- a/net/ieee802154/raw.c
81148 +++ b/net/ieee802154/raw.c
81149 @@ -206,7 +206,7 @@ out:
81150 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
81151 {
81152 if (sock_queue_rcv_skb(sk, skb) < 0) {
81153 - atomic_inc(&sk->sk_drops);
81154 + atomic_inc_unchecked(&sk->sk_drops);
81155 kfree_skb(skb);
81156 return NET_RX_DROP;
81157 }
81158 diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
81159 index dba56d2..acee5d6 100644
81160 --- a/net/ipv4/inet_diag.c
81161 +++ b/net/ipv4/inet_diag.c
81162 @@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct sock *sk,
81163 r->idiag_retrans = 0;
81164
81165 r->id.idiag_if = sk->sk_bound_dev_if;
81166 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81167 + r->id.idiag_cookie[0] = 0;
81168 + r->id.idiag_cookie[1] = 0;
81169 +#else
81170 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
81171 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
81172 +#endif
81173
81174 r->id.idiag_sport = inet->sport;
81175 r->id.idiag_dport = inet->dport;
81176 @@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
81177 r->idiag_family = tw->tw_family;
81178 r->idiag_retrans = 0;
81179 r->id.idiag_if = tw->tw_bound_dev_if;
81180 +
81181 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81182 + r->id.idiag_cookie[0] = 0;
81183 + r->id.idiag_cookie[1] = 0;
81184 +#else
81185 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
81186 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
81187 +#endif
81188 +
81189 r->id.idiag_sport = tw->tw_sport;
81190 r->id.idiag_dport = tw->tw_dport;
81191 r->id.idiag_src[0] = tw->tw_rcv_saddr;
81192 @@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
81193 if (sk == NULL)
81194 goto unlock;
81195
81196 +#ifndef CONFIG_GRKERNSEC_HIDESYM
81197 err = -ESTALE;
81198 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
81199 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
81200 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
81201 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
81202 goto out;
81203 +#endif
81204
81205 err = -ENOMEM;
81206 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
81207 @@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
81208 r->idiag_retrans = req->retrans;
81209
81210 r->id.idiag_if = sk->sk_bound_dev_if;
81211 +
81212 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81213 + r->id.idiag_cookie[0] = 0;
81214 + r->id.idiag_cookie[1] = 0;
81215 +#else
81216 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
81217 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
81218 +#endif
81219
81220 tmo = req->expires - jiffies;
81221 if (tmo < 0)
81222 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
81223 index d717267..56de7e7 100644
81224 --- a/net/ipv4/inet_hashtables.c
81225 +++ b/net/ipv4/inet_hashtables.c
81226 @@ -18,12 +18,15 @@
81227 #include <linux/sched.h>
81228 #include <linux/slab.h>
81229 #include <linux/wait.h>
81230 +#include <linux/security.h>
81231
81232 #include <net/inet_connection_sock.h>
81233 #include <net/inet_hashtables.h>
81234 #include <net/secure_seq.h>
81235 #include <net/ip.h>
81236
81237 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
81238 +
81239 /*
81240 * Allocate and initialize a new local port bind bucket.
81241 * The bindhash mutex for snum's hash chain must be held here.
81242 @@ -491,6 +494,8 @@ ok:
81243 }
81244 spin_unlock(&head->lock);
81245
81246 + gr_update_task_in_ip_table(current, inet_sk(sk));
81247 +
81248 if (tw) {
81249 inet_twsk_deschedule(tw, death_row);
81250 inet_twsk_put(tw);
81251 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
81252 index 13b229f..6956484 100644
81253 --- a/net/ipv4/inetpeer.c
81254 +++ b/net/ipv4/inetpeer.c
81255 @@ -367,6 +367,8 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
81256 struct inet_peer *p, *n;
81257 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
81258
81259 + pax_track_stack();
81260 +
81261 /* Look up for the address quickly. */
81262 read_lock_bh(&peer_pool_lock);
81263 p = lookup(daddr, NULL);
81264 @@ -390,7 +392,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
81265 return NULL;
81266 n->v4daddr = daddr;
81267 atomic_set(&n->refcnt, 1);
81268 - atomic_set(&n->rid, 0);
81269 + atomic_set_unchecked(&n->rid, 0);
81270 n->ip_id_count = secure_ip_id(daddr);
81271 n->tcp_ts_stamp = 0;
81272
81273 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
81274 index d3fe10b..feeafc9 100644
81275 --- a/net/ipv4/ip_fragment.c
81276 +++ b/net/ipv4/ip_fragment.c
81277 @@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
81278 return 0;
81279
81280 start = qp->rid;
81281 - end = atomic_inc_return(&peer->rid);
81282 + end = atomic_inc_return_unchecked(&peer->rid);
81283 qp->rid = end;
81284
81285 rc = qp->q.fragments && (end - start) > max;
81286 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
81287 index e982b5c..f079d75 100644
81288 --- a/net/ipv4/ip_sockglue.c
81289 +++ b/net/ipv4/ip_sockglue.c
81290 @@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
81291 int val;
81292 int len;
81293
81294 + pax_track_stack();
81295 +
81296 if (level != SOL_IP)
81297 return -EOPNOTSUPP;
81298
81299 @@ -1173,7 +1175,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
81300 if (sk->sk_type != SOCK_STREAM)
81301 return -ENOPROTOOPT;
81302
81303 - msg.msg_control = optval;
81304 + msg.msg_control = (void __force_kernel *)optval;
81305 msg.msg_controllen = len;
81306 msg.msg_flags = 0;
81307
81308 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
81309 index f8d04c2..c1188f2 100644
81310 --- a/net/ipv4/ipconfig.c
81311 +++ b/net/ipv4/ipconfig.c
81312 @@ -295,7 +295,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
81313
81314 mm_segment_t oldfs = get_fs();
81315 set_fs(get_ds());
81316 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
81317 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
81318 set_fs(oldfs);
81319 return res;
81320 }
81321 @@ -306,7 +306,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
81322
81323 mm_segment_t oldfs = get_fs();
81324 set_fs(get_ds());
81325 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
81326 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
81327 set_fs(oldfs);
81328 return res;
81329 }
81330 @@ -317,7 +317,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
81331
81332 mm_segment_t oldfs = get_fs();
81333 set_fs(get_ds());
81334 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
81335 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
81336 set_fs(oldfs);
81337 return res;
81338 }
81339 diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
81340 index c8b0cc3..4da5ae2 100644
81341 --- a/net/ipv4/netfilter/arp_tables.c
81342 +++ b/net/ipv4/netfilter/arp_tables.c
81343 @@ -934,6 +934,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
81344 private = &tmp;
81345 }
81346 #endif
81347 + memset(&info, 0, sizeof(info));
81348 info.valid_hooks = t->valid_hooks;
81349 memcpy(info.hook_entry, private->hook_entry,
81350 sizeof(info.hook_entry));
81351 diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
81352 index c156db2..e772975 100644
81353 --- a/net/ipv4/netfilter/ip_queue.c
81354 +++ b/net/ipv4/netfilter/ip_queue.c
81355 @@ -286,6 +286,9 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
81356
81357 if (v->data_len < sizeof(*user_iph))
81358 return 0;
81359 + if (v->data_len > 65535)
81360 + return -EMSGSIZE;
81361 +
81362 diff = v->data_len - e->skb->len;
81363 if (diff < 0) {
81364 if (pskb_trim(e->skb, v->data_len))
81365 @@ -409,7 +412,8 @@ ipq_dev_drop(int ifindex)
81366 static inline void
81367 __ipq_rcv_skb(struct sk_buff *skb)
81368 {
81369 - int status, type, pid, flags, nlmsglen, skblen;
81370 + int status, type, pid, flags;
81371 + unsigned int nlmsglen, skblen;
81372 struct nlmsghdr *nlh;
81373
81374 skblen = skb->len;
81375 diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
81376 index 0606db1..02e7e4c 100644
81377 --- a/net/ipv4/netfilter/ip_tables.c
81378 +++ b/net/ipv4/netfilter/ip_tables.c
81379 @@ -1141,6 +1141,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
81380 private = &tmp;
81381 }
81382 #endif
81383 + memset(&info, 0, sizeof(info));
81384 info.valid_hooks = t->valid_hooks;
81385 memcpy(info.hook_entry, private->hook_entry,
81386 sizeof(info.hook_entry));
81387 diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
81388 index d9521f6..3c3eb25 100644
81389 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
81390 +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
81391 @@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
81392
81393 *len = 0;
81394
81395 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
81396 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
81397 if (*octets == NULL) {
81398 if (net_ratelimit())
81399 printk("OOM in bsalg (%d)\n", __LINE__);
81400 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
81401 index ab996f9..3da5f96 100644
81402 --- a/net/ipv4/raw.c
81403 +++ b/net/ipv4/raw.c
81404 @@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
81405 /* Charge it to the socket. */
81406
81407 if (sock_queue_rcv_skb(sk, skb) < 0) {
81408 - atomic_inc(&sk->sk_drops);
81409 + atomic_inc_unchecked(&sk->sk_drops);
81410 kfree_skb(skb);
81411 return NET_RX_DROP;
81412 }
81413 @@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
81414 int raw_rcv(struct sock *sk, struct sk_buff *skb)
81415 {
81416 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
81417 - atomic_inc(&sk->sk_drops);
81418 + atomic_inc_unchecked(&sk->sk_drops);
81419 kfree_skb(skb);
81420 return NET_RX_DROP;
81421 }
81422 @@ -724,16 +724,23 @@ static int raw_init(struct sock *sk)
81423
81424 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
81425 {
81426 + struct icmp_filter filter;
81427 +
81428 + if (optlen < 0)
81429 + return -EINVAL;
81430 if (optlen > sizeof(struct icmp_filter))
81431 optlen = sizeof(struct icmp_filter);
81432 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
81433 + if (copy_from_user(&filter, optval, optlen))
81434 return -EFAULT;
81435 + raw_sk(sk)->filter = filter;
81436 +
81437 return 0;
81438 }
81439
81440 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
81441 {
81442 int len, ret = -EFAULT;
81443 + struct icmp_filter filter;
81444
81445 if (get_user(len, optlen))
81446 goto out;
81447 @@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
81448 if (len > sizeof(struct icmp_filter))
81449 len = sizeof(struct icmp_filter);
81450 ret = -EFAULT;
81451 - if (put_user(len, optlen) ||
81452 - copy_to_user(optval, &raw_sk(sk)->filter, len))
81453 + filter = raw_sk(sk)->filter;
81454 + if (put_user(len, optlen) || len > sizeof filter ||
81455 + copy_to_user(optval, &filter, len))
81456 goto out;
81457 ret = 0;
81458 out: return ret;
81459 @@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
81460 sk_wmem_alloc_get(sp),
81461 sk_rmem_alloc_get(sp),
81462 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
81463 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
81464 + atomic_read(&sp->sk_refcnt),
81465 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81466 + NULL,
81467 +#else
81468 + sp,
81469 +#endif
81470 + atomic_read_unchecked(&sp->sk_drops));
81471 }
81472
81473 static int raw_seq_show(struct seq_file *seq, void *v)
81474 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
81475 index 58f141b..b759702 100644
81476 --- a/net/ipv4/route.c
81477 +++ b/net/ipv4/route.c
81478 @@ -269,7 +269,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
81479
81480 static inline int rt_genid(struct net *net)
81481 {
81482 - return atomic_read(&net->ipv4.rt_genid);
81483 + return atomic_read_unchecked(&net->ipv4.rt_genid);
81484 }
81485
81486 #ifdef CONFIG_PROC_FS
81487 @@ -889,7 +889,7 @@ static void rt_cache_invalidate(struct net *net)
81488 unsigned char shuffle;
81489
81490 get_random_bytes(&shuffle, sizeof(shuffle));
81491 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
81492 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
81493 }
81494
81495 /*
81496 @@ -3357,7 +3357,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
81497
81498 static __net_init int rt_secret_timer_init(struct net *net)
81499 {
81500 - atomic_set(&net->ipv4.rt_genid,
81501 + atomic_set_unchecked(&net->ipv4.rt_genid,
81502 (int) ((num_physpages ^ (num_physpages>>8)) ^
81503 (jiffies ^ (jiffies >> 7))));
81504
81505 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
81506 index f095659..adc892a 100644
81507 --- a/net/ipv4/tcp.c
81508 +++ b/net/ipv4/tcp.c
81509 @@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
81510 int val;
81511 int err = 0;
81512
81513 + pax_track_stack();
81514 +
81515 /* This is a string value all the others are int's */
81516 if (optname == TCP_CONGESTION) {
81517 char name[TCP_CA_NAME_MAX];
81518 @@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
81519 struct tcp_sock *tp = tcp_sk(sk);
81520 int val, len;
81521
81522 + pax_track_stack();
81523 +
81524 if (get_user(len, optlen))
81525 return -EFAULT;
81526
81527 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
81528 index 6fc7961..33bad4a 100644
81529 --- a/net/ipv4/tcp_ipv4.c
81530 +++ b/net/ipv4/tcp_ipv4.c
81531 @@ -85,6 +85,9 @@
81532 int sysctl_tcp_tw_reuse __read_mostly;
81533 int sysctl_tcp_low_latency __read_mostly;
81534
81535 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81536 +extern int grsec_enable_blackhole;
81537 +#endif
81538
81539 #ifdef CONFIG_TCP_MD5SIG
81540 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
81541 @@ -1543,6 +1546,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
81542 return 0;
81543
81544 reset:
81545 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81546 + if (!grsec_enable_blackhole)
81547 +#endif
81548 tcp_v4_send_reset(rsk, skb);
81549 discard:
81550 kfree_skb(skb);
81551 @@ -1604,12 +1610,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
81552 TCP_SKB_CB(skb)->sacked = 0;
81553
81554 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
81555 - if (!sk)
81556 + if (!sk) {
81557 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81558 + ret = 1;
81559 +#endif
81560 goto no_tcp_socket;
81561 + }
81562
81563 process:
81564 - if (sk->sk_state == TCP_TIME_WAIT)
81565 + if (sk->sk_state == TCP_TIME_WAIT) {
81566 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81567 + ret = 2;
81568 +#endif
81569 goto do_time_wait;
81570 + }
81571
81572 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
81573 goto discard_and_relse;
81574 @@ -1651,6 +1665,10 @@ no_tcp_socket:
81575 bad_packet:
81576 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
81577 } else {
81578 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81579 + if (!grsec_enable_blackhole || (ret == 1 &&
81580 + (skb->dev->flags & IFF_LOOPBACK)))
81581 +#endif
81582 tcp_v4_send_reset(NULL, skb);
81583 }
81584
81585 @@ -2238,7 +2256,11 @@ static void get_openreq4(struct sock *sk, struct request_sock *req,
81586 0, /* non standard timer */
81587 0, /* open_requests have no inode */
81588 atomic_read(&sk->sk_refcnt),
81589 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81590 + NULL,
81591 +#else
81592 req,
81593 +#endif
81594 len);
81595 }
81596
81597 @@ -2280,7 +2302,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
81598 sock_i_uid(sk),
81599 icsk->icsk_probes_out,
81600 sock_i_ino(sk),
81601 - atomic_read(&sk->sk_refcnt), sk,
81602 + atomic_read(&sk->sk_refcnt),
81603 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81604 + NULL,
81605 +#else
81606 + sk,
81607 +#endif
81608 jiffies_to_clock_t(icsk->icsk_rto),
81609 jiffies_to_clock_t(icsk->icsk_ack.ato),
81610 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
81611 @@ -2308,7 +2335,13 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw,
81612 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
81613 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
81614 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
81615 - atomic_read(&tw->tw_refcnt), tw, len);
81616 + atomic_read(&tw->tw_refcnt),
81617 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81618 + NULL,
81619 +#else
81620 + tw,
81621 +#endif
81622 + len);
81623 }
81624
81625 #define TMPSZ 150
81626 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
81627 index 4c03598..e09a8e8 100644
81628 --- a/net/ipv4/tcp_minisocks.c
81629 +++ b/net/ipv4/tcp_minisocks.c
81630 @@ -26,6 +26,10 @@
81631 #include <net/inet_common.h>
81632 #include <net/xfrm.h>
81633
81634 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81635 +extern int grsec_enable_blackhole;
81636 +#endif
81637 +
81638 #ifdef CONFIG_SYSCTL
81639 #define SYNC_INIT 0 /* let the user enable it */
81640 #else
81641 @@ -672,6 +676,10 @@ listen_overflow:
81642
81643 embryonic_reset:
81644 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
81645 +
81646 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81647 + if (!grsec_enable_blackhole)
81648 +#endif
81649 if (!(flg & TCP_FLAG_RST))
81650 req->rsk_ops->send_reset(sk, skb);
81651
81652 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
81653 index af83bdf..ec91cb2 100644
81654 --- a/net/ipv4/tcp_output.c
81655 +++ b/net/ipv4/tcp_output.c
81656 @@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
81657 __u8 *md5_hash_location;
81658 int mss;
81659
81660 + pax_track_stack();
81661 +
81662 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
81663 if (skb == NULL)
81664 return NULL;
81665 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
81666 index 59f5b5e..193860f 100644
81667 --- a/net/ipv4/tcp_probe.c
81668 +++ b/net/ipv4/tcp_probe.c
81669 @@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
81670 if (cnt + width >= len)
81671 break;
81672
81673 - if (copy_to_user(buf + cnt, tbuf, width))
81674 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
81675 return -EFAULT;
81676 cnt += width;
81677 }
81678 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
81679 index 57d5501..a9ed13a 100644
81680 --- a/net/ipv4/tcp_timer.c
81681 +++ b/net/ipv4/tcp_timer.c
81682 @@ -21,6 +21,10 @@
81683 #include <linux/module.h>
81684 #include <net/tcp.h>
81685
81686 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81687 +extern int grsec_lastack_retries;
81688 +#endif
81689 +
81690 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
81691 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
81692 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
81693 @@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock *sk)
81694 }
81695 }
81696
81697 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81698 + if ((sk->sk_state == TCP_LAST_ACK) &&
81699 + (grsec_lastack_retries > 0) &&
81700 + (grsec_lastack_retries < retry_until))
81701 + retry_until = grsec_lastack_retries;
81702 +#endif
81703 +
81704 if (retransmits_timed_out(sk, retry_until)) {
81705 /* Has it gone just too far? */
81706 tcp_write_err(sk);
81707 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
81708 index 0ac8833..58d8c43 100644
81709 --- a/net/ipv4/udp.c
81710 +++ b/net/ipv4/udp.c
81711 @@ -86,6 +86,7 @@
81712 #include <linux/types.h>
81713 #include <linux/fcntl.h>
81714 #include <linux/module.h>
81715 +#include <linux/security.h>
81716 #include <linux/socket.h>
81717 #include <linux/sockios.h>
81718 #include <linux/igmp.h>
81719 @@ -106,6 +107,10 @@
81720 #include <net/xfrm.h>
81721 #include "udp_impl.h"
81722
81723 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81724 +extern int grsec_enable_blackhole;
81725 +#endif
81726 +
81727 struct udp_table udp_table;
81728 EXPORT_SYMBOL(udp_table);
81729
81730 @@ -371,6 +376,9 @@ found:
81731 return s;
81732 }
81733
81734 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
81735 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
81736 +
81737 /*
81738 * This routine is called by the ICMP module when it gets some
81739 * sort of error condition. If err < 0 then the socket should
81740 @@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
81741 dport = usin->sin_port;
81742 if (dport == 0)
81743 return -EINVAL;
81744 +
81745 + err = gr_search_udp_sendmsg(sk, usin);
81746 + if (err)
81747 + return err;
81748 } else {
81749 if (sk->sk_state != TCP_ESTABLISHED)
81750 return -EDESTADDRREQ;
81751 +
81752 + err = gr_search_udp_sendmsg(sk, NULL);
81753 + if (err)
81754 + return err;
81755 +
81756 daddr = inet->daddr;
81757 dport = inet->dport;
81758 /* Open fast path for connected socket.
81759 @@ -945,6 +962,10 @@ try_again:
81760 if (!skb)
81761 goto out;
81762
81763 + err = gr_search_udp_recvmsg(sk, skb);
81764 + if (err)
81765 + goto out_free;
81766 +
81767 ulen = skb->len - sizeof(struct udphdr);
81768 copied = len;
81769 if (copied > ulen)
81770 @@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
81771 if (rc == -ENOMEM) {
81772 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
81773 is_udplite);
81774 - atomic_inc(&sk->sk_drops);
81775 + atomic_inc_unchecked(&sk->sk_drops);
81776 }
81777 goto drop;
81778 }
81779 @@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
81780 goto csum_error;
81781
81782 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
81783 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81784 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
81785 +#endif
81786 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
81787
81788 /*
81789 @@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
81790 sk_wmem_alloc_get(sp),
81791 sk_rmem_alloc_get(sp),
81792 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
81793 - atomic_read(&sp->sk_refcnt), sp,
81794 - atomic_read(&sp->sk_drops), len);
81795 + atomic_read(&sp->sk_refcnt),
81796 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81797 + NULL,
81798 +#else
81799 + sp,
81800 +#endif
81801 + atomic_read_unchecked(&sp->sk_drops), len);
81802 }
81803
81804 int udp4_seq_show(struct seq_file *seq, void *v)
81805 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
81806 index 8ac3d09..fc58c5f 100644
81807 --- a/net/ipv6/addrconf.c
81808 +++ b/net/ipv6/addrconf.c
81809 @@ -2053,7 +2053,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
81810 p.iph.ihl = 5;
81811 p.iph.protocol = IPPROTO_IPV6;
81812 p.iph.ttl = 64;
81813 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
81814 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
81815
81816 if (ops->ndo_do_ioctl) {
81817 mm_segment_t oldfs = get_fs();
81818 diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
81819 index cc4797d..7cfdfcc 100644
81820 --- a/net/ipv6/inet6_connection_sock.c
81821 +++ b/net/ipv6/inet6_connection_sock.c
81822 @@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
81823 #ifdef CONFIG_XFRM
81824 {
81825 struct rt6_info *rt = (struct rt6_info *)dst;
81826 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
81827 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
81828 }
81829 #endif
81830 }
81831 @@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
81832 #ifdef CONFIG_XFRM
81833 if (dst) {
81834 struct rt6_info *rt = (struct rt6_info *)dst;
81835 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
81836 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
81837 sk->sk_dst_cache = NULL;
81838 dst_release(dst);
81839 dst = NULL;
81840 diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
81841 index 093e9b2..f72cddb 100644
81842 --- a/net/ipv6/inet6_hashtables.c
81843 +++ b/net/ipv6/inet6_hashtables.c
81844 @@ -119,7 +119,7 @@ out:
81845 }
81846 EXPORT_SYMBOL(__inet6_lookup_established);
81847
81848 -static int inline compute_score(struct sock *sk, struct net *net,
81849 +static inline int compute_score(struct sock *sk, struct net *net,
81850 const unsigned short hnum,
81851 const struct in6_addr *daddr,
81852 const int dif)
81853 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
81854 index 4f7aaf6..f7acf45 100644
81855 --- a/net/ipv6/ipv6_sockglue.c
81856 +++ b/net/ipv6/ipv6_sockglue.c
81857 @@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
81858 int val, valbool;
81859 int retv = -ENOPROTOOPT;
81860
81861 + pax_track_stack();
81862 +
81863 if (optval == NULL)
81864 val=0;
81865 else {
81866 @@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
81867 int len;
81868 int val;
81869
81870 + pax_track_stack();
81871 +
81872 if (ip6_mroute_opt(optname))
81873 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
81874
81875 @@ -922,7 +926,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
81876 if (sk->sk_type != SOCK_STREAM)
81877 return -ENOPROTOOPT;
81878
81879 - msg.msg_control = optval;
81880 + msg.msg_control = (void __force_kernel *)optval;
81881 msg.msg_controllen = len;
81882 msg.msg_flags = 0;
81883
81884 diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
81885 index 1cf3f0c..1d4376f 100644
81886 --- a/net/ipv6/netfilter/ip6_queue.c
81887 +++ b/net/ipv6/netfilter/ip6_queue.c
81888 @@ -287,6 +287,9 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
81889
81890 if (v->data_len < sizeof(*user_iph))
81891 return 0;
81892 + if (v->data_len > 65535)
81893 + return -EMSGSIZE;
81894 +
81895 diff = v->data_len - e->skb->len;
81896 if (diff < 0) {
81897 if (pskb_trim(e->skb, v->data_len))
81898 @@ -411,7 +414,8 @@ ipq_dev_drop(int ifindex)
81899 static inline void
81900 __ipq_rcv_skb(struct sk_buff *skb)
81901 {
81902 - int status, type, pid, flags, nlmsglen, skblen;
81903 + int status, type, pid, flags;
81904 + unsigned int nlmsglen, skblen;
81905 struct nlmsghdr *nlh;
81906
81907 skblen = skb->len;
81908 diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
81909 index 78b5a36..7f37433 100644
81910 --- a/net/ipv6/netfilter/ip6_tables.c
81911 +++ b/net/ipv6/netfilter/ip6_tables.c
81912 @@ -1173,6 +1173,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
81913 private = &tmp;
81914 }
81915 #endif
81916 + memset(&info, 0, sizeof(info));
81917 info.valid_hooks = t->valid_hooks;
81918 memcpy(info.hook_entry, private->hook_entry,
81919 sizeof(info.hook_entry));
81920 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
81921 index 4f24570..b813b34 100644
81922 --- a/net/ipv6/raw.c
81923 +++ b/net/ipv6/raw.c
81924 @@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
81925 {
81926 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
81927 skb_checksum_complete(skb)) {
81928 - atomic_inc(&sk->sk_drops);
81929 + atomic_inc_unchecked(&sk->sk_drops);
81930 kfree_skb(skb);
81931 return NET_RX_DROP;
81932 }
81933
81934 /* Charge it to the socket. */
81935 if (sock_queue_rcv_skb(sk,skb)<0) {
81936 - atomic_inc(&sk->sk_drops);
81937 + atomic_inc_unchecked(&sk->sk_drops);
81938 kfree_skb(skb);
81939 return NET_RX_DROP;
81940 }
81941 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
81942 struct raw6_sock *rp = raw6_sk(sk);
81943
81944 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
81945 - atomic_inc(&sk->sk_drops);
81946 + atomic_inc_unchecked(&sk->sk_drops);
81947 kfree_skb(skb);
81948 return NET_RX_DROP;
81949 }
81950 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
81951
81952 if (inet->hdrincl) {
81953 if (skb_checksum_complete(skb)) {
81954 - atomic_inc(&sk->sk_drops);
81955 + atomic_inc_unchecked(&sk->sk_drops);
81956 kfree_skb(skb);
81957 return NET_RX_DROP;
81958 }
81959 @@ -518,7 +518,7 @@ csum_copy_err:
81960 as some normal condition.
81961 */
81962 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
81963 - atomic_inc(&sk->sk_drops);
81964 + atomic_inc_unchecked(&sk->sk_drops);
81965 goto out;
81966 }
81967
81968 @@ -600,7 +600,7 @@ out:
81969 return err;
81970 }
81971
81972 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
81973 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
81974 struct flowi *fl, struct rt6_info *rt,
81975 unsigned int flags)
81976 {
81977 @@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
81978 u16 proto;
81979 int err;
81980
81981 + pax_track_stack();
81982 +
81983 /* Rough check on arithmetic overflow,
81984 better check is made in ip6_append_data().
81985 */
81986 @@ -916,12 +918,17 @@ do_confirm:
81987 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
81988 char __user *optval, int optlen)
81989 {
81990 + struct icmp6_filter filter;
81991 +
81992 switch (optname) {
81993 case ICMPV6_FILTER:
81994 + if (optlen < 0)
81995 + return -EINVAL;
81996 if (optlen > sizeof(struct icmp6_filter))
81997 optlen = sizeof(struct icmp6_filter);
81998 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
81999 + if (copy_from_user(&filter, optval, optlen))
82000 return -EFAULT;
82001 + raw6_sk(sk)->filter = filter;
82002 return 0;
82003 default:
82004 return -ENOPROTOOPT;
82005 @@ -934,6 +941,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
82006 char __user *optval, int __user *optlen)
82007 {
82008 int len;
82009 + struct icmp6_filter filter;
82010
82011 switch (optname) {
82012 case ICMPV6_FILTER:
82013 @@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
82014 len = sizeof(struct icmp6_filter);
82015 if (put_user(len, optlen))
82016 return -EFAULT;
82017 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
82018 + filter = raw6_sk(sk)->filter;
82019 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
82020 return -EFAULT;
82021 return 0;
82022 default:
82023 @@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
82024 0, 0L, 0,
82025 sock_i_uid(sp), 0,
82026 sock_i_ino(sp),
82027 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
82028 + atomic_read(&sp->sk_refcnt),
82029 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82030 + NULL,
82031 +#else
82032 + sp,
82033 +#endif
82034 + atomic_read_unchecked(&sp->sk_drops));
82035 }
82036
82037 static int raw6_seq_show(struct seq_file *seq, void *v)
82038 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
82039 index faae6df..d4430c1 100644
82040 --- a/net/ipv6/tcp_ipv6.c
82041 +++ b/net/ipv6/tcp_ipv6.c
82042 @@ -89,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
82043 }
82044 #endif
82045
82046 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82047 +extern int grsec_enable_blackhole;
82048 +#endif
82049 +
82050 static void tcp_v6_hash(struct sock *sk)
82051 {
82052 if (sk->sk_state != TCP_CLOSE) {
82053 @@ -1579,6 +1583,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
82054 return 0;
82055
82056 reset:
82057 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82058 + if (!grsec_enable_blackhole)
82059 +#endif
82060 tcp_v6_send_reset(sk, skb);
82061 discard:
82062 if (opt_skb)
82063 @@ -1656,12 +1663,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
82064 TCP_SKB_CB(skb)->sacked = 0;
82065
82066 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
82067 - if (!sk)
82068 + if (!sk) {
82069 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82070 + ret = 1;
82071 +#endif
82072 goto no_tcp_socket;
82073 + }
82074
82075 process:
82076 - if (sk->sk_state == TCP_TIME_WAIT)
82077 + if (sk->sk_state == TCP_TIME_WAIT) {
82078 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82079 + ret = 2;
82080 +#endif
82081 goto do_time_wait;
82082 + }
82083
82084 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
82085 goto discard_and_relse;
82086 @@ -1701,6 +1716,10 @@ no_tcp_socket:
82087 bad_packet:
82088 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
82089 } else {
82090 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82091 + if (!grsec_enable_blackhole || (ret == 1 &&
82092 + (skb->dev->flags & IFF_LOOPBACK)))
82093 +#endif
82094 tcp_v6_send_reset(NULL, skb);
82095 }
82096
82097 @@ -1916,7 +1935,13 @@ static void get_openreq6(struct seq_file *seq,
82098 uid,
82099 0, /* non standard timer */
82100 0, /* open_requests have no inode */
82101 - 0, req);
82102 + 0,
82103 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82104 + NULL
82105 +#else
82106 + req
82107 +#endif
82108 + );
82109 }
82110
82111 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
82112 @@ -1966,7 +1991,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
82113 sock_i_uid(sp),
82114 icsk->icsk_probes_out,
82115 sock_i_ino(sp),
82116 - atomic_read(&sp->sk_refcnt), sp,
82117 + atomic_read(&sp->sk_refcnt),
82118 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82119 + NULL,
82120 +#else
82121 + sp,
82122 +#endif
82123 jiffies_to_clock_t(icsk->icsk_rto),
82124 jiffies_to_clock_t(icsk->icsk_ack.ato),
82125 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
82126 @@ -2001,7 +2031,13 @@ static void get_timewait6_sock(struct seq_file *seq,
82127 dest->s6_addr32[2], dest->s6_addr32[3], destp,
82128 tw->tw_substate, 0, 0,
82129 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
82130 - atomic_read(&tw->tw_refcnt), tw);
82131 + atomic_read(&tw->tw_refcnt),
82132 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82133 + NULL
82134 +#else
82135 + tw
82136 +#endif
82137 + );
82138 }
82139
82140 static int tcp6_seq_show(struct seq_file *seq, void *v)
82141 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
82142 index 9cc6289..052c521 100644
82143 --- a/net/ipv6/udp.c
82144 +++ b/net/ipv6/udp.c
82145 @@ -49,6 +49,10 @@
82146 #include <linux/seq_file.h>
82147 #include "udp_impl.h"
82148
82149 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82150 +extern int grsec_enable_blackhole;
82151 +#endif
82152 +
82153 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
82154 {
82155 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
82156 @@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
82157 if (rc == -ENOMEM) {
82158 UDP6_INC_STATS_BH(sock_net(sk),
82159 UDP_MIB_RCVBUFERRORS, is_udplite);
82160 - atomic_inc(&sk->sk_drops);
82161 + atomic_inc_unchecked(&sk->sk_drops);
82162 }
82163 goto drop;
82164 }
82165 @@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
82166 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
82167 proto == IPPROTO_UDPLITE);
82168
82169 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82170 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
82171 +#endif
82172 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
82173
82174 kfree_skb(skb);
82175 @@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
82176 0, 0L, 0,
82177 sock_i_uid(sp), 0,
82178 sock_i_ino(sp),
82179 - atomic_read(&sp->sk_refcnt), sp,
82180 - atomic_read(&sp->sk_drops));
82181 + atomic_read(&sp->sk_refcnt),
82182 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82183 + NULL,
82184 +#else
82185 + sp,
82186 +#endif
82187 + atomic_read_unchecked(&sp->sk_drops));
82188 }
82189
82190 int udp6_seq_show(struct seq_file *seq, void *v)
82191 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
82192 index 811984d..11f59b7 100644
82193 --- a/net/irda/ircomm/ircomm_tty.c
82194 +++ b/net/irda/ircomm/ircomm_tty.c
82195 @@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
82196 add_wait_queue(&self->open_wait, &wait);
82197
82198 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
82199 - __FILE__,__LINE__, tty->driver->name, self->open_count );
82200 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
82201
82202 /* As far as I can see, we protect open_count - Jean II */
82203 spin_lock_irqsave(&self->spinlock, flags);
82204 if (!tty_hung_up_p(filp)) {
82205 extra_count = 1;
82206 - self->open_count--;
82207 + local_dec(&self->open_count);
82208 }
82209 spin_unlock_irqrestore(&self->spinlock, flags);
82210 - self->blocked_open++;
82211 + local_inc(&self->blocked_open);
82212
82213 while (1) {
82214 if (tty->termios->c_cflag & CBAUD) {
82215 @@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
82216 }
82217
82218 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
82219 - __FILE__,__LINE__, tty->driver->name, self->open_count );
82220 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
82221
82222 schedule();
82223 }
82224 @@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
82225 if (extra_count) {
82226 /* ++ is not atomic, so this should be protected - Jean II */
82227 spin_lock_irqsave(&self->spinlock, flags);
82228 - self->open_count++;
82229 + local_inc(&self->open_count);
82230 spin_unlock_irqrestore(&self->spinlock, flags);
82231 }
82232 - self->blocked_open--;
82233 + local_dec(&self->blocked_open);
82234
82235 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
82236 - __FILE__,__LINE__, tty->driver->name, self->open_count);
82237 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
82238
82239 if (!retval)
82240 self->flags |= ASYNC_NORMAL_ACTIVE;
82241 @@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
82242 }
82243 /* ++ is not atomic, so this should be protected - Jean II */
82244 spin_lock_irqsave(&self->spinlock, flags);
82245 - self->open_count++;
82246 + local_inc(&self->open_count);
82247
82248 tty->driver_data = self;
82249 self->tty = tty;
82250 spin_unlock_irqrestore(&self->spinlock, flags);
82251
82252 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
82253 - self->line, self->open_count);
82254 + self->line, local_read(&self->open_count));
82255
82256 /* Not really used by us, but lets do it anyway */
82257 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
82258 @@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
82259 return;
82260 }
82261
82262 - if ((tty->count == 1) && (self->open_count != 1)) {
82263 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
82264 /*
82265 * Uh, oh. tty->count is 1, which means that the tty
82266 * structure will be freed. state->count should always
82267 @@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
82268 */
82269 IRDA_DEBUG(0, "%s(), bad serial port count; "
82270 "tty->count is 1, state->count is %d\n", __func__ ,
82271 - self->open_count);
82272 - self->open_count = 1;
82273 + local_read(&self->open_count));
82274 + local_set(&self->open_count, 1);
82275 }
82276
82277 - if (--self->open_count < 0) {
82278 + if (local_dec_return(&self->open_count) < 0) {
82279 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
82280 - __func__, self->line, self->open_count);
82281 - self->open_count = 0;
82282 + __func__, self->line, local_read(&self->open_count));
82283 + local_set(&self->open_count, 0);
82284 }
82285 - if (self->open_count) {
82286 + if (local_read(&self->open_count)) {
82287 spin_unlock_irqrestore(&self->spinlock, flags);
82288
82289 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
82290 @@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
82291 tty->closing = 0;
82292 self->tty = NULL;
82293
82294 - if (self->blocked_open) {
82295 + if (local_read(&self->blocked_open)) {
82296 if (self->close_delay)
82297 schedule_timeout_interruptible(self->close_delay);
82298 wake_up_interruptible(&self->open_wait);
82299 @@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
82300 spin_lock_irqsave(&self->spinlock, flags);
82301 self->flags &= ~ASYNC_NORMAL_ACTIVE;
82302 self->tty = NULL;
82303 - self->open_count = 0;
82304 + local_set(&self->open_count, 0);
82305 spin_unlock_irqrestore(&self->spinlock, flags);
82306
82307 wake_up_interruptible(&self->open_wait);
82308 @@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
82309 seq_putc(m, '\n');
82310
82311 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
82312 - seq_printf(m, "Open count: %d\n", self->open_count);
82313 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
82314 seq_printf(m, "Max data size: %d\n", self->max_data_size);
82315 seq_printf(m, "Max header size: %d\n", self->max_header_size);
82316
82317 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
82318 index bada1b9..f325943 100644
82319 --- a/net/iucv/af_iucv.c
82320 +++ b/net/iucv/af_iucv.c
82321 @@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct sock *sk)
82322
82323 write_lock_bh(&iucv_sk_list.lock);
82324
82325 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
82326 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
82327 while (__iucv_get_sock_by_name(name)) {
82328 sprintf(name, "%08x",
82329 - atomic_inc_return(&iucv_sk_list.autobind_name));
82330 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
82331 }
82332
82333 write_unlock_bh(&iucv_sk_list.lock);
82334 diff --git a/net/key/af_key.c b/net/key/af_key.c
82335 index 4e98193..439b449 100644
82336 --- a/net/key/af_key.c
82337 +++ b/net/key/af_key.c
82338 @@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
82339 struct xfrm_migrate m[XFRM_MAX_DEPTH];
82340 struct xfrm_kmaddress k;
82341
82342 + pax_track_stack();
82343 +
82344 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
82345 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
82346 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
82347 @@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
82348 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
82349 else
82350 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
82351 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82352 + NULL,
82353 +#else
82354 s,
82355 +#endif
82356 atomic_read(&s->sk_refcnt),
82357 sk_rmem_alloc_get(s),
82358 sk_wmem_alloc_get(s),
82359 diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
82360 index bda96d1..c038b72 100644
82361 --- a/net/lapb/lapb_iface.c
82362 +++ b/net/lapb/lapb_iface.c
82363 @@ -157,7 +157,7 @@ int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks
82364 goto out;
82365
82366 lapb->dev = dev;
82367 - lapb->callbacks = *callbacks;
82368 + lapb->callbacks = callbacks;
82369
82370 __lapb_insert_cb(lapb);
82371
82372 @@ -379,32 +379,32 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
82373
82374 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
82375 {
82376 - if (lapb->callbacks.connect_confirmation)
82377 - lapb->callbacks.connect_confirmation(lapb->dev, reason);
82378 + if (lapb->callbacks->connect_confirmation)
82379 + lapb->callbacks->connect_confirmation(lapb->dev, reason);
82380 }
82381
82382 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
82383 {
82384 - if (lapb->callbacks.connect_indication)
82385 - lapb->callbacks.connect_indication(lapb->dev, reason);
82386 + if (lapb->callbacks->connect_indication)
82387 + lapb->callbacks->connect_indication(lapb->dev, reason);
82388 }
82389
82390 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
82391 {
82392 - if (lapb->callbacks.disconnect_confirmation)
82393 - lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
82394 + if (lapb->callbacks->disconnect_confirmation)
82395 + lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
82396 }
82397
82398 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
82399 {
82400 - if (lapb->callbacks.disconnect_indication)
82401 - lapb->callbacks.disconnect_indication(lapb->dev, reason);
82402 + if (lapb->callbacks->disconnect_indication)
82403 + lapb->callbacks->disconnect_indication(lapb->dev, reason);
82404 }
82405
82406 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
82407 {
82408 - if (lapb->callbacks.data_indication)
82409 - return lapb->callbacks.data_indication(lapb->dev, skb);
82410 + if (lapb->callbacks->data_indication)
82411 + return lapb->callbacks->data_indication(lapb->dev, skb);
82412
82413 kfree_skb(skb);
82414 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
82415 @@ -414,8 +414,8 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb)
82416 {
82417 int used = 0;
82418
82419 - if (lapb->callbacks.data_transmit) {
82420 - lapb->callbacks.data_transmit(lapb->dev, skb);
82421 + if (lapb->callbacks->data_transmit) {
82422 + lapb->callbacks->data_transmit(lapb->dev, skb);
82423 used = 1;
82424 }
82425
82426 diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
82427 index fe2d3f8..e57f683 100644
82428 --- a/net/mac80211/cfg.c
82429 +++ b/net/mac80211/cfg.c
82430 @@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
82431 return err;
82432 }
82433
82434 -struct cfg80211_ops mac80211_config_ops = {
82435 +const struct cfg80211_ops mac80211_config_ops = {
82436 .add_virtual_intf = ieee80211_add_iface,
82437 .del_virtual_intf = ieee80211_del_iface,
82438 .change_virtual_intf = ieee80211_change_iface,
82439 diff --git a/net/mac80211/cfg.h b/net/mac80211/cfg.h
82440 index 7d7879f..2d51f62 100644
82441 --- a/net/mac80211/cfg.h
82442 +++ b/net/mac80211/cfg.h
82443 @@ -4,6 +4,6 @@
82444 #ifndef __CFG_H
82445 #define __CFG_H
82446
82447 -extern struct cfg80211_ops mac80211_config_ops;
82448 +extern const struct cfg80211_ops mac80211_config_ops;
82449
82450 #endif /* __CFG_H */
82451 diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
82452 index 99c7525..9cb4937 100644
82453 --- a/net/mac80211/debugfs_key.c
82454 +++ b/net/mac80211/debugfs_key.c
82455 @@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file *file, char __user *userbuf,
82456 size_t count, loff_t *ppos)
82457 {
82458 struct ieee80211_key *key = file->private_data;
82459 - int i, res, bufsize = 2 * key->conf.keylen + 2;
82460 + int i, bufsize = 2 * key->conf.keylen + 2;
82461 char *buf = kmalloc(bufsize, GFP_KERNEL);
82462 char *p = buf;
82463 + ssize_t res;
82464 +
82465 + if (buf == NULL)
82466 + return -ENOMEM;
82467
82468 for (i = 0; i < key->conf.keylen; i++)
82469 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
82470 diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
82471 index 33a2e89..08650c8 100644
82472 --- a/net/mac80211/debugfs_sta.c
82473 +++ b/net/mac80211/debugfs_sta.c
82474 @@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
82475 int i;
82476 struct sta_info *sta = file->private_data;
82477
82478 + pax_track_stack();
82479 +
82480 spin_lock_bh(&sta->lock);
82481 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
82482 sta->ampdu_mlme.dialog_token_allocator + 1);
82483 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
82484 index ca62bfe..6657a03 100644
82485 --- a/net/mac80211/ieee80211_i.h
82486 +++ b/net/mac80211/ieee80211_i.h
82487 @@ -25,6 +25,7 @@
82488 #include <linux/etherdevice.h>
82489 #include <net/cfg80211.h>
82490 #include <net/mac80211.h>
82491 +#include <asm/local.h>
82492 #include "key.h"
82493 #include "sta_info.h"
82494
82495 @@ -635,7 +636,7 @@ struct ieee80211_local {
82496 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
82497 spinlock_t queue_stop_reason_lock;
82498
82499 - int open_count;
82500 + local_t open_count;
82501 int monitors, cooked_mntrs;
82502 /* number of interfaces with corresponding FIF_ flags */
82503 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
82504 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
82505 index 079c500..eb3c6d4 100644
82506 --- a/net/mac80211/iface.c
82507 +++ b/net/mac80211/iface.c
82508 @@ -166,7 +166,7 @@ static int ieee80211_open(struct net_device *dev)
82509 break;
82510 }
82511
82512 - if (local->open_count == 0) {
82513 + if (local_read(&local->open_count) == 0) {
82514 res = drv_start(local);
82515 if (res)
82516 goto err_del_bss;
82517 @@ -196,7 +196,7 @@ static int ieee80211_open(struct net_device *dev)
82518 * Validate the MAC address for this device.
82519 */
82520 if (!is_valid_ether_addr(dev->dev_addr)) {
82521 - if (!local->open_count)
82522 + if (!local_read(&local->open_count))
82523 drv_stop(local);
82524 return -EADDRNOTAVAIL;
82525 }
82526 @@ -292,7 +292,7 @@ static int ieee80211_open(struct net_device *dev)
82527
82528 hw_reconf_flags |= __ieee80211_recalc_idle(local);
82529
82530 - local->open_count++;
82531 + local_inc(&local->open_count);
82532 if (hw_reconf_flags) {
82533 ieee80211_hw_config(local, hw_reconf_flags);
82534 /*
82535 @@ -320,7 +320,7 @@ static int ieee80211_open(struct net_device *dev)
82536 err_del_interface:
82537 drv_remove_interface(local, &conf);
82538 err_stop:
82539 - if (!local->open_count)
82540 + if (!local_read(&local->open_count))
82541 drv_stop(local);
82542 err_del_bss:
82543 sdata->bss = NULL;
82544 @@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_device *dev)
82545 WARN_ON(!list_empty(&sdata->u.ap.vlans));
82546 }
82547
82548 - local->open_count--;
82549 + local_dec(&local->open_count);
82550
82551 switch (sdata->vif.type) {
82552 case NL80211_IFTYPE_AP_VLAN:
82553 @@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_device *dev)
82554
82555 ieee80211_recalc_ps(local, -1);
82556
82557 - if (local->open_count == 0) {
82558 + if (local_read(&local->open_count) == 0) {
82559 ieee80211_clear_tx_pending(local);
82560 ieee80211_stop_device(local);
82561
82562 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
82563 index 2dfe176..74e4388 100644
82564 --- a/net/mac80211/main.c
82565 +++ b/net/mac80211/main.c
82566 @@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
82567 local->hw.conf.power_level = power;
82568 }
82569
82570 - if (changed && local->open_count) {
82571 + if (changed && local_read(&local->open_count)) {
82572 ret = drv_config(local, changed);
82573 /*
82574 * Goal:
82575 diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
82576 index e67eea7..fcc227e 100644
82577 --- a/net/mac80211/mlme.c
82578 +++ b/net/mac80211/mlme.c
82579 @@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
82580 bool have_higher_than_11mbit = false, newsta = false;
82581 u16 ap_ht_cap_flags;
82582
82583 + pax_track_stack();
82584 +
82585 /*
82586 * AssocResp and ReassocResp have identical structure, so process both
82587 * of them in this function.
82588 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
82589 index e535f1c..4d733d1 100644
82590 --- a/net/mac80211/pm.c
82591 +++ b/net/mac80211/pm.c
82592 @@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
82593 }
82594
82595 /* stop hardware - this must stop RX */
82596 - if (local->open_count)
82597 + if (local_read(&local->open_count))
82598 ieee80211_stop_device(local);
82599
82600 local->suspended = true;
82601 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
82602 index b33efc4..0a2efb6 100644
82603 --- a/net/mac80211/rate.c
82604 +++ b/net/mac80211/rate.c
82605 @@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
82606 struct rate_control_ref *ref, *old;
82607
82608 ASSERT_RTNL();
82609 - if (local->open_count)
82610 + if (local_read(&local->open_count))
82611 return -EBUSY;
82612
82613 ref = rate_control_alloc(name, local);
82614 diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
82615 index b1d7904..57e4da7 100644
82616 --- a/net/mac80211/tx.c
82617 +++ b/net/mac80211/tx.c
82618 @@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
82619 return cpu_to_le16(dur);
82620 }
82621
82622 -static int inline is_ieee80211_device(struct ieee80211_local *local,
82623 +static inline int is_ieee80211_device(struct ieee80211_local *local,
82624 struct net_device *dev)
82625 {
82626 return local == wdev_priv(dev->ieee80211_ptr);
82627 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
82628 index 31b1085..48fb26d 100644
82629 --- a/net/mac80211/util.c
82630 +++ b/net/mac80211/util.c
82631 @@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
82632 local->resuming = true;
82633
82634 /* restart hardware */
82635 - if (local->open_count) {
82636 + if (local_read(&local->open_count)) {
82637 /*
82638 * Upon resume hardware can sometimes be goofy due to
82639 * various platform / driver / bus issues, so restarting
82640 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
82641 index 634d14a..b35a608 100644
82642 --- a/net/netfilter/Kconfig
82643 +++ b/net/netfilter/Kconfig
82644 @@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
82645
82646 To compile it as a module, choose M here. If unsure, say N.
82647
82648 +config NETFILTER_XT_MATCH_GRADM
82649 + tristate '"gradm" match support'
82650 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
82651 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
82652 + ---help---
82653 + The gradm match allows to match on grsecurity RBAC being enabled.
82654 + It is useful when iptables rules are applied early on bootup to
82655 + prevent connections to the machine (except from a trusted host)
82656 + while the RBAC system is disabled.
82657 +
82658 config NETFILTER_XT_MATCH_HASHLIMIT
82659 tristate '"hashlimit" match support'
82660 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
82661 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
82662 index 49f62ee..a17b2c6 100644
82663 --- a/net/netfilter/Makefile
82664 +++ b/net/netfilter/Makefile
82665 @@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
82666 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
82667 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
82668 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
82669 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
82670 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
82671 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
82672 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
82673 diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
82674 index 3c7e427..724043c 100644
82675 --- a/net/netfilter/ipvs/ip_vs_app.c
82676 +++ b/net/netfilter/ipvs/ip_vs_app.c
82677 @@ -564,7 +564,7 @@ static const struct file_operations ip_vs_app_fops = {
82678 .open = ip_vs_app_open,
82679 .read = seq_read,
82680 .llseek = seq_lseek,
82681 - .release = seq_release,
82682 + .release = seq_release_net,
82683 };
82684 #endif
82685
82686 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
82687 index 95682e5..457dbac 100644
82688 --- a/net/netfilter/ipvs/ip_vs_conn.c
82689 +++ b/net/netfilter/ipvs/ip_vs_conn.c
82690 @@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
82691 /* if the connection is not template and is created
82692 * by sync, preserve the activity flag.
82693 */
82694 - cp->flags |= atomic_read(&dest->conn_flags) &
82695 + cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
82696 (~IP_VS_CONN_F_INACTIVE);
82697 else
82698 - cp->flags |= atomic_read(&dest->conn_flags);
82699 + cp->flags |= atomic_read_unchecked(&dest->conn_flags);
82700 cp->dest = dest;
82701
82702 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
82703 @@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport,
82704 atomic_set(&cp->refcnt, 1);
82705
82706 atomic_set(&cp->n_control, 0);
82707 - atomic_set(&cp->in_pkts, 0);
82708 + atomic_set_unchecked(&cp->in_pkts, 0);
82709
82710 atomic_inc(&ip_vs_conn_count);
82711 if (flags & IP_VS_CONN_F_NO_CPORT)
82712 @@ -871,7 +871,7 @@ static const struct file_operations ip_vs_conn_fops = {
82713 .open = ip_vs_conn_open,
82714 .read = seq_read,
82715 .llseek = seq_lseek,
82716 - .release = seq_release,
82717 + .release = seq_release_net,
82718 };
82719
82720 static const char *ip_vs_origin_name(unsigned flags)
82721 @@ -934,7 +934,7 @@ static const struct file_operations ip_vs_conn_sync_fops = {
82722 .open = ip_vs_conn_sync_open,
82723 .read = seq_read,
82724 .llseek = seq_lseek,
82725 - .release = seq_release,
82726 + .release = seq_release_net,
82727 };
82728
82729 #endif
82730 @@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
82731
82732 /* Don't drop the entry if its number of incoming packets is not
82733 located in [0, 8] */
82734 - i = atomic_read(&cp->in_pkts);
82735 + i = atomic_read_unchecked(&cp->in_pkts);
82736 if (i > 8 || i < 0) return 0;
82737
82738 if (!todrop_rate[i]) return 0;
82739 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
82740 index b95699f..5fee919 100644
82741 --- a/net/netfilter/ipvs/ip_vs_core.c
82742 +++ b/net/netfilter/ipvs/ip_vs_core.c
82743 @@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
82744 ret = cp->packet_xmit(skb, cp, pp);
82745 /* do not touch skb anymore */
82746
82747 - atomic_inc(&cp->in_pkts);
82748 + atomic_inc_unchecked(&cp->in_pkts);
82749 ip_vs_conn_put(cp);
82750 return ret;
82751 }
82752 @@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
82753 * Sync connection if it is about to close to
82754 * encorage the standby servers to update the connections timeout
82755 */
82756 - pkts = atomic_add_return(1, &cp->in_pkts);
82757 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
82758 if (af == AF_INET &&
82759 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
82760 (((cp->protocol != IPPROTO_TCP ||
82761 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
82762 index 02b2610..2d89424 100644
82763 --- a/net/netfilter/ipvs/ip_vs_ctl.c
82764 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
82765 @@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc,
82766 ip_vs_rs_hash(dest);
82767 write_unlock_bh(&__ip_vs_rs_lock);
82768 }
82769 - atomic_set(&dest->conn_flags, conn_flags);
82770 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
82771
82772 /* bind the service */
82773 if (!dest->svc) {
82774 @@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
82775 " %-7s %-6d %-10d %-10d\n",
82776 &dest->addr.in6,
82777 ntohs(dest->port),
82778 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
82779 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
82780 atomic_read(&dest->weight),
82781 atomic_read(&dest->activeconns),
82782 atomic_read(&dest->inactconns));
82783 @@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
82784 "%-7s %-6d %-10d %-10d\n",
82785 ntohl(dest->addr.ip),
82786 ntohs(dest->port),
82787 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
82788 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
82789 atomic_read(&dest->weight),
82790 atomic_read(&dest->activeconns),
82791 atomic_read(&dest->inactconns));
82792 @@ -1927,7 +1927,7 @@ static const struct file_operations ip_vs_info_fops = {
82793 .open = ip_vs_info_open,
82794 .read = seq_read,
82795 .llseek = seq_lseek,
82796 - .release = seq_release_private,
82797 + .release = seq_release_net,
82798 };
82799
82800 #endif
82801 @@ -1976,7 +1976,7 @@ static const struct file_operations ip_vs_stats_fops = {
82802 .open = ip_vs_stats_seq_open,
82803 .read = seq_read,
82804 .llseek = seq_lseek,
82805 - .release = single_release,
82806 + .release = single_release_net,
82807 };
82808
82809 #endif
82810 @@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
82811
82812 entry.addr = dest->addr.ip;
82813 entry.port = dest->port;
82814 - entry.conn_flags = atomic_read(&dest->conn_flags);
82815 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
82816 entry.weight = atomic_read(&dest->weight);
82817 entry.u_threshold = dest->u_threshold;
82818 entry.l_threshold = dest->l_threshold;
82819 @@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
82820 unsigned char arg[128];
82821 int ret = 0;
82822
82823 + pax_track_stack();
82824 +
82825 if (!capable(CAP_NET_ADMIN))
82826 return -EPERM;
82827
82828 @@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
82829 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
82830
82831 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
82832 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
82833 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
82834 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
82835 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
82836 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
82837 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
82838 index e177f0d..55e8581 100644
82839 --- a/net/netfilter/ipvs/ip_vs_sync.c
82840 +++ b/net/netfilter/ipvs/ip_vs_sync.c
82841 @@ -438,7 +438,7 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
82842
82843 if (opt)
82844 memcpy(&cp->in_seq, opt, sizeof(*opt));
82845 - atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
82846 + atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
82847 cp->state = state;
82848 cp->old_state = cp->state;
82849 /*
82850 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
82851 index 30b3189..e2e4b55 100644
82852 --- a/net/netfilter/ipvs/ip_vs_xmit.c
82853 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
82854 @@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
82855 else
82856 rc = NF_ACCEPT;
82857 /* do not touch skb anymore */
82858 - atomic_inc(&cp->in_pkts);
82859 + atomic_inc_unchecked(&cp->in_pkts);
82860 goto out;
82861 }
82862
82863 @@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
82864 else
82865 rc = NF_ACCEPT;
82866 /* do not touch skb anymore */
82867 - atomic_inc(&cp->in_pkts);
82868 + atomic_inc_unchecked(&cp->in_pkts);
82869 goto out;
82870 }
82871
82872 diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
82873 index d521718..d0fd7a1 100644
82874 --- a/net/netfilter/nf_conntrack_netlink.c
82875 +++ b/net/netfilter/nf_conntrack_netlink.c
82876 @@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlattr *attr,
82877 static int
82878 ctnetlink_parse_tuple(const struct nlattr * const cda[],
82879 struct nf_conntrack_tuple *tuple,
82880 - enum ctattr_tuple type, u_int8_t l3num)
82881 + enum ctattr_type type, u_int8_t l3num)
82882 {
82883 struct nlattr *tb[CTA_TUPLE_MAX+1];
82884 int err;
82885 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
82886 index f900dc3..5e45346 100644
82887 --- a/net/netfilter/nfnetlink_log.c
82888 +++ b/net/netfilter/nfnetlink_log.c
82889 @@ -68,7 +68,7 @@ struct nfulnl_instance {
82890 };
82891
82892 static DEFINE_RWLOCK(instances_lock);
82893 -static atomic_t global_seq;
82894 +static atomic_unchecked_t global_seq;
82895
82896 #define INSTANCE_BUCKETS 16
82897 static struct hlist_head instance_table[INSTANCE_BUCKETS];
82898 @@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_instance *inst,
82899 /* global sequence number */
82900 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
82901 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
82902 - htonl(atomic_inc_return(&global_seq)));
82903 + htonl(atomic_inc_return_unchecked(&global_seq)));
82904
82905 if (data_len) {
82906 struct nlattr *nla;
82907 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
82908 new file mode 100644
82909 index 0000000..b1bac76
82910 --- /dev/null
82911 +++ b/net/netfilter/xt_gradm.c
82912 @@ -0,0 +1,51 @@
82913 +/*
82914 + * gradm match for netfilter
82915 + * Copyright © Zbigniew Krzystolik, 2010
82916 + *
82917 + * This program is free software; you can redistribute it and/or modify
82918 + * it under the terms of the GNU General Public License; either version
82919 + * 2 or 3 as published by the Free Software Foundation.
82920 + */
82921 +#include <linux/module.h>
82922 +#include <linux/moduleparam.h>
82923 +#include <linux/skbuff.h>
82924 +#include <linux/netfilter/x_tables.h>
82925 +#include <linux/grsecurity.h>
82926 +#include <linux/netfilter/xt_gradm.h>
82927 +
82928 +static bool
82929 +gradm_mt(const struct sk_buff *skb, const struct xt_match_param *par)
82930 +{
82931 + const struct xt_gradm_mtinfo *info = par->matchinfo;
82932 + bool retval = false;
82933 + if (gr_acl_is_enabled())
82934 + retval = true;
82935 + return retval ^ info->invflags;
82936 +}
82937 +
82938 +static struct xt_match gradm_mt_reg __read_mostly = {
82939 + .name = "gradm",
82940 + .revision = 0,
82941 + .family = NFPROTO_UNSPEC,
82942 + .match = gradm_mt,
82943 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
82944 + .me = THIS_MODULE,
82945 +};
82946 +
82947 +static int __init gradm_mt_init(void)
82948 +{
82949 + return xt_register_match(&gradm_mt_reg);
82950 +}
82951 +
82952 +static void __exit gradm_mt_exit(void)
82953 +{
82954 + xt_unregister_match(&gradm_mt_reg);
82955 +}
82956 +
82957 +module_init(gradm_mt_init);
82958 +module_exit(gradm_mt_exit);
82959 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
82960 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
82961 +MODULE_LICENSE("GPL");
82962 +MODULE_ALIAS("ipt_gradm");
82963 +MODULE_ALIAS("ip6t_gradm");
82964 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
82965 index 5a7dcdf..24a3578 100644
82966 --- a/net/netlink/af_netlink.c
82967 +++ b/net/netlink/af_netlink.c
82968 @@ -733,7 +733,7 @@ static void netlink_overrun(struct sock *sk)
82969 sk->sk_error_report(sk);
82970 }
82971 }
82972 - atomic_inc(&sk->sk_drops);
82973 + atomic_inc_unchecked(&sk->sk_drops);
82974 }
82975
82976 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
82977 @@ -1964,15 +1964,23 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
82978 struct netlink_sock *nlk = nlk_sk(s);
82979
82980 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d\n",
82981 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82982 + NULL,
82983 +#else
82984 s,
82985 +#endif
82986 s->sk_protocol,
82987 nlk->pid,
82988 nlk->groups ? (u32)nlk->groups[0] : 0,
82989 sk_rmem_alloc_get(s),
82990 sk_wmem_alloc_get(s),
82991 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82992 + NULL,
82993 +#else
82994 nlk->cb,
82995 +#endif
82996 atomic_read(&s->sk_refcnt),
82997 - atomic_read(&s->sk_drops)
82998 + atomic_read_unchecked(&s->sk_drops)
82999 );
83000
83001 }
83002 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
83003 index 7a83495..ab0062f 100644
83004 --- a/net/netrom/af_netrom.c
83005 +++ b/net/netrom/af_netrom.c
83006 @@ -838,6 +838,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
83007 struct sock *sk = sock->sk;
83008 struct nr_sock *nr = nr_sk(sk);
83009
83010 + memset(sax, 0, sizeof(*sax));
83011 lock_sock(sk);
83012 if (peer != 0) {
83013 if (sk->sk_state != TCP_ESTABLISHED) {
83014 @@ -852,7 +853,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
83015 *uaddr_len = sizeof(struct full_sockaddr_ax25);
83016 } else {
83017 sax->fsa_ax25.sax25_family = AF_NETROM;
83018 - sax->fsa_ax25.sax25_ndigis = 0;
83019 sax->fsa_ax25.sax25_call = nr->source_addr;
83020 *uaddr_len = sizeof(struct sockaddr_ax25);
83021 }
83022 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
83023 index 35cfa79..4e78ff7 100644
83024 --- a/net/packet/af_packet.c
83025 +++ b/net/packet/af_packet.c
83026 @@ -2429,7 +2429,11 @@ static int packet_seq_show(struct seq_file *seq, void *v)
83027
83028 seq_printf(seq,
83029 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
83030 +#ifdef CONFIG_GRKERNSEC_HIDESYM
83031 + NULL,
83032 +#else
83033 s,
83034 +#endif
83035 atomic_read(&s->sk_refcnt),
83036 s->sk_type,
83037 ntohs(po->num),
83038 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
83039 index 519ff9d..a422a90 100644
83040 --- a/net/phonet/af_phonet.c
83041 +++ b/net/phonet/af_phonet.c
83042 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(int protocol)
83043 {
83044 struct phonet_protocol *pp;
83045
83046 - if (protocol >= PHONET_NPROTO)
83047 + if (protocol < 0 || protocol >= PHONET_NPROTO)
83048 return NULL;
83049
83050 spin_lock(&proto_tab_lock);
83051 @@ -402,7 +402,7 @@ int __init_or_module phonet_proto_register(int protocol,
83052 {
83053 int err = 0;
83054
83055 - if (protocol >= PHONET_NPROTO)
83056 + if (protocol < 0 || protocol >= PHONET_NPROTO)
83057 return -EINVAL;
83058
83059 err = proto_register(pp->prot, 1);
83060 diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
83061 index ef5c75c..2b6c2fa 100644
83062 --- a/net/phonet/datagram.c
83063 +++ b/net/phonet/datagram.c
83064 @@ -162,7 +162,7 @@ static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb)
83065 if (err < 0) {
83066 kfree_skb(skb);
83067 if (err == -ENOMEM)
83068 - atomic_inc(&sk->sk_drops);
83069 + atomic_inc_unchecked(&sk->sk_drops);
83070 }
83071 return err ? NET_RX_DROP : NET_RX_SUCCESS;
83072 }
83073 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
83074 index 9cdd35e..16cd850 100644
83075 --- a/net/phonet/pep.c
83076 +++ b/net/phonet/pep.c
83077 @@ -348,7 +348,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
83078
83079 case PNS_PEP_CTRL_REQ:
83080 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
83081 - atomic_inc(&sk->sk_drops);
83082 + atomic_inc_unchecked(&sk->sk_drops);
83083 break;
83084 }
83085 __skb_pull(skb, 4);
83086 @@ -362,12 +362,12 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
83087 if (!err)
83088 return 0;
83089 if (err == -ENOMEM)
83090 - atomic_inc(&sk->sk_drops);
83091 + atomic_inc_unchecked(&sk->sk_drops);
83092 break;
83093 }
83094
83095 if (pn->rx_credits == 0) {
83096 - atomic_inc(&sk->sk_drops);
83097 + atomic_inc_unchecked(&sk->sk_drops);
83098 err = -ENOBUFS;
83099 break;
83100 }
83101 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
83102 index aa5b5a9..c09b4f8 100644
83103 --- a/net/phonet/socket.c
83104 +++ b/net/phonet/socket.c
83105 @@ -482,8 +482,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
83106 sk->sk_state,
83107 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
83108 sock_i_uid(sk), sock_i_ino(sk),
83109 - atomic_read(&sk->sk_refcnt), sk,
83110 - atomic_read(&sk->sk_drops), &len);
83111 + atomic_read(&sk->sk_refcnt),
83112 +#ifdef CONFIG_GRKERNSEC_HIDESYM
83113 + NULL,
83114 +#else
83115 + sk,
83116 +#endif
83117 + atomic_read_unchecked(&sk->sk_drops), &len);
83118 }
83119 seq_printf(seq, "%*s\n", 127 - len, "");
83120 return 0;
83121 diff --git a/net/rds/Kconfig b/net/rds/Kconfig
83122 index ec753b3..821187c 100644
83123 --- a/net/rds/Kconfig
83124 +++ b/net/rds/Kconfig
83125 @@ -1,7 +1,7 @@
83126
83127 config RDS
83128 tristate "The RDS Protocol (EXPERIMENTAL)"
83129 - depends on INET && EXPERIMENTAL
83130 + depends on INET && EXPERIMENTAL && BROKEN
83131 ---help---
83132 The RDS (Reliable Datagram Sockets) protocol provides reliable,
83133 sequenced delivery of datagrams over Infiniband, iWARP,
83134 diff --git a/net/rds/cong.c b/net/rds/cong.c
83135 index dd2711d..1c7ed12 100644
83136 --- a/net/rds/cong.c
83137 +++ b/net/rds/cong.c
83138 @@ -77,7 +77,7 @@
83139 * finds that the saved generation number is smaller than the global generation
83140 * number, it wakes up the process.
83141 */
83142 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
83143 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
83144
83145 /*
83146 * Congestion monitoring
83147 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
83148 rdsdebug("waking map %p for %pI4\n",
83149 map, &map->m_addr);
83150 rds_stats_inc(s_cong_update_received);
83151 - atomic_inc(&rds_cong_generation);
83152 + atomic_inc_unchecked(&rds_cong_generation);
83153 if (waitqueue_active(&map->m_waitq))
83154 wake_up(&map->m_waitq);
83155 if (waitqueue_active(&rds_poll_waitq))
83156 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
83157
83158 int rds_cong_updated_since(unsigned long *recent)
83159 {
83160 - unsigned long gen = atomic_read(&rds_cong_generation);
83161 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
83162
83163 if (likely(*recent == gen))
83164 return 0;
83165 diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
83166 index de4a1b1..94ec861 100644
83167 --- a/net/rds/iw_rdma.c
83168 +++ b/net/rds/iw_rdma.c
83169 @@ -181,6 +181,8 @@ int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_i
83170 struct rdma_cm_id *pcm_id;
83171 int rc;
83172
83173 + pax_track_stack();
83174 +
83175 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
83176 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
83177
83178 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
83179 index b5198ae..8b9fb90 100644
83180 --- a/net/rds/tcp.c
83181 +++ b/net/rds/tcp.c
83182 @@ -57,7 +57,7 @@ void rds_tcp_nonagle(struct socket *sock)
83183 int val = 1;
83184
83185 set_fs(KERNEL_DS);
83186 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
83187 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
83188 sizeof(val));
83189 set_fs(oldfs);
83190 }
83191 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
83192 index ab545e0..4079b3b 100644
83193 --- a/net/rds/tcp_send.c
83194 +++ b/net/rds/tcp_send.c
83195 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
83196
83197 oldfs = get_fs();
83198 set_fs(KERNEL_DS);
83199 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
83200 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
83201 sizeof(val));
83202 set_fs(oldfs);
83203 }
83204 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
83205 index a86afce..8657bce 100644
83206 --- a/net/rxrpc/af_rxrpc.c
83207 +++ b/net/rxrpc/af_rxrpc.c
83208 @@ -38,7 +38,7 @@ static const struct proto_ops rxrpc_rpc_ops;
83209 __be32 rxrpc_epoch;
83210
83211 /* current debugging ID */
83212 -atomic_t rxrpc_debug_id;
83213 +atomic_unchecked_t rxrpc_debug_id;
83214
83215 /* count of skbs currently in use */
83216 atomic_t rxrpc_n_skbs;
83217 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
83218 index b4a2209..539106c 100644
83219 --- a/net/rxrpc/ar-ack.c
83220 +++ b/net/rxrpc/ar-ack.c
83221 @@ -174,7 +174,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
83222
83223 _enter("{%d,%d,%d,%d},",
83224 call->acks_hard, call->acks_unacked,
83225 - atomic_read(&call->sequence),
83226 + atomic_read_unchecked(&call->sequence),
83227 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
83228
83229 stop = 0;
83230 @@ -198,7 +198,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
83231
83232 /* each Tx packet has a new serial number */
83233 sp->hdr.serial =
83234 - htonl(atomic_inc_return(&call->conn->serial));
83235 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
83236
83237 hdr = (struct rxrpc_header *) txb->head;
83238 hdr->serial = sp->hdr.serial;
83239 @@ -401,7 +401,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
83240 */
83241 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
83242 {
83243 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
83244 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
83245 }
83246
83247 /*
83248 @@ -627,7 +627,7 @@ process_further:
83249
83250 latest = ntohl(sp->hdr.serial);
83251 hard = ntohl(ack.firstPacket);
83252 - tx = atomic_read(&call->sequence);
83253 + tx = atomic_read_unchecked(&call->sequence);
83254
83255 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
83256 latest,
83257 @@ -840,6 +840,8 @@ void rxrpc_process_call(struct work_struct *work)
83258 u32 abort_code = RX_PROTOCOL_ERROR;
83259 u8 *acks = NULL;
83260
83261 + pax_track_stack();
83262 +
83263 //printk("\n--------------------\n");
83264 _enter("{%d,%s,%lx} [%lu]",
83265 call->debug_id, rxrpc_call_states[call->state], call->events,
83266 @@ -1159,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
83267 goto maybe_reschedule;
83268
83269 send_ACK_with_skew:
83270 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
83271 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
83272 ntohl(ack.serial));
83273 send_ACK:
83274 mtu = call->conn->trans->peer->if_mtu;
83275 @@ -1171,7 +1173,7 @@ send_ACK:
83276 ackinfo.rxMTU = htonl(5692);
83277 ackinfo.jumbo_max = htonl(4);
83278
83279 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
83280 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
83281 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
83282 ntohl(hdr.serial),
83283 ntohs(ack.maxSkew),
83284 @@ -1189,7 +1191,7 @@ send_ACK:
83285 send_message:
83286 _debug("send message");
83287
83288 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
83289 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
83290 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
83291 send_message_2:
83292
83293 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
83294 index bc0019f..e1b4b24 100644
83295 --- a/net/rxrpc/ar-call.c
83296 +++ b/net/rxrpc/ar-call.c
83297 @@ -82,7 +82,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
83298 spin_lock_init(&call->lock);
83299 rwlock_init(&call->state_lock);
83300 atomic_set(&call->usage, 1);
83301 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
83302 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
83303 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
83304
83305 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
83306 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
83307 index 9f1ce84..ff8d061 100644
83308 --- a/net/rxrpc/ar-connection.c
83309 +++ b/net/rxrpc/ar-connection.c
83310 @@ -205,7 +205,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
83311 rwlock_init(&conn->lock);
83312 spin_lock_init(&conn->state_lock);
83313 atomic_set(&conn->usage, 1);
83314 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
83315 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
83316 conn->avail_calls = RXRPC_MAXCALLS;
83317 conn->size_align = 4;
83318 conn->header_size = sizeof(struct rxrpc_header);
83319 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
83320 index 0505cdc..f0748ce 100644
83321 --- a/net/rxrpc/ar-connevent.c
83322 +++ b/net/rxrpc/ar-connevent.c
83323 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
83324
83325 len = iov[0].iov_len + iov[1].iov_len;
83326
83327 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
83328 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
83329 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
83330
83331 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
83332 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
83333 index f98c802..9e8488e 100644
83334 --- a/net/rxrpc/ar-input.c
83335 +++ b/net/rxrpc/ar-input.c
83336 @@ -339,9 +339,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
83337 /* track the latest serial number on this connection for ACK packet
83338 * information */
83339 serial = ntohl(sp->hdr.serial);
83340 - hi_serial = atomic_read(&call->conn->hi_serial);
83341 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
83342 while (serial > hi_serial)
83343 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
83344 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
83345 serial);
83346
83347 /* request ACK generation for any ACK or DATA packet that requests
83348 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
83349 index 7043b29..06edcdf 100644
83350 --- a/net/rxrpc/ar-internal.h
83351 +++ b/net/rxrpc/ar-internal.h
83352 @@ -272,8 +272,8 @@ struct rxrpc_connection {
83353 int error; /* error code for local abort */
83354 int debug_id; /* debug ID for printks */
83355 unsigned call_counter; /* call ID counter */
83356 - atomic_t serial; /* packet serial number counter */
83357 - atomic_t hi_serial; /* highest serial number received */
83358 + atomic_unchecked_t serial; /* packet serial number counter */
83359 + atomic_unchecked_t hi_serial; /* highest serial number received */
83360 u8 avail_calls; /* number of calls available */
83361 u8 size_align; /* data size alignment (for security) */
83362 u8 header_size; /* rxrpc + security header size */
83363 @@ -346,7 +346,7 @@ struct rxrpc_call {
83364 spinlock_t lock;
83365 rwlock_t state_lock; /* lock for state transition */
83366 atomic_t usage;
83367 - atomic_t sequence; /* Tx data packet sequence counter */
83368 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
83369 u32 abort_code; /* local/remote abort code */
83370 enum { /* current state of call */
83371 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
83372 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
83373 */
83374 extern atomic_t rxrpc_n_skbs;
83375 extern __be32 rxrpc_epoch;
83376 -extern atomic_t rxrpc_debug_id;
83377 +extern atomic_unchecked_t rxrpc_debug_id;
83378 extern struct workqueue_struct *rxrpc_workqueue;
83379
83380 /*
83381 diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
83382 index 74697b2..10f9b77 100644
83383 --- a/net/rxrpc/ar-key.c
83384 +++ b/net/rxrpc/ar-key.c
83385 @@ -88,11 +88,11 @@ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr,
83386 return ret;
83387
83388 plen -= sizeof(*token);
83389 - token = kmalloc(sizeof(*token), GFP_KERNEL);
83390 + token = kzalloc(sizeof(*token), GFP_KERNEL);
83391 if (!token)
83392 return -ENOMEM;
83393
83394 - token->kad = kmalloc(plen, GFP_KERNEL);
83395 + token->kad = kzalloc(plen, GFP_KERNEL);
83396 if (!token->kad) {
83397 kfree(token);
83398 return -ENOMEM;
83399 @@ -730,10 +730,10 @@ static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen)
83400 goto error;
83401
83402 ret = -ENOMEM;
83403 - token = kmalloc(sizeof(*token), GFP_KERNEL);
83404 + token = kzalloc(sizeof(*token), GFP_KERNEL);
83405 if (!token)
83406 goto error;
83407 - token->kad = kmalloc(plen, GFP_KERNEL);
83408 + token->kad = kzalloc(plen, GFP_KERNEL);
83409 if (!token->kad)
83410 goto error_free;
83411
83412 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
83413 index 807535f..5b7f19e 100644
83414 --- a/net/rxrpc/ar-local.c
83415 +++ b/net/rxrpc/ar-local.c
83416 @@ -44,7 +44,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
83417 spin_lock_init(&local->lock);
83418 rwlock_init(&local->services_lock);
83419 atomic_set(&local->usage, 1);
83420 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
83421 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
83422 memcpy(&local->srx, srx, sizeof(*srx));
83423 }
83424
83425 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
83426 index cc9102c..7d3888e 100644
83427 --- a/net/rxrpc/ar-output.c
83428 +++ b/net/rxrpc/ar-output.c
83429 @@ -680,9 +680,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
83430 sp->hdr.cid = call->cid;
83431 sp->hdr.callNumber = call->call_id;
83432 sp->hdr.seq =
83433 - htonl(atomic_inc_return(&call->sequence));
83434 + htonl(atomic_inc_return_unchecked(&call->sequence));
83435 sp->hdr.serial =
83436 - htonl(atomic_inc_return(&conn->serial));
83437 + htonl(atomic_inc_return_unchecked(&conn->serial));
83438 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
83439 sp->hdr.userStatus = 0;
83440 sp->hdr.securityIndex = conn->security_ix;
83441 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
83442 index edc026c..4bd4e2d 100644
83443 --- a/net/rxrpc/ar-peer.c
83444 +++ b/net/rxrpc/ar-peer.c
83445 @@ -86,7 +86,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
83446 INIT_LIST_HEAD(&peer->error_targets);
83447 spin_lock_init(&peer->lock);
83448 atomic_set(&peer->usage, 1);
83449 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
83450 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
83451 memcpy(&peer->srx, srx, sizeof(*srx));
83452
83453 rxrpc_assess_MTU_size(peer);
83454 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
83455 index 38047f7..9f48511 100644
83456 --- a/net/rxrpc/ar-proc.c
83457 +++ b/net/rxrpc/ar-proc.c
83458 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
83459 atomic_read(&conn->usage),
83460 rxrpc_conn_states[conn->state],
83461 key_serial(conn->key),
83462 - atomic_read(&conn->serial),
83463 - atomic_read(&conn->hi_serial));
83464 + atomic_read_unchecked(&conn->serial),
83465 + atomic_read_unchecked(&conn->hi_serial));
83466
83467 return 0;
83468 }
83469 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
83470 index 0936e1a..437c640 100644
83471 --- a/net/rxrpc/ar-transport.c
83472 +++ b/net/rxrpc/ar-transport.c
83473 @@ -46,7 +46,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
83474 spin_lock_init(&trans->client_lock);
83475 rwlock_init(&trans->conn_lock);
83476 atomic_set(&trans->usage, 1);
83477 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
83478 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
83479
83480 if (peer->srx.transport.family == AF_INET) {
83481 switch (peer->srx.transport_type) {
83482 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
83483 index 713ac59..306f6ae 100644
83484 --- a/net/rxrpc/rxkad.c
83485 +++ b/net/rxrpc/rxkad.c
83486 @@ -210,6 +210,8 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
83487 u16 check;
83488 int nsg;
83489
83490 + pax_track_stack();
83491 +
83492 sp = rxrpc_skb(skb);
83493
83494 _enter("");
83495 @@ -337,6 +339,8 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
83496 u16 check;
83497 int nsg;
83498
83499 + pax_track_stack();
83500 +
83501 _enter("");
83502
83503 sp = rxrpc_skb(skb);
83504 @@ -609,7 +613,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
83505
83506 len = iov[0].iov_len + iov[1].iov_len;
83507
83508 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
83509 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
83510 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
83511
83512 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
83513 @@ -659,7 +663,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
83514
83515 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
83516
83517 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
83518 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
83519 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
83520
83521 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
83522 diff --git a/net/sctp/auth.c b/net/sctp/auth.c
83523 index 914c419..7a16d2c 100644
83524 --- a/net/sctp/auth.c
83525 +++ b/net/sctp/auth.c
83526 @@ -81,7 +81,7 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
83527 struct sctp_auth_bytes *key;
83528
83529 /* Verify that we are not going to overflow INT_MAX */
83530 - if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes))
83531 + if (key_len > (INT_MAX - sizeof(struct sctp_auth_bytes)))
83532 return NULL;
83533
83534 /* Allocate the shared key */
83535 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
83536 index d093cbf..9fc36fc 100644
83537 --- a/net/sctp/proc.c
83538 +++ b/net/sctp/proc.c
83539 @@ -213,7 +213,12 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
83540 sctp_for_each_hentry(epb, node, &head->chain) {
83541 ep = sctp_ep(epb);
83542 sk = epb->sk;
83543 - seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
83544 + seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ",
83545 +#ifdef CONFIG_GRKERNSEC_HIDESYM
83546 + NULL, NULL,
83547 +#else
83548 + ep, sk,
83549 +#endif
83550 sctp_sk(sk)->type, sk->sk_state, hash,
83551 epb->bind_addr.port,
83552 sock_i_uid(sk), sock_i_ino(sk));
83553 @@ -320,7 +325,12 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
83554 seq_printf(seq,
83555 "%8p %8p %-3d %-3d %-2d %-4d "
83556 "%4d %8d %8d %7d %5lu %-5d %5d ",
83557 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
83558 +#ifdef CONFIG_GRKERNSEC_HIDESYM
83559 + NULL, NULL,
83560 +#else
83561 + assoc, sk,
83562 +#endif
83563 + sctp_sk(sk)->type, sk->sk_state,
83564 assoc->state, hash,
83565 assoc->assoc_id,
83566 assoc->sndbuf_used,
83567 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
83568 index 3a95fcb..c40fc1d 100644
83569 --- a/net/sctp/socket.c
83570 +++ b/net/sctp/socket.c
83571 @@ -5802,7 +5802,6 @@ pp_found:
83572 */
83573 int reuse = sk->sk_reuse;
83574 struct sock *sk2;
83575 - struct hlist_node *node;
83576
83577 SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
83578 if (pp->fastreuse && sk->sk_reuse &&
83579 diff --git a/net/socket.c b/net/socket.c
83580 index d449812..4ac08d3c 100644
83581 --- a/net/socket.c
83582 +++ b/net/socket.c
83583 @@ -87,6 +87,7 @@
83584 #include <linux/wireless.h>
83585 #include <linux/nsproxy.h>
83586 #include <linux/magic.h>
83587 +#include <linux/in.h>
83588
83589 #include <asm/uaccess.h>
83590 #include <asm/unistd.h>
83591 @@ -97,6 +98,21 @@
83592 #include <net/sock.h>
83593 #include <linux/netfilter.h>
83594
83595 +extern void gr_attach_curr_ip(const struct sock *sk);
83596 +extern int gr_handle_sock_all(const int family, const int type,
83597 + const int protocol);
83598 +extern int gr_handle_sock_server(const struct sockaddr *sck);
83599 +extern int gr_handle_sock_server_other(const struct sock *sck);
83600 +extern int gr_handle_sock_client(const struct sockaddr *sck);
83601 +extern int gr_search_connect(struct socket * sock,
83602 + struct sockaddr_in * addr);
83603 +extern int gr_search_bind(struct socket * sock,
83604 + struct sockaddr_in * addr);
83605 +extern int gr_search_listen(struct socket * sock);
83606 +extern int gr_search_accept(struct socket * sock);
83607 +extern int gr_search_socket(const int domain, const int type,
83608 + const int protocol);
83609 +
83610 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
83611 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
83612 unsigned long nr_segs, loff_t pos);
83613 @@ -298,7 +314,7 @@ static int sockfs_get_sb(struct file_system_type *fs_type,
83614 mnt);
83615 }
83616
83617 -static struct vfsmount *sock_mnt __read_mostly;
83618 +struct vfsmount *sock_mnt __read_mostly;
83619
83620 static struct file_system_type sock_fs_type = {
83621 .name = "sockfs",
83622 @@ -1154,6 +1170,8 @@ static int __sock_create(struct net *net, int family, int type, int protocol,
83623 return -EAFNOSUPPORT;
83624 if (type < 0 || type >= SOCK_MAX)
83625 return -EINVAL;
83626 + if (protocol < 0)
83627 + return -EINVAL;
83628
83629 /* Compatibility.
83630
83631 @@ -1283,6 +1301,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
83632 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
83633 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
83634
83635 + if(!gr_search_socket(family, type, protocol)) {
83636 + retval = -EACCES;
83637 + goto out;
83638 + }
83639 +
83640 + if (gr_handle_sock_all(family, type, protocol)) {
83641 + retval = -EACCES;
83642 + goto out;
83643 + }
83644 +
83645 retval = sock_create(family, type, protocol, &sock);
83646 if (retval < 0)
83647 goto out;
83648 @@ -1415,6 +1443,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
83649 if (sock) {
83650 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
83651 if (err >= 0) {
83652 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
83653 + err = -EACCES;
83654 + goto error;
83655 + }
83656 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
83657 + if (err)
83658 + goto error;
83659 +
83660 err = security_socket_bind(sock,
83661 (struct sockaddr *)&address,
83662 addrlen);
83663 @@ -1423,6 +1459,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
83664 (struct sockaddr *)
83665 &address, addrlen);
83666 }
83667 +error:
83668 fput_light(sock->file, fput_needed);
83669 }
83670 return err;
83671 @@ -1446,10 +1483,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
83672 if ((unsigned)backlog > somaxconn)
83673 backlog = somaxconn;
83674
83675 + if (gr_handle_sock_server_other(sock->sk)) {
83676 + err = -EPERM;
83677 + goto error;
83678 + }
83679 +
83680 + err = gr_search_listen(sock);
83681 + if (err)
83682 + goto error;
83683 +
83684 err = security_socket_listen(sock, backlog);
83685 if (!err)
83686 err = sock->ops->listen(sock, backlog);
83687
83688 +error:
83689 fput_light(sock->file, fput_needed);
83690 }
83691 return err;
83692 @@ -1492,6 +1539,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
83693 newsock->type = sock->type;
83694 newsock->ops = sock->ops;
83695
83696 + if (gr_handle_sock_server_other(sock->sk)) {
83697 + err = -EPERM;
83698 + sock_release(newsock);
83699 + goto out_put;
83700 + }
83701 +
83702 + err = gr_search_accept(sock);
83703 + if (err) {
83704 + sock_release(newsock);
83705 + goto out_put;
83706 + }
83707 +
83708 /*
83709 * We don't need try_module_get here, as the listening socket (sock)
83710 * has the protocol module (sock->ops->owner) held.
83711 @@ -1534,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
83712 fd_install(newfd, newfile);
83713 err = newfd;
83714
83715 + gr_attach_curr_ip(newsock->sk);
83716 +
83717 out_put:
83718 fput_light(sock->file, fput_needed);
83719 out:
83720 @@ -1571,6 +1632,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
83721 int, addrlen)
83722 {
83723 struct socket *sock;
83724 + struct sockaddr *sck;
83725 struct sockaddr_storage address;
83726 int err, fput_needed;
83727
83728 @@ -1581,6 +1643,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
83729 if (err < 0)
83730 goto out_put;
83731
83732 + sck = (struct sockaddr *)&address;
83733 +
83734 + if (gr_handle_sock_client(sck)) {
83735 + err = -EACCES;
83736 + goto out_put;
83737 + }
83738 +
83739 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
83740 + if (err)
83741 + goto out_put;
83742 +
83743 err =
83744 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
83745 if (err)
83746 @@ -1882,6 +1955,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
83747 int err, ctl_len, iov_size, total_len;
83748 int fput_needed;
83749
83750 + pax_track_stack();
83751 +
83752 err = -EFAULT;
83753 if (MSG_CMSG_COMPAT & flags) {
83754 if (get_compat_msghdr(&msg_sys, msg_compat))
83755 @@ -2022,7 +2097,7 @@ SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
83756 * kernel msghdr to use the kernel address space)
83757 */
83758
83759 - uaddr = (__force void __user *)msg_sys.msg_name;
83760 + uaddr = (void __force_user *)msg_sys.msg_name;
83761 uaddr_len = COMPAT_NAMELEN(msg);
83762 if (MSG_CMSG_COMPAT & flags) {
83763 err = verify_compat_iovec(&msg_sys, iov,
83764 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
83765 index ac94477..8afe5c3 100644
83766 --- a/net/sunrpc/sched.c
83767 +++ b/net/sunrpc/sched.c
83768 @@ -234,10 +234,10 @@ static int rpc_wait_bit_killable(void *word)
83769 #ifdef RPC_DEBUG
83770 static void rpc_task_set_debuginfo(struct rpc_task *task)
83771 {
83772 - static atomic_t rpc_pid;
83773 + static atomic_unchecked_t rpc_pid;
83774
83775 task->tk_magic = RPC_TASK_MAGIC_ID;
83776 - task->tk_pid = atomic_inc_return(&rpc_pid);
83777 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
83778 }
83779 #else
83780 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
83781 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
83782 index 35fb68b..236a8bf 100644
83783 --- a/net/sunrpc/xprtrdma/svc_rdma.c
83784 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
83785 @@ -59,15 +59,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
83786 static unsigned int min_max_inline = 4096;
83787 static unsigned int max_max_inline = 65536;
83788
83789 -atomic_t rdma_stat_recv;
83790 -atomic_t rdma_stat_read;
83791 -atomic_t rdma_stat_write;
83792 -atomic_t rdma_stat_sq_starve;
83793 -atomic_t rdma_stat_rq_starve;
83794 -atomic_t rdma_stat_rq_poll;
83795 -atomic_t rdma_stat_rq_prod;
83796 -atomic_t rdma_stat_sq_poll;
83797 -atomic_t rdma_stat_sq_prod;
83798 +atomic_unchecked_t rdma_stat_recv;
83799 +atomic_unchecked_t rdma_stat_read;
83800 +atomic_unchecked_t rdma_stat_write;
83801 +atomic_unchecked_t rdma_stat_sq_starve;
83802 +atomic_unchecked_t rdma_stat_rq_starve;
83803 +atomic_unchecked_t rdma_stat_rq_poll;
83804 +atomic_unchecked_t rdma_stat_rq_prod;
83805 +atomic_unchecked_t rdma_stat_sq_poll;
83806 +atomic_unchecked_t rdma_stat_sq_prod;
83807
83808 /* Temporary NFS request map and context caches */
83809 struct kmem_cache *svc_rdma_map_cachep;
83810 @@ -105,7 +105,7 @@ static int read_reset_stat(ctl_table *table, int write,
83811 len -= *ppos;
83812 if (len > *lenp)
83813 len = *lenp;
83814 - if (len && copy_to_user(buffer, str_buf, len))
83815 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
83816 return -EFAULT;
83817 *lenp = len;
83818 *ppos += len;
83819 @@ -149,63 +149,63 @@ static ctl_table svcrdma_parm_table[] = {
83820 {
83821 .procname = "rdma_stat_read",
83822 .data = &rdma_stat_read,
83823 - .maxlen = sizeof(atomic_t),
83824 + .maxlen = sizeof(atomic_unchecked_t),
83825 .mode = 0644,
83826 .proc_handler = &read_reset_stat,
83827 },
83828 {
83829 .procname = "rdma_stat_recv",
83830 .data = &rdma_stat_recv,
83831 - .maxlen = sizeof(atomic_t),
83832 + .maxlen = sizeof(atomic_unchecked_t),
83833 .mode = 0644,
83834 .proc_handler = &read_reset_stat,
83835 },
83836 {
83837 .procname = "rdma_stat_write",
83838 .data = &rdma_stat_write,
83839 - .maxlen = sizeof(atomic_t),
83840 + .maxlen = sizeof(atomic_unchecked_t),
83841 .mode = 0644,
83842 .proc_handler = &read_reset_stat,
83843 },
83844 {
83845 .procname = "rdma_stat_sq_starve",
83846 .data = &rdma_stat_sq_starve,
83847 - .maxlen = sizeof(atomic_t),
83848 + .maxlen = sizeof(atomic_unchecked_t),
83849 .mode = 0644,
83850 .proc_handler = &read_reset_stat,
83851 },
83852 {
83853 .procname = "rdma_stat_rq_starve",
83854 .data = &rdma_stat_rq_starve,
83855 - .maxlen = sizeof(atomic_t),
83856 + .maxlen = sizeof(atomic_unchecked_t),
83857 .mode = 0644,
83858 .proc_handler = &read_reset_stat,
83859 },
83860 {
83861 .procname = "rdma_stat_rq_poll",
83862 .data = &rdma_stat_rq_poll,
83863 - .maxlen = sizeof(atomic_t),
83864 + .maxlen = sizeof(atomic_unchecked_t),
83865 .mode = 0644,
83866 .proc_handler = &read_reset_stat,
83867 },
83868 {
83869 .procname = "rdma_stat_rq_prod",
83870 .data = &rdma_stat_rq_prod,
83871 - .maxlen = sizeof(atomic_t),
83872 + .maxlen = sizeof(atomic_unchecked_t),
83873 .mode = 0644,
83874 .proc_handler = &read_reset_stat,
83875 },
83876 {
83877 .procname = "rdma_stat_sq_poll",
83878 .data = &rdma_stat_sq_poll,
83879 - .maxlen = sizeof(atomic_t),
83880 + .maxlen = sizeof(atomic_unchecked_t),
83881 .mode = 0644,
83882 .proc_handler = &read_reset_stat,
83883 },
83884 {
83885 .procname = "rdma_stat_sq_prod",
83886 .data = &rdma_stat_sq_prod,
83887 - .maxlen = sizeof(atomic_t),
83888 + .maxlen = sizeof(atomic_unchecked_t),
83889 .mode = 0644,
83890 .proc_handler = &read_reset_stat,
83891 },
83892 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
83893 index 9e88438..8ed5cf0 100644
83894 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
83895 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
83896 @@ -495,7 +495,7 @@ next_sge:
83897 svc_rdma_put_context(ctxt, 0);
83898 goto out;
83899 }
83900 - atomic_inc(&rdma_stat_read);
83901 + atomic_inc_unchecked(&rdma_stat_read);
83902
83903 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
83904 chl_map->ch[ch_no].count -= read_wr.num_sge;
83905 @@ -606,7 +606,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
83906 dto_q);
83907 list_del_init(&ctxt->dto_q);
83908 } else {
83909 - atomic_inc(&rdma_stat_rq_starve);
83910 + atomic_inc_unchecked(&rdma_stat_rq_starve);
83911 clear_bit(XPT_DATA, &xprt->xpt_flags);
83912 ctxt = NULL;
83913 }
83914 @@ -626,7 +626,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
83915 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
83916 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
83917 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
83918 - atomic_inc(&rdma_stat_recv);
83919 + atomic_inc_unchecked(&rdma_stat_recv);
83920
83921 /* Build up the XDR from the receive buffers. */
83922 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
83923 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
83924 index f11be72..7aad4e8 100644
83925 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
83926 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
83927 @@ -328,7 +328,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
83928 write_wr.wr.rdma.remote_addr = to;
83929
83930 /* Post It */
83931 - atomic_inc(&rdma_stat_write);
83932 + atomic_inc_unchecked(&rdma_stat_write);
83933 if (svc_rdma_send(xprt, &write_wr))
83934 goto err;
83935 return 0;
83936 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
83937 index 3fa5751..030ba89 100644
83938 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
83939 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
83940 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
83941 return;
83942
83943 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
83944 - atomic_inc(&rdma_stat_rq_poll);
83945 + atomic_inc_unchecked(&rdma_stat_rq_poll);
83946
83947 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
83948 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
83949 @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
83950 }
83951
83952 if (ctxt)
83953 - atomic_inc(&rdma_stat_rq_prod);
83954 + atomic_inc_unchecked(&rdma_stat_rq_prod);
83955
83956 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
83957 /*
83958 @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
83959 return;
83960
83961 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
83962 - atomic_inc(&rdma_stat_sq_poll);
83963 + atomic_inc_unchecked(&rdma_stat_sq_poll);
83964 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
83965 if (wc.status != IB_WC_SUCCESS)
83966 /* Close the transport */
83967 @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
83968 }
83969
83970 if (ctxt)
83971 - atomic_inc(&rdma_stat_sq_prod);
83972 + atomic_inc_unchecked(&rdma_stat_sq_prod);
83973 }
83974
83975 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
83976 @@ -1260,7 +1260,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
83977 spin_lock_bh(&xprt->sc_lock);
83978 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
83979 spin_unlock_bh(&xprt->sc_lock);
83980 - atomic_inc(&rdma_stat_sq_starve);
83981 + atomic_inc_unchecked(&rdma_stat_sq_starve);
83982
83983 /* See if we can opportunistically reap SQ WR to make room */
83984 sq_cq_reap(xprt);
83985 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
83986 index 0b15d72..7934fbb 100644
83987 --- a/net/sysctl_net.c
83988 +++ b/net/sysctl_net.c
83989 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
83990 struct ctl_table *table)
83991 {
83992 /* Allow network administrator to have same access as root. */
83993 - if (capable(CAP_NET_ADMIN)) {
83994 + if (capable_nolog(CAP_NET_ADMIN)) {
83995 int mode = (table->mode >> 6) & 7;
83996 return (mode << 6) | (mode << 3) | mode;
83997 }
83998 diff --git a/net/tipc/link.c b/net/tipc/link.c
83999 index dd4c18b..f40d38d 100644
84000 --- a/net/tipc/link.c
84001 +++ b/net/tipc/link.c
84002 @@ -1418,7 +1418,7 @@ again:
84003
84004 if (!sect_rest) {
84005 sect_rest = msg_sect[++curr_sect].iov_len;
84006 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
84007 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
84008 }
84009
84010 if (sect_rest < fragm_rest)
84011 @@ -1437,7 +1437,7 @@ error:
84012 }
84013 } else
84014 skb_copy_to_linear_data_offset(buf, fragm_crs,
84015 - sect_crs, sz);
84016 + (const void __force_kernel *)sect_crs, sz);
84017 sect_crs += sz;
84018 sect_rest -= sz;
84019 fragm_crs += sz;
84020 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
84021 index 0747d8a..e8bf3f3 100644
84022 --- a/net/tipc/subscr.c
84023 +++ b/net/tipc/subscr.c
84024 @@ -104,7 +104,7 @@ static void subscr_send_event(struct subscription *sub,
84025 {
84026 struct iovec msg_sect;
84027
84028 - msg_sect.iov_base = (void *)&sub->evt;
84029 + msg_sect.iov_base = (void __force_user *)&sub->evt;
84030 msg_sect.iov_len = sizeof(struct tipc_event);
84031
84032 sub->evt.event = htohl(event, sub->swap);
84033 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
84034 index db8d51a..608692d 100644
84035 --- a/net/unix/af_unix.c
84036 +++ b/net/unix/af_unix.c
84037 @@ -745,6 +745,12 @@ static struct sock *unix_find_other(struct net *net,
84038 err = -ECONNREFUSED;
84039 if (!S_ISSOCK(inode->i_mode))
84040 goto put_fail;
84041 +
84042 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
84043 + err = -EACCES;
84044 + goto put_fail;
84045 + }
84046 +
84047 u = unix_find_socket_byinode(net, inode);
84048 if (!u)
84049 goto put_fail;
84050 @@ -765,6 +771,13 @@ static struct sock *unix_find_other(struct net *net,
84051 if (u) {
84052 struct dentry *dentry;
84053 dentry = unix_sk(u)->dentry;
84054 +
84055 + if (!gr_handle_chroot_unix(u->sk_peercred.pid)) {
84056 + err = -EPERM;
84057 + sock_put(u);
84058 + goto fail;
84059 + }
84060 +
84061 if (dentry)
84062 touch_atime(unix_sk(u)->mnt, dentry);
84063 } else
84064 @@ -850,11 +863,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
84065 err = security_path_mknod(&nd.path, dentry, mode, 0);
84066 if (err)
84067 goto out_mknod_drop_write;
84068 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
84069 + err = -EACCES;
84070 + goto out_mknod_drop_write;
84071 + }
84072 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
84073 out_mknod_drop_write:
84074 mnt_drop_write(nd.path.mnt);
84075 if (err)
84076 goto out_mknod_dput;
84077 +
84078 + gr_handle_create(dentry, nd.path.mnt);
84079 +
84080 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
84081 dput(nd.path.dentry);
84082 nd.path.dentry = dentry;
84083 @@ -2211,7 +2231,11 @@ static int unix_seq_show(struct seq_file *seq, void *v)
84084 unix_state_lock(s);
84085
84086 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
84087 +#ifdef CONFIG_GRKERNSEC_HIDESYM
84088 + NULL,
84089 +#else
84090 s,
84091 +#endif
84092 atomic_read(&s->sk_refcnt),
84093 0,
84094 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
84095 diff --git a/net/wireless/core.h b/net/wireless/core.h
84096 index 376798f..109a61f 100644
84097 --- a/net/wireless/core.h
84098 +++ b/net/wireless/core.h
84099 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
84100 struct mutex mtx;
84101
84102 /* rfkill support */
84103 - struct rfkill_ops rfkill_ops;
84104 + rfkill_ops_no_const rfkill_ops;
84105 struct rfkill *rfkill;
84106 struct work_struct rfkill_sync;
84107
84108 diff --git a/net/wireless/wext.c b/net/wireless/wext.c
84109 index a2e4c60..0979cbe 100644
84110 --- a/net/wireless/wext.c
84111 +++ b/net/wireless/wext.c
84112 @@ -816,8 +816,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
84113 */
84114
84115 /* Support for very large requests */
84116 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
84117 - (user_length > descr->max_tokens)) {
84118 + if (user_length > descr->max_tokens) {
84119 /* Allow userspace to GET more than max so
84120 * we can support any size GET requests.
84121 * There is still a limit : -ENOMEM.
84122 @@ -854,22 +853,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
84123 }
84124 }
84125
84126 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
84127 - /*
84128 - * If this is a GET, but not NOMAX, it means that the extra
84129 - * data is not bounded by userspace, but by max_tokens. Thus
84130 - * set the length to max_tokens. This matches the extra data
84131 - * allocation.
84132 - * The driver should fill it with the number of tokens it
84133 - * provided, and it may check iwp->length rather than having
84134 - * knowledge of max_tokens. If the driver doesn't change the
84135 - * iwp->length, this ioctl just copies back max_token tokens
84136 - * filled with zeroes. Hopefully the driver isn't claiming
84137 - * them to be valid data.
84138 - */
84139 - iwp->length = descr->max_tokens;
84140 - }
84141 -
84142 err = handler(dev, info, (union iwreq_data *) iwp, extra);
84143
84144 iwp->length += essid_compat;
84145 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
84146 index cb81ca3..e15d49a 100644
84147 --- a/net/xfrm/xfrm_policy.c
84148 +++ b/net/xfrm/xfrm_policy.c
84149 @@ -586,7 +586,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
84150 hlist_add_head(&policy->bydst, chain);
84151 xfrm_pol_hold(policy);
84152 net->xfrm.policy_count[dir]++;
84153 - atomic_inc(&flow_cache_genid);
84154 + atomic_inc_unchecked(&flow_cache_genid);
84155 if (delpol)
84156 __xfrm_policy_unlink(delpol, dir);
84157 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
84158 @@ -669,7 +669,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u8 type, int dir,
84159 write_unlock_bh(&xfrm_policy_lock);
84160
84161 if (ret && delete) {
84162 - atomic_inc(&flow_cache_genid);
84163 + atomic_inc_unchecked(&flow_cache_genid);
84164 xfrm_policy_kill(ret);
84165 }
84166 return ret;
84167 @@ -710,7 +710,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u8 type, int dir, u32 id,
84168 write_unlock_bh(&xfrm_policy_lock);
84169
84170 if (ret && delete) {
84171 - atomic_inc(&flow_cache_genid);
84172 + atomic_inc_unchecked(&flow_cache_genid);
84173 xfrm_policy_kill(ret);
84174 }
84175 return ret;
84176 @@ -824,7 +824,7 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
84177 }
84178
84179 }
84180 - atomic_inc(&flow_cache_genid);
84181 + atomic_inc_unchecked(&flow_cache_genid);
84182 out:
84183 write_unlock_bh(&xfrm_policy_lock);
84184 return err;
84185 @@ -1088,7 +1088,7 @@ int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
84186 write_unlock_bh(&xfrm_policy_lock);
84187 if (pol) {
84188 if (dir < XFRM_POLICY_MAX)
84189 - atomic_inc(&flow_cache_genid);
84190 + atomic_inc_unchecked(&flow_cache_genid);
84191 xfrm_policy_kill(pol);
84192 return 0;
84193 }
84194 @@ -1477,7 +1477,7 @@ free_dst:
84195 goto out;
84196 }
84197
84198 -static int inline
84199 +static inline int
84200 xfrm_dst_alloc_copy(void **target, void *src, int size)
84201 {
84202 if (!*target) {
84203 @@ -1489,7 +1489,7 @@ xfrm_dst_alloc_copy(void **target, void *src, int size)
84204 return 0;
84205 }
84206
84207 -static int inline
84208 +static inline int
84209 xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
84210 {
84211 #ifdef CONFIG_XFRM_SUB_POLICY
84212 @@ -1501,7 +1501,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
84213 #endif
84214 }
84215
84216 -static int inline
84217 +static inline int
84218 xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
84219 {
84220 #ifdef CONFIG_XFRM_SUB_POLICY
84221 @@ -1537,7 +1537,7 @@ int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl,
84222 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
84223
84224 restart:
84225 - genid = atomic_read(&flow_cache_genid);
84226 + genid = atomic_read_unchecked(&flow_cache_genid);
84227 policy = NULL;
84228 for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
84229 pols[pi] = NULL;
84230 @@ -1680,7 +1680,7 @@ restart:
84231 goto error;
84232 }
84233 if (nx == -EAGAIN ||
84234 - genid != atomic_read(&flow_cache_genid)) {
84235 + genid != atomic_read_unchecked(&flow_cache_genid)) {
84236 xfrm_pols_put(pols, npols);
84237 goto restart;
84238 }
84239 diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
84240 index b95a2d6..85c4d78 100644
84241 --- a/net/xfrm/xfrm_user.c
84242 +++ b/net/xfrm/xfrm_user.c
84243 @@ -1169,6 +1169,8 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
84244 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
84245 int i;
84246
84247 + pax_track_stack();
84248 +
84249 if (xp->xfrm_nr == 0)
84250 return 0;
84251
84252 @@ -1784,6 +1786,8 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
84253 int err;
84254 int n = 0;
84255
84256 + pax_track_stack();
84257 +
84258 if (attrs[XFRMA_MIGRATE] == NULL)
84259 return -EINVAL;
84260
84261 diff --git a/samples/kobject/kset-example.c b/samples/kobject/kset-example.c
84262 index 45b7d56..19e828c 100644
84263 --- a/samples/kobject/kset-example.c
84264 +++ b/samples/kobject/kset-example.c
84265 @@ -87,7 +87,7 @@ static ssize_t foo_attr_store(struct kobject *kobj,
84266 }
84267
84268 /* Our custom sysfs_ops that we will associate with our ktype later on */
84269 -static struct sysfs_ops foo_sysfs_ops = {
84270 +static const struct sysfs_ops foo_sysfs_ops = {
84271 .show = foo_attr_show,
84272 .store = foo_attr_store,
84273 };
84274 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
84275 index 341b589..405aed3 100644
84276 --- a/scripts/Makefile.build
84277 +++ b/scripts/Makefile.build
84278 @@ -59,7 +59,7 @@ endif
84279 endif
84280
84281 # Do not include host rules unless needed
84282 -ifneq ($(hostprogs-y)$(hostprogs-m),)
84283 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
84284 include scripts/Makefile.host
84285 endif
84286
84287 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
84288 index 6f89fbb..53adc9c 100644
84289 --- a/scripts/Makefile.clean
84290 +++ b/scripts/Makefile.clean
84291 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
84292 __clean-files := $(extra-y) $(always) \
84293 $(targets) $(clean-files) \
84294 $(host-progs) \
84295 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
84296 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
84297 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
84298
84299 # as clean-files is given relative to the current directory, this adds
84300 # a $(obj) prefix, except for absolute paths
84301 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
84302 index 1ac414f..a1c1451 100644
84303 --- a/scripts/Makefile.host
84304 +++ b/scripts/Makefile.host
84305 @@ -31,6 +31,7 @@
84306 # Note: Shared libraries consisting of C++ files are not supported
84307
84308 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
84309 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
84310
84311 # C code
84312 # Executables compiled from a single .c file
84313 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
84314 # Shared libaries (only .c supported)
84315 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
84316 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
84317 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
84318 # Remove .so files from "xxx-objs"
84319 host-cobjs := $(filter-out %.so,$(host-cobjs))
84320
84321 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
84322 index 6bf21f8..c0546b3 100644
84323 --- a/scripts/basic/fixdep.c
84324 +++ b/scripts/basic/fixdep.c
84325 @@ -162,7 +162,7 @@ static void grow_config(int len)
84326 /*
84327 * Lookup a value in the configuration string.
84328 */
84329 -static int is_defined_config(const char * name, int len)
84330 +static int is_defined_config(const char * name, unsigned int len)
84331 {
84332 const char * pconfig;
84333 const char * plast = str_config + len_config - len;
84334 @@ -199,7 +199,7 @@ static void clear_config(void)
84335 /*
84336 * Record the use of a CONFIG_* word.
84337 */
84338 -static void use_config(char *m, int slen)
84339 +static void use_config(char *m, unsigned int slen)
84340 {
84341 char s[PATH_MAX];
84342 char *p;
84343 @@ -222,9 +222,9 @@ static void use_config(char *m, int slen)
84344
84345 static void parse_config_file(char *map, size_t len)
84346 {
84347 - int *end = (int *) (map + len);
84348 + unsigned int *end = (unsigned int *) (map + len);
84349 /* start at +1, so that p can never be < map */
84350 - int *m = (int *) map + 1;
84351 + unsigned int *m = (unsigned int *) map + 1;
84352 char *p, *q;
84353
84354 for (; m < end; m++) {
84355 @@ -371,7 +371,7 @@ static void print_deps(void)
84356 static void traps(void)
84357 {
84358 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
84359 - int *p = (int *)test;
84360 + unsigned int *p = (unsigned int *)test;
84361
84362 if (*p != INT_CONF) {
84363 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
84364 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
84365 new file mode 100644
84366 index 0000000..8729101
84367 --- /dev/null
84368 +++ b/scripts/gcc-plugin.sh
84369 @@ -0,0 +1,2 @@
84370 +#!/bin/sh
84371 +echo -e "#include \"gcc-plugin.h\"\n#include \"tree.h\"\n#include \"tm.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
84372 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
84373 index 62a9025..65b82ad 100644
84374 --- a/scripts/mod/file2alias.c
84375 +++ b/scripts/mod/file2alias.c
84376 @@ -72,7 +72,7 @@ static void device_id_check(const char *modname, const char *device_id,
84377 unsigned long size, unsigned long id_size,
84378 void *symval)
84379 {
84380 - int i;
84381 + unsigned int i;
84382
84383 if (size % id_size || size < id_size) {
84384 if (cross_build != 0)
84385 @@ -102,7 +102,7 @@ static void device_id_check(const char *modname, const char *device_id,
84386 /* USB is special because the bcdDevice can be matched against a numeric range */
84387 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
84388 static void do_usb_entry(struct usb_device_id *id,
84389 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
84390 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
84391 unsigned char range_lo, unsigned char range_hi,
84392 struct module *mod)
84393 {
84394 @@ -151,7 +151,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
84395 {
84396 unsigned int devlo, devhi;
84397 unsigned char chi, clo;
84398 - int ndigits;
84399 + unsigned int ndigits;
84400
84401 id->match_flags = TO_NATIVE(id->match_flags);
84402 id->idVendor = TO_NATIVE(id->idVendor);
84403 @@ -368,7 +368,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
84404 for (i = 0; i < count; i++) {
84405 const char *id = (char *)devs[i].id;
84406 char acpi_id[sizeof(devs[0].id)];
84407 - int j;
84408 + unsigned int j;
84409
84410 buf_printf(&mod->dev_table_buf,
84411 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
84412 @@ -398,7 +398,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
84413
84414 for (j = 0; j < PNP_MAX_DEVICES; j++) {
84415 const char *id = (char *)card->devs[j].id;
84416 - int i2, j2;
84417 + unsigned int i2, j2;
84418 int dup = 0;
84419
84420 if (!id[0])
84421 @@ -424,7 +424,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
84422 /* add an individual alias for every device entry */
84423 if (!dup) {
84424 char acpi_id[sizeof(card->devs[0].id)];
84425 - int k;
84426 + unsigned int k;
84427
84428 buf_printf(&mod->dev_table_buf,
84429 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
84430 @@ -699,7 +699,7 @@ static void dmi_ascii_filter(char *d, const char *s)
84431 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
84432 char *alias)
84433 {
84434 - int i, j;
84435 + unsigned int i, j;
84436
84437 sprintf(alias, "dmi*");
84438
84439 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
84440 index 03efeab..0888989 100644
84441 --- a/scripts/mod/modpost.c
84442 +++ b/scripts/mod/modpost.c
84443 @@ -835,6 +835,7 @@ enum mismatch {
84444 INIT_TO_EXIT,
84445 EXIT_TO_INIT,
84446 EXPORT_TO_INIT_EXIT,
84447 + DATA_TO_TEXT
84448 };
84449
84450 struct sectioncheck {
84451 @@ -920,6 +921,12 @@ const struct sectioncheck sectioncheck[] = {
84452 .fromsec = { "__ksymtab*", NULL },
84453 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
84454 .mismatch = EXPORT_TO_INIT_EXIT
84455 +},
84456 +/* Do not reference code from writable data */
84457 +{
84458 + .fromsec = { DATA_SECTIONS, NULL },
84459 + .tosec = { TEXT_SECTIONS, NULL },
84460 + .mismatch = DATA_TO_TEXT
84461 }
84462 };
84463
84464 @@ -1024,10 +1031,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
84465 continue;
84466 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
84467 continue;
84468 - if (sym->st_value == addr)
84469 - return sym;
84470 /* Find a symbol nearby - addr are maybe negative */
84471 d = sym->st_value - addr;
84472 + if (d == 0)
84473 + return sym;
84474 if (d < 0)
84475 d = addr - sym->st_value;
84476 if (d < distance) {
84477 @@ -1268,6 +1275,14 @@ static void report_sec_mismatch(const char *modname, enum mismatch mismatch,
84478 "Fix this by removing the %sannotation of %s "
84479 "or drop the export.\n",
84480 tosym, sec2annotation(tosec), sec2annotation(tosec), tosym);
84481 + case DATA_TO_TEXT:
84482 +/*
84483 + fprintf(stderr,
84484 + "The variable %s references\n"
84485 + "the %s %s%s%s\n",
84486 + fromsym, to, sec2annotation(tosec), tosym, to_p);
84487 +*/
84488 + break;
84489 case NO_MISMATCH:
84490 /* To get warnings on missing members */
84491 break;
84492 @@ -1495,7 +1510,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
84493 static void check_sec_ref(struct module *mod, const char *modname,
84494 struct elf_info *elf)
84495 {
84496 - int i;
84497 + unsigned int i;
84498 Elf_Shdr *sechdrs = elf->sechdrs;
84499
84500 /* Walk through all sections */
84501 @@ -1651,7 +1666,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
84502 va_end(ap);
84503 }
84504
84505 -void buf_write(struct buffer *buf, const char *s, int len)
84506 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
84507 {
84508 if (buf->size - buf->pos < len) {
84509 buf->size += len + SZ;
84510 @@ -1863,7 +1878,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
84511 if (fstat(fileno(file), &st) < 0)
84512 goto close_write;
84513
84514 - if (st.st_size != b->pos)
84515 + if (st.st_size != (off_t)b->pos)
84516 goto close_write;
84517
84518 tmp = NOFAIL(malloc(b->pos));
84519 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
84520 index 09f58e3..4b66092 100644
84521 --- a/scripts/mod/modpost.h
84522 +++ b/scripts/mod/modpost.h
84523 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
84524
84525 struct buffer {
84526 char *p;
84527 - int pos;
84528 - int size;
84529 + unsigned int pos;
84530 + unsigned int size;
84531 };
84532
84533 void __attribute__((format(printf, 2, 3)))
84534 buf_printf(struct buffer *buf, const char *fmt, ...);
84535
84536 void
84537 -buf_write(struct buffer *buf, const char *s, int len);
84538 +buf_write(struct buffer *buf, const char *s, unsigned int len);
84539
84540 struct module {
84541 struct module *next;
84542 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
84543 index ecf9c7d..d52b38e 100644
84544 --- a/scripts/mod/sumversion.c
84545 +++ b/scripts/mod/sumversion.c
84546 @@ -455,7 +455,7 @@ static void write_version(const char *filename, const char *sum,
84547 goto out;
84548 }
84549
84550 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
84551 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
84552 warn("writing sum in %s failed: %s\n",
84553 filename, strerror(errno));
84554 goto out;
84555 diff --git a/scripts/package/mkspec b/scripts/package/mkspec
84556 index 47bdd2f..d4d4e93 100755
84557 --- a/scripts/package/mkspec
84558 +++ b/scripts/package/mkspec
84559 @@ -70,7 +70,7 @@ echo 'mkdir -p $RPM_BUILD_ROOT/boot $RPM_BUILD_ROOT/lib/modules'
84560 echo 'mkdir -p $RPM_BUILD_ROOT/lib/firmware'
84561 echo "%endif"
84562
84563 -echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{_smp_mflags} KBUILD_SRC= modules_install'
84564 +echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{?_smp_mflags} KBUILD_SRC= modules_install'
84565 echo "%ifarch ia64"
84566 echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/efi/vmlinuz-$KERNELRELEASE"
84567 echo 'ln -s '"efi/vmlinuz-$KERNELRELEASE" '$RPM_BUILD_ROOT'"/boot/"
84568 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
84569 index 5c11312..72742b5 100644
84570 --- a/scripts/pnmtologo.c
84571 +++ b/scripts/pnmtologo.c
84572 @@ -237,14 +237,14 @@ static void write_header(void)
84573 fprintf(out, " * Linux logo %s\n", logoname);
84574 fputs(" */\n\n", out);
84575 fputs("#include <linux/linux_logo.h>\n\n", out);
84576 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
84577 + fprintf(out, "static unsigned char %s_data[] = {\n",
84578 logoname);
84579 }
84580
84581 static void write_footer(void)
84582 {
84583 fputs("\n};\n\n", out);
84584 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
84585 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
84586 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
84587 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
84588 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
84589 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
84590 fputs("\n};\n\n", out);
84591
84592 /* write logo clut */
84593 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
84594 + fprintf(out, "static unsigned char %s_clut[] = {\n",
84595 logoname);
84596 write_hex_cnt = 0;
84597 for (i = 0; i < logo_clutsize; i++) {
84598 diff --git a/scripts/tags.sh b/scripts/tags.sh
84599 index d52f7a0..269eb1b 100755
84600 --- a/scripts/tags.sh
84601 +++ b/scripts/tags.sh
84602 @@ -93,6 +93,11 @@ docscope()
84603 cscope -b -f cscope.out
84604 }
84605
84606 +dogtags()
84607 +{
84608 + all_sources | gtags -f -
84609 +}
84610 +
84611 exuberant()
84612 {
84613 all_sources | xargs $1 -a \
84614 @@ -164,6 +169,10 @@ case "$1" in
84615 docscope
84616 ;;
84617
84618 + "gtags")
84619 + dogtags
84620 + ;;
84621 +
84622 "tags")
84623 rm -f tags
84624 xtags ctags
84625 diff --git a/security/Kconfig b/security/Kconfig
84626 index fb363cd..0524cf3 100644
84627 --- a/security/Kconfig
84628 +++ b/security/Kconfig
84629 @@ -4,6 +4,625 @@
84630
84631 menu "Security options"
84632
84633 +source grsecurity/Kconfig
84634 +
84635 +menu "PaX"
84636 +
84637 + config ARCH_TRACK_EXEC_LIMIT
84638 + bool
84639 +
84640 + config PAX_KERNEXEC_PLUGIN
84641 + bool
84642 +
84643 + config PAX_PER_CPU_PGD
84644 + bool
84645 +
84646 + config TASK_SIZE_MAX_SHIFT
84647 + int
84648 + depends on X86_64
84649 + default 47 if !PAX_PER_CPU_PGD
84650 + default 42 if PAX_PER_CPU_PGD
84651 +
84652 + config PAX_ENABLE_PAE
84653 + bool
84654 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
84655 +
84656 +config PAX
84657 + bool "Enable various PaX features"
84658 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
84659 + help
84660 + This allows you to enable various PaX features. PaX adds
84661 + intrusion prevention mechanisms to the kernel that reduce
84662 + the risks posed by exploitable memory corruption bugs.
84663 +
84664 +menu "PaX Control"
84665 + depends on PAX
84666 +
84667 +config PAX_SOFTMODE
84668 + bool 'Support soft mode'
84669 + help
84670 + Enabling this option will allow you to run PaX in soft mode, that
84671 + is, PaX features will not be enforced by default, only on executables
84672 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
84673 + support as they are the only way to mark executables for soft mode use.
84674 +
84675 + Soft mode can be activated by using the "pax_softmode=1" kernel command
84676 + line option on boot. Furthermore you can control various PaX features
84677 + at runtime via the entries in /proc/sys/kernel/pax.
84678 +
84679 +config PAX_EI_PAX
84680 + bool 'Use legacy ELF header marking'
84681 + help
84682 + Enabling this option will allow you to control PaX features on
84683 + a per executable basis via the 'chpax' utility available at
84684 + http://pax.grsecurity.net/. The control flags will be read from
84685 + an otherwise reserved part of the ELF header. This marking has
84686 + numerous drawbacks (no support for soft-mode, toolchain does not
84687 + know about the non-standard use of the ELF header) therefore it
84688 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
84689 + support.
84690 +
84691 + If you have applications not marked by the PT_PAX_FLAGS ELF program
84692 + header and you cannot use XATTR_PAX_FLAGS then you MUST enable this
84693 + option otherwise they will not get any protection.
84694 +
84695 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
84696 + support as well, they will override the legacy EI_PAX marks.
84697 +
84698 +config PAX_PT_PAX_FLAGS
84699 + bool 'Use ELF program header marking'
84700 + help
84701 + Enabling this option will allow you to control PaX features on
84702 + a per executable basis via the 'paxctl' utility available at
84703 + http://pax.grsecurity.net/. The control flags will be read from
84704 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
84705 + has the benefits of supporting both soft mode and being fully
84706 + integrated into the toolchain (the binutils patch is available
84707 + from http://pax.grsecurity.net).
84708 +
84709 + If you have applications not marked by the PT_PAX_FLAGS ELF program
84710 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
84711 + support otherwise they will not get any protection.
84712 +
84713 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
84714 + must make sure that the marks are the same if a binary has both marks.
84715 +
84716 + Note that if you enable the legacy EI_PAX marking support as well,
84717 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
84718 +
84719 +config PAX_XATTR_PAX_FLAGS
84720 + bool 'Use filesystem extended attributes marking'
84721 + depends on EXPERT
84722 + select CIFS_XATTR if CIFS
84723 + select EXT2_FS_XATTR if EXT2_FS
84724 + select EXT3_FS_XATTR if EXT3_FS
84725 + select EXT4_FS_XATTR if EXT4_FS
84726 + select JFFS2_FS_XATTR if JFFS2_FS
84727 + select REISERFS_FS_XATTR if REISERFS_FS
84728 + select UBIFS_FS_XATTR if UBIFS_FS
84729 + help
84730 + Enabling this option will allow you to control PaX features on
84731 + a per executable basis via the 'setfattr' utility. The control
84732 + flags will be read from the user.pax.flags extended attribute of
84733 + the file. This marking has the benefit of supporting binary-only
84734 + applications that self-check themselves (e.g., skype) and would
84735 + not tolerate chpax/paxctl changes. The main drawback is that
84736 + extended attributes are not supported by some filesystems (e.g.,
84737 + isofs, squashfs, tmpfs, udf, vfat) so copying files through such
84738 + filesystems will lose the extended attributes and these PaX markings.
84739 +
84740 + If you have applications not marked by the PT_PAX_FLAGS ELF program
84741 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
84742 + support otherwise they will not get any protection.
84743 +
84744 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
84745 + must make sure that the marks are the same if a binary has both marks.
84746 +
84747 + Note that if you enable the legacy EI_PAX marking support as well,
84748 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
84749 +
84750 +choice
84751 + prompt 'MAC system integration'
84752 + default PAX_HAVE_ACL_FLAGS
84753 + help
84754 + Mandatory Access Control systems have the option of controlling
84755 + PaX flags on a per executable basis, choose the method supported
84756 + by your particular system.
84757 +
84758 + - "none": if your MAC system does not interact with PaX,
84759 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
84760 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
84761 +
84762 + NOTE: this option is for developers/integrators only.
84763 +
84764 + config PAX_NO_ACL_FLAGS
84765 + bool 'none'
84766 +
84767 + config PAX_HAVE_ACL_FLAGS
84768 + bool 'direct'
84769 +
84770 + config PAX_HOOK_ACL_FLAGS
84771 + bool 'hook'
84772 +endchoice
84773 +
84774 +endmenu
84775 +
84776 +menu "Non-executable pages"
84777 + depends on PAX
84778 +
84779 +config PAX_NOEXEC
84780 + bool "Enforce non-executable pages"
84781 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
84782 + help
84783 + By design some architectures do not allow for protecting memory
84784 + pages against execution or even if they do, Linux does not make
84785 + use of this feature. In practice this means that if a page is
84786 + readable (such as the stack or heap) it is also executable.
84787 +
84788 + There is a well known exploit technique that makes use of this
84789 + fact and a common programming mistake where an attacker can
84790 + introduce code of his choice somewhere in the attacked program's
84791 + memory (typically the stack or the heap) and then execute it.
84792 +
84793 + If the attacked program was running with different (typically
84794 + higher) privileges than that of the attacker, then he can elevate
84795 + his own privilege level (e.g. get a root shell, write to files for
84796 + which he does not have write access to, etc).
84797 +
84798 + Enabling this option will let you choose from various features
84799 + that prevent the injection and execution of 'foreign' code in
84800 + a program.
84801 +
84802 + This will also break programs that rely on the old behaviour and
84803 + expect that dynamically allocated memory via the malloc() family
84804 + of functions is executable (which it is not). Notable examples
84805 + are the XFree86 4.x server, the java runtime and wine.
84806 +
84807 +config PAX_PAGEEXEC
84808 + bool "Paging based non-executable pages"
84809 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
84810 + select S390_SWITCH_AMODE if S390
84811 + select S390_EXEC_PROTECT if S390
84812 + select ARCH_TRACK_EXEC_LIMIT if X86_32
84813 + help
84814 + This implementation is based on the paging feature of the CPU.
84815 + On i386 without hardware non-executable bit support there is a
84816 + variable but usually low performance impact, however on Intel's
84817 + P4 core based CPUs it is very high so you should not enable this
84818 + for kernels meant to be used on such CPUs.
84819 +
84820 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
84821 + with hardware non-executable bit support there is no performance
84822 + impact, on ppc the impact is negligible.
84823 +
84824 + Note that several architectures require various emulations due to
84825 + badly designed userland ABIs, this will cause a performance impact
84826 + but will disappear as soon as userland is fixed. For example, ppc
84827 + userland MUST have been built with secure-plt by a recent toolchain.
84828 +
84829 +config PAX_SEGMEXEC
84830 + bool "Segmentation based non-executable pages"
84831 + depends on PAX_NOEXEC && X86_32
84832 + help
84833 + This implementation is based on the segmentation feature of the
84834 + CPU and has a very small performance impact, however applications
84835 + will be limited to a 1.5 GB address space instead of the normal
84836 + 3 GB.
84837 +
84838 +config PAX_EMUTRAMP
84839 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
84840 + default y if PARISC
84841 + help
84842 + There are some programs and libraries that for one reason or
84843 + another attempt to execute special small code snippets from
84844 + non-executable memory pages. Most notable examples are the
84845 + signal handler return code generated by the kernel itself and
84846 + the GCC trampolines.
84847 +
84848 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
84849 + such programs will no longer work under your kernel.
84850 +
84851 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
84852 + utilities to enable trampoline emulation for the affected programs
84853 + yet still have the protection provided by the non-executable pages.
84854 +
84855 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
84856 + your system will not even boot.
84857 +
84858 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
84859 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
84860 + for the affected files.
84861 +
84862 + NOTE: enabling this feature *may* open up a loophole in the
84863 + protection provided by non-executable pages that an attacker
84864 + could abuse. Therefore the best solution is to not have any
84865 + files on your system that would require this option. This can
84866 + be achieved by not using libc5 (which relies on the kernel
84867 + signal handler return code) and not using or rewriting programs
84868 + that make use of the nested function implementation of GCC.
84869 + Skilled users can just fix GCC itself so that it implements
84870 + nested function calls in a way that does not interfere with PaX.
84871 +
84872 +config PAX_EMUSIGRT
84873 + bool "Automatically emulate sigreturn trampolines"
84874 + depends on PAX_EMUTRAMP && PARISC
84875 + default y
84876 + help
84877 + Enabling this option will have the kernel automatically detect
84878 + and emulate signal return trampolines executing on the stack
84879 + that would otherwise lead to task termination.
84880 +
84881 + This solution is intended as a temporary one for users with
84882 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
84883 + Modula-3 runtime, etc) or executables linked to such, basically
84884 + everything that does not specify its own SA_RESTORER function in
84885 + normal executable memory like glibc 2.1+ does.
84886 +
84887 + On parisc you MUST enable this option, otherwise your system will
84888 + not even boot.
84889 +
84890 + NOTE: this feature cannot be disabled on a per executable basis
84891 + and since it *does* open up a loophole in the protection provided
84892 + by non-executable pages, the best solution is to not have any
84893 + files on your system that would require this option.
84894 +
84895 +config PAX_MPROTECT
84896 + bool "Restrict mprotect()"
84897 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
84898 + help
84899 + Enabling this option will prevent programs from
84900 + - changing the executable status of memory pages that were
84901 + not originally created as executable,
84902 + - making read-only executable pages writable again,
84903 + - creating executable pages from anonymous memory,
84904 + - making read-only-after-relocations (RELRO) data pages writable again.
84905 +
84906 + You should say Y here to complete the protection provided by
84907 + the enforcement of non-executable pages.
84908 +
84909 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
84910 + this feature on a per file basis.
84911 +
84912 +config PAX_MPROTECT_COMPAT
84913 + bool "Use legacy/compat protection demoting (read help)"
84914 + depends on PAX_MPROTECT
84915 + default n
84916 + help
84917 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
84918 + by sending the proper error code to the application. For some broken
84919 + userland, this can cause problems with Python or other applications. The
84920 + current implementation however allows for applications like clamav to
84921 + detect if JIT compilation/execution is allowed and to fall back gracefully
84922 + to an interpreter-based mode if it does not. While we encourage everyone
84923 + to use the current implementation as-is and push upstream to fix broken
84924 + userland (note that the RWX logging option can assist with this), in some
84925 + environments this may not be possible. Having to disable MPROTECT
84926 + completely on certain binaries reduces the security benefit of PaX,
84927 + so this option is provided for those environments to revert to the old
84928 + behavior.
84929 +
84930 +config PAX_ELFRELOCS
84931 + bool "Allow ELF text relocations (read help)"
84932 + depends on PAX_MPROTECT
84933 + default n
84934 + help
84935 + Non-executable pages and mprotect() restrictions are effective
84936 + in preventing the introduction of new executable code into an
84937 + attacked task's address space. There remain only two venues
84938 + for this kind of attack: if the attacker can execute already
84939 + existing code in the attacked task then he can either have it
84940 + create and mmap() a file containing his code or have it mmap()
84941 + an already existing ELF library that does not have position
84942 + independent code in it and use mprotect() on it to make it
84943 + writable and copy his code there. While protecting against
84944 + the former approach is beyond PaX, the latter can be prevented
84945 + by having only PIC ELF libraries on one's system (which do not
84946 + need to relocate their code). If you are sure this is your case,
84947 + as is the case with all modern Linux distributions, then leave
84948 + this option disabled. You should say 'n' here.
84949 +
84950 +config PAX_ETEXECRELOCS
84951 + bool "Allow ELF ET_EXEC text relocations"
84952 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
84953 + select PAX_ELFRELOCS
84954 + default y
84955 + help
84956 + On some architectures there are incorrectly created applications
84957 + that require text relocations and would not work without enabling
84958 + this option. If you are an alpha, ia64 or parisc user, you should
84959 + enable this option and disable it once you have made sure that
84960 + none of your applications need it.
84961 +
84962 +config PAX_EMUPLT
84963 + bool "Automatically emulate ELF PLT"
84964 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
84965 + default y
84966 + help
84967 + Enabling this option will have the kernel automatically detect
84968 + and emulate the Procedure Linkage Table entries in ELF files.
84969 + On some architectures such entries are in writable memory, and
84970 + become non-executable leading to task termination. Therefore
84971 + it is mandatory that you enable this option on alpha, parisc,
84972 + sparc and sparc64, otherwise your system would not even boot.
84973 +
84974 + NOTE: this feature *does* open up a loophole in the protection
84975 + provided by the non-executable pages, therefore the proper
84976 + solution is to modify the toolchain to produce a PLT that does
84977 + not need to be writable.
84978 +
84979 +config PAX_DLRESOLVE
84980 + bool 'Emulate old glibc resolver stub'
84981 + depends on PAX_EMUPLT && SPARC
84982 + default n
84983 + help
84984 + This option is needed if userland has an old glibc (before 2.4)
84985 + that puts a 'save' instruction into the runtime generated resolver
84986 + stub that needs special emulation.
84987 +
84988 +config PAX_KERNEXEC
84989 + bool "Enforce non-executable kernel pages"
84990 + depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
84991 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
84992 + select PAX_KERNEXEC_PLUGIN if X86_64
84993 + help
84994 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
84995 + that is, enabling this option will make it harder to inject
84996 + and execute 'foreign' code in kernel memory itself.
84997 +
84998 + Note that on x86_64 kernels there is a known regression when
84999 + this feature and KVM/VMX are both enabled in the host kernel.
85000 +
85001 +choice
85002 + prompt "Return Address Instrumentation Method"
85003 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
85004 + depends on PAX_KERNEXEC_PLUGIN
85005 + help
85006 + Select the method used to instrument function pointer dereferences.
85007 + Note that binary modules cannot be instrumented by this approach.
85008 +
85009 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
85010 + bool "bts"
85011 + help
85012 + This method is compatible with binary only modules but has
85013 + a higher runtime overhead.
85014 +
85015 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
85016 + bool "or"
85017 + depends on !PARAVIRT
85018 + help
85019 + This method is incompatible with binary only modules but has
85020 + a lower runtime overhead.
85021 +endchoice
85022 +
85023 +config PAX_KERNEXEC_PLUGIN_METHOD
85024 + string
85025 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
85026 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
85027 + default ""
85028 +
85029 +config PAX_KERNEXEC_MODULE_TEXT
85030 + int "Minimum amount of memory reserved for module code"
85031 + default "4"
85032 + depends on PAX_KERNEXEC && X86_32 && MODULES
85033 + help
85034 + Due to implementation details the kernel must reserve a fixed
85035 + amount of memory for module code at compile time that cannot be
85036 + changed at runtime. Here you can specify the minimum amount
85037 + in MB that will be reserved. Due to the same implementation
85038 + details this size will always be rounded up to the next 2/4 MB
85039 + boundary (depends on PAE) so the actually available memory for
85040 + module code will usually be more than this minimum.
85041 +
85042 + The default 4 MB should be enough for most users but if you have
85043 + an excessive number of modules (e.g., most distribution configs
85044 + compile many drivers as modules) or use huge modules such as
85045 + nvidia's kernel driver, you will need to adjust this amount.
85046 + A good rule of thumb is to look at your currently loaded kernel
85047 + modules and add up their sizes.
85048 +
85049 +endmenu
85050 +
85051 +menu "Address Space Layout Randomization"
85052 + depends on PAX
85053 +
85054 +config PAX_ASLR
85055 + bool "Address Space Layout Randomization"
85056 + help
85057 + Many if not most exploit techniques rely on the knowledge of
85058 + certain addresses in the attacked program. The following options
85059 + will allow the kernel to apply a certain amount of randomization
85060 + to specific parts of the program thereby forcing an attacker to
85061 + guess them in most cases. Any failed guess will most likely crash
85062 + the attacked program which allows the kernel to detect such attempts
85063 + and react on them. PaX itself provides no reaction mechanisms,
85064 + instead it is strongly encouraged that you make use of Nergal's
85065 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
85066 + (http://www.grsecurity.net/) built-in crash detection features or
85067 + develop one yourself.
85068 +
85069 + By saying Y here you can choose to randomize the following areas:
85070 + - top of the task's kernel stack
85071 + - top of the task's userland stack
85072 + - base address for mmap() requests that do not specify one
85073 + (this includes all libraries)
85074 + - base address of the main executable
85075 +
85076 + It is strongly recommended to say Y here as address space layout
85077 + randomization has negligible impact on performance yet it provides
85078 + a very effective protection.
85079 +
85080 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
85081 + this feature on a per file basis.
85082 +
85083 +config PAX_RANDKSTACK
85084 + bool "Randomize kernel stack base"
85085 + depends on X86_TSC && X86
85086 + help
85087 + By saying Y here the kernel will randomize every task's kernel
85088 + stack on every system call. This will not only force an attacker
85089 + to guess it but also prevent him from making use of possible
85090 + leaked information about it.
85091 +
85092 + Since the kernel stack is a rather scarce resource, randomization
85093 + may cause unexpected stack overflows, therefore you should very
85094 + carefully test your system. Note that once enabled in the kernel
85095 + configuration, this feature cannot be disabled on a per file basis.
85096 +
85097 +config PAX_RANDUSTACK
85098 + bool "Randomize user stack base"
85099 + depends on PAX_ASLR
85100 + help
85101 + By saying Y here the kernel will randomize every task's userland
85102 + stack. The randomization is done in two steps where the second
85103 + one may apply a big amount of shift to the top of the stack and
85104 + cause problems for programs that want to use lots of memory (more
85105 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
85106 + For this reason the second step can be controlled by 'chpax' or
85107 + 'paxctl' on a per file basis.
85108 +
85109 +config PAX_RANDMMAP
85110 + bool "Randomize mmap() base"
85111 + depends on PAX_ASLR
85112 + help
85113 + By saying Y here the kernel will use a randomized base address for
85114 + mmap() requests that do not specify one themselves. As a result
85115 + all dynamically loaded libraries will appear at random addresses
85116 + and therefore be harder to exploit by a technique where an attacker
85117 + attempts to execute library code for his purposes (e.g. spawn a
85118 + shell from an exploited program that is running at an elevated
85119 + privilege level).
85120 +
85121 + Furthermore, if a program is relinked as a dynamic ELF file, its
85122 + base address will be randomized as well, completing the full
85123 + randomization of the address space layout. Attacking such programs
85124 + becomes a guess game. You can find an example of doing this at
85125 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
85126 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
85127 +
85128 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
85129 + feature on a per file basis.
85130 +
85131 +endmenu
85132 +
85133 +menu "Miscellaneous hardening features"
85134 +
85135 +config PAX_MEMORY_SANITIZE
85136 + bool "Sanitize all freed memory"
85137 + help
85138 + By saying Y here the kernel will erase memory pages as soon as they
85139 + are freed. This in turn reduces the lifetime of data stored in the
85140 + pages, making it less likely that sensitive information such as
85141 + passwords, cryptographic secrets, etc stay in memory for too long.
85142 +
85143 + This is especially useful for programs whose runtime is short, long
85144 + lived processes and the kernel itself benefit from this as long as
85145 + they operate on whole memory pages and ensure timely freeing of pages
85146 + that may hold sensitive information.
85147 +
85148 + The tradeoff is performance impact, on a single CPU system kernel
85149 + compilation sees a 3% slowdown, other systems and workloads may vary
85150 + and you are advised to test this feature on your expected workload
85151 + before deploying it.
85152 +
85153 + Note that this feature does not protect data stored in live pages,
85154 + e.g., process memory swapped to disk may stay there for a long time.
85155 +
85156 +config PAX_MEMORY_STACKLEAK
85157 + bool "Sanitize kernel stack"
85158 + depends on X86
85159 + help
85160 + By saying Y here the kernel will erase the kernel stack before it
85161 + returns from a system call. This in turn reduces the information
85162 + that a kernel stack leak bug can reveal.
85163 +
85164 + Note that such a bug can still leak information that was put on
85165 + the stack by the current system call (the one eventually triggering
85166 + the bug) but traces of earlier system calls on the kernel stack
85167 + cannot leak anymore.
85168 +
85169 + The tradeoff is performance impact, on a single CPU system kernel
85170 + compilation sees a 1% slowdown, other systems and workloads may vary
85171 + and you are advised to test this feature on your expected workload
85172 + before deploying it.
85173 +
85174 + Note: full support for this feature requires gcc with plugin support
85175 + so make sure your compiler is at least gcc 4.5.0. Using older gcc
85176 + versions means that functions with large enough stack frames may
85177 + leave uninitialized memory behind that may be exposed to a later
85178 + syscall leaking the stack.
85179 +
85180 +config PAX_MEMORY_UDEREF
85181 + bool "Prevent invalid userland pointer dereference"
85182 + depends on X86 && !UML_X86 && !XEN
85183 + select PAX_PER_CPU_PGD if X86_64
85184 + help
85185 + By saying Y here the kernel will be prevented from dereferencing
85186 + userland pointers in contexts where the kernel expects only kernel
85187 + pointers. This is both a useful runtime debugging feature and a
85188 + security measure that prevents exploiting a class of kernel bugs.
85189 +
85190 + The tradeoff is that some virtualization solutions may experience
85191 + a huge slowdown and therefore you should not enable this feature
85192 + for kernels meant to run in such environments. Whether a given VM
85193 + solution is affected or not is best determined by simply trying it
85194 + out, the performance impact will be obvious right on boot as this
85195 + mechanism engages from very early on. A good rule of thumb is that
85196 + VMs running on CPUs without hardware virtualization support (i.e.,
85197 + the majority of IA-32 CPUs) will likely experience the slowdown.
85198 +
85199 +config PAX_REFCOUNT
85200 + bool "Prevent various kernel object reference counter overflows"
85201 + depends on GRKERNSEC && (X86 || SPARC64)
85202 + help
85203 + By saying Y here the kernel will detect and prevent overflowing
85204 + various (but not all) kinds of object reference counters. Such
85205 + overflows can normally occur due to bugs only and are often, if
85206 + not always, exploitable.
85207 +
85208 + The tradeoff is that data structures protected by an overflowed
85209 + refcount will never be freed and therefore will leak memory. Note
85210 + that this leak also happens even without this protection but in
85211 + that case the overflow can eventually trigger the freeing of the
85212 + data structure while it is still being used elsewhere, resulting
85213 + in the exploitable situation that this feature prevents.
85214 +
85215 + Since this has a negligible performance impact, you should enable
85216 + this feature.
85217 +
85218 +config PAX_USERCOPY
85219 + bool "Harden heap object copies between kernel and userland"
85220 + depends on X86 || PPC || SPARC || ARM
85221 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
85222 + help
85223 + By saying Y here the kernel will enforce the size of heap objects
85224 + when they are copied in either direction between the kernel and
85225 + userland, even if only a part of the heap object is copied.
85226 +
85227 + Specifically, this checking prevents information leaking from the
85228 + kernel heap during kernel to userland copies (if the kernel heap
85229 + object is otherwise fully initialized) and prevents kernel heap
85230 + overflows during userland to kernel copies.
85231 +
85232 + Note that the current implementation provides the strictest bounds
85233 + checks for the SLUB allocator.
85234 +
85235 + Enabling this option also enables per-slab cache protection against
85236 + data in a given cache being copied into/out of via userland
85237 + accessors. Though the whitelist of regions will be reduced over
85238 + time, it notably protects important data structures like task structs.
85239 +
85240 +
85241 + If frame pointers are enabled on x86, this option will also
85242 + restrict copies into and out of the kernel stack to local variables
85243 + within a single frame.
85244 +
85245 + Since this has a negligible performance impact, you should enable
85246 + this feature.
85247 +
85248 +endmenu
85249 +
85250 +endmenu
85251 +
85252 config KEYS
85253 bool "Enable access key retention support"
85254 help
85255 @@ -146,7 +765,7 @@ config INTEL_TXT
85256 config LSM_MMAP_MIN_ADDR
85257 int "Low address space for LSM to protect from user allocation"
85258 depends on SECURITY && SECURITY_SELINUX
85259 - default 65536
85260 + default 32768
85261 help
85262 This is the portion of low virtual memory which should be protected
85263 from userspace allocation. Keeping a user from writing to low pages
85264 diff --git a/security/capability.c b/security/capability.c
85265 index fce07a7..5f12858 100644
85266 --- a/security/capability.c
85267 +++ b/security/capability.c
85268 @@ -890,7 +890,7 @@ static void cap_audit_rule_free(void *lsmrule)
85269 }
85270 #endif /* CONFIG_AUDIT */
85271
85272 -struct security_operations default_security_ops = {
85273 +struct security_operations default_security_ops __read_only = {
85274 .name = "default",
85275 };
85276
85277 diff --git a/security/commoncap.c b/security/commoncap.c
85278 index fe30751..aaba312 100644
85279 --- a/security/commoncap.c
85280 +++ b/security/commoncap.c
85281 @@ -27,6 +27,8 @@
85282 #include <linux/sched.h>
85283 #include <linux/prctl.h>
85284 #include <linux/securebits.h>
85285 +#include <linux/syslog.h>
85286 +#include <net/sock.h>
85287
85288 /*
85289 * If a non-root user executes a setuid-root binary in
85290 @@ -50,9 +52,18 @@ static void warn_setuid_and_fcaps_mixed(char *fname)
85291 }
85292 }
85293
85294 +#ifdef CONFIG_NET
85295 +extern kernel_cap_t gr_cap_rtnetlink(struct sock *sk);
85296 +#endif
85297 +
85298 int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
85299 {
85300 +#ifdef CONFIG_NET
85301 + NETLINK_CB(skb).eff_cap = gr_cap_rtnetlink(sk);
85302 +#else
85303 NETLINK_CB(skb).eff_cap = current_cap();
85304 +#endif
85305 +
85306 return 0;
85307 }
85308
85309 @@ -582,6 +593,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
85310 {
85311 const struct cred *cred = current_cred();
85312
85313 + if (gr_acl_enable_at_secure())
85314 + return 1;
85315 +
85316 if (cred->uid != 0) {
85317 if (bprm->cap_effective)
85318 return 1;
85319 @@ -956,13 +970,18 @@ error:
85320 /**
85321 * cap_syslog - Determine whether syslog function is permitted
85322 * @type: Function requested
85323 + * @from_file: Whether this request came from an open file (i.e. /proc)
85324 *
85325 * Determine whether the current process is permitted to use a particular
85326 * syslog function, returning 0 if permission is granted, -ve if not.
85327 */
85328 -int cap_syslog(int type)
85329 +int cap_syslog(int type, bool from_file)
85330 {
85331 - if ((type != 3 && type != 10) && !capable(CAP_SYS_ADMIN))
85332 + /* /proc/kmsg can open be opened by CAP_SYS_ADMIN */
85333 + if (type != SYSLOG_ACTION_OPEN && from_file)
85334 + return 0;
85335 + if ((type != SYSLOG_ACTION_READ_ALL &&
85336 + type != SYSLOG_ACTION_SIZE_BUFFER) && !capable(CAP_SYS_ADMIN))
85337 return -EPERM;
85338 return 0;
85339 }
85340 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
85341 index 165eb53..b1db4eb 100644
85342 --- a/security/integrity/ima/ima.h
85343 +++ b/security/integrity/ima/ima.h
85344 @@ -84,8 +84,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
85345 extern spinlock_t ima_queue_lock;
85346
85347 struct ima_h_table {
85348 - atomic_long_t len; /* number of stored measurements in the list */
85349 - atomic_long_t violations;
85350 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
85351 + atomic_long_unchecked_t violations;
85352 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
85353 };
85354 extern struct ima_h_table ima_htable;
85355 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
85356 index 852bf85..35d6df3 100644
85357 --- a/security/integrity/ima/ima_api.c
85358 +++ b/security/integrity/ima/ima_api.c
85359 @@ -74,7 +74,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
85360 int result;
85361
85362 /* can overflow, only indicator */
85363 - atomic_long_inc(&ima_htable.violations);
85364 + atomic_long_inc_unchecked(&ima_htable.violations);
85365
85366 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
85367 if (!entry) {
85368 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
85369 index 0c72c9c..433e29b 100644
85370 --- a/security/integrity/ima/ima_fs.c
85371 +++ b/security/integrity/ima/ima_fs.c
85372 @@ -27,12 +27,12 @@
85373 static int valid_policy = 1;
85374 #define TMPBUFLEN 12
85375 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
85376 - loff_t *ppos, atomic_long_t *val)
85377 + loff_t *ppos, atomic_long_unchecked_t *val)
85378 {
85379 char tmpbuf[TMPBUFLEN];
85380 ssize_t len;
85381
85382 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
85383 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
85384 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
85385 }
85386
85387 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
85388 index e19316d..339f7ae 100644
85389 --- a/security/integrity/ima/ima_queue.c
85390 +++ b/security/integrity/ima/ima_queue.c
85391 @@ -78,7 +78,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
85392 INIT_LIST_HEAD(&qe->later);
85393 list_add_tail_rcu(&qe->later, &ima_measurements);
85394
85395 - atomic_long_inc(&ima_htable.len);
85396 + atomic_long_inc_unchecked(&ima_htable.len);
85397 key = ima_hash_key(entry->digest);
85398 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
85399 return 0;
85400 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
85401 index e031952..c9a535d 100644
85402 --- a/security/keys/keyring.c
85403 +++ b/security/keys/keyring.c
85404 @@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring,
85405 ret = -EFAULT;
85406
85407 for (loop = 0; loop < klist->nkeys; loop++) {
85408 + key_serial_t serial;
85409 key = klist->keys[loop];
85410 + serial = key->serial;
85411
85412 tmp = sizeof(key_serial_t);
85413 if (tmp > buflen)
85414 tmp = buflen;
85415
85416 - if (copy_to_user(buffer,
85417 - &key->serial,
85418 - tmp) != 0)
85419 + if (copy_to_user(buffer, &serial, tmp))
85420 goto error;
85421
85422 buflen -= tmp;
85423 diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
85424 index 931cfda..e71808a 100644
85425 --- a/security/keys/process_keys.c
85426 +++ b/security/keys/process_keys.c
85427 @@ -208,7 +208,7 @@ static int install_process_keyring(void)
85428 ret = install_process_keyring_to_cred(new);
85429 if (ret < 0) {
85430 abort_creds(new);
85431 - return ret != -EEXIST ?: 0;
85432 + return ret != -EEXIST ? ret : 0;
85433 }
85434
85435 return commit_creds(new);
85436 diff --git a/security/min_addr.c b/security/min_addr.c
85437 index d9f9425..c28cef4 100644
85438 --- a/security/min_addr.c
85439 +++ b/security/min_addr.c
85440 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
85441 */
85442 static void update_mmap_min_addr(void)
85443 {
85444 +#ifndef SPARC
85445 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
85446 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
85447 mmap_min_addr = dac_mmap_min_addr;
85448 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
85449 #else
85450 mmap_min_addr = dac_mmap_min_addr;
85451 #endif
85452 +#endif
85453 }
85454
85455 /*
85456 diff --git a/security/root_plug.c b/security/root_plug.c
85457 index 2f7ffa6..0455400 100644
85458 --- a/security/root_plug.c
85459 +++ b/security/root_plug.c
85460 @@ -70,7 +70,7 @@ static int rootplug_bprm_check_security (struct linux_binprm *bprm)
85461 return 0;
85462 }
85463
85464 -static struct security_operations rootplug_security_ops = {
85465 +static struct security_operations rootplug_security_ops __read_only = {
85466 .bprm_check_security = rootplug_bprm_check_security,
85467 };
85468
85469 diff --git a/security/security.c b/security/security.c
85470 index c4c6732..7abf13b 100644
85471 --- a/security/security.c
85472 +++ b/security/security.c
85473 @@ -24,7 +24,7 @@ static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1];
85474 extern struct security_operations default_security_ops;
85475 extern void security_fixup_ops(struct security_operations *ops);
85476
85477 -struct security_operations *security_ops; /* Initialized to NULL */
85478 +struct security_operations *security_ops __read_only; /* Initialized to NULL */
85479
85480 static inline int verify(struct security_operations *ops)
85481 {
85482 @@ -106,7 +106,7 @@ int __init security_module_enable(struct security_operations *ops)
85483 * If there is already a security module registered with the kernel,
85484 * an error will be returned. Otherwise %0 is returned on success.
85485 */
85486 -int register_security(struct security_operations *ops)
85487 +int __init register_security(struct security_operations *ops)
85488 {
85489 if (verify(ops)) {
85490 printk(KERN_DEBUG "%s could not verify "
85491 @@ -199,9 +199,9 @@ int security_quota_on(struct dentry *dentry)
85492 return security_ops->quota_on(dentry);
85493 }
85494
85495 -int security_syslog(int type)
85496 +int security_syslog(int type, bool from_file)
85497 {
85498 - return security_ops->syslog(type);
85499 + return security_ops->syslog(type, from_file);
85500 }
85501
85502 int security_settime(struct timespec *ts, struct timezone *tz)
85503 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
85504 index a106754..ca3a589 100644
85505 --- a/security/selinux/hooks.c
85506 +++ b/security/selinux/hooks.c
85507 @@ -76,6 +76,7 @@
85508 #include <linux/selinux.h>
85509 #include <linux/mutex.h>
85510 #include <linux/posix-timers.h>
85511 +#include <linux/syslog.h>
85512
85513 #include "avc.h"
85514 #include "objsec.h"
85515 @@ -131,7 +132,7 @@ int selinux_enabled = 1;
85516 * Minimal support for a secondary security module,
85517 * just to allow the use of the capability module.
85518 */
85519 -static struct security_operations *secondary_ops;
85520 +static struct security_operations *secondary_ops __read_only;
85521
85522 /* Lists of inode and superblock security structures initialized
85523 before the policy was loaded. */
85524 @@ -2050,29 +2051,30 @@ static int selinux_quota_on(struct dentry *dentry)
85525 return dentry_has_perm(cred, NULL, dentry, FILE__QUOTAON);
85526 }
85527
85528 -static int selinux_syslog(int type)
85529 +static int selinux_syslog(int type, bool from_file)
85530 {
85531 int rc;
85532
85533 - rc = cap_syslog(type);
85534 + rc = cap_syslog(type, from_file);
85535 if (rc)
85536 return rc;
85537
85538 switch (type) {
85539 - case 3: /* Read last kernel messages */
85540 - case 10: /* Return size of the log buffer */
85541 + case SYSLOG_ACTION_READ_ALL: /* Read last kernel messages */
85542 + case SYSLOG_ACTION_SIZE_BUFFER: /* Return size of the log buffer */
85543 rc = task_has_system(current, SYSTEM__SYSLOG_READ);
85544 break;
85545 - case 6: /* Disable logging to console */
85546 - case 7: /* Enable logging to console */
85547 - case 8: /* Set level of messages printed to console */
85548 + case SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging to console */
85549 + case SYSLOG_ACTION_CONSOLE_ON: /* Enable logging to console */
85550 + /* Set level of messages printed to console */
85551 + case SYSLOG_ACTION_CONSOLE_LEVEL:
85552 rc = task_has_system(current, SYSTEM__SYSLOG_CONSOLE);
85553 break;
85554 - case 0: /* Close log */
85555 - case 1: /* Open log */
85556 - case 2: /* Read from log */
85557 - case 4: /* Read/clear last kernel messages */
85558 - case 5: /* Clear ring buffer */
85559 + case SYSLOG_ACTION_CLOSE: /* Close log */
85560 + case SYSLOG_ACTION_OPEN: /* Open log */
85561 + case SYSLOG_ACTION_READ: /* Read from log */
85562 + case SYSLOG_ACTION_READ_CLEAR: /* Read/clear last kernel messages */
85563 + case SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
85564 default:
85565 rc = task_has_system(current, SYSTEM__SYSLOG_MOD);
85566 break;
85567 @@ -5457,7 +5459,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
85568
85569 #endif
85570
85571 -static struct security_operations selinux_ops = {
85572 +static struct security_operations selinux_ops __read_only = {
85573 .name = "selinux",
85574
85575 .ptrace_access_check = selinux_ptrace_access_check,
85576 @@ -5841,7 +5843,9 @@ int selinux_disable(void)
85577 avc_disable();
85578
85579 /* Reset security_ops to the secondary module, dummy or capability. */
85580 + pax_open_kernel();
85581 security_ops = secondary_ops;
85582 + pax_close_kernel();
85583
85584 /* Unregister netfilter hooks. */
85585 selinux_nf_ip_exit();
85586 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
85587 index 13128f9..c23c736 100644
85588 --- a/security/selinux/include/xfrm.h
85589 +++ b/security/selinux/include/xfrm.h
85590 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
85591
85592 static inline void selinux_xfrm_notify_policyload(void)
85593 {
85594 - atomic_inc(&flow_cache_genid);
85595 + atomic_inc_unchecked(&flow_cache_genid);
85596 }
85597 #else
85598 static inline int selinux_xfrm_enabled(void)
85599 diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
85600 index ff17820..d68084c 100644
85601 --- a/security/selinux/ss/services.c
85602 +++ b/security/selinux/ss/services.c
85603 @@ -1715,6 +1715,8 @@ int security_load_policy(void *data, size_t len)
85604 int rc = 0;
85605 struct policy_file file = { data, len }, *fp = &file;
85606
85607 + pax_track_stack();
85608 +
85609 if (!ss_initialized) {
85610 avtab_cache_init();
85611 if (policydb_read(&policydb, fp)) {
85612 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
85613 index c33b6bb..b51f19e 100644
85614 --- a/security/smack/smack_lsm.c
85615 +++ b/security/smack/smack_lsm.c
85616 @@ -157,12 +157,12 @@ static int smack_ptrace_traceme(struct task_struct *ptp)
85617 *
85618 * Returns 0 on success, error code otherwise.
85619 */
85620 -static int smack_syslog(int type)
85621 +static int smack_syslog(int type, bool from_file)
85622 {
85623 int rc;
85624 char *sp = current_security();
85625
85626 - rc = cap_syslog(type);
85627 + rc = cap_syslog(type, from_file);
85628 if (rc != 0)
85629 return rc;
85630
85631 @@ -3073,7 +3073,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
85632 return 0;
85633 }
85634
85635 -struct security_operations smack_ops = {
85636 +struct security_operations smack_ops __read_only = {
85637 .name = "smack",
85638
85639 .ptrace_access_check = smack_ptrace_access_check,
85640 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
85641 index 9548a09..9a5f384 100644
85642 --- a/security/tomoyo/tomoyo.c
85643 +++ b/security/tomoyo/tomoyo.c
85644 @@ -275,7 +275,7 @@ static int tomoyo_dentry_open(struct file *f, const struct cred *cred)
85645 * tomoyo_security_ops is a "struct security_operations" which is used for
85646 * registering TOMOYO.
85647 */
85648 -static struct security_operations tomoyo_security_ops = {
85649 +static struct security_operations tomoyo_security_ops __read_only = {
85650 .name = "tomoyo",
85651 .cred_alloc_blank = tomoyo_cred_alloc_blank,
85652 .cred_prepare = tomoyo_cred_prepare,
85653 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
85654 index 84bb07d..c2ab6b6 100644
85655 --- a/sound/aoa/codecs/onyx.c
85656 +++ b/sound/aoa/codecs/onyx.c
85657 @@ -53,7 +53,7 @@ struct onyx {
85658 spdif_locked:1,
85659 analog_locked:1,
85660 original_mute:2;
85661 - int open_count;
85662 + local_t open_count;
85663 struct codec_info *codec_info;
85664
85665 /* mutex serializes concurrent access to the device
85666 @@ -752,7 +752,7 @@ static int onyx_open(struct codec_info_item *cii,
85667 struct onyx *onyx = cii->codec_data;
85668
85669 mutex_lock(&onyx->mutex);
85670 - onyx->open_count++;
85671 + local_inc(&onyx->open_count);
85672 mutex_unlock(&onyx->mutex);
85673
85674 return 0;
85675 @@ -764,8 +764,7 @@ static int onyx_close(struct codec_info_item *cii,
85676 struct onyx *onyx = cii->codec_data;
85677
85678 mutex_lock(&onyx->mutex);
85679 - onyx->open_count--;
85680 - if (!onyx->open_count)
85681 + if (local_dec_and_test(&onyx->open_count))
85682 onyx->spdif_locked = onyx->analog_locked = 0;
85683 mutex_unlock(&onyx->mutex);
85684
85685 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
85686 index ffd2025..df062c9 100644
85687 --- a/sound/aoa/codecs/onyx.h
85688 +++ b/sound/aoa/codecs/onyx.h
85689 @@ -11,6 +11,7 @@
85690 #include <linux/i2c.h>
85691 #include <asm/pmac_low_i2c.h>
85692 #include <asm/prom.h>
85693 +#include <asm/local.h>
85694
85695 /* PCM3052 register definitions */
85696
85697 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
85698 index d9c9635..bc0a5a2 100644
85699 --- a/sound/core/oss/pcm_oss.c
85700 +++ b/sound/core/oss/pcm_oss.c
85701 @@ -1395,7 +1395,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
85702 }
85703 } else {
85704 tmp = snd_pcm_oss_write2(substream,
85705 - (const char __force *)buf,
85706 + (const char __force_kernel *)buf,
85707 runtime->oss.period_bytes, 0);
85708 if (tmp <= 0)
85709 goto err;
85710 @@ -1483,7 +1483,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
85711 xfer += tmp;
85712 runtime->oss.buffer_used -= tmp;
85713 } else {
85714 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
85715 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
85716 runtime->oss.period_bytes, 0);
85717 if (tmp <= 0)
85718 goto err;
85719 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
85720 index 038232d..7dd9e5c 100644
85721 --- a/sound/core/pcm_compat.c
85722 +++ b/sound/core/pcm_compat.c
85723 @@ -30,7 +30,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
85724 int err;
85725
85726 fs = snd_enter_user();
85727 - err = snd_pcm_delay(substream, &delay);
85728 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
85729 snd_leave_user(fs);
85730 if (err < 0)
85731 return err;
85732 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
85733 index e6d2d97..4843949 100644
85734 --- a/sound/core/pcm_native.c
85735 +++ b/sound/core/pcm_native.c
85736 @@ -2747,11 +2747,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
85737 switch (substream->stream) {
85738 case SNDRV_PCM_STREAM_PLAYBACK:
85739 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
85740 - (void __user *)arg);
85741 + (void __force_user *)arg);
85742 break;
85743 case SNDRV_PCM_STREAM_CAPTURE:
85744 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
85745 - (void __user *)arg);
85746 + (void __force_user *)arg);
85747 break;
85748 default:
85749 result = -EINVAL;
85750 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
85751 index 1f99767..14636533 100644
85752 --- a/sound/core/seq/seq_device.c
85753 +++ b/sound/core/seq/seq_device.c
85754 @@ -63,7 +63,7 @@ struct ops_list {
85755 int argsize; /* argument size */
85756
85757 /* operators */
85758 - struct snd_seq_dev_ops ops;
85759 + struct snd_seq_dev_ops *ops;
85760
85761 /* registred devices */
85762 struct list_head dev_list; /* list of devices */
85763 @@ -332,7 +332,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
85764
85765 mutex_lock(&ops->reg_mutex);
85766 /* copy driver operators */
85767 - ops->ops = *entry;
85768 + ops->ops = entry;
85769 ops->driver |= DRIVER_LOADED;
85770 ops->argsize = argsize;
85771
85772 @@ -462,7 +462,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
85773 dev->name, ops->id, ops->argsize, dev->argsize);
85774 return -EINVAL;
85775 }
85776 - if (ops->ops.init_device(dev) >= 0) {
85777 + if (ops->ops->init_device(dev) >= 0) {
85778 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
85779 ops->num_init_devices++;
85780 } else {
85781 @@ -489,7 +489,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
85782 dev->name, ops->id, ops->argsize, dev->argsize);
85783 return -EINVAL;
85784 }
85785 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
85786 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
85787 dev->status = SNDRV_SEQ_DEVICE_FREE;
85788 dev->driver_data = NULL;
85789 ops->num_init_devices--;
85790 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
85791 index 9284829..ac8e8b2 100644
85792 --- a/sound/drivers/mts64.c
85793 +++ b/sound/drivers/mts64.c
85794 @@ -27,6 +27,7 @@
85795 #include <sound/initval.h>
85796 #include <sound/rawmidi.h>
85797 #include <sound/control.h>
85798 +#include <asm/local.h>
85799
85800 #define CARD_NAME "Miditerminal 4140"
85801 #define DRIVER_NAME "MTS64"
85802 @@ -65,7 +66,7 @@ struct mts64 {
85803 struct pardevice *pardev;
85804 int pardev_claimed;
85805
85806 - int open_count;
85807 + local_t open_count;
85808 int current_midi_output_port;
85809 int current_midi_input_port;
85810 u8 mode[MTS64_NUM_INPUT_PORTS];
85811 @@ -695,7 +696,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
85812 {
85813 struct mts64 *mts = substream->rmidi->private_data;
85814
85815 - if (mts->open_count == 0) {
85816 + if (local_read(&mts->open_count) == 0) {
85817 /* We don't need a spinlock here, because this is just called
85818 if the device has not been opened before.
85819 So there aren't any IRQs from the device */
85820 @@ -703,7 +704,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
85821
85822 msleep(50);
85823 }
85824 - ++(mts->open_count);
85825 + local_inc(&mts->open_count);
85826
85827 return 0;
85828 }
85829 @@ -713,8 +714,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
85830 struct mts64 *mts = substream->rmidi->private_data;
85831 unsigned long flags;
85832
85833 - --(mts->open_count);
85834 - if (mts->open_count == 0) {
85835 + if (local_dec_return(&mts->open_count) == 0) {
85836 /* We need the spinlock_irqsave here because we can still
85837 have IRQs at this point */
85838 spin_lock_irqsave(&mts->lock, flags);
85839 @@ -723,8 +723,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
85840
85841 msleep(500);
85842
85843 - } else if (mts->open_count < 0)
85844 - mts->open_count = 0;
85845 + } else if (local_read(&mts->open_count) < 0)
85846 + local_set(&mts->open_count, 0);
85847
85848 return 0;
85849 }
85850 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
85851 index 01997f2..cbc1195 100644
85852 --- a/sound/drivers/opl4/opl4_lib.c
85853 +++ b/sound/drivers/opl4/opl4_lib.c
85854 @@ -27,7 +27,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
85855 MODULE_DESCRIPTION("OPL4 driver");
85856 MODULE_LICENSE("GPL");
85857
85858 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
85859 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
85860 {
85861 int timeout = 10;
85862 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
85863 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
85864 index 60158e2..0a0cc1a 100644
85865 --- a/sound/drivers/portman2x4.c
85866 +++ b/sound/drivers/portman2x4.c
85867 @@ -46,6 +46,7 @@
85868 #include <sound/initval.h>
85869 #include <sound/rawmidi.h>
85870 #include <sound/control.h>
85871 +#include <asm/local.h>
85872
85873 #define CARD_NAME "Portman 2x4"
85874 #define DRIVER_NAME "portman"
85875 @@ -83,7 +84,7 @@ struct portman {
85876 struct pardevice *pardev;
85877 int pardev_claimed;
85878
85879 - int open_count;
85880 + local_t open_count;
85881 int mode[PORTMAN_NUM_INPUT_PORTS];
85882 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
85883 };
85884 diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
85885 index 02f79d2..8691d43 100644
85886 --- a/sound/isa/cmi8330.c
85887 +++ b/sound/isa/cmi8330.c
85888 @@ -173,7 +173,7 @@ struct snd_cmi8330 {
85889
85890 struct snd_pcm *pcm;
85891 struct snd_cmi8330_stream {
85892 - struct snd_pcm_ops ops;
85893 + snd_pcm_ops_no_const ops;
85894 snd_pcm_open_callback_t open;
85895 void *private_data; /* sb or wss */
85896 } streams[2];
85897 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
85898 index 733b014..56ce96f 100644
85899 --- a/sound/oss/sb_audio.c
85900 +++ b/sound/oss/sb_audio.c
85901 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
85902 buf16 = (signed short *)(localbuf + localoffs);
85903 while (c)
85904 {
85905 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
85906 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
85907 if (copy_from_user(lbuf8,
85908 userbuf+useroffs + p,
85909 locallen))
85910 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
85911 index 3136c88..28ad950 100644
85912 --- a/sound/oss/swarm_cs4297a.c
85913 +++ b/sound/oss/swarm_cs4297a.c
85914 @@ -2577,7 +2577,6 @@ static int __init cs4297a_init(void)
85915 {
85916 struct cs4297a_state *s;
85917 u32 pwr, id;
85918 - mm_segment_t fs;
85919 int rval;
85920 #ifndef CONFIG_BCM_CS4297A_CSWARM
85921 u64 cfg;
85922 @@ -2667,22 +2666,23 @@ static int __init cs4297a_init(void)
85923 if (!rval) {
85924 char *sb1250_duart_present;
85925
85926 +#if 0
85927 + mm_segment_t fs;
85928 fs = get_fs();
85929 set_fs(KERNEL_DS);
85930 -#if 0
85931 val = SOUND_MASK_LINE;
85932 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
85933 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
85934 val = initvol[i].vol;
85935 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
85936 }
85937 + set_fs(fs);
85938 // cs4297a_write_ac97(s, 0x18, 0x0808);
85939 #else
85940 // cs4297a_write_ac97(s, 0x5e, 0x180);
85941 cs4297a_write_ac97(s, 0x02, 0x0808);
85942 cs4297a_write_ac97(s, 0x18, 0x0808);
85943 #endif
85944 - set_fs(fs);
85945
85946 list_add(&s->list, &cs4297a_devs);
85947
85948 diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
85949 index 78288db..0406809 100644
85950 --- a/sound/pci/ac97/ac97_codec.c
85951 +++ b/sound/pci/ac97/ac97_codec.c
85952 @@ -1952,7 +1952,7 @@ static int snd_ac97_dev_disconnect(struct snd_device *device)
85953 }
85954
85955 /* build_ops to do nothing */
85956 -static struct snd_ac97_build_ops null_build_ops;
85957 +static const struct snd_ac97_build_ops null_build_ops;
85958
85959 #ifdef CONFIG_SND_AC97_POWER_SAVE
85960 static void do_update_power(struct work_struct *work)
85961 diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c
85962 index eeb2e23..82bf625 100644
85963 --- a/sound/pci/ac97/ac97_patch.c
85964 +++ b/sound/pci/ac97/ac97_patch.c
85965 @@ -371,7 +371,7 @@ static int patch_yamaha_ymf743_build_spdif(struct snd_ac97 *ac97)
85966 return 0;
85967 }
85968
85969 -static struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
85970 +static const struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
85971 .build_spdif = patch_yamaha_ymf743_build_spdif,
85972 .build_3d = patch_yamaha_ymf7x3_3d,
85973 };
85974 @@ -455,7 +455,7 @@ static int patch_yamaha_ymf753_post_spdif(struct snd_ac97 * ac97)
85975 return 0;
85976 }
85977
85978 -static struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
85979 +static const struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
85980 .build_3d = patch_yamaha_ymf7x3_3d,
85981 .build_post_spdif = patch_yamaha_ymf753_post_spdif
85982 };
85983 @@ -502,7 +502,7 @@ static int patch_wolfson_wm9703_specific(struct snd_ac97 * ac97)
85984 return 0;
85985 }
85986
85987 -static struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
85988 +static const struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
85989 .build_specific = patch_wolfson_wm9703_specific,
85990 };
85991
85992 @@ -533,7 +533,7 @@ static int patch_wolfson_wm9704_specific(struct snd_ac97 * ac97)
85993 return 0;
85994 }
85995
85996 -static struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
85997 +static const struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
85998 .build_specific = patch_wolfson_wm9704_specific,
85999 };
86000
86001 @@ -555,7 +555,7 @@ static int patch_wolfson_wm9705_specific(struct snd_ac97 * ac97)
86002 return 0;
86003 }
86004
86005 -static struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
86006 +static const struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
86007 .build_specific = patch_wolfson_wm9705_specific,
86008 };
86009
86010 @@ -692,7 +692,7 @@ static int patch_wolfson_wm9711_specific(struct snd_ac97 * ac97)
86011 return 0;
86012 }
86013
86014 -static struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
86015 +static const struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
86016 .build_specific = patch_wolfson_wm9711_specific,
86017 };
86018
86019 @@ -886,7 +886,7 @@ static void patch_wolfson_wm9713_resume (struct snd_ac97 * ac97)
86020 }
86021 #endif
86022
86023 -static struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
86024 +static const struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
86025 .build_specific = patch_wolfson_wm9713_specific,
86026 .build_3d = patch_wolfson_wm9713_3d,
86027 #ifdef CONFIG_PM
86028 @@ -991,7 +991,7 @@ static int patch_sigmatel_stac97xx_specific(struct snd_ac97 * ac97)
86029 return 0;
86030 }
86031
86032 -static struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
86033 +static const struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
86034 .build_3d = patch_sigmatel_stac9700_3d,
86035 .build_specific = patch_sigmatel_stac97xx_specific
86036 };
86037 @@ -1038,7 +1038,7 @@ static int patch_sigmatel_stac9708_specific(struct snd_ac97 *ac97)
86038 return patch_sigmatel_stac97xx_specific(ac97);
86039 }
86040
86041 -static struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
86042 +static const struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
86043 .build_3d = patch_sigmatel_stac9708_3d,
86044 .build_specific = patch_sigmatel_stac9708_specific
86045 };
86046 @@ -1267,7 +1267,7 @@ static int patch_sigmatel_stac9758_specific(struct snd_ac97 *ac97)
86047 return 0;
86048 }
86049
86050 -static struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
86051 +static const struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
86052 .build_3d = patch_sigmatel_stac9700_3d,
86053 .build_specific = patch_sigmatel_stac9758_specific
86054 };
86055 @@ -1342,7 +1342,7 @@ static int patch_cirrus_build_spdif(struct snd_ac97 * ac97)
86056 return 0;
86057 }
86058
86059 -static struct snd_ac97_build_ops patch_cirrus_ops = {
86060 +static const struct snd_ac97_build_ops patch_cirrus_ops = {
86061 .build_spdif = patch_cirrus_build_spdif
86062 };
86063
86064 @@ -1399,7 +1399,7 @@ static int patch_conexant_build_spdif(struct snd_ac97 * ac97)
86065 return 0;
86066 }
86067
86068 -static struct snd_ac97_build_ops patch_conexant_ops = {
86069 +static const struct snd_ac97_build_ops patch_conexant_ops = {
86070 .build_spdif = patch_conexant_build_spdif
86071 };
86072
86073 @@ -1575,7 +1575,7 @@ static void patch_ad1881_chained(struct snd_ac97 * ac97, int unchained_idx, int
86074 }
86075 }
86076
86077 -static struct snd_ac97_build_ops patch_ad1881_build_ops = {
86078 +static const struct snd_ac97_build_ops patch_ad1881_build_ops = {
86079 #ifdef CONFIG_PM
86080 .resume = ad18xx_resume
86081 #endif
86082 @@ -1662,7 +1662,7 @@ static int patch_ad1885_specific(struct snd_ac97 * ac97)
86083 return 0;
86084 }
86085
86086 -static struct snd_ac97_build_ops patch_ad1885_build_ops = {
86087 +static const struct snd_ac97_build_ops patch_ad1885_build_ops = {
86088 .build_specific = &patch_ad1885_specific,
86089 #ifdef CONFIG_PM
86090 .resume = ad18xx_resume
86091 @@ -1689,7 +1689,7 @@ static int patch_ad1886_specific(struct snd_ac97 * ac97)
86092 return 0;
86093 }
86094
86095 -static struct snd_ac97_build_ops patch_ad1886_build_ops = {
86096 +static const struct snd_ac97_build_ops patch_ad1886_build_ops = {
86097 .build_specific = &patch_ad1886_specific,
86098 #ifdef CONFIG_PM
86099 .resume = ad18xx_resume
86100 @@ -1896,7 +1896,7 @@ static int patch_ad1981a_specific(struct snd_ac97 * ac97)
86101 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
86102 }
86103
86104 -static struct snd_ac97_build_ops patch_ad1981a_build_ops = {
86105 +static const struct snd_ac97_build_ops patch_ad1981a_build_ops = {
86106 .build_post_spdif = patch_ad198x_post_spdif,
86107 .build_specific = patch_ad1981a_specific,
86108 #ifdef CONFIG_PM
86109 @@ -1952,7 +1952,7 @@ static int patch_ad1981b_specific(struct snd_ac97 *ac97)
86110 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
86111 }
86112
86113 -static struct snd_ac97_build_ops patch_ad1981b_build_ops = {
86114 +static const struct snd_ac97_build_ops patch_ad1981b_build_ops = {
86115 .build_post_spdif = patch_ad198x_post_spdif,
86116 .build_specific = patch_ad1981b_specific,
86117 #ifdef CONFIG_PM
86118 @@ -2091,7 +2091,7 @@ static int patch_ad1888_specific(struct snd_ac97 *ac97)
86119 return patch_build_controls(ac97, snd_ac97_ad1888_controls, ARRAY_SIZE(snd_ac97_ad1888_controls));
86120 }
86121
86122 -static struct snd_ac97_build_ops patch_ad1888_build_ops = {
86123 +static const struct snd_ac97_build_ops patch_ad1888_build_ops = {
86124 .build_post_spdif = patch_ad198x_post_spdif,
86125 .build_specific = patch_ad1888_specific,
86126 #ifdef CONFIG_PM
86127 @@ -2140,7 +2140,7 @@ static int patch_ad1980_specific(struct snd_ac97 *ac97)
86128 return patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1);
86129 }
86130
86131 -static struct snd_ac97_build_ops patch_ad1980_build_ops = {
86132 +static const struct snd_ac97_build_ops patch_ad1980_build_ops = {
86133 .build_post_spdif = patch_ad198x_post_spdif,
86134 .build_specific = patch_ad1980_specific,
86135 #ifdef CONFIG_PM
86136 @@ -2255,7 +2255,7 @@ static int patch_ad1985_specific(struct snd_ac97 *ac97)
86137 ARRAY_SIZE(snd_ac97_ad1985_controls));
86138 }
86139
86140 -static struct snd_ac97_build_ops patch_ad1985_build_ops = {
86141 +static const struct snd_ac97_build_ops patch_ad1985_build_ops = {
86142 .build_post_spdif = patch_ad198x_post_spdif,
86143 .build_specific = patch_ad1985_specific,
86144 #ifdef CONFIG_PM
86145 @@ -2547,7 +2547,7 @@ static int patch_ad1986_specific(struct snd_ac97 *ac97)
86146 ARRAY_SIZE(snd_ac97_ad1985_controls));
86147 }
86148
86149 -static struct snd_ac97_build_ops patch_ad1986_build_ops = {
86150 +static const struct snd_ac97_build_ops patch_ad1986_build_ops = {
86151 .build_post_spdif = patch_ad198x_post_spdif,
86152 .build_specific = patch_ad1986_specific,
86153 #ifdef CONFIG_PM
86154 @@ -2652,7 +2652,7 @@ static int patch_alc650_specific(struct snd_ac97 * ac97)
86155 return 0;
86156 }
86157
86158 -static struct snd_ac97_build_ops patch_alc650_ops = {
86159 +static const struct snd_ac97_build_ops patch_alc650_ops = {
86160 .build_specific = patch_alc650_specific,
86161 .update_jacks = alc650_update_jacks
86162 };
86163 @@ -2804,7 +2804,7 @@ static int patch_alc655_specific(struct snd_ac97 * ac97)
86164 return 0;
86165 }
86166
86167 -static struct snd_ac97_build_ops patch_alc655_ops = {
86168 +static const struct snd_ac97_build_ops patch_alc655_ops = {
86169 .build_specific = patch_alc655_specific,
86170 .update_jacks = alc655_update_jacks
86171 };
86172 @@ -2916,7 +2916,7 @@ static int patch_alc850_specific(struct snd_ac97 *ac97)
86173 return 0;
86174 }
86175
86176 -static struct snd_ac97_build_ops patch_alc850_ops = {
86177 +static const struct snd_ac97_build_ops patch_alc850_ops = {
86178 .build_specific = patch_alc850_specific,
86179 .update_jacks = alc850_update_jacks
86180 };
86181 @@ -2978,7 +2978,7 @@ static int patch_cm9738_specific(struct snd_ac97 * ac97)
86182 return patch_build_controls(ac97, snd_ac97_cm9738_controls, ARRAY_SIZE(snd_ac97_cm9738_controls));
86183 }
86184
86185 -static struct snd_ac97_build_ops patch_cm9738_ops = {
86186 +static const struct snd_ac97_build_ops patch_cm9738_ops = {
86187 .build_specific = patch_cm9738_specific,
86188 .update_jacks = cm9738_update_jacks
86189 };
86190 @@ -3069,7 +3069,7 @@ static int patch_cm9739_post_spdif(struct snd_ac97 * ac97)
86191 return patch_build_controls(ac97, snd_ac97_cm9739_controls_spdif, ARRAY_SIZE(snd_ac97_cm9739_controls_spdif));
86192 }
86193
86194 -static struct snd_ac97_build_ops patch_cm9739_ops = {
86195 +static const struct snd_ac97_build_ops patch_cm9739_ops = {
86196 .build_specific = patch_cm9739_specific,
86197 .build_post_spdif = patch_cm9739_post_spdif,
86198 .update_jacks = cm9739_update_jacks
86199 @@ -3243,7 +3243,7 @@ static int patch_cm9761_specific(struct snd_ac97 * ac97)
86200 return patch_build_controls(ac97, snd_ac97_cm9761_controls, ARRAY_SIZE(snd_ac97_cm9761_controls));
86201 }
86202
86203 -static struct snd_ac97_build_ops patch_cm9761_ops = {
86204 +static const struct snd_ac97_build_ops patch_cm9761_ops = {
86205 .build_specific = patch_cm9761_specific,
86206 .build_post_spdif = patch_cm9761_post_spdif,
86207 .update_jacks = cm9761_update_jacks
86208 @@ -3339,7 +3339,7 @@ static int patch_cm9780_specific(struct snd_ac97 *ac97)
86209 return patch_build_controls(ac97, cm9780_controls, ARRAY_SIZE(cm9780_controls));
86210 }
86211
86212 -static struct snd_ac97_build_ops patch_cm9780_ops = {
86213 +static const struct snd_ac97_build_ops patch_cm9780_ops = {
86214 .build_specific = patch_cm9780_specific,
86215 .build_post_spdif = patch_cm9761_post_spdif /* identical with CM9761 */
86216 };
86217 @@ -3459,7 +3459,7 @@ static int patch_vt1616_specific(struct snd_ac97 * ac97)
86218 return 0;
86219 }
86220
86221 -static struct snd_ac97_build_ops patch_vt1616_ops = {
86222 +static const struct snd_ac97_build_ops patch_vt1616_ops = {
86223 .build_specific = patch_vt1616_specific
86224 };
86225
86226 @@ -3813,7 +3813,7 @@ static int patch_it2646_specific(struct snd_ac97 * ac97)
86227 return 0;
86228 }
86229
86230 -static struct snd_ac97_build_ops patch_it2646_ops = {
86231 +static const struct snd_ac97_build_ops patch_it2646_ops = {
86232 .build_specific = patch_it2646_specific,
86233 .update_jacks = it2646_update_jacks
86234 };
86235 @@ -3847,7 +3847,7 @@ static int patch_si3036_specific(struct snd_ac97 * ac97)
86236 return 0;
86237 }
86238
86239 -static struct snd_ac97_build_ops patch_si3036_ops = {
86240 +static const struct snd_ac97_build_ops patch_si3036_ops = {
86241 .build_specific = patch_si3036_specific,
86242 };
86243
86244 @@ -3914,7 +3914,7 @@ static int patch_ucb1400_specific(struct snd_ac97 * ac97)
86245 return 0;
86246 }
86247
86248 -static struct snd_ac97_build_ops patch_ucb1400_ops = {
86249 +static const struct snd_ac97_build_ops patch_ucb1400_ops = {
86250 .build_specific = patch_ucb1400_specific,
86251 };
86252
86253 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
86254 index 99552fb..4dcc2c5 100644
86255 --- a/sound/pci/hda/hda_codec.h
86256 +++ b/sound/pci/hda/hda_codec.h
86257 @@ -580,7 +580,7 @@ struct hda_bus_ops {
86258 /* notify power-up/down from codec to controller */
86259 void (*pm_notify)(struct hda_bus *bus);
86260 #endif
86261 -};
86262 +} __no_const;
86263
86264 /* template to pass to the bus constructor */
86265 struct hda_bus_template {
86266 @@ -675,6 +675,7 @@ struct hda_codec_ops {
86267 int (*check_power_status)(struct hda_codec *codec, hda_nid_t nid);
86268 #endif
86269 };
86270 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
86271
86272 /* record for amp information cache */
86273 struct hda_cache_head {
86274 @@ -705,7 +706,7 @@ struct hda_pcm_ops {
86275 struct snd_pcm_substream *substream);
86276 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
86277 struct snd_pcm_substream *substream);
86278 -};
86279 +} __no_const;
86280
86281 /* PCM information for each substream */
86282 struct hda_pcm_stream {
86283 @@ -760,7 +761,7 @@ struct hda_codec {
86284 const char *modelname; /* model name for preset */
86285
86286 /* set by patch */
86287 - struct hda_codec_ops patch_ops;
86288 + hda_codec_ops_no_const patch_ops;
86289
86290 /* PCM to create, set by patch_ops.build_pcms callback */
86291 unsigned int num_pcms;
86292 diff --git a/sound/pci/hda/patch_atihdmi.c b/sound/pci/hda/patch_atihdmi.c
86293 index fb684f0..2b11cea 100644
86294 --- a/sound/pci/hda/patch_atihdmi.c
86295 +++ b/sound/pci/hda/patch_atihdmi.c
86296 @@ -177,7 +177,7 @@ static int patch_atihdmi(struct hda_codec *codec)
86297 */
86298 spec->multiout.dig_out_nid = CVT_NID;
86299
86300 - codec->patch_ops = atihdmi_patch_ops;
86301 + memcpy((void *)&codec->patch_ops, &atihdmi_patch_ops, sizeof(atihdmi_patch_ops));
86302
86303 return 0;
86304 }
86305 diff --git a/sound/pci/hda/patch_intelhdmi.c b/sound/pci/hda/patch_intelhdmi.c
86306 index 7c23016..c5bfdd7 100644
86307 --- a/sound/pci/hda/patch_intelhdmi.c
86308 +++ b/sound/pci/hda/patch_intelhdmi.c
86309 @@ -511,10 +511,10 @@ static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res)
86310 cp_ready);
86311
86312 /* TODO */
86313 - if (cp_state)
86314 - ;
86315 - if (cp_ready)
86316 - ;
86317 + if (cp_state) {
86318 + }
86319 + if (cp_ready) {
86320 + }
86321 }
86322
86323
86324 @@ -656,7 +656,7 @@ static int do_patch_intel_hdmi(struct hda_codec *codec)
86325 spec->multiout.dig_out_nid = cvt_nid;
86326
86327 codec->spec = spec;
86328 - codec->patch_ops = intel_hdmi_patch_ops;
86329 + memcpy((void *)&codec->patch_ops, &intel_hdmi_patch_ops, sizeof(intel_hdmi_patch_ops));
86330
86331 snd_hda_eld_proc_new(codec, &spec->sink_eld);
86332
86333 diff --git a/sound/pci/hda/patch_nvhdmi.c b/sound/pci/hda/patch_nvhdmi.c
86334 index 6afdab0..68ed352 100644
86335 --- a/sound/pci/hda/patch_nvhdmi.c
86336 +++ b/sound/pci/hda/patch_nvhdmi.c
86337 @@ -367,7 +367,7 @@ static int patch_nvhdmi_8ch(struct hda_codec *codec)
86338 spec->multiout.max_channels = 8;
86339 spec->multiout.dig_out_nid = Nv_Master_Convert_nid;
86340
86341 - codec->patch_ops = nvhdmi_patch_ops_8ch;
86342 + memcpy((void *)&codec->patch_ops, &nvhdmi_patch_ops_8ch, sizeof(nvhdmi_patch_ops_8ch));
86343
86344 return 0;
86345 }
86346 @@ -386,7 +386,7 @@ static int patch_nvhdmi_2ch(struct hda_codec *codec)
86347 spec->multiout.max_channels = 2;
86348 spec->multiout.dig_out_nid = Nv_Master_Convert_nid;
86349
86350 - codec->patch_ops = nvhdmi_patch_ops_2ch;
86351 + memcpy((void *)&codec->patch_ops, &nvhdmi_patch_ops_2ch, sizeof(nvhdmi_patch_ops_2ch));
86352
86353 return 0;
86354 }
86355 diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
86356 index 2fcd70d..a143eaf 100644
86357 --- a/sound/pci/hda/patch_sigmatel.c
86358 +++ b/sound/pci/hda/patch_sigmatel.c
86359 @@ -5220,7 +5220,7 @@ again:
86360 snd_hda_codec_write_cache(codec, nid, 0,
86361 AC_VERB_SET_CONNECT_SEL, num_dacs);
86362
86363 - codec->patch_ops = stac92xx_patch_ops;
86364 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
86365
86366 codec->proc_widget_hook = stac92hd_proc_hook;
86367
86368 @@ -5294,7 +5294,7 @@ static int patch_stac92hd71bxx(struct hda_codec *codec)
86369 return -ENOMEM;
86370
86371 codec->spec = spec;
86372 - codec->patch_ops = stac92xx_patch_ops;
86373 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
86374 spec->num_pins = STAC92HD71BXX_NUM_PINS;
86375 switch (codec->vendor_id) {
86376 case 0x111d76b6:
86377 diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
86378 index d063149..01599a4 100644
86379 --- a/sound/pci/ice1712/ice1712.h
86380 +++ b/sound/pci/ice1712/ice1712.h
86381 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
86382 unsigned int mask_flags; /* total mask bits */
86383 struct snd_akm4xxx_ops {
86384 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
86385 - } ops;
86386 + } __no_const ops;
86387 };
86388
86389 struct snd_ice1712_spdif {
86390 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
86391 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
86392 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
86393 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
86394 - } ops;
86395 + } __no_const ops;
86396 };
86397
86398
86399 diff --git a/sound/pci/intel8x0m.c b/sound/pci/intel8x0m.c
86400 index 9e7d12e..3e3bc64 100644
86401 --- a/sound/pci/intel8x0m.c
86402 +++ b/sound/pci/intel8x0m.c
86403 @@ -1264,7 +1264,7 @@ static struct shortname_table {
86404 { 0x5455, "ALi M5455" },
86405 { 0x746d, "AMD AMD8111" },
86406 #endif
86407 - { 0 },
86408 + { 0, },
86409 };
86410
86411 static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
86412 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
86413 index 5518371..45cf7ac 100644
86414 --- a/sound/pci/ymfpci/ymfpci_main.c
86415 +++ b/sound/pci/ymfpci/ymfpci_main.c
86416 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
86417 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
86418 break;
86419 }
86420 - if (atomic_read(&chip->interrupt_sleep_count)) {
86421 - atomic_set(&chip->interrupt_sleep_count, 0);
86422 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
86423 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
86424 wake_up(&chip->interrupt_sleep);
86425 }
86426 __end:
86427 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
86428 continue;
86429 init_waitqueue_entry(&wait, current);
86430 add_wait_queue(&chip->interrupt_sleep, &wait);
86431 - atomic_inc(&chip->interrupt_sleep_count);
86432 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
86433 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
86434 remove_wait_queue(&chip->interrupt_sleep, &wait);
86435 }
86436 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
86437 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
86438 spin_unlock(&chip->reg_lock);
86439
86440 - if (atomic_read(&chip->interrupt_sleep_count)) {
86441 - atomic_set(&chip->interrupt_sleep_count, 0);
86442 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
86443 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
86444 wake_up(&chip->interrupt_sleep);
86445 }
86446 }
86447 @@ -2369,7 +2369,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
86448 spin_lock_init(&chip->reg_lock);
86449 spin_lock_init(&chip->voice_lock);
86450 init_waitqueue_head(&chip->interrupt_sleep);
86451 - atomic_set(&chip->interrupt_sleep_count, 0);
86452 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
86453 chip->card = card;
86454 chip->pci = pci;
86455 chip->irq = -1;
86456 diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
86457 index 0a1b2f6..776bb19 100644
86458 --- a/sound/soc/soc-core.c
86459 +++ b/sound/soc/soc-core.c
86460 @@ -609,7 +609,7 @@ static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
86461 }
86462
86463 /* ASoC PCM operations */
86464 -static struct snd_pcm_ops soc_pcm_ops = {
86465 +static snd_pcm_ops_no_const soc_pcm_ops = {
86466 .open = soc_pcm_open,
86467 .close = soc_codec_close,
86468 .hw_params = soc_pcm_hw_params,
86469 diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c
86470 index 79633ea..9732e90 100644
86471 --- a/sound/usb/usbaudio.c
86472 +++ b/sound/usb/usbaudio.c
86473 @@ -963,12 +963,12 @@ static int snd_usb_pcm_playback_trigger(struct snd_pcm_substream *substream,
86474 switch (cmd) {
86475 case SNDRV_PCM_TRIGGER_START:
86476 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
86477 - subs->ops.prepare = prepare_playback_urb;
86478 + *(void **)&subs->ops.prepare = prepare_playback_urb;
86479 return 0;
86480 case SNDRV_PCM_TRIGGER_STOP:
86481 return deactivate_urbs(subs, 0, 0);
86482 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
86483 - subs->ops.prepare = prepare_nodata_playback_urb;
86484 + *(void **)&subs->ops.prepare = prepare_nodata_playback_urb;
86485 return 0;
86486 default:
86487 return -EINVAL;
86488 @@ -985,15 +985,15 @@ static int snd_usb_pcm_capture_trigger(struct snd_pcm_substream *substream,
86489
86490 switch (cmd) {
86491 case SNDRV_PCM_TRIGGER_START:
86492 - subs->ops.retire = retire_capture_urb;
86493 + *(void **)&subs->ops.retire = retire_capture_urb;
86494 return start_urbs(subs, substream->runtime);
86495 case SNDRV_PCM_TRIGGER_STOP:
86496 return deactivate_urbs(subs, 0, 0);
86497 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
86498 - subs->ops.retire = retire_paused_capture_urb;
86499 + *(void **)&subs->ops.retire = retire_paused_capture_urb;
86500 return 0;
86501 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
86502 - subs->ops.retire = retire_capture_urb;
86503 + *(void **)&subs->ops.retire = retire_capture_urb;
86504 return 0;
86505 default:
86506 return -EINVAL;
86507 @@ -1542,7 +1542,7 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
86508 /* for playback, submit the URBs now; otherwise, the first hwptr_done
86509 * updates for all URBs would happen at the same time when starting */
86510 if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) {
86511 - subs->ops.prepare = prepare_nodata_playback_urb;
86512 + *(void **)&subs->ops.prepare = prepare_nodata_playback_urb;
86513 return start_urbs(subs, runtime);
86514 } else
86515 return 0;
86516 @@ -2228,14 +2228,14 @@ static void init_substream(struct snd_usb_stream *as, int stream, struct audiofo
86517 subs->direction = stream;
86518 subs->dev = as->chip->dev;
86519 if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL) {
86520 - subs->ops = audio_urb_ops[stream];
86521 + memcpy((void *)&subs->ops, &audio_urb_ops[stream], sizeof(subs->ops));
86522 } else {
86523 - subs->ops = audio_urb_ops_high_speed[stream];
86524 + memcpy((void *)&subs->ops, &audio_urb_ops_high_speed[stream], sizeof(subs->ops));
86525 switch (as->chip->usb_id) {
86526 case USB_ID(0x041e, 0x3f02): /* E-Mu 0202 USB */
86527 case USB_ID(0x041e, 0x3f04): /* E-Mu 0404 USB */
86528 case USB_ID(0x041e, 0x3f0a): /* E-Mu Tracker Pre */
86529 - subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
86530 + *(void **)&subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
86531 break;
86532 }
86533 }
86534 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
86535 new file mode 100644
86536 index 0000000..b044b80
86537 --- /dev/null
86538 +++ b/tools/gcc/Makefile
86539 @@ -0,0 +1,21 @@
86540 +#CC := gcc
86541 +#PLUGIN_SOURCE_FILES := pax_plugin.c
86542 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
86543 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
86544 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
86545 +
86546 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
86547 +
86548 +hostlibs-y := constify_plugin.so
86549 +hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
86550 +hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
86551 +hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
86552 +hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
86553 +
86554 +always := $(hostlibs-y)
86555 +
86556 +constify_plugin-objs := constify_plugin.o
86557 +stackleak_plugin-objs := stackleak_plugin.o
86558 +kallocstat_plugin-objs := kallocstat_plugin.o
86559 +kernexec_plugin-objs := kernexec_plugin.o
86560 +checker_plugin-objs := checker_plugin.o
86561 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
86562 new file mode 100644
86563 index 0000000..d41b5af
86564 --- /dev/null
86565 +++ b/tools/gcc/checker_plugin.c
86566 @@ -0,0 +1,171 @@
86567 +/*
86568 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
86569 + * Licensed under the GPL v2
86570 + *
86571 + * Note: the choice of the license means that the compilation process is
86572 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
86573 + * but for the kernel it doesn't matter since it doesn't link against
86574 + * any of the gcc libraries
86575 + *
86576 + * gcc plugin to implement various sparse (source code checker) features
86577 + *
86578 + * TODO:
86579 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
86580 + *
86581 + * BUGS:
86582 + * - none known
86583 + */
86584 +#include "gcc-plugin.h"
86585 +#include "config.h"
86586 +#include "system.h"
86587 +#include "coretypes.h"
86588 +#include "tree.h"
86589 +#include "tree-pass.h"
86590 +#include "flags.h"
86591 +#include "intl.h"
86592 +#include "toplev.h"
86593 +#include "plugin.h"
86594 +//#include "expr.h" where are you...
86595 +#include "diagnostic.h"
86596 +#include "plugin-version.h"
86597 +#include "tm.h"
86598 +#include "function.h"
86599 +#include "basic-block.h"
86600 +#include "gimple.h"
86601 +#include "rtl.h"
86602 +#include "emit-rtl.h"
86603 +#include "tree-flow.h"
86604 +#include "target.h"
86605 +
86606 +extern void c_register_addr_space (const char *str, addr_space_t as);
86607 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
86608 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
86609 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
86610 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
86611 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
86612 +
86613 +extern void print_gimple_stmt(FILE *, gimple, int, int);
86614 +extern rtx emit_move_insn(rtx x, rtx y);
86615 +
86616 +int plugin_is_GPL_compatible;
86617 +
86618 +static struct plugin_info checker_plugin_info = {
86619 + .version = "201111150100",
86620 +};
86621 +
86622 +#define ADDR_SPACE_KERNEL 0
86623 +#define ADDR_SPACE_FORCE_KERNEL 1
86624 +#define ADDR_SPACE_USER 2
86625 +#define ADDR_SPACE_FORCE_USER 3
86626 +#define ADDR_SPACE_IOMEM 0
86627 +#define ADDR_SPACE_FORCE_IOMEM 0
86628 +#define ADDR_SPACE_PERCPU 0
86629 +#define ADDR_SPACE_FORCE_PERCPU 0
86630 +#define ADDR_SPACE_RCU 0
86631 +#define ADDR_SPACE_FORCE_RCU 0
86632 +
86633 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
86634 +{
86635 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
86636 +}
86637 +
86638 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
86639 +{
86640 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
86641 +}
86642 +
86643 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
86644 +{
86645 + return default_addr_space_valid_pointer_mode(mode, as);
86646 +}
86647 +
86648 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
86649 +{
86650 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
86651 +}
86652 +
86653 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
86654 +{
86655 + return default_addr_space_legitimize_address(x, oldx, mode, as);
86656 +}
86657 +
86658 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
86659 +{
86660 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
86661 + return true;
86662 +
86663 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
86664 + return true;
86665 +
86666 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
86667 + return true;
86668 +
86669 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
86670 + return true;
86671 +
86672 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
86673 + return true;
86674 +
86675 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
86676 + return true;
86677 +
86678 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
86679 + return true;
86680 +
86681 + return subset == superset;
86682 +}
86683 +
86684 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
86685 +{
86686 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
86687 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
86688 +
86689 + return op;
86690 +}
86691 +
86692 +static void register_checker_address_spaces(void *event_data, void *data)
86693 +{
86694 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
86695 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
86696 + c_register_addr_space("__user", ADDR_SPACE_USER);
86697 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
86698 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
86699 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
86700 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
86701 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
86702 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
86703 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
86704 +
86705 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
86706 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
86707 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
86708 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
86709 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
86710 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
86711 + targetm.addr_space.convert = checker_addr_space_convert;
86712 +}
86713 +
86714 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
86715 +{
86716 + const char * const plugin_name = plugin_info->base_name;
86717 + const int argc = plugin_info->argc;
86718 + const struct plugin_argument * const argv = plugin_info->argv;
86719 + int i;
86720 +
86721 + if (!plugin_default_version_check(version, &gcc_version)) {
86722 + error(G_("incompatible gcc/plugin versions"));
86723 + return 1;
86724 + }
86725 +
86726 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
86727 +
86728 + for (i = 0; i < argc; ++i)
86729 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
86730 +
86731 + if (TARGET_64BIT == 0)
86732 + return 0;
86733 +
86734 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
86735 +
86736 + return 0;
86737 +}
86738 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
86739 new file mode 100644
86740 index 0000000..704a564
86741 --- /dev/null
86742 +++ b/tools/gcc/constify_plugin.c
86743 @@ -0,0 +1,303 @@
86744 +/*
86745 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
86746 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
86747 + * Licensed under the GPL v2, or (at your option) v3
86748 + *
86749 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
86750 + *
86751 + * Homepage:
86752 + * http://www.grsecurity.net/~ephox/const_plugin/
86753 + *
86754 + * Usage:
86755 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
86756 + * $ gcc -fplugin=constify_plugin.so test.c -O2
86757 + */
86758 +
86759 +#include "gcc-plugin.h"
86760 +#include "config.h"
86761 +#include "system.h"
86762 +#include "coretypes.h"
86763 +#include "tree.h"
86764 +#include "tree-pass.h"
86765 +#include "flags.h"
86766 +#include "intl.h"
86767 +#include "toplev.h"
86768 +#include "plugin.h"
86769 +#include "diagnostic.h"
86770 +#include "plugin-version.h"
86771 +#include "tm.h"
86772 +#include "function.h"
86773 +#include "basic-block.h"
86774 +#include "gimple.h"
86775 +#include "rtl.h"
86776 +#include "emit-rtl.h"
86777 +#include "tree-flow.h"
86778 +
86779 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
86780 +
86781 +int plugin_is_GPL_compatible;
86782 +
86783 +static struct plugin_info const_plugin_info = {
86784 + .version = "201111150100",
86785 + .help = "no-constify\tturn off constification\n",
86786 +};
86787 +
86788 +static void constify_type(tree type);
86789 +static bool walk_struct(tree node);
86790 +
86791 +static tree deconstify_type(tree old_type)
86792 +{
86793 + tree new_type, field;
86794 +
86795 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
86796 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
86797 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
86798 + DECL_FIELD_CONTEXT(field) = new_type;
86799 + TYPE_READONLY(new_type) = 0;
86800 + C_TYPE_FIELDS_READONLY(new_type) = 0;
86801 + return new_type;
86802 +}
86803 +
86804 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
86805 +{
86806 + tree type;
86807 +
86808 + *no_add_attrs = true;
86809 + if (TREE_CODE(*node) == FUNCTION_DECL) {
86810 + error("%qE attribute does not apply to functions", name);
86811 + return NULL_TREE;
86812 + }
86813 +
86814 + if (TREE_CODE(*node) == VAR_DECL) {
86815 + error("%qE attribute does not apply to variables", name);
86816 + return NULL_TREE;
86817 + }
86818 +
86819 + if (TYPE_P(*node)) {
86820 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
86821 + *no_add_attrs = false;
86822 + else
86823 + error("%qE attribute applies to struct and union types only", name);
86824 + return NULL_TREE;
86825 + }
86826 +
86827 + type = TREE_TYPE(*node);
86828 +
86829 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
86830 + error("%qE attribute applies to struct and union types only", name);
86831 + return NULL_TREE;
86832 + }
86833 +
86834 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
86835 + error("%qE attribute is already applied to the type", name);
86836 + return NULL_TREE;
86837 + }
86838 +
86839 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
86840 + error("%qE attribute used on type that is not constified", name);
86841 + return NULL_TREE;
86842 + }
86843 +
86844 + if (TREE_CODE(*node) == TYPE_DECL) {
86845 + TREE_TYPE(*node) = deconstify_type(type);
86846 + TREE_READONLY(*node) = 0;
86847 + return NULL_TREE;
86848 + }
86849 +
86850 + return NULL_TREE;
86851 +}
86852 +
86853 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
86854 +{
86855 + *no_add_attrs = true;
86856 + if (!TYPE_P(*node)) {
86857 + error("%qE attribute applies to types only", name);
86858 + return NULL_TREE;
86859 + }
86860 +
86861 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
86862 + error("%qE attribute applies to struct and union types only", name);
86863 + return NULL_TREE;
86864 + }
86865 +
86866 + *no_add_attrs = false;
86867 + constify_type(*node);
86868 + return NULL_TREE;
86869 +}
86870 +
86871 +static struct attribute_spec no_const_attr = {
86872 + .name = "no_const",
86873 + .min_length = 0,
86874 + .max_length = 0,
86875 + .decl_required = false,
86876 + .type_required = false,
86877 + .function_type_required = false,
86878 + .handler = handle_no_const_attribute,
86879 +#if BUILDING_GCC_VERSION >= 4007
86880 + .affects_type_identity = true
86881 +#endif
86882 +};
86883 +
86884 +static struct attribute_spec do_const_attr = {
86885 + .name = "do_const",
86886 + .min_length = 0,
86887 + .max_length = 0,
86888 + .decl_required = false,
86889 + .type_required = false,
86890 + .function_type_required = false,
86891 + .handler = handle_do_const_attribute,
86892 +#if BUILDING_GCC_VERSION >= 4007
86893 + .affects_type_identity = true
86894 +#endif
86895 +};
86896 +
86897 +static void register_attributes(void *event_data, void *data)
86898 +{
86899 + register_attribute(&no_const_attr);
86900 + register_attribute(&do_const_attr);
86901 +}
86902 +
86903 +static void constify_type(tree type)
86904 +{
86905 + TYPE_READONLY(type) = 1;
86906 + C_TYPE_FIELDS_READONLY(type) = 1;
86907 +}
86908 +
86909 +static bool is_fptr(tree field)
86910 +{
86911 + tree ptr = TREE_TYPE(field);
86912 +
86913 + if (TREE_CODE(ptr) != POINTER_TYPE)
86914 + return false;
86915 +
86916 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
86917 +}
86918 +
86919 +static bool walk_struct(tree node)
86920 +{
86921 + tree field;
86922 +
86923 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
86924 + return false;
86925 +
86926 + if (TYPE_FIELDS(node) == NULL_TREE)
86927 + return false;
86928 +
86929 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
86930 + tree type = TREE_TYPE(field);
86931 + enum tree_code code = TREE_CODE(type);
86932 + if (code == RECORD_TYPE || code == UNION_TYPE) {
86933 + if (!(walk_struct(type)))
86934 + return false;
86935 + } else if (!is_fptr(field) && !TREE_READONLY(field))
86936 + return false;
86937 + }
86938 + return true;
86939 +}
86940 +
86941 +static void finish_type(void *event_data, void *data)
86942 +{
86943 + tree type = (tree)event_data;
86944 +
86945 + if (type == NULL_TREE)
86946 + return;
86947 +
86948 + if (TYPE_READONLY(type))
86949 + return;
86950 +
86951 + if (walk_struct(type))
86952 + constify_type(type);
86953 +}
86954 +
86955 +static unsigned int check_local_variables(void);
86956 +
86957 +struct gimple_opt_pass pass_local_variable = {
86958 + {
86959 + .type = GIMPLE_PASS,
86960 + .name = "check_local_variables",
86961 + .gate = NULL,
86962 + .execute = check_local_variables,
86963 + .sub = NULL,
86964 + .next = NULL,
86965 + .static_pass_number = 0,
86966 + .tv_id = TV_NONE,
86967 + .properties_required = 0,
86968 + .properties_provided = 0,
86969 + .properties_destroyed = 0,
86970 + .todo_flags_start = 0,
86971 + .todo_flags_finish = 0
86972 + }
86973 +};
86974 +
86975 +static unsigned int check_local_variables(void)
86976 +{
86977 + tree var;
86978 + referenced_var_iterator rvi;
86979 +
86980 +#if BUILDING_GCC_VERSION == 4005
86981 + FOR_EACH_REFERENCED_VAR(var, rvi) {
86982 +#else
86983 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
86984 +#endif
86985 + tree type = TREE_TYPE(var);
86986 +
86987 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
86988 + continue;
86989 +
86990 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
86991 + continue;
86992 +
86993 + if (!TYPE_READONLY(type))
86994 + continue;
86995 +
86996 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
86997 +// continue;
86998 +
86999 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
87000 +// continue;
87001 +
87002 + if (walk_struct(type)) {
87003 + error("constified variable %qE cannot be local", var);
87004 + return 1;
87005 + }
87006 + }
87007 + return 0;
87008 +}
87009 +
87010 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
87011 +{
87012 + const char * const plugin_name = plugin_info->base_name;
87013 + const int argc = plugin_info->argc;
87014 + const struct plugin_argument * const argv = plugin_info->argv;
87015 + int i;
87016 + bool constify = true;
87017 +
87018 + struct register_pass_info local_variable_pass_info = {
87019 + .pass = &pass_local_variable.pass,
87020 + .reference_pass_name = "*referenced_vars",
87021 + .ref_pass_instance_number = 0,
87022 + .pos_op = PASS_POS_INSERT_AFTER
87023 + };
87024 +
87025 + if (!plugin_default_version_check(version, &gcc_version)) {
87026 + error(G_("incompatible gcc/plugin versions"));
87027 + return 1;
87028 + }
87029 +
87030 + for (i = 0; i < argc; ++i) {
87031 + if (!(strcmp(argv[i].key, "no-constify"))) {
87032 + constify = false;
87033 + continue;
87034 + }
87035 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
87036 + }
87037 +
87038 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
87039 + if (constify) {
87040 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
87041 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
87042 + }
87043 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
87044 +
87045 + return 0;
87046 +}
87047 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
87048 new file mode 100644
87049 index 0000000..a5eabce
87050 --- /dev/null
87051 +++ b/tools/gcc/kallocstat_plugin.c
87052 @@ -0,0 +1,167 @@
87053 +/*
87054 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
87055 + * Licensed under the GPL v2
87056 + *
87057 + * Note: the choice of the license means that the compilation process is
87058 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
87059 + * but for the kernel it doesn't matter since it doesn't link against
87060 + * any of the gcc libraries
87061 + *
87062 + * gcc plugin to find the distribution of k*alloc sizes
87063 + *
87064 + * TODO:
87065 + *
87066 + * BUGS:
87067 + * - none known
87068 + */
87069 +#include "gcc-plugin.h"
87070 +#include "config.h"
87071 +#include "system.h"
87072 +#include "coretypes.h"
87073 +#include "tree.h"
87074 +#include "tree-pass.h"
87075 +#include "flags.h"
87076 +#include "intl.h"
87077 +#include "toplev.h"
87078 +#include "plugin.h"
87079 +//#include "expr.h" where are you...
87080 +#include "diagnostic.h"
87081 +#include "plugin-version.h"
87082 +#include "tm.h"
87083 +#include "function.h"
87084 +#include "basic-block.h"
87085 +#include "gimple.h"
87086 +#include "rtl.h"
87087 +#include "emit-rtl.h"
87088 +
87089 +extern void print_gimple_stmt(FILE *, gimple, int, int);
87090 +
87091 +int plugin_is_GPL_compatible;
87092 +
87093 +static const char * const kalloc_functions[] = {
87094 + "__kmalloc",
87095 + "kmalloc",
87096 + "kmalloc_large",
87097 + "kmalloc_node",
87098 + "kmalloc_order",
87099 + "kmalloc_order_trace",
87100 + "kmalloc_slab",
87101 + "kzalloc",
87102 + "kzalloc_node",
87103 +};
87104 +
87105 +static struct plugin_info kallocstat_plugin_info = {
87106 + .version = "201111150100",
87107 +};
87108 +
87109 +static unsigned int execute_kallocstat(void);
87110 +
87111 +static struct gimple_opt_pass kallocstat_pass = {
87112 + .pass = {
87113 + .type = GIMPLE_PASS,
87114 + .name = "kallocstat",
87115 + .gate = NULL,
87116 + .execute = execute_kallocstat,
87117 + .sub = NULL,
87118 + .next = NULL,
87119 + .static_pass_number = 0,
87120 + .tv_id = TV_NONE,
87121 + .properties_required = 0,
87122 + .properties_provided = 0,
87123 + .properties_destroyed = 0,
87124 + .todo_flags_start = 0,
87125 + .todo_flags_finish = 0
87126 + }
87127 +};
87128 +
87129 +static bool is_kalloc(const char *fnname)
87130 +{
87131 + size_t i;
87132 +
87133 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
87134 + if (!strcmp(fnname, kalloc_functions[i]))
87135 + return true;
87136 + return false;
87137 +}
87138 +
87139 +static unsigned int execute_kallocstat(void)
87140 +{
87141 + basic_block bb;
87142 +
87143 + // 1. loop through BBs and GIMPLE statements
87144 + FOR_EACH_BB(bb) {
87145 + gimple_stmt_iterator gsi;
87146 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
87147 + // gimple match:
87148 + tree fndecl, size;
87149 + gimple call_stmt;
87150 + const char *fnname;
87151 +
87152 + // is it a call
87153 + call_stmt = gsi_stmt(gsi);
87154 + if (!is_gimple_call(call_stmt))
87155 + continue;
87156 + fndecl = gimple_call_fndecl(call_stmt);
87157 + if (fndecl == NULL_TREE)
87158 + continue;
87159 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
87160 + continue;
87161 +
87162 + // is it a call to k*alloc
87163 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
87164 + if (!is_kalloc(fnname))
87165 + continue;
87166 +
87167 + // is the size arg the result of a simple const assignment
87168 + size = gimple_call_arg(call_stmt, 0);
87169 + while (true) {
87170 + gimple def_stmt;
87171 + expanded_location xloc;
87172 + size_t size_val;
87173 +
87174 + if (TREE_CODE(size) != SSA_NAME)
87175 + break;
87176 + def_stmt = SSA_NAME_DEF_STMT(size);
87177 + if (!def_stmt || !is_gimple_assign(def_stmt))
87178 + break;
87179 + if (gimple_num_ops(def_stmt) != 2)
87180 + break;
87181 + size = gimple_assign_rhs1(def_stmt);
87182 + if (!TREE_CONSTANT(size))
87183 + continue;
87184 + xloc = expand_location(gimple_location(def_stmt));
87185 + if (!xloc.file)
87186 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
87187 + size_val = TREE_INT_CST_LOW(size);
87188 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
87189 + break;
87190 + }
87191 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
87192 +//debug_tree(gimple_call_fn(call_stmt));
87193 +//print_node(stderr, "pax", fndecl, 4);
87194 + }
87195 + }
87196 +
87197 + return 0;
87198 +}
87199 +
87200 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
87201 +{
87202 + const char * const plugin_name = plugin_info->base_name;
87203 + struct register_pass_info kallocstat_pass_info = {
87204 + .pass = &kallocstat_pass.pass,
87205 + .reference_pass_name = "ssa",
87206 + .ref_pass_instance_number = 0,
87207 + .pos_op = PASS_POS_INSERT_AFTER
87208 + };
87209 +
87210 + if (!plugin_default_version_check(version, &gcc_version)) {
87211 + error(G_("incompatible gcc/plugin versions"));
87212 + return 1;
87213 + }
87214 +
87215 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
87216 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
87217 +
87218 + return 0;
87219 +}
87220 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
87221 new file mode 100644
87222 index 0000000..51f747e
87223 --- /dev/null
87224 +++ b/tools/gcc/kernexec_plugin.c
87225 @@ -0,0 +1,348 @@
87226 +/*
87227 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
87228 + * Licensed under the GPL v2
87229 + *
87230 + * Note: the choice of the license means that the compilation process is
87231 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
87232 + * but for the kernel it doesn't matter since it doesn't link against
87233 + * any of the gcc libraries
87234 + *
87235 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
87236 + *
87237 + * TODO:
87238 + *
87239 + * BUGS:
87240 + * - none known
87241 + */
87242 +#include "gcc-plugin.h"
87243 +#include "config.h"
87244 +#include "system.h"
87245 +#include "coretypes.h"
87246 +#include "tree.h"
87247 +#include "tree-pass.h"
87248 +#include "flags.h"
87249 +#include "intl.h"
87250 +#include "toplev.h"
87251 +#include "plugin.h"
87252 +//#include "expr.h" where are you...
87253 +#include "diagnostic.h"
87254 +#include "plugin-version.h"
87255 +#include "tm.h"
87256 +#include "function.h"
87257 +#include "basic-block.h"
87258 +#include "gimple.h"
87259 +#include "rtl.h"
87260 +#include "emit-rtl.h"
87261 +#include "tree-flow.h"
87262 +
87263 +extern void print_gimple_stmt(FILE *, gimple, int, int);
87264 +extern rtx emit_move_insn(rtx x, rtx y);
87265 +
87266 +int plugin_is_GPL_compatible;
87267 +
87268 +static struct plugin_info kernexec_plugin_info = {
87269 + .version = "201111291120",
87270 + .help = "method=[bts|or]\tinstrumentation method\n"
87271 +};
87272 +
87273 +static unsigned int execute_kernexec_fptr(void);
87274 +static unsigned int execute_kernexec_retaddr(void);
87275 +static bool kernexec_cmodel_check(void);
87276 +
87277 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator);
87278 +static void (*kernexec_instrument_retaddr)(rtx);
87279 +
87280 +static struct gimple_opt_pass kernexec_fptr_pass = {
87281 + .pass = {
87282 + .type = GIMPLE_PASS,
87283 + .name = "kernexec_fptr",
87284 + .gate = kernexec_cmodel_check,
87285 + .execute = execute_kernexec_fptr,
87286 + .sub = NULL,
87287 + .next = NULL,
87288 + .static_pass_number = 0,
87289 + .tv_id = TV_NONE,
87290 + .properties_required = 0,
87291 + .properties_provided = 0,
87292 + .properties_destroyed = 0,
87293 + .todo_flags_start = 0,
87294 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
87295 + }
87296 +};
87297 +
87298 +static struct rtl_opt_pass kernexec_retaddr_pass = {
87299 + .pass = {
87300 + .type = RTL_PASS,
87301 + .name = "kernexec_retaddr",
87302 + .gate = kernexec_cmodel_check,
87303 + .execute = execute_kernexec_retaddr,
87304 + .sub = NULL,
87305 + .next = NULL,
87306 + .static_pass_number = 0,
87307 + .tv_id = TV_NONE,
87308 + .properties_required = 0,
87309 + .properties_provided = 0,
87310 + .properties_destroyed = 0,
87311 + .todo_flags_start = 0,
87312 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
87313 + }
87314 +};
87315 +
87316 +static bool kernexec_cmodel_check(void)
87317 +{
87318 + tree section;
87319 +
87320 + if (ix86_cmodel != CM_KERNEL)
87321 + return false;
87322 +
87323 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
87324 + if (!section || !TREE_VALUE(section))
87325 + return true;
87326 +
87327 + section = TREE_VALUE(TREE_VALUE(section));
87328 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
87329 + return true;
87330 +
87331 + return false;
87332 +}
87333 +
87334 +/*
87335 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
87336 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
87337 + */
87338 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator gsi)
87339 +{
87340 + gimple assign_intptr, assign_new_fptr, call_stmt;
87341 + tree intptr, old_fptr, new_fptr, kernexec_mask;
87342 +
87343 + call_stmt = gsi_stmt(gsi);
87344 + old_fptr = gimple_call_fn(call_stmt);
87345 +
87346 + // create temporary unsigned long variable used for bitops and cast fptr to it
87347 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
87348 + add_referenced_var(intptr);
87349 + mark_sym_for_renaming(intptr);
87350 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
87351 + gsi_insert_before(&gsi, assign_intptr, GSI_SAME_STMT);
87352 + update_stmt(assign_intptr);
87353 +
87354 + // apply logical or to temporary unsigned long and bitmask
87355 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
87356 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
87357 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
87358 + gsi_insert_before(&gsi, assign_intptr, GSI_SAME_STMT);
87359 + update_stmt(assign_intptr);
87360 +
87361 + // cast temporary unsigned long back to a temporary fptr variable
87362 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec");
87363 + add_referenced_var(new_fptr);
87364 + mark_sym_for_renaming(new_fptr);
87365 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
87366 + gsi_insert_before(&gsi, assign_new_fptr, GSI_SAME_STMT);
87367 + update_stmt(assign_new_fptr);
87368 +
87369 + // replace call stmt fn with the new fptr
87370 + gimple_call_set_fn(call_stmt, new_fptr);
87371 + update_stmt(call_stmt);
87372 +}
87373 +
87374 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator gsi)
87375 +{
87376 + gimple asm_or_stmt, call_stmt;
87377 + tree old_fptr, new_fptr, input, output;
87378 + VEC(tree, gc) *inputs = NULL;
87379 + VEC(tree, gc) *outputs = NULL;
87380 +
87381 + call_stmt = gsi_stmt(gsi);
87382 + old_fptr = gimple_call_fn(call_stmt);
87383 +
87384 + // create temporary fptr variable
87385 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
87386 + add_referenced_var(new_fptr);
87387 + mark_sym_for_renaming(new_fptr);
87388 +
87389 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
87390 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
87391 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
87392 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
87393 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
87394 + VEC_safe_push(tree, gc, inputs, input);
87395 + VEC_safe_push(tree, gc, outputs, output);
87396 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
87397 + gimple_asm_set_volatile(asm_or_stmt, true);
87398 + gsi_insert_before(&gsi, asm_or_stmt, GSI_SAME_STMT);
87399 + update_stmt(asm_or_stmt);
87400 +
87401 + // replace call stmt fn with the new fptr
87402 + gimple_call_set_fn(call_stmt, new_fptr);
87403 + update_stmt(call_stmt);
87404 +}
87405 +
87406 +/*
87407 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
87408 + */
87409 +static unsigned int execute_kernexec_fptr(void)
87410 +{
87411 + basic_block bb;
87412 + gimple_stmt_iterator gsi;
87413 +
87414 + // 1. loop through BBs and GIMPLE statements
87415 + FOR_EACH_BB(bb) {
87416 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
87417 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
87418 + tree fn;
87419 + gimple call_stmt;
87420 +
87421 + // is it a call ...
87422 + call_stmt = gsi_stmt(gsi);
87423 + if (!is_gimple_call(call_stmt))
87424 + continue;
87425 + fn = gimple_call_fn(call_stmt);
87426 + if (TREE_CODE(fn) == ADDR_EXPR)
87427 + continue;
87428 + if (TREE_CODE(fn) != SSA_NAME)
87429 + gcc_unreachable();
87430 +
87431 + // ... through a function pointer
87432 + fn = SSA_NAME_VAR(fn);
87433 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
87434 + continue;
87435 + fn = TREE_TYPE(fn);
87436 + if (TREE_CODE(fn) != POINTER_TYPE)
87437 + continue;
87438 + fn = TREE_TYPE(fn);
87439 + if (TREE_CODE(fn) != FUNCTION_TYPE)
87440 + continue;
87441 +
87442 + kernexec_instrument_fptr(gsi);
87443 +
87444 +//debug_tree(gimple_call_fn(call_stmt));
87445 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
87446 + }
87447 + }
87448 +
87449 + return 0;
87450 +}
87451 +
87452 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
87453 +static void kernexec_instrument_retaddr_bts(rtx insn)
87454 +{
87455 + rtx btsq;
87456 + rtvec argvec, constraintvec, labelvec;
87457 + int line;
87458 +
87459 + // create asm volatile("btsq $63,(%%rsp)":::)
87460 + argvec = rtvec_alloc(0);
87461 + constraintvec = rtvec_alloc(0);
87462 + labelvec = rtvec_alloc(0);
87463 + line = expand_location(RTL_LOCATION(insn)).line;
87464 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
87465 + MEM_VOLATILE_P(btsq) = 1;
87466 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
87467 + emit_insn_before(btsq, insn);
87468 +}
87469 +
87470 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
87471 +static void kernexec_instrument_retaddr_or(rtx insn)
87472 +{
87473 + rtx orq;
87474 + rtvec argvec, constraintvec, labelvec;
87475 + int line;
87476 +
87477 + // create asm volatile("orq %%r10,(%%rsp)":::)
87478 + argvec = rtvec_alloc(0);
87479 + constraintvec = rtvec_alloc(0);
87480 + labelvec = rtvec_alloc(0);
87481 + line = expand_location(RTL_LOCATION(insn)).line;
87482 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
87483 + MEM_VOLATILE_P(orq) = 1;
87484 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
87485 + emit_insn_before(orq, insn);
87486 +}
87487 +
87488 +/*
87489 + * find all asm level function returns and forcibly set the highest bit of the return address
87490 + */
87491 +static unsigned int execute_kernexec_retaddr(void)
87492 +{
87493 + rtx insn;
87494 +
87495 + // 1. find function returns
87496 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
87497 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
87498 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
87499 + rtx body;
87500 +
87501 + // is it a retn
87502 + if (!JUMP_P(insn))
87503 + continue;
87504 + body = PATTERN(insn);
87505 + if (GET_CODE(body) == PARALLEL)
87506 + body = XVECEXP(body, 0, 0);
87507 + if (GET_CODE(body) != RETURN)
87508 + continue;
87509 + kernexec_instrument_retaddr(insn);
87510 + }
87511 +
87512 +// print_simple_rtl(stderr, get_insns());
87513 +// print_rtl(stderr, get_insns());
87514 +
87515 + return 0;
87516 +}
87517 +
87518 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
87519 +{
87520 + const char * const plugin_name = plugin_info->base_name;
87521 + const int argc = plugin_info->argc;
87522 + const struct plugin_argument * const argv = plugin_info->argv;
87523 + int i;
87524 + struct register_pass_info kernexec_fptr_pass_info = {
87525 + .pass = &kernexec_fptr_pass.pass,
87526 + .reference_pass_name = "ssa",
87527 + .ref_pass_instance_number = 0,
87528 + .pos_op = PASS_POS_INSERT_AFTER
87529 + };
87530 + struct register_pass_info kernexec_retaddr_pass_info = {
87531 + .pass = &kernexec_retaddr_pass.pass,
87532 + .reference_pass_name = "pro_and_epilogue",
87533 + .ref_pass_instance_number = 0,
87534 + .pos_op = PASS_POS_INSERT_AFTER
87535 + };
87536 +
87537 + if (!plugin_default_version_check(version, &gcc_version)) {
87538 + error(G_("incompatible gcc/plugin versions"));
87539 + return 1;
87540 + }
87541 +
87542 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
87543 +
87544 + if (TARGET_64BIT == 0)
87545 + return 0;
87546 +
87547 + for (i = 0; i < argc; ++i) {
87548 + if (!strcmp(argv[i].key, "method")) {
87549 + if (!argv[i].value) {
87550 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
87551 + continue;
87552 + }
87553 + if (!strcmp(argv[i].value, "bts")) {
87554 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
87555 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
87556 + } else if (!strcmp(argv[i].value, "or")) {
87557 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
87558 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
87559 + fix_register("r10", 1, 1);
87560 + } else
87561 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
87562 + continue;
87563 + }
87564 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
87565 + }
87566 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
87567 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
87568 +
87569 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
87570 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
87571 +
87572 + return 0;
87573 +}
87574 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
87575 new file mode 100644
87576 index 0000000..d44f37c
87577 --- /dev/null
87578 +++ b/tools/gcc/stackleak_plugin.c
87579 @@ -0,0 +1,291 @@
87580 +/*
87581 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
87582 + * Licensed under the GPL v2
87583 + *
87584 + * Note: the choice of the license means that the compilation process is
87585 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
87586 + * but for the kernel it doesn't matter since it doesn't link against
87587 + * any of the gcc libraries
87588 + *
87589 + * gcc plugin to help implement various PaX features
87590 + *
87591 + * - track lowest stack pointer
87592 + *
87593 + * TODO:
87594 + * - initialize all local variables
87595 + *
87596 + * BUGS:
87597 + * - none known
87598 + */
87599 +#include "gcc-plugin.h"
87600 +#include "config.h"
87601 +#include "system.h"
87602 +#include "coretypes.h"
87603 +#include "tree.h"
87604 +#include "tree-pass.h"
87605 +#include "flags.h"
87606 +#include "intl.h"
87607 +#include "toplev.h"
87608 +#include "plugin.h"
87609 +//#include "expr.h" where are you...
87610 +#include "diagnostic.h"
87611 +#include "plugin-version.h"
87612 +#include "tm.h"
87613 +#include "function.h"
87614 +#include "basic-block.h"
87615 +#include "gimple.h"
87616 +#include "rtl.h"
87617 +#include "emit-rtl.h"
87618 +
87619 +extern void print_gimple_stmt(FILE *, gimple, int, int);
87620 +
87621 +int plugin_is_GPL_compatible;
87622 +
87623 +static int track_frame_size = -1;
87624 +static const char track_function[] = "pax_track_stack";
87625 +static const char check_function[] = "pax_check_alloca";
87626 +static bool init_locals;
87627 +
87628 +static struct plugin_info stackleak_plugin_info = {
87629 + .version = "201111150100",
87630 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
87631 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
87632 +};
87633 +
87634 +static bool gate_stackleak_track_stack(void);
87635 +static unsigned int execute_stackleak_tree_instrument(void);
87636 +static unsigned int execute_stackleak_final(void);
87637 +
87638 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
87639 + .pass = {
87640 + .type = GIMPLE_PASS,
87641 + .name = "stackleak_tree_instrument",
87642 + .gate = gate_stackleak_track_stack,
87643 + .execute = execute_stackleak_tree_instrument,
87644 + .sub = NULL,
87645 + .next = NULL,
87646 + .static_pass_number = 0,
87647 + .tv_id = TV_NONE,
87648 + .properties_required = PROP_gimple_leh | PROP_cfg,
87649 + .properties_provided = 0,
87650 + .properties_destroyed = 0,
87651 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
87652 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
87653 + }
87654 +};
87655 +
87656 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
87657 + .pass = {
87658 + .type = RTL_PASS,
87659 + .name = "stackleak_final",
87660 + .gate = gate_stackleak_track_stack,
87661 + .execute = execute_stackleak_final,
87662 + .sub = NULL,
87663 + .next = NULL,
87664 + .static_pass_number = 0,
87665 + .tv_id = TV_NONE,
87666 + .properties_required = 0,
87667 + .properties_provided = 0,
87668 + .properties_destroyed = 0,
87669 + .todo_flags_start = 0,
87670 + .todo_flags_finish = TODO_dump_func
87671 + }
87672 +};
87673 +
87674 +static bool gate_stackleak_track_stack(void)
87675 +{
87676 + return track_frame_size >= 0;
87677 +}
87678 +
87679 +static void stackleak_check_alloca(gimple_stmt_iterator gsi)
87680 +{
87681 + gimple check_alloca;
87682 + tree fndecl, fntype, alloca_size;
87683 +
87684 + // insert call to void pax_check_alloca(unsigned long size)
87685 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
87686 + fndecl = build_fn_decl(check_function, fntype);
87687 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
87688 + alloca_size = gimple_call_arg(gsi_stmt(gsi), 0);
87689 + check_alloca = gimple_build_call(fndecl, 1, alloca_size);
87690 + gsi_insert_before(&gsi, check_alloca, GSI_CONTINUE_LINKING);
87691 +}
87692 +
87693 +static void stackleak_add_instrumentation(gimple_stmt_iterator gsi)
87694 +{
87695 + gimple track_stack;
87696 + tree fndecl, fntype;
87697 +
87698 + // insert call to void pax_track_stack(void)
87699 + fntype = build_function_type_list(void_type_node, NULL_TREE);
87700 + fndecl = build_fn_decl(track_function, fntype);
87701 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
87702 + track_stack = gimple_build_call(fndecl, 0);
87703 + gsi_insert_after(&gsi, track_stack, GSI_CONTINUE_LINKING);
87704 +}
87705 +
87706 +#if BUILDING_GCC_VERSION == 4005
87707 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
87708 +{
87709 + tree fndecl;
87710 +
87711 + if (!is_gimple_call(stmt))
87712 + return false;
87713 + fndecl = gimple_call_fndecl(stmt);
87714 + if (!fndecl)
87715 + return false;
87716 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
87717 + return false;
87718 +// print_node(stderr, "pax", fndecl, 4);
87719 + return DECL_FUNCTION_CODE(fndecl) == code;
87720 +}
87721 +#endif
87722 +
87723 +static bool is_alloca(gimple stmt)
87724 +{
87725 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
87726 + return true;
87727 +
87728 +#if BUILDING_GCC_VERSION >= 4007
87729 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
87730 + return true;
87731 +#endif
87732 +
87733 + return false;
87734 +}
87735 +
87736 +static unsigned int execute_stackleak_tree_instrument(void)
87737 +{
87738 + basic_block bb, entry_bb;
87739 + bool prologue_instrumented = false;
87740 +
87741 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
87742 +
87743 + // 1. loop through BBs and GIMPLE statements
87744 + FOR_EACH_BB(bb) {
87745 + gimple_stmt_iterator gsi;
87746 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
87747 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
87748 + if (!is_alloca(gsi_stmt(gsi)))
87749 + continue;
87750 +
87751 + // 2. insert stack overflow check before each __builtin_alloca call
87752 + stackleak_check_alloca(gsi);
87753 +
87754 + // 3. insert track call after each __builtin_alloca call
87755 + stackleak_add_instrumentation(gsi);
87756 + if (bb == entry_bb)
87757 + prologue_instrumented = true;
87758 + }
87759 + }
87760 +
87761 + // 4. insert track call at the beginning
87762 + if (!prologue_instrumented) {
87763 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
87764 + if (dom_info_available_p(CDI_DOMINATORS))
87765 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
87766 + stackleak_add_instrumentation(gsi_start_bb(bb));
87767 + }
87768 +
87769 + return 0;
87770 +}
87771 +
87772 +static unsigned int execute_stackleak_final(void)
87773 +{
87774 + rtx insn;
87775 +
87776 + if (cfun->calls_alloca)
87777 + return 0;
87778 +
87779 + // keep calls only if function frame is big enough
87780 + if (get_frame_size() >= track_frame_size)
87781 + return 0;
87782 +
87783 + // 1. find pax_track_stack calls
87784 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
87785 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
87786 + rtx body;
87787 +
87788 + if (!CALL_P(insn))
87789 + continue;
87790 + body = PATTERN(insn);
87791 + if (GET_CODE(body) != CALL)
87792 + continue;
87793 + body = XEXP(body, 0);
87794 + if (GET_CODE(body) != MEM)
87795 + continue;
87796 + body = XEXP(body, 0);
87797 + if (GET_CODE(body) != SYMBOL_REF)
87798 + continue;
87799 + if (strcmp(XSTR(body, 0), track_function))
87800 + continue;
87801 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
87802 + // 2. delete call
87803 + insn = delete_insn_and_edges(insn);
87804 +#if BUILDING_GCC_VERSION >= 4007
87805 + if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
87806 + insn = delete_insn_and_edges(insn);
87807 +#endif
87808 + }
87809 +
87810 +// print_simple_rtl(stderr, get_insns());
87811 +// print_rtl(stderr, get_insns());
87812 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
87813 +
87814 + return 0;
87815 +}
87816 +
87817 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
87818 +{
87819 + const char * const plugin_name = plugin_info->base_name;
87820 + const int argc = plugin_info->argc;
87821 + const struct plugin_argument * const argv = plugin_info->argv;
87822 + int i;
87823 + struct register_pass_info stackleak_tree_instrument_pass_info = {
87824 + .pass = &stackleak_tree_instrument_pass.pass,
87825 +// .reference_pass_name = "tree_profile",
87826 + .reference_pass_name = "optimized",
87827 + .ref_pass_instance_number = 0,
87828 + .pos_op = PASS_POS_INSERT_AFTER
87829 + };
87830 + struct register_pass_info stackleak_final_pass_info = {
87831 + .pass = &stackleak_final_rtl_opt_pass.pass,
87832 + .reference_pass_name = "final",
87833 + .ref_pass_instance_number = 0,
87834 + .pos_op = PASS_POS_INSERT_BEFORE
87835 + };
87836 +
87837 + if (!plugin_default_version_check(version, &gcc_version)) {
87838 + error(G_("incompatible gcc/plugin versions"));
87839 + return 1;
87840 + }
87841 +
87842 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
87843 +
87844 + for (i = 0; i < argc; ++i) {
87845 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
87846 + if (!argv[i].value) {
87847 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
87848 + continue;
87849 + }
87850 + track_frame_size = atoi(argv[i].value);
87851 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
87852 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
87853 + continue;
87854 + }
87855 + if (!strcmp(argv[i].key, "initialize-locals")) {
87856 + if (argv[i].value) {
87857 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
87858 + continue;
87859 + }
87860 + init_locals = true;
87861 + continue;
87862 + }
87863 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
87864 + }
87865 +
87866 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
87867 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
87868 +
87869 + return 0;
87870 +}
87871 diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
87872 index 83b3dde..835bee7 100644
87873 --- a/usr/gen_init_cpio.c
87874 +++ b/usr/gen_init_cpio.c
87875 @@ -299,7 +299,7 @@ static int cpio_mkfile(const char *name, const char *location,
87876 int retval;
87877 int rc = -1;
87878 int namesize;
87879 - int i;
87880 + unsigned int i;
87881
87882 mode |= S_IFREG;
87883
87884 @@ -383,9 +383,10 @@ static char *cpio_replace_env(char *new_location)
87885 *env_var = *expanded = '\0';
87886 strncat(env_var, start + 2, end - start - 2);
87887 strncat(expanded, new_location, start - new_location);
87888 - strncat(expanded, getenv(env_var), PATH_MAX);
87889 - strncat(expanded, end + 1, PATH_MAX);
87890 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
87891 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
87892 strncpy(new_location, expanded, PATH_MAX);
87893 + new_location[PATH_MAX] = 0;
87894 } else
87895 break;
87896 }
87897 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
87898 index 4f3434f..159bc3e 100644
87899 --- a/virt/kvm/kvm_main.c
87900 +++ b/virt/kvm/kvm_main.c
87901 @@ -2494,7 +2494,7 @@ asmlinkage void kvm_handle_fault_on_reboot(void)
87902 if (kvm_rebooting)
87903 /* spin while reset goes on */
87904 while (true)
87905 - ;
87906 + cpu_relax();
87907 /* Fault while not rebooting. We want the trace. */
87908 BUG();
87909 }
87910 @@ -2714,7 +2714,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
87911 kvm_arch_vcpu_put(vcpu);
87912 }
87913
87914 -int kvm_init(void *opaque, unsigned int vcpu_size,
87915 +int kvm_init(const void *opaque, unsigned int vcpu_size,
87916 struct module *module)
87917 {
87918 int r;
87919 @@ -2767,15 +2767,17 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
87920 /* A kmem cache lets us meet the alignment requirements of fx_save. */
87921 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
87922 __alignof__(struct kvm_vcpu),
87923 - 0, NULL);
87924 + SLAB_USERCOPY, NULL);
87925 if (!kvm_vcpu_cache) {
87926 r = -ENOMEM;
87927 goto out_free_5;
87928 }
87929
87930 - kvm_chardev_ops.owner = module;
87931 - kvm_vm_fops.owner = module;
87932 - kvm_vcpu_fops.owner = module;
87933 + pax_open_kernel();
87934 + *(void **)&kvm_chardev_ops.owner = module;
87935 + *(void **)&kvm_vm_fops.owner = module;
87936 + *(void **)&kvm_vcpu_fops.owner = module;
87937 + pax_close_kernel();
87938
87939 r = misc_register(&kvm_dev);
87940 if (r) {