]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.2.2-2.6.32.53-201201062206.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-2.6.32.53-201201062206.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index e1efc40..47f0daf 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -1,15 +1,19 @@
6 *.a
7 *.aux
8 *.bin
9 +*.cis
10 *.cpio
11 *.csp
12 +*.dbg
13 *.dsp
14 *.dvi
15 *.elf
16 *.eps
17 *.fw
18 +*.gcno
19 *.gen.S
20 *.gif
21 +*.gmo
22 *.grep
23 *.grp
24 *.gz
25 @@ -38,8 +42,10 @@
26 *.tab.h
27 *.tex
28 *.ver
29 +*.vim
30 *.xml
31 *_MODULES
32 +*_reg_safe.h
33 *_vga16.c
34 *~
35 *.9
36 @@ -49,11 +55,16 @@
37 53c700_d.h
38 CVS
39 ChangeSet
40 +GPATH
41 +GRTAGS
42 +GSYMS
43 +GTAGS
44 Image
45 Kerntypes
46 Module.markers
47 Module.symvers
48 PENDING
49 +PERF*
50 SCCS
51 System.map*
52 TAGS
53 @@ -76,7 +87,11 @@ btfixupprep
54 build
55 bvmlinux
56 bzImage*
57 +capability_names.h
58 +capflags.c
59 classlist.h*
60 +clut_vga16.c
61 +common-cmds.h
62 comp*.log
63 compile.h*
64 conf
65 @@ -84,6 +99,8 @@ config
66 config-*
67 config_data.h*
68 config_data.gz*
69 +config.c
70 +config.tmp
71 conmakehash
72 consolemap_deftbl.c*
73 cpustr.h
74 @@ -97,19 +114,23 @@ elfconfig.h*
75 fixdep
76 fore200e_mkfirm
77 fore200e_pca_fw.c*
78 +gate.lds
79 gconf
80 gen-devlist
81 gen_crc32table
82 gen_init_cpio
83 genksyms
84 *_gray256.c
85 +hash
86 +hid-example
87 ihex2fw
88 ikconfig.h*
89 initramfs_data.cpio
90 +initramfs_data.cpio.bz2
91 initramfs_data.cpio.gz
92 initramfs_list
93 kallsyms
94 -kconfig
95 +kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99 @@ -127,13 +148,16 @@ machtypes.h
100 map
101 maui_boot.h
102 mconf
103 +mdp
104 miboot*
105 mk_elfconfig
106 mkboot
107 mkbugboot
108 mkcpustr
109 mkdep
110 +mkpiggy
111 mkprep
112 +mkregtable
113 mktables
114 mktree
115 modpost
116 @@ -149,6 +173,7 @@ patches*
117 pca200e.bin
118 pca200e_ecd.bin2
119 piggy.gz
120 +piggy.S
121 piggyback
122 pnmtologo
123 ppc_defs.h*
124 @@ -157,12 +182,15 @@ qconf
125 raid6altivec*.c
126 raid6int*.c
127 raid6tables.c
128 +regdb.c
129 relocs
130 +rlim_names.h
131 series
132 setup
133 setup.bin
134 setup.elf
135 sImage
136 +slabinfo
137 sm_tbl*
138 split-include
139 syscalltab.h
140 @@ -171,6 +199,7 @@ tftpboot.img
141 timeconst.h
142 times.h*
143 trix_boot.h
144 +user_constants.h
145 utsrelease.h*
146 vdso-syms.lds
147 vdso.lds
148 @@ -186,14 +215,20 @@ version.h*
149 vmlinux
150 vmlinux-*
151 vmlinux.aout
152 +vmlinux.bin.all
153 +vmlinux.bin.bz2
154 vmlinux.lds
155 +vmlinux.relocs
156 +voffset.h
157 vsyscall.lds
158 vsyscall_32.lds
159 wanxlfw.inc
160 uImage
161 unifdef
162 +utsrelease.h
163 wakeup.bin
164 wakeup.elf
165 wakeup.lds
166 zImage*
167 zconf.hash.c
168 +zoffset.h
169 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
170 index c840e7d..f4c451c 100644
171 --- a/Documentation/kernel-parameters.txt
172 +++ b/Documentation/kernel-parameters.txt
173 @@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters. It is defined in the file
174 the specified number of seconds. This is to be used if
175 your oopses keep scrolling off the screen.
176
177 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
178 + virtualization environments that don't cope well with the
179 + expand down segment used by UDEREF on X86-32 or the frequent
180 + page table updates on X86-64.
181 +
182 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
183 +
184 pcbit= [HW,ISDN]
185
186 pcd. [PARIDE]
187 diff --git a/Makefile b/Makefile
188 index 8472e43..c5792be 100644
189 --- a/Makefile
190 +++ b/Makefile
191 @@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
192
193 HOSTCC = gcc
194 HOSTCXX = g++
195 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
196 -HOSTCXXFLAGS = -O2
197 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
198 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
199 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
200
201 # Decide whether to build built-in, modular, or both.
202 # Normally, just do built-in.
203 @@ -376,8 +377,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
204 # Rules shared between *config targets and build targets
205
206 # Basic helpers built in scripts/
207 -PHONY += scripts_basic
208 -scripts_basic:
209 +PHONY += scripts_basic gcc-plugins
210 +scripts_basic: gcc-plugins
211 $(Q)$(MAKE) $(build)=scripts/basic
212
213 # To avoid any implicit rule to kick in, define an empty command.
214 @@ -403,7 +404,7 @@ endif
215 # of make so .config is not included in this case either (for *config).
216
217 no-dot-config-targets := clean mrproper distclean \
218 - cscope TAGS tags help %docs check% \
219 + cscope gtags TAGS tags help %docs check% \
220 include/linux/version.h headers_% \
221 kernelrelease kernelversion
222
223 @@ -526,6 +527,46 @@ else
224 KBUILD_CFLAGS += -O2
225 endif
226
227 +ifndef DISABLE_PAX_PLUGINS
228 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
229 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
230 +CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
231 +endif
232 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
233 +STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
234 +STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
235 +endif
236 +ifdef CONFIG_KALLOCSTAT_PLUGIN
237 +KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
238 +endif
239 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
240 +KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
241 +KERNEXEC_PLUGIN += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD)
242 +endif
243 +ifdef CONFIG_CHECKER_PLUGIN
244 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
245 +CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
246 +endif
247 +endif
248 +GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
249 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
250 +ifeq ($(KBUILD_EXTMOD),)
251 +gcc-plugins:
252 + $(Q)$(MAKE) $(build)=tools/gcc
253 +else
254 +gcc-plugins: ;
255 +endif
256 +else
257 +gcc-plugins:
258 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
259 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
260 +else
261 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
262 +endif
263 + $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
264 +endif
265 +endif
266 +
267 include $(srctree)/arch/$(SRCARCH)/Makefile
268
269 ifneq ($(CONFIG_FRAME_WARN),0)
270 @@ -647,7 +688,7 @@ export mod_strip_cmd
271
272
273 ifeq ($(KBUILD_EXTMOD),)
274 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
275 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
276
277 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
278 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
279 @@ -868,6 +909,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
280
281 # The actual objects are generated when descending,
282 # make sure no implicit rule kicks in
283 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
284 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
285
286 # Handle descending into subdirectories listed in $(vmlinux-dirs)
287 @@ -877,7 +919,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
288 # Error messages still appears in the original language
289
290 PHONY += $(vmlinux-dirs)
291 -$(vmlinux-dirs): prepare scripts
292 +$(vmlinux-dirs): gcc-plugins prepare scripts
293 $(Q)$(MAKE) $(build)=$@
294
295 # Build the kernel release string
296 @@ -986,6 +1028,7 @@ prepare0: archprepare FORCE
297 $(Q)$(MAKE) $(build)=. missing-syscalls
298
299 # All the preparing..
300 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
301 prepare: prepare0
302
303 # The asm symlink changes when $(ARCH) changes.
304 @@ -1127,6 +1170,7 @@ all: modules
305 # using awk while concatenating to the final file.
306
307 PHONY += modules
308 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
309 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
310 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
311 @$(kecho) ' Building modules, stage 2.';
312 @@ -1136,7 +1180,7 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
313
314 # Target to prepare building external modules
315 PHONY += modules_prepare
316 -modules_prepare: prepare scripts
317 +modules_prepare: gcc-plugins prepare scripts
318
319 # Target to install modules
320 PHONY += modules_install
321 @@ -1201,7 +1245,7 @@ MRPROPER_FILES += .config .config.old include/asm .version .old_version \
322 include/linux/autoconf.h include/linux/version.h \
323 include/linux/utsrelease.h \
324 include/linux/bounds.h include/asm*/asm-offsets.h \
325 - Module.symvers Module.markers tags TAGS cscope*
326 + Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
327
328 # clean - Delete most, but leave enough to build external modules
329 #
330 @@ -1245,7 +1289,7 @@ distclean: mrproper
331 @find $(srctree) $(RCS_FIND_IGNORE) \
332 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
333 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
334 - -o -name '.*.rej' -o -size 0 \
335 + -o -name '.*.rej' -o -name '*.so' -o -size 0 \
336 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
337 -type f -print | xargs rm -f
338
339 @@ -1292,6 +1336,7 @@ help:
340 @echo ' modules_prepare - Set up for building external modules'
341 @echo ' tags/TAGS - Generate tags file for editors'
342 @echo ' cscope - Generate cscope index'
343 + @echo ' gtags - Generate GNU GLOBAL index'
344 @echo ' kernelrelease - Output the release version string'
345 @echo ' kernelversion - Output the version stored in Makefile'
346 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
347 @@ -1393,6 +1438,7 @@ PHONY += $(module-dirs) modules
348 $(module-dirs): crmodverdir $(objtree)/Module.symvers
349 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
350
351 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
352 modules: $(module-dirs)
353 @$(kecho) ' Building modules, stage 2.';
354 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
355 @@ -1448,7 +1494,7 @@ endif # KBUILD_EXTMOD
356 quiet_cmd_tags = GEN $@
357 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
358
359 -tags TAGS cscope: FORCE
360 +tags TAGS cscope gtags: FORCE
361 $(call cmd,tags)
362
363 # Scripts to check various things for consistency
364 @@ -1513,17 +1559,19 @@ else
365 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
366 endif
367
368 -%.s: %.c prepare scripts FORCE
369 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
370 +%.s: %.c gcc-plugins prepare scripts FORCE
371 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
372 %.i: %.c prepare scripts FORCE
373 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
374 -%.o: %.c prepare scripts FORCE
375 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
376 +%.o: %.c gcc-plugins prepare scripts FORCE
377 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
378 %.lst: %.c prepare scripts FORCE
379 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
380 -%.s: %.S prepare scripts FORCE
381 +%.s: %.S gcc-plugins prepare scripts FORCE
382 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
383 -%.o: %.S prepare scripts FORCE
384 +%.o: %.S gcc-plugins prepare scripts FORCE
385 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
386 %.symtypes: %.c prepare scripts FORCE
387 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
388 @@ -1533,11 +1581,13 @@ endif
389 $(cmd_crmodverdir)
390 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
391 $(build)=$(build-dir)
392 -%/: prepare scripts FORCE
393 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
394 +%/: gcc-plugins prepare scripts FORCE
395 $(cmd_crmodverdir)
396 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
397 $(build)=$(build-dir)
398 -%.ko: prepare scripts FORCE
399 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
400 +%.ko: gcc-plugins prepare scripts FORCE
401 $(cmd_crmodverdir)
402 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
403 $(build)=$(build-dir) $(@:.ko=.o)
404 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
405 index 5c75c1b..c82f878 100644
406 --- a/arch/alpha/include/asm/elf.h
407 +++ b/arch/alpha/include/asm/elf.h
408 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
409
410 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
411
412 +#ifdef CONFIG_PAX_ASLR
413 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
414 +
415 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
416 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
417 +#endif
418 +
419 /* $0 is set by ld.so to a pointer to a function which might be
420 registered using atexit. This provides a mean for the dynamic
421 linker to call DT_FINI functions for shared libraries that have
422 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
423 index 3f0c59f..cf1e100 100644
424 --- a/arch/alpha/include/asm/pgtable.h
425 +++ b/arch/alpha/include/asm/pgtable.h
426 @@ -101,6 +101,17 @@ struct vm_area_struct;
427 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
428 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
429 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
430 +
431 +#ifdef CONFIG_PAX_PAGEEXEC
432 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
433 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
434 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
435 +#else
436 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
437 +# define PAGE_COPY_NOEXEC PAGE_COPY
438 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
439 +#endif
440 +
441 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
442
443 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
444 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
445 index ebc3c89..20cfa63 100644
446 --- a/arch/alpha/kernel/module.c
447 +++ b/arch/alpha/kernel/module.c
448 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
449
450 /* The small sections were sorted to the end of the segment.
451 The following should definitely cover them. */
452 - gp = (u64)me->module_core + me->core_size - 0x8000;
453 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
454 got = sechdrs[me->arch.gotsecindex].sh_addr;
455
456 for (i = 0; i < n; i++) {
457 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
458 index a94e49c..d71dd44 100644
459 --- a/arch/alpha/kernel/osf_sys.c
460 +++ b/arch/alpha/kernel/osf_sys.c
461 @@ -1172,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
462 /* At this point: (!vma || addr < vma->vm_end). */
463 if (limit - len < addr)
464 return -ENOMEM;
465 - if (!vma || addr + len <= vma->vm_start)
466 + if (check_heap_stack_gap(vma, addr, len))
467 return addr;
468 addr = vma->vm_end;
469 vma = vma->vm_next;
470 @@ -1208,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
471 merely specific addresses, but regions of memory -- perhaps
472 this feature should be incorporated into all ports? */
473
474 +#ifdef CONFIG_PAX_RANDMMAP
475 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
476 +#endif
477 +
478 if (addr) {
479 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
480 if (addr != (unsigned long) -ENOMEM)
481 @@ -1215,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
482 }
483
484 /* Next, try allocating at TASK_UNMAPPED_BASE. */
485 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
486 - len, limit);
487 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
488 +
489 if (addr != (unsigned long) -ENOMEM)
490 return addr;
491
492 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
493 index 00a31de..2ded0f2 100644
494 --- a/arch/alpha/mm/fault.c
495 +++ b/arch/alpha/mm/fault.c
496 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
497 __reload_thread(pcb);
498 }
499
500 +#ifdef CONFIG_PAX_PAGEEXEC
501 +/*
502 + * PaX: decide what to do with offenders (regs->pc = fault address)
503 + *
504 + * returns 1 when task should be killed
505 + * 2 when patched PLT trampoline was detected
506 + * 3 when unpatched PLT trampoline was detected
507 + */
508 +static int pax_handle_fetch_fault(struct pt_regs *regs)
509 +{
510 +
511 +#ifdef CONFIG_PAX_EMUPLT
512 + int err;
513 +
514 + do { /* PaX: patched PLT emulation #1 */
515 + unsigned int ldah, ldq, jmp;
516 +
517 + err = get_user(ldah, (unsigned int *)regs->pc);
518 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
519 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
520 +
521 + if (err)
522 + break;
523 +
524 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
525 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
526 + jmp == 0x6BFB0000U)
527 + {
528 + unsigned long r27, addr;
529 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
530 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
531 +
532 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
533 + err = get_user(r27, (unsigned long *)addr);
534 + if (err)
535 + break;
536 +
537 + regs->r27 = r27;
538 + regs->pc = r27;
539 + return 2;
540 + }
541 + } while (0);
542 +
543 + do { /* PaX: patched PLT emulation #2 */
544 + unsigned int ldah, lda, br;
545 +
546 + err = get_user(ldah, (unsigned int *)regs->pc);
547 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
548 + err |= get_user(br, (unsigned int *)(regs->pc+8));
549 +
550 + if (err)
551 + break;
552 +
553 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
554 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
555 + (br & 0xFFE00000U) == 0xC3E00000U)
556 + {
557 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
558 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
559 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
560 +
561 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
562 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
563 + return 2;
564 + }
565 + } while (0);
566 +
567 + do { /* PaX: unpatched PLT emulation */
568 + unsigned int br;
569 +
570 + err = get_user(br, (unsigned int *)regs->pc);
571 +
572 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
573 + unsigned int br2, ldq, nop, jmp;
574 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
575 +
576 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
577 + err = get_user(br2, (unsigned int *)addr);
578 + err |= get_user(ldq, (unsigned int *)(addr+4));
579 + err |= get_user(nop, (unsigned int *)(addr+8));
580 + err |= get_user(jmp, (unsigned int *)(addr+12));
581 + err |= get_user(resolver, (unsigned long *)(addr+16));
582 +
583 + if (err)
584 + break;
585 +
586 + if (br2 == 0xC3600000U &&
587 + ldq == 0xA77B000CU &&
588 + nop == 0x47FF041FU &&
589 + jmp == 0x6B7B0000U)
590 + {
591 + regs->r28 = regs->pc+4;
592 + regs->r27 = addr+16;
593 + regs->pc = resolver;
594 + return 3;
595 + }
596 + }
597 + } while (0);
598 +#endif
599 +
600 + return 1;
601 +}
602 +
603 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
604 +{
605 + unsigned long i;
606 +
607 + printk(KERN_ERR "PAX: bytes at PC: ");
608 + for (i = 0; i < 5; i++) {
609 + unsigned int c;
610 + if (get_user(c, (unsigned int *)pc+i))
611 + printk(KERN_CONT "???????? ");
612 + else
613 + printk(KERN_CONT "%08x ", c);
614 + }
615 + printk("\n");
616 +}
617 +#endif
618
619 /*
620 * This routine handles page faults. It determines the address,
621 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
622 good_area:
623 si_code = SEGV_ACCERR;
624 if (cause < 0) {
625 - if (!(vma->vm_flags & VM_EXEC))
626 + if (!(vma->vm_flags & VM_EXEC)) {
627 +
628 +#ifdef CONFIG_PAX_PAGEEXEC
629 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
630 + goto bad_area;
631 +
632 + up_read(&mm->mmap_sem);
633 + switch (pax_handle_fetch_fault(regs)) {
634 +
635 +#ifdef CONFIG_PAX_EMUPLT
636 + case 2:
637 + case 3:
638 + return;
639 +#endif
640 +
641 + }
642 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
643 + do_group_exit(SIGKILL);
644 +#else
645 goto bad_area;
646 +#endif
647 +
648 + }
649 } else if (!cause) {
650 /* Allow reads even for write-only mappings */
651 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
652 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
653 index 6aac3f5..265536b 100644
654 --- a/arch/arm/include/asm/elf.h
655 +++ b/arch/arm/include/asm/elf.h
656 @@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
657 the loader. We need to make sure that it is out of the way of the program
658 that it will "exec", and that there is sufficient room for the brk. */
659
660 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
661 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
662 +
663 +#ifdef CONFIG_PAX_ASLR
664 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
665 +
666 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
667 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
668 +#endif
669
670 /* When the program starts, a1 contains a pointer to a function to be
671 registered with atexit, as per the SVR4 ABI. A value of 0 means we
672 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
673 index c019949..388fdd1 100644
674 --- a/arch/arm/include/asm/kmap_types.h
675 +++ b/arch/arm/include/asm/kmap_types.h
676 @@ -19,6 +19,7 @@ enum km_type {
677 KM_SOFTIRQ0,
678 KM_SOFTIRQ1,
679 KM_L2_CACHE,
680 + KM_CLEARPAGE,
681 KM_TYPE_NR
682 };
683
684 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
685 index 1d6bd40..fba0cb9 100644
686 --- a/arch/arm/include/asm/uaccess.h
687 +++ b/arch/arm/include/asm/uaccess.h
688 @@ -22,6 +22,8 @@
689 #define VERIFY_READ 0
690 #define VERIFY_WRITE 1
691
692 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
693 +
694 /*
695 * The exception table consists of pairs of addresses: the first is the
696 * address of an instruction that is allowed to fault, and the second is
697 @@ -387,8 +389,23 @@ do { \
698
699
700 #ifdef CONFIG_MMU
701 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
702 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
703 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
704 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
705 +
706 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
707 +{
708 + if (!__builtin_constant_p(n))
709 + check_object_size(to, n, false);
710 + return ___copy_from_user(to, from, n);
711 +}
712 +
713 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
714 +{
715 + if (!__builtin_constant_p(n))
716 + check_object_size(from, n, true);
717 + return ___copy_to_user(to, from, n);
718 +}
719 +
720 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
721 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
722 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
723 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
724
725 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
726 {
727 + if ((long)n < 0)
728 + return n;
729 +
730 if (access_ok(VERIFY_READ, from, n))
731 n = __copy_from_user(to, from, n);
732 else /* security hole - plug it */
733 @@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
734
735 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
736 {
737 + if ((long)n < 0)
738 + return n;
739 +
740 if (access_ok(VERIFY_WRITE, to, n))
741 n = __copy_to_user(to, from, n);
742 return n;
743 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
744 index 0e62770..e2c2cd6 100644
745 --- a/arch/arm/kernel/armksyms.c
746 +++ b/arch/arm/kernel/armksyms.c
747 @@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
748 #ifdef CONFIG_MMU
749 EXPORT_SYMBOL(copy_page);
750
751 -EXPORT_SYMBOL(__copy_from_user);
752 -EXPORT_SYMBOL(__copy_to_user);
753 +EXPORT_SYMBOL(___copy_from_user);
754 +EXPORT_SYMBOL(___copy_to_user);
755 EXPORT_SYMBOL(__clear_user);
756
757 EXPORT_SYMBOL(__get_user_1);
758 diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
759 index ba8ccfe..2dc34dc 100644
760 --- a/arch/arm/kernel/kgdb.c
761 +++ b/arch/arm/kernel/kgdb.c
762 @@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
763 * and we handle the normal undef case within the do_undefinstr
764 * handler.
765 */
766 -struct kgdb_arch arch_kgdb_ops = {
767 +const struct kgdb_arch arch_kgdb_ops = {
768 #ifndef __ARMEB__
769 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
770 #else /* ! __ARMEB__ */
771 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
772 index 3f361a7..6e806e1 100644
773 --- a/arch/arm/kernel/traps.c
774 +++ b/arch/arm/kernel/traps.c
775 @@ -247,6 +247,8 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p
776
777 DEFINE_SPINLOCK(die_lock);
778
779 +extern void gr_handle_kernel_exploit(void);
780 +
781 /*
782 * This function is protected against re-entrancy.
783 */
784 @@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
785 if (panic_on_oops)
786 panic("Fatal exception");
787
788 + gr_handle_kernel_exploit();
789 +
790 do_exit(SIGSEGV);
791 }
792
793 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
794 index e4fe124..0fc246b 100644
795 --- a/arch/arm/lib/copy_from_user.S
796 +++ b/arch/arm/lib/copy_from_user.S
797 @@ -16,7 +16,7 @@
798 /*
799 * Prototype:
800 *
801 - * size_t __copy_from_user(void *to, const void *from, size_t n)
802 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
803 *
804 * Purpose:
805 *
806 @@ -84,11 +84,11 @@
807
808 .text
809
810 -ENTRY(__copy_from_user)
811 +ENTRY(___copy_from_user)
812
813 #include "copy_template.S"
814
815 -ENDPROC(__copy_from_user)
816 +ENDPROC(___copy_from_user)
817
818 .section .fixup,"ax"
819 .align 0
820 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
821 index 1a71e15..ac7b258 100644
822 --- a/arch/arm/lib/copy_to_user.S
823 +++ b/arch/arm/lib/copy_to_user.S
824 @@ -16,7 +16,7 @@
825 /*
826 * Prototype:
827 *
828 - * size_t __copy_to_user(void *to, const void *from, size_t n)
829 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
830 *
831 * Purpose:
832 *
833 @@ -88,11 +88,11 @@
834 .text
835
836 ENTRY(__copy_to_user_std)
837 -WEAK(__copy_to_user)
838 +WEAK(___copy_to_user)
839
840 #include "copy_template.S"
841
842 -ENDPROC(__copy_to_user)
843 +ENDPROC(___copy_to_user)
844
845 .section .fixup,"ax"
846 .align 0
847 diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
848 index ffdd274..91017b6 100644
849 --- a/arch/arm/lib/uaccess.S
850 +++ b/arch/arm/lib/uaccess.S
851 @@ -19,7 +19,7 @@
852
853 #define PAGE_SHIFT 12
854
855 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
856 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
857 * Purpose : copy a block to user memory from kernel memory
858 * Params : to - user memory
859 * : from - kernel memory
860 @@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
861 sub r2, r2, ip
862 b .Lc2u_dest_aligned
863
864 -ENTRY(__copy_to_user)
865 +ENTRY(___copy_to_user)
866 stmfd sp!, {r2, r4 - r7, lr}
867 cmp r2, #4
868 blt .Lc2u_not_enough
869 @@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fault
870 ldrgtb r3, [r1], #0
871 USER( strgtbt r3, [r0], #1) @ May fault
872 b .Lc2u_finished
873 -ENDPROC(__copy_to_user)
874 +ENDPROC(___copy_to_user)
875
876 .section .fixup,"ax"
877 .align 0
878 9001: ldmfd sp!, {r0, r4 - r7, pc}
879 .previous
880
881 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
882 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
883 * Purpose : copy a block from user memory to kernel memory
884 * Params : to - kernel memory
885 * : from - user memory
886 @@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fault
887 sub r2, r2, ip
888 b .Lcfu_dest_aligned
889
890 -ENTRY(__copy_from_user)
891 +ENTRY(___copy_from_user)
892 stmfd sp!, {r0, r2, r4 - r7, lr}
893 cmp r2, #4
894 blt .Lcfu_not_enough
895 @@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fault
896 USER( ldrgtbt r3, [r1], #1) @ May fault
897 strgtb r3, [r0], #1
898 b .Lcfu_finished
899 -ENDPROC(__copy_from_user)
900 +ENDPROC(___copy_from_user)
901
902 .section .fixup,"ax"
903 .align 0
904 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
905 index 6b967ff..67d5b2b 100644
906 --- a/arch/arm/lib/uaccess_with_memcpy.c
907 +++ b/arch/arm/lib/uaccess_with_memcpy.c
908 @@ -97,7 +97,7 @@ out:
909 }
910
911 unsigned long
912 -__copy_to_user(void __user *to, const void *from, unsigned long n)
913 +___copy_to_user(void __user *to, const void *from, unsigned long n)
914 {
915 /*
916 * This test is stubbed out of the main function above to keep
917 diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
918 index 4028724..beec230 100644
919 --- a/arch/arm/mach-at91/pm.c
920 +++ b/arch/arm/mach-at91/pm.c
921 @@ -348,7 +348,7 @@ static void at91_pm_end(void)
922 }
923
924
925 -static struct platform_suspend_ops at91_pm_ops ={
926 +static const struct platform_suspend_ops at91_pm_ops ={
927 .valid = at91_pm_valid_state,
928 .begin = at91_pm_begin,
929 .enter = at91_pm_enter,
930 diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
931 index 5218943..0a34552 100644
932 --- a/arch/arm/mach-omap1/pm.c
933 +++ b/arch/arm/mach-omap1/pm.c
934 @@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq = {
935
936
937
938 -static struct platform_suspend_ops omap_pm_ops ={
939 +static const struct platform_suspend_ops omap_pm_ops ={
940 .prepare = omap_pm_prepare,
941 .enter = omap_pm_enter,
942 .finish = omap_pm_finish,
943 diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
944 index bff5c4e..d4c649b 100644
945 --- a/arch/arm/mach-omap2/pm24xx.c
946 +++ b/arch/arm/mach-omap2/pm24xx.c
947 @@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
948 enable_hlt();
949 }
950
951 -static struct platform_suspend_ops omap_pm_ops = {
952 +static const struct platform_suspend_ops omap_pm_ops = {
953 .prepare = omap2_pm_prepare,
954 .enter = omap2_pm_enter,
955 .finish = omap2_pm_finish,
956 diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
957 index 8946319..7d3e661 100644
958 --- a/arch/arm/mach-omap2/pm34xx.c
959 +++ b/arch/arm/mach-omap2/pm34xx.c
960 @@ -401,7 +401,7 @@ static void omap3_pm_end(void)
961 return;
962 }
963
964 -static struct platform_suspend_ops omap_pm_ops = {
965 +static const struct platform_suspend_ops omap_pm_ops = {
966 .begin = omap3_pm_begin,
967 .end = omap3_pm_end,
968 .prepare = omap3_pm_prepare,
969 diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c
970 index b3d8d53..6e68ebc 100644
971 --- a/arch/arm/mach-pnx4008/pm.c
972 +++ b/arch/arm/mach-pnx4008/pm.c
973 @@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_state_t state)
974 (state == PM_SUSPEND_MEM);
975 }
976
977 -static struct platform_suspend_ops pnx4008_pm_ops = {
978 +static const struct platform_suspend_ops pnx4008_pm_ops = {
979 .enter = pnx4008_pm_enter,
980 .valid = pnx4008_pm_valid,
981 };
982 diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
983 index 7693355..9beb00a 100644
984 --- a/arch/arm/mach-pxa/pm.c
985 +++ b/arch/arm/mach-pxa/pm.c
986 @@ -95,7 +95,7 @@ void pxa_pm_finish(void)
987 pxa_cpu_pm_fns->finish();
988 }
989
990 -static struct platform_suspend_ops pxa_pm_ops = {
991 +static const struct platform_suspend_ops pxa_pm_ops = {
992 .valid = pxa_pm_valid,
993 .enter = pxa_pm_enter,
994 .prepare = pxa_pm_prepare,
995 diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
996 index 629e05d..06be589 100644
997 --- a/arch/arm/mach-pxa/sharpsl_pm.c
998 +++ b/arch/arm/mach-pxa/sharpsl_pm.c
999 @@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status(struct apm_power_info *info)
1000 }
1001
1002 #ifdef CONFIG_PM
1003 -static struct platform_suspend_ops sharpsl_pm_ops = {
1004 +static const struct platform_suspend_ops sharpsl_pm_ops = {
1005 .prepare = pxa_pm_prepare,
1006 .finish = pxa_pm_finish,
1007 .enter = corgi_pxa_pm_enter,
1008 diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
1009 index c83fdc8..ab9fc44 100644
1010 --- a/arch/arm/mach-sa1100/pm.c
1011 +++ b/arch/arm/mach-sa1100/pm.c
1012 @@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
1013 return virt_to_phys(sp);
1014 }
1015
1016 -static struct platform_suspend_ops sa11x0_pm_ops = {
1017 +static const struct platform_suspend_ops sa11x0_pm_ops = {
1018 .enter = sa11x0_pm_enter,
1019 .valid = suspend_valid_only_mem,
1020 };
1021 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1022 index 3191cd6..c0739db 100644
1023 --- a/arch/arm/mm/fault.c
1024 +++ b/arch/arm/mm/fault.c
1025 @@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1026 }
1027 #endif
1028
1029 +#ifdef CONFIG_PAX_PAGEEXEC
1030 + if (fsr & FSR_LNX_PF) {
1031 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1032 + do_group_exit(SIGKILL);
1033 + }
1034 +#endif
1035 +
1036 tsk->thread.address = addr;
1037 tsk->thread.error_code = fsr;
1038 tsk->thread.trap_no = 14;
1039 @@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1040 }
1041 #endif /* CONFIG_MMU */
1042
1043 +#ifdef CONFIG_PAX_PAGEEXEC
1044 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1045 +{
1046 + long i;
1047 +
1048 + printk(KERN_ERR "PAX: bytes at PC: ");
1049 + for (i = 0; i < 20; i++) {
1050 + unsigned char c;
1051 + if (get_user(c, (__force unsigned char __user *)pc+i))
1052 + printk(KERN_CONT "?? ");
1053 + else
1054 + printk(KERN_CONT "%02x ", c);
1055 + }
1056 + printk("\n");
1057 +
1058 + printk(KERN_ERR "PAX: bytes at SP-4: ");
1059 + for (i = -1; i < 20; i++) {
1060 + unsigned long c;
1061 + if (get_user(c, (__force unsigned long __user *)sp+i))
1062 + printk(KERN_CONT "???????? ");
1063 + else
1064 + printk(KERN_CONT "%08lx ", c);
1065 + }
1066 + printk("\n");
1067 +}
1068 +#endif
1069 +
1070 /*
1071 * First Level Translation Fault Handler
1072 *
1073 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1074 index f5abc51..7ec524c 100644
1075 --- a/arch/arm/mm/mmap.c
1076 +++ b/arch/arm/mm/mmap.c
1077 @@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1078 if (len > TASK_SIZE)
1079 return -ENOMEM;
1080
1081 +#ifdef CONFIG_PAX_RANDMMAP
1082 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1083 +#endif
1084 +
1085 if (addr) {
1086 if (do_align)
1087 addr = COLOUR_ALIGN(addr, pgoff);
1088 @@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1089 addr = PAGE_ALIGN(addr);
1090
1091 vma = find_vma(mm, addr);
1092 - if (TASK_SIZE - len >= addr &&
1093 - (!vma || addr + len <= vma->vm_start))
1094 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1095 return addr;
1096 }
1097 if (len > mm->cached_hole_size) {
1098 - start_addr = addr = mm->free_area_cache;
1099 + start_addr = addr = mm->free_area_cache;
1100 } else {
1101 - start_addr = addr = TASK_UNMAPPED_BASE;
1102 - mm->cached_hole_size = 0;
1103 + start_addr = addr = mm->mmap_base;
1104 + mm->cached_hole_size = 0;
1105 }
1106
1107 full_search:
1108 @@ -94,14 +97,14 @@ full_search:
1109 * Start a new search - just in case we missed
1110 * some holes.
1111 */
1112 - if (start_addr != TASK_UNMAPPED_BASE) {
1113 - start_addr = addr = TASK_UNMAPPED_BASE;
1114 + if (start_addr != mm->mmap_base) {
1115 + start_addr = addr = mm->mmap_base;
1116 mm->cached_hole_size = 0;
1117 goto full_search;
1118 }
1119 return -ENOMEM;
1120 }
1121 - if (!vma || addr + len <= vma->vm_start) {
1122 + if (check_heap_stack_gap(vma, addr, len)) {
1123 /*
1124 * Remember the place where we stopped the search:
1125 */
1126 diff --git a/arch/arm/plat-s3c/pm.c b/arch/arm/plat-s3c/pm.c
1127 index 8d97db2..b66cfa5 100644
1128 --- a/arch/arm/plat-s3c/pm.c
1129 +++ b/arch/arm/plat-s3c/pm.c
1130 @@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
1131 s3c_pm_check_cleanup();
1132 }
1133
1134 -static struct platform_suspend_ops s3c_pm_ops = {
1135 +static const struct platform_suspend_ops s3c_pm_ops = {
1136 .enter = s3c_pm_enter,
1137 .prepare = s3c_pm_prepare,
1138 .finish = s3c_pm_finish,
1139 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1140 index d5d1d41..856e2ed 100644
1141 --- a/arch/avr32/include/asm/elf.h
1142 +++ b/arch/avr32/include/asm/elf.h
1143 @@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1144 the loader. We need to make sure that it is out of the way of the program
1145 that it will "exec", and that there is sufficient room for the brk. */
1146
1147 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1148 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1149
1150 +#ifdef CONFIG_PAX_ASLR
1151 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1152 +
1153 +#define PAX_DELTA_MMAP_LEN 15
1154 +#define PAX_DELTA_STACK_LEN 15
1155 +#endif
1156
1157 /* This yields a mask that user programs can use to figure out what
1158 instruction set this CPU supports. This could be done in user space,
1159 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1160 index b7f5c68..556135c 100644
1161 --- a/arch/avr32/include/asm/kmap_types.h
1162 +++ b/arch/avr32/include/asm/kmap_types.h
1163 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1164 D(11) KM_IRQ1,
1165 D(12) KM_SOFTIRQ0,
1166 D(13) KM_SOFTIRQ1,
1167 -D(14) KM_TYPE_NR
1168 +D(14) KM_CLEARPAGE,
1169 +D(15) KM_TYPE_NR
1170 };
1171
1172 #undef D
1173 diff --git a/arch/avr32/mach-at32ap/pm.c b/arch/avr32/mach-at32ap/pm.c
1174 index f021edf..32d680e 100644
1175 --- a/arch/avr32/mach-at32ap/pm.c
1176 +++ b/arch/avr32/mach-at32ap/pm.c
1177 @@ -176,7 +176,7 @@ out:
1178 return 0;
1179 }
1180
1181 -static struct platform_suspend_ops avr32_pm_ops = {
1182 +static const struct platform_suspend_ops avr32_pm_ops = {
1183 .valid = avr32_pm_valid_state,
1184 .enter = avr32_pm_enter,
1185 };
1186 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1187 index b61d86d..e292c7f 100644
1188 --- a/arch/avr32/mm/fault.c
1189 +++ b/arch/avr32/mm/fault.c
1190 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1191
1192 int exception_trace = 1;
1193
1194 +#ifdef CONFIG_PAX_PAGEEXEC
1195 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1196 +{
1197 + unsigned long i;
1198 +
1199 + printk(KERN_ERR "PAX: bytes at PC: ");
1200 + for (i = 0; i < 20; i++) {
1201 + unsigned char c;
1202 + if (get_user(c, (unsigned char *)pc+i))
1203 + printk(KERN_CONT "???????? ");
1204 + else
1205 + printk(KERN_CONT "%02x ", c);
1206 + }
1207 + printk("\n");
1208 +}
1209 +#endif
1210 +
1211 /*
1212 * This routine handles page faults. It determines the address and the
1213 * problem, and then passes it off to one of the appropriate routines.
1214 @@ -157,6 +174,16 @@ bad_area:
1215 up_read(&mm->mmap_sem);
1216
1217 if (user_mode(regs)) {
1218 +
1219 +#ifdef CONFIG_PAX_PAGEEXEC
1220 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1221 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1222 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1223 + do_group_exit(SIGKILL);
1224 + }
1225 + }
1226 +#endif
1227 +
1228 if (exception_trace && printk_ratelimit())
1229 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1230 "sp %08lx ecr %lu\n",
1231 diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
1232 index cce79d0..c406c85 100644
1233 --- a/arch/blackfin/kernel/kgdb.c
1234 +++ b/arch/blackfin/kernel/kgdb.c
1235 @@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vector, int signo,
1236 return -1; /* this means that we do not want to exit from the handler */
1237 }
1238
1239 -struct kgdb_arch arch_kgdb_ops = {
1240 +const struct kgdb_arch arch_kgdb_ops = {
1241 .gdb_bpt_instr = {0xa1},
1242 #ifdef CONFIG_SMP
1243 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
1244 diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
1245 index 8837be4..b2fb413 100644
1246 --- a/arch/blackfin/mach-common/pm.c
1247 +++ b/arch/blackfin/mach-common/pm.c
1248 @@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t state)
1249 return 0;
1250 }
1251
1252 -struct platform_suspend_ops bfin_pm_ops = {
1253 +const struct platform_suspend_ops bfin_pm_ops = {
1254 .enter = bfin_pm_enter,
1255 .valid = bfin_pm_valid,
1256 };
1257 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1258 index f8e16b2..c73ff79 100644
1259 --- a/arch/frv/include/asm/kmap_types.h
1260 +++ b/arch/frv/include/asm/kmap_types.h
1261 @@ -23,6 +23,7 @@ enum km_type {
1262 KM_IRQ1,
1263 KM_SOFTIRQ0,
1264 KM_SOFTIRQ1,
1265 + KM_CLEARPAGE,
1266 KM_TYPE_NR
1267 };
1268
1269 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1270 index 385fd30..6c3d97e 100644
1271 --- a/arch/frv/mm/elf-fdpic.c
1272 +++ b/arch/frv/mm/elf-fdpic.c
1273 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1274 if (addr) {
1275 addr = PAGE_ALIGN(addr);
1276 vma = find_vma(current->mm, addr);
1277 - if (TASK_SIZE - len >= addr &&
1278 - (!vma || addr + len <= vma->vm_start))
1279 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1280 goto success;
1281 }
1282
1283 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1284 for (; vma; vma = vma->vm_next) {
1285 if (addr > limit)
1286 break;
1287 - if (addr + len <= vma->vm_start)
1288 + if (check_heap_stack_gap(vma, addr, len))
1289 goto success;
1290 addr = vma->vm_end;
1291 }
1292 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1293 for (; vma; vma = vma->vm_next) {
1294 if (addr > limit)
1295 break;
1296 - if (addr + len <= vma->vm_start)
1297 + if (check_heap_stack_gap(vma, addr, len))
1298 goto success;
1299 addr = vma->vm_end;
1300 }
1301 diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
1302 index e4a80d8..11a7ea1 100644
1303 --- a/arch/ia64/hp/common/hwsw_iommu.c
1304 +++ b/arch/ia64/hp/common/hwsw_iommu.c
1305 @@ -17,7 +17,7 @@
1306 #include <linux/swiotlb.h>
1307 #include <asm/machvec.h>
1308
1309 -extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1310 +extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1311
1312 /* swiotlb declarations & definitions: */
1313 extern int swiotlb_late_init_with_default_size (size_t size);
1314 @@ -33,7 +33,7 @@ static inline int use_swiotlb(struct device *dev)
1315 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
1316 }
1317
1318 -struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1319 +const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1320 {
1321 if (use_swiotlb(dev))
1322 return &swiotlb_dma_ops;
1323 diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
1324 index 01ae69b..35752fd 100644
1325 --- a/arch/ia64/hp/common/sba_iommu.c
1326 +++ b/arch/ia64/hp/common/sba_iommu.c
1327 @@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_driver = {
1328 },
1329 };
1330
1331 -extern struct dma_map_ops swiotlb_dma_ops;
1332 +extern const struct dma_map_ops swiotlb_dma_ops;
1333
1334 static int __init
1335 sba_init(void)
1336 @@ -2211,7 +2211,7 @@ sba_page_override(char *str)
1337
1338 __setup("sbapagesize=",sba_page_override);
1339
1340 -struct dma_map_ops sba_dma_ops = {
1341 +const struct dma_map_ops sba_dma_ops = {
1342 .alloc_coherent = sba_alloc_coherent,
1343 .free_coherent = sba_free_coherent,
1344 .map_page = sba_map_page,
1345 diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
1346 index c69552b..c7122f4 100644
1347 --- a/arch/ia64/ia32/binfmt_elf32.c
1348 +++ b/arch/ia64/ia32/binfmt_elf32.c
1349 @@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_top);
1350
1351 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
1352
1353 +#ifdef CONFIG_PAX_ASLR
1354 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1355 +
1356 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1357 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1358 +#endif
1359 +
1360 /* Ugly but avoids duplication */
1361 #include "../../../fs/binfmt_elf.c"
1362
1363 diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
1364 index 0f15349..26b3429 100644
1365 --- a/arch/ia64/ia32/ia32priv.h
1366 +++ b/arch/ia64/ia32/ia32priv.h
1367 @@ -296,7 +296,14 @@ typedef struct compat_siginfo {
1368 #define ELF_DATA ELFDATA2LSB
1369 #define ELF_ARCH EM_386
1370
1371 -#define IA32_STACK_TOP IA32_PAGE_OFFSET
1372 +#ifdef CONFIG_PAX_RANDUSTACK
1373 +#define __IA32_DELTA_STACK (current->mm->delta_stack)
1374 +#else
1375 +#define __IA32_DELTA_STACK 0UL
1376 +#endif
1377 +
1378 +#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
1379 +
1380 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
1381 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
1382
1383 diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
1384 index 8d3c79c..71b3af6 100644
1385 --- a/arch/ia64/include/asm/dma-mapping.h
1386 +++ b/arch/ia64/include/asm/dma-mapping.h
1387 @@ -12,7 +12,7 @@
1388
1389 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
1390
1391 -extern struct dma_map_ops *dma_ops;
1392 +extern const struct dma_map_ops *dma_ops;
1393 extern struct ia64_machine_vector ia64_mv;
1394 extern void set_iommu_machvec(void);
1395
1396 @@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
1397 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1398 dma_addr_t *daddr, gfp_t gfp)
1399 {
1400 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1401 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1402 void *caddr;
1403
1404 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
1405 @@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1406 static inline void dma_free_coherent(struct device *dev, size_t size,
1407 void *caddr, dma_addr_t daddr)
1408 {
1409 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1410 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1411 debug_dma_free_coherent(dev, size, caddr, daddr);
1412 ops->free_coherent(dev, size, caddr, daddr);
1413 }
1414 @@ -49,13 +49,13 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
1415
1416 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
1417 {
1418 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1419 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1420 return ops->mapping_error(dev, daddr);
1421 }
1422
1423 static inline int dma_supported(struct device *dev, u64 mask)
1424 {
1425 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1426 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1427 return ops->dma_supported(dev, mask);
1428 }
1429
1430 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
1431 index 86eddee..b116bb4 100644
1432 --- a/arch/ia64/include/asm/elf.h
1433 +++ b/arch/ia64/include/asm/elf.h
1434 @@ -43,6 +43,13 @@
1435 */
1436 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1437
1438 +#ifdef CONFIG_PAX_ASLR
1439 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1440 +
1441 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1442 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1443 +#endif
1444 +
1445 #define PT_IA_64_UNWIND 0x70000001
1446
1447 /* IA-64 relocations: */
1448 diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
1449 index 367d299..9ad4279 100644
1450 --- a/arch/ia64/include/asm/machvec.h
1451 +++ b/arch/ia64/include/asm/machvec.h
1452 @@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event_t(void);
1453 /* DMA-mapping interface: */
1454 typedef void ia64_mv_dma_init (void);
1455 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
1456 -typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1457 +typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1458
1459 /*
1460 * WARNING: The legacy I/O space is _architected_. Platforms are
1461 @@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(const char *cmdline);
1462 # endif /* CONFIG_IA64_GENERIC */
1463
1464 extern void swiotlb_dma_init(void);
1465 -extern struct dma_map_ops *dma_get_ops(struct device *);
1466 +extern const struct dma_map_ops *dma_get_ops(struct device *);
1467
1468 /*
1469 * Define default versions so we can extend machvec for new platforms without having
1470 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
1471 index 8840a69..cdb63d9 100644
1472 --- a/arch/ia64/include/asm/pgtable.h
1473 +++ b/arch/ia64/include/asm/pgtable.h
1474 @@ -12,7 +12,7 @@
1475 * David Mosberger-Tang <davidm@hpl.hp.com>
1476 */
1477
1478 -
1479 +#include <linux/const.h>
1480 #include <asm/mman.h>
1481 #include <asm/page.h>
1482 #include <asm/processor.h>
1483 @@ -143,6 +143,17 @@
1484 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1485 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1486 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1487 +
1488 +#ifdef CONFIG_PAX_PAGEEXEC
1489 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1490 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1491 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1492 +#else
1493 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1494 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1495 +# define PAGE_COPY_NOEXEC PAGE_COPY
1496 +#endif
1497 +
1498 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1499 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1500 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1501 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
1502 index 239ecdc..f94170e 100644
1503 --- a/arch/ia64/include/asm/spinlock.h
1504 +++ b/arch/ia64/include/asm/spinlock.h
1505 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
1506 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1507
1508 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1509 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1510 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1511 }
1512
1513 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
1514 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
1515 index 449c8c0..432a3d2 100644
1516 --- a/arch/ia64/include/asm/uaccess.h
1517 +++ b/arch/ia64/include/asm/uaccess.h
1518 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1519 const void *__cu_from = (from); \
1520 long __cu_len = (n); \
1521 \
1522 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
1523 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1524 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1525 __cu_len; \
1526 })
1527 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1528 long __cu_len = (n); \
1529 \
1530 __chk_user_ptr(__cu_from); \
1531 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
1532 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1533 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1534 __cu_len; \
1535 })
1536 diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
1537 index f2c1600..969398a 100644
1538 --- a/arch/ia64/kernel/dma-mapping.c
1539 +++ b/arch/ia64/kernel/dma-mapping.c
1540 @@ -3,7 +3,7 @@
1541 /* Set this to 1 if there is a HW IOMMU in the system */
1542 int iommu_detected __read_mostly;
1543
1544 -struct dma_map_ops *dma_ops;
1545 +const struct dma_map_ops *dma_ops;
1546 EXPORT_SYMBOL(dma_ops);
1547
1548 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
1549 @@ -16,7 +16,7 @@ static int __init dma_init(void)
1550 }
1551 fs_initcall(dma_init);
1552
1553 -struct dma_map_ops *dma_get_ops(struct device *dev)
1554 +const struct dma_map_ops *dma_get_ops(struct device *dev)
1555 {
1556 return dma_ops;
1557 }
1558 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
1559 index 1481b0a..e7d38ff 100644
1560 --- a/arch/ia64/kernel/module.c
1561 +++ b/arch/ia64/kernel/module.c
1562 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1563 void
1564 module_free (struct module *mod, void *module_region)
1565 {
1566 - if (mod && mod->arch.init_unw_table &&
1567 - module_region == mod->module_init) {
1568 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1569 unw_remove_unwind_table(mod->arch.init_unw_table);
1570 mod->arch.init_unw_table = NULL;
1571 }
1572 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
1573 }
1574
1575 static inline int
1576 +in_init_rx (const struct module *mod, uint64_t addr)
1577 +{
1578 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1579 +}
1580 +
1581 +static inline int
1582 +in_init_rw (const struct module *mod, uint64_t addr)
1583 +{
1584 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1585 +}
1586 +
1587 +static inline int
1588 in_init (const struct module *mod, uint64_t addr)
1589 {
1590 - return addr - (uint64_t) mod->module_init < mod->init_size;
1591 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1592 +}
1593 +
1594 +static inline int
1595 +in_core_rx (const struct module *mod, uint64_t addr)
1596 +{
1597 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1598 +}
1599 +
1600 +static inline int
1601 +in_core_rw (const struct module *mod, uint64_t addr)
1602 +{
1603 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1604 }
1605
1606 static inline int
1607 in_core (const struct module *mod, uint64_t addr)
1608 {
1609 - return addr - (uint64_t) mod->module_core < mod->core_size;
1610 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1611 }
1612
1613 static inline int
1614 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
1615 break;
1616
1617 case RV_BDREL:
1618 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1619 + if (in_init_rx(mod, val))
1620 + val -= (uint64_t) mod->module_init_rx;
1621 + else if (in_init_rw(mod, val))
1622 + val -= (uint64_t) mod->module_init_rw;
1623 + else if (in_core_rx(mod, val))
1624 + val -= (uint64_t) mod->module_core_rx;
1625 + else if (in_core_rw(mod, val))
1626 + val -= (uint64_t) mod->module_core_rw;
1627 break;
1628
1629 case RV_LTV:
1630 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
1631 * addresses have been selected...
1632 */
1633 uint64_t gp;
1634 - if (mod->core_size > MAX_LTOFF)
1635 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1636 /*
1637 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1638 * at the end of the module.
1639 */
1640 - gp = mod->core_size - MAX_LTOFF / 2;
1641 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1642 else
1643 - gp = mod->core_size / 2;
1644 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1645 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1646 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1647 mod->arch.gp = gp;
1648 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1649 }
1650 diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
1651 index f6b1ff0..de773fb 100644
1652 --- a/arch/ia64/kernel/pci-dma.c
1653 +++ b/arch/ia64/kernel/pci-dma.c
1654 @@ -43,7 +43,7 @@ struct device fallback_dev = {
1655 .dma_mask = &fallback_dev.coherent_dma_mask,
1656 };
1657
1658 -extern struct dma_map_ops intel_dma_ops;
1659 +extern const struct dma_map_ops intel_dma_ops;
1660
1661 static int __init pci_iommu_init(void)
1662 {
1663 @@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *dev, u64 mask)
1664 }
1665 EXPORT_SYMBOL(iommu_dma_supported);
1666
1667 +extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1668 +extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1669 +extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1670 +extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1671 +extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1672 +extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1673 +extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1674 +
1675 +static const struct dma_map_ops intel_iommu_dma_ops = {
1676 + /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1677 + .alloc_coherent = intel_alloc_coherent,
1678 + .free_coherent = intel_free_coherent,
1679 + .map_sg = intel_map_sg,
1680 + .unmap_sg = intel_unmap_sg,
1681 + .map_page = intel_map_page,
1682 + .unmap_page = intel_unmap_page,
1683 + .mapping_error = intel_mapping_error,
1684 +
1685 + .sync_single_for_cpu = machvec_dma_sync_single,
1686 + .sync_sg_for_cpu = machvec_dma_sync_sg,
1687 + .sync_single_for_device = machvec_dma_sync_single,
1688 + .sync_sg_for_device = machvec_dma_sync_sg,
1689 + .dma_supported = iommu_dma_supported,
1690 +};
1691 +
1692 void __init pci_iommu_alloc(void)
1693 {
1694 - dma_ops = &intel_dma_ops;
1695 -
1696 - dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1697 - dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1698 - dma_ops->sync_single_for_device = machvec_dma_sync_single;
1699 - dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1700 - dma_ops->dma_supported = iommu_dma_supported;
1701 + dma_ops = &intel_iommu_dma_ops;
1702
1703 /*
1704 * The order of these functions is important for
1705 diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
1706 index 285aae8..61dbab6 100644
1707 --- a/arch/ia64/kernel/pci-swiotlb.c
1708 +++ b/arch/ia64/kernel/pci-swiotlb.c
1709 @@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
1710 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1711 }
1712
1713 -struct dma_map_ops swiotlb_dma_ops = {
1714 +const struct dma_map_ops swiotlb_dma_ops = {
1715 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1716 .free_coherent = swiotlb_free_coherent,
1717 .map_page = swiotlb_map_page,
1718 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
1719 index 609d500..7dde2a8 100644
1720 --- a/arch/ia64/kernel/sys_ia64.c
1721 +++ b/arch/ia64/kernel/sys_ia64.c
1722 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1723 if (REGION_NUMBER(addr) == RGN_HPAGE)
1724 addr = 0;
1725 #endif
1726 +
1727 +#ifdef CONFIG_PAX_RANDMMAP
1728 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1729 + addr = mm->free_area_cache;
1730 + else
1731 +#endif
1732 +
1733 if (!addr)
1734 addr = mm->free_area_cache;
1735
1736 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1737 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1738 /* At this point: (!vma || addr < vma->vm_end). */
1739 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1740 - if (start_addr != TASK_UNMAPPED_BASE) {
1741 + if (start_addr != mm->mmap_base) {
1742 /* Start a new search --- just in case we missed some holes. */
1743 - addr = TASK_UNMAPPED_BASE;
1744 + addr = mm->mmap_base;
1745 goto full_search;
1746 }
1747 return -ENOMEM;
1748 }
1749 - if (!vma || addr + len <= vma->vm_start) {
1750 + if (check_heap_stack_gap(vma, addr, len)) {
1751 /* Remember the address where we stopped this search: */
1752 mm->free_area_cache = addr + len;
1753 return addr;
1754 diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
1755 index 8f06035..b3a5818 100644
1756 --- a/arch/ia64/kernel/topology.c
1757 +++ b/arch/ia64/kernel/topology.c
1758 @@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char *
1759 return ret;
1760 }
1761
1762 -static struct sysfs_ops cache_sysfs_ops = {
1763 +static const struct sysfs_ops cache_sysfs_ops = {
1764 .show = cache_show
1765 };
1766
1767 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
1768 index 0a0c77b..8e55a81 100644
1769 --- a/arch/ia64/kernel/vmlinux.lds.S
1770 +++ b/arch/ia64/kernel/vmlinux.lds.S
1771 @@ -190,7 +190,7 @@ SECTIONS
1772 /* Per-cpu data: */
1773 . = ALIGN(PERCPU_PAGE_SIZE);
1774 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1775 - __phys_per_cpu_start = __per_cpu_load;
1776 + __phys_per_cpu_start = per_cpu_load;
1777 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1778 * into percpu page size
1779 */
1780 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
1781 index 19261a9..1611b7a 100644
1782 --- a/arch/ia64/mm/fault.c
1783 +++ b/arch/ia64/mm/fault.c
1784 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
1785 return pte_present(pte);
1786 }
1787
1788 +#ifdef CONFIG_PAX_PAGEEXEC
1789 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1790 +{
1791 + unsigned long i;
1792 +
1793 + printk(KERN_ERR "PAX: bytes at PC: ");
1794 + for (i = 0; i < 8; i++) {
1795 + unsigned int c;
1796 + if (get_user(c, (unsigned int *)pc+i))
1797 + printk(KERN_CONT "???????? ");
1798 + else
1799 + printk(KERN_CONT "%08x ", c);
1800 + }
1801 + printk("\n");
1802 +}
1803 +#endif
1804 +
1805 void __kprobes
1806 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1807 {
1808 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
1809 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1810 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1811
1812 - if ((vma->vm_flags & mask) != mask)
1813 + if ((vma->vm_flags & mask) != mask) {
1814 +
1815 +#ifdef CONFIG_PAX_PAGEEXEC
1816 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1817 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1818 + goto bad_area;
1819 +
1820 + up_read(&mm->mmap_sem);
1821 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1822 + do_group_exit(SIGKILL);
1823 + }
1824 +#endif
1825 +
1826 goto bad_area;
1827
1828 + }
1829 +
1830 survive:
1831 /*
1832 * If for any reason at all we couldn't handle the fault, make
1833 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
1834 index b0f6157..a082bbc 100644
1835 --- a/arch/ia64/mm/hugetlbpage.c
1836 +++ b/arch/ia64/mm/hugetlbpage.c
1837 @@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
1838 /* At this point: (!vmm || addr < vmm->vm_end). */
1839 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1840 return -ENOMEM;
1841 - if (!vmm || (addr + len) <= vmm->vm_start)
1842 + if (check_heap_stack_gap(vmm, addr, len))
1843 return addr;
1844 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1845 }
1846 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
1847 index 1857766..05cc6a3 100644
1848 --- a/arch/ia64/mm/init.c
1849 +++ b/arch/ia64/mm/init.c
1850 @@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1851 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1852 vma->vm_end = vma->vm_start + PAGE_SIZE;
1853 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1854 +
1855 +#ifdef CONFIG_PAX_PAGEEXEC
1856 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1857 + vma->vm_flags &= ~VM_EXEC;
1858 +
1859 +#ifdef CONFIG_PAX_MPROTECT
1860 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1861 + vma->vm_flags &= ~VM_MAYEXEC;
1862 +#endif
1863 +
1864 + }
1865 +#endif
1866 +
1867 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1868 down_write(&current->mm->mmap_sem);
1869 if (insert_vm_struct(current->mm, vma)) {
1870 diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
1871 index 98b6849..8046766 100644
1872 --- a/arch/ia64/sn/pci/pci_dma.c
1873 +++ b/arch/ia64/sn/pci/pci_dma.c
1874 @@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
1875 return ret;
1876 }
1877
1878 -static struct dma_map_ops sn_dma_ops = {
1879 +static const struct dma_map_ops sn_dma_ops = {
1880 .alloc_coherent = sn_dma_alloc_coherent,
1881 .free_coherent = sn_dma_free_coherent,
1882 .map_page = sn_dma_map_page,
1883 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
1884 index 82abd15..d95ae5d 100644
1885 --- a/arch/m32r/lib/usercopy.c
1886 +++ b/arch/m32r/lib/usercopy.c
1887 @@ -14,6 +14,9 @@
1888 unsigned long
1889 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1890 {
1891 + if ((long)n < 0)
1892 + return n;
1893 +
1894 prefetch(from);
1895 if (access_ok(VERIFY_WRITE, to, n))
1896 __copy_user(to,from,n);
1897 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1898 unsigned long
1899 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1900 {
1901 + if ((long)n < 0)
1902 + return n;
1903 +
1904 prefetchw(to);
1905 if (access_ok(VERIFY_READ, from, n))
1906 __copy_user_zeroing(to,from,n);
1907 diff --git a/arch/mips/Makefile b/arch/mips/Makefile
1908 index 77f5021..2b1db8a 100644
1909 --- a/arch/mips/Makefile
1910 +++ b/arch/mips/Makefile
1911 @@ -51,6 +51,8 @@ endif
1912 cflags-y := -ffunction-sections
1913 cflags-y += $(call cc-option, -mno-check-zero-division)
1914
1915 +cflags-y += -Wno-sign-compare -Wno-extra
1916 +
1917 ifdef CONFIG_32BIT
1918 ld-emul = $(32bit-emul)
1919 vmlinux-32 = vmlinux
1920 diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
1921 index 632f986..fd0378d 100644
1922 --- a/arch/mips/alchemy/devboards/pm.c
1923 +++ b/arch/mips/alchemy/devboards/pm.c
1924 @@ -78,7 +78,7 @@ static void db1x_pm_end(void)
1925
1926 }
1927
1928 -static struct platform_suspend_ops db1x_pm_ops = {
1929 +static const struct platform_suspend_ops db1x_pm_ops = {
1930 .valid = suspend_valid_only_mem,
1931 .begin = db1x_pm_begin,
1932 .enter = db1x_pm_enter,
1933 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
1934 index 7990694..4e93acf 100644
1935 --- a/arch/mips/include/asm/elf.h
1936 +++ b/arch/mips/include/asm/elf.h
1937 @@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
1938 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1939 #endif
1940
1941 +#ifdef CONFIG_PAX_ASLR
1942 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1943 +
1944 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1945 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1946 +#endif
1947 +
1948 #endif /* _ASM_ELF_H */
1949 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
1950 index f266295..627cfff 100644
1951 --- a/arch/mips/include/asm/page.h
1952 +++ b/arch/mips/include/asm/page.h
1953 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
1954 #ifdef CONFIG_CPU_MIPS32
1955 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1956 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1957 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1958 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1959 #else
1960 typedef struct { unsigned long long pte; } pte_t;
1961 #define pte_val(x) ((x).pte)
1962 diff --git a/arch/mips/include/asm/reboot.h b/arch/mips/include/asm/reboot.h
1963 index e48c0bf..f3acf65 100644
1964 --- a/arch/mips/include/asm/reboot.h
1965 +++ b/arch/mips/include/asm/reboot.h
1966 @@ -9,7 +9,7 @@
1967 #ifndef _ASM_REBOOT_H
1968 #define _ASM_REBOOT_H
1969
1970 -extern void (*_machine_restart)(char *command);
1971 -extern void (*_machine_halt)(void);
1972 +extern void (*__noreturn _machine_restart)(char *command);
1973 +extern void (*__noreturn _machine_halt)(void);
1974
1975 #endif /* _ASM_REBOOT_H */
1976 diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
1977 index 83b5509..9fa24a23 100644
1978 --- a/arch/mips/include/asm/system.h
1979 +++ b/arch/mips/include/asm/system.h
1980 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1981 */
1982 #define __ARCH_WANT_UNLOCKED_CTXSW
1983
1984 -extern unsigned long arch_align_stack(unsigned long sp);
1985 +#define arch_align_stack(x) ((x) & ~0xfUL)
1986
1987 #endif /* _ASM_SYSTEM_H */
1988 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
1989 index 9fdd8bc..fcf9d68 100644
1990 --- a/arch/mips/kernel/binfmt_elfn32.c
1991 +++ b/arch/mips/kernel/binfmt_elfn32.c
1992 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1993 #undef ELF_ET_DYN_BASE
1994 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1995
1996 +#ifdef CONFIG_PAX_ASLR
1997 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1998 +
1999 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2000 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2001 +#endif
2002 +
2003 #include <asm/processor.h>
2004 #include <linux/module.h>
2005 #include <linux/elfcore.h>
2006 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2007 index ff44823..cf0b48a 100644
2008 --- a/arch/mips/kernel/binfmt_elfo32.c
2009 +++ b/arch/mips/kernel/binfmt_elfo32.c
2010 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2011 #undef ELF_ET_DYN_BASE
2012 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2013
2014 +#ifdef CONFIG_PAX_ASLR
2015 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2016 +
2017 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2018 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2019 +#endif
2020 +
2021 #include <asm/processor.h>
2022
2023 /*
2024 diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
2025 index 50c9bb8..efdd5f8 100644
2026 --- a/arch/mips/kernel/kgdb.c
2027 +++ b/arch/mips/kernel/kgdb.c
2028 @@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
2029 return -1;
2030 }
2031
2032 +/* cannot be const */
2033 struct kgdb_arch arch_kgdb_ops;
2034
2035 /*
2036 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2037 index f3d73e1..bb3f57a 100644
2038 --- a/arch/mips/kernel/process.c
2039 +++ b/arch/mips/kernel/process.c
2040 @@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_struct *task)
2041 out:
2042 return pc;
2043 }
2044 -
2045 -/*
2046 - * Don't forget that the stack pointer must be aligned on a 8 bytes
2047 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2048 - */
2049 -unsigned long arch_align_stack(unsigned long sp)
2050 -{
2051 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2052 - sp -= get_random_int() & ~PAGE_MASK;
2053 -
2054 - return sp & ALMASK;
2055 -}
2056 diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
2057 index 060563a..7fbf310 100644
2058 --- a/arch/mips/kernel/reset.c
2059 +++ b/arch/mips/kernel/reset.c
2060 @@ -19,8 +19,8 @@
2061 * So handle all using function pointers to machine specific
2062 * functions.
2063 */
2064 -void (*_machine_restart)(char *command);
2065 -void (*_machine_halt)(void);
2066 +void (*__noreturn _machine_restart)(char *command);
2067 +void (*__noreturn _machine_halt)(void);
2068 void (*pm_power_off)(void);
2069
2070 EXPORT_SYMBOL(pm_power_off);
2071 @@ -29,16 +29,19 @@ void machine_restart(char *command)
2072 {
2073 if (_machine_restart)
2074 _machine_restart(command);
2075 + BUG();
2076 }
2077
2078 void machine_halt(void)
2079 {
2080 if (_machine_halt)
2081 _machine_halt();
2082 + BUG();
2083 }
2084
2085 void machine_power_off(void)
2086 {
2087 if (pm_power_off)
2088 pm_power_off();
2089 + BUG();
2090 }
2091 diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
2092 index 3f7f466..3abe0b5 100644
2093 --- a/arch/mips/kernel/syscall.c
2094 +++ b/arch/mips/kernel/syscall.c
2095 @@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2096 do_color_align = 0;
2097 if (filp || (flags & MAP_SHARED))
2098 do_color_align = 1;
2099 +
2100 +#ifdef CONFIG_PAX_RANDMMAP
2101 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
2102 +#endif
2103 +
2104 if (addr) {
2105 if (do_color_align)
2106 addr = COLOUR_ALIGN(addr, pgoff);
2107 else
2108 addr = PAGE_ALIGN(addr);
2109 vmm = find_vma(current->mm, addr);
2110 - if (task_size - len >= addr &&
2111 - (!vmm || addr + len <= vmm->vm_start))
2112 + if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
2113 return addr;
2114 }
2115 - addr = TASK_UNMAPPED_BASE;
2116 + addr = current->mm->mmap_base;
2117 if (do_color_align)
2118 addr = COLOUR_ALIGN(addr, pgoff);
2119 else
2120 @@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2121 /* At this point: (!vmm || addr < vmm->vm_end). */
2122 if (task_size - len < addr)
2123 return -ENOMEM;
2124 - if (!vmm || addr + len <= vmm->vm_start)
2125 + if (check_heap_stack_gap(vmm, addr, len))
2126 return addr;
2127 addr = vmm->vm_end;
2128 if (do_color_align)
2129 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
2130 index e97a7a2..f18f5b0 100644
2131 --- a/arch/mips/mm/fault.c
2132 +++ b/arch/mips/mm/fault.c
2133 @@ -26,6 +26,23 @@
2134 #include <asm/ptrace.h>
2135 #include <asm/highmem.h> /* For VMALLOC_END */
2136
2137 +#ifdef CONFIG_PAX_PAGEEXEC
2138 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2139 +{
2140 + unsigned long i;
2141 +
2142 + printk(KERN_ERR "PAX: bytes at PC: ");
2143 + for (i = 0; i < 5; i++) {
2144 + unsigned int c;
2145 + if (get_user(c, (unsigned int *)pc+i))
2146 + printk(KERN_CONT "???????? ");
2147 + else
2148 + printk(KERN_CONT "%08x ", c);
2149 + }
2150 + printk("\n");
2151 +}
2152 +#endif
2153 +
2154 /*
2155 * This routine handles page faults. It determines the address,
2156 * and the problem, and then passes it off to one of the appropriate
2157 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
2158 index 9c802eb..0592e41 100644
2159 --- a/arch/parisc/include/asm/elf.h
2160 +++ b/arch/parisc/include/asm/elf.h
2161 @@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration... */
2162
2163 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
2164
2165 +#ifdef CONFIG_PAX_ASLR
2166 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
2167 +
2168 +#define PAX_DELTA_MMAP_LEN 16
2169 +#define PAX_DELTA_STACK_LEN 16
2170 +#endif
2171 +
2172 /* This yields a mask that user programs can use to figure out what
2173 instruction set this CPU supports. This could be done in user space,
2174 but it's not easy, and we've already done it here. */
2175 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
2176 index a27d2e2..18fd845 100644
2177 --- a/arch/parisc/include/asm/pgtable.h
2178 +++ b/arch/parisc/include/asm/pgtable.h
2179 @@ -207,6 +207,17 @@
2180 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
2181 #define PAGE_COPY PAGE_EXECREAD
2182 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
2183 +
2184 +#ifdef CONFIG_PAX_PAGEEXEC
2185 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
2186 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2187 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2188 +#else
2189 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
2190 +# define PAGE_COPY_NOEXEC PAGE_COPY
2191 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
2192 +#endif
2193 +
2194 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
2195 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
2196 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
2197 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
2198 index 2120746..8d70a5e 100644
2199 --- a/arch/parisc/kernel/module.c
2200 +++ b/arch/parisc/kernel/module.c
2201 @@ -95,16 +95,38 @@
2202
2203 /* three functions to determine where in the module core
2204 * or init pieces the location is */
2205 +static inline int in_init_rx(struct module *me, void *loc)
2206 +{
2207 + return (loc >= me->module_init_rx &&
2208 + loc < (me->module_init_rx + me->init_size_rx));
2209 +}
2210 +
2211 +static inline int in_init_rw(struct module *me, void *loc)
2212 +{
2213 + return (loc >= me->module_init_rw &&
2214 + loc < (me->module_init_rw + me->init_size_rw));
2215 +}
2216 +
2217 static inline int in_init(struct module *me, void *loc)
2218 {
2219 - return (loc >= me->module_init &&
2220 - loc <= (me->module_init + me->init_size));
2221 + return in_init_rx(me, loc) || in_init_rw(me, loc);
2222 +}
2223 +
2224 +static inline int in_core_rx(struct module *me, void *loc)
2225 +{
2226 + return (loc >= me->module_core_rx &&
2227 + loc < (me->module_core_rx + me->core_size_rx));
2228 +}
2229 +
2230 +static inline int in_core_rw(struct module *me, void *loc)
2231 +{
2232 + return (loc >= me->module_core_rw &&
2233 + loc < (me->module_core_rw + me->core_size_rw));
2234 }
2235
2236 static inline int in_core(struct module *me, void *loc)
2237 {
2238 - return (loc >= me->module_core &&
2239 - loc <= (me->module_core + me->core_size));
2240 + return in_core_rx(me, loc) || in_core_rw(me, loc);
2241 }
2242
2243 static inline int in_local(struct module *me, void *loc)
2244 @@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
2245 }
2246
2247 /* align things a bit */
2248 - me->core_size = ALIGN(me->core_size, 16);
2249 - me->arch.got_offset = me->core_size;
2250 - me->core_size += gots * sizeof(struct got_entry);
2251 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
2252 + me->arch.got_offset = me->core_size_rw;
2253 + me->core_size_rw += gots * sizeof(struct got_entry);
2254
2255 - me->core_size = ALIGN(me->core_size, 16);
2256 - me->arch.fdesc_offset = me->core_size;
2257 - me->core_size += fdescs * sizeof(Elf_Fdesc);
2258 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
2259 + me->arch.fdesc_offset = me->core_size_rw;
2260 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
2261
2262 me->arch.got_max = gots;
2263 me->arch.fdesc_max = fdescs;
2264 @@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
2265
2266 BUG_ON(value == 0);
2267
2268 - got = me->module_core + me->arch.got_offset;
2269 + got = me->module_core_rw + me->arch.got_offset;
2270 for (i = 0; got[i].addr; i++)
2271 if (got[i].addr == value)
2272 goto out;
2273 @@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
2274 #ifdef CONFIG_64BIT
2275 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2276 {
2277 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
2278 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
2279
2280 if (!value) {
2281 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
2282 @@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2283
2284 /* Create new one */
2285 fdesc->addr = value;
2286 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2287 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2288 return (Elf_Addr)fdesc;
2289 }
2290 #endif /* CONFIG_64BIT */
2291 @@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
2292
2293 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
2294 end = table + sechdrs[me->arch.unwind_section].sh_size;
2295 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2296 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2297
2298 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
2299 me->arch.unwind_section, table, end, gp);
2300 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
2301 index 9147391..f3d949a 100644
2302 --- a/arch/parisc/kernel/sys_parisc.c
2303 +++ b/arch/parisc/kernel/sys_parisc.c
2304 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
2305 /* At this point: (!vma || addr < vma->vm_end). */
2306 if (TASK_SIZE - len < addr)
2307 return -ENOMEM;
2308 - if (!vma || addr + len <= vma->vm_start)
2309 + if (check_heap_stack_gap(vma, addr, len))
2310 return addr;
2311 addr = vma->vm_end;
2312 }
2313 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
2314 /* At this point: (!vma || addr < vma->vm_end). */
2315 if (TASK_SIZE - len < addr)
2316 return -ENOMEM;
2317 - if (!vma || addr + len <= vma->vm_start)
2318 + if (check_heap_stack_gap(vma, addr, len))
2319 return addr;
2320 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
2321 if (addr < vma->vm_end) /* handle wraparound */
2322 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2323 if (flags & MAP_FIXED)
2324 return addr;
2325 if (!addr)
2326 - addr = TASK_UNMAPPED_BASE;
2327 + addr = current->mm->mmap_base;
2328
2329 if (filp) {
2330 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
2331 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
2332 index 8b58bf0..7afff03 100644
2333 --- a/arch/parisc/kernel/traps.c
2334 +++ b/arch/parisc/kernel/traps.c
2335 @@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
2336
2337 down_read(&current->mm->mmap_sem);
2338 vma = find_vma(current->mm,regs->iaoq[0]);
2339 - if (vma && (regs->iaoq[0] >= vma->vm_start)
2340 - && (vma->vm_flags & VM_EXEC)) {
2341 -
2342 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
2343 fault_address = regs->iaoq[0];
2344 fault_space = regs->iasq[0];
2345
2346 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
2347 index c6afbfc..c5839f6 100644
2348 --- a/arch/parisc/mm/fault.c
2349 +++ b/arch/parisc/mm/fault.c
2350 @@ -15,6 +15,7 @@
2351 #include <linux/sched.h>
2352 #include <linux/interrupt.h>
2353 #include <linux/module.h>
2354 +#include <linux/unistd.h>
2355
2356 #include <asm/uaccess.h>
2357 #include <asm/traps.h>
2358 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
2359 static unsigned long
2360 parisc_acctyp(unsigned long code, unsigned int inst)
2361 {
2362 - if (code == 6 || code == 16)
2363 + if (code == 6 || code == 7 || code == 16)
2364 return VM_EXEC;
2365
2366 switch (inst & 0xf0000000) {
2367 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
2368 }
2369 #endif
2370
2371 +#ifdef CONFIG_PAX_PAGEEXEC
2372 +/*
2373 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
2374 + *
2375 + * returns 1 when task should be killed
2376 + * 2 when rt_sigreturn trampoline was detected
2377 + * 3 when unpatched PLT trampoline was detected
2378 + */
2379 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2380 +{
2381 +
2382 +#ifdef CONFIG_PAX_EMUPLT
2383 + int err;
2384 +
2385 + do { /* PaX: unpatched PLT emulation */
2386 + unsigned int bl, depwi;
2387 +
2388 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
2389 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
2390 +
2391 + if (err)
2392 + break;
2393 +
2394 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
2395 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
2396 +
2397 + err = get_user(ldw, (unsigned int *)addr);
2398 + err |= get_user(bv, (unsigned int *)(addr+4));
2399 + err |= get_user(ldw2, (unsigned int *)(addr+8));
2400 +
2401 + if (err)
2402 + break;
2403 +
2404 + if (ldw == 0x0E801096U &&
2405 + bv == 0xEAC0C000U &&
2406 + ldw2 == 0x0E881095U)
2407 + {
2408 + unsigned int resolver, map;
2409 +
2410 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
2411 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
2412 + if (err)
2413 + break;
2414 +
2415 + regs->gr[20] = instruction_pointer(regs)+8;
2416 + regs->gr[21] = map;
2417 + regs->gr[22] = resolver;
2418 + regs->iaoq[0] = resolver | 3UL;
2419 + regs->iaoq[1] = regs->iaoq[0] + 4;
2420 + return 3;
2421 + }
2422 + }
2423 + } while (0);
2424 +#endif
2425 +
2426 +#ifdef CONFIG_PAX_EMUTRAMP
2427 +
2428 +#ifndef CONFIG_PAX_EMUSIGRT
2429 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
2430 + return 1;
2431 +#endif
2432 +
2433 + do { /* PaX: rt_sigreturn emulation */
2434 + unsigned int ldi1, ldi2, bel, nop;
2435 +
2436 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
2437 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
2438 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
2439 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
2440 +
2441 + if (err)
2442 + break;
2443 +
2444 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
2445 + ldi2 == 0x3414015AU &&
2446 + bel == 0xE4008200U &&
2447 + nop == 0x08000240U)
2448 + {
2449 + regs->gr[25] = (ldi1 & 2) >> 1;
2450 + regs->gr[20] = __NR_rt_sigreturn;
2451 + regs->gr[31] = regs->iaoq[1] + 16;
2452 + regs->sr[0] = regs->iasq[1];
2453 + regs->iaoq[0] = 0x100UL;
2454 + regs->iaoq[1] = regs->iaoq[0] + 4;
2455 + regs->iasq[0] = regs->sr[2];
2456 + regs->iasq[1] = regs->sr[2];
2457 + return 2;
2458 + }
2459 + } while (0);
2460 +#endif
2461 +
2462 + return 1;
2463 +}
2464 +
2465 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2466 +{
2467 + unsigned long i;
2468 +
2469 + printk(KERN_ERR "PAX: bytes at PC: ");
2470 + for (i = 0; i < 5; i++) {
2471 + unsigned int c;
2472 + if (get_user(c, (unsigned int *)pc+i))
2473 + printk(KERN_CONT "???????? ");
2474 + else
2475 + printk(KERN_CONT "%08x ", c);
2476 + }
2477 + printk("\n");
2478 +}
2479 +#endif
2480 +
2481 int fixup_exception(struct pt_regs *regs)
2482 {
2483 const struct exception_table_entry *fix;
2484 @@ -192,8 +303,33 @@ good_area:
2485
2486 acc_type = parisc_acctyp(code,regs->iir);
2487
2488 - if ((vma->vm_flags & acc_type) != acc_type)
2489 + if ((vma->vm_flags & acc_type) != acc_type) {
2490 +
2491 +#ifdef CONFIG_PAX_PAGEEXEC
2492 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2493 + (address & ~3UL) == instruction_pointer(regs))
2494 + {
2495 + up_read(&mm->mmap_sem);
2496 + switch (pax_handle_fetch_fault(regs)) {
2497 +
2498 +#ifdef CONFIG_PAX_EMUPLT
2499 + case 3:
2500 + return;
2501 +#endif
2502 +
2503 +#ifdef CONFIG_PAX_EMUTRAMP
2504 + case 2:
2505 + return;
2506 +#endif
2507 +
2508 + }
2509 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2510 + do_group_exit(SIGKILL);
2511 + }
2512 +#endif
2513 +
2514 goto bad_area;
2515 + }
2516
2517 /*
2518 * If for any reason at all we couldn't handle the fault, make
2519 diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
2520 index c107b74..409dc0f 100644
2521 --- a/arch/powerpc/Makefile
2522 +++ b/arch/powerpc/Makefile
2523 @@ -74,6 +74,8 @@ KBUILD_AFLAGS += -Iarch/$(ARCH)
2524 KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
2525 CPP = $(CC) -E $(KBUILD_CFLAGS)
2526
2527 +cflags-y += -Wno-sign-compare -Wno-extra
2528 +
2529 CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
2530
2531 ifeq ($(CONFIG_PPC64),y)
2532 diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
2533 index 6d94d27..50d4cad 100644
2534 --- a/arch/powerpc/include/asm/device.h
2535 +++ b/arch/powerpc/include/asm/device.h
2536 @@ -14,7 +14,7 @@ struct dev_archdata {
2537 struct device_node *of_node;
2538
2539 /* DMA operations on that device */
2540 - struct dma_map_ops *dma_ops;
2541 + const struct dma_map_ops *dma_ops;
2542
2543 /*
2544 * When an iommu is in use, dma_data is used as a ptr to the base of the
2545 diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
2546 index e281dae..2b8a784 100644
2547 --- a/arch/powerpc/include/asm/dma-mapping.h
2548 +++ b/arch/powerpc/include/asm/dma-mapping.h
2549 @@ -69,9 +69,9 @@ static inline unsigned long device_to_mask(struct device *dev)
2550 #ifdef CONFIG_PPC64
2551 extern struct dma_map_ops dma_iommu_ops;
2552 #endif
2553 -extern struct dma_map_ops dma_direct_ops;
2554 +extern const struct dma_map_ops dma_direct_ops;
2555
2556 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2557 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
2558 {
2559 /* We don't handle the NULL dev case for ISA for now. We could
2560 * do it via an out of line call but it is not needed for now. The
2561 @@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2562 return dev->archdata.dma_ops;
2563 }
2564
2565 -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
2566 +static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
2567 {
2568 dev->archdata.dma_ops = ops;
2569 }
2570 @@ -118,7 +118,7 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
2571
2572 static inline int dma_supported(struct device *dev, u64 mask)
2573 {
2574 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2575 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2576
2577 if (unlikely(dma_ops == NULL))
2578 return 0;
2579 @@ -132,7 +132,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
2580
2581 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2582 {
2583 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2584 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2585
2586 if (unlikely(dma_ops == NULL))
2587 return -EIO;
2588 @@ -147,7 +147,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2589 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2590 dma_addr_t *dma_handle, gfp_t flag)
2591 {
2592 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2593 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2594 void *cpu_addr;
2595
2596 BUG_ON(!dma_ops);
2597 @@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2598 static inline void dma_free_coherent(struct device *dev, size_t size,
2599 void *cpu_addr, dma_addr_t dma_handle)
2600 {
2601 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2602 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2603
2604 BUG_ON(!dma_ops);
2605
2606 @@ -173,7 +173,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
2607
2608 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2609 {
2610 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2611 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2612
2613 if (dma_ops->mapping_error)
2614 return dma_ops->mapping_error(dev, dma_addr);
2615 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
2616 index 5698502..5db093c 100644
2617 --- a/arch/powerpc/include/asm/elf.h
2618 +++ b/arch/powerpc/include/asm/elf.h
2619 @@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
2620 the loader. We need to make sure that it is out of the way of the program
2621 that it will "exec", and that there is sufficient room for the brk. */
2622
2623 -extern unsigned long randomize_et_dyn(unsigned long base);
2624 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2625 +#define ELF_ET_DYN_BASE (0x20000000)
2626 +
2627 +#ifdef CONFIG_PAX_ASLR
2628 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2629 +
2630 +#ifdef __powerpc64__
2631 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2632 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2633 +#else
2634 +#define PAX_DELTA_MMAP_LEN 15
2635 +#define PAX_DELTA_STACK_LEN 15
2636 +#endif
2637 +#endif
2638
2639 /*
2640 * Our registers are always unsigned longs, whether we're a 32 bit
2641 @@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2642 (0x7ff >> (PAGE_SHIFT - 12)) : \
2643 (0x3ffff >> (PAGE_SHIFT - 12)))
2644
2645 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2646 -#define arch_randomize_brk arch_randomize_brk
2647 -
2648 #endif /* __KERNEL__ */
2649
2650 /*
2651 diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
2652 index edfc980..1766f59 100644
2653 --- a/arch/powerpc/include/asm/iommu.h
2654 +++ b/arch/powerpc/include/asm/iommu.h
2655 @@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(void);
2656 extern void iommu_init_early_dart(void);
2657 extern void iommu_init_early_pasemi(void);
2658
2659 +/* dma-iommu.c */
2660 +extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
2661 +
2662 #ifdef CONFIG_PCI
2663 extern void pci_iommu_init(void);
2664 extern void pci_direct_iommu_init(void);
2665 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
2666 index 9163695..5a00112 100644
2667 --- a/arch/powerpc/include/asm/kmap_types.h
2668 +++ b/arch/powerpc/include/asm/kmap_types.h
2669 @@ -26,6 +26,7 @@ enum km_type {
2670 KM_SOFTIRQ1,
2671 KM_PPC_SYNC_PAGE,
2672 KM_PPC_SYNC_ICACHE,
2673 + KM_CLEARPAGE,
2674 KM_TYPE_NR
2675 };
2676
2677 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
2678 index ff24254..fe45b21 100644
2679 --- a/arch/powerpc/include/asm/page.h
2680 +++ b/arch/powerpc/include/asm/page.h
2681 @@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2682 * and needs to be executable. This means the whole heap ends
2683 * up being executable.
2684 */
2685 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2686 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2687 +#define VM_DATA_DEFAULT_FLAGS32 \
2688 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2689 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2690
2691 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2692 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2693 @@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2694 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2695 #endif
2696
2697 +#define ktla_ktva(addr) (addr)
2698 +#define ktva_ktla(addr) (addr)
2699 +
2700 #ifndef __ASSEMBLY__
2701
2702 #undef STRICT_MM_TYPECHECKS
2703 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
2704 index 3f17b83..1f9e766 100644
2705 --- a/arch/powerpc/include/asm/page_64.h
2706 +++ b/arch/powerpc/include/asm/page_64.h
2707 @@ -180,15 +180,18 @@ do { \
2708 * stack by default, so in the absense of a PT_GNU_STACK program header
2709 * we turn execute permission off.
2710 */
2711 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2712 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2713 +#define VM_STACK_DEFAULT_FLAGS32 \
2714 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2715 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2716
2717 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2718 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2719
2720 +#ifndef CONFIG_PAX_PAGEEXEC
2721 #define VM_STACK_DEFAULT_FLAGS \
2722 (test_thread_flag(TIF_32BIT) ? \
2723 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2724 +#endif
2725
2726 #include <asm-generic/getorder.h>
2727
2728 diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
2729 index b5ea626..4030822 100644
2730 --- a/arch/powerpc/include/asm/pci.h
2731 +++ b/arch/powerpc/include/asm/pci.h
2732 @@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
2733 }
2734
2735 #ifdef CONFIG_PCI
2736 -extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2737 -extern struct dma_map_ops *get_pci_dma_ops(void);
2738 +extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2739 +extern const struct dma_map_ops *get_pci_dma_ops(void);
2740 #else /* CONFIG_PCI */
2741 #define set_pci_dma_ops(d)
2742 #define get_pci_dma_ops() NULL
2743 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
2744 index 2a5da06..d65bea2 100644
2745 --- a/arch/powerpc/include/asm/pgtable.h
2746 +++ b/arch/powerpc/include/asm/pgtable.h
2747 @@ -2,6 +2,7 @@
2748 #define _ASM_POWERPC_PGTABLE_H
2749 #ifdef __KERNEL__
2750
2751 +#include <linux/const.h>
2752 #ifndef __ASSEMBLY__
2753 #include <asm/processor.h> /* For TASK_SIZE */
2754 #include <asm/mmu.h>
2755 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
2756 index 4aad413..85d86bf 100644
2757 --- a/arch/powerpc/include/asm/pte-hash32.h
2758 +++ b/arch/powerpc/include/asm/pte-hash32.h
2759 @@ -21,6 +21,7 @@
2760 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2761 #define _PAGE_USER 0x004 /* usermode access allowed */
2762 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2763 +#define _PAGE_EXEC _PAGE_GUARDED
2764 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2765 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2766 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2767 diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
2768 index 8c34149..78f425a 100644
2769 --- a/arch/powerpc/include/asm/ptrace.h
2770 +++ b/arch/powerpc/include/asm/ptrace.h
2771 @@ -103,7 +103,7 @@ extern unsigned long profile_pc(struct pt_regs *regs);
2772 } while(0)
2773
2774 struct task_struct;
2775 -extern unsigned long ptrace_get_reg(struct task_struct *task, int regno);
2776 +extern unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno);
2777 extern int ptrace_put_reg(struct task_struct *task, int regno,
2778 unsigned long data);
2779
2780 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
2781 index 32a7c30..be3a8bb 100644
2782 --- a/arch/powerpc/include/asm/reg.h
2783 +++ b/arch/powerpc/include/asm/reg.h
2784 @@ -191,6 +191,7 @@
2785 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2786 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2787 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2788 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2789 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2790 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2791 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2792 diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
2793 index 8979d4c..d2fd0d3 100644
2794 --- a/arch/powerpc/include/asm/swiotlb.h
2795 +++ b/arch/powerpc/include/asm/swiotlb.h
2796 @@ -13,7 +13,7 @@
2797
2798 #include <linux/swiotlb.h>
2799
2800 -extern struct dma_map_ops swiotlb_dma_ops;
2801 +extern const struct dma_map_ops swiotlb_dma_ops;
2802
2803 static inline void dma_mark_clean(void *addr, size_t size) {}
2804
2805 diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
2806 index 094a12a..877a60a 100644
2807 --- a/arch/powerpc/include/asm/system.h
2808 +++ b/arch/powerpc/include/asm/system.h
2809 @@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
2810 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2811 #endif
2812
2813 -extern unsigned long arch_align_stack(unsigned long sp);
2814 +#define arch_align_stack(x) ((x) & ~0xfUL)
2815
2816 /* Used in very early kernel initialization. */
2817 extern unsigned long reloc_offset(void);
2818 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
2819 index bd0fb84..a42a14b 100644
2820 --- a/arch/powerpc/include/asm/uaccess.h
2821 +++ b/arch/powerpc/include/asm/uaccess.h
2822 @@ -13,6 +13,8 @@
2823 #define VERIFY_READ 0
2824 #define VERIFY_WRITE 1
2825
2826 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
2827 +
2828 /*
2829 * The fs value determines whether argument validity checking should be
2830 * performed or not. If get_fs() == USER_DS, checking is performed, with
2831 @@ -327,52 +329,6 @@ do { \
2832 extern unsigned long __copy_tofrom_user(void __user *to,
2833 const void __user *from, unsigned long size);
2834
2835 -#ifndef __powerpc64__
2836 -
2837 -static inline unsigned long copy_from_user(void *to,
2838 - const void __user *from, unsigned long n)
2839 -{
2840 - unsigned long over;
2841 -
2842 - if (access_ok(VERIFY_READ, from, n))
2843 - return __copy_tofrom_user((__force void __user *)to, from, n);
2844 - if ((unsigned long)from < TASK_SIZE) {
2845 - over = (unsigned long)from + n - TASK_SIZE;
2846 - return __copy_tofrom_user((__force void __user *)to, from,
2847 - n - over) + over;
2848 - }
2849 - return n;
2850 -}
2851 -
2852 -static inline unsigned long copy_to_user(void __user *to,
2853 - const void *from, unsigned long n)
2854 -{
2855 - unsigned long over;
2856 -
2857 - if (access_ok(VERIFY_WRITE, to, n))
2858 - return __copy_tofrom_user(to, (__force void __user *)from, n);
2859 - if ((unsigned long)to < TASK_SIZE) {
2860 - over = (unsigned long)to + n - TASK_SIZE;
2861 - return __copy_tofrom_user(to, (__force void __user *)from,
2862 - n - over) + over;
2863 - }
2864 - return n;
2865 -}
2866 -
2867 -#else /* __powerpc64__ */
2868 -
2869 -#define __copy_in_user(to, from, size) \
2870 - __copy_tofrom_user((to), (from), (size))
2871 -
2872 -extern unsigned long copy_from_user(void *to, const void __user *from,
2873 - unsigned long n);
2874 -extern unsigned long copy_to_user(void __user *to, const void *from,
2875 - unsigned long n);
2876 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
2877 - unsigned long n);
2878 -
2879 -#endif /* __powerpc64__ */
2880 -
2881 static inline unsigned long __copy_from_user_inatomic(void *to,
2882 const void __user *from, unsigned long n)
2883 {
2884 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
2885 if (ret == 0)
2886 return 0;
2887 }
2888 +
2889 + if (!__builtin_constant_p(n))
2890 + check_object_size(to, n, false);
2891 +
2892 return __copy_tofrom_user((__force void __user *)to, from, n);
2893 }
2894
2895 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
2896 if (ret == 0)
2897 return 0;
2898 }
2899 +
2900 + if (!__builtin_constant_p(n))
2901 + check_object_size(from, n, true);
2902 +
2903 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2904 }
2905
2906 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
2907 return __copy_to_user_inatomic(to, from, size);
2908 }
2909
2910 +#ifndef __powerpc64__
2911 +
2912 +static inline unsigned long __must_check copy_from_user(void *to,
2913 + const void __user *from, unsigned long n)
2914 +{
2915 + unsigned long over;
2916 +
2917 + if ((long)n < 0)
2918 + return n;
2919 +
2920 + if (access_ok(VERIFY_READ, from, n)) {
2921 + if (!__builtin_constant_p(n))
2922 + check_object_size(to, n, false);
2923 + return __copy_tofrom_user((__force void __user *)to, from, n);
2924 + }
2925 + if ((unsigned long)from < TASK_SIZE) {
2926 + over = (unsigned long)from + n - TASK_SIZE;
2927 + if (!__builtin_constant_p(n - over))
2928 + check_object_size(to, n - over, false);
2929 + return __copy_tofrom_user((__force void __user *)to, from,
2930 + n - over) + over;
2931 + }
2932 + return n;
2933 +}
2934 +
2935 +static inline unsigned long __must_check copy_to_user(void __user *to,
2936 + const void *from, unsigned long n)
2937 +{
2938 + unsigned long over;
2939 +
2940 + if ((long)n < 0)
2941 + return n;
2942 +
2943 + if (access_ok(VERIFY_WRITE, to, n)) {
2944 + if (!__builtin_constant_p(n))
2945 + check_object_size(from, n, true);
2946 + return __copy_tofrom_user(to, (__force void __user *)from, n);
2947 + }
2948 + if ((unsigned long)to < TASK_SIZE) {
2949 + over = (unsigned long)to + n - TASK_SIZE;
2950 + if (!__builtin_constant_p(n))
2951 + check_object_size(from, n - over, true);
2952 + return __copy_tofrom_user(to, (__force void __user *)from,
2953 + n - over) + over;
2954 + }
2955 + return n;
2956 +}
2957 +
2958 +#else /* __powerpc64__ */
2959 +
2960 +#define __copy_in_user(to, from, size) \
2961 + __copy_tofrom_user((to), (from), (size))
2962 +
2963 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2964 +{
2965 + if ((long)n < 0 || n > INT_MAX)
2966 + return n;
2967 +
2968 + if (!__builtin_constant_p(n))
2969 + check_object_size(to, n, false);
2970 +
2971 + if (likely(access_ok(VERIFY_READ, from, n)))
2972 + n = __copy_from_user(to, from, n);
2973 + else
2974 + memset(to, 0, n);
2975 + return n;
2976 +}
2977 +
2978 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2979 +{
2980 + if ((long)n < 0 || n > INT_MAX)
2981 + return n;
2982 +
2983 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
2984 + if (!__builtin_constant_p(n))
2985 + check_object_size(from, n, true);
2986 + n = __copy_to_user(to, from, n);
2987 + }
2988 + return n;
2989 +}
2990 +
2991 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
2992 + unsigned long n);
2993 +
2994 +#endif /* __powerpc64__ */
2995 +
2996 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2997
2998 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2999 diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
3000 index bb37b1d..01fe9ce 100644
3001 --- a/arch/powerpc/kernel/cacheinfo.c
3002 +++ b/arch/powerpc/kernel/cacheinfo.c
3003 @@ -642,7 +642,7 @@ static struct kobj_attribute *cache_index_opt_attrs[] = {
3004 &cache_assoc_attr,
3005 };
3006
3007 -static struct sysfs_ops cache_index_ops = {
3008 +static const struct sysfs_ops cache_index_ops = {
3009 .show = cache_index_show,
3010 };
3011
3012 diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
3013 index 37771a5..648530c 100644
3014 --- a/arch/powerpc/kernel/dma-iommu.c
3015 +++ b/arch/powerpc/kernel/dma-iommu.c
3016 @@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
3017 }
3018
3019 /* We support DMA to/from any memory page via the iommu */
3020 -static int dma_iommu_dma_supported(struct device *dev, u64 mask)
3021 +int dma_iommu_dma_supported(struct device *dev, u64 mask)
3022 {
3023 struct iommu_table *tbl = get_iommu_table_base(dev);
3024
3025 diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
3026 index e96cbbd..bdd6d41 100644
3027 --- a/arch/powerpc/kernel/dma-swiotlb.c
3028 +++ b/arch/powerpc/kernel/dma-swiotlb.c
3029 @@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
3030 * map_page, and unmap_page on highmem, use normal dma_ops
3031 * for everything else.
3032 */
3033 -struct dma_map_ops swiotlb_dma_ops = {
3034 +const struct dma_map_ops swiotlb_dma_ops = {
3035 .alloc_coherent = dma_direct_alloc_coherent,
3036 .free_coherent = dma_direct_free_coherent,
3037 .map_sg = swiotlb_map_sg_attrs,
3038 diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
3039 index 6215062..ebea59c 100644
3040 --- a/arch/powerpc/kernel/dma.c
3041 +++ b/arch/powerpc/kernel/dma.c
3042 @@ -134,7 +134,7 @@ static inline void dma_direct_sync_single_range(struct device *dev,
3043 }
3044 #endif
3045
3046 -struct dma_map_ops dma_direct_ops = {
3047 +const struct dma_map_ops dma_direct_ops = {
3048 .alloc_coherent = dma_direct_alloc_coherent,
3049 .free_coherent = dma_direct_free_coherent,
3050 .map_sg = dma_direct_map_sg,
3051 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
3052 index 24dcc0e..a300455 100644
3053 --- a/arch/powerpc/kernel/exceptions-64e.S
3054 +++ b/arch/powerpc/kernel/exceptions-64e.S
3055 @@ -455,6 +455,7 @@ storage_fault_common:
3056 std r14,_DAR(r1)
3057 std r15,_DSISR(r1)
3058 addi r3,r1,STACK_FRAME_OVERHEAD
3059 + bl .save_nvgprs
3060 mr r4,r14
3061 mr r5,r15
3062 ld r14,PACA_EXGEN+EX_R14(r13)
3063 @@ -464,8 +465,7 @@ storage_fault_common:
3064 cmpdi r3,0
3065 bne- 1f
3066 b .ret_from_except_lite
3067 -1: bl .save_nvgprs
3068 - mr r5,r3
3069 +1: mr r5,r3
3070 addi r3,r1,STACK_FRAME_OVERHEAD
3071 ld r4,_DAR(r1)
3072 bl .bad_page_fault
3073 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
3074 index 1808876..9fd206a 100644
3075 --- a/arch/powerpc/kernel/exceptions-64s.S
3076 +++ b/arch/powerpc/kernel/exceptions-64s.S
3077 @@ -818,10 +818,10 @@ handle_page_fault:
3078 11: ld r4,_DAR(r1)
3079 ld r5,_DSISR(r1)
3080 addi r3,r1,STACK_FRAME_OVERHEAD
3081 + bl .save_nvgprs
3082 bl .do_page_fault
3083 cmpdi r3,0
3084 beq+ 13f
3085 - bl .save_nvgprs
3086 mr r5,r3
3087 addi r3,r1,STACK_FRAME_OVERHEAD
3088 lwz r4,_DAR(r1)
3089 diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
3090 index a4c8b38..1b09ad9 100644
3091 --- a/arch/powerpc/kernel/ibmebus.c
3092 +++ b/arch/powerpc/kernel/ibmebus.c
3093 @@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct device *dev, u64 mask)
3094 return 1;
3095 }
3096
3097 -static struct dma_map_ops ibmebus_dma_ops = {
3098 +static const struct dma_map_ops ibmebus_dma_ops = {
3099 .alloc_coherent = ibmebus_alloc_coherent,
3100 .free_coherent = ibmebus_free_coherent,
3101 .map_sg = ibmebus_map_sg,
3102 diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
3103 index 641c74b..8339ad7 100644
3104 --- a/arch/powerpc/kernel/kgdb.c
3105 +++ b/arch/powerpc/kernel/kgdb.c
3106 @@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
3107 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
3108 return 0;
3109
3110 - if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
3111 + if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
3112 regs->nip += 4;
3113
3114 return 1;
3115 @@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
3116 /*
3117 * Global data
3118 */
3119 -struct kgdb_arch arch_kgdb_ops = {
3120 +const struct kgdb_arch arch_kgdb_ops = {
3121 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
3122 };
3123
3124 diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
3125 index 477c663..4f50234 100644
3126 --- a/arch/powerpc/kernel/module.c
3127 +++ b/arch/powerpc/kernel/module.c
3128 @@ -31,11 +31,24 @@
3129
3130 LIST_HEAD(module_bug_list);
3131
3132 +#ifdef CONFIG_PAX_KERNEXEC
3133 void *module_alloc(unsigned long size)
3134 {
3135 if (size == 0)
3136 return NULL;
3137
3138 + return vmalloc(size);
3139 +}
3140 +
3141 +void *module_alloc_exec(unsigned long size)
3142 +#else
3143 +void *module_alloc(unsigned long size)
3144 +#endif
3145 +
3146 +{
3147 + if (size == 0)
3148 + return NULL;
3149 +
3150 return vmalloc_exec(size);
3151 }
3152
3153 @@ -45,6 +58,13 @@ void module_free(struct module *mod, void *module_region)
3154 vfree(module_region);
3155 }
3156
3157 +#ifdef CONFIG_PAX_KERNEXEC
3158 +void module_free_exec(struct module *mod, void *module_region)
3159 +{
3160 + module_free(mod, module_region);
3161 +}
3162 +#endif
3163 +
3164 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
3165 const Elf_Shdr *sechdrs,
3166 const char *name)
3167 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
3168 index f832773..0507238 100644
3169 --- a/arch/powerpc/kernel/module_32.c
3170 +++ b/arch/powerpc/kernel/module_32.c
3171 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
3172 me->arch.core_plt_section = i;
3173 }
3174 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
3175 - printk("Module doesn't contain .plt or .init.plt sections.\n");
3176 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
3177 return -ENOEXEC;
3178 }
3179
3180 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *location,
3181
3182 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
3183 /* Init, or core PLT? */
3184 - if (location >= mod->module_core
3185 - && location < mod->module_core + mod->core_size)
3186 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
3187 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
3188 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
3189 - else
3190 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
3191 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
3192 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
3193 + else {
3194 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
3195 + return ~0UL;
3196 + }
3197
3198 /* Find this entry, or if that fails, the next avail. entry */
3199 while (entry->jump[0]) {
3200 diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
3201 index cadbed6..b9bbb00 100644
3202 --- a/arch/powerpc/kernel/pci-common.c
3203 +++ b/arch/powerpc/kernel/pci-common.c
3204 @@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
3205 unsigned int ppc_pci_flags = 0;
3206
3207
3208 -static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
3209 +static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
3210
3211 -void set_pci_dma_ops(struct dma_map_ops *dma_ops)
3212 +void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
3213 {
3214 pci_dma_ops = dma_ops;
3215 }
3216
3217 -struct dma_map_ops *get_pci_dma_ops(void)
3218 +const struct dma_map_ops *get_pci_dma_ops(void)
3219 {
3220 return pci_dma_ops;
3221 }
3222 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
3223 index 7b816da..8d5c277 100644
3224 --- a/arch/powerpc/kernel/process.c
3225 +++ b/arch/powerpc/kernel/process.c
3226 @@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
3227 * Lookup NIP late so we have the best change of getting the
3228 * above info out without failing
3229 */
3230 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
3231 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
3232 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
3233 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
3234 #endif
3235 show_stack(current, (unsigned long *) regs->gpr[1]);
3236 if (!user_mode(regs))
3237 @@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3238 newsp = stack[0];
3239 ip = stack[STACK_FRAME_LR_SAVE];
3240 if (!firstframe || ip != lr) {
3241 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
3242 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
3243 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3244 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
3245 - printk(" (%pS)",
3246 + printk(" (%pA)",
3247 (void *)current->ret_stack[curr_frame].ret);
3248 curr_frame--;
3249 }
3250 @@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3251 struct pt_regs *regs = (struct pt_regs *)
3252 (sp + STACK_FRAME_OVERHEAD);
3253 lr = regs->link;
3254 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
3255 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
3256 regs->trap, (void *)regs->nip, (void *)lr);
3257 firstframe = 1;
3258 }
3259 @@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
3260 }
3261
3262 #endif /* THREAD_SHIFT < PAGE_SHIFT */
3263 -
3264 -unsigned long arch_align_stack(unsigned long sp)
3265 -{
3266 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3267 - sp -= get_random_int() & ~PAGE_MASK;
3268 - return sp & ~0xf;
3269 -}
3270 -
3271 -static inline unsigned long brk_rnd(void)
3272 -{
3273 - unsigned long rnd = 0;
3274 -
3275 - /* 8MB for 32bit, 1GB for 64bit */
3276 - if (is_32bit_task())
3277 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
3278 - else
3279 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
3280 -
3281 - return rnd << PAGE_SHIFT;
3282 -}
3283 -
3284 -unsigned long arch_randomize_brk(struct mm_struct *mm)
3285 -{
3286 - unsigned long base = mm->brk;
3287 - unsigned long ret;
3288 -
3289 -#ifdef CONFIG_PPC_STD_MMU_64
3290 - /*
3291 - * If we are using 1TB segments and we are allowed to randomise
3292 - * the heap, we can put it above 1TB so it is backed by a 1TB
3293 - * segment. Otherwise the heap will be in the bottom 1TB
3294 - * which always uses 256MB segments and this may result in a
3295 - * performance penalty.
3296 - */
3297 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
3298 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
3299 -#endif
3300 -
3301 - ret = PAGE_ALIGN(base + brk_rnd());
3302 -
3303 - if (ret < mm->brk)
3304 - return mm->brk;
3305 -
3306 - return ret;
3307 -}
3308 -
3309 -unsigned long randomize_et_dyn(unsigned long base)
3310 -{
3311 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3312 -
3313 - if (ret < base)
3314 - return base;
3315 -
3316 - return ret;
3317 -}
3318 diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
3319 index ef14988..856c4bc 100644
3320 --- a/arch/powerpc/kernel/ptrace.c
3321 +++ b/arch/powerpc/kernel/ptrace.c
3322 @@ -86,7 +86,7 @@ static int set_user_trap(struct task_struct *task, unsigned long trap)
3323 /*
3324 * Get contents of register REGNO in task TASK.
3325 */
3326 -unsigned long ptrace_get_reg(struct task_struct *task, int regno)
3327 +unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno)
3328 {
3329 if (task->thread.regs == NULL)
3330 return -EIO;
3331 @@ -894,7 +894,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
3332
3333 CHECK_FULL_REGS(child->thread.regs);
3334 if (index < PT_FPR0) {
3335 - tmp = ptrace_get_reg(child, (int) index);
3336 + tmp = ptrace_get_reg(child, index);
3337 } else {
3338 flush_fp_to_thread(child);
3339 tmp = ((unsigned long *)child->thread.fpr)
3340 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
3341 index d670429..2bc59b2 100644
3342 --- a/arch/powerpc/kernel/signal_32.c
3343 +++ b/arch/powerpc/kernel/signal_32.c
3344 @@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
3345 /* Save user registers on the stack */
3346 frame = &rt_sf->uc.uc_mcontext;
3347 addr = frame;
3348 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
3349 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3350 if (save_user_regs(regs, frame, 0, 1))
3351 goto badframe;
3352 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
3353 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
3354 index 2fe6fc6..ada0d96 100644
3355 --- a/arch/powerpc/kernel/signal_64.c
3356 +++ b/arch/powerpc/kernel/signal_64.c
3357 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
3358 current->thread.fpscr.val = 0;
3359
3360 /* Set up to return from userspace. */
3361 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
3362 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3363 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
3364 } else {
3365 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
3366 diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
3367 index b97c2d6..dd01a6a 100644
3368 --- a/arch/powerpc/kernel/sys_ppc32.c
3369 +++ b/arch/powerpc/kernel/sys_ppc32.c
3370 @@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct __sysctl_args32 __user *args)
3371 if (oldlenp) {
3372 if (!error) {
3373 if (get_user(oldlen, oldlenp) ||
3374 - put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
3375 + put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
3376 + copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
3377 error = -EFAULT;
3378 }
3379 - copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
3380 }
3381 return error;
3382 }
3383 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
3384 index 6f0ae1a..e4b6a56 100644
3385 --- a/arch/powerpc/kernel/traps.c
3386 +++ b/arch/powerpc/kernel/traps.c
3387 @@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
3388 static inline void pmac_backlight_unblank(void) { }
3389 #endif
3390
3391 +extern void gr_handle_kernel_exploit(void);
3392 +
3393 int die(const char *str, struct pt_regs *regs, long err)
3394 {
3395 static struct {
3396 @@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs *regs, long err)
3397 if (panic_on_oops)
3398 panic("Fatal exception");
3399
3400 + gr_handle_kernel_exploit();
3401 +
3402 oops_exit();
3403 do_exit(err);
3404
3405 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
3406 index 137dc22..fe57a79 100644
3407 --- a/arch/powerpc/kernel/vdso.c
3408 +++ b/arch/powerpc/kernel/vdso.c
3409 @@ -36,6 +36,7 @@
3410 #include <asm/firmware.h>
3411 #include <asm/vdso.h>
3412 #include <asm/vdso_datapage.h>
3413 +#include <asm/mman.h>
3414
3415 #include "setup.h"
3416
3417 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3418 vdso_base = VDSO32_MBASE;
3419 #endif
3420
3421 - current->mm->context.vdso_base = 0;
3422 + current->mm->context.vdso_base = ~0UL;
3423
3424 /* vDSO has a problem and was disabled, just don't "enable" it for the
3425 * process
3426 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3427 vdso_base = get_unmapped_area(NULL, vdso_base,
3428 (vdso_pages << PAGE_SHIFT) +
3429 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
3430 - 0, 0);
3431 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
3432 if (IS_ERR_VALUE(vdso_base)) {
3433 rc = vdso_base;
3434 goto fail_mmapsem;
3435 diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
3436 index 77f6421..829564a 100644
3437 --- a/arch/powerpc/kernel/vio.c
3438 +++ b/arch/powerpc/kernel/vio.c
3439 @@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
3440 vio_cmo_dealloc(viodev, alloc_size);
3441 }
3442
3443 -struct dma_map_ops vio_dma_mapping_ops = {
3444 +static const struct dma_map_ops vio_dma_mapping_ops = {
3445 .alloc_coherent = vio_dma_iommu_alloc_coherent,
3446 .free_coherent = vio_dma_iommu_free_coherent,
3447 .map_sg = vio_dma_iommu_map_sg,
3448 .unmap_sg = vio_dma_iommu_unmap_sg,
3449 + .dma_supported = dma_iommu_dma_supported,
3450 .map_page = vio_dma_iommu_map_page,
3451 .unmap_page = vio_dma_iommu_unmap_page,
3452
3453 @@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vio_dev *viodev)
3454
3455 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
3456 {
3457 - vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
3458 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
3459 }
3460
3461 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
3462 index 5eea6f3..5d10396 100644
3463 --- a/arch/powerpc/lib/usercopy_64.c
3464 +++ b/arch/powerpc/lib/usercopy_64.c
3465 @@ -9,22 +9,6 @@
3466 #include <linux/module.h>
3467 #include <asm/uaccess.h>
3468
3469 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3470 -{
3471 - if (likely(access_ok(VERIFY_READ, from, n)))
3472 - n = __copy_from_user(to, from, n);
3473 - else
3474 - memset(to, 0, n);
3475 - return n;
3476 -}
3477 -
3478 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3479 -{
3480 - if (likely(access_ok(VERIFY_WRITE, to, n)))
3481 - n = __copy_to_user(to, from, n);
3482 - return n;
3483 -}
3484 -
3485 unsigned long copy_in_user(void __user *to, const void __user *from,
3486 unsigned long n)
3487 {
3488 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
3489 return n;
3490 }
3491
3492 -EXPORT_SYMBOL(copy_from_user);
3493 -EXPORT_SYMBOL(copy_to_user);
3494 EXPORT_SYMBOL(copy_in_user);
3495
3496 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
3497 index e7dae82..877ce0d 100644
3498 --- a/arch/powerpc/mm/fault.c
3499 +++ b/arch/powerpc/mm/fault.c
3500 @@ -30,6 +30,10 @@
3501 #include <linux/kprobes.h>
3502 #include <linux/kdebug.h>
3503 #include <linux/perf_event.h>
3504 +#include <linux/slab.h>
3505 +#include <linux/pagemap.h>
3506 +#include <linux/compiler.h>
3507 +#include <linux/unistd.h>
3508
3509 #include <asm/firmware.h>
3510 #include <asm/page.h>
3511 @@ -40,6 +44,7 @@
3512 #include <asm/uaccess.h>
3513 #include <asm/tlbflush.h>
3514 #include <asm/siginfo.h>
3515 +#include <asm/ptrace.h>
3516
3517
3518 #ifdef CONFIG_KPROBES
3519 @@ -64,6 +69,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
3520 }
3521 #endif
3522
3523 +#ifdef CONFIG_PAX_PAGEEXEC
3524 +/*
3525 + * PaX: decide what to do with offenders (regs->nip = fault address)
3526 + *
3527 + * returns 1 when task should be killed
3528 + */
3529 +static int pax_handle_fetch_fault(struct pt_regs *regs)
3530 +{
3531 + return 1;
3532 +}
3533 +
3534 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3535 +{
3536 + unsigned long i;
3537 +
3538 + printk(KERN_ERR "PAX: bytes at PC: ");
3539 + for (i = 0; i < 5; i++) {
3540 + unsigned int c;
3541 + if (get_user(c, (unsigned int __user *)pc+i))
3542 + printk(KERN_CONT "???????? ");
3543 + else
3544 + printk(KERN_CONT "%08x ", c);
3545 + }
3546 + printk("\n");
3547 +}
3548 +#endif
3549 +
3550 /*
3551 * Check whether the instruction at regs->nip is a store using
3552 * an update addressing form which will update r1.
3553 @@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
3554 * indicate errors in DSISR but can validly be set in SRR1.
3555 */
3556 if (trap == 0x400)
3557 - error_code &= 0x48200000;
3558 + error_code &= 0x58200000;
3559 else
3560 is_write = error_code & DSISR_ISSTORE;
3561 #else
3562 @@ -250,7 +282,7 @@ good_area:
3563 * "undefined". Of those that can be set, this is the only
3564 * one which seems bad.
3565 */
3566 - if (error_code & 0x10000000)
3567 + if (error_code & DSISR_GUARDED)
3568 /* Guarded storage error. */
3569 goto bad_area;
3570 #endif /* CONFIG_8xx */
3571 @@ -265,7 +297,7 @@ good_area:
3572 * processors use the same I/D cache coherency mechanism
3573 * as embedded.
3574 */
3575 - if (error_code & DSISR_PROTFAULT)
3576 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
3577 goto bad_area;
3578 #endif /* CONFIG_PPC_STD_MMU */
3579
3580 @@ -335,6 +367,23 @@ bad_area:
3581 bad_area_nosemaphore:
3582 /* User mode accesses cause a SIGSEGV */
3583 if (user_mode(regs)) {
3584 +
3585 +#ifdef CONFIG_PAX_PAGEEXEC
3586 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
3587 +#ifdef CONFIG_PPC_STD_MMU
3588 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
3589 +#else
3590 + if (is_exec && regs->nip == address) {
3591 +#endif
3592 + switch (pax_handle_fetch_fault(regs)) {
3593 + }
3594 +
3595 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
3596 + do_group_exit(SIGKILL);
3597 + }
3598 + }
3599 +#endif
3600 +
3601 _exception(SIGSEGV, regs, code, address);
3602 return 0;
3603 }
3604 diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
3605 index 5973631..ad617af 100644
3606 --- a/arch/powerpc/mm/mem.c
3607 +++ b/arch/powerpc/mm/mem.c
3608 @@ -250,7 +250,7 @@ static int __init mark_nonram_nosave(void)
3609 {
3610 unsigned long lmb_next_region_start_pfn,
3611 lmb_region_max_pfn;
3612 - int i;
3613 + unsigned int i;
3614
3615 for (i = 0; i < lmb.memory.cnt - 1; i++) {
3616 lmb_region_max_pfn =
3617 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
3618 index 0d957a4..26d968f 100644
3619 --- a/arch/powerpc/mm/mmap_64.c
3620 +++ b/arch/powerpc/mm/mmap_64.c
3621 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3622 */
3623 if (mmap_is_legacy()) {
3624 mm->mmap_base = TASK_UNMAPPED_BASE;
3625 +
3626 +#ifdef CONFIG_PAX_RANDMMAP
3627 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3628 + mm->mmap_base += mm->delta_mmap;
3629 +#endif
3630 +
3631 mm->get_unmapped_area = arch_get_unmapped_area;
3632 mm->unmap_area = arch_unmap_area;
3633 } else {
3634 mm->mmap_base = mmap_base();
3635 +
3636 +#ifdef CONFIG_PAX_RANDMMAP
3637 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3638 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3639 +#endif
3640 +
3641 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3642 mm->unmap_area = arch_unmap_area_topdown;
3643 }
3644 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
3645 index ba51948..23009d9 100644
3646 --- a/arch/powerpc/mm/slice.c
3647 +++ b/arch/powerpc/mm/slice.c
3648 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
3649 if ((mm->task_size - len) < addr)
3650 return 0;
3651 vma = find_vma(mm, addr);
3652 - return (!vma || (addr + len) <= vma->vm_start);
3653 + return check_heap_stack_gap(vma, addr, len);
3654 }
3655
3656 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3657 @@ -256,7 +256,7 @@ full_search:
3658 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3659 continue;
3660 }
3661 - if (!vma || addr + len <= vma->vm_start) {
3662 + if (check_heap_stack_gap(vma, addr, len)) {
3663 /*
3664 * Remember the place where we stopped the search:
3665 */
3666 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3667 }
3668 }
3669
3670 - addr = mm->mmap_base;
3671 - while (addr > len) {
3672 + if (mm->mmap_base < len)
3673 + addr = -ENOMEM;
3674 + else
3675 + addr = mm->mmap_base - len;
3676 +
3677 + while (!IS_ERR_VALUE(addr)) {
3678 /* Go down by chunk size */
3679 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3680 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
3681
3682 /* Check for hit with different page size */
3683 mask = slice_range_to_mask(addr, len);
3684 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3685 * return with success:
3686 */
3687 vma = find_vma(mm, addr);
3688 - if (!vma || (addr + len) <= vma->vm_start) {
3689 + if (check_heap_stack_gap(vma, addr, len)) {
3690 /* remember the address as a hint for next time */
3691 if (use_cache)
3692 mm->free_area_cache = addr;
3693 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3694 mm->cached_hole_size = vma->vm_start - addr;
3695
3696 /* try just below the current vma->vm_start */
3697 - addr = vma->vm_start;
3698 + addr = skip_heap_stack_gap(vma, len);
3699 }
3700
3701 /*
3702 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
3703 if (fixed && addr > (mm->task_size - len))
3704 return -EINVAL;
3705
3706 +#ifdef CONFIG_PAX_RANDMMAP
3707 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3708 + addr = 0;
3709 +#endif
3710 +
3711 /* If hint, make sure it matches our alignment restrictions */
3712 if (!fixed && addr) {
3713 addr = _ALIGN_UP(addr, 1ul << pshift);
3714 diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
3715 index b5c753d..8f01abe 100644
3716 --- a/arch/powerpc/platforms/52xx/lite5200_pm.c
3717 +++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
3718 @@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
3719 lite5200_pm_target_state = PM_SUSPEND_ON;
3720 }
3721
3722 -static struct platform_suspend_ops lite5200_pm_ops = {
3723 +static const struct platform_suspend_ops lite5200_pm_ops = {
3724 .valid = lite5200_pm_valid,
3725 .begin = lite5200_pm_begin,
3726 .prepare = lite5200_pm_prepare,
3727 diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3728 index a55b0b6..478c18e 100644
3729 --- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3730 +++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3731 @@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
3732 iounmap(mbar);
3733 }
3734
3735 -static struct platform_suspend_ops mpc52xx_pm_ops = {
3736 +static const struct platform_suspend_ops mpc52xx_pm_ops = {
3737 .valid = mpc52xx_pm_valid,
3738 .prepare = mpc52xx_pm_prepare,
3739 .enter = mpc52xx_pm_enter,
3740 diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
3741 index 08e65fc..643d3ac 100644
3742 --- a/arch/powerpc/platforms/83xx/suspend.c
3743 +++ b/arch/powerpc/platforms/83xx/suspend.c
3744 @@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
3745 return ret;
3746 }
3747
3748 -static struct platform_suspend_ops mpc83xx_suspend_ops = {
3749 +static const struct platform_suspend_ops mpc83xx_suspend_ops = {
3750 .valid = mpc83xx_suspend_valid,
3751 .begin = mpc83xx_suspend_begin,
3752 .enter = mpc83xx_suspend_enter,
3753 diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
3754 index ca5bfdf..1602e09 100644
3755 --- a/arch/powerpc/platforms/cell/iommu.c
3756 +++ b/arch/powerpc/platforms/cell/iommu.c
3757 @@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struct device *dev, u64 mask)
3758
3759 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
3760
3761 -struct dma_map_ops dma_iommu_fixed_ops = {
3762 +const struct dma_map_ops dma_iommu_fixed_ops = {
3763 .alloc_coherent = dma_fixed_alloc_coherent,
3764 .free_coherent = dma_fixed_free_coherent,
3765 .map_sg = dma_fixed_map_sg,
3766 diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
3767 index e34b305..20e48ec 100644
3768 --- a/arch/powerpc/platforms/ps3/system-bus.c
3769 +++ b/arch/powerpc/platforms/ps3/system-bus.c
3770 @@ -694,7 +694,7 @@ static int ps3_dma_supported(struct device *_dev, u64 mask)
3771 return mask >= DMA_BIT_MASK(32);
3772 }
3773
3774 -static struct dma_map_ops ps3_sb_dma_ops = {
3775 +static const struct dma_map_ops ps3_sb_dma_ops = {
3776 .alloc_coherent = ps3_alloc_coherent,
3777 .free_coherent = ps3_free_coherent,
3778 .map_sg = ps3_sb_map_sg,
3779 @@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops = {
3780 .unmap_page = ps3_unmap_page,
3781 };
3782
3783 -static struct dma_map_ops ps3_ioc0_dma_ops = {
3784 +static const struct dma_map_ops ps3_ioc0_dma_ops = {
3785 .alloc_coherent = ps3_alloc_coherent,
3786 .free_coherent = ps3_free_coherent,
3787 .map_sg = ps3_ioc0_map_sg,
3788 diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
3789 index f0e6f28..60d53ed 100644
3790 --- a/arch/powerpc/platforms/pseries/Kconfig
3791 +++ b/arch/powerpc/platforms/pseries/Kconfig
3792 @@ -2,6 +2,8 @@ config PPC_PSERIES
3793 depends on PPC64 && PPC_BOOK3S
3794 bool "IBM pSeries & new (POWER5-based) iSeries"
3795 select MPIC
3796 + select PCI_MSI
3797 + select XICS
3798 select PPC_I8259
3799 select PPC_RTAS
3800 select RTAS_ERROR_LOGGING
3801 diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
3802 index 43c0aca..42c045b 100644
3803 --- a/arch/s390/Kconfig
3804 +++ b/arch/s390/Kconfig
3805 @@ -194,28 +194,26 @@ config AUDIT_ARCH
3806
3807 config S390_SWITCH_AMODE
3808 bool "Switch kernel/user addressing modes"
3809 + default y
3810 help
3811 This option allows to switch the addressing modes of kernel and user
3812 - space. The kernel parameter switch_amode=on will enable this feature,
3813 - default is disabled. Enabling this (via kernel parameter) on machines
3814 - earlier than IBM System z9-109 EC/BC will reduce system performance.
3815 + space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3816 + will reduce system performance.
3817
3818 Note that this option will also be selected by selecting the execute
3819 - protection option below. Enabling the execute protection via the
3820 - noexec kernel parameter will also switch the addressing modes,
3821 - independent of the switch_amode kernel parameter.
3822 + protection option below. Enabling the execute protection will also
3823 + switch the addressing modes, independent of this option.
3824
3825
3826 config S390_EXEC_PROTECT
3827 bool "Data execute protection"
3828 + default y
3829 select S390_SWITCH_AMODE
3830 help
3831 This option allows to enable a buffer overflow protection for user
3832 space programs and it also selects the addressing mode option above.
3833 - The kernel parameter noexec=on will enable this feature and also
3834 - switch the addressing modes, default is disabled. Enabling this (via
3835 - kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3836 - will reduce system performance.
3837 + Enabling this on machines earlier than IBM System z9-109 EC/BC will
3838 + reduce system performance.
3839
3840 comment "Code generation options"
3841
3842 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
3843 index e885442..5e6c303 100644
3844 --- a/arch/s390/include/asm/elf.h
3845 +++ b/arch/s390/include/asm/elf.h
3846 @@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
3847 that it will "exec", and that there is sufficient room for the brk. */
3848 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3849
3850 +#ifdef CONFIG_PAX_ASLR
3851 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3852 +
3853 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3854 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3855 +#endif
3856 +
3857 /* This yields a mask that user programs can use to figure out what
3858 instruction set this CPU supports. */
3859
3860 diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
3861 index e37478e..9ce0e9f 100644
3862 --- a/arch/s390/include/asm/setup.h
3863 +++ b/arch/s390/include/asm/setup.h
3864 @@ -50,13 +50,13 @@ extern unsigned long memory_end;
3865 void detect_memory_layout(struct mem_chunk chunk[]);
3866
3867 #ifdef CONFIG_S390_SWITCH_AMODE
3868 -extern unsigned int switch_amode;
3869 +#define switch_amode (1)
3870 #else
3871 #define switch_amode (0)
3872 #endif
3873
3874 #ifdef CONFIG_S390_EXEC_PROTECT
3875 -extern unsigned int s390_noexec;
3876 +#define s390_noexec (1)
3877 #else
3878 #define s390_noexec (0)
3879 #endif
3880 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
3881 index 8377e91..e28e6f1 100644
3882 --- a/arch/s390/include/asm/uaccess.h
3883 +++ b/arch/s390/include/asm/uaccess.h
3884 @@ -232,6 +232,10 @@ static inline unsigned long __must_check
3885 copy_to_user(void __user *to, const void *from, unsigned long n)
3886 {
3887 might_fault();
3888 +
3889 + if ((long)n < 0)
3890 + return n;
3891 +
3892 if (access_ok(VERIFY_WRITE, to, n))
3893 n = __copy_to_user(to, from, n);
3894 return n;
3895 @@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
3896 static inline unsigned long __must_check
3897 __copy_from_user(void *to, const void __user *from, unsigned long n)
3898 {
3899 + if ((long)n < 0)
3900 + return n;
3901 +
3902 if (__builtin_constant_p(n) && (n <= 256))
3903 return uaccess.copy_from_user_small(n, from, to);
3904 else
3905 @@ -283,6 +290,10 @@ static inline unsigned long __must_check
3906 copy_from_user(void *to, const void __user *from, unsigned long n)
3907 {
3908 might_fault();
3909 +
3910 + if ((long)n < 0)
3911 + return n;
3912 +
3913 if (access_ok(VERIFY_READ, from, n))
3914 n = __copy_from_user(to, from, n);
3915 else
3916 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
3917 index 639380a..72e3c02 100644
3918 --- a/arch/s390/kernel/module.c
3919 +++ b/arch/s390/kernel/module.c
3920 @@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
3921
3922 /* Increase core size by size of got & plt and set start
3923 offsets for got and plt. */
3924 - me->core_size = ALIGN(me->core_size, 4);
3925 - me->arch.got_offset = me->core_size;
3926 - me->core_size += me->arch.got_size;
3927 - me->arch.plt_offset = me->core_size;
3928 - me->core_size += me->arch.plt_size;
3929 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
3930 + me->arch.got_offset = me->core_size_rw;
3931 + me->core_size_rw += me->arch.got_size;
3932 + me->arch.plt_offset = me->core_size_rx;
3933 + me->core_size_rx += me->arch.plt_size;
3934 return 0;
3935 }
3936
3937 @@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3938 if (info->got_initialized == 0) {
3939 Elf_Addr *gotent;
3940
3941 - gotent = me->module_core + me->arch.got_offset +
3942 + gotent = me->module_core_rw + me->arch.got_offset +
3943 info->got_offset;
3944 *gotent = val;
3945 info->got_initialized = 1;
3946 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3947 else if (r_type == R_390_GOTENT ||
3948 r_type == R_390_GOTPLTENT)
3949 *(unsigned int *) loc =
3950 - (val + (Elf_Addr) me->module_core - loc) >> 1;
3951 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3952 else if (r_type == R_390_GOT64 ||
3953 r_type == R_390_GOTPLT64)
3954 *(unsigned long *) loc = val;
3955 @@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3956 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3957 if (info->plt_initialized == 0) {
3958 unsigned int *ip;
3959 - ip = me->module_core + me->arch.plt_offset +
3960 + ip = me->module_core_rx + me->arch.plt_offset +
3961 info->plt_offset;
3962 #ifndef CONFIG_64BIT
3963 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3964 @@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3965 val - loc + 0xffffUL < 0x1ffffeUL) ||
3966 (r_type == R_390_PLT32DBL &&
3967 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3968 - val = (Elf_Addr) me->module_core +
3969 + val = (Elf_Addr) me->module_core_rx +
3970 me->arch.plt_offset +
3971 info->plt_offset;
3972 val += rela->r_addend - loc;
3973 @@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3974 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3975 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3976 val = val + rela->r_addend -
3977 - ((Elf_Addr) me->module_core + me->arch.got_offset);
3978 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3979 if (r_type == R_390_GOTOFF16)
3980 *(unsigned short *) loc = val;
3981 else if (r_type == R_390_GOTOFF32)
3982 @@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3983 break;
3984 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3985 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3986 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
3987 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3988 rela->r_addend - loc;
3989 if (r_type == R_390_GOTPC)
3990 *(unsigned int *) loc = val;
3991 diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
3992 index 061479f..dbfb08c 100644
3993 --- a/arch/s390/kernel/setup.c
3994 +++ b/arch/s390/kernel/setup.c
3995 @@ -306,9 +306,6 @@ static int __init early_parse_mem(char *p)
3996 early_param("mem", early_parse_mem);
3997
3998 #ifdef CONFIG_S390_SWITCH_AMODE
3999 -unsigned int switch_amode = 0;
4000 -EXPORT_SYMBOL_GPL(switch_amode);
4001 -
4002 static int set_amode_and_uaccess(unsigned long user_amode,
4003 unsigned long user32_amode)
4004 {
4005 @@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigned long user_amode,
4006 return 0;
4007 }
4008 }
4009 -
4010 -/*
4011 - * Switch kernel/user addressing modes?
4012 - */
4013 -static int __init early_parse_switch_amode(char *p)
4014 -{
4015 - switch_amode = 1;
4016 - return 0;
4017 -}
4018 -early_param("switch_amode", early_parse_switch_amode);
4019 -
4020 #else /* CONFIG_S390_SWITCH_AMODE */
4021 static inline int set_amode_and_uaccess(unsigned long user_amode,
4022 unsigned long user32_amode)
4023 @@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(unsigned long user_amode,
4024 }
4025 #endif /* CONFIG_S390_SWITCH_AMODE */
4026
4027 -#ifdef CONFIG_S390_EXEC_PROTECT
4028 -unsigned int s390_noexec = 0;
4029 -EXPORT_SYMBOL_GPL(s390_noexec);
4030 -
4031 -/*
4032 - * Enable execute protection?
4033 - */
4034 -static int __init early_parse_noexec(char *p)
4035 -{
4036 - if (!strncmp(p, "off", 3))
4037 - return 0;
4038 - switch_amode = 1;
4039 - s390_noexec = 1;
4040 - return 0;
4041 -}
4042 -early_param("noexec", early_parse_noexec);
4043 -#endif /* CONFIG_S390_EXEC_PROTECT */
4044 -
4045 static void setup_addressing_mode(void)
4046 {
4047 if (s390_noexec) {
4048 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4049 index f4558cc..e461f37 100644
4050 --- a/arch/s390/mm/mmap.c
4051 +++ b/arch/s390/mm/mmap.c
4052 @@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4053 */
4054 if (mmap_is_legacy()) {
4055 mm->mmap_base = TASK_UNMAPPED_BASE;
4056 +
4057 +#ifdef CONFIG_PAX_RANDMMAP
4058 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4059 + mm->mmap_base += mm->delta_mmap;
4060 +#endif
4061 +
4062 mm->get_unmapped_area = arch_get_unmapped_area;
4063 mm->unmap_area = arch_unmap_area;
4064 } else {
4065 mm->mmap_base = mmap_base();
4066 +
4067 +#ifdef CONFIG_PAX_RANDMMAP
4068 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4069 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4070 +#endif
4071 +
4072 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4073 mm->unmap_area = arch_unmap_area_topdown;
4074 }
4075 @@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4076 */
4077 if (mmap_is_legacy()) {
4078 mm->mmap_base = TASK_UNMAPPED_BASE;
4079 +
4080 +#ifdef CONFIG_PAX_RANDMMAP
4081 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4082 + mm->mmap_base += mm->delta_mmap;
4083 +#endif
4084 +
4085 mm->get_unmapped_area = s390_get_unmapped_area;
4086 mm->unmap_area = arch_unmap_area;
4087 } else {
4088 mm->mmap_base = mmap_base();
4089 +
4090 +#ifdef CONFIG_PAX_RANDMMAP
4091 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4092 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4093 +#endif
4094 +
4095 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4096 mm->unmap_area = arch_unmap_area_topdown;
4097 }
4098 diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
4099 index 589d5c7..669e274 100644
4100 --- a/arch/score/include/asm/system.h
4101 +++ b/arch/score/include/asm/system.h
4102 @@ -17,7 +17,7 @@ do { \
4103 #define finish_arch_switch(prev) do {} while (0)
4104
4105 typedef void (*vi_handler_t)(void);
4106 -extern unsigned long arch_align_stack(unsigned long sp);
4107 +#define arch_align_stack(x) (x)
4108
4109 #define mb() barrier()
4110 #define rmb() barrier()
4111 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
4112 index 25d0803..d6c8e36 100644
4113 --- a/arch/score/kernel/process.c
4114 +++ b/arch/score/kernel/process.c
4115 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
4116
4117 return task_pt_regs(task)->cp0_epc;
4118 }
4119 -
4120 -unsigned long arch_align_stack(unsigned long sp)
4121 -{
4122 - return sp;
4123 -}
4124 diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c
4125 index d936c1a..304a252 100644
4126 --- a/arch/sh/boards/mach-hp6xx/pm.c
4127 +++ b/arch/sh/boards/mach-hp6xx/pm.c
4128 @@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_t state)
4129 return 0;
4130 }
4131
4132 -static struct platform_suspend_ops hp6x0_pm_ops = {
4133 +static const struct platform_suspend_ops hp6x0_pm_ops = {
4134 .enter = hp6x0_pm_enter,
4135 .valid = suspend_valid_only_mem,
4136 };
4137 diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
4138 index 8a8a993..7b3079b 100644
4139 --- a/arch/sh/kernel/cpu/sh4/sq.c
4140 +++ b/arch/sh/kernel/cpu/sh4/sq.c
4141 @@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[] = {
4142 NULL,
4143 };
4144
4145 -static struct sysfs_ops sq_sysfs_ops = {
4146 +static const struct sysfs_ops sq_sysfs_ops = {
4147 .show = sq_sysfs_show,
4148 .store = sq_sysfs_store,
4149 };
4150 diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
4151 index ee3c2aa..c49cee6 100644
4152 --- a/arch/sh/kernel/cpu/shmobile/pm.c
4153 +++ b/arch/sh/kernel/cpu/shmobile/pm.c
4154 @@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t state)
4155 return 0;
4156 }
4157
4158 -static struct platform_suspend_ops sh_pm_ops = {
4159 +static const struct platform_suspend_ops sh_pm_ops = {
4160 .enter = sh_pm_enter,
4161 .valid = suspend_valid_only_mem,
4162 };
4163 diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
4164 index 3e532d0..9faa306 100644
4165 --- a/arch/sh/kernel/kgdb.c
4166 +++ b/arch/sh/kernel/kgdb.c
4167 @@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
4168 {
4169 }
4170
4171 -struct kgdb_arch arch_kgdb_ops = {
4172 +const struct kgdb_arch arch_kgdb_ops = {
4173 /* Breakpoint instruction: trapa #0x3c */
4174 #ifdef CONFIG_CPU_LITTLE_ENDIAN
4175 .gdb_bpt_instr = { 0x3c, 0xc3 },
4176 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
4177 index afeb710..d1d1289 100644
4178 --- a/arch/sh/mm/mmap.c
4179 +++ b/arch/sh/mm/mmap.c
4180 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4181 addr = PAGE_ALIGN(addr);
4182
4183 vma = find_vma(mm, addr);
4184 - if (TASK_SIZE - len >= addr &&
4185 - (!vma || addr + len <= vma->vm_start))
4186 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4187 return addr;
4188 }
4189
4190 @@ -106,7 +105,7 @@ full_search:
4191 }
4192 return -ENOMEM;
4193 }
4194 - if (likely(!vma || addr + len <= vma->vm_start)) {
4195 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4196 /*
4197 * Remember the place where we stopped the search:
4198 */
4199 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4200 addr = PAGE_ALIGN(addr);
4201
4202 vma = find_vma(mm, addr);
4203 - if (TASK_SIZE - len >= addr &&
4204 - (!vma || addr + len <= vma->vm_start))
4205 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4206 return addr;
4207 }
4208
4209 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4210 /* make sure it can fit in the remaining address space */
4211 if (likely(addr > len)) {
4212 vma = find_vma(mm, addr-len);
4213 - if (!vma || addr <= vma->vm_start) {
4214 + if (check_heap_stack_gap(vma, addr - len, len)) {
4215 /* remember the address as a hint for next time */
4216 return (mm->free_area_cache = addr-len);
4217 }
4218 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4219 if (unlikely(mm->mmap_base < len))
4220 goto bottomup;
4221
4222 - addr = mm->mmap_base-len;
4223 - if (do_colour_align)
4224 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4225 + addr = mm->mmap_base - len;
4226
4227 do {
4228 + if (do_colour_align)
4229 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4230 /*
4231 * Lookup failure means no vma is above this address,
4232 * else if new region fits below vma->vm_start,
4233 * return with success:
4234 */
4235 vma = find_vma(mm, addr);
4236 - if (likely(!vma || addr+len <= vma->vm_start)) {
4237 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4238 /* remember the address as a hint for next time */
4239 return (mm->free_area_cache = addr);
4240 }
4241 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4242 mm->cached_hole_size = vma->vm_start - addr;
4243
4244 /* try just below the current vma->vm_start */
4245 - addr = vma->vm_start-len;
4246 - if (do_colour_align)
4247 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4248 - } while (likely(len < vma->vm_start));
4249 + addr = skip_heap_stack_gap(vma, len);
4250 + } while (!IS_ERR_VALUE(addr));
4251
4252 bottomup:
4253 /*
4254 diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
4255 index 113225b..7fd04e7 100644
4256 --- a/arch/sparc/Makefile
4257 +++ b/arch/sparc/Makefile
4258 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
4259 # Export what is needed by arch/sparc/boot/Makefile
4260 export VMLINUX_INIT VMLINUX_MAIN
4261 VMLINUX_INIT := $(head-y) $(init-y)
4262 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4263 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4264 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4265 VMLINUX_MAIN += $(drivers-y) $(net-y)
4266
4267 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
4268 index f5cc06f..f858d47 100644
4269 --- a/arch/sparc/include/asm/atomic_64.h
4270 +++ b/arch/sparc/include/asm/atomic_64.h
4271 @@ -14,18 +14,40 @@
4272 #define ATOMIC64_INIT(i) { (i) }
4273
4274 #define atomic_read(v) ((v)->counter)
4275 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
4276 +{
4277 + return v->counter;
4278 +}
4279 #define atomic64_read(v) ((v)->counter)
4280 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
4281 +{
4282 + return v->counter;
4283 +}
4284
4285 #define atomic_set(v, i) (((v)->counter) = i)
4286 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
4287 +{
4288 + v->counter = i;
4289 +}
4290 #define atomic64_set(v, i) (((v)->counter) = i)
4291 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
4292 +{
4293 + v->counter = i;
4294 +}
4295
4296 extern void atomic_add(int, atomic_t *);
4297 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
4298 extern void atomic64_add(long, atomic64_t *);
4299 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
4300 extern void atomic_sub(int, atomic_t *);
4301 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
4302 extern void atomic64_sub(long, atomic64_t *);
4303 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
4304
4305 extern int atomic_add_ret(int, atomic_t *);
4306 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
4307 extern long atomic64_add_ret(long, atomic64_t *);
4308 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
4309 extern int atomic_sub_ret(int, atomic_t *);
4310 extern long atomic64_sub_ret(long, atomic64_t *);
4311
4312 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4313 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
4314
4315 #define atomic_inc_return(v) atomic_add_ret(1, v)
4316 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
4317 +{
4318 + return atomic_add_ret_unchecked(1, v);
4319 +}
4320 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
4321 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
4322 +{
4323 + return atomic64_add_ret_unchecked(1, v);
4324 +}
4325
4326 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
4327 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
4328
4329 #define atomic_add_return(i, v) atomic_add_ret(i, v)
4330 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
4331 +{
4332 + return atomic_add_ret_unchecked(i, v);
4333 +}
4334 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
4335 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
4336 +{
4337 + return atomic64_add_ret_unchecked(i, v);
4338 +}
4339
4340 /*
4341 * atomic_inc_and_test - increment and test
4342 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4343 * other cases.
4344 */
4345 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
4346 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
4347 +{
4348 + return atomic_inc_return_unchecked(v) == 0;
4349 +}
4350 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
4351
4352 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
4353 @@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4354 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
4355
4356 #define atomic_inc(v) atomic_add(1, v)
4357 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
4358 +{
4359 + atomic_add_unchecked(1, v);
4360 +}
4361 #define atomic64_inc(v) atomic64_add(1, v)
4362 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
4363 +{
4364 + atomic64_add_unchecked(1, v);
4365 +}
4366
4367 #define atomic_dec(v) atomic_sub(1, v)
4368 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
4369 +{
4370 + atomic_sub_unchecked(1, v);
4371 +}
4372 #define atomic64_dec(v) atomic64_sub(1, v)
4373 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
4374 +{
4375 + atomic64_sub_unchecked(1, v);
4376 +}
4377
4378 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
4379 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
4380
4381 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
4382 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
4383 +{
4384 + return cmpxchg(&v->counter, old, new);
4385 +}
4386 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
4387 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
4388 +{
4389 + return xchg(&v->counter, new);
4390 +}
4391
4392 static inline int atomic_add_unless(atomic_t *v, int a, int u)
4393 {
4394 - int c, old;
4395 + int c, old, new;
4396 c = atomic_read(v);
4397 for (;;) {
4398 - if (unlikely(c == (u)))
4399 + if (unlikely(c == u))
4400 break;
4401 - old = atomic_cmpxchg((v), c, c + (a));
4402 +
4403 + asm volatile("addcc %2, %0, %0\n"
4404 +
4405 +#ifdef CONFIG_PAX_REFCOUNT
4406 + "tvs %%icc, 6\n"
4407 +#endif
4408 +
4409 + : "=r" (new)
4410 + : "0" (c), "ir" (a)
4411 + : "cc");
4412 +
4413 + old = atomic_cmpxchg(v, c, new);
4414 if (likely(old == c))
4415 break;
4416 c = old;
4417 }
4418 - return c != (u);
4419 + return c != u;
4420 }
4421
4422 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
4423 @@ -90,20 +167,35 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
4424 #define atomic64_cmpxchg(v, o, n) \
4425 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
4426 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
4427 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
4428 +{
4429 + return xchg(&v->counter, new);
4430 +}
4431
4432 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
4433 {
4434 - long c, old;
4435 + long c, old, new;
4436 c = atomic64_read(v);
4437 for (;;) {
4438 - if (unlikely(c == (u)))
4439 + if (unlikely(c == u))
4440 break;
4441 - old = atomic64_cmpxchg((v), c, c + (a));
4442 +
4443 + asm volatile("addcc %2, %0, %0\n"
4444 +
4445 +#ifdef CONFIG_PAX_REFCOUNT
4446 + "tvs %%xcc, 6\n"
4447 +#endif
4448 +
4449 + : "=r" (new)
4450 + : "0" (c), "ir" (a)
4451 + : "cc");
4452 +
4453 + old = atomic64_cmpxchg(v, c, new);
4454 if (likely(old == c))
4455 break;
4456 c = old;
4457 }
4458 - return c != (u);
4459 + return c != u;
4460 }
4461
4462 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4463 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
4464 index 41f85ae..fb54d5e 100644
4465 --- a/arch/sparc/include/asm/cache.h
4466 +++ b/arch/sparc/include/asm/cache.h
4467 @@ -8,7 +8,7 @@
4468 #define _SPARC_CACHE_H
4469
4470 #define L1_CACHE_SHIFT 5
4471 -#define L1_CACHE_BYTES 32
4472 +#define L1_CACHE_BYTES 32UL
4473 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
4474
4475 #ifdef CONFIG_SPARC32
4476 diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
4477 index 5a8c308..38def92 100644
4478 --- a/arch/sparc/include/asm/dma-mapping.h
4479 +++ b/arch/sparc/include/asm/dma-mapping.h
4480 @@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask);
4481 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
4482 #define dma_is_consistent(d, h) (1)
4483
4484 -extern struct dma_map_ops *dma_ops, pci32_dma_ops;
4485 +extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
4486 extern struct bus_type pci_bus_type;
4487
4488 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
4489 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
4490 {
4491 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
4492 if (dev->bus == &pci_bus_type)
4493 @@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
4494 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
4495 dma_addr_t *dma_handle, gfp_t flag)
4496 {
4497 - struct dma_map_ops *ops = get_dma_ops(dev);
4498 + const struct dma_map_ops *ops = get_dma_ops(dev);
4499 void *cpu_addr;
4500
4501 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
4502 @@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
4503 static inline void dma_free_coherent(struct device *dev, size_t size,
4504 void *cpu_addr, dma_addr_t dma_handle)
4505 {
4506 - struct dma_map_ops *ops = get_dma_ops(dev);
4507 + const struct dma_map_ops *ops = get_dma_ops(dev);
4508
4509 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
4510 ops->free_coherent(dev, size, cpu_addr, dma_handle);
4511 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
4512 index 381a1b5..b97e3ff 100644
4513 --- a/arch/sparc/include/asm/elf_32.h
4514 +++ b/arch/sparc/include/asm/elf_32.h
4515 @@ -116,6 +116,13 @@ typedef struct {
4516
4517 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
4518
4519 +#ifdef CONFIG_PAX_ASLR
4520 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
4521 +
4522 +#define PAX_DELTA_MMAP_LEN 16
4523 +#define PAX_DELTA_STACK_LEN 16
4524 +#endif
4525 +
4526 /* This yields a mask that user programs can use to figure out what
4527 instruction set this cpu supports. This can NOT be done in userspace
4528 on Sparc. */
4529 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
4530 index 9968085..c2106ef 100644
4531 --- a/arch/sparc/include/asm/elf_64.h
4532 +++ b/arch/sparc/include/asm/elf_64.h
4533 @@ -163,6 +163,12 @@ typedef struct {
4534 #define ELF_ET_DYN_BASE 0x0000010000000000UL
4535 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
4536
4537 +#ifdef CONFIG_PAX_ASLR
4538 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
4539 +
4540 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
4541 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
4542 +#endif
4543
4544 /* This yields a mask that user programs can use to figure out what
4545 instruction set this cpu supports. */
4546 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
4547 index e0cabe7..efd60f1 100644
4548 --- a/arch/sparc/include/asm/pgtable_32.h
4549 +++ b/arch/sparc/include/asm/pgtable_32.h
4550 @@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
4551 BTFIXUPDEF_INT(page_none)
4552 BTFIXUPDEF_INT(page_copy)
4553 BTFIXUPDEF_INT(page_readonly)
4554 +
4555 +#ifdef CONFIG_PAX_PAGEEXEC
4556 +BTFIXUPDEF_INT(page_shared_noexec)
4557 +BTFIXUPDEF_INT(page_copy_noexec)
4558 +BTFIXUPDEF_INT(page_readonly_noexec)
4559 +#endif
4560 +
4561 BTFIXUPDEF_INT(page_kernel)
4562
4563 #define PMD_SHIFT SUN4C_PMD_SHIFT
4564 @@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
4565 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
4566 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
4567
4568 +#ifdef CONFIG_PAX_PAGEEXEC
4569 +extern pgprot_t PAGE_SHARED_NOEXEC;
4570 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
4571 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
4572 +#else
4573 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
4574 +# define PAGE_COPY_NOEXEC PAGE_COPY
4575 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
4576 +#endif
4577 +
4578 extern unsigned long page_kernel;
4579
4580 #ifdef MODULE
4581 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
4582 index 1407c07..7e10231 100644
4583 --- a/arch/sparc/include/asm/pgtsrmmu.h
4584 +++ b/arch/sparc/include/asm/pgtsrmmu.h
4585 @@ -115,6 +115,13 @@
4586 SRMMU_EXEC | SRMMU_REF)
4587 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
4588 SRMMU_EXEC | SRMMU_REF)
4589 +
4590 +#ifdef CONFIG_PAX_PAGEEXEC
4591 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
4592 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4593 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4594 +#endif
4595 +
4596 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
4597 SRMMU_DIRTY | SRMMU_REF)
4598
4599 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
4600 index 43e5147..47622a1 100644
4601 --- a/arch/sparc/include/asm/spinlock_64.h
4602 +++ b/arch/sparc/include/asm/spinlock_64.h
4603 @@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
4604
4605 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
4606
4607 -static void inline arch_read_lock(raw_rwlock_t *lock)
4608 +static inline void arch_read_lock(raw_rwlock_t *lock)
4609 {
4610 unsigned long tmp1, tmp2;
4611
4612 __asm__ __volatile__ (
4613 "1: ldsw [%2], %0\n"
4614 " brlz,pn %0, 2f\n"
4615 -"4: add %0, 1, %1\n"
4616 +"4: addcc %0, 1, %1\n"
4617 +
4618 +#ifdef CONFIG_PAX_REFCOUNT
4619 +" tvs %%icc, 6\n"
4620 +#endif
4621 +
4622 " cas [%2], %0, %1\n"
4623 " cmp %0, %1\n"
4624 " bne,pn %%icc, 1b\n"
4625 @@ -112,10 +117,10 @@ static void inline arch_read_lock(raw_rwlock_t *lock)
4626 " .previous"
4627 : "=&r" (tmp1), "=&r" (tmp2)
4628 : "r" (lock)
4629 - : "memory");
4630 + : "memory", "cc");
4631 }
4632
4633 -static int inline arch_read_trylock(raw_rwlock_t *lock)
4634 +static inline int arch_read_trylock(raw_rwlock_t *lock)
4635 {
4636 int tmp1, tmp2;
4637
4638 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
4639 "1: ldsw [%2], %0\n"
4640 " brlz,a,pn %0, 2f\n"
4641 " mov 0, %0\n"
4642 -" add %0, 1, %1\n"
4643 +" addcc %0, 1, %1\n"
4644 +
4645 +#ifdef CONFIG_PAX_REFCOUNT
4646 +" tvs %%icc, 6\n"
4647 +#endif
4648 +
4649 " cas [%2], %0, %1\n"
4650 " cmp %0, %1\n"
4651 " bne,pn %%icc, 1b\n"
4652 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
4653 return tmp1;
4654 }
4655
4656 -static void inline arch_read_unlock(raw_rwlock_t *lock)
4657 +static inline void arch_read_unlock(raw_rwlock_t *lock)
4658 {
4659 unsigned long tmp1, tmp2;
4660
4661 __asm__ __volatile__(
4662 "1: lduw [%2], %0\n"
4663 -" sub %0, 1, %1\n"
4664 +" subcc %0, 1, %1\n"
4665 +
4666 +#ifdef CONFIG_PAX_REFCOUNT
4667 +" tvs %%icc, 6\n"
4668 +#endif
4669 +
4670 " cas [%2], %0, %1\n"
4671 " cmp %0, %1\n"
4672 " bne,pn %%xcc, 1b\n"
4673 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock)
4674 : "memory");
4675 }
4676
4677 -static void inline arch_write_lock(raw_rwlock_t *lock)
4678 +static inline void arch_write_lock(raw_rwlock_t *lock)
4679 {
4680 unsigned long mask, tmp1, tmp2;
4681
4682 @@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock)
4683 : "memory");
4684 }
4685
4686 -static void inline arch_write_unlock(raw_rwlock_t *lock)
4687 +static inline void arch_write_unlock(raw_rwlock_t *lock)
4688 {
4689 __asm__ __volatile__(
4690 " stw %%g0, [%0]"
4691 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock)
4692 : "memory");
4693 }
4694
4695 -static int inline arch_write_trylock(raw_rwlock_t *lock)
4696 +static inline int arch_write_trylock(raw_rwlock_t *lock)
4697 {
4698 unsigned long mask, tmp1, tmp2, result;
4699
4700 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
4701 index 844d73a..f787fb9 100644
4702 --- a/arch/sparc/include/asm/thread_info_32.h
4703 +++ b/arch/sparc/include/asm/thread_info_32.h
4704 @@ -50,6 +50,8 @@ struct thread_info {
4705 unsigned long w_saved;
4706
4707 struct restart_block restart_block;
4708 +
4709 + unsigned long lowest_stack;
4710 };
4711
4712 /*
4713 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
4714 index f78ad9a..9f55fc7 100644
4715 --- a/arch/sparc/include/asm/thread_info_64.h
4716 +++ b/arch/sparc/include/asm/thread_info_64.h
4717 @@ -68,6 +68,8 @@ struct thread_info {
4718 struct pt_regs *kern_una_regs;
4719 unsigned int kern_una_insn;
4720
4721 + unsigned long lowest_stack;
4722 +
4723 unsigned long fpregs[0] __attribute__ ((aligned(64)));
4724 };
4725
4726 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
4727 index e88fbe5..96b0ce5 100644
4728 --- a/arch/sparc/include/asm/uaccess.h
4729 +++ b/arch/sparc/include/asm/uaccess.h
4730 @@ -1,5 +1,13 @@
4731 #ifndef ___ASM_SPARC_UACCESS_H
4732 #define ___ASM_SPARC_UACCESS_H
4733 +
4734 +#ifdef __KERNEL__
4735 +#ifndef __ASSEMBLY__
4736 +#include <linux/types.h>
4737 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
4738 +#endif
4739 +#endif
4740 +
4741 #if defined(__sparc__) && defined(__arch64__)
4742 #include <asm/uaccess_64.h>
4743 #else
4744 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
4745 index 8303ac4..07f333d 100644
4746 --- a/arch/sparc/include/asm/uaccess_32.h
4747 +++ b/arch/sparc/include/asm/uaccess_32.h
4748 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
4749
4750 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4751 {
4752 - if (n && __access_ok((unsigned long) to, n))
4753 + if ((long)n < 0)
4754 + return n;
4755 +
4756 + if (n && __access_ok((unsigned long) to, n)) {
4757 + if (!__builtin_constant_p(n))
4758 + check_object_size(from, n, true);
4759 return __copy_user(to, (__force void __user *) from, n);
4760 - else
4761 + } else
4762 return n;
4763 }
4764
4765 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
4766 {
4767 + if ((long)n < 0)
4768 + return n;
4769 +
4770 + if (!__builtin_constant_p(n))
4771 + check_object_size(from, n, true);
4772 +
4773 return __copy_user(to, (__force void __user *) from, n);
4774 }
4775
4776 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4777 {
4778 - if (n && __access_ok((unsigned long) from, n))
4779 + if ((long)n < 0)
4780 + return n;
4781 +
4782 + if (n && __access_ok((unsigned long) from, n)) {
4783 + if (!__builtin_constant_p(n))
4784 + check_object_size(to, n, false);
4785 return __copy_user((__force void __user *) to, from, n);
4786 - else
4787 + } else
4788 return n;
4789 }
4790
4791 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
4792 {
4793 + if ((long)n < 0)
4794 + return n;
4795 +
4796 return __copy_user((__force void __user *) to, from, n);
4797 }
4798
4799 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
4800 index 9ea271e..7b8a271 100644
4801 --- a/arch/sparc/include/asm/uaccess_64.h
4802 +++ b/arch/sparc/include/asm/uaccess_64.h
4803 @@ -9,6 +9,7 @@
4804 #include <linux/compiler.h>
4805 #include <linux/string.h>
4806 #include <linux/thread_info.h>
4807 +#include <linux/kernel.h>
4808 #include <asm/asi.h>
4809 #include <asm/system.h>
4810 #include <asm/spitfire.h>
4811 @@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
4812 static inline unsigned long __must_check
4813 copy_from_user(void *to, const void __user *from, unsigned long size)
4814 {
4815 - unsigned long ret = ___copy_from_user(to, from, size);
4816 + unsigned long ret;
4817
4818 + if ((long)size < 0 || size > INT_MAX)
4819 + return size;
4820 +
4821 + if (!__builtin_constant_p(size))
4822 + check_object_size(to, size, false);
4823 +
4824 + ret = ___copy_from_user(to, from, size);
4825 if (unlikely(ret))
4826 ret = copy_from_user_fixup(to, from, size);
4827 return ret;
4828 @@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
4829 static inline unsigned long __must_check
4830 copy_to_user(void __user *to, const void *from, unsigned long size)
4831 {
4832 - unsigned long ret = ___copy_to_user(to, from, size);
4833 + unsigned long ret;
4834
4835 + if ((long)size < 0 || size > INT_MAX)
4836 + return size;
4837 +
4838 + if (!__builtin_constant_p(size))
4839 + check_object_size(from, size, true);
4840 +
4841 + ret = ___copy_to_user(to, from, size);
4842 if (unlikely(ret))
4843 ret = copy_to_user_fixup(to, from, size);
4844 return ret;
4845 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
4846 index 2782681..77ded84 100644
4847 --- a/arch/sparc/kernel/Makefile
4848 +++ b/arch/sparc/kernel/Makefile
4849 @@ -3,7 +3,7 @@
4850 #
4851
4852 asflags-y := -ansi
4853 -ccflags-y := -Werror
4854 +#ccflags-y := -Werror
4855
4856 extra-y := head_$(BITS).o
4857 extra-y += init_task.o
4858 diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
4859 index 7690cc2..ece64c9 100644
4860 --- a/arch/sparc/kernel/iommu.c
4861 +++ b/arch/sparc/kernel/iommu.c
4862 @@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
4863 spin_unlock_irqrestore(&iommu->lock, flags);
4864 }
4865
4866 -static struct dma_map_ops sun4u_dma_ops = {
4867 +static const struct dma_map_ops sun4u_dma_ops = {
4868 .alloc_coherent = dma_4u_alloc_coherent,
4869 .free_coherent = dma_4u_free_coherent,
4870 .map_page = dma_4u_map_page,
4871 @@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops = {
4872 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
4873 };
4874
4875 -struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4876 +const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4877 EXPORT_SYMBOL(dma_ops);
4878
4879 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
4880 diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
4881 index 9f61fd8..bd048db 100644
4882 --- a/arch/sparc/kernel/ioport.c
4883 +++ b/arch/sparc/kernel/ioport.c
4884 @@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
4885 BUG();
4886 }
4887
4888 -struct dma_map_ops sbus_dma_ops = {
4889 +const struct dma_map_ops sbus_dma_ops = {
4890 .alloc_coherent = sbus_alloc_coherent,
4891 .free_coherent = sbus_free_coherent,
4892 .map_page = sbus_map_page,
4893 @@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
4894 .sync_sg_for_device = sbus_sync_sg_for_device,
4895 };
4896
4897 -struct dma_map_ops *dma_ops = &sbus_dma_ops;
4898 +const struct dma_map_ops *dma_ops = &sbus_dma_ops;
4899 EXPORT_SYMBOL(dma_ops);
4900
4901 static int __init sparc_register_ioport(void)
4902 @@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
4903 }
4904 }
4905
4906 -struct dma_map_ops pci32_dma_ops = {
4907 +const struct dma_map_ops pci32_dma_ops = {
4908 .alloc_coherent = pci32_alloc_coherent,
4909 .free_coherent = pci32_free_coherent,
4910 .map_page = pci32_map_page,
4911 diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
4912 index 04df4ed..55c4b6e 100644
4913 --- a/arch/sparc/kernel/kgdb_32.c
4914 +++ b/arch/sparc/kernel/kgdb_32.c
4915 @@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
4916 {
4917 }
4918
4919 -struct kgdb_arch arch_kgdb_ops = {
4920 +const struct kgdb_arch arch_kgdb_ops = {
4921 /* Breakpoint instruction: ta 0x7d */
4922 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
4923 };
4924 diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
4925 index f5a0fd4..d886f71 100644
4926 --- a/arch/sparc/kernel/kgdb_64.c
4927 +++ b/arch/sparc/kernel/kgdb_64.c
4928 @@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
4929 {
4930 }
4931
4932 -struct kgdb_arch arch_kgdb_ops = {
4933 +const struct kgdb_arch arch_kgdb_ops = {
4934 /* Breakpoint instruction: ta 0x72 */
4935 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
4936 };
4937 diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
4938 index 23c33ff..d137fbd 100644
4939 --- a/arch/sparc/kernel/pci_sun4v.c
4940 +++ b/arch/sparc/kernel/pci_sun4v.c
4941 @@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
4942 spin_unlock_irqrestore(&iommu->lock, flags);
4943 }
4944
4945 -static struct dma_map_ops sun4v_dma_ops = {
4946 +static const struct dma_map_ops sun4v_dma_ops = {
4947 .alloc_coherent = dma_4v_alloc_coherent,
4948 .free_coherent = dma_4v_free_coherent,
4949 .map_page = dma_4v_map_page,
4950 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
4951 index c49865b..b41a81b 100644
4952 --- a/arch/sparc/kernel/process_32.c
4953 +++ b/arch/sparc/kernel/process_32.c
4954 @@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
4955 rw->ins[4], rw->ins[5],
4956 rw->ins[6],
4957 rw->ins[7]);
4958 - printk("%pS\n", (void *) rw->ins[7]);
4959 + printk("%pA\n", (void *) rw->ins[7]);
4960 rw = (struct reg_window32 *) rw->ins[6];
4961 }
4962 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4963 @@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
4964
4965 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4966 r->psr, r->pc, r->npc, r->y, print_tainted());
4967 - printk("PC: <%pS>\n", (void *) r->pc);
4968 + printk("PC: <%pA>\n", (void *) r->pc);
4969 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4970 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4971 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4972 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4973 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4974 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4975 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4976 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4977
4978 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4979 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4980 @@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
4981 rw = (struct reg_window32 *) fp;
4982 pc = rw->ins[7];
4983 printk("[%08lx : ", pc);
4984 - printk("%pS ] ", (void *) pc);
4985 + printk("%pA ] ", (void *) pc);
4986 fp = rw->ins[6];
4987 } while (++count < 16);
4988 printk("\n");
4989 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
4990 index cb70476..3d0c191 100644
4991 --- a/arch/sparc/kernel/process_64.c
4992 +++ b/arch/sparc/kernel/process_64.c
4993 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
4994 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4995 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4996 if (regs->tstate & TSTATE_PRIV)
4997 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4998 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4999 }
5000
5001 void show_regs(struct pt_regs *regs)
5002 {
5003 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5004 regs->tpc, regs->tnpc, regs->y, print_tainted());
5005 - printk("TPC: <%pS>\n", (void *) regs->tpc);
5006 + printk("TPC: <%pA>\n", (void *) regs->tpc);
5007 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5008 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5009 regs->u_regs[3]);
5010 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
5011 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5012 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5013 regs->u_regs[15]);
5014 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5015 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5016 show_regwindow(regs);
5017 }
5018
5019 @@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void)
5020 ((tp && tp->task) ? tp->task->pid : -1));
5021
5022 if (gp->tstate & TSTATE_PRIV) {
5023 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5024 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5025 (void *) gp->tpc,
5026 (void *) gp->o7,
5027 (void *) gp->i7,
5028 diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
5029 index 6edc4e5..06a69b4 100644
5030 --- a/arch/sparc/kernel/sigutil_64.c
5031 +++ b/arch/sparc/kernel/sigutil_64.c
5032 @@ -2,6 +2,7 @@
5033 #include <linux/types.h>
5034 #include <linux/thread_info.h>
5035 #include <linux/uaccess.h>
5036 +#include <linux/errno.h>
5037
5038 #include <asm/sigcontext.h>
5039 #include <asm/fpumacro.h>
5040 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5041 index 3a82e65..ce0a53a 100644
5042 --- a/arch/sparc/kernel/sys_sparc_32.c
5043 +++ b/arch/sparc/kernel/sys_sparc_32.c
5044 @@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5045 if (ARCH_SUN4C && len > 0x20000000)
5046 return -ENOMEM;
5047 if (!addr)
5048 - addr = TASK_UNMAPPED_BASE;
5049 + addr = current->mm->mmap_base;
5050
5051 if (flags & MAP_SHARED)
5052 addr = COLOUR_ALIGN(addr);
5053 @@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5054 }
5055 if (TASK_SIZE - PAGE_SIZE - len < addr)
5056 return -ENOMEM;
5057 - if (!vmm || addr + len <= vmm->vm_start)
5058 + if (check_heap_stack_gap(vmm, addr, len))
5059 return addr;
5060 addr = vmm->vm_end;
5061 if (flags & MAP_SHARED)
5062 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5063 index cfa0e19..98972ac 100644
5064 --- a/arch/sparc/kernel/sys_sparc_64.c
5065 +++ b/arch/sparc/kernel/sys_sparc_64.c
5066 @@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5067 /* We do not accept a shared mapping if it would violate
5068 * cache aliasing constraints.
5069 */
5070 - if ((flags & MAP_SHARED) &&
5071 + if ((filp || (flags & MAP_SHARED)) &&
5072 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5073 return -EINVAL;
5074 return addr;
5075 @@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5076 if (filp || (flags & MAP_SHARED))
5077 do_color_align = 1;
5078
5079 +#ifdef CONFIG_PAX_RANDMMAP
5080 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5081 +#endif
5082 +
5083 if (addr) {
5084 if (do_color_align)
5085 addr = COLOUR_ALIGN(addr, pgoff);
5086 @@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5087 addr = PAGE_ALIGN(addr);
5088
5089 vma = find_vma(mm, addr);
5090 - if (task_size - len >= addr &&
5091 - (!vma || addr + len <= vma->vm_start))
5092 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5093 return addr;
5094 }
5095
5096 if (len > mm->cached_hole_size) {
5097 - start_addr = addr = mm->free_area_cache;
5098 + start_addr = addr = mm->free_area_cache;
5099 } else {
5100 - start_addr = addr = TASK_UNMAPPED_BASE;
5101 + start_addr = addr = mm->mmap_base;
5102 mm->cached_hole_size = 0;
5103 }
5104
5105 @@ -175,14 +178,14 @@ full_search:
5106 vma = find_vma(mm, VA_EXCLUDE_END);
5107 }
5108 if (unlikely(task_size < addr)) {
5109 - if (start_addr != TASK_UNMAPPED_BASE) {
5110 - start_addr = addr = TASK_UNMAPPED_BASE;
5111 + if (start_addr != mm->mmap_base) {
5112 + start_addr = addr = mm->mmap_base;
5113 mm->cached_hole_size = 0;
5114 goto full_search;
5115 }
5116 return -ENOMEM;
5117 }
5118 - if (likely(!vma || addr + len <= vma->vm_start)) {
5119 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5120 /*
5121 * Remember the place where we stopped the search:
5122 */
5123 @@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5124 /* We do not accept a shared mapping if it would violate
5125 * cache aliasing constraints.
5126 */
5127 - if ((flags & MAP_SHARED) &&
5128 + if ((filp || (flags & MAP_SHARED)) &&
5129 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5130 return -EINVAL;
5131 return addr;
5132 @@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5133 addr = PAGE_ALIGN(addr);
5134
5135 vma = find_vma(mm, addr);
5136 - if (task_size - len >= addr &&
5137 - (!vma || addr + len <= vma->vm_start))
5138 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5139 return addr;
5140 }
5141
5142 @@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5143 /* make sure it can fit in the remaining address space */
5144 if (likely(addr > len)) {
5145 vma = find_vma(mm, addr-len);
5146 - if (!vma || addr <= vma->vm_start) {
5147 + if (check_heap_stack_gap(vma, addr - len, len)) {
5148 /* remember the address as a hint for next time */
5149 return (mm->free_area_cache = addr-len);
5150 }
5151 @@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5152 if (unlikely(mm->mmap_base < len))
5153 goto bottomup;
5154
5155 - addr = mm->mmap_base-len;
5156 - if (do_color_align)
5157 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5158 + addr = mm->mmap_base - len;
5159
5160 do {
5161 + if (do_color_align)
5162 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5163 /*
5164 * Lookup failure means no vma is above this address,
5165 * else if new region fits below vma->vm_start,
5166 * return with success:
5167 */
5168 vma = find_vma(mm, addr);
5169 - if (likely(!vma || addr+len <= vma->vm_start)) {
5170 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5171 /* remember the address as a hint for next time */
5172 return (mm->free_area_cache = addr);
5173 }
5174 @@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5175 mm->cached_hole_size = vma->vm_start - addr;
5176
5177 /* try just below the current vma->vm_start */
5178 - addr = vma->vm_start-len;
5179 - if (do_color_align)
5180 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5181 - } while (likely(len < vma->vm_start));
5182 + addr = skip_heap_stack_gap(vma, len);
5183 + } while (!IS_ERR_VALUE(addr));
5184
5185 bottomup:
5186 /*
5187 @@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5188 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
5189 sysctl_legacy_va_layout) {
5190 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5191 +
5192 +#ifdef CONFIG_PAX_RANDMMAP
5193 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5194 + mm->mmap_base += mm->delta_mmap;
5195 +#endif
5196 +
5197 mm->get_unmapped_area = arch_get_unmapped_area;
5198 mm->unmap_area = arch_unmap_area;
5199 } else {
5200 @@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5201 gap = (task_size / 6 * 5);
5202
5203 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5204 +
5205 +#ifdef CONFIG_PAX_RANDMMAP
5206 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5207 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5208 +#endif
5209 +
5210 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5211 mm->unmap_area = arch_unmap_area_topdown;
5212 }
5213 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
5214 index c0490c7..84959d1 100644
5215 --- a/arch/sparc/kernel/traps_32.c
5216 +++ b/arch/sparc/kernel/traps_32.c
5217 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
5218 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
5219 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
5220
5221 +extern void gr_handle_kernel_exploit(void);
5222 +
5223 void die_if_kernel(char *str, struct pt_regs *regs)
5224 {
5225 static int die_counter;
5226 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5227 count++ < 30 &&
5228 (((unsigned long) rw) >= PAGE_OFFSET) &&
5229 !(((unsigned long) rw) & 0x7)) {
5230 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
5231 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
5232 (void *) rw->ins[7]);
5233 rw = (struct reg_window32 *)rw->ins[6];
5234 }
5235 }
5236 printk("Instruction DUMP:");
5237 instruction_dump ((unsigned long *) regs->pc);
5238 - if(regs->psr & PSR_PS)
5239 + if(regs->psr & PSR_PS) {
5240 + gr_handle_kernel_exploit();
5241 do_exit(SIGKILL);
5242 + }
5243 do_exit(SIGSEGV);
5244 }
5245
5246 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
5247 index 10f7bb9..cdb6793 100644
5248 --- a/arch/sparc/kernel/traps_64.c
5249 +++ b/arch/sparc/kernel/traps_64.c
5250 @@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
5251 i + 1,
5252 p->trapstack[i].tstate, p->trapstack[i].tpc,
5253 p->trapstack[i].tnpc, p->trapstack[i].tt);
5254 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
5255 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
5256 }
5257 }
5258
5259 @@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
5260
5261 lvl -= 0x100;
5262 if (regs->tstate & TSTATE_PRIV) {
5263 +
5264 +#ifdef CONFIG_PAX_REFCOUNT
5265 + if (lvl == 6)
5266 + pax_report_refcount_overflow(regs);
5267 +#endif
5268 +
5269 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
5270 die_if_kernel(buffer, regs);
5271 }
5272 @@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
5273 void bad_trap_tl1(struct pt_regs *regs, long lvl)
5274 {
5275 char buffer[32];
5276 -
5277 +
5278 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
5279 0, lvl, SIGTRAP) == NOTIFY_STOP)
5280 return;
5281
5282 +#ifdef CONFIG_PAX_REFCOUNT
5283 + if (lvl == 6)
5284 + pax_report_refcount_overflow(regs);
5285 +#endif
5286 +
5287 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
5288
5289 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
5290 @@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
5291 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
5292 printk("%s" "ERROR(%d): ",
5293 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
5294 - printk("TPC<%pS>\n", (void *) regs->tpc);
5295 + printk("TPC<%pA>\n", (void *) regs->tpc);
5296 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
5297 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
5298 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
5299 @@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5300 smp_processor_id(),
5301 (type & 0x1) ? 'I' : 'D',
5302 regs->tpc);
5303 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
5304 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
5305 panic("Irrecoverable Cheetah+ parity error.");
5306 }
5307
5308 @@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5309 smp_processor_id(),
5310 (type & 0x1) ? 'I' : 'D',
5311 regs->tpc);
5312 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
5313 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
5314 }
5315
5316 struct sun4v_error_entry {
5317 @@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
5318
5319 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
5320 regs->tpc, tl);
5321 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
5322 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
5323 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5324 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
5325 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
5326 (void *) regs->u_regs[UREG_I7]);
5327 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
5328 "pte[%lx] error[%lx]\n",
5329 @@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
5330
5331 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
5332 regs->tpc, tl);
5333 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
5334 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
5335 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5336 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
5337 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
5338 (void *) regs->u_regs[UREG_I7]);
5339 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
5340 "pte[%lx] error[%lx]\n",
5341 @@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5342 fp = (unsigned long)sf->fp + STACK_BIAS;
5343 }
5344
5345 - printk(" [%016lx] %pS\n", pc, (void *) pc);
5346 + printk(" [%016lx] %pA\n", pc, (void *) pc);
5347 } while (++count < 16);
5348 }
5349
5350 @@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
5351 return (struct reg_window *) (fp + STACK_BIAS);
5352 }
5353
5354 +extern void gr_handle_kernel_exploit(void);
5355 +
5356 void die_if_kernel(char *str, struct pt_regs *regs)
5357 {
5358 static int die_counter;
5359 @@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5360 while (rw &&
5361 count++ < 30&&
5362 is_kernel_stack(current, rw)) {
5363 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
5364 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
5365 (void *) rw->ins[7]);
5366
5367 rw = kernel_stack_up(rw);
5368 @@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5369 }
5370 user_instruction_dump ((unsigned int __user *) regs->tpc);
5371 }
5372 - if (regs->tstate & TSTATE_PRIV)
5373 + if (regs->tstate & TSTATE_PRIV) {
5374 + gr_handle_kernel_exploit();
5375 do_exit(SIGKILL);
5376 + }
5377 +
5378 do_exit(SIGSEGV);
5379 }
5380 EXPORT_SYMBOL(die_if_kernel);
5381 diff --git a/arch/sparc/kernel/una_asm_64.S b/arch/sparc/kernel/una_asm_64.S
5382 index be183fe..1c8d332 100644
5383 --- a/arch/sparc/kernel/una_asm_64.S
5384 +++ b/arch/sparc/kernel/una_asm_64.S
5385 @@ -127,7 +127,7 @@ do_int_load:
5386 wr %o5, 0x0, %asi
5387 retl
5388 mov 0, %o0
5389 - .size __do_int_load, .-__do_int_load
5390 + .size do_int_load, .-do_int_load
5391
5392 .section __ex_table,"a"
5393 .word 4b, __retl_efault
5394 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
5395 index 3792099..2af17d8 100644
5396 --- a/arch/sparc/kernel/unaligned_64.c
5397 +++ b/arch/sparc/kernel/unaligned_64.c
5398 @@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs *regs)
5399 if (count < 5) {
5400 last_time = jiffies;
5401 count++;
5402 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
5403 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
5404 regs->tpc, (void *) regs->tpc);
5405 }
5406 }
5407 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
5408 index e75faf0..24f12f9 100644
5409 --- a/arch/sparc/lib/Makefile
5410 +++ b/arch/sparc/lib/Makefile
5411 @@ -2,7 +2,7 @@
5412 #
5413
5414 asflags-y := -ansi -DST_DIV0=0x02
5415 -ccflags-y := -Werror
5416 +#ccflags-y := -Werror
5417
5418 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
5419 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
5420 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
5421 index 0268210..f0291ca 100644
5422 --- a/arch/sparc/lib/atomic_64.S
5423 +++ b/arch/sparc/lib/atomic_64.S
5424 @@ -18,7 +18,12 @@
5425 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5426 BACKOFF_SETUP(%o2)
5427 1: lduw [%o1], %g1
5428 - add %g1, %o0, %g7
5429 + addcc %g1, %o0, %g7
5430 +
5431 +#ifdef CONFIG_PAX_REFCOUNT
5432 + tvs %icc, 6
5433 +#endif
5434 +
5435 cas [%o1], %g1, %g7
5436 cmp %g1, %g7
5437 bne,pn %icc, 2f
5438 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5439 2: BACKOFF_SPIN(%o2, %o3, 1b)
5440 .size atomic_add, .-atomic_add
5441
5442 + .globl atomic_add_unchecked
5443 + .type atomic_add_unchecked,#function
5444 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5445 + BACKOFF_SETUP(%o2)
5446 +1: lduw [%o1], %g1
5447 + add %g1, %o0, %g7
5448 + cas [%o1], %g1, %g7
5449 + cmp %g1, %g7
5450 + bne,pn %icc, 2f
5451 + nop
5452 + retl
5453 + nop
5454 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5455 + .size atomic_add_unchecked, .-atomic_add_unchecked
5456 +
5457 .globl atomic_sub
5458 .type atomic_sub,#function
5459 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5460 BACKOFF_SETUP(%o2)
5461 1: lduw [%o1], %g1
5462 - sub %g1, %o0, %g7
5463 + subcc %g1, %o0, %g7
5464 +
5465 +#ifdef CONFIG_PAX_REFCOUNT
5466 + tvs %icc, 6
5467 +#endif
5468 +
5469 cas [%o1], %g1, %g7
5470 cmp %g1, %g7
5471 bne,pn %icc, 2f
5472 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5473 2: BACKOFF_SPIN(%o2, %o3, 1b)
5474 .size atomic_sub, .-atomic_sub
5475
5476 + .globl atomic_sub_unchecked
5477 + .type atomic_sub_unchecked,#function
5478 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5479 + BACKOFF_SETUP(%o2)
5480 +1: lduw [%o1], %g1
5481 + sub %g1, %o0, %g7
5482 + cas [%o1], %g1, %g7
5483 + cmp %g1, %g7
5484 + bne,pn %icc, 2f
5485 + nop
5486 + retl
5487 + nop
5488 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5489 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
5490 +
5491 .globl atomic_add_ret
5492 .type atomic_add_ret,#function
5493 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5494 BACKOFF_SETUP(%o2)
5495 1: lduw [%o1], %g1
5496 - add %g1, %o0, %g7
5497 + addcc %g1, %o0, %g7
5498 +
5499 +#ifdef CONFIG_PAX_REFCOUNT
5500 + tvs %icc, 6
5501 +#endif
5502 +
5503 cas [%o1], %g1, %g7
5504 cmp %g1, %g7
5505 bne,pn %icc, 2f
5506 @@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5507 2: BACKOFF_SPIN(%o2, %o3, 1b)
5508 .size atomic_add_ret, .-atomic_add_ret
5509
5510 + .globl atomic_add_ret_unchecked
5511 + .type atomic_add_ret_unchecked,#function
5512 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5513 + BACKOFF_SETUP(%o2)
5514 +1: lduw [%o1], %g1
5515 + addcc %g1, %o0, %g7
5516 + cas [%o1], %g1, %g7
5517 + cmp %g1, %g7
5518 + bne,pn %icc, 2f
5519 + add %g7, %o0, %g7
5520 + sra %g7, 0, %o0
5521 + retl
5522 + nop
5523 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5524 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
5525 +
5526 .globl atomic_sub_ret
5527 .type atomic_sub_ret,#function
5528 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5529 BACKOFF_SETUP(%o2)
5530 1: lduw [%o1], %g1
5531 - sub %g1, %o0, %g7
5532 + subcc %g1, %o0, %g7
5533 +
5534 +#ifdef CONFIG_PAX_REFCOUNT
5535 + tvs %icc, 6
5536 +#endif
5537 +
5538 cas [%o1], %g1, %g7
5539 cmp %g1, %g7
5540 bne,pn %icc, 2f
5541 @@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5542 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5543 BACKOFF_SETUP(%o2)
5544 1: ldx [%o1], %g1
5545 - add %g1, %o0, %g7
5546 + addcc %g1, %o0, %g7
5547 +
5548 +#ifdef CONFIG_PAX_REFCOUNT
5549 + tvs %xcc, 6
5550 +#endif
5551 +
5552 casx [%o1], %g1, %g7
5553 cmp %g1, %g7
5554 bne,pn %xcc, 2f
5555 @@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5556 2: BACKOFF_SPIN(%o2, %o3, 1b)
5557 .size atomic64_add, .-atomic64_add
5558
5559 + .globl atomic64_add_unchecked
5560 + .type atomic64_add_unchecked,#function
5561 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5562 + BACKOFF_SETUP(%o2)
5563 +1: ldx [%o1], %g1
5564 + addcc %g1, %o0, %g7
5565 + casx [%o1], %g1, %g7
5566 + cmp %g1, %g7
5567 + bne,pn %xcc, 2f
5568 + nop
5569 + retl
5570 + nop
5571 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5572 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
5573 +
5574 .globl atomic64_sub
5575 .type atomic64_sub,#function
5576 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5577 BACKOFF_SETUP(%o2)
5578 1: ldx [%o1], %g1
5579 - sub %g1, %o0, %g7
5580 + subcc %g1, %o0, %g7
5581 +
5582 +#ifdef CONFIG_PAX_REFCOUNT
5583 + tvs %xcc, 6
5584 +#endif
5585 +
5586 casx [%o1], %g1, %g7
5587 cmp %g1, %g7
5588 bne,pn %xcc, 2f
5589 @@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5590 2: BACKOFF_SPIN(%o2, %o3, 1b)
5591 .size atomic64_sub, .-atomic64_sub
5592
5593 + .globl atomic64_sub_unchecked
5594 + .type atomic64_sub_unchecked,#function
5595 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5596 + BACKOFF_SETUP(%o2)
5597 +1: ldx [%o1], %g1
5598 + subcc %g1, %o0, %g7
5599 + casx [%o1], %g1, %g7
5600 + cmp %g1, %g7
5601 + bne,pn %xcc, 2f
5602 + nop
5603 + retl
5604 + nop
5605 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5606 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
5607 +
5608 .globl atomic64_add_ret
5609 .type atomic64_add_ret,#function
5610 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5611 BACKOFF_SETUP(%o2)
5612 1: ldx [%o1], %g1
5613 - add %g1, %o0, %g7
5614 + addcc %g1, %o0, %g7
5615 +
5616 +#ifdef CONFIG_PAX_REFCOUNT
5617 + tvs %xcc, 6
5618 +#endif
5619 +
5620 casx [%o1], %g1, %g7
5621 cmp %g1, %g7
5622 bne,pn %xcc, 2f
5623 @@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5624 2: BACKOFF_SPIN(%o2, %o3, 1b)
5625 .size atomic64_add_ret, .-atomic64_add_ret
5626
5627 + .globl atomic64_add_ret_unchecked
5628 + .type atomic64_add_ret_unchecked,#function
5629 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5630 + BACKOFF_SETUP(%o2)
5631 +1: ldx [%o1], %g1
5632 + addcc %g1, %o0, %g7
5633 + casx [%o1], %g1, %g7
5634 + cmp %g1, %g7
5635 + bne,pn %xcc, 2f
5636 + add %g7, %o0, %g7
5637 + mov %g7, %o0
5638 + retl
5639 + nop
5640 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5641 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
5642 +
5643 .globl atomic64_sub_ret
5644 .type atomic64_sub_ret,#function
5645 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5646 BACKOFF_SETUP(%o2)
5647 1: ldx [%o1], %g1
5648 - sub %g1, %o0, %g7
5649 + subcc %g1, %o0, %g7
5650 +
5651 +#ifdef CONFIG_PAX_REFCOUNT
5652 + tvs %xcc, 6
5653 +#endif
5654 +
5655 casx [%o1], %g1, %g7
5656 cmp %g1, %g7
5657 bne,pn %xcc, 2f
5658 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
5659 index 704b126..2e79d76 100644
5660 --- a/arch/sparc/lib/ksyms.c
5661 +++ b/arch/sparc/lib/ksyms.c
5662 @@ -144,12 +144,18 @@ EXPORT_SYMBOL(__downgrade_write);
5663
5664 /* Atomic counter implementation. */
5665 EXPORT_SYMBOL(atomic_add);
5666 +EXPORT_SYMBOL(atomic_add_unchecked);
5667 EXPORT_SYMBOL(atomic_add_ret);
5668 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
5669 EXPORT_SYMBOL(atomic_sub);
5670 +EXPORT_SYMBOL(atomic_sub_unchecked);
5671 EXPORT_SYMBOL(atomic_sub_ret);
5672 EXPORT_SYMBOL(atomic64_add);
5673 +EXPORT_SYMBOL(atomic64_add_unchecked);
5674 EXPORT_SYMBOL(atomic64_add_ret);
5675 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
5676 EXPORT_SYMBOL(atomic64_sub);
5677 +EXPORT_SYMBOL(atomic64_sub_unchecked);
5678 EXPORT_SYMBOL(atomic64_sub_ret);
5679
5680 /* Atomic bit operations. */
5681 diff --git a/arch/sparc/lib/rwsem_64.S b/arch/sparc/lib/rwsem_64.S
5682 index 91a7d29..ce75c29 100644
5683 --- a/arch/sparc/lib/rwsem_64.S
5684 +++ b/arch/sparc/lib/rwsem_64.S
5685 @@ -11,7 +11,12 @@
5686 .globl __down_read
5687 __down_read:
5688 1: lduw [%o0], %g1
5689 - add %g1, 1, %g7
5690 + addcc %g1, 1, %g7
5691 +
5692 +#ifdef CONFIG_PAX_REFCOUNT
5693 + tvs %icc, 6
5694 +#endif
5695 +
5696 cas [%o0], %g1, %g7
5697 cmp %g1, %g7
5698 bne,pn %icc, 1b
5699 @@ -33,7 +38,12 @@ __down_read:
5700 .globl __down_read_trylock
5701 __down_read_trylock:
5702 1: lduw [%o0], %g1
5703 - add %g1, 1, %g7
5704 + addcc %g1, 1, %g7
5705 +
5706 +#ifdef CONFIG_PAX_REFCOUNT
5707 + tvs %icc, 6
5708 +#endif
5709 +
5710 cmp %g7, 0
5711 bl,pn %icc, 2f
5712 mov 0, %o1
5713 @@ -51,7 +61,12 @@ __down_write:
5714 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5715 1:
5716 lduw [%o0], %g3
5717 - add %g3, %g1, %g7
5718 + addcc %g3, %g1, %g7
5719 +
5720 +#ifdef CONFIG_PAX_REFCOUNT
5721 + tvs %icc, 6
5722 +#endif
5723 +
5724 cas [%o0], %g3, %g7
5725 cmp %g3, %g7
5726 bne,pn %icc, 1b
5727 @@ -77,7 +92,12 @@ __down_write_trylock:
5728 cmp %g3, 0
5729 bne,pn %icc, 2f
5730 mov 0, %o1
5731 - add %g3, %g1, %g7
5732 + addcc %g3, %g1, %g7
5733 +
5734 +#ifdef CONFIG_PAX_REFCOUNT
5735 + tvs %icc, 6
5736 +#endif
5737 +
5738 cas [%o0], %g3, %g7
5739 cmp %g3, %g7
5740 bne,pn %icc, 1b
5741 @@ -90,7 +110,12 @@ __down_write_trylock:
5742 __up_read:
5743 1:
5744 lduw [%o0], %g1
5745 - sub %g1, 1, %g7
5746 + subcc %g1, 1, %g7
5747 +
5748 +#ifdef CONFIG_PAX_REFCOUNT
5749 + tvs %icc, 6
5750 +#endif
5751 +
5752 cas [%o0], %g1, %g7
5753 cmp %g1, %g7
5754 bne,pn %icc, 1b
5755 @@ -118,7 +143,12 @@ __up_write:
5756 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5757 1:
5758 lduw [%o0], %g3
5759 - sub %g3, %g1, %g7
5760 + subcc %g3, %g1, %g7
5761 +
5762 +#ifdef CONFIG_PAX_REFCOUNT
5763 + tvs %icc, 6
5764 +#endif
5765 +
5766 cas [%o0], %g3, %g7
5767 cmp %g3, %g7
5768 bne,pn %icc, 1b
5769 @@ -143,7 +173,12 @@ __downgrade_write:
5770 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
5771 1:
5772 lduw [%o0], %g3
5773 - sub %g3, %g1, %g7
5774 + subcc %g3, %g1, %g7
5775 +
5776 +#ifdef CONFIG_PAX_REFCOUNT
5777 + tvs %icc, 6
5778 +#endif
5779 +
5780 cas [%o0], %g3, %g7
5781 cmp %g3, %g7
5782 bne,pn %icc, 1b
5783 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
5784 index 79836a7..62f47a2 100644
5785 --- a/arch/sparc/mm/Makefile
5786 +++ b/arch/sparc/mm/Makefile
5787 @@ -2,7 +2,7 @@
5788 #
5789
5790 asflags-y := -ansi
5791 -ccflags-y := -Werror
5792 +#ccflags-y := -Werror
5793
5794 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5795 obj-y += fault_$(BITS).o
5796 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
5797 index b99f81c..3453e93 100644
5798 --- a/arch/sparc/mm/fault_32.c
5799 +++ b/arch/sparc/mm/fault_32.c
5800 @@ -21,6 +21,9 @@
5801 #include <linux/interrupt.h>
5802 #include <linux/module.h>
5803 #include <linux/kdebug.h>
5804 +#include <linux/slab.h>
5805 +#include <linux/pagemap.h>
5806 +#include <linux/compiler.h>
5807
5808 #include <asm/system.h>
5809 #include <asm/page.h>
5810 @@ -167,6 +170,267 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
5811 return safe_compute_effective_address(regs, insn);
5812 }
5813
5814 +#ifdef CONFIG_PAX_PAGEEXEC
5815 +#ifdef CONFIG_PAX_DLRESOLVE
5816 +static void pax_emuplt_close(struct vm_area_struct *vma)
5817 +{
5818 + vma->vm_mm->call_dl_resolve = 0UL;
5819 +}
5820 +
5821 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5822 +{
5823 + unsigned int *kaddr;
5824 +
5825 + vmf->page = alloc_page(GFP_HIGHUSER);
5826 + if (!vmf->page)
5827 + return VM_FAULT_OOM;
5828 +
5829 + kaddr = kmap(vmf->page);
5830 + memset(kaddr, 0, PAGE_SIZE);
5831 + kaddr[0] = 0x9DE3BFA8U; /* save */
5832 + flush_dcache_page(vmf->page);
5833 + kunmap(vmf->page);
5834 + return VM_FAULT_MAJOR;
5835 +}
5836 +
5837 +static const struct vm_operations_struct pax_vm_ops = {
5838 + .close = pax_emuplt_close,
5839 + .fault = pax_emuplt_fault
5840 +};
5841 +
5842 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5843 +{
5844 + int ret;
5845 +
5846 + vma->vm_mm = current->mm;
5847 + vma->vm_start = addr;
5848 + vma->vm_end = addr + PAGE_SIZE;
5849 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5850 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5851 + vma->vm_ops = &pax_vm_ops;
5852 +
5853 + ret = insert_vm_struct(current->mm, vma);
5854 + if (ret)
5855 + return ret;
5856 +
5857 + ++current->mm->total_vm;
5858 + return 0;
5859 +}
5860 +#endif
5861 +
5862 +/*
5863 + * PaX: decide what to do with offenders (regs->pc = fault address)
5864 + *
5865 + * returns 1 when task should be killed
5866 + * 2 when patched PLT trampoline was detected
5867 + * 3 when unpatched PLT trampoline was detected
5868 + */
5869 +static int pax_handle_fetch_fault(struct pt_regs *regs)
5870 +{
5871 +
5872 +#ifdef CONFIG_PAX_EMUPLT
5873 + int err;
5874 +
5875 + do { /* PaX: patched PLT emulation #1 */
5876 + unsigned int sethi1, sethi2, jmpl;
5877 +
5878 + err = get_user(sethi1, (unsigned int *)regs->pc);
5879 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
5880 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
5881 +
5882 + if (err)
5883 + break;
5884 +
5885 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5886 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
5887 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
5888 + {
5889 + unsigned int addr;
5890 +
5891 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5892 + addr = regs->u_regs[UREG_G1];
5893 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5894 + regs->pc = addr;
5895 + regs->npc = addr+4;
5896 + return 2;
5897 + }
5898 + } while (0);
5899 +
5900 + { /* PaX: patched PLT emulation #2 */
5901 + unsigned int ba;
5902 +
5903 + err = get_user(ba, (unsigned int *)regs->pc);
5904 +
5905 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5906 + unsigned int addr;
5907 +
5908 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5909 + regs->pc = addr;
5910 + regs->npc = addr+4;
5911 + return 2;
5912 + }
5913 + }
5914 +
5915 + do { /* PaX: patched PLT emulation #3 */
5916 + unsigned int sethi, jmpl, nop;
5917 +
5918 + err = get_user(sethi, (unsigned int *)regs->pc);
5919 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
5920 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
5921 +
5922 + if (err)
5923 + break;
5924 +
5925 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5926 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5927 + nop == 0x01000000U)
5928 + {
5929 + unsigned int addr;
5930 +
5931 + addr = (sethi & 0x003FFFFFU) << 10;
5932 + regs->u_regs[UREG_G1] = addr;
5933 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5934 + regs->pc = addr;
5935 + regs->npc = addr+4;
5936 + return 2;
5937 + }
5938 + } while (0);
5939 +
5940 + do { /* PaX: unpatched PLT emulation step 1 */
5941 + unsigned int sethi, ba, nop;
5942 +
5943 + err = get_user(sethi, (unsigned int *)regs->pc);
5944 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
5945 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
5946 +
5947 + if (err)
5948 + break;
5949 +
5950 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5951 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5952 + nop == 0x01000000U)
5953 + {
5954 + unsigned int addr, save, call;
5955 +
5956 + if ((ba & 0xFFC00000U) == 0x30800000U)
5957 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5958 + else
5959 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
5960 +
5961 + err = get_user(save, (unsigned int *)addr);
5962 + err |= get_user(call, (unsigned int *)(addr+4));
5963 + err |= get_user(nop, (unsigned int *)(addr+8));
5964 + if (err)
5965 + break;
5966 +
5967 +#ifdef CONFIG_PAX_DLRESOLVE
5968 + if (save == 0x9DE3BFA8U &&
5969 + (call & 0xC0000000U) == 0x40000000U &&
5970 + nop == 0x01000000U)
5971 + {
5972 + struct vm_area_struct *vma;
5973 + unsigned long call_dl_resolve;
5974 +
5975 + down_read(&current->mm->mmap_sem);
5976 + call_dl_resolve = current->mm->call_dl_resolve;
5977 + up_read(&current->mm->mmap_sem);
5978 + if (likely(call_dl_resolve))
5979 + goto emulate;
5980 +
5981 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5982 +
5983 + down_write(&current->mm->mmap_sem);
5984 + if (current->mm->call_dl_resolve) {
5985 + call_dl_resolve = current->mm->call_dl_resolve;
5986 + up_write(&current->mm->mmap_sem);
5987 + if (vma)
5988 + kmem_cache_free(vm_area_cachep, vma);
5989 + goto emulate;
5990 + }
5991 +
5992 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5993 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5994 + up_write(&current->mm->mmap_sem);
5995 + if (vma)
5996 + kmem_cache_free(vm_area_cachep, vma);
5997 + return 1;
5998 + }
5999 +
6000 + if (pax_insert_vma(vma, call_dl_resolve)) {
6001 + up_write(&current->mm->mmap_sem);
6002 + kmem_cache_free(vm_area_cachep, vma);
6003 + return 1;
6004 + }
6005 +
6006 + current->mm->call_dl_resolve = call_dl_resolve;
6007 + up_write(&current->mm->mmap_sem);
6008 +
6009 +emulate:
6010 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6011 + regs->pc = call_dl_resolve;
6012 + regs->npc = addr+4;
6013 + return 3;
6014 + }
6015 +#endif
6016 +
6017 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6018 + if ((save & 0xFFC00000U) == 0x05000000U &&
6019 + (call & 0xFFFFE000U) == 0x85C0A000U &&
6020 + nop == 0x01000000U)
6021 + {
6022 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6023 + regs->u_regs[UREG_G2] = addr + 4;
6024 + addr = (save & 0x003FFFFFU) << 10;
6025 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6026 + regs->pc = addr;
6027 + regs->npc = addr+4;
6028 + return 3;
6029 + }
6030 + }
6031 + } while (0);
6032 +
6033 + do { /* PaX: unpatched PLT emulation step 2 */
6034 + unsigned int save, call, nop;
6035 +
6036 + err = get_user(save, (unsigned int *)(regs->pc-4));
6037 + err |= get_user(call, (unsigned int *)regs->pc);
6038 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
6039 + if (err)
6040 + break;
6041 +
6042 + if (save == 0x9DE3BFA8U &&
6043 + (call & 0xC0000000U) == 0x40000000U &&
6044 + nop == 0x01000000U)
6045 + {
6046 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6047 +
6048 + regs->u_regs[UREG_RETPC] = regs->pc;
6049 + regs->pc = dl_resolve;
6050 + regs->npc = dl_resolve+4;
6051 + return 3;
6052 + }
6053 + } while (0);
6054 +#endif
6055 +
6056 + return 1;
6057 +}
6058 +
6059 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6060 +{
6061 + unsigned long i;
6062 +
6063 + printk(KERN_ERR "PAX: bytes at PC: ");
6064 + for (i = 0; i < 8; i++) {
6065 + unsigned int c;
6066 + if (get_user(c, (unsigned int *)pc+i))
6067 + printk(KERN_CONT "???????? ");
6068 + else
6069 + printk(KERN_CONT "%08x ", c);
6070 + }
6071 + printk("\n");
6072 +}
6073 +#endif
6074 +
6075 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
6076 unsigned long address)
6077 {
6078 @@ -231,6 +495,24 @@ good_area:
6079 if(!(vma->vm_flags & VM_WRITE))
6080 goto bad_area;
6081 } else {
6082 +
6083 +#ifdef CONFIG_PAX_PAGEEXEC
6084 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6085 + up_read(&mm->mmap_sem);
6086 + switch (pax_handle_fetch_fault(regs)) {
6087 +
6088 +#ifdef CONFIG_PAX_EMUPLT
6089 + case 2:
6090 + case 3:
6091 + return;
6092 +#endif
6093 +
6094 + }
6095 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6096 + do_group_exit(SIGKILL);
6097 + }
6098 +#endif
6099 +
6100 /* Allow reads even for write-only mappings */
6101 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6102 goto bad_area;
6103 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6104 index 43b0da9..a0b78f9 100644
6105 --- a/arch/sparc/mm/fault_64.c
6106 +++ b/arch/sparc/mm/fault_64.c
6107 @@ -20,6 +20,9 @@
6108 #include <linux/kprobes.h>
6109 #include <linux/kdebug.h>
6110 #include <linux/percpu.h>
6111 +#include <linux/slab.h>
6112 +#include <linux/pagemap.h>
6113 +#include <linux/compiler.h>
6114
6115 #include <asm/page.h>
6116 #include <asm/pgtable.h>
6117 @@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6118 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6119 regs->tpc);
6120 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6121 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6122 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6123 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6124 dump_stack();
6125 unhandled_fault(regs->tpc, current, regs);
6126 @@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_address(struct pt_regs *regs,
6127 show_regs(regs);
6128 }
6129
6130 +#ifdef CONFIG_PAX_PAGEEXEC
6131 +#ifdef CONFIG_PAX_DLRESOLVE
6132 +static void pax_emuplt_close(struct vm_area_struct *vma)
6133 +{
6134 + vma->vm_mm->call_dl_resolve = 0UL;
6135 +}
6136 +
6137 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6138 +{
6139 + unsigned int *kaddr;
6140 +
6141 + vmf->page = alloc_page(GFP_HIGHUSER);
6142 + if (!vmf->page)
6143 + return VM_FAULT_OOM;
6144 +
6145 + kaddr = kmap(vmf->page);
6146 + memset(kaddr, 0, PAGE_SIZE);
6147 + kaddr[0] = 0x9DE3BFA8U; /* save */
6148 + flush_dcache_page(vmf->page);
6149 + kunmap(vmf->page);
6150 + return VM_FAULT_MAJOR;
6151 +}
6152 +
6153 +static const struct vm_operations_struct pax_vm_ops = {
6154 + .close = pax_emuplt_close,
6155 + .fault = pax_emuplt_fault
6156 +};
6157 +
6158 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6159 +{
6160 + int ret;
6161 +
6162 + vma->vm_mm = current->mm;
6163 + vma->vm_start = addr;
6164 + vma->vm_end = addr + PAGE_SIZE;
6165 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6166 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6167 + vma->vm_ops = &pax_vm_ops;
6168 +
6169 + ret = insert_vm_struct(current->mm, vma);
6170 + if (ret)
6171 + return ret;
6172 +
6173 + ++current->mm->total_vm;
6174 + return 0;
6175 +}
6176 +#endif
6177 +
6178 +/*
6179 + * PaX: decide what to do with offenders (regs->tpc = fault address)
6180 + *
6181 + * returns 1 when task should be killed
6182 + * 2 when patched PLT trampoline was detected
6183 + * 3 when unpatched PLT trampoline was detected
6184 + */
6185 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6186 +{
6187 +
6188 +#ifdef CONFIG_PAX_EMUPLT
6189 + int err;
6190 +
6191 + do { /* PaX: patched PLT emulation #1 */
6192 + unsigned int sethi1, sethi2, jmpl;
6193 +
6194 + err = get_user(sethi1, (unsigned int *)regs->tpc);
6195 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6196 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6197 +
6198 + if (err)
6199 + break;
6200 +
6201 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6202 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6203 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6204 + {
6205 + unsigned long addr;
6206 +
6207 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6208 + addr = regs->u_regs[UREG_G1];
6209 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6210 +
6211 + if (test_thread_flag(TIF_32BIT))
6212 + addr &= 0xFFFFFFFFUL;
6213 +
6214 + regs->tpc = addr;
6215 + regs->tnpc = addr+4;
6216 + return 2;
6217 + }
6218 + } while (0);
6219 +
6220 + { /* PaX: patched PLT emulation #2 */
6221 + unsigned int ba;
6222 +
6223 + err = get_user(ba, (unsigned int *)regs->tpc);
6224 +
6225 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6226 + unsigned long addr;
6227 +
6228 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6229 +
6230 + if (test_thread_flag(TIF_32BIT))
6231 + addr &= 0xFFFFFFFFUL;
6232 +
6233 + regs->tpc = addr;
6234 + regs->tnpc = addr+4;
6235 + return 2;
6236 + }
6237 + }
6238 +
6239 + do { /* PaX: patched PLT emulation #3 */
6240 + unsigned int sethi, jmpl, nop;
6241 +
6242 + err = get_user(sethi, (unsigned int *)regs->tpc);
6243 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
6244 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6245 +
6246 + if (err)
6247 + break;
6248 +
6249 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6250 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6251 + nop == 0x01000000U)
6252 + {
6253 + unsigned long addr;
6254 +
6255 + addr = (sethi & 0x003FFFFFU) << 10;
6256 + regs->u_regs[UREG_G1] = addr;
6257 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6258 +
6259 + if (test_thread_flag(TIF_32BIT))
6260 + addr &= 0xFFFFFFFFUL;
6261 +
6262 + regs->tpc = addr;
6263 + regs->tnpc = addr+4;
6264 + return 2;
6265 + }
6266 + } while (0);
6267 +
6268 + do { /* PaX: patched PLT emulation #4 */
6269 + unsigned int sethi, mov1, call, mov2;
6270 +
6271 + err = get_user(sethi, (unsigned int *)regs->tpc);
6272 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
6273 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
6274 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
6275 +
6276 + if (err)
6277 + break;
6278 +
6279 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6280 + mov1 == 0x8210000FU &&
6281 + (call & 0xC0000000U) == 0x40000000U &&
6282 + mov2 == 0x9E100001U)
6283 + {
6284 + unsigned long addr;
6285 +
6286 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
6287 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6288 +
6289 + if (test_thread_flag(TIF_32BIT))
6290 + addr &= 0xFFFFFFFFUL;
6291 +
6292 + regs->tpc = addr;
6293 + regs->tnpc = addr+4;
6294 + return 2;
6295 + }
6296 + } while (0);
6297 +
6298 + do { /* PaX: patched PLT emulation #5 */
6299 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
6300 +
6301 + err = get_user(sethi, (unsigned int *)regs->tpc);
6302 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6303 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6304 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
6305 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
6306 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
6307 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
6308 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
6309 +
6310 + if (err)
6311 + break;
6312 +
6313 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6314 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
6315 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6316 + (or1 & 0xFFFFE000U) == 0x82106000U &&
6317 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
6318 + sllx == 0x83287020U &&
6319 + jmpl == 0x81C04005U &&
6320 + nop == 0x01000000U)
6321 + {
6322 + unsigned long addr;
6323 +
6324 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6325 + regs->u_regs[UREG_G1] <<= 32;
6326 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6327 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6328 + regs->tpc = addr;
6329 + regs->tnpc = addr+4;
6330 + return 2;
6331 + }
6332 + } while (0);
6333 +
6334 + do { /* PaX: patched PLT emulation #6 */
6335 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
6336 +
6337 + err = get_user(sethi, (unsigned int *)regs->tpc);
6338 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6339 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6340 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
6341 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
6342 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
6343 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
6344 +
6345 + if (err)
6346 + break;
6347 +
6348 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6349 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
6350 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6351 + sllx == 0x83287020U &&
6352 + (or & 0xFFFFE000U) == 0x8A116000U &&
6353 + jmpl == 0x81C04005U &&
6354 + nop == 0x01000000U)
6355 + {
6356 + unsigned long addr;
6357 +
6358 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
6359 + regs->u_regs[UREG_G1] <<= 32;
6360 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
6361 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6362 + regs->tpc = addr;
6363 + regs->tnpc = addr+4;
6364 + return 2;
6365 + }
6366 + } while (0);
6367 +
6368 + do { /* PaX: unpatched PLT emulation step 1 */
6369 + unsigned int sethi, ba, nop;
6370 +
6371 + err = get_user(sethi, (unsigned int *)regs->tpc);
6372 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6373 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6374 +
6375 + if (err)
6376 + break;
6377 +
6378 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6379 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6380 + nop == 0x01000000U)
6381 + {
6382 + unsigned long addr;
6383 + unsigned int save, call;
6384 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
6385 +
6386 + if ((ba & 0xFFC00000U) == 0x30800000U)
6387 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6388 + else
6389 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6390 +
6391 + if (test_thread_flag(TIF_32BIT))
6392 + addr &= 0xFFFFFFFFUL;
6393 +
6394 + err = get_user(save, (unsigned int *)addr);
6395 + err |= get_user(call, (unsigned int *)(addr+4));
6396 + err |= get_user(nop, (unsigned int *)(addr+8));
6397 + if (err)
6398 + break;
6399 +
6400 +#ifdef CONFIG_PAX_DLRESOLVE
6401 + if (save == 0x9DE3BFA8U &&
6402 + (call & 0xC0000000U) == 0x40000000U &&
6403 + nop == 0x01000000U)
6404 + {
6405 + struct vm_area_struct *vma;
6406 + unsigned long call_dl_resolve;
6407 +
6408 + down_read(&current->mm->mmap_sem);
6409 + call_dl_resolve = current->mm->call_dl_resolve;
6410 + up_read(&current->mm->mmap_sem);
6411 + if (likely(call_dl_resolve))
6412 + goto emulate;
6413 +
6414 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6415 +
6416 + down_write(&current->mm->mmap_sem);
6417 + if (current->mm->call_dl_resolve) {
6418 + call_dl_resolve = current->mm->call_dl_resolve;
6419 + up_write(&current->mm->mmap_sem);
6420 + if (vma)
6421 + kmem_cache_free(vm_area_cachep, vma);
6422 + goto emulate;
6423 + }
6424 +
6425 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6426 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6427 + up_write(&current->mm->mmap_sem);
6428 + if (vma)
6429 + kmem_cache_free(vm_area_cachep, vma);
6430 + return 1;
6431 + }
6432 +
6433 + if (pax_insert_vma(vma, call_dl_resolve)) {
6434 + up_write(&current->mm->mmap_sem);
6435 + kmem_cache_free(vm_area_cachep, vma);
6436 + return 1;
6437 + }
6438 +
6439 + current->mm->call_dl_resolve = call_dl_resolve;
6440 + up_write(&current->mm->mmap_sem);
6441 +
6442 +emulate:
6443 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6444 + regs->tpc = call_dl_resolve;
6445 + regs->tnpc = addr+4;
6446 + return 3;
6447 + }
6448 +#endif
6449 +
6450 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6451 + if ((save & 0xFFC00000U) == 0x05000000U &&
6452 + (call & 0xFFFFE000U) == 0x85C0A000U &&
6453 + nop == 0x01000000U)
6454 + {
6455 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6456 + regs->u_regs[UREG_G2] = addr + 4;
6457 + addr = (save & 0x003FFFFFU) << 10;
6458 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6459 +
6460 + if (test_thread_flag(TIF_32BIT))
6461 + addr &= 0xFFFFFFFFUL;
6462 +
6463 + regs->tpc = addr;
6464 + regs->tnpc = addr+4;
6465 + return 3;
6466 + }
6467 +
6468 + /* PaX: 64-bit PLT stub */
6469 + err = get_user(sethi1, (unsigned int *)addr);
6470 + err |= get_user(sethi2, (unsigned int *)(addr+4));
6471 + err |= get_user(or1, (unsigned int *)(addr+8));
6472 + err |= get_user(or2, (unsigned int *)(addr+12));
6473 + err |= get_user(sllx, (unsigned int *)(addr+16));
6474 + err |= get_user(add, (unsigned int *)(addr+20));
6475 + err |= get_user(jmpl, (unsigned int *)(addr+24));
6476 + err |= get_user(nop, (unsigned int *)(addr+28));
6477 + if (err)
6478 + break;
6479 +
6480 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
6481 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6482 + (or1 & 0xFFFFE000U) == 0x88112000U &&
6483 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
6484 + sllx == 0x89293020U &&
6485 + add == 0x8A010005U &&
6486 + jmpl == 0x89C14000U &&
6487 + nop == 0x01000000U)
6488 + {
6489 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6490 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6491 + regs->u_regs[UREG_G4] <<= 32;
6492 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6493 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
6494 + regs->u_regs[UREG_G4] = addr + 24;
6495 + addr = regs->u_regs[UREG_G5];
6496 + regs->tpc = addr;
6497 + regs->tnpc = addr+4;
6498 + return 3;
6499 + }
6500 + }
6501 + } while (0);
6502 +
6503 +#ifdef CONFIG_PAX_DLRESOLVE
6504 + do { /* PaX: unpatched PLT emulation step 2 */
6505 + unsigned int save, call, nop;
6506 +
6507 + err = get_user(save, (unsigned int *)(regs->tpc-4));
6508 + err |= get_user(call, (unsigned int *)regs->tpc);
6509 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
6510 + if (err)
6511 + break;
6512 +
6513 + if (save == 0x9DE3BFA8U &&
6514 + (call & 0xC0000000U) == 0x40000000U &&
6515 + nop == 0x01000000U)
6516 + {
6517 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6518 +
6519 + if (test_thread_flag(TIF_32BIT))
6520 + dl_resolve &= 0xFFFFFFFFUL;
6521 +
6522 + regs->u_regs[UREG_RETPC] = regs->tpc;
6523 + regs->tpc = dl_resolve;
6524 + regs->tnpc = dl_resolve+4;
6525 + return 3;
6526 + }
6527 + } while (0);
6528 +#endif
6529 +
6530 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
6531 + unsigned int sethi, ba, nop;
6532 +
6533 + err = get_user(sethi, (unsigned int *)regs->tpc);
6534 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6535 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6536 +
6537 + if (err)
6538 + break;
6539 +
6540 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6541 + (ba & 0xFFF00000U) == 0x30600000U &&
6542 + nop == 0x01000000U)
6543 + {
6544 + unsigned long addr;
6545 +
6546 + addr = (sethi & 0x003FFFFFU) << 10;
6547 + regs->u_regs[UREG_G1] = addr;
6548 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6549 +
6550 + if (test_thread_flag(TIF_32BIT))
6551 + addr &= 0xFFFFFFFFUL;
6552 +
6553 + regs->tpc = addr;
6554 + regs->tnpc = addr+4;
6555 + return 2;
6556 + }
6557 + } while (0);
6558 +
6559 +#endif
6560 +
6561 + return 1;
6562 +}
6563 +
6564 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6565 +{
6566 + unsigned long i;
6567 +
6568 + printk(KERN_ERR "PAX: bytes at PC: ");
6569 + for (i = 0; i < 8; i++) {
6570 + unsigned int c;
6571 + if (get_user(c, (unsigned int *)pc+i))
6572 + printk(KERN_CONT "???????? ");
6573 + else
6574 + printk(KERN_CONT "%08x ", c);
6575 + }
6576 + printk("\n");
6577 +}
6578 +#endif
6579 +
6580 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6581 {
6582 struct mm_struct *mm = current->mm;
6583 @@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6584 if (!vma)
6585 goto bad_area;
6586
6587 +#ifdef CONFIG_PAX_PAGEEXEC
6588 + /* PaX: detect ITLB misses on non-exec pages */
6589 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
6590 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
6591 + {
6592 + if (address != regs->tpc)
6593 + goto good_area;
6594 +
6595 + up_read(&mm->mmap_sem);
6596 + switch (pax_handle_fetch_fault(regs)) {
6597 +
6598 +#ifdef CONFIG_PAX_EMUPLT
6599 + case 2:
6600 + case 3:
6601 + return;
6602 +#endif
6603 +
6604 + }
6605 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
6606 + do_group_exit(SIGKILL);
6607 + }
6608 +#endif
6609 +
6610 /* Pure DTLB misses do not tell us whether the fault causing
6611 * load/store/atomic was a write or not, it only says that there
6612 * was no match. So in such a case we (carefully) read the
6613 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
6614 index f27d103..1b06377 100644
6615 --- a/arch/sparc/mm/hugetlbpage.c
6616 +++ b/arch/sparc/mm/hugetlbpage.c
6617 @@ -69,7 +69,7 @@ full_search:
6618 }
6619 return -ENOMEM;
6620 }
6621 - if (likely(!vma || addr + len <= vma->vm_start)) {
6622 + if (likely(check_heap_stack_gap(vma, addr, len))) {
6623 /*
6624 * Remember the place where we stopped the search:
6625 */
6626 @@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6627 /* make sure it can fit in the remaining address space */
6628 if (likely(addr > len)) {
6629 vma = find_vma(mm, addr-len);
6630 - if (!vma || addr <= vma->vm_start) {
6631 + if (check_heap_stack_gap(vma, addr - len, len)) {
6632 /* remember the address as a hint for next time */
6633 return (mm->free_area_cache = addr-len);
6634 }
6635 @@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6636 if (unlikely(mm->mmap_base < len))
6637 goto bottomup;
6638
6639 - addr = (mm->mmap_base-len) & HPAGE_MASK;
6640 + addr = mm->mmap_base - len;
6641
6642 do {
6643 + addr &= HPAGE_MASK;
6644 /*
6645 * Lookup failure means no vma is above this address,
6646 * else if new region fits below vma->vm_start,
6647 * return with success:
6648 */
6649 vma = find_vma(mm, addr);
6650 - if (likely(!vma || addr+len <= vma->vm_start)) {
6651 + if (likely(check_heap_stack_gap(vma, addr, len))) {
6652 /* remember the address as a hint for next time */
6653 return (mm->free_area_cache = addr);
6654 }
6655 @@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6656 mm->cached_hole_size = vma->vm_start - addr;
6657
6658 /* try just below the current vma->vm_start */
6659 - addr = (vma->vm_start-len) & HPAGE_MASK;
6660 - } while (likely(len < vma->vm_start));
6661 + addr = skip_heap_stack_gap(vma, len);
6662 + } while (!IS_ERR_VALUE(addr));
6663
6664 bottomup:
6665 /*
6666 @@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
6667 if (addr) {
6668 addr = ALIGN(addr, HPAGE_SIZE);
6669 vma = find_vma(mm, addr);
6670 - if (task_size - len >= addr &&
6671 - (!vma || addr + len <= vma->vm_start))
6672 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6673 return addr;
6674 }
6675 if (mm->get_unmapped_area == arch_get_unmapped_area)
6676 diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
6677 index dc7c3b1..34c0070 100644
6678 --- a/arch/sparc/mm/init_32.c
6679 +++ b/arch/sparc/mm/init_32.c
6680 @@ -317,6 +317,9 @@ extern void device_scan(void);
6681 pgprot_t PAGE_SHARED __read_mostly;
6682 EXPORT_SYMBOL(PAGE_SHARED);
6683
6684 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
6685 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
6686 +
6687 void __init paging_init(void)
6688 {
6689 switch(sparc_cpu_model) {
6690 @@ -345,17 +348,17 @@ void __init paging_init(void)
6691
6692 /* Initialize the protection map with non-constant, MMU dependent values. */
6693 protection_map[0] = PAGE_NONE;
6694 - protection_map[1] = PAGE_READONLY;
6695 - protection_map[2] = PAGE_COPY;
6696 - protection_map[3] = PAGE_COPY;
6697 + protection_map[1] = PAGE_READONLY_NOEXEC;
6698 + protection_map[2] = PAGE_COPY_NOEXEC;
6699 + protection_map[3] = PAGE_COPY_NOEXEC;
6700 protection_map[4] = PAGE_READONLY;
6701 protection_map[5] = PAGE_READONLY;
6702 protection_map[6] = PAGE_COPY;
6703 protection_map[7] = PAGE_COPY;
6704 protection_map[8] = PAGE_NONE;
6705 - protection_map[9] = PAGE_READONLY;
6706 - protection_map[10] = PAGE_SHARED;
6707 - protection_map[11] = PAGE_SHARED;
6708 + protection_map[9] = PAGE_READONLY_NOEXEC;
6709 + protection_map[10] = PAGE_SHARED_NOEXEC;
6710 + protection_map[11] = PAGE_SHARED_NOEXEC;
6711 protection_map[12] = PAGE_READONLY;
6712 protection_map[13] = PAGE_READONLY;
6713 protection_map[14] = PAGE_SHARED;
6714 diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
6715 index 509b1ff..bfd7118 100644
6716 --- a/arch/sparc/mm/srmmu.c
6717 +++ b/arch/sparc/mm/srmmu.c
6718 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
6719 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
6720 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6721 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6722 +
6723 +#ifdef CONFIG_PAX_PAGEEXEC
6724 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6725 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6726 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6727 +#endif
6728 +
6729 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6730 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6731
6732 diff --git a/arch/um/Makefile b/arch/um/Makefile
6733 index fc633db..5e1a1c2 100644
6734 --- a/arch/um/Makefile
6735 +++ b/arch/um/Makefile
6736 @@ -49,6 +49,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
6737 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
6738 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64
6739
6740 +ifdef CONSTIFY_PLUGIN
6741 +USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6742 +endif
6743 +
6744 include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH)
6745
6746 #This will adjust *FLAGS accordingly to the platform.
6747 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
6748 index 6c03acd..a5e0215 100644
6749 --- a/arch/um/include/asm/kmap_types.h
6750 +++ b/arch/um/include/asm/kmap_types.h
6751 @@ -23,6 +23,7 @@ enum km_type {
6752 KM_IRQ1,
6753 KM_SOFTIRQ0,
6754 KM_SOFTIRQ1,
6755 + KM_CLEARPAGE,
6756 KM_TYPE_NR
6757 };
6758
6759 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
6760 index 4cc9b6c..02e5029 100644
6761 --- a/arch/um/include/asm/page.h
6762 +++ b/arch/um/include/asm/page.h
6763 @@ -14,6 +14,9 @@
6764 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6765 #define PAGE_MASK (~(PAGE_SIZE-1))
6766
6767 +#define ktla_ktva(addr) (addr)
6768 +#define ktva_ktla(addr) (addr)
6769 +
6770 #ifndef __ASSEMBLY__
6771
6772 struct page;
6773 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
6774 index 4a28a15..654dc2a 100644
6775 --- a/arch/um/kernel/process.c
6776 +++ b/arch/um/kernel/process.c
6777 @@ -393,22 +393,6 @@ int singlestepping(void * t)
6778 return 2;
6779 }
6780
6781 -/*
6782 - * Only x86 and x86_64 have an arch_align_stack().
6783 - * All other arches have "#define arch_align_stack(x) (x)"
6784 - * in their asm/system.h
6785 - * As this is included in UML from asm-um/system-generic.h,
6786 - * we can use it to behave as the subarch does.
6787 - */
6788 -#ifndef arch_align_stack
6789 -unsigned long arch_align_stack(unsigned long sp)
6790 -{
6791 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6792 - sp -= get_random_int() % 8192;
6793 - return sp & ~0xf;
6794 -}
6795 -#endif
6796 -
6797 unsigned long get_wchan(struct task_struct *p)
6798 {
6799 unsigned long stack_page, sp, ip;
6800 diff --git a/arch/um/sys-i386/shared/sysdep/system.h b/arch/um/sys-i386/shared/sysdep/system.h
6801 index d1b93c4..ae1b7fd 100644
6802 --- a/arch/um/sys-i386/shared/sysdep/system.h
6803 +++ b/arch/um/sys-i386/shared/sysdep/system.h
6804 @@ -17,7 +17,7 @@
6805 # define AT_VECTOR_SIZE_ARCH 1
6806 #endif
6807
6808 -extern unsigned long arch_align_stack(unsigned long sp);
6809 +#define arch_align_stack(x) ((x) & ~0xfUL)
6810
6811 void default_idle(void);
6812
6813 diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
6814 index 857ca0b..9a2669d 100644
6815 --- a/arch/um/sys-i386/syscalls.c
6816 +++ b/arch/um/sys-i386/syscalls.c
6817 @@ -11,6 +11,21 @@
6818 #include "asm/uaccess.h"
6819 #include "asm/unistd.h"
6820
6821 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
6822 +{
6823 + unsigned long pax_task_size = TASK_SIZE;
6824 +
6825 +#ifdef CONFIG_PAX_SEGMEXEC
6826 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
6827 + pax_task_size = SEGMEXEC_TASK_SIZE;
6828 +#endif
6829 +
6830 + if (len > pax_task_size || addr > pax_task_size - len)
6831 + return -EINVAL;
6832 +
6833 + return 0;
6834 +}
6835 +
6836 /*
6837 * Perform the select(nd, in, out, ex, tv) and mmap() system
6838 * calls. Linux/i386 didn't use to be able to handle more than
6839 diff --git a/arch/um/sys-x86_64/shared/sysdep/system.h b/arch/um/sys-x86_64/shared/sysdep/system.h
6840 index d1b93c4..ae1b7fd 100644
6841 --- a/arch/um/sys-x86_64/shared/sysdep/system.h
6842 +++ b/arch/um/sys-x86_64/shared/sysdep/system.h
6843 @@ -17,7 +17,7 @@
6844 # define AT_VECTOR_SIZE_ARCH 1
6845 #endif
6846
6847 -extern unsigned long arch_align_stack(unsigned long sp);
6848 +#define arch_align_stack(x) ((x) & ~0xfUL)
6849
6850 void default_idle(void);
6851
6852 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
6853 index 73ae02a..f932de5 100644
6854 --- a/arch/x86/Kconfig
6855 +++ b/arch/x86/Kconfig
6856 @@ -223,7 +223,7 @@ config X86_TRAMPOLINE
6857
6858 config X86_32_LAZY_GS
6859 def_bool y
6860 - depends on X86_32 && !CC_STACKPROTECTOR
6861 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
6862
6863 config KTIME_SCALAR
6864 def_bool X86_32
6865 @@ -1008,7 +1008,7 @@ choice
6866
6867 config NOHIGHMEM
6868 bool "off"
6869 - depends on !X86_NUMAQ
6870 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
6871 ---help---
6872 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
6873 However, the address space of 32-bit x86 processors is only 4
6874 @@ -1045,7 +1045,7 @@ config NOHIGHMEM
6875
6876 config HIGHMEM4G
6877 bool "4GB"
6878 - depends on !X86_NUMAQ
6879 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
6880 ---help---
6881 Select this if you have a 32-bit processor and between 1 and 4
6882 gigabytes of physical RAM.
6883 @@ -1099,7 +1099,7 @@ config PAGE_OFFSET
6884 hex
6885 default 0xB0000000 if VMSPLIT_3G_OPT
6886 default 0x80000000 if VMSPLIT_2G
6887 - default 0x78000000 if VMSPLIT_2G_OPT
6888 + default 0x70000000 if VMSPLIT_2G_OPT
6889 default 0x40000000 if VMSPLIT_1G
6890 default 0xC0000000
6891 depends on X86_32
6892 @@ -1460,6 +1460,7 @@ config SECCOMP
6893
6894 config CC_STACKPROTECTOR
6895 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
6896 + depends on X86_64 || !PAX_MEMORY_UDEREF
6897 ---help---
6898 This option turns on the -fstack-protector GCC feature. This
6899 feature puts, at the beginning of functions, a canary value on
6900 @@ -1517,6 +1518,7 @@ config KEXEC_JUMP
6901 config PHYSICAL_START
6902 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
6903 default "0x1000000"
6904 + range 0x400000 0x40000000
6905 ---help---
6906 This gives the physical address where the kernel is loaded.
6907
6908 @@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
6909 hex
6910 prompt "Alignment value to which kernel should be aligned" if X86_32
6911 default "0x1000000"
6912 + range 0x400000 0x1000000 if PAX_KERNEXEC
6913 range 0x2000 0x1000000
6914 ---help---
6915 This value puts the alignment restrictions on physical address
6916 @@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
6917 Say N if you want to disable CPU hotplug.
6918
6919 config COMPAT_VDSO
6920 - def_bool y
6921 + def_bool n
6922 prompt "Compat VDSO support"
6923 depends on X86_32 || IA32_EMULATION
6924 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
6925 ---help---
6926 Map the 32-bit VDSO to the predictable old-style address too.
6927 ---help---
6928 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
6929 index 0e566103..1a6b57e 100644
6930 --- a/arch/x86/Kconfig.cpu
6931 +++ b/arch/x86/Kconfig.cpu
6932 @@ -340,7 +340,7 @@ config X86_PPRO_FENCE
6933
6934 config X86_F00F_BUG
6935 def_bool y
6936 - depends on M586MMX || M586TSC || M586 || M486 || M386
6937 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
6938
6939 config X86_WP_WORKS_OK
6940 def_bool y
6941 @@ -360,7 +360,7 @@ config X86_POPAD_OK
6942
6943 config X86_ALIGNMENT_16
6944 def_bool y
6945 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
6946 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
6947
6948 config X86_INTEL_USERCOPY
6949 def_bool y
6950 @@ -406,7 +406,7 @@ config X86_CMPXCHG64
6951 # generates cmov.
6952 config X86_CMOV
6953 def_bool y
6954 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
6955 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
6956
6957 config X86_MINIMUM_CPU_FAMILY
6958 int
6959 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
6960 index d105f29..c928727 100644
6961 --- a/arch/x86/Kconfig.debug
6962 +++ b/arch/x86/Kconfig.debug
6963 @@ -99,7 +99,7 @@ config X86_PTDUMP
6964 config DEBUG_RODATA
6965 bool "Write protect kernel read-only data structures"
6966 default y
6967 - depends on DEBUG_KERNEL
6968 + depends on DEBUG_KERNEL && BROKEN
6969 ---help---
6970 Mark the kernel read-only data as write-protected in the pagetables,
6971 in order to catch accidental (and incorrect) writes to such const
6972 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
6973 index d2d24c9..0f21f8d 100644
6974 --- a/arch/x86/Makefile
6975 +++ b/arch/x86/Makefile
6976 @@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
6977 else
6978 BITS := 64
6979 UTS_MACHINE := x86_64
6980 + biarch := $(call cc-option,-m64)
6981 CHECKFLAGS += -D__x86_64__ -m64
6982
6983 KBUILD_AFLAGS += -m64
6984 @@ -189,3 +190,12 @@ define archhelp
6985 echo ' FDARGS="..." arguments for the booted kernel'
6986 echo ' FDINITRD=file initrd for the booted kernel'
6987 endef
6988 +
6989 +define OLD_LD
6990 +
6991 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
6992 +*** Please upgrade your binutils to 2.18 or newer
6993 +endef
6994 +
6995 +archprepare:
6996 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
6997 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
6998 index ec749c2..bbb5319 100644
6999 --- a/arch/x86/boot/Makefile
7000 +++ b/arch/x86/boot/Makefile
7001 @@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7002 $(call cc-option, -fno-stack-protector) \
7003 $(call cc-option, -mpreferred-stack-boundary=2)
7004 KBUILD_CFLAGS += $(call cc-option, -m32)
7005 +ifdef CONSTIFY_PLUGIN
7006 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7007 +endif
7008 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7009 GCOV_PROFILE := n
7010
7011 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7012 index 878e4b9..20537ab 100644
7013 --- a/arch/x86/boot/bitops.h
7014 +++ b/arch/x86/boot/bitops.h
7015 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7016 u8 v;
7017 const u32 *p = (const u32 *)addr;
7018
7019 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7020 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7021 return v;
7022 }
7023
7024 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7025
7026 static inline void set_bit(int nr, void *addr)
7027 {
7028 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7029 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7030 }
7031
7032 #endif /* BOOT_BITOPS_H */
7033 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7034 index 98239d2..f40214c 100644
7035 --- a/arch/x86/boot/boot.h
7036 +++ b/arch/x86/boot/boot.h
7037 @@ -82,7 +82,7 @@ static inline void io_delay(void)
7038 static inline u16 ds(void)
7039 {
7040 u16 seg;
7041 - asm("movw %%ds,%0" : "=rm" (seg));
7042 + asm volatile("movw %%ds,%0" : "=rm" (seg));
7043 return seg;
7044 }
7045
7046 @@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7047 static inline int memcmp(const void *s1, const void *s2, size_t len)
7048 {
7049 u8 diff;
7050 - asm("repe; cmpsb; setnz %0"
7051 + asm volatile("repe; cmpsb; setnz %0"
7052 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7053 return diff;
7054 }
7055 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7056 index f8ed065..5bf5ff3 100644
7057 --- a/arch/x86/boot/compressed/Makefile
7058 +++ b/arch/x86/boot/compressed/Makefile
7059 @@ -13,6 +13,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7060 KBUILD_CFLAGS += $(cflags-y)
7061 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7062 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7063 +ifdef CONSTIFY_PLUGIN
7064 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7065 +endif
7066
7067 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7068 GCOV_PROFILE := n
7069 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7070 index f543b70..b60fba8 100644
7071 --- a/arch/x86/boot/compressed/head_32.S
7072 +++ b/arch/x86/boot/compressed/head_32.S
7073 @@ -76,7 +76,7 @@ ENTRY(startup_32)
7074 notl %eax
7075 andl %eax, %ebx
7076 #else
7077 - movl $LOAD_PHYSICAL_ADDR, %ebx
7078 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7079 #endif
7080
7081 /* Target address to relocate to for decompression */
7082 @@ -149,7 +149,7 @@ relocated:
7083 * and where it was actually loaded.
7084 */
7085 movl %ebp, %ebx
7086 - subl $LOAD_PHYSICAL_ADDR, %ebx
7087 + subl $____LOAD_PHYSICAL_ADDR, %ebx
7088 jz 2f /* Nothing to be done if loaded at compiled addr. */
7089 /*
7090 * Process relocations.
7091 @@ -157,8 +157,7 @@ relocated:
7092
7093 1: subl $4, %edi
7094 movl (%edi), %ecx
7095 - testl %ecx, %ecx
7096 - jz 2f
7097 + jecxz 2f
7098 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7099 jmp 1b
7100 2:
7101 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7102 index 077e1b6..2c6b13b 100644
7103 --- a/arch/x86/boot/compressed/head_64.S
7104 +++ b/arch/x86/boot/compressed/head_64.S
7105 @@ -91,7 +91,7 @@ ENTRY(startup_32)
7106 notl %eax
7107 andl %eax, %ebx
7108 #else
7109 - movl $LOAD_PHYSICAL_ADDR, %ebx
7110 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7111 #endif
7112
7113 /* Target address to relocate to for decompression */
7114 @@ -183,7 +183,7 @@ no_longmode:
7115 hlt
7116 jmp 1b
7117
7118 -#include "../../kernel/verify_cpu_64.S"
7119 +#include "../../kernel/verify_cpu.S"
7120
7121 /*
7122 * Be careful here startup_64 needs to be at a predictable
7123 @@ -234,7 +234,7 @@ ENTRY(startup_64)
7124 notq %rax
7125 andq %rax, %rbp
7126 #else
7127 - movq $LOAD_PHYSICAL_ADDR, %rbp
7128 + movq $____LOAD_PHYSICAL_ADDR, %rbp
7129 #endif
7130
7131 /* Target address to relocate to for decompression */
7132 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7133 index 842b2a3..f00178b 100644
7134 --- a/arch/x86/boot/compressed/misc.c
7135 +++ b/arch/x86/boot/compressed/misc.c
7136 @@ -288,7 +288,7 @@ static void parse_elf(void *output)
7137 case PT_LOAD:
7138 #ifdef CONFIG_RELOCATABLE
7139 dest = output;
7140 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7141 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7142 #else
7143 dest = (void *)(phdr->p_paddr);
7144 #endif
7145 @@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7146 error("Destination address too large");
7147 #endif
7148 #ifndef CONFIG_RELOCATABLE
7149 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7150 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7151 error("Wrong destination address");
7152 #endif
7153
7154 diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
7155 index bcbd36c..b1754af 100644
7156 --- a/arch/x86/boot/compressed/mkpiggy.c
7157 +++ b/arch/x86/boot/compressed/mkpiggy.c
7158 @@ -74,7 +74,7 @@ int main(int argc, char *argv[])
7159
7160 offs = (olen > ilen) ? olen - ilen : 0;
7161 offs += olen >> 12; /* Add 8 bytes for each 32K block */
7162 - offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
7163 + offs += 64*1024; /* Add 64K bytes slack */
7164 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
7165
7166 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
7167 diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
7168 index bbeb0c3..f5167ab 100644
7169 --- a/arch/x86/boot/compressed/relocs.c
7170 +++ b/arch/x86/boot/compressed/relocs.c
7171 @@ -10,8 +10,11 @@
7172 #define USE_BSD
7173 #include <endian.h>
7174
7175 +#include "../../../../include/linux/autoconf.h"
7176 +
7177 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
7178 static Elf32_Ehdr ehdr;
7179 +static Elf32_Phdr *phdr;
7180 static unsigned long reloc_count, reloc_idx;
7181 static unsigned long *relocs;
7182
7183 @@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
7184
7185 static int is_safe_abs_reloc(const char* sym_name)
7186 {
7187 - int i;
7188 + unsigned int i;
7189
7190 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
7191 if (!strcmp(sym_name, safe_abs_relocs[i]))
7192 @@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
7193 }
7194 }
7195
7196 +static void read_phdrs(FILE *fp)
7197 +{
7198 + unsigned int i;
7199 +
7200 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
7201 + if (!phdr) {
7202 + die("Unable to allocate %d program headers\n",
7203 + ehdr.e_phnum);
7204 + }
7205 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
7206 + die("Seek to %d failed: %s\n",
7207 + ehdr.e_phoff, strerror(errno));
7208 + }
7209 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
7210 + die("Cannot read ELF program headers: %s\n",
7211 + strerror(errno));
7212 + }
7213 + for(i = 0; i < ehdr.e_phnum; i++) {
7214 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
7215 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
7216 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
7217 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
7218 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
7219 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
7220 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
7221 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
7222 + }
7223 +
7224 +}
7225 +
7226 static void read_shdrs(FILE *fp)
7227 {
7228 - int i;
7229 + unsigned int i;
7230 Elf32_Shdr shdr;
7231
7232 secs = calloc(ehdr.e_shnum, sizeof(struct section));
7233 @@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
7234
7235 static void read_strtabs(FILE *fp)
7236 {
7237 - int i;
7238 + unsigned int i;
7239 for (i = 0; i < ehdr.e_shnum; i++) {
7240 struct section *sec = &secs[i];
7241 if (sec->shdr.sh_type != SHT_STRTAB) {
7242 @@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
7243
7244 static void read_symtabs(FILE *fp)
7245 {
7246 - int i,j;
7247 + unsigned int i,j;
7248 for (i = 0; i < ehdr.e_shnum; i++) {
7249 struct section *sec = &secs[i];
7250 if (sec->shdr.sh_type != SHT_SYMTAB) {
7251 @@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
7252
7253 static void read_relocs(FILE *fp)
7254 {
7255 - int i,j;
7256 + unsigned int i,j;
7257 + uint32_t base;
7258 +
7259 for (i = 0; i < ehdr.e_shnum; i++) {
7260 struct section *sec = &secs[i];
7261 if (sec->shdr.sh_type != SHT_REL) {
7262 @@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
7263 die("Cannot read symbol table: %s\n",
7264 strerror(errno));
7265 }
7266 + base = 0;
7267 + for (j = 0; j < ehdr.e_phnum; j++) {
7268 + if (phdr[j].p_type != PT_LOAD )
7269 + continue;
7270 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
7271 + continue;
7272 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
7273 + break;
7274 + }
7275 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
7276 Elf32_Rel *rel = &sec->reltab[j];
7277 - rel->r_offset = elf32_to_cpu(rel->r_offset);
7278 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
7279 rel->r_info = elf32_to_cpu(rel->r_info);
7280 }
7281 }
7282 @@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
7283
7284 static void print_absolute_symbols(void)
7285 {
7286 - int i;
7287 + unsigned int i;
7288 printf("Absolute symbols\n");
7289 printf(" Num: Value Size Type Bind Visibility Name\n");
7290 for (i = 0; i < ehdr.e_shnum; i++) {
7291 struct section *sec = &secs[i];
7292 char *sym_strtab;
7293 Elf32_Sym *sh_symtab;
7294 - int j;
7295 + unsigned int j;
7296
7297 if (sec->shdr.sh_type != SHT_SYMTAB) {
7298 continue;
7299 @@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
7300
7301 static void print_absolute_relocs(void)
7302 {
7303 - int i, printed = 0;
7304 + unsigned int i, printed = 0;
7305
7306 for (i = 0; i < ehdr.e_shnum; i++) {
7307 struct section *sec = &secs[i];
7308 struct section *sec_applies, *sec_symtab;
7309 char *sym_strtab;
7310 Elf32_Sym *sh_symtab;
7311 - int j;
7312 + unsigned int j;
7313 if (sec->shdr.sh_type != SHT_REL) {
7314 continue;
7315 }
7316 @@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
7317
7318 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7319 {
7320 - int i;
7321 + unsigned int i;
7322 /* Walk through the relocations */
7323 for (i = 0; i < ehdr.e_shnum; i++) {
7324 char *sym_strtab;
7325 Elf32_Sym *sh_symtab;
7326 struct section *sec_applies, *sec_symtab;
7327 - int j;
7328 + unsigned int j;
7329 struct section *sec = &secs[i];
7330
7331 if (sec->shdr.sh_type != SHT_REL) {
7332 @@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7333 if (sym->st_shndx == SHN_ABS) {
7334 continue;
7335 }
7336 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
7337 + if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
7338 + continue;
7339 +
7340 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
7341 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
7342 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
7343 + continue;
7344 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
7345 + continue;
7346 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
7347 + continue;
7348 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
7349 + continue;
7350 +#endif
7351 if (r_type == R_386_NONE || r_type == R_386_PC32) {
7352 /*
7353 * NONE can be ignored and and PC relative
7354 @@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, const void *vb)
7355
7356 static void emit_relocs(int as_text)
7357 {
7358 - int i;
7359 + unsigned int i;
7360 /* Count how many relocations I have and allocate space for them. */
7361 reloc_count = 0;
7362 walk_relocs(count_reloc);
7363 @@ -634,6 +693,7 @@ int main(int argc, char **argv)
7364 fname, strerror(errno));
7365 }
7366 read_ehdr(fp);
7367 + read_phdrs(fp);
7368 read_shdrs(fp);
7369 read_strtabs(fp);
7370 read_symtabs(fp);
7371 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
7372 index 4d3ff03..e4972ff 100644
7373 --- a/arch/x86/boot/cpucheck.c
7374 +++ b/arch/x86/boot/cpucheck.c
7375 @@ -74,7 +74,7 @@ static int has_fpu(void)
7376 u16 fcw = -1, fsw = -1;
7377 u32 cr0;
7378
7379 - asm("movl %%cr0,%0" : "=r" (cr0));
7380 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
7381 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
7382 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
7383 asm volatile("movl %0,%%cr0" : : "r" (cr0));
7384 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
7385 {
7386 u32 f0, f1;
7387
7388 - asm("pushfl ; "
7389 + asm volatile("pushfl ; "
7390 "pushfl ; "
7391 "popl %0 ; "
7392 "movl %0,%1 ; "
7393 @@ -115,7 +115,7 @@ static void get_flags(void)
7394 set_bit(X86_FEATURE_FPU, cpu.flags);
7395
7396 if (has_eflag(X86_EFLAGS_ID)) {
7397 - asm("cpuid"
7398 + asm volatile("cpuid"
7399 : "=a" (max_intel_level),
7400 "=b" (cpu_vendor[0]),
7401 "=d" (cpu_vendor[1]),
7402 @@ -124,7 +124,7 @@ static void get_flags(void)
7403
7404 if (max_intel_level >= 0x00000001 &&
7405 max_intel_level <= 0x0000ffff) {
7406 - asm("cpuid"
7407 + asm volatile("cpuid"
7408 : "=a" (tfms),
7409 "=c" (cpu.flags[4]),
7410 "=d" (cpu.flags[0])
7411 @@ -136,7 +136,7 @@ static void get_flags(void)
7412 cpu.model += ((tfms >> 16) & 0xf) << 4;
7413 }
7414
7415 - asm("cpuid"
7416 + asm volatile("cpuid"
7417 : "=a" (max_amd_level)
7418 : "a" (0x80000000)
7419 : "ebx", "ecx", "edx");
7420 @@ -144,7 +144,7 @@ static void get_flags(void)
7421 if (max_amd_level >= 0x80000001 &&
7422 max_amd_level <= 0x8000ffff) {
7423 u32 eax = 0x80000001;
7424 - asm("cpuid"
7425 + asm volatile("cpuid"
7426 : "+a" (eax),
7427 "=c" (cpu.flags[6]),
7428 "=d" (cpu.flags[1])
7429 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7430 u32 ecx = MSR_K7_HWCR;
7431 u32 eax, edx;
7432
7433 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7434 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7435 eax &= ~(1 << 15);
7436 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7437 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7438
7439 get_flags(); /* Make sure it really did something */
7440 err = check_flags();
7441 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7442 u32 ecx = MSR_VIA_FCR;
7443 u32 eax, edx;
7444
7445 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7446 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7447 eax |= (1<<1)|(1<<7);
7448 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7449 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7450
7451 set_bit(X86_FEATURE_CX8, cpu.flags);
7452 err = check_flags();
7453 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7454 u32 eax, edx;
7455 u32 level = 1;
7456
7457 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7458 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7459 - asm("cpuid"
7460 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7461 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7462 + asm volatile("cpuid"
7463 : "+a" (level), "=d" (cpu.flags[0])
7464 : : "ecx", "ebx");
7465 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7466 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7467
7468 err = check_flags();
7469 }
7470 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
7471 index b31cc54..8d69237 100644
7472 --- a/arch/x86/boot/header.S
7473 +++ b/arch/x86/boot/header.S
7474 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
7475 # single linked list of
7476 # struct setup_data
7477
7478 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
7479 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
7480
7481 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
7482 #define VO_INIT_SIZE (VO__end - VO__text)
7483 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
7484 index cae3feb..ff8ff2a 100644
7485 --- a/arch/x86/boot/memory.c
7486 +++ b/arch/x86/boot/memory.c
7487 @@ -19,7 +19,7 @@
7488
7489 static int detect_memory_e820(void)
7490 {
7491 - int count = 0;
7492 + unsigned int count = 0;
7493 struct biosregs ireg, oreg;
7494 struct e820entry *desc = boot_params.e820_map;
7495 static struct e820entry buf; /* static so it is zeroed */
7496 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
7497 index 11e8c6e..fdbb1ed 100644
7498 --- a/arch/x86/boot/video-vesa.c
7499 +++ b/arch/x86/boot/video-vesa.c
7500 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
7501
7502 boot_params.screen_info.vesapm_seg = oreg.es;
7503 boot_params.screen_info.vesapm_off = oreg.di;
7504 + boot_params.screen_info.vesapm_size = oreg.cx;
7505 }
7506
7507 /*
7508 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
7509 index d42da38..787cdf3 100644
7510 --- a/arch/x86/boot/video.c
7511 +++ b/arch/x86/boot/video.c
7512 @@ -90,7 +90,7 @@ static void store_mode_params(void)
7513 static unsigned int get_entry(void)
7514 {
7515 char entry_buf[4];
7516 - int i, len = 0;
7517 + unsigned int i, len = 0;
7518 int key;
7519 unsigned int v;
7520
7521 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
7522 index 5b577d5..3c1fed4 100644
7523 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
7524 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
7525 @@ -8,6 +8,8 @@
7526 * including this sentence is retained in full.
7527 */
7528
7529 +#include <asm/alternative-asm.h>
7530 +
7531 .extern crypto_ft_tab
7532 .extern crypto_it_tab
7533 .extern crypto_fl_tab
7534 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
7535 je B192; \
7536 leaq 32(r9),r9;
7537
7538 +#define ret pax_force_retaddr 0, 1; ret
7539 +
7540 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
7541 movq r1,r2; \
7542 movq r3,r4; \
7543 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
7544 index eb0566e..e3ebad8 100644
7545 --- a/arch/x86/crypto/aesni-intel_asm.S
7546 +++ b/arch/x86/crypto/aesni-intel_asm.S
7547 @@ -16,6 +16,7 @@
7548 */
7549
7550 #include <linux/linkage.h>
7551 +#include <asm/alternative-asm.h>
7552
7553 .text
7554
7555 @@ -52,6 +53,7 @@ _key_expansion_256a:
7556 pxor %xmm1, %xmm0
7557 movaps %xmm0, (%rcx)
7558 add $0x10, %rcx
7559 + pax_force_retaddr_bts
7560 ret
7561
7562 _key_expansion_192a:
7563 @@ -75,6 +77,7 @@ _key_expansion_192a:
7564 shufps $0b01001110, %xmm2, %xmm1
7565 movaps %xmm1, 16(%rcx)
7566 add $0x20, %rcx
7567 + pax_force_retaddr_bts
7568 ret
7569
7570 _key_expansion_192b:
7571 @@ -93,6 +96,7 @@ _key_expansion_192b:
7572
7573 movaps %xmm0, (%rcx)
7574 add $0x10, %rcx
7575 + pax_force_retaddr_bts
7576 ret
7577
7578 _key_expansion_256b:
7579 @@ -104,6 +108,7 @@ _key_expansion_256b:
7580 pxor %xmm1, %xmm2
7581 movaps %xmm2, (%rcx)
7582 add $0x10, %rcx
7583 + pax_force_retaddr_bts
7584 ret
7585
7586 /*
7587 @@ -239,7 +244,9 @@ ENTRY(aesni_set_key)
7588 cmp %rcx, %rdi
7589 jb .Ldec_key_loop
7590 xor %rax, %rax
7591 + pax_force_retaddr 0, 1
7592 ret
7593 +ENDPROC(aesni_set_key)
7594
7595 /*
7596 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
7597 @@ -249,7 +256,9 @@ ENTRY(aesni_enc)
7598 movups (INP), STATE # input
7599 call _aesni_enc1
7600 movups STATE, (OUTP) # output
7601 + pax_force_retaddr 0, 1
7602 ret
7603 +ENDPROC(aesni_enc)
7604
7605 /*
7606 * _aesni_enc1: internal ABI
7607 @@ -319,6 +328,7 @@ _aesni_enc1:
7608 movaps 0x70(TKEYP), KEY
7609 # aesenclast KEY, STATE # last round
7610 .byte 0x66, 0x0f, 0x38, 0xdd, 0xc2
7611 + pax_force_retaddr_bts
7612 ret
7613
7614 /*
7615 @@ -482,6 +492,7 @@ _aesni_enc4:
7616 .byte 0x66, 0x0f, 0x38, 0xdd, 0xea
7617 # aesenclast KEY, STATE4
7618 .byte 0x66, 0x0f, 0x38, 0xdd, 0xf2
7619 + pax_force_retaddr_bts
7620 ret
7621
7622 /*
7623 @@ -493,7 +504,9 @@ ENTRY(aesni_dec)
7624 movups (INP), STATE # input
7625 call _aesni_dec1
7626 movups STATE, (OUTP) #output
7627 + pax_force_retaddr 0, 1
7628 ret
7629 +ENDPROC(aesni_dec)
7630
7631 /*
7632 * _aesni_dec1: internal ABI
7633 @@ -563,6 +576,7 @@ _aesni_dec1:
7634 movaps 0x70(TKEYP), KEY
7635 # aesdeclast KEY, STATE # last round
7636 .byte 0x66, 0x0f, 0x38, 0xdf, 0xc2
7637 + pax_force_retaddr_bts
7638 ret
7639
7640 /*
7641 @@ -726,6 +740,7 @@ _aesni_dec4:
7642 .byte 0x66, 0x0f, 0x38, 0xdf, 0xea
7643 # aesdeclast KEY, STATE4
7644 .byte 0x66, 0x0f, 0x38, 0xdf, 0xf2
7645 + pax_force_retaddr_bts
7646 ret
7647
7648 /*
7649 @@ -769,7 +784,9 @@ ENTRY(aesni_ecb_enc)
7650 cmp $16, LEN
7651 jge .Lecb_enc_loop1
7652 .Lecb_enc_ret:
7653 + pax_force_retaddr 0, 1
7654 ret
7655 +ENDPROC(aesni_ecb_enc)
7656
7657 /*
7658 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7659 @@ -813,7 +830,9 @@ ENTRY(aesni_ecb_dec)
7660 cmp $16, LEN
7661 jge .Lecb_dec_loop1
7662 .Lecb_dec_ret:
7663 + pax_force_retaddr 0, 1
7664 ret
7665 +ENDPROC(aesni_ecb_dec)
7666
7667 /*
7668 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7669 @@ -837,7 +856,9 @@ ENTRY(aesni_cbc_enc)
7670 jge .Lcbc_enc_loop
7671 movups STATE, (IVP)
7672 .Lcbc_enc_ret:
7673 + pax_force_retaddr 0, 1
7674 ret
7675 +ENDPROC(aesni_cbc_enc)
7676
7677 /*
7678 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7679 @@ -894,4 +915,6 @@ ENTRY(aesni_cbc_dec)
7680 .Lcbc_dec_ret:
7681 movups IV, (IVP)
7682 .Lcbc_dec_just_ret:
7683 + pax_force_retaddr 0, 1
7684 ret
7685 +ENDPROC(aesni_cbc_dec)
7686 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7687 index 6214a9b..1f4fc9a 100644
7688 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
7689 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7690 @@ -1,3 +1,5 @@
7691 +#include <asm/alternative-asm.h>
7692 +
7693 # enter ECRYPT_encrypt_bytes
7694 .text
7695 .p2align 5
7696 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
7697 add %r11,%rsp
7698 mov %rdi,%rax
7699 mov %rsi,%rdx
7700 + pax_force_retaddr 0, 1
7701 ret
7702 # bytesatleast65:
7703 ._bytesatleast65:
7704 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
7705 add %r11,%rsp
7706 mov %rdi,%rax
7707 mov %rsi,%rdx
7708 + pax_force_retaddr
7709 ret
7710 # enter ECRYPT_ivsetup
7711 .text
7712 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
7713 add %r11,%rsp
7714 mov %rdi,%rax
7715 mov %rsi,%rdx
7716 + pax_force_retaddr
7717 ret
7718 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
7719 index 35974a5..5662ae2 100644
7720 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
7721 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
7722 @@ -21,6 +21,7 @@
7723 .text
7724
7725 #include <asm/asm-offsets.h>
7726 +#include <asm/alternative-asm.h>
7727
7728 #define a_offset 0
7729 #define b_offset 4
7730 @@ -269,6 +270,7 @@ twofish_enc_blk:
7731
7732 popq R1
7733 movq $1,%rax
7734 + pax_force_retaddr 0, 1
7735 ret
7736
7737 twofish_dec_blk:
7738 @@ -321,4 +323,5 @@ twofish_dec_blk:
7739
7740 popq R1
7741 movq $1,%rax
7742 + pax_force_retaddr 0, 1
7743 ret
7744 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
7745 index 14531ab..a89a0c0 100644
7746 --- a/arch/x86/ia32/ia32_aout.c
7747 +++ b/arch/x86/ia32/ia32_aout.c
7748 @@ -169,6 +169,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
7749 unsigned long dump_start, dump_size;
7750 struct user32 dump;
7751
7752 + memset(&dump, 0, sizeof(dump));
7753 +
7754 fs = get_fs();
7755 set_fs(KERNEL_DS);
7756 has_dumped = 1;
7757 @@ -218,12 +220,6 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
7758 dump_size = dump.u_ssize << PAGE_SHIFT;
7759 DUMP_WRITE(dump_start, dump_size);
7760 }
7761 - /*
7762 - * Finally dump the task struct. Not be used by gdb, but
7763 - * could be useful
7764 - */
7765 - set_fs(KERNEL_DS);
7766 - DUMP_WRITE(current, sizeof(*current));
7767 end_coredump:
7768 set_fs(fs);
7769 return has_dumped;
7770 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
7771 index 588a7aa..a3468b0 100644
7772 --- a/arch/x86/ia32/ia32_signal.c
7773 +++ b/arch/x86/ia32/ia32_signal.c
7774 @@ -167,7 +167,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
7775 }
7776 seg = get_fs();
7777 set_fs(KERNEL_DS);
7778 - ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
7779 + ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
7780 set_fs(seg);
7781 if (ret >= 0 && uoss_ptr) {
7782 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
7783 @@ -374,7 +374,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
7784 */
7785 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
7786 size_t frame_size,
7787 - void **fpstate)
7788 + void __user **fpstate)
7789 {
7790 unsigned long sp;
7791
7792 @@ -395,7 +395,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
7793
7794 if (used_math()) {
7795 sp = sp - sig_xstate_ia32_size;
7796 - *fpstate = (struct _fpstate_ia32 *) sp;
7797 + *fpstate = (struct _fpstate_ia32 __user *) sp;
7798 if (save_i387_xstate_ia32(*fpstate) < 0)
7799 return (void __user *) -1L;
7800 }
7801 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
7802 sp -= frame_size;
7803 /* Align the stack pointer according to the i386 ABI,
7804 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
7805 - sp = ((sp + 4) & -16ul) - 4;
7806 + sp = ((sp - 12) & -16ul) - 4;
7807 return (void __user *) sp;
7808 }
7809
7810 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
7811 * These are actually not used anymore, but left because some
7812 * gdb versions depend on them as a marker.
7813 */
7814 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
7815 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
7816 } put_user_catch(err);
7817
7818 if (err)
7819 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
7820 0xb8,
7821 __NR_ia32_rt_sigreturn,
7822 0x80cd,
7823 - 0,
7824 + 0
7825 };
7826
7827 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
7828 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
7829
7830 if (ka->sa.sa_flags & SA_RESTORER)
7831 restorer = ka->sa.sa_restorer;
7832 + else if (current->mm->context.vdso)
7833 + /* Return stub is in 32bit vsyscall page */
7834 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
7835 else
7836 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
7837 - rt_sigreturn);
7838 + restorer = &frame->retcode;
7839 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
7840
7841 /*
7842 * Not actually used anymore, but left because some gdb
7843 * versions need it.
7844 */
7845 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
7846 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
7847 } put_user_catch(err);
7848
7849 if (err)
7850 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
7851 index 4edd8eb..a558697 100644
7852 --- a/arch/x86/ia32/ia32entry.S
7853 +++ b/arch/x86/ia32/ia32entry.S
7854 @@ -13,7 +13,9 @@
7855 #include <asm/thread_info.h>
7856 #include <asm/segment.h>
7857 #include <asm/irqflags.h>
7858 +#include <asm/pgtable.h>
7859 #include <linux/linkage.h>
7860 +#include <asm/alternative-asm.h>
7861
7862 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
7863 #include <linux/elf-em.h>
7864 @@ -93,6 +95,32 @@ ENTRY(native_irq_enable_sysexit)
7865 ENDPROC(native_irq_enable_sysexit)
7866 #endif
7867
7868 + .macro pax_enter_kernel_user
7869 + pax_set_fptr_mask
7870 +#ifdef CONFIG_PAX_MEMORY_UDEREF
7871 + call pax_enter_kernel_user
7872 +#endif
7873 + .endm
7874 +
7875 + .macro pax_exit_kernel_user
7876 +#ifdef CONFIG_PAX_MEMORY_UDEREF
7877 + call pax_exit_kernel_user
7878 +#endif
7879 +#ifdef CONFIG_PAX_RANDKSTACK
7880 + pushq %rax
7881 + pushq %r11
7882 + call pax_randomize_kstack
7883 + popq %r11
7884 + popq %rax
7885 +#endif
7886 + .endm
7887 +
7888 +.macro pax_erase_kstack
7889 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
7890 + call pax_erase_kstack
7891 +#endif
7892 +.endm
7893 +
7894 /*
7895 * 32bit SYSENTER instruction entry.
7896 *
7897 @@ -119,12 +147,6 @@ ENTRY(ia32_sysenter_target)
7898 CFI_REGISTER rsp,rbp
7899 SWAPGS_UNSAFE_STACK
7900 movq PER_CPU_VAR(kernel_stack), %rsp
7901 - addq $(KERNEL_STACK_OFFSET),%rsp
7902 - /*
7903 - * No need to follow this irqs on/off section: the syscall
7904 - * disabled irqs, here we enable it straight after entry:
7905 - */
7906 - ENABLE_INTERRUPTS(CLBR_NONE)
7907 movl %ebp,%ebp /* zero extension */
7908 pushq $__USER32_DS
7909 CFI_ADJUST_CFA_OFFSET 8
7910 @@ -135,28 +157,41 @@ ENTRY(ia32_sysenter_target)
7911 pushfq
7912 CFI_ADJUST_CFA_OFFSET 8
7913 /*CFI_REL_OFFSET rflags,0*/
7914 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
7915 - CFI_REGISTER rip,r10
7916 + GET_THREAD_INFO(%r11)
7917 + movl TI_sysenter_return(%r11), %r11d
7918 + CFI_REGISTER rip,r11
7919 pushq $__USER32_CS
7920 CFI_ADJUST_CFA_OFFSET 8
7921 /*CFI_REL_OFFSET cs,0*/
7922 movl %eax, %eax
7923 - pushq %r10
7924 + pushq %r11
7925 CFI_ADJUST_CFA_OFFSET 8
7926 CFI_REL_OFFSET rip,0
7927 pushq %rax
7928 CFI_ADJUST_CFA_OFFSET 8
7929 cld
7930 SAVE_ARGS 0,0,1
7931 + pax_enter_kernel_user
7932 + /*
7933 + * No need to follow this irqs on/off section: the syscall
7934 + * disabled irqs, here we enable it straight after entry:
7935 + */
7936 + ENABLE_INTERRUPTS(CLBR_NONE)
7937 /* no need to do an access_ok check here because rbp has been
7938 32bit zero extended */
7939 +
7940 +#ifdef CONFIG_PAX_MEMORY_UDEREF
7941 + mov $PAX_USER_SHADOW_BASE,%r11
7942 + add %r11,%rbp
7943 +#endif
7944 +
7945 1: movl (%rbp),%ebp
7946 .section __ex_table,"a"
7947 .quad 1b,ia32_badarg
7948 .previous
7949 - GET_THREAD_INFO(%r10)
7950 - orl $TS_COMPAT,TI_status(%r10)
7951 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
7952 + GET_THREAD_INFO(%r11)
7953 + orl $TS_COMPAT,TI_status(%r11)
7954 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
7955 CFI_REMEMBER_STATE
7956 jnz sysenter_tracesys
7957 cmpq $(IA32_NR_syscalls-1),%rax
7958 @@ -166,13 +201,15 @@ sysenter_do_call:
7959 sysenter_dispatch:
7960 call *ia32_sys_call_table(,%rax,8)
7961 movq %rax,RAX-ARGOFFSET(%rsp)
7962 - GET_THREAD_INFO(%r10)
7963 + GET_THREAD_INFO(%r11)
7964 DISABLE_INTERRUPTS(CLBR_NONE)
7965 TRACE_IRQS_OFF
7966 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
7967 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
7968 jnz sysexit_audit
7969 sysexit_from_sys_call:
7970 - andl $~TS_COMPAT,TI_status(%r10)
7971 + pax_exit_kernel_user
7972 + pax_erase_kstack
7973 + andl $~TS_COMPAT,TI_status(%r11)
7974 /* clear IF, that popfq doesn't enable interrupts early */
7975 andl $~0x200,EFLAGS-R11(%rsp)
7976 movl RIP-R11(%rsp),%edx /* User %eip */
7977 @@ -200,6 +237,9 @@ sysexit_from_sys_call:
7978 movl %eax,%esi /* 2nd arg: syscall number */
7979 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
7980 call audit_syscall_entry
7981 +
7982 + pax_erase_kstack
7983 +
7984 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
7985 cmpq $(IA32_NR_syscalls-1),%rax
7986 ja ia32_badsys
7987 @@ -211,7 +251,7 @@ sysexit_from_sys_call:
7988 .endm
7989
7990 .macro auditsys_exit exit
7991 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
7992 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
7993 jnz ia32_ret_from_sys_call
7994 TRACE_IRQS_ON
7995 sti
7996 @@ -221,12 +261,12 @@ sysexit_from_sys_call:
7997 movzbl %al,%edi /* zero-extend that into %edi */
7998 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
7999 call audit_syscall_exit
8000 - GET_THREAD_INFO(%r10)
8001 + GET_THREAD_INFO(%r11)
8002 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
8003 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8004 cli
8005 TRACE_IRQS_OFF
8006 - testl %edi,TI_flags(%r10)
8007 + testl %edi,TI_flags(%r11)
8008 jz \exit
8009 CLEAR_RREGS -ARGOFFSET
8010 jmp int_with_check
8011 @@ -244,7 +284,7 @@ sysexit_audit:
8012
8013 sysenter_tracesys:
8014 #ifdef CONFIG_AUDITSYSCALL
8015 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8016 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8017 jz sysenter_auditsys
8018 #endif
8019 SAVE_REST
8020 @@ -252,6 +292,9 @@ sysenter_tracesys:
8021 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8022 movq %rsp,%rdi /* &pt_regs -> arg1 */
8023 call syscall_trace_enter
8024 +
8025 + pax_erase_kstack
8026 +
8027 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8028 RESTORE_REST
8029 cmpq $(IA32_NR_syscalls-1),%rax
8030 @@ -283,19 +326,20 @@ ENDPROC(ia32_sysenter_target)
8031 ENTRY(ia32_cstar_target)
8032 CFI_STARTPROC32 simple
8033 CFI_SIGNAL_FRAME
8034 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8035 + CFI_DEF_CFA rsp,0
8036 CFI_REGISTER rip,rcx
8037 /*CFI_REGISTER rflags,r11*/
8038 SWAPGS_UNSAFE_STACK
8039 movl %esp,%r8d
8040 CFI_REGISTER rsp,r8
8041 movq PER_CPU_VAR(kernel_stack),%rsp
8042 + SAVE_ARGS 8*6,1,1
8043 + pax_enter_kernel_user
8044 /*
8045 * No need to follow this irqs on/off section: the syscall
8046 * disabled irqs and here we enable it straight after entry:
8047 */
8048 ENABLE_INTERRUPTS(CLBR_NONE)
8049 - SAVE_ARGS 8,1,1
8050 movl %eax,%eax /* zero extension */
8051 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8052 movq %rcx,RIP-ARGOFFSET(%rsp)
8053 @@ -311,13 +355,19 @@ ENTRY(ia32_cstar_target)
8054 /* no need to do an access_ok check here because r8 has been
8055 32bit zero extended */
8056 /* hardware stack frame is complete now */
8057 +
8058 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8059 + mov $PAX_USER_SHADOW_BASE,%r11
8060 + add %r11,%r8
8061 +#endif
8062 +
8063 1: movl (%r8),%r9d
8064 .section __ex_table,"a"
8065 .quad 1b,ia32_badarg
8066 .previous
8067 - GET_THREAD_INFO(%r10)
8068 - orl $TS_COMPAT,TI_status(%r10)
8069 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8070 + GET_THREAD_INFO(%r11)
8071 + orl $TS_COMPAT,TI_status(%r11)
8072 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8073 CFI_REMEMBER_STATE
8074 jnz cstar_tracesys
8075 cmpq $IA32_NR_syscalls-1,%rax
8076 @@ -327,13 +377,15 @@ cstar_do_call:
8077 cstar_dispatch:
8078 call *ia32_sys_call_table(,%rax,8)
8079 movq %rax,RAX-ARGOFFSET(%rsp)
8080 - GET_THREAD_INFO(%r10)
8081 + GET_THREAD_INFO(%r11)
8082 DISABLE_INTERRUPTS(CLBR_NONE)
8083 TRACE_IRQS_OFF
8084 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
8085 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8086 jnz sysretl_audit
8087 sysretl_from_sys_call:
8088 - andl $~TS_COMPAT,TI_status(%r10)
8089 + pax_exit_kernel_user
8090 + pax_erase_kstack
8091 + andl $~TS_COMPAT,TI_status(%r11)
8092 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
8093 movl RIP-ARGOFFSET(%rsp),%ecx
8094 CFI_REGISTER rip,rcx
8095 @@ -361,7 +413,7 @@ sysretl_audit:
8096
8097 cstar_tracesys:
8098 #ifdef CONFIG_AUDITSYSCALL
8099 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8100 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8101 jz cstar_auditsys
8102 #endif
8103 xchgl %r9d,%ebp
8104 @@ -370,6 +422,9 @@ cstar_tracesys:
8105 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8106 movq %rsp,%rdi /* &pt_regs -> arg1 */
8107 call syscall_trace_enter
8108 +
8109 + pax_erase_kstack
8110 +
8111 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
8112 RESTORE_REST
8113 xchgl %ebp,%r9d
8114 @@ -415,11 +470,6 @@ ENTRY(ia32_syscall)
8115 CFI_REL_OFFSET rip,RIP-RIP
8116 PARAVIRT_ADJUST_EXCEPTION_FRAME
8117 SWAPGS
8118 - /*
8119 - * No need to follow this irqs on/off section: the syscall
8120 - * disabled irqs and here we enable it straight after entry:
8121 - */
8122 - ENABLE_INTERRUPTS(CLBR_NONE)
8123 movl %eax,%eax
8124 pushq %rax
8125 CFI_ADJUST_CFA_OFFSET 8
8126 @@ -427,9 +477,15 @@ ENTRY(ia32_syscall)
8127 /* note the registers are not zero extended to the sf.
8128 this could be a problem. */
8129 SAVE_ARGS 0,0,1
8130 - GET_THREAD_INFO(%r10)
8131 - orl $TS_COMPAT,TI_status(%r10)
8132 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8133 + pax_enter_kernel_user
8134 + /*
8135 + * No need to follow this irqs on/off section: the syscall
8136 + * disabled irqs and here we enable it straight after entry:
8137 + */
8138 + ENABLE_INTERRUPTS(CLBR_NONE)
8139 + GET_THREAD_INFO(%r11)
8140 + orl $TS_COMPAT,TI_status(%r11)
8141 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8142 jnz ia32_tracesys
8143 cmpq $(IA32_NR_syscalls-1),%rax
8144 ja ia32_badsys
8145 @@ -448,6 +504,9 @@ ia32_tracesys:
8146 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8147 movq %rsp,%rdi /* &pt_regs -> arg1 */
8148 call syscall_trace_enter
8149 +
8150 + pax_erase_kstack
8151 +
8152 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8153 RESTORE_REST
8154 cmpq $(IA32_NR_syscalls-1),%rax
8155 @@ -462,6 +521,7 @@ ia32_badsys:
8156
8157 quiet_ni_syscall:
8158 movq $-ENOSYS,%rax
8159 + pax_force_retaddr
8160 ret
8161 CFI_ENDPROC
8162
8163 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
8164 index 016218c..47ccbdd 100644
8165 --- a/arch/x86/ia32/sys_ia32.c
8166 +++ b/arch/x86/ia32/sys_ia32.c
8167 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
8168 */
8169 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
8170 {
8171 - typeof(ubuf->st_uid) uid = 0;
8172 - typeof(ubuf->st_gid) gid = 0;
8173 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
8174 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
8175 SET_UID(uid, stat->uid);
8176 SET_GID(gid, stat->gid);
8177 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
8178 @@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
8179 }
8180 set_fs(KERNEL_DS);
8181 ret = sys_rt_sigprocmask(how,
8182 - set ? (sigset_t __user *)&s : NULL,
8183 - oset ? (sigset_t __user *)&s : NULL,
8184 + set ? (sigset_t __force_user *)&s : NULL,
8185 + oset ? (sigset_t __force_user *)&s : NULL,
8186 sigsetsize);
8187 set_fs(old_fs);
8188 if (ret)
8189 @@ -371,7 +371,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
8190 mm_segment_t old_fs = get_fs();
8191
8192 set_fs(KERNEL_DS);
8193 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
8194 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
8195 set_fs(old_fs);
8196 if (put_compat_timespec(&t, interval))
8197 return -EFAULT;
8198 @@ -387,7 +387,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
8199 mm_segment_t old_fs = get_fs();
8200
8201 set_fs(KERNEL_DS);
8202 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
8203 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
8204 set_fs(old_fs);
8205 if (!ret) {
8206 switch (_NSIG_WORDS) {
8207 @@ -412,7 +412,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
8208 if (copy_siginfo_from_user32(&info, uinfo))
8209 return -EFAULT;
8210 set_fs(KERNEL_DS);
8211 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
8212 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
8213 set_fs(old_fs);
8214 return ret;
8215 }
8216 @@ -513,7 +513,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
8217 return -EFAULT;
8218
8219 set_fs(KERNEL_DS);
8220 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
8221 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
8222 count);
8223 set_fs(old_fs);
8224
8225 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
8226 index e2077d3..e134a5e 100644
8227 --- a/arch/x86/include/asm/alternative-asm.h
8228 +++ b/arch/x86/include/asm/alternative-asm.h
8229 @@ -19,4 +19,43 @@
8230 .endm
8231 #endif
8232
8233 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
8234 + .macro pax_force_retaddr_bts rip=0
8235 + btsq $63,\rip(%rsp)
8236 + .endm
8237 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
8238 + .macro pax_force_retaddr rip=0, reload=0
8239 + btsq $63,\rip(%rsp)
8240 + .endm
8241 + .macro pax_force_fptr ptr
8242 + btsq $63,\ptr
8243 + .endm
8244 + .macro pax_set_fptr_mask
8245 + .endm
8246 +#endif
8247 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
8248 + .macro pax_force_retaddr rip=0, reload=0
8249 + .if \reload
8250 + pax_set_fptr_mask
8251 + .endif
8252 + orq %r10,\rip(%rsp)
8253 + .endm
8254 + .macro pax_force_fptr ptr
8255 + orq %r10,\ptr
8256 + .endm
8257 + .macro pax_set_fptr_mask
8258 + movabs $0x8000000000000000,%r10
8259 + .endm
8260 +#endif
8261 +#else
8262 + .macro pax_force_retaddr rip=0, reload=0
8263 + .endm
8264 + .macro pax_force_fptr ptr
8265 + .endm
8266 + .macro pax_force_retaddr_bts rip=0
8267 + .endm
8268 + .macro pax_set_fptr_mask
8269 + .endm
8270 +#endif
8271 +
8272 #endif /* __ASSEMBLY__ */
8273 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
8274 index c240efc..fdfadf3 100644
8275 --- a/arch/x86/include/asm/alternative.h
8276 +++ b/arch/x86/include/asm/alternative.h
8277 @@ -85,7 +85,7 @@ static inline void alternatives_smp_switch(int smp) {}
8278 " .byte 662b-661b\n" /* sourcelen */ \
8279 " .byte 664f-663f\n" /* replacementlen */ \
8280 ".previous\n" \
8281 - ".section .altinstr_replacement, \"ax\"\n" \
8282 + ".section .altinstr_replacement, \"a\"\n" \
8283 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
8284 ".previous"
8285
8286 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
8287 index 474d80d..1f97d58 100644
8288 --- a/arch/x86/include/asm/apic.h
8289 +++ b/arch/x86/include/asm/apic.h
8290 @@ -46,7 +46,7 @@ static inline void generic_apic_probe(void)
8291
8292 #ifdef CONFIG_X86_LOCAL_APIC
8293
8294 -extern unsigned int apic_verbosity;
8295 +extern int apic_verbosity;
8296 extern int local_apic_timer_c2_ok;
8297
8298 extern int disable_apic;
8299 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
8300 index 20370c6..a2eb9b0 100644
8301 --- a/arch/x86/include/asm/apm.h
8302 +++ b/arch/x86/include/asm/apm.h
8303 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
8304 __asm__ __volatile__(APM_DO_ZERO_SEGS
8305 "pushl %%edi\n\t"
8306 "pushl %%ebp\n\t"
8307 - "lcall *%%cs:apm_bios_entry\n\t"
8308 + "lcall *%%ss:apm_bios_entry\n\t"
8309 "setc %%al\n\t"
8310 "popl %%ebp\n\t"
8311 "popl %%edi\n\t"
8312 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
8313 __asm__ __volatile__(APM_DO_ZERO_SEGS
8314 "pushl %%edi\n\t"
8315 "pushl %%ebp\n\t"
8316 - "lcall *%%cs:apm_bios_entry\n\t"
8317 + "lcall *%%ss:apm_bios_entry\n\t"
8318 "setc %%bl\n\t"
8319 "popl %%ebp\n\t"
8320 "popl %%edi\n\t"
8321 diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h
8322 index dc5a667..fbed878 100644
8323 --- a/arch/x86/include/asm/atomic_32.h
8324 +++ b/arch/x86/include/asm/atomic_32.h
8325 @@ -25,6 +25,17 @@ static inline int atomic_read(const atomic_t *v)
8326 }
8327
8328 /**
8329 + * atomic_read_unchecked - read atomic variable
8330 + * @v: pointer of type atomic_unchecked_t
8331 + *
8332 + * Atomically reads the value of @v.
8333 + */
8334 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8335 +{
8336 + return v->counter;
8337 +}
8338 +
8339 +/**
8340 * atomic_set - set atomic variable
8341 * @v: pointer of type atomic_t
8342 * @i: required value
8343 @@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *v, int i)
8344 }
8345
8346 /**
8347 + * atomic_set_unchecked - set atomic variable
8348 + * @v: pointer of type atomic_unchecked_t
8349 + * @i: required value
8350 + *
8351 + * Atomically sets the value of @v to @i.
8352 + */
8353 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8354 +{
8355 + v->counter = i;
8356 +}
8357 +
8358 +/**
8359 * atomic_add - add integer to atomic variable
8360 * @i: integer value to add
8361 * @v: pointer of type atomic_t
8362 @@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *v, int i)
8363 */
8364 static inline void atomic_add(int i, atomic_t *v)
8365 {
8366 - asm volatile(LOCK_PREFIX "addl %1,%0"
8367 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
8368 +
8369 +#ifdef CONFIG_PAX_REFCOUNT
8370 + "jno 0f\n"
8371 + LOCK_PREFIX "subl %1,%0\n"
8372 + "int $4\n0:\n"
8373 + _ASM_EXTABLE(0b, 0b)
8374 +#endif
8375 +
8376 + : "+m" (v->counter)
8377 + : "ir" (i));
8378 +}
8379 +
8380 +/**
8381 + * atomic_add_unchecked - add integer to atomic variable
8382 + * @i: integer value to add
8383 + * @v: pointer of type atomic_unchecked_t
8384 + *
8385 + * Atomically adds @i to @v.
8386 + */
8387 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
8388 +{
8389 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
8390 : "+m" (v->counter)
8391 : "ir" (i));
8392 }
8393 @@ -59,7 +104,29 @@ static inline void atomic_add(int i, atomic_t *v)
8394 */
8395 static inline void atomic_sub(int i, atomic_t *v)
8396 {
8397 - asm volatile(LOCK_PREFIX "subl %1,%0"
8398 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
8399 +
8400 +#ifdef CONFIG_PAX_REFCOUNT
8401 + "jno 0f\n"
8402 + LOCK_PREFIX "addl %1,%0\n"
8403 + "int $4\n0:\n"
8404 + _ASM_EXTABLE(0b, 0b)
8405 +#endif
8406 +
8407 + : "+m" (v->counter)
8408 + : "ir" (i));
8409 +}
8410 +
8411 +/**
8412 + * atomic_sub_unchecked - subtract integer from atomic variable
8413 + * @i: integer value to subtract
8414 + * @v: pointer of type atomic_unchecked_t
8415 + *
8416 + * Atomically subtracts @i from @v.
8417 + */
8418 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
8419 +{
8420 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
8421 : "+m" (v->counter)
8422 : "ir" (i));
8423 }
8424 @@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8425 {
8426 unsigned char c;
8427
8428 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
8429 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
8430 +
8431 +#ifdef CONFIG_PAX_REFCOUNT
8432 + "jno 0f\n"
8433 + LOCK_PREFIX "addl %2,%0\n"
8434 + "int $4\n0:\n"
8435 + _ASM_EXTABLE(0b, 0b)
8436 +#endif
8437 +
8438 + "sete %1\n"
8439 : "+m" (v->counter), "=qm" (c)
8440 : "ir" (i) : "memory");
8441 return c;
8442 @@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8443 */
8444 static inline void atomic_inc(atomic_t *v)
8445 {
8446 - asm volatile(LOCK_PREFIX "incl %0"
8447 + asm volatile(LOCK_PREFIX "incl %0\n"
8448 +
8449 +#ifdef CONFIG_PAX_REFCOUNT
8450 + "jno 0f\n"
8451 + LOCK_PREFIX "decl %0\n"
8452 + "int $4\n0:\n"
8453 + _ASM_EXTABLE(0b, 0b)
8454 +#endif
8455 +
8456 + : "+m" (v->counter));
8457 +}
8458 +
8459 +/**
8460 + * atomic_inc_unchecked - increment atomic variable
8461 + * @v: pointer of type atomic_unchecked_t
8462 + *
8463 + * Atomically increments @v by 1.
8464 + */
8465 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
8466 +{
8467 + asm volatile(LOCK_PREFIX "incl %0\n"
8468 : "+m" (v->counter));
8469 }
8470
8471 @@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *v)
8472 */
8473 static inline void atomic_dec(atomic_t *v)
8474 {
8475 - asm volatile(LOCK_PREFIX "decl %0"
8476 + asm volatile(LOCK_PREFIX "decl %0\n"
8477 +
8478 +#ifdef CONFIG_PAX_REFCOUNT
8479 + "jno 0f\n"
8480 + LOCK_PREFIX "incl %0\n"
8481 + "int $4\n0:\n"
8482 + _ASM_EXTABLE(0b, 0b)
8483 +#endif
8484 +
8485 + : "+m" (v->counter));
8486 +}
8487 +
8488 +/**
8489 + * atomic_dec_unchecked - decrement atomic variable
8490 + * @v: pointer of type atomic_unchecked_t
8491 + *
8492 + * Atomically decrements @v by 1.
8493 + */
8494 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
8495 +{
8496 + asm volatile(LOCK_PREFIX "decl %0\n"
8497 : "+m" (v->counter));
8498 }
8499
8500 @@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
8501 {
8502 unsigned char c;
8503
8504 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
8505 + asm volatile(LOCK_PREFIX "decl %0\n"
8506 +
8507 +#ifdef CONFIG_PAX_REFCOUNT
8508 + "jno 0f\n"
8509 + LOCK_PREFIX "incl %0\n"
8510 + "int $4\n0:\n"
8511 + _ASM_EXTABLE(0b, 0b)
8512 +#endif
8513 +
8514 + "sete %1\n"
8515 : "+m" (v->counter), "=qm" (c)
8516 : : "memory");
8517 return c != 0;
8518 @@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
8519 {
8520 unsigned char c;
8521
8522 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
8523 + asm volatile(LOCK_PREFIX "incl %0\n"
8524 +
8525 +#ifdef CONFIG_PAX_REFCOUNT
8526 + "jno 0f\n"
8527 + LOCK_PREFIX "decl %0\n"
8528 + "into\n0:\n"
8529 + _ASM_EXTABLE(0b, 0b)
8530 +#endif
8531 +
8532 + "sete %1\n"
8533 + : "+m" (v->counter), "=qm" (c)
8534 + : : "memory");
8535 + return c != 0;
8536 +}
8537 +
8538 +/**
8539 + * atomic_inc_and_test_unchecked - increment and test
8540 + * @v: pointer of type atomic_unchecked_t
8541 + *
8542 + * Atomically increments @v by 1
8543 + * and returns true if the result is zero, or false for all
8544 + * other cases.
8545 + */
8546 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
8547 +{
8548 + unsigned char c;
8549 +
8550 + asm volatile(LOCK_PREFIX "incl %0\n"
8551 + "sete %1\n"
8552 : "+m" (v->counter), "=qm" (c)
8553 : : "memory");
8554 return c != 0;
8555 @@ -156,7 +309,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
8556 {
8557 unsigned char c;
8558
8559 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
8560 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
8561 +
8562 +#ifdef CONFIG_PAX_REFCOUNT
8563 + "jno 0f\n"
8564 + LOCK_PREFIX "subl %2,%0\n"
8565 + "int $4\n0:\n"
8566 + _ASM_EXTABLE(0b, 0b)
8567 +#endif
8568 +
8569 + "sets %1\n"
8570 : "+m" (v->counter), "=qm" (c)
8571 : "ir" (i) : "memory");
8572 return c;
8573 @@ -179,6 +341,46 @@ static inline int atomic_add_return(int i, atomic_t *v)
8574 #endif
8575 /* Modern 486+ processor */
8576 __i = i;
8577 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
8578 +
8579 +#ifdef CONFIG_PAX_REFCOUNT
8580 + "jno 0f\n"
8581 + "movl %0, %1\n"
8582 + "int $4\n0:\n"
8583 + _ASM_EXTABLE(0b, 0b)
8584 +#endif
8585 +
8586 + : "+r" (i), "+m" (v->counter)
8587 + : : "memory");
8588 + return i + __i;
8589 +
8590 +#ifdef CONFIG_M386
8591 +no_xadd: /* Legacy 386 processor */
8592 + local_irq_save(flags);
8593 + __i = atomic_read(v);
8594 + atomic_set(v, i + __i);
8595 + local_irq_restore(flags);
8596 + return i + __i;
8597 +#endif
8598 +}
8599 +
8600 +/**
8601 + * atomic_add_return_unchecked - add integer and return
8602 + * @v: pointer of type atomic_unchecked_t
8603 + * @i: integer value to add
8604 + *
8605 + * Atomically adds @i to @v and returns @i + @v
8606 + */
8607 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
8608 +{
8609 + int __i;
8610 +#ifdef CONFIG_M386
8611 + unsigned long flags;
8612 + if (unlikely(boot_cpu_data.x86 <= 3))
8613 + goto no_xadd;
8614 +#endif
8615 + /* Modern 486+ processor */
8616 + __i = i;
8617 asm volatile(LOCK_PREFIX "xaddl %0, %1"
8618 : "+r" (i), "+m" (v->counter)
8619 : : "memory");
8620 @@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
8621 return cmpxchg(&v->counter, old, new);
8622 }
8623
8624 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8625 +{
8626 + return cmpxchg(&v->counter, old, new);
8627 +}
8628 +
8629 static inline int atomic_xchg(atomic_t *v, int new)
8630 {
8631 return xchg(&v->counter, new);
8632 }
8633
8634 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8635 +{
8636 + return xchg(&v->counter, new);
8637 +}
8638 +
8639 /**
8640 * atomic_add_unless - add unless the number is already a given value
8641 * @v: pointer of type atomic_t
8642 @@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *v, int new)
8643 */
8644 static inline int atomic_add_unless(atomic_t *v, int a, int u)
8645 {
8646 - int c, old;
8647 + int c, old, new;
8648 c = atomic_read(v);
8649 for (;;) {
8650 - if (unlikely(c == (u)))
8651 + if (unlikely(c == u))
8652 break;
8653 - old = atomic_cmpxchg((v), c, c + (a));
8654 +
8655 + asm volatile("addl %2,%0\n"
8656 +
8657 +#ifdef CONFIG_PAX_REFCOUNT
8658 + "jno 0f\n"
8659 + "subl %2,%0\n"
8660 + "int $4\n0:\n"
8661 + _ASM_EXTABLE(0b, 0b)
8662 +#endif
8663 +
8664 + : "=r" (new)
8665 + : "0" (c), "ir" (a));
8666 +
8667 + old = atomic_cmpxchg(v, c, new);
8668 if (likely(old == c))
8669 break;
8670 c = old;
8671 }
8672 - return c != (u);
8673 + return c != u;
8674 }
8675
8676 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
8677
8678 #define atomic_inc_return(v) (atomic_add_return(1, v))
8679 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
8680 +{
8681 + return atomic_add_return_unchecked(1, v);
8682 +}
8683 #define atomic_dec_return(v) (atomic_sub_return(1, v))
8684
8685 /* These are x86-specific, used by some header files */
8686 @@ -266,9 +495,18 @@ typedef struct {
8687 u64 __aligned(8) counter;
8688 } atomic64_t;
8689
8690 +#ifdef CONFIG_PAX_REFCOUNT
8691 +typedef struct {
8692 + u64 __aligned(8) counter;
8693 +} atomic64_unchecked_t;
8694 +#else
8695 +typedef atomic64_t atomic64_unchecked_t;
8696 +#endif
8697 +
8698 #define ATOMIC64_INIT(val) { (val) }
8699
8700 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
8701 +extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
8702
8703 /**
8704 * atomic64_xchg - xchg atomic64 variable
8705 @@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
8706 * the old value.
8707 */
8708 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
8709 +extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
8710
8711 /**
8712 * atomic64_set - set atomic64 variable
8713 @@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
8714 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
8715
8716 /**
8717 + * atomic64_unchecked_set - set atomic64 variable
8718 + * @ptr: pointer to type atomic64_unchecked_t
8719 + * @new_val: value to assign
8720 + *
8721 + * Atomically sets the value of @ptr to @new_val.
8722 + */
8723 +extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
8724 +
8725 +/**
8726 * atomic64_read - read atomic64 variable
8727 * @ptr: pointer to type atomic64_t
8728 *
8729 @@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64_t *ptr)
8730 return res;
8731 }
8732
8733 -extern u64 atomic64_read(atomic64_t *ptr);
8734 +/**
8735 + * atomic64_read_unchecked - read atomic64 variable
8736 + * @ptr: pointer to type atomic64_unchecked_t
8737 + *
8738 + * Atomically reads the value of @ptr and returns it.
8739 + */
8740 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
8741 +{
8742 + u64 res;
8743 +
8744 + /*
8745 + * Note, we inline this atomic64_unchecked_t primitive because
8746 + * it only clobbers EAX/EDX and leaves the others
8747 + * untouched. We also (somewhat subtly) rely on the
8748 + * fact that cmpxchg8b returns the current 64-bit value
8749 + * of the memory location we are touching:
8750 + */
8751 + asm volatile(
8752 + "mov %%ebx, %%eax\n\t"
8753 + "mov %%ecx, %%edx\n\t"
8754 + LOCK_PREFIX "cmpxchg8b %1\n"
8755 + : "=&A" (res)
8756 + : "m" (*ptr)
8757 + );
8758 +
8759 + return res;
8760 +}
8761
8762 /**
8763 * atomic64_add_return - add and return
8764 @@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
8765 * Other variants with different arithmetic operators:
8766 */
8767 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
8768 +extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
8769 extern u64 atomic64_inc_return(atomic64_t *ptr);
8770 +extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
8771 extern u64 atomic64_dec_return(atomic64_t *ptr);
8772 +extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
8773
8774 /**
8775 * atomic64_add - add integer to atomic64 variable
8776 @@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_t *ptr);
8777 extern void atomic64_add(u64 delta, atomic64_t *ptr);
8778
8779 /**
8780 + * atomic64_add_unchecked - add integer to atomic64 variable
8781 + * @delta: integer value to add
8782 + * @ptr: pointer to type atomic64_unchecked_t
8783 + *
8784 + * Atomically adds @delta to @ptr.
8785 + */
8786 +extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
8787 +
8788 +/**
8789 * atomic64_sub - subtract the atomic64 variable
8790 * @delta: integer value to subtract
8791 * @ptr: pointer to type atomic64_t
8792 @@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atomic64_t *ptr);
8793 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
8794
8795 /**
8796 + * atomic64_sub_unchecked - subtract the atomic64 variable
8797 + * @delta: integer value to subtract
8798 + * @ptr: pointer to type atomic64_unchecked_t
8799 + *
8800 + * Atomically subtracts @delta from @ptr.
8801 + */
8802 +extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
8803 +
8804 +/**
8805 * atomic64_sub_and_test - subtract value from variable and test result
8806 * @delta: integer value to subtract
8807 * @ptr: pointer to type atomic64_t
8808 @@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
8809 extern void atomic64_inc(atomic64_t *ptr);
8810
8811 /**
8812 + * atomic64_inc_unchecked - increment atomic64 variable
8813 + * @ptr: pointer to type atomic64_unchecked_t
8814 + *
8815 + * Atomically increments @ptr by 1.
8816 + */
8817 +extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
8818 +
8819 +/**
8820 * atomic64_dec - decrement atomic64 variable
8821 * @ptr: pointer to type atomic64_t
8822 *
8823 @@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr);
8824 extern void atomic64_dec(atomic64_t *ptr);
8825
8826 /**
8827 + * atomic64_dec_unchecked - decrement atomic64 variable
8828 + * @ptr: pointer to type atomic64_unchecked_t
8829 + *
8830 + * Atomically decrements @ptr by 1.
8831 + */
8832 +extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
8833 +
8834 +/**
8835 * atomic64_dec_and_test - decrement and test
8836 * @ptr: pointer to type atomic64_t
8837 *
8838 diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h
8839 index d605dc2..fafd7bd 100644
8840 --- a/arch/x86/include/asm/atomic_64.h
8841 +++ b/arch/x86/include/asm/atomic_64.h
8842 @@ -24,6 +24,17 @@ static inline int atomic_read(const atomic_t *v)
8843 }
8844
8845 /**
8846 + * atomic_read_unchecked - read atomic variable
8847 + * @v: pointer of type atomic_unchecked_t
8848 + *
8849 + * Atomically reads the value of @v.
8850 + */
8851 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8852 +{
8853 + return v->counter;
8854 +}
8855 +
8856 +/**
8857 * atomic_set - set atomic variable
8858 * @v: pointer of type atomic_t
8859 * @i: required value
8860 @@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *v, int i)
8861 }
8862
8863 /**
8864 + * atomic_set_unchecked - set atomic variable
8865 + * @v: pointer of type atomic_unchecked_t
8866 + * @i: required value
8867 + *
8868 + * Atomically sets the value of @v to @i.
8869 + */
8870 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8871 +{
8872 + v->counter = i;
8873 +}
8874 +
8875 +/**
8876 * atomic_add - add integer to atomic variable
8877 * @i: integer value to add
8878 * @v: pointer of type atomic_t
8879 @@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *v, int i)
8880 */
8881 static inline void atomic_add(int i, atomic_t *v)
8882 {
8883 - asm volatile(LOCK_PREFIX "addl %1,%0"
8884 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
8885 +
8886 +#ifdef CONFIG_PAX_REFCOUNT
8887 + "jno 0f\n"
8888 + LOCK_PREFIX "subl %1,%0\n"
8889 + "int $4\n0:\n"
8890 + _ASM_EXTABLE(0b, 0b)
8891 +#endif
8892 +
8893 + : "=m" (v->counter)
8894 + : "ir" (i), "m" (v->counter));
8895 +}
8896 +
8897 +/**
8898 + * atomic_add_unchecked - add integer to atomic variable
8899 + * @i: integer value to add
8900 + * @v: pointer of type atomic_unchecked_t
8901 + *
8902 + * Atomically adds @i to @v.
8903 + */
8904 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
8905 +{
8906 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
8907 : "=m" (v->counter)
8908 : "ir" (i), "m" (v->counter));
8909 }
8910 @@ -58,7 +103,29 @@ static inline void atomic_add(int i, atomic_t *v)
8911 */
8912 static inline void atomic_sub(int i, atomic_t *v)
8913 {
8914 - asm volatile(LOCK_PREFIX "subl %1,%0"
8915 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
8916 +
8917 +#ifdef CONFIG_PAX_REFCOUNT
8918 + "jno 0f\n"
8919 + LOCK_PREFIX "addl %1,%0\n"
8920 + "int $4\n0:\n"
8921 + _ASM_EXTABLE(0b, 0b)
8922 +#endif
8923 +
8924 + : "=m" (v->counter)
8925 + : "ir" (i), "m" (v->counter));
8926 +}
8927 +
8928 +/**
8929 + * atomic_sub_unchecked - subtract the atomic variable
8930 + * @i: integer value to subtract
8931 + * @v: pointer of type atomic_unchecked_t
8932 + *
8933 + * Atomically subtracts @i from @v.
8934 + */
8935 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
8936 +{
8937 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
8938 : "=m" (v->counter)
8939 : "ir" (i), "m" (v->counter));
8940 }
8941 @@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8942 {
8943 unsigned char c;
8944
8945 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
8946 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
8947 +
8948 +#ifdef CONFIG_PAX_REFCOUNT
8949 + "jno 0f\n"
8950 + LOCK_PREFIX "addl %2,%0\n"
8951 + "int $4\n0:\n"
8952 + _ASM_EXTABLE(0b, 0b)
8953 +#endif
8954 +
8955 + "sete %1\n"
8956 : "=m" (v->counter), "=qm" (c)
8957 : "ir" (i), "m" (v->counter) : "memory");
8958 return c;
8959 @@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8960 */
8961 static inline void atomic_inc(atomic_t *v)
8962 {
8963 - asm volatile(LOCK_PREFIX "incl %0"
8964 + asm volatile(LOCK_PREFIX "incl %0\n"
8965 +
8966 +#ifdef CONFIG_PAX_REFCOUNT
8967 + "jno 0f\n"
8968 + LOCK_PREFIX "decl %0\n"
8969 + "int $4\n0:\n"
8970 + _ASM_EXTABLE(0b, 0b)
8971 +#endif
8972 +
8973 + : "=m" (v->counter)
8974 + : "m" (v->counter));
8975 +}
8976 +
8977 +/**
8978 + * atomic_inc_unchecked - increment atomic variable
8979 + * @v: pointer of type atomic_unchecked_t
8980 + *
8981 + * Atomically increments @v by 1.
8982 + */
8983 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
8984 +{
8985 + asm volatile(LOCK_PREFIX "incl %0\n"
8986 : "=m" (v->counter)
8987 : "m" (v->counter));
8988 }
8989 @@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *v)
8990 */
8991 static inline void atomic_dec(atomic_t *v)
8992 {
8993 - asm volatile(LOCK_PREFIX "decl %0"
8994 + asm volatile(LOCK_PREFIX "decl %0\n"
8995 +
8996 +#ifdef CONFIG_PAX_REFCOUNT
8997 + "jno 0f\n"
8998 + LOCK_PREFIX "incl %0\n"
8999 + "int $4\n0:\n"
9000 + _ASM_EXTABLE(0b, 0b)
9001 +#endif
9002 +
9003 + : "=m" (v->counter)
9004 + : "m" (v->counter));
9005 +}
9006 +
9007 +/**
9008 + * atomic_dec_unchecked - decrement atomic variable
9009 + * @v: pointer of type atomic_unchecked_t
9010 + *
9011 + * Atomically decrements @v by 1.
9012 + */
9013 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9014 +{
9015 + asm volatile(LOCK_PREFIX "decl %0\n"
9016 : "=m" (v->counter)
9017 : "m" (v->counter));
9018 }
9019 @@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9020 {
9021 unsigned char c;
9022
9023 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
9024 + asm volatile(LOCK_PREFIX "decl %0\n"
9025 +
9026 +#ifdef CONFIG_PAX_REFCOUNT
9027 + "jno 0f\n"
9028 + LOCK_PREFIX "incl %0\n"
9029 + "int $4\n0:\n"
9030 + _ASM_EXTABLE(0b, 0b)
9031 +#endif
9032 +
9033 + "sete %1\n"
9034 : "=m" (v->counter), "=qm" (c)
9035 : "m" (v->counter) : "memory");
9036 return c != 0;
9037 @@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9038 {
9039 unsigned char c;
9040
9041 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
9042 + asm volatile(LOCK_PREFIX "incl %0\n"
9043 +
9044 +#ifdef CONFIG_PAX_REFCOUNT
9045 + "jno 0f\n"
9046 + LOCK_PREFIX "decl %0\n"
9047 + "int $4\n0:\n"
9048 + _ASM_EXTABLE(0b, 0b)
9049 +#endif
9050 +
9051 + "sete %1\n"
9052 + : "=m" (v->counter), "=qm" (c)
9053 + : "m" (v->counter) : "memory");
9054 + return c != 0;
9055 +}
9056 +
9057 +/**
9058 + * atomic_inc_and_test_unchecked - increment and test
9059 + * @v: pointer of type atomic_unchecked_t
9060 + *
9061 + * Atomically increments @v by 1
9062 + * and returns true if the result is zero, or false for all
9063 + * other cases.
9064 + */
9065 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9066 +{
9067 + unsigned char c;
9068 +
9069 + asm volatile(LOCK_PREFIX "incl %0\n"
9070 + "sete %1\n"
9071 : "=m" (v->counter), "=qm" (c)
9072 : "m" (v->counter) : "memory");
9073 return c != 0;
9074 @@ -157,7 +312,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9075 {
9076 unsigned char c;
9077
9078 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9079 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
9080 +
9081 +#ifdef CONFIG_PAX_REFCOUNT
9082 + "jno 0f\n"
9083 + LOCK_PREFIX "subl %2,%0\n"
9084 + "int $4\n0:\n"
9085 + _ASM_EXTABLE(0b, 0b)
9086 +#endif
9087 +
9088 + "sets %1\n"
9089 : "=m" (v->counter), "=qm" (c)
9090 : "ir" (i), "m" (v->counter) : "memory");
9091 return c;
9092 @@ -173,7 +337,31 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9093 static inline int atomic_add_return(int i, atomic_t *v)
9094 {
9095 int __i = i;
9096 - asm volatile(LOCK_PREFIX "xaddl %0, %1"
9097 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9098 +
9099 +#ifdef CONFIG_PAX_REFCOUNT
9100 + "jno 0f\n"
9101 + "movl %0, %1\n"
9102 + "int $4\n0:\n"
9103 + _ASM_EXTABLE(0b, 0b)
9104 +#endif
9105 +
9106 + : "+r" (i), "+m" (v->counter)
9107 + : : "memory");
9108 + return i + __i;
9109 +}
9110 +
9111 +/**
9112 + * atomic_add_return_unchecked - add and return
9113 + * @i: integer value to add
9114 + * @v: pointer of type atomic_unchecked_t
9115 + *
9116 + * Atomically adds @i to @v and returns @i + @v
9117 + */
9118 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9119 +{
9120 + int __i = i;
9121 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9122 : "+r" (i), "+m" (v->counter)
9123 : : "memory");
9124 return i + __i;
9125 @@ -185,6 +373,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
9126 }
9127
9128 #define atomic_inc_return(v) (atomic_add_return(1, v))
9129 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9130 +{
9131 + return atomic_add_return_unchecked(1, v);
9132 +}
9133 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9134
9135 /* The 64-bit atomic type */
9136 @@ -204,6 +396,18 @@ static inline long atomic64_read(const atomic64_t *v)
9137 }
9138
9139 /**
9140 + * atomic64_read_unchecked - read atomic64 variable
9141 + * @v: pointer of type atomic64_unchecked_t
9142 + *
9143 + * Atomically reads the value of @v.
9144 + * Doesn't imply a read memory barrier.
9145 + */
9146 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9147 +{
9148 + return v->counter;
9149 +}
9150 +
9151 +/**
9152 * atomic64_set - set atomic64 variable
9153 * @v: pointer to type atomic64_t
9154 * @i: required value
9155 @@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9156 }
9157
9158 /**
9159 + * atomic64_set_unchecked - set atomic64 variable
9160 + * @v: pointer to type atomic64_unchecked_t
9161 + * @i: required value
9162 + *
9163 + * Atomically sets the value of @v to @i.
9164 + */
9165 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9166 +{
9167 + v->counter = i;
9168 +}
9169 +
9170 +/**
9171 * atomic64_add - add integer to atomic64 variable
9172 * @i: integer value to add
9173 * @v: pointer to type atomic64_t
9174 @@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9175 */
9176 static inline void atomic64_add(long i, atomic64_t *v)
9177 {
9178 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
9179 +
9180 +#ifdef CONFIG_PAX_REFCOUNT
9181 + "jno 0f\n"
9182 + LOCK_PREFIX "subq %1,%0\n"
9183 + "int $4\n0:\n"
9184 + _ASM_EXTABLE(0b, 0b)
9185 +#endif
9186 +
9187 + : "=m" (v->counter)
9188 + : "er" (i), "m" (v->counter));
9189 +}
9190 +
9191 +/**
9192 + * atomic64_add_unchecked - add integer to atomic64 variable
9193 + * @i: integer value to add
9194 + * @v: pointer to type atomic64_unchecked_t
9195 + *
9196 + * Atomically adds @i to @v.
9197 + */
9198 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9199 +{
9200 asm volatile(LOCK_PREFIX "addq %1,%0"
9201 : "=m" (v->counter)
9202 : "er" (i), "m" (v->counter));
9203 @@ -238,7 +476,15 @@ static inline void atomic64_add(long i, atomic64_t *v)
9204 */
9205 static inline void atomic64_sub(long i, atomic64_t *v)
9206 {
9207 - asm volatile(LOCK_PREFIX "subq %1,%0"
9208 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9209 +
9210 +#ifdef CONFIG_PAX_REFCOUNT
9211 + "jno 0f\n"
9212 + LOCK_PREFIX "addq %1,%0\n"
9213 + "int $4\n0:\n"
9214 + _ASM_EXTABLE(0b, 0b)
9215 +#endif
9216 +
9217 : "=m" (v->counter)
9218 : "er" (i), "m" (v->counter));
9219 }
9220 @@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9221 {
9222 unsigned char c;
9223
9224 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9225 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
9226 +
9227 +#ifdef CONFIG_PAX_REFCOUNT
9228 + "jno 0f\n"
9229 + LOCK_PREFIX "addq %2,%0\n"
9230 + "int $4\n0:\n"
9231 + _ASM_EXTABLE(0b, 0b)
9232 +#endif
9233 +
9234 + "sete %1\n"
9235 : "=m" (v->counter), "=qm" (c)
9236 : "er" (i), "m" (v->counter) : "memory");
9237 return c;
9238 @@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9239 */
9240 static inline void atomic64_inc(atomic64_t *v)
9241 {
9242 + asm volatile(LOCK_PREFIX "incq %0\n"
9243 +
9244 +#ifdef CONFIG_PAX_REFCOUNT
9245 + "jno 0f\n"
9246 + LOCK_PREFIX "decq %0\n"
9247 + "int $4\n0:\n"
9248 + _ASM_EXTABLE(0b, 0b)
9249 +#endif
9250 +
9251 + : "=m" (v->counter)
9252 + : "m" (v->counter));
9253 +}
9254 +
9255 +/**
9256 + * atomic64_inc_unchecked - increment atomic64 variable
9257 + * @v: pointer to type atomic64_unchecked_t
9258 + *
9259 + * Atomically increments @v by 1.
9260 + */
9261 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9262 +{
9263 asm volatile(LOCK_PREFIX "incq %0"
9264 : "=m" (v->counter)
9265 : "m" (v->counter));
9266 @@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64_t *v)
9267 */
9268 static inline void atomic64_dec(atomic64_t *v)
9269 {
9270 - asm volatile(LOCK_PREFIX "decq %0"
9271 + asm volatile(LOCK_PREFIX "decq %0\n"
9272 +
9273 +#ifdef CONFIG_PAX_REFCOUNT
9274 + "jno 0f\n"
9275 + LOCK_PREFIX "incq %0\n"
9276 + "int $4\n0:\n"
9277 + _ASM_EXTABLE(0b, 0b)
9278 +#endif
9279 +
9280 + : "=m" (v->counter)
9281 + : "m" (v->counter));
9282 +}
9283 +
9284 +/**
9285 + * atomic64_dec_unchecked - decrement atomic64 variable
9286 + * @v: pointer to type atomic64_t
9287 + *
9288 + * Atomically decrements @v by 1.
9289 + */
9290 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9291 +{
9292 + asm volatile(LOCK_PREFIX "decq %0\n"
9293 : "=m" (v->counter)
9294 : "m" (v->counter));
9295 }
9296 @@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9297 {
9298 unsigned char c;
9299
9300 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
9301 + asm volatile(LOCK_PREFIX "decq %0\n"
9302 +
9303 +#ifdef CONFIG_PAX_REFCOUNT
9304 + "jno 0f\n"
9305 + LOCK_PREFIX "incq %0\n"
9306 + "int $4\n0:\n"
9307 + _ASM_EXTABLE(0b, 0b)
9308 +#endif
9309 +
9310 + "sete %1\n"
9311 : "=m" (v->counter), "=qm" (c)
9312 : "m" (v->counter) : "memory");
9313 return c != 0;
9314 @@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9315 {
9316 unsigned char c;
9317
9318 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
9319 + asm volatile(LOCK_PREFIX "incq %0\n"
9320 +
9321 +#ifdef CONFIG_PAX_REFCOUNT
9322 + "jno 0f\n"
9323 + LOCK_PREFIX "decq %0\n"
9324 + "int $4\n0:\n"
9325 + _ASM_EXTABLE(0b, 0b)
9326 +#endif
9327 +
9328 + "sete %1\n"
9329 : "=m" (v->counter), "=qm" (c)
9330 : "m" (v->counter) : "memory");
9331 return c != 0;
9332 @@ -337,7 +652,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9333 {
9334 unsigned char c;
9335
9336 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9337 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
9338 +
9339 +#ifdef CONFIG_PAX_REFCOUNT
9340 + "jno 0f\n"
9341 + LOCK_PREFIX "subq %2,%0\n"
9342 + "int $4\n0:\n"
9343 + _ASM_EXTABLE(0b, 0b)
9344 +#endif
9345 +
9346 + "sets %1\n"
9347 : "=m" (v->counter), "=qm" (c)
9348 : "er" (i), "m" (v->counter) : "memory");
9349 return c;
9350 @@ -353,7 +677,31 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9351 static inline long atomic64_add_return(long i, atomic64_t *v)
9352 {
9353 long __i = i;
9354 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
9355 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
9356 +
9357 +#ifdef CONFIG_PAX_REFCOUNT
9358 + "jno 0f\n"
9359 + "movq %0, %1\n"
9360 + "int $4\n0:\n"
9361 + _ASM_EXTABLE(0b, 0b)
9362 +#endif
9363 +
9364 + : "+r" (i), "+m" (v->counter)
9365 + : : "memory");
9366 + return i + __i;
9367 +}
9368 +
9369 +/**
9370 + * atomic64_add_return_unchecked - add and return
9371 + * @i: integer value to add
9372 + * @v: pointer to type atomic64_unchecked_t
9373 + *
9374 + * Atomically adds @i to @v and returns @i + @v
9375 + */
9376 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9377 +{
9378 + long __i = i;
9379 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
9380 : "+r" (i), "+m" (v->counter)
9381 : : "memory");
9382 return i + __i;
9383 @@ -365,6 +713,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
9384 }
9385
9386 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
9387 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9388 +{
9389 + return atomic64_add_return_unchecked(1, v);
9390 +}
9391 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
9392
9393 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9394 @@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9395 return cmpxchg(&v->counter, old, new);
9396 }
9397
9398 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
9399 +{
9400 + return cmpxchg(&v->counter, old, new);
9401 +}
9402 +
9403 static inline long atomic64_xchg(atomic64_t *v, long new)
9404 {
9405 return xchg(&v->counter, new);
9406 }
9407
9408 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
9409 +{
9410 + return xchg(&v->counter, new);
9411 +}
9412 +
9413 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
9414 {
9415 return cmpxchg(&v->counter, old, new);
9416 }
9417
9418 +static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9419 +{
9420 + return cmpxchg(&v->counter, old, new);
9421 +}
9422 +
9423 static inline long atomic_xchg(atomic_t *v, int new)
9424 {
9425 return xchg(&v->counter, new);
9426 }
9427
9428 +static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9429 +{
9430 + return xchg(&v->counter, new);
9431 +}
9432 +
9433 /**
9434 * atomic_add_unless - add unless the number is a given value
9435 * @v: pointer of type atomic_t
9436 @@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t *v, int new)
9437 */
9438 static inline int atomic_add_unless(atomic_t *v, int a, int u)
9439 {
9440 - int c, old;
9441 + int c, old, new;
9442 c = atomic_read(v);
9443 for (;;) {
9444 - if (unlikely(c == (u)))
9445 + if (unlikely(c == u))
9446 break;
9447 - old = atomic_cmpxchg((v), c, c + (a));
9448 +
9449 + asm volatile("addl %2,%0\n"
9450 +
9451 +#ifdef CONFIG_PAX_REFCOUNT
9452 + "jno 0f\n"
9453 + "subl %2,%0\n"
9454 + "int $4\n0:\n"
9455 + _ASM_EXTABLE(0b, 0b)
9456 +#endif
9457 +
9458 + : "=r" (new)
9459 + : "0" (c), "ir" (a));
9460 +
9461 + old = atomic_cmpxchg(v, c, new);
9462 if (likely(old == c))
9463 break;
9464 c = old;
9465 }
9466 - return c != (u);
9467 + return c != u;
9468 }
9469
9470 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
9471 @@ -424,17 +809,30 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
9472 */
9473 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
9474 {
9475 - long c, old;
9476 + long c, old, new;
9477 c = atomic64_read(v);
9478 for (;;) {
9479 - if (unlikely(c == (u)))
9480 + if (unlikely(c == u))
9481 break;
9482 - old = atomic64_cmpxchg((v), c, c + (a));
9483 +
9484 + asm volatile("addq %2,%0\n"
9485 +
9486 +#ifdef CONFIG_PAX_REFCOUNT
9487 + "jno 0f\n"
9488 + "subq %2,%0\n"
9489 + "int $4\n0:\n"
9490 + _ASM_EXTABLE(0b, 0b)
9491 +#endif
9492 +
9493 + : "=r" (new)
9494 + : "0" (c), "er" (a));
9495 +
9496 + old = atomic64_cmpxchg(v, c, new);
9497 if (likely(old == c))
9498 break;
9499 c = old;
9500 }
9501 - return c != (u);
9502 + return c != u;
9503 }
9504
9505 /**
9506 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
9507 index 02b47a6..d5c4b15 100644
9508 --- a/arch/x86/include/asm/bitops.h
9509 +++ b/arch/x86/include/asm/bitops.h
9510 @@ -38,7 +38,7 @@
9511 * a mask operation on a byte.
9512 */
9513 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
9514 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
9515 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
9516 #define CONST_MASK(nr) (1 << ((nr) & 7))
9517
9518 /**
9519 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
9520 index 7a10659..8bbf355 100644
9521 --- a/arch/x86/include/asm/boot.h
9522 +++ b/arch/x86/include/asm/boot.h
9523 @@ -11,10 +11,15 @@
9524 #include <asm/pgtable_types.h>
9525
9526 /* Physical address where kernel should be loaded. */
9527 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9528 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9529 + (CONFIG_PHYSICAL_ALIGN - 1)) \
9530 & ~(CONFIG_PHYSICAL_ALIGN - 1))
9531
9532 +#ifndef __ASSEMBLY__
9533 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
9534 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
9535 +#endif
9536 +
9537 /* Minimum kernel alignment, as a power of two */
9538 #ifdef CONFIG_X86_64
9539 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
9540 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
9541 index 549860d..7d45f68 100644
9542 --- a/arch/x86/include/asm/cache.h
9543 +++ b/arch/x86/include/asm/cache.h
9544 @@ -5,9 +5,10 @@
9545
9546 /* L1 cache line size */
9547 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
9548 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9549 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9550
9551 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
9552 +#define __read_only __attribute__((__section__(".data.read_only")))
9553
9554 #ifdef CONFIG_X86_VSMP
9555 /* vSMP Internode cacheline shift */
9556 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
9557 index b54f6af..5b376a6 100644
9558 --- a/arch/x86/include/asm/cacheflush.h
9559 +++ b/arch/x86/include/asm/cacheflush.h
9560 @@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
9561 static inline unsigned long get_page_memtype(struct page *pg)
9562 {
9563 if (!PageUncached(pg) && !PageWC(pg))
9564 - return -1;
9565 + return ~0UL;
9566 else if (!PageUncached(pg) && PageWC(pg))
9567 return _PAGE_CACHE_WC;
9568 else if (PageUncached(pg) && !PageWC(pg))
9569 @@ -85,7 +85,7 @@ static inline void set_page_memtype(struct page *pg, unsigned long memtype)
9570 SetPageWC(pg);
9571 break;
9572 default:
9573 - case -1:
9574 + case ~0UL:
9575 ClearPageUncached(pg);
9576 ClearPageWC(pg);
9577 break;
9578 diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
9579 index 0e63c9a..ab8d972 100644
9580 --- a/arch/x86/include/asm/calling.h
9581 +++ b/arch/x86/include/asm/calling.h
9582 @@ -52,32 +52,32 @@ For 32-bit we have the following conventions - kernel is built with
9583 * for assembly code:
9584 */
9585
9586 -#define R15 0
9587 -#define R14 8
9588 -#define R13 16
9589 -#define R12 24
9590 -#define RBP 32
9591 -#define RBX 40
9592 +#define R15 (0)
9593 +#define R14 (8)
9594 +#define R13 (16)
9595 +#define R12 (24)
9596 +#define RBP (32)
9597 +#define RBX (40)
9598
9599 /* arguments: interrupts/non tracing syscalls only save up to here: */
9600 -#define R11 48
9601 -#define R10 56
9602 -#define R9 64
9603 -#define R8 72
9604 -#define RAX 80
9605 -#define RCX 88
9606 -#define RDX 96
9607 -#define RSI 104
9608 -#define RDI 112
9609 -#define ORIG_RAX 120 /* + error_code */
9610 +#define R11 (48)
9611 +#define R10 (56)
9612 +#define R9 (64)
9613 +#define R8 (72)
9614 +#define RAX (80)
9615 +#define RCX (88)
9616 +#define RDX (96)
9617 +#define RSI (104)
9618 +#define RDI (112)
9619 +#define ORIG_RAX (120) /* + error_code */
9620 /* end of arguments */
9621
9622 /* cpu exception frame or undefined in case of fast syscall: */
9623 -#define RIP 128
9624 -#define CS 136
9625 -#define EFLAGS 144
9626 -#define RSP 152
9627 -#define SS 160
9628 +#define RIP (128)
9629 +#define CS (136)
9630 +#define EFLAGS (144)
9631 +#define RSP (152)
9632 +#define SS (160)
9633
9634 #define ARGOFFSET R11
9635 #define SWFRAME ORIG_RAX
9636 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
9637 index 46fc474..b02b0f9 100644
9638 --- a/arch/x86/include/asm/checksum_32.h
9639 +++ b/arch/x86/include/asm/checksum_32.h
9640 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
9641 int len, __wsum sum,
9642 int *src_err_ptr, int *dst_err_ptr);
9643
9644 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
9645 + int len, __wsum sum,
9646 + int *src_err_ptr, int *dst_err_ptr);
9647 +
9648 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
9649 + int len, __wsum sum,
9650 + int *src_err_ptr, int *dst_err_ptr);
9651 +
9652 /*
9653 * Note: when you get a NULL pointer exception here this means someone
9654 * passed in an incorrect kernel address to one of these functions.
9655 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
9656 int *err_ptr)
9657 {
9658 might_sleep();
9659 - return csum_partial_copy_generic((__force void *)src, dst,
9660 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
9661 len, sum, err_ptr, NULL);
9662 }
9663
9664 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
9665 {
9666 might_sleep();
9667 if (access_ok(VERIFY_WRITE, dst, len))
9668 - return csum_partial_copy_generic(src, (__force void *)dst,
9669 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
9670 len, sum, NULL, err_ptr);
9671
9672 if (len)
9673 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
9674 index 617bd56..7b047a1 100644
9675 --- a/arch/x86/include/asm/desc.h
9676 +++ b/arch/x86/include/asm/desc.h
9677 @@ -4,6 +4,7 @@
9678 #include <asm/desc_defs.h>
9679 #include <asm/ldt.h>
9680 #include <asm/mmu.h>
9681 +#include <asm/pgtable.h>
9682 #include <linux/smp.h>
9683
9684 static inline void fill_ldt(struct desc_struct *desc,
9685 @@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_struct *desc,
9686 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
9687 desc->type = (info->read_exec_only ^ 1) << 1;
9688 desc->type |= info->contents << 2;
9689 + desc->type |= info->seg_not_present ^ 1;
9690 desc->s = 1;
9691 desc->dpl = 0x3;
9692 desc->p = info->seg_not_present ^ 1;
9693 @@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_struct *desc,
9694 }
9695
9696 extern struct desc_ptr idt_descr;
9697 -extern gate_desc idt_table[];
9698 -
9699 -struct gdt_page {
9700 - struct desc_struct gdt[GDT_ENTRIES];
9701 -} __attribute__((aligned(PAGE_SIZE)));
9702 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
9703 +extern gate_desc idt_table[256];
9704
9705 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
9706 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
9707 {
9708 - return per_cpu(gdt_page, cpu).gdt;
9709 + return cpu_gdt_table[cpu];
9710 }
9711
9712 #ifdef CONFIG_X86_64
9713 @@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
9714 unsigned long base, unsigned dpl, unsigned flags,
9715 unsigned short seg)
9716 {
9717 - gate->a = (seg << 16) | (base & 0xffff);
9718 - gate->b = (base & 0xffff0000) |
9719 - (((0x80 | type | (dpl << 5)) & 0xff) << 8);
9720 + gate->gate.offset_low = base;
9721 + gate->gate.seg = seg;
9722 + gate->gate.reserved = 0;
9723 + gate->gate.type = type;
9724 + gate->gate.s = 0;
9725 + gate->gate.dpl = dpl;
9726 + gate->gate.p = 1;
9727 + gate->gate.offset_high = base >> 16;
9728 }
9729
9730 #endif
9731 @@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
9732 static inline void native_write_idt_entry(gate_desc *idt, int entry,
9733 const gate_desc *gate)
9734 {
9735 + pax_open_kernel();
9736 memcpy(&idt[entry], gate, sizeof(*gate));
9737 + pax_close_kernel();
9738 }
9739
9740 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
9741 const void *desc)
9742 {
9743 + pax_open_kernel();
9744 memcpy(&ldt[entry], desc, 8);
9745 + pax_close_kernel();
9746 }
9747
9748 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
9749 @@ -139,7 +146,10 @@ static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
9750 size = sizeof(struct desc_struct);
9751 break;
9752 }
9753 +
9754 + pax_open_kernel();
9755 memcpy(&gdt[entry], desc, size);
9756 + pax_close_kernel();
9757 }
9758
9759 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
9760 @@ -211,7 +221,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
9761
9762 static inline void native_load_tr_desc(void)
9763 {
9764 + pax_open_kernel();
9765 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
9766 + pax_close_kernel();
9767 }
9768
9769 static inline void native_load_gdt(const struct desc_ptr *dtr)
9770 @@ -246,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
9771 unsigned int i;
9772 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
9773
9774 + pax_open_kernel();
9775 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
9776 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
9777 + pax_close_kernel();
9778 }
9779
9780 #define _LDT_empty(info) \
9781 @@ -309,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
9782 desc->limit = (limit >> 16) & 0xf;
9783 }
9784
9785 -static inline void _set_gate(int gate, unsigned type, void *addr,
9786 +static inline void _set_gate(int gate, unsigned type, const void *addr,
9787 unsigned dpl, unsigned ist, unsigned seg)
9788 {
9789 gate_desc s;
9790 @@ -327,7 +341,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
9791 * Pentium F0 0F bugfix can have resulted in the mapped
9792 * IDT being write-protected.
9793 */
9794 -static inline void set_intr_gate(unsigned int n, void *addr)
9795 +static inline void set_intr_gate(unsigned int n, const void *addr)
9796 {
9797 BUG_ON((unsigned)n > 0xFF);
9798 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
9799 @@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
9800 /*
9801 * This routine sets up an interrupt gate at directory privilege level 3.
9802 */
9803 -static inline void set_system_intr_gate(unsigned int n, void *addr)
9804 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
9805 {
9806 BUG_ON((unsigned)n > 0xFF);
9807 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
9808 }
9809
9810 -static inline void set_system_trap_gate(unsigned int n, void *addr)
9811 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
9812 {
9813 BUG_ON((unsigned)n > 0xFF);
9814 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
9815 }
9816
9817 -static inline void set_trap_gate(unsigned int n, void *addr)
9818 +static inline void set_trap_gate(unsigned int n, const void *addr)
9819 {
9820 BUG_ON((unsigned)n > 0xFF);
9821 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
9822 @@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
9823 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
9824 {
9825 BUG_ON((unsigned)n > 0xFF);
9826 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
9827 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
9828 }
9829
9830 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
9831 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
9832 {
9833 BUG_ON((unsigned)n > 0xFF);
9834 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
9835 }
9836
9837 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
9838 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
9839 {
9840 BUG_ON((unsigned)n > 0xFF);
9841 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
9842 }
9843
9844 +#ifdef CONFIG_X86_32
9845 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
9846 +{
9847 + struct desc_struct d;
9848 +
9849 + if (likely(limit))
9850 + limit = (limit - 1UL) >> PAGE_SHIFT;
9851 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
9852 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
9853 +}
9854 +#endif
9855 +
9856 #endif /* _ASM_X86_DESC_H */
9857 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
9858 index 9d66848..6b4a691 100644
9859 --- a/arch/x86/include/asm/desc_defs.h
9860 +++ b/arch/x86/include/asm/desc_defs.h
9861 @@ -31,6 +31,12 @@ struct desc_struct {
9862 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
9863 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
9864 };
9865 + struct {
9866 + u16 offset_low;
9867 + u16 seg;
9868 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
9869 + unsigned offset_high: 16;
9870 + } gate;
9871 };
9872 } __attribute__((packed));
9873
9874 diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
9875 index cee34e9..a7c3fa2 100644
9876 --- a/arch/x86/include/asm/device.h
9877 +++ b/arch/x86/include/asm/device.h
9878 @@ -6,7 +6,7 @@ struct dev_archdata {
9879 void *acpi_handle;
9880 #endif
9881 #ifdef CONFIG_X86_64
9882 -struct dma_map_ops *dma_ops;
9883 + const struct dma_map_ops *dma_ops;
9884 #endif
9885 #ifdef CONFIG_DMAR
9886 void *iommu; /* hook for IOMMU specific extension */
9887 diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
9888 index 6a25d5d..786b202 100644
9889 --- a/arch/x86/include/asm/dma-mapping.h
9890 +++ b/arch/x86/include/asm/dma-mapping.h
9891 @@ -25,9 +25,9 @@ extern int iommu_merge;
9892 extern struct device x86_dma_fallback_dev;
9893 extern int panic_on_overflow;
9894
9895 -extern struct dma_map_ops *dma_ops;
9896 +extern const struct dma_map_ops *dma_ops;
9897
9898 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
9899 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
9900 {
9901 #ifdef CONFIG_X86_32
9902 return dma_ops;
9903 @@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
9904 /* Make sure we keep the same behaviour */
9905 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
9906 {
9907 - struct dma_map_ops *ops = get_dma_ops(dev);
9908 + const struct dma_map_ops *ops = get_dma_ops(dev);
9909 if (ops->mapping_error)
9910 return ops->mapping_error(dev, dma_addr);
9911
9912 @@ -122,7 +122,7 @@ static inline void *
9913 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
9914 gfp_t gfp)
9915 {
9916 - struct dma_map_ops *ops = get_dma_ops(dev);
9917 + const struct dma_map_ops *ops = get_dma_ops(dev);
9918 void *memory;
9919
9920 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
9921 @@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
9922 static inline void dma_free_coherent(struct device *dev, size_t size,
9923 void *vaddr, dma_addr_t bus)
9924 {
9925 - struct dma_map_ops *ops = get_dma_ops(dev);
9926 + const struct dma_map_ops *ops = get_dma_ops(dev);
9927
9928 WARN_ON(irqs_disabled()); /* for portability */
9929
9930 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
9931 index 40b4e61..40d8133 100644
9932 --- a/arch/x86/include/asm/e820.h
9933 +++ b/arch/x86/include/asm/e820.h
9934 @@ -133,7 +133,7 @@ extern char *default_machine_specific_memory_setup(void);
9935 #define ISA_END_ADDRESS 0x100000
9936 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
9937
9938 -#define BIOS_BEGIN 0x000a0000
9939 +#define BIOS_BEGIN 0x000c0000
9940 #define BIOS_END 0x00100000
9941
9942 #ifdef __KERNEL__
9943 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
9944 index 8ac9d9a..0a6c96e 100644
9945 --- a/arch/x86/include/asm/elf.h
9946 +++ b/arch/x86/include/asm/elf.h
9947 @@ -257,7 +257,25 @@ extern int force_personality32;
9948 the loader. We need to make sure that it is out of the way of the program
9949 that it will "exec", and that there is sufficient room for the brk. */
9950
9951 +#ifdef CONFIG_PAX_SEGMEXEC
9952 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
9953 +#else
9954 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
9955 +#endif
9956 +
9957 +#ifdef CONFIG_PAX_ASLR
9958 +#ifdef CONFIG_X86_32
9959 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
9960 +
9961 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
9962 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
9963 +#else
9964 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
9965 +
9966 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
9967 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
9968 +#endif
9969 +#endif
9970
9971 /* This yields a mask that user programs can use to figure out what
9972 instruction set this CPU supports. This could be done in user space,
9973 @@ -310,9 +328,7 @@ do { \
9974
9975 #define ARCH_DLINFO \
9976 do { \
9977 - if (vdso_enabled) \
9978 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
9979 - (unsigned long)current->mm->context.vdso); \
9980 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
9981 } while (0)
9982
9983 #define AT_SYSINFO 32
9984 @@ -323,7 +339,7 @@ do { \
9985
9986 #endif /* !CONFIG_X86_32 */
9987
9988 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
9989 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
9990
9991 #define VDSO_ENTRY \
9992 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
9993 @@ -337,7 +353,4 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
9994 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
9995 #define compat_arch_setup_additional_pages syscall32_setup_pages
9996
9997 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
9998 -#define arch_randomize_brk arch_randomize_brk
9999 -
10000 #endif /* _ASM_X86_ELF_H */
10001 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
10002 index cc70c1c..d96d011 100644
10003 --- a/arch/x86/include/asm/emergency-restart.h
10004 +++ b/arch/x86/include/asm/emergency-restart.h
10005 @@ -15,6 +15,6 @@ enum reboot_type {
10006
10007 extern enum reboot_type reboot_type;
10008
10009 -extern void machine_emergency_restart(void);
10010 +extern void machine_emergency_restart(void) __noreturn;
10011
10012 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10013 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10014 index 1f11ce4..7caabd1 100644
10015 --- a/arch/x86/include/asm/futex.h
10016 +++ b/arch/x86/include/asm/futex.h
10017 @@ -12,16 +12,18 @@
10018 #include <asm/system.h>
10019
10020 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10021 + typecheck(u32 __user *, uaddr); \
10022 asm volatile("1:\t" insn "\n" \
10023 "2:\t.section .fixup,\"ax\"\n" \
10024 "3:\tmov\t%3, %1\n" \
10025 "\tjmp\t2b\n" \
10026 "\t.previous\n" \
10027 _ASM_EXTABLE(1b, 3b) \
10028 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10029 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10030 : "i" (-EFAULT), "0" (oparg), "1" (0))
10031
10032 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10033 + typecheck(u32 __user *, uaddr); \
10034 asm volatile("1:\tmovl %2, %0\n" \
10035 "\tmovl\t%0, %3\n" \
10036 "\t" insn "\n" \
10037 @@ -34,10 +36,10 @@
10038 _ASM_EXTABLE(1b, 4b) \
10039 _ASM_EXTABLE(2b, 4b) \
10040 : "=&a" (oldval), "=&r" (ret), \
10041 - "+m" (*uaddr), "=&r" (tem) \
10042 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10043 : "r" (oparg), "i" (-EFAULT), "1" (0))
10044
10045 -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10046 +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10047 {
10048 int op = (encoded_op >> 28) & 7;
10049 int cmp = (encoded_op >> 24) & 15;
10050 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10051
10052 switch (op) {
10053 case FUTEX_OP_SET:
10054 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10055 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10056 break;
10057 case FUTEX_OP_ADD:
10058 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10059 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10060 uaddr, oparg);
10061 break;
10062 case FUTEX_OP_OR:
10063 @@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10064 return ret;
10065 }
10066
10067 -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
10068 +static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
10069 int newval)
10070 {
10071
10072 @@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
10073 return -ENOSYS;
10074 #endif
10075
10076 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
10077 + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10078 return -EFAULT;
10079
10080 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
10081 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
10082 "2:\t.section .fixup, \"ax\"\n"
10083 "3:\tmov %2, %0\n"
10084 "\tjmp 2b\n"
10085 "\t.previous\n"
10086 _ASM_EXTABLE(1b, 3b)
10087 - : "=a" (oldval), "+m" (*uaddr)
10088 + : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
10089 : "i" (-EFAULT), "r" (newval), "0" (oldval)
10090 : "memory"
10091 );
10092 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10093 index ba180d9..3bad351 100644
10094 --- a/arch/x86/include/asm/hw_irq.h
10095 +++ b/arch/x86/include/asm/hw_irq.h
10096 @@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
10097 extern void enable_IO_APIC(void);
10098
10099 /* Statistics */
10100 -extern atomic_t irq_err_count;
10101 -extern atomic_t irq_mis_count;
10102 +extern atomic_unchecked_t irq_err_count;
10103 +extern atomic_unchecked_t irq_mis_count;
10104
10105 /* EISA */
10106 extern void eisa_set_level_irq(unsigned int irq);
10107 diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
10108 index 0b20bbb..4cb1396 100644
10109 --- a/arch/x86/include/asm/i387.h
10110 +++ b/arch/x86/include/asm/i387.h
10111 @@ -60,6 +60,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10112 {
10113 int err;
10114
10115 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10116 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10117 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
10118 +#endif
10119 +
10120 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
10121 "2:\n"
10122 ".section .fixup,\"ax\"\n"
10123 @@ -105,6 +110,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10124 {
10125 int err;
10126
10127 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10128 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10129 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10130 +#endif
10131 +
10132 asm volatile("1: rex64/fxsave (%[fx])\n\t"
10133 "2:\n"
10134 ".section .fixup,\"ax\"\n"
10135 @@ -195,13 +205,8 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10136 }
10137
10138 /* We need a safe address that is cheap to find and that is already
10139 - in L1 during context switch. The best choices are unfortunately
10140 - different for UP and SMP */
10141 -#ifdef CONFIG_SMP
10142 -#define safe_address (__per_cpu_offset[0])
10143 -#else
10144 -#define safe_address (kstat_cpu(0).cpustat.user)
10145 -#endif
10146 + in L1 during context switch. */
10147 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
10148
10149 /*
10150 * These must be called with preempt disabled
10151 @@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void)
10152 struct thread_info *me = current_thread_info();
10153 preempt_disable();
10154 if (me->status & TS_USEDFPU)
10155 - __save_init_fpu(me->task);
10156 + __save_init_fpu(current);
10157 else
10158 clts();
10159 }
10160 diff --git a/arch/x86/include/asm/io_32.h b/arch/x86/include/asm/io_32.h
10161 index a299900..15c5410 100644
10162 --- a/arch/x86/include/asm/io_32.h
10163 +++ b/arch/x86/include/asm/io_32.h
10164 @@ -3,6 +3,7 @@
10165
10166 #include <linux/string.h>
10167 #include <linux/compiler.h>
10168 +#include <asm/processor.h>
10169
10170 /*
10171 * This file contains the definitions for the x86 IO instructions
10172 @@ -42,6 +43,17 @@
10173
10174 #ifdef __KERNEL__
10175
10176 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10177 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10178 +{
10179 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10180 +}
10181 +
10182 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10183 +{
10184 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10185 +}
10186 +
10187 #include <asm-generic/iomap.h>
10188
10189 #include <linux/vmalloc.h>
10190 diff --git a/arch/x86/include/asm/io_64.h b/arch/x86/include/asm/io_64.h
10191 index 2440678..c158b88 100644
10192 --- a/arch/x86/include/asm/io_64.h
10193 +++ b/arch/x86/include/asm/io_64.h
10194 @@ -140,6 +140,17 @@ __OUTS(l)
10195
10196 #include <linux/vmalloc.h>
10197
10198 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10199 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10200 +{
10201 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10202 +}
10203 +
10204 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10205 +{
10206 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10207 +}
10208 +
10209 #include <asm-generic/iomap.h>
10210
10211 void __memcpy_fromio(void *, unsigned long, unsigned);
10212 diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
10213 index fd6d21b..8b13915 100644
10214 --- a/arch/x86/include/asm/iommu.h
10215 +++ b/arch/x86/include/asm/iommu.h
10216 @@ -3,7 +3,7 @@
10217
10218 extern void pci_iommu_shutdown(void);
10219 extern void no_iommu_init(void);
10220 -extern struct dma_map_ops nommu_dma_ops;
10221 +extern const struct dma_map_ops nommu_dma_ops;
10222 extern int force_iommu, no_iommu;
10223 extern int iommu_detected;
10224 extern int iommu_pass_through;
10225 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10226 index 9e2b952..557206e 100644
10227 --- a/arch/x86/include/asm/irqflags.h
10228 +++ b/arch/x86/include/asm/irqflags.h
10229 @@ -142,6 +142,11 @@ static inline unsigned long __raw_local_irq_save(void)
10230 sti; \
10231 sysexit
10232
10233 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
10234 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10235 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
10236 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10237 +
10238 #else
10239 #define INTERRUPT_RETURN iret
10240 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10241 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10242 index 4fe681d..bb6d40c 100644
10243 --- a/arch/x86/include/asm/kprobes.h
10244 +++ b/arch/x86/include/asm/kprobes.h
10245 @@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
10246 #define BREAKPOINT_INSTRUCTION 0xcc
10247 #define RELATIVEJUMP_INSTRUCTION 0xe9
10248 #define MAX_INSN_SIZE 16
10249 -#define MAX_STACK_SIZE 64
10250 -#define MIN_STACK_SIZE(ADDR) \
10251 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10252 - THREAD_SIZE - (unsigned long)(ADDR))) \
10253 - ? (MAX_STACK_SIZE) \
10254 - : (((unsigned long)current_thread_info()) + \
10255 - THREAD_SIZE - (unsigned long)(ADDR)))
10256 +#define MAX_STACK_SIZE 64UL
10257 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10258
10259 #define flush_insn_slot(p) do { } while (0)
10260
10261 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10262 index 08bc2ff..2e88d1f 100644
10263 --- a/arch/x86/include/asm/kvm_host.h
10264 +++ b/arch/x86/include/asm/kvm_host.h
10265 @@ -534,9 +534,9 @@ struct kvm_x86_ops {
10266 bool (*gb_page_enable)(void);
10267
10268 const struct trace_print_flags *exit_reasons_str;
10269 -};
10270 +} __do_const;
10271
10272 -extern struct kvm_x86_ops *kvm_x86_ops;
10273 +extern const struct kvm_x86_ops *kvm_x86_ops;
10274
10275 int kvm_mmu_module_init(void);
10276 void kvm_mmu_module_exit(void);
10277 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10278 index 47b9b6f..815aaa1 100644
10279 --- a/arch/x86/include/asm/local.h
10280 +++ b/arch/x86/include/asm/local.h
10281 @@ -18,26 +18,58 @@ typedef struct {
10282
10283 static inline void local_inc(local_t *l)
10284 {
10285 - asm volatile(_ASM_INC "%0"
10286 + asm volatile(_ASM_INC "%0\n"
10287 +
10288 +#ifdef CONFIG_PAX_REFCOUNT
10289 + "jno 0f\n"
10290 + _ASM_DEC "%0\n"
10291 + "int $4\n0:\n"
10292 + _ASM_EXTABLE(0b, 0b)
10293 +#endif
10294 +
10295 : "+m" (l->a.counter));
10296 }
10297
10298 static inline void local_dec(local_t *l)
10299 {
10300 - asm volatile(_ASM_DEC "%0"
10301 + asm volatile(_ASM_DEC "%0\n"
10302 +
10303 +#ifdef CONFIG_PAX_REFCOUNT
10304 + "jno 0f\n"
10305 + _ASM_INC "%0\n"
10306 + "int $4\n0:\n"
10307 + _ASM_EXTABLE(0b, 0b)
10308 +#endif
10309 +
10310 : "+m" (l->a.counter));
10311 }
10312
10313 static inline void local_add(long i, local_t *l)
10314 {
10315 - asm volatile(_ASM_ADD "%1,%0"
10316 + asm volatile(_ASM_ADD "%1,%0\n"
10317 +
10318 +#ifdef CONFIG_PAX_REFCOUNT
10319 + "jno 0f\n"
10320 + _ASM_SUB "%1,%0\n"
10321 + "int $4\n0:\n"
10322 + _ASM_EXTABLE(0b, 0b)
10323 +#endif
10324 +
10325 : "+m" (l->a.counter)
10326 : "ir" (i));
10327 }
10328
10329 static inline void local_sub(long i, local_t *l)
10330 {
10331 - asm volatile(_ASM_SUB "%1,%0"
10332 + asm volatile(_ASM_SUB "%1,%0\n"
10333 +
10334 +#ifdef CONFIG_PAX_REFCOUNT
10335 + "jno 0f\n"
10336 + _ASM_ADD "%1,%0\n"
10337 + "int $4\n0:\n"
10338 + _ASM_EXTABLE(0b, 0b)
10339 +#endif
10340 +
10341 : "+m" (l->a.counter)
10342 : "ir" (i));
10343 }
10344 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10345 {
10346 unsigned char c;
10347
10348 - asm volatile(_ASM_SUB "%2,%0; sete %1"
10349 + asm volatile(_ASM_SUB "%2,%0\n"
10350 +
10351 +#ifdef CONFIG_PAX_REFCOUNT
10352 + "jno 0f\n"
10353 + _ASM_ADD "%2,%0\n"
10354 + "int $4\n0:\n"
10355 + _ASM_EXTABLE(0b, 0b)
10356 +#endif
10357 +
10358 + "sete %1\n"
10359 : "+m" (l->a.counter), "=qm" (c)
10360 : "ir" (i) : "memory");
10361 return c;
10362 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
10363 {
10364 unsigned char c;
10365
10366 - asm volatile(_ASM_DEC "%0; sete %1"
10367 + asm volatile(_ASM_DEC "%0\n"
10368 +
10369 +#ifdef CONFIG_PAX_REFCOUNT
10370 + "jno 0f\n"
10371 + _ASM_INC "%0\n"
10372 + "int $4\n0:\n"
10373 + _ASM_EXTABLE(0b, 0b)
10374 +#endif
10375 +
10376 + "sete %1\n"
10377 : "+m" (l->a.counter), "=qm" (c)
10378 : : "memory");
10379 return c != 0;
10380 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
10381 {
10382 unsigned char c;
10383
10384 - asm volatile(_ASM_INC "%0; sete %1"
10385 + asm volatile(_ASM_INC "%0\n"
10386 +
10387 +#ifdef CONFIG_PAX_REFCOUNT
10388 + "jno 0f\n"
10389 + _ASM_DEC "%0\n"
10390 + "int $4\n0:\n"
10391 + _ASM_EXTABLE(0b, 0b)
10392 +#endif
10393 +
10394 + "sete %1\n"
10395 : "+m" (l->a.counter), "=qm" (c)
10396 : : "memory");
10397 return c != 0;
10398 @@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
10399 {
10400 unsigned char c;
10401
10402 - asm volatile(_ASM_ADD "%2,%0; sets %1"
10403 + asm volatile(_ASM_ADD "%2,%0\n"
10404 +
10405 +#ifdef CONFIG_PAX_REFCOUNT
10406 + "jno 0f\n"
10407 + _ASM_SUB "%2,%0\n"
10408 + "int $4\n0:\n"
10409 + _ASM_EXTABLE(0b, 0b)
10410 +#endif
10411 +
10412 + "sets %1\n"
10413 : "+m" (l->a.counter), "=qm" (c)
10414 : "ir" (i) : "memory");
10415 return c;
10416 @@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
10417 #endif
10418 /* Modern 486+ processor */
10419 __i = i;
10420 - asm volatile(_ASM_XADD "%0, %1;"
10421 + asm volatile(_ASM_XADD "%0, %1\n"
10422 +
10423 +#ifdef CONFIG_PAX_REFCOUNT
10424 + "jno 0f\n"
10425 + _ASM_MOV "%0,%1\n"
10426 + "int $4\n0:\n"
10427 + _ASM_EXTABLE(0b, 0b)
10428 +#endif
10429 +
10430 : "+r" (i), "+m" (l->a.counter)
10431 : : "memory");
10432 return i + __i;
10433 diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
10434 index ef51b50..514ba37 100644
10435 --- a/arch/x86/include/asm/microcode.h
10436 +++ b/arch/x86/include/asm/microcode.h
10437 @@ -12,13 +12,13 @@ struct device;
10438 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
10439
10440 struct microcode_ops {
10441 - enum ucode_state (*request_microcode_user) (int cpu,
10442 + enum ucode_state (* const request_microcode_user) (int cpu,
10443 const void __user *buf, size_t size);
10444
10445 - enum ucode_state (*request_microcode_fw) (int cpu,
10446 + enum ucode_state (* const request_microcode_fw) (int cpu,
10447 struct device *device);
10448
10449 - void (*microcode_fini_cpu) (int cpu);
10450 + void (* const microcode_fini_cpu) (int cpu);
10451
10452 /*
10453 * The generic 'microcode_core' part guarantees that
10454 @@ -38,18 +38,18 @@ struct ucode_cpu_info {
10455 extern struct ucode_cpu_info ucode_cpu_info[];
10456
10457 #ifdef CONFIG_MICROCODE_INTEL
10458 -extern struct microcode_ops * __init init_intel_microcode(void);
10459 +extern const struct microcode_ops * __init init_intel_microcode(void);
10460 #else
10461 -static inline struct microcode_ops * __init init_intel_microcode(void)
10462 +static inline const struct microcode_ops * __init init_intel_microcode(void)
10463 {
10464 return NULL;
10465 }
10466 #endif /* CONFIG_MICROCODE_INTEL */
10467
10468 #ifdef CONFIG_MICROCODE_AMD
10469 -extern struct microcode_ops * __init init_amd_microcode(void);
10470 +extern const struct microcode_ops * __init init_amd_microcode(void);
10471 #else
10472 -static inline struct microcode_ops * __init init_amd_microcode(void)
10473 +static inline const struct microcode_ops * __init init_amd_microcode(void)
10474 {
10475 return NULL;
10476 }
10477 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10478 index 593e51d..fa69c9a 100644
10479 --- a/arch/x86/include/asm/mman.h
10480 +++ b/arch/x86/include/asm/mman.h
10481 @@ -5,4 +5,14 @@
10482
10483 #include <asm-generic/mman.h>
10484
10485 +#ifdef __KERNEL__
10486 +#ifndef __ASSEMBLY__
10487 +#ifdef CONFIG_X86_32
10488 +#define arch_mmap_check i386_mmap_check
10489 +int i386_mmap_check(unsigned long addr, unsigned long len,
10490 + unsigned long flags);
10491 +#endif
10492 +#endif
10493 +#endif
10494 +
10495 #endif /* _ASM_X86_MMAN_H */
10496 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10497 index 80a1dee..239c67d 100644
10498 --- a/arch/x86/include/asm/mmu.h
10499 +++ b/arch/x86/include/asm/mmu.h
10500 @@ -9,10 +9,23 @@
10501 * we put the segment information here.
10502 */
10503 typedef struct {
10504 - void *ldt;
10505 + struct desc_struct *ldt;
10506 int size;
10507 struct mutex lock;
10508 - void *vdso;
10509 + unsigned long vdso;
10510 +
10511 +#ifdef CONFIG_X86_32
10512 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10513 + unsigned long user_cs_base;
10514 + unsigned long user_cs_limit;
10515 +
10516 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10517 + cpumask_t cpu_user_cs_mask;
10518 +#endif
10519 +
10520 +#endif
10521 +#endif
10522 +
10523 } mm_context_t;
10524
10525 #ifdef CONFIG_SMP
10526 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10527 index 8b5393e..8143173 100644
10528 --- a/arch/x86/include/asm/mmu_context.h
10529 +++ b/arch/x86/include/asm/mmu_context.h
10530 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
10531
10532 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10533 {
10534 +
10535 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10536 + unsigned int i;
10537 + pgd_t *pgd;
10538 +
10539 + pax_open_kernel();
10540 + pgd = get_cpu_pgd(smp_processor_id());
10541 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
10542 + set_pgd_batched(pgd+i, native_make_pgd(0));
10543 + pax_close_kernel();
10544 +#endif
10545 +
10546 #ifdef CONFIG_SMP
10547 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
10548 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
10549 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10550 struct task_struct *tsk)
10551 {
10552 unsigned cpu = smp_processor_id();
10553 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) && defined(CONFIG_SMP)
10554 + int tlbstate = TLBSTATE_OK;
10555 +#endif
10556
10557 if (likely(prev != next)) {
10558 #ifdef CONFIG_SMP
10559 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10560 + tlbstate = percpu_read(cpu_tlbstate.state);
10561 +#endif
10562 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10563 percpu_write(cpu_tlbstate.active_mm, next);
10564 #endif
10565 cpumask_set_cpu(cpu, mm_cpumask(next));
10566
10567 /* Re-load page tables */
10568 +#ifdef CONFIG_PAX_PER_CPU_PGD
10569 + pax_open_kernel();
10570 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10571 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10572 + pax_close_kernel();
10573 + load_cr3(get_cpu_pgd(cpu));
10574 +#else
10575 load_cr3(next->pgd);
10576 +#endif
10577
10578 /* stop flush ipis for the previous mm */
10579 cpumask_clear_cpu(cpu, mm_cpumask(prev));
10580 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10581 */
10582 if (unlikely(prev->context.ldt != next->context.ldt))
10583 load_LDT_nolock(&next->context);
10584 - }
10585 +
10586 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10587 + if (!nx_enabled) {
10588 + smp_mb__before_clear_bit();
10589 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
10590 + smp_mb__after_clear_bit();
10591 + cpu_set(cpu, next->context.cpu_user_cs_mask);
10592 + }
10593 +#endif
10594 +
10595 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10596 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
10597 + prev->context.user_cs_limit != next->context.user_cs_limit))
10598 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10599 #ifdef CONFIG_SMP
10600 + else if (unlikely(tlbstate != TLBSTATE_OK))
10601 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10602 +#endif
10603 +#endif
10604 +
10605 + }
10606 else {
10607 +
10608 +#ifdef CONFIG_PAX_PER_CPU_PGD
10609 + pax_open_kernel();
10610 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10611 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10612 + pax_close_kernel();
10613 + load_cr3(get_cpu_pgd(cpu));
10614 +#endif
10615 +
10616 +#ifdef CONFIG_SMP
10617 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10618 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
10619
10620 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10621 * tlb flush IPI delivery. We must reload CR3
10622 * to make sure to use no freed page tables.
10623 */
10624 +
10625 +#ifndef CONFIG_PAX_PER_CPU_PGD
10626 load_cr3(next->pgd);
10627 +#endif
10628 +
10629 load_LDT_nolock(&next->context);
10630 +
10631 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
10632 + if (!nx_enabled)
10633 + cpu_set(cpu, next->context.cpu_user_cs_mask);
10634 +#endif
10635 +
10636 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10637 +#ifdef CONFIG_PAX_PAGEEXEC
10638 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
10639 +#endif
10640 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10641 +#endif
10642 +
10643 }
10644 +#endif
10645 }
10646 -#endif
10647 }
10648
10649 #define activate_mm(prev, next) \
10650 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
10651 index 3e2ce58..caaf478 100644
10652 --- a/arch/x86/include/asm/module.h
10653 +++ b/arch/x86/include/asm/module.h
10654 @@ -5,6 +5,7 @@
10655
10656 #ifdef CONFIG_X86_64
10657 /* X86_64 does not define MODULE_PROC_FAMILY */
10658 +#define MODULE_PROC_FAMILY ""
10659 #elif defined CONFIG_M386
10660 #define MODULE_PROC_FAMILY "386 "
10661 #elif defined CONFIG_M486
10662 @@ -59,13 +60,26 @@
10663 #error unknown processor family
10664 #endif
10665
10666 -#ifdef CONFIG_X86_32
10667 -# ifdef CONFIG_4KSTACKS
10668 -# define MODULE_STACKSIZE "4KSTACKS "
10669 -# else
10670 -# define MODULE_STACKSIZE ""
10671 -# endif
10672 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
10673 +#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
10674 +#define MODULE_STACKSIZE "4KSTACKS "
10675 +#else
10676 +#define MODULE_STACKSIZE ""
10677 #endif
10678
10679 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
10680 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
10681 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
10682 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
10683 +#else
10684 +#define MODULE_PAX_KERNEXEC ""
10685 +#endif
10686 +
10687 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10688 +#define MODULE_PAX_UDEREF "UDEREF "
10689 +#else
10690 +#define MODULE_PAX_UDEREF ""
10691 +#endif
10692 +
10693 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
10694 +
10695 #endif /* _ASM_X86_MODULE_H */
10696 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
10697 index 7639dbf..e08a58c 100644
10698 --- a/arch/x86/include/asm/page_64_types.h
10699 +++ b/arch/x86/include/asm/page_64_types.h
10700 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
10701
10702 /* duplicated to the one in bootmem.h */
10703 extern unsigned long max_pfn;
10704 -extern unsigned long phys_base;
10705 +extern const unsigned long phys_base;
10706
10707 extern unsigned long __phys_addr(unsigned long);
10708 #define __phys_reloc_hide(x) (x)
10709 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
10710 index efb3899..ef30687 100644
10711 --- a/arch/x86/include/asm/paravirt.h
10712 +++ b/arch/x86/include/asm/paravirt.h
10713 @@ -648,6 +648,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
10714 val);
10715 }
10716
10717 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
10718 +{
10719 + pgdval_t val = native_pgd_val(pgd);
10720 +
10721 + if (sizeof(pgdval_t) > sizeof(long))
10722 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
10723 + val, (u64)val >> 32);
10724 + else
10725 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
10726 + val);
10727 +}
10728 +
10729 static inline void pgd_clear(pgd_t *pgdp)
10730 {
10731 set_pgd(pgdp, __pgd(0));
10732 @@ -729,6 +741,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
10733 pv_mmu_ops.set_fixmap(idx, phys, flags);
10734 }
10735
10736 +#ifdef CONFIG_PAX_KERNEXEC
10737 +static inline unsigned long pax_open_kernel(void)
10738 +{
10739 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
10740 +}
10741 +
10742 +static inline unsigned long pax_close_kernel(void)
10743 +{
10744 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
10745 +}
10746 +#else
10747 +static inline unsigned long pax_open_kernel(void) { return 0; }
10748 +static inline unsigned long pax_close_kernel(void) { return 0; }
10749 +#endif
10750 +
10751 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
10752
10753 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
10754 @@ -945,7 +972,7 @@ extern void default_banner(void);
10755
10756 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
10757 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
10758 -#define PARA_INDIRECT(addr) *%cs:addr
10759 +#define PARA_INDIRECT(addr) *%ss:addr
10760 #endif
10761
10762 #define INTERRUPT_RETURN \
10763 @@ -1022,6 +1049,21 @@ extern void default_banner(void);
10764 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
10765 CLBR_NONE, \
10766 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
10767 +
10768 +#define GET_CR0_INTO_RDI \
10769 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
10770 + mov %rax,%rdi
10771 +
10772 +#define SET_RDI_INTO_CR0 \
10773 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
10774 +
10775 +#define GET_CR3_INTO_RDI \
10776 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
10777 + mov %rax,%rdi
10778 +
10779 +#define SET_RDI_INTO_CR3 \
10780 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
10781 +
10782 #endif /* CONFIG_X86_32 */
10783
10784 #endif /* __ASSEMBLY__ */
10785 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
10786 index 9357473..aeb2de5 100644
10787 --- a/arch/x86/include/asm/paravirt_types.h
10788 +++ b/arch/x86/include/asm/paravirt_types.h
10789 @@ -78,19 +78,19 @@ struct pv_init_ops {
10790 */
10791 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
10792 unsigned long addr, unsigned len);
10793 -};
10794 +} __no_const;
10795
10796
10797 struct pv_lazy_ops {
10798 /* Set deferred update mode, used for batching operations. */
10799 void (*enter)(void);
10800 void (*leave)(void);
10801 -};
10802 +} __no_const;
10803
10804 struct pv_time_ops {
10805 unsigned long long (*sched_clock)(void);
10806 unsigned long (*get_tsc_khz)(void);
10807 -};
10808 +} __no_const;
10809
10810 struct pv_cpu_ops {
10811 /* hooks for various privileged instructions */
10812 @@ -186,7 +186,7 @@ struct pv_cpu_ops {
10813
10814 void (*start_context_switch)(struct task_struct *prev);
10815 void (*end_context_switch)(struct task_struct *next);
10816 -};
10817 +} __no_const;
10818
10819 struct pv_irq_ops {
10820 /*
10821 @@ -217,7 +217,7 @@ struct pv_apic_ops {
10822 unsigned long start_eip,
10823 unsigned long start_esp);
10824 #endif
10825 -};
10826 +} __no_const;
10827
10828 struct pv_mmu_ops {
10829 unsigned long (*read_cr2)(void);
10830 @@ -301,6 +301,7 @@ struct pv_mmu_ops {
10831 struct paravirt_callee_save make_pud;
10832
10833 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
10834 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
10835 #endif /* PAGETABLE_LEVELS == 4 */
10836 #endif /* PAGETABLE_LEVELS >= 3 */
10837
10838 @@ -316,6 +317,12 @@ struct pv_mmu_ops {
10839 an mfn. We can tell which is which from the index. */
10840 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
10841 phys_addr_t phys, pgprot_t flags);
10842 +
10843 +#ifdef CONFIG_PAX_KERNEXEC
10844 + unsigned long (*pax_open_kernel)(void);
10845 + unsigned long (*pax_close_kernel)(void);
10846 +#endif
10847 +
10848 };
10849
10850 struct raw_spinlock;
10851 @@ -326,7 +333,7 @@ struct pv_lock_ops {
10852 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
10853 int (*spin_trylock)(struct raw_spinlock *lock);
10854 void (*spin_unlock)(struct raw_spinlock *lock);
10855 -};
10856 +} __no_const;
10857
10858 /* This contains all the paravirt structures: we get a convenient
10859 * number for each function using the offset which we use to indicate
10860 diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
10861 index b399988..3f47c38 100644
10862 --- a/arch/x86/include/asm/pci_x86.h
10863 +++ b/arch/x86/include/asm/pci_x86.h
10864 @@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct pci_dev *dev);
10865 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
10866
10867 struct pci_raw_ops {
10868 - int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
10869 + int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
10870 int reg, int len, u32 *val);
10871 - int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
10872 + int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
10873 int reg, int len, u32 val);
10874 };
10875
10876 -extern struct pci_raw_ops *raw_pci_ops;
10877 -extern struct pci_raw_ops *raw_pci_ext_ops;
10878 +extern const struct pci_raw_ops *raw_pci_ops;
10879 +extern const struct pci_raw_ops *raw_pci_ext_ops;
10880
10881 -extern struct pci_raw_ops pci_direct_conf1;
10882 +extern const struct pci_raw_ops pci_direct_conf1;
10883 extern bool port_cf9_safe;
10884
10885 /* arch_initcall level */
10886 diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
10887 index b65a36d..50345a4 100644
10888 --- a/arch/x86/include/asm/percpu.h
10889 +++ b/arch/x86/include/asm/percpu.h
10890 @@ -78,6 +78,7 @@ do { \
10891 if (0) { \
10892 T__ tmp__; \
10893 tmp__ = (val); \
10894 + (void)tmp__; \
10895 } \
10896 switch (sizeof(var)) { \
10897 case 1: \
10898 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
10899 index 271de94..ef944d6 100644
10900 --- a/arch/x86/include/asm/pgalloc.h
10901 +++ b/arch/x86/include/asm/pgalloc.h
10902 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
10903 pmd_t *pmd, pte_t *pte)
10904 {
10905 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
10906 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
10907 +}
10908 +
10909 +static inline void pmd_populate_user(struct mm_struct *mm,
10910 + pmd_t *pmd, pte_t *pte)
10911 +{
10912 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
10913 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
10914 }
10915
10916 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
10917 index 2334982..70bc412 100644
10918 --- a/arch/x86/include/asm/pgtable-2level.h
10919 +++ b/arch/x86/include/asm/pgtable-2level.h
10920 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
10921
10922 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
10923 {
10924 + pax_open_kernel();
10925 *pmdp = pmd;
10926 + pax_close_kernel();
10927 }
10928
10929 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
10930 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
10931 index 33927d2..ccde329 100644
10932 --- a/arch/x86/include/asm/pgtable-3level.h
10933 +++ b/arch/x86/include/asm/pgtable-3level.h
10934 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
10935
10936 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
10937 {
10938 + pax_open_kernel();
10939 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
10940 + pax_close_kernel();
10941 }
10942
10943 static inline void native_set_pud(pud_t *pudp, pud_t pud)
10944 {
10945 + pax_open_kernel();
10946 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
10947 + pax_close_kernel();
10948 }
10949
10950 /*
10951 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
10952 index af6fd36..867ff74 100644
10953 --- a/arch/x86/include/asm/pgtable.h
10954 +++ b/arch/x86/include/asm/pgtable.h
10955 @@ -39,6 +39,7 @@ extern struct list_head pgd_list;
10956
10957 #ifndef __PAGETABLE_PUD_FOLDED
10958 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
10959 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
10960 #define pgd_clear(pgd) native_pgd_clear(pgd)
10961 #endif
10962
10963 @@ -74,12 +75,51 @@ extern struct list_head pgd_list;
10964
10965 #define arch_end_context_switch(prev) do {} while(0)
10966
10967 +#define pax_open_kernel() native_pax_open_kernel()
10968 +#define pax_close_kernel() native_pax_close_kernel()
10969 #endif /* CONFIG_PARAVIRT */
10970
10971 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
10972 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
10973 +
10974 +#ifdef CONFIG_PAX_KERNEXEC
10975 +static inline unsigned long native_pax_open_kernel(void)
10976 +{
10977 + unsigned long cr0;
10978 +
10979 + preempt_disable();
10980 + barrier();
10981 + cr0 = read_cr0() ^ X86_CR0_WP;
10982 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
10983 + write_cr0(cr0);
10984 + return cr0 ^ X86_CR0_WP;
10985 +}
10986 +
10987 +static inline unsigned long native_pax_close_kernel(void)
10988 +{
10989 + unsigned long cr0;
10990 +
10991 + cr0 = read_cr0() ^ X86_CR0_WP;
10992 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
10993 + write_cr0(cr0);
10994 + barrier();
10995 + preempt_enable_no_resched();
10996 + return cr0 ^ X86_CR0_WP;
10997 +}
10998 +#else
10999 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
11000 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
11001 +#endif
11002 +
11003 /*
11004 * The following only work if pte_present() is true.
11005 * Undefined behaviour if not..
11006 */
11007 +static inline int pte_user(pte_t pte)
11008 +{
11009 + return pte_val(pte) & _PAGE_USER;
11010 +}
11011 +
11012 static inline int pte_dirty(pte_t pte)
11013 {
11014 return pte_flags(pte) & _PAGE_DIRTY;
11015 @@ -167,9 +207,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
11016 return pte_clear_flags(pte, _PAGE_RW);
11017 }
11018
11019 +static inline pte_t pte_mkread(pte_t pte)
11020 +{
11021 + return __pte(pte_val(pte) | _PAGE_USER);
11022 +}
11023 +
11024 static inline pte_t pte_mkexec(pte_t pte)
11025 {
11026 - return pte_clear_flags(pte, _PAGE_NX);
11027 +#ifdef CONFIG_X86_PAE
11028 + if (__supported_pte_mask & _PAGE_NX)
11029 + return pte_clear_flags(pte, _PAGE_NX);
11030 + else
11031 +#endif
11032 + return pte_set_flags(pte, _PAGE_USER);
11033 +}
11034 +
11035 +static inline pte_t pte_exprotect(pte_t pte)
11036 +{
11037 +#ifdef CONFIG_X86_PAE
11038 + if (__supported_pte_mask & _PAGE_NX)
11039 + return pte_set_flags(pte, _PAGE_NX);
11040 + else
11041 +#endif
11042 + return pte_clear_flags(pte, _PAGE_USER);
11043 }
11044
11045 static inline pte_t pte_mkdirty(pte_t pte)
11046 @@ -302,6 +362,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
11047 #endif
11048
11049 #ifndef __ASSEMBLY__
11050 +
11051 +#ifdef CONFIG_PAX_PER_CPU_PGD
11052 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
11053 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
11054 +{
11055 + return cpu_pgd[cpu];
11056 +}
11057 +#endif
11058 +
11059 #include <linux/mm_types.h>
11060
11061 static inline int pte_none(pte_t pte)
11062 @@ -472,7 +541,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
11063
11064 static inline int pgd_bad(pgd_t pgd)
11065 {
11066 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
11067 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
11068 }
11069
11070 static inline int pgd_none(pgd_t pgd)
11071 @@ -495,7 +564,12 @@ static inline int pgd_none(pgd_t pgd)
11072 * pgd_offset() returns a (pgd_t *)
11073 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
11074 */
11075 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
11076 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
11077 +
11078 +#ifdef CONFIG_PAX_PER_CPU_PGD
11079 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
11080 +#endif
11081 +
11082 /*
11083 * a shortcut which implies the use of the kernel's pgd, instead
11084 * of a process's
11085 @@ -506,6 +580,20 @@ static inline int pgd_none(pgd_t pgd)
11086 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
11087 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
11088
11089 +#ifdef CONFIG_X86_32
11090 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
11091 +#else
11092 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
11093 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
11094 +
11095 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11096 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
11097 +#else
11098 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
11099 +#endif
11100 +
11101 +#endif
11102 +
11103 #ifndef __ASSEMBLY__
11104
11105 extern int direct_gbpages;
11106 @@ -611,11 +699,23 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
11107 * dst and src can be on the same page, but the range must not overlap,
11108 * and must not cross a page boundary.
11109 */
11110 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
11111 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
11112 {
11113 - memcpy(dst, src, count * sizeof(pgd_t));
11114 + pax_open_kernel();
11115 + while (count--)
11116 + *dst++ = *src++;
11117 + pax_close_kernel();
11118 }
11119
11120 +#ifdef CONFIG_PAX_PER_CPU_PGD
11121 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11122 +#endif
11123 +
11124 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11125 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11126 +#else
11127 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
11128 +#endif
11129
11130 #include <asm-generic/pgtable.h>
11131 #endif /* __ASSEMBLY__ */
11132 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
11133 index 750f1bf..971e839 100644
11134 --- a/arch/x86/include/asm/pgtable_32.h
11135 +++ b/arch/x86/include/asm/pgtable_32.h
11136 @@ -26,9 +26,6 @@
11137 struct mm_struct;
11138 struct vm_area_struct;
11139
11140 -extern pgd_t swapper_pg_dir[1024];
11141 -extern pgd_t trampoline_pg_dir[1024];
11142 -
11143 static inline void pgtable_cache_init(void) { }
11144 static inline void check_pgt_cache(void) { }
11145 void paging_init(void);
11146 @@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11147 # include <asm/pgtable-2level.h>
11148 #endif
11149
11150 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
11151 +extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
11152 +#ifdef CONFIG_X86_PAE
11153 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
11154 +#endif
11155 +
11156 #if defined(CONFIG_HIGHPTE)
11157 #define __KM_PTE \
11158 (in_nmi() ? KM_NMI_PTE : \
11159 @@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11160 /* Clear a kernel PTE and flush it from the TLB */
11161 #define kpte_clear_flush(ptep, vaddr) \
11162 do { \
11163 + pax_open_kernel(); \
11164 pte_clear(&init_mm, (vaddr), (ptep)); \
11165 + pax_close_kernel(); \
11166 __flush_tlb_one((vaddr)); \
11167 } while (0)
11168
11169 @@ -85,6 +90,9 @@ do { \
11170
11171 #endif /* !__ASSEMBLY__ */
11172
11173 +#define HAVE_ARCH_UNMAPPED_AREA
11174 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11175 +
11176 /*
11177 * kern_addr_valid() is (1) for FLATMEM and (0) for
11178 * SPARSEMEM and DISCONTIGMEM
11179 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11180 index 5e67c15..12d5c47 100644
11181 --- a/arch/x86/include/asm/pgtable_32_types.h
11182 +++ b/arch/x86/include/asm/pgtable_32_types.h
11183 @@ -8,7 +8,7 @@
11184 */
11185 #ifdef CONFIG_X86_PAE
11186 # include <asm/pgtable-3level_types.h>
11187 -# define PMD_SIZE (1UL << PMD_SHIFT)
11188 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11189 # define PMD_MASK (~(PMD_SIZE - 1))
11190 #else
11191 # include <asm/pgtable-2level_types.h>
11192 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11193 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11194 #endif
11195
11196 +#ifdef CONFIG_PAX_KERNEXEC
11197 +#ifndef __ASSEMBLY__
11198 +extern unsigned char MODULES_EXEC_VADDR[];
11199 +extern unsigned char MODULES_EXEC_END[];
11200 +#endif
11201 +#include <asm/boot.h>
11202 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11203 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11204 +#else
11205 +#define ktla_ktva(addr) (addr)
11206 +#define ktva_ktla(addr) (addr)
11207 +#endif
11208 +
11209 #define MODULES_VADDR VMALLOC_START
11210 #define MODULES_END VMALLOC_END
11211 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11212 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11213 index c57a301..6b414ff 100644
11214 --- a/arch/x86/include/asm/pgtable_64.h
11215 +++ b/arch/x86/include/asm/pgtable_64.h
11216 @@ -16,10 +16,14 @@
11217
11218 extern pud_t level3_kernel_pgt[512];
11219 extern pud_t level3_ident_pgt[512];
11220 +extern pud_t level3_vmalloc_start_pgt[512];
11221 +extern pud_t level3_vmalloc_end_pgt[512];
11222 +extern pud_t level3_vmemmap_pgt[512];
11223 +extern pud_t level2_vmemmap_pgt[512];
11224 extern pmd_t level2_kernel_pgt[512];
11225 extern pmd_t level2_fixmap_pgt[512];
11226 -extern pmd_t level2_ident_pgt[512];
11227 -extern pgd_t init_level4_pgt[];
11228 +extern pmd_t level2_ident_pgt[512*2];
11229 +extern pgd_t init_level4_pgt[512];
11230
11231 #define swapper_pg_dir init_level4_pgt
11232
11233 @@ -74,7 +78,9 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
11234
11235 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11236 {
11237 + pax_open_kernel();
11238 *pmdp = pmd;
11239 + pax_close_kernel();
11240 }
11241
11242 static inline void native_pmd_clear(pmd_t *pmd)
11243 @@ -94,6 +100,13 @@ static inline void native_pud_clear(pud_t *pud)
11244
11245 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11246 {
11247 + pax_open_kernel();
11248 + *pgdp = pgd;
11249 + pax_close_kernel();
11250 +}
11251 +
11252 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11253 +{
11254 *pgdp = pgd;
11255 }
11256
11257 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11258 index 766ea16..5b96cb3 100644
11259 --- a/arch/x86/include/asm/pgtable_64_types.h
11260 +++ b/arch/x86/include/asm/pgtable_64_types.h
11261 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11262 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11263 #define MODULES_END _AC(0xffffffffff000000, UL)
11264 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11265 +#define MODULES_EXEC_VADDR MODULES_VADDR
11266 +#define MODULES_EXEC_END MODULES_END
11267 +
11268 +#define ktla_ktva(addr) (addr)
11269 +#define ktva_ktla(addr) (addr)
11270
11271 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11272 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11273 index d1f4a76..2f46ba1 100644
11274 --- a/arch/x86/include/asm/pgtable_types.h
11275 +++ b/arch/x86/include/asm/pgtable_types.h
11276 @@ -16,12 +16,11 @@
11277 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11278 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11279 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11280 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11281 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11282 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11283 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11284 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11285 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11286 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11287 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11288 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11289
11290 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11291 @@ -39,7 +38,6 @@
11292 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11293 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11294 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11295 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11296 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11297 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11298 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11299 @@ -55,8 +53,10 @@
11300
11301 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11302 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11303 -#else
11304 +#elif defined(CONFIG_KMEMCHECK)
11305 #define _PAGE_NX (_AT(pteval_t, 0))
11306 +#else
11307 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11308 #endif
11309
11310 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11311 @@ -93,6 +93,9 @@
11312 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11313 _PAGE_ACCESSED)
11314
11315 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
11316 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
11317 +
11318 #define __PAGE_KERNEL_EXEC \
11319 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11320 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11321 @@ -103,8 +106,8 @@
11322 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11323 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11324 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11325 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11326 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
11327 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11328 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
11329 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11330 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
11331 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
11332 @@ -163,8 +166,8 @@
11333 * bits are combined, this will alow user to access the high address mapped
11334 * VDSO in the presence of CONFIG_COMPAT_VDSO
11335 */
11336 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11337 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11338 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11339 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11340 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11341 #endif
11342
11343 @@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11344 {
11345 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11346 }
11347 +#endif
11348
11349 +#if PAGETABLE_LEVELS == 3
11350 +#include <asm-generic/pgtable-nopud.h>
11351 +#endif
11352 +
11353 +#if PAGETABLE_LEVELS == 2
11354 +#include <asm-generic/pgtable-nopmd.h>
11355 +#endif
11356 +
11357 +#ifndef __ASSEMBLY__
11358 #if PAGETABLE_LEVELS > 3
11359 typedef struct { pudval_t pud; } pud_t;
11360
11361 @@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11362 return pud.pud;
11363 }
11364 #else
11365 -#include <asm-generic/pgtable-nopud.h>
11366 -
11367 static inline pudval_t native_pud_val(pud_t pud)
11368 {
11369 return native_pgd_val(pud.pgd);
11370 @@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11371 return pmd.pmd;
11372 }
11373 #else
11374 -#include <asm-generic/pgtable-nopmd.h>
11375 -
11376 static inline pmdval_t native_pmd_val(pmd_t pmd)
11377 {
11378 return native_pgd_val(pmd.pud.pgd);
11379 @@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
11380
11381 extern pteval_t __supported_pte_mask;
11382 extern void set_nx(void);
11383 +
11384 +#ifdef CONFIG_X86_32
11385 +#ifdef CONFIG_X86_PAE
11386 extern int nx_enabled;
11387 +#else
11388 +#define nx_enabled (0)
11389 +#endif
11390 +#else
11391 +#define nx_enabled (1)
11392 +#endif
11393
11394 #define pgprot_writecombine pgprot_writecombine
11395 extern pgprot_t pgprot_writecombine(pgprot_t prot);
11396 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11397 index fa04dea..5f823fc 100644
11398 --- a/arch/x86/include/asm/processor.h
11399 +++ b/arch/x86/include/asm/processor.h
11400 @@ -272,7 +272,7 @@ struct tss_struct {
11401
11402 } ____cacheline_aligned;
11403
11404 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11405 +extern struct tss_struct init_tss[NR_CPUS];
11406
11407 /*
11408 * Save the original ist values for checking stack pointers during debugging
11409 @@ -911,11 +911,18 @@ static inline void spin_lock_prefetch(const void *x)
11410 */
11411 #define TASK_SIZE PAGE_OFFSET
11412 #define TASK_SIZE_MAX TASK_SIZE
11413 +
11414 +#ifdef CONFIG_PAX_SEGMEXEC
11415 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11416 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11417 +#else
11418 #define STACK_TOP TASK_SIZE
11419 -#define STACK_TOP_MAX STACK_TOP
11420 +#endif
11421 +
11422 +#define STACK_TOP_MAX TASK_SIZE
11423
11424 #define INIT_THREAD { \
11425 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11426 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11427 .vm86_info = NULL, \
11428 .sysenter_cs = __KERNEL_CS, \
11429 .io_bitmap_ptr = NULL, \
11430 @@ -929,7 +936,7 @@ static inline void spin_lock_prefetch(const void *x)
11431 */
11432 #define INIT_TSS { \
11433 .x86_tss = { \
11434 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11435 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11436 .ss0 = __KERNEL_DS, \
11437 .ss1 = __KERNEL_CS, \
11438 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11439 @@ -940,11 +947,7 @@ static inline void spin_lock_prefetch(const void *x)
11440 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11441
11442 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11443 -#define KSTK_TOP(info) \
11444 -({ \
11445 - unsigned long *__ptr = (unsigned long *)(info); \
11446 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11447 -})
11448 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11449
11450 /*
11451 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11452 @@ -959,7 +962,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11453 #define task_pt_regs(task) \
11454 ({ \
11455 struct pt_regs *__regs__; \
11456 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11457 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11458 __regs__ - 1; \
11459 })
11460
11461 @@ -969,13 +972,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11462 /*
11463 * User space process size. 47bits minus one guard page.
11464 */
11465 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11466 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11467
11468 /* This decides where the kernel will search for a free chunk of vm
11469 * space during mmap's.
11470 */
11471 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11472 - 0xc0000000 : 0xFFFFe000)
11473 + 0xc0000000 : 0xFFFFf000)
11474
11475 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
11476 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11477 @@ -986,11 +989,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11478 #define STACK_TOP_MAX TASK_SIZE_MAX
11479
11480 #define INIT_THREAD { \
11481 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11482 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11483 }
11484
11485 #define INIT_TSS { \
11486 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11487 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11488 }
11489
11490 /*
11491 @@ -1012,6 +1015,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11492 */
11493 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11494
11495 +#ifdef CONFIG_PAX_SEGMEXEC
11496 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11497 +#endif
11498 +
11499 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11500
11501 /* Get/set a process' ability to use the timestamp counter instruction */
11502 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11503 index 0f0d908..f2e3da2 100644
11504 --- a/arch/x86/include/asm/ptrace.h
11505 +++ b/arch/x86/include/asm/ptrace.h
11506 @@ -151,28 +151,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
11507 }
11508
11509 /*
11510 - * user_mode_vm(regs) determines whether a register set came from user mode.
11511 + * user_mode(regs) determines whether a register set came from user mode.
11512 * This is true if V8086 mode was enabled OR if the register set was from
11513 * protected mode with RPL-3 CS value. This tricky test checks that with
11514 * one comparison. Many places in the kernel can bypass this full check
11515 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11516 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11517 + * be used.
11518 */
11519 -static inline int user_mode(struct pt_regs *regs)
11520 +static inline int user_mode_novm(struct pt_regs *regs)
11521 {
11522 #ifdef CONFIG_X86_32
11523 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11524 #else
11525 - return !!(regs->cs & 3);
11526 + return !!(regs->cs & SEGMENT_RPL_MASK);
11527 #endif
11528 }
11529
11530 -static inline int user_mode_vm(struct pt_regs *regs)
11531 +static inline int user_mode(struct pt_regs *regs)
11532 {
11533 #ifdef CONFIG_X86_32
11534 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11535 USER_RPL;
11536 #else
11537 - return user_mode(regs);
11538 + return user_mode_novm(regs);
11539 #endif
11540 }
11541
11542 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
11543 index 562d4fd..6e39df1 100644
11544 --- a/arch/x86/include/asm/reboot.h
11545 +++ b/arch/x86/include/asm/reboot.h
11546 @@ -6,19 +6,19 @@
11547 struct pt_regs;
11548
11549 struct machine_ops {
11550 - void (*restart)(char *cmd);
11551 - void (*halt)(void);
11552 - void (*power_off)(void);
11553 + void (* __noreturn restart)(char *cmd);
11554 + void (* __noreturn halt)(void);
11555 + void (* __noreturn power_off)(void);
11556 void (*shutdown)(void);
11557 void (*crash_shutdown)(struct pt_regs *);
11558 - void (*emergency_restart)(void);
11559 -};
11560 + void (* __noreturn emergency_restart)(void);
11561 +} __no_const;
11562
11563 extern struct machine_ops machine_ops;
11564
11565 void native_machine_crash_shutdown(struct pt_regs *regs);
11566 void native_machine_shutdown(void);
11567 -void machine_real_restart(const unsigned char *code, int length);
11568 +void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
11569
11570 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
11571 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
11572 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
11573 index 606ede1..dbfff37 100644
11574 --- a/arch/x86/include/asm/rwsem.h
11575 +++ b/arch/x86/include/asm/rwsem.h
11576 @@ -118,6 +118,14 @@ static inline void __down_read(struct rw_semaphore *sem)
11577 {
11578 asm volatile("# beginning down_read\n\t"
11579 LOCK_PREFIX _ASM_INC "(%1)\n\t"
11580 +
11581 +#ifdef CONFIG_PAX_REFCOUNT
11582 + "jno 0f\n"
11583 + LOCK_PREFIX _ASM_DEC "(%1)\n\t"
11584 + "int $4\n0:\n"
11585 + _ASM_EXTABLE(0b, 0b)
11586 +#endif
11587 +
11588 /* adds 0x00000001, returns the old value */
11589 " jns 1f\n"
11590 " call call_rwsem_down_read_failed\n"
11591 @@ -139,6 +147,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
11592 "1:\n\t"
11593 " mov %1,%2\n\t"
11594 " add %3,%2\n\t"
11595 +
11596 +#ifdef CONFIG_PAX_REFCOUNT
11597 + "jno 0f\n"
11598 + "sub %3,%2\n"
11599 + "int $4\n0:\n"
11600 + _ASM_EXTABLE(0b, 0b)
11601 +#endif
11602 +
11603 " jle 2f\n\t"
11604 LOCK_PREFIX " cmpxchg %2,%0\n\t"
11605 " jnz 1b\n\t"
11606 @@ -160,6 +176,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
11607 tmp = RWSEM_ACTIVE_WRITE_BIAS;
11608 asm volatile("# beginning down_write\n\t"
11609 LOCK_PREFIX " xadd %1,(%2)\n\t"
11610 +
11611 +#ifdef CONFIG_PAX_REFCOUNT
11612 + "jno 0f\n"
11613 + "mov %1,(%2)\n"
11614 + "int $4\n0:\n"
11615 + _ASM_EXTABLE(0b, 0b)
11616 +#endif
11617 +
11618 /* subtract 0x0000ffff, returns the old value */
11619 " test %1,%1\n\t"
11620 /* was the count 0 before? */
11621 @@ -198,6 +222,14 @@ static inline void __up_read(struct rw_semaphore *sem)
11622 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
11623 asm volatile("# beginning __up_read\n\t"
11624 LOCK_PREFIX " xadd %1,(%2)\n\t"
11625 +
11626 +#ifdef CONFIG_PAX_REFCOUNT
11627 + "jno 0f\n"
11628 + "mov %1,(%2)\n"
11629 + "int $4\n0:\n"
11630 + _ASM_EXTABLE(0b, 0b)
11631 +#endif
11632 +
11633 /* subtracts 1, returns the old value */
11634 " jns 1f\n\t"
11635 " call call_rwsem_wake\n"
11636 @@ -216,6 +248,14 @@ static inline void __up_write(struct rw_semaphore *sem)
11637 rwsem_count_t tmp;
11638 asm volatile("# beginning __up_write\n\t"
11639 LOCK_PREFIX " xadd %1,(%2)\n\t"
11640 +
11641 +#ifdef CONFIG_PAX_REFCOUNT
11642 + "jno 0f\n"
11643 + "mov %1,(%2)\n"
11644 + "int $4\n0:\n"
11645 + _ASM_EXTABLE(0b, 0b)
11646 +#endif
11647 +
11648 /* tries to transition
11649 0xffff0001 -> 0x00000000 */
11650 " jz 1f\n"
11651 @@ -234,6 +274,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11652 {
11653 asm volatile("# beginning __downgrade_write\n\t"
11654 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
11655 +
11656 +#ifdef CONFIG_PAX_REFCOUNT
11657 + "jno 0f\n"
11658 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
11659 + "int $4\n0:\n"
11660 + _ASM_EXTABLE(0b, 0b)
11661 +#endif
11662 +
11663 /*
11664 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
11665 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
11666 @@ -253,7 +301,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11667 static inline void rwsem_atomic_add(rwsem_count_t delta,
11668 struct rw_semaphore *sem)
11669 {
11670 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
11671 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
11672 +
11673 +#ifdef CONFIG_PAX_REFCOUNT
11674 + "jno 0f\n"
11675 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
11676 + "int $4\n0:\n"
11677 + _ASM_EXTABLE(0b, 0b)
11678 +#endif
11679 +
11680 : "+m" (sem->count)
11681 : "er" (delta));
11682 }
11683 @@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
11684 {
11685 rwsem_count_t tmp = delta;
11686
11687 - asm volatile(LOCK_PREFIX "xadd %0,%1"
11688 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
11689 +
11690 +#ifdef CONFIG_PAX_REFCOUNT
11691 + "jno 0f\n"
11692 + "mov %0,%1\n"
11693 + "int $4\n0:\n"
11694 + _ASM_EXTABLE(0b, 0b)
11695 +#endif
11696 +
11697 : "+r" (tmp), "+m" (sem->count)
11698 : : "memory");
11699
11700 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
11701 index 14e0ed8..7f7dd5e 100644
11702 --- a/arch/x86/include/asm/segment.h
11703 +++ b/arch/x86/include/asm/segment.h
11704 @@ -62,10 +62,15 @@
11705 * 26 - ESPFIX small SS
11706 * 27 - per-cpu [ offset to per-cpu data area ]
11707 * 28 - stack_canary-20 [ for stack protector ]
11708 - * 29 - unused
11709 - * 30 - unused
11710 + * 29 - PCI BIOS CS
11711 + * 30 - PCI BIOS DS
11712 * 31 - TSS for double fault handler
11713 */
11714 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
11715 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
11716 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
11717 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
11718 +
11719 #define GDT_ENTRY_TLS_MIN 6
11720 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
11721
11722 @@ -77,6 +82,8 @@
11723
11724 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
11725
11726 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
11727 +
11728 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
11729
11730 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
11731 @@ -88,7 +95,7 @@
11732 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
11733 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
11734
11735 -#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
11736 +#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
11737 #ifdef CONFIG_SMP
11738 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
11739 #else
11740 @@ -102,6 +109,12 @@
11741 #define __KERNEL_STACK_CANARY 0
11742 #endif
11743
11744 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
11745 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
11746 +
11747 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
11748 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
11749 +
11750 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
11751
11752 /*
11753 @@ -139,7 +152,7 @@
11754 */
11755
11756 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
11757 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
11758 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
11759
11760
11761 #else
11762 @@ -163,6 +176,8 @@
11763 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
11764 #define __USER32_DS __USER_DS
11765
11766 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
11767 +
11768 #define GDT_ENTRY_TSS 8 /* needs two entries */
11769 #define GDT_ENTRY_LDT 10 /* needs two entries */
11770 #define GDT_ENTRY_TLS_MIN 12
11771 @@ -183,6 +198,7 @@
11772 #endif
11773
11774 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
11775 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
11776 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
11777 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
11778 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
11779 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
11780 index 4c2f63c..5685db2 100644
11781 --- a/arch/x86/include/asm/smp.h
11782 +++ b/arch/x86/include/asm/smp.h
11783 @@ -24,7 +24,7 @@ extern unsigned int num_processors;
11784 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
11785 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
11786 DECLARE_PER_CPU(u16, cpu_llc_id);
11787 -DECLARE_PER_CPU(int, cpu_number);
11788 +DECLARE_PER_CPU(unsigned int, cpu_number);
11789
11790 static inline struct cpumask *cpu_sibling_mask(int cpu)
11791 {
11792 @@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
11793 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
11794
11795 /* Static state in head.S used to set up a CPU */
11796 -extern struct {
11797 - void *sp;
11798 - unsigned short ss;
11799 -} stack_start;
11800 +extern unsigned long stack_start; /* Initial stack pointer address */
11801
11802 struct smp_ops {
11803 void (*smp_prepare_boot_cpu)(void);
11804 @@ -60,7 +57,7 @@ struct smp_ops {
11805
11806 void (*send_call_func_ipi)(const struct cpumask *mask);
11807 void (*send_call_func_single_ipi)(int cpu);
11808 -};
11809 +} __no_const;
11810
11811 /* Globals due to paravirt */
11812 extern void set_cpu_sibling_map(int cpu);
11813 @@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitdata;
11814 extern int safe_smp_processor_id(void);
11815
11816 #elif defined(CONFIG_X86_64_SMP)
11817 -#define raw_smp_processor_id() (percpu_read(cpu_number))
11818 -
11819 -#define stack_smp_processor_id() \
11820 -({ \
11821 - struct thread_info *ti; \
11822 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
11823 - ti->cpu; \
11824 -})
11825 +#define raw_smp_processor_id() (percpu_read(cpu_number))
11826 +#define stack_smp_processor_id() raw_smp_processor_id()
11827 #define safe_smp_processor_id() smp_processor_id()
11828
11829 #endif
11830 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
11831 index 4e77853..4359783 100644
11832 --- a/arch/x86/include/asm/spinlock.h
11833 +++ b/arch/x86/include/asm/spinlock.h
11834 @@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(raw_rwlock_t *lock)
11835 static inline void __raw_read_lock(raw_rwlock_t *rw)
11836 {
11837 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
11838 +
11839 +#ifdef CONFIG_PAX_REFCOUNT
11840 + "jno 0f\n"
11841 + LOCK_PREFIX " addl $1,(%0)\n"
11842 + "int $4\n0:\n"
11843 + _ASM_EXTABLE(0b, 0b)
11844 +#endif
11845 +
11846 "jns 1f\n"
11847 "call __read_lock_failed\n\t"
11848 "1:\n"
11849 @@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
11850 static inline void __raw_write_lock(raw_rwlock_t *rw)
11851 {
11852 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
11853 +
11854 +#ifdef CONFIG_PAX_REFCOUNT
11855 + "jno 0f\n"
11856 + LOCK_PREFIX " addl %1,(%0)\n"
11857 + "int $4\n0:\n"
11858 + _ASM_EXTABLE(0b, 0b)
11859 +#endif
11860 +
11861 "jz 1f\n"
11862 "call __write_lock_failed\n\t"
11863 "1:\n"
11864 @@ -286,12 +302,29 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
11865
11866 static inline void __raw_read_unlock(raw_rwlock_t *rw)
11867 {
11868 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
11869 + asm volatile(LOCK_PREFIX "incl %0\n"
11870 +
11871 +#ifdef CONFIG_PAX_REFCOUNT
11872 + "jno 0f\n"
11873 + LOCK_PREFIX "decl %0\n"
11874 + "int $4\n0:\n"
11875 + _ASM_EXTABLE(0b, 0b)
11876 +#endif
11877 +
11878 + :"+m" (rw->lock) : : "memory");
11879 }
11880
11881 static inline void __raw_write_unlock(raw_rwlock_t *rw)
11882 {
11883 - asm volatile(LOCK_PREFIX "addl %1, %0"
11884 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
11885 +
11886 +#ifdef CONFIG_PAX_REFCOUNT
11887 + "jno 0f\n"
11888 + LOCK_PREFIX "subl %1, %0\n"
11889 + "int $4\n0:\n"
11890 + _ASM_EXTABLE(0b, 0b)
11891 +#endif
11892 +
11893 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
11894 }
11895
11896 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
11897 index 1575177..cb23f52 100644
11898 --- a/arch/x86/include/asm/stackprotector.h
11899 +++ b/arch/x86/include/asm/stackprotector.h
11900 @@ -48,7 +48,7 @@
11901 * head_32 for boot CPU and setup_per_cpu_areas() for others.
11902 */
11903 #define GDT_STACK_CANARY_INIT \
11904 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
11905 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
11906
11907 /*
11908 * Initialize the stackprotector canary value.
11909 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
11910
11911 static inline void load_stack_canary_segment(void)
11912 {
11913 -#ifdef CONFIG_X86_32
11914 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
11915 asm volatile ("mov %0, %%gs" : : "r" (0));
11916 #endif
11917 }
11918 diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
11919 index e0fbf29..858ef4a 100644
11920 --- a/arch/x86/include/asm/system.h
11921 +++ b/arch/x86/include/asm/system.h
11922 @@ -132,7 +132,7 @@ do { \
11923 "thread_return:\n\t" \
11924 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
11925 __switch_canary \
11926 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
11927 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
11928 "movq %%rax,%%rdi\n\t" \
11929 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
11930 "jnz ret_from_fork\n\t" \
11931 @@ -143,7 +143,7 @@ do { \
11932 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
11933 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
11934 [_tif_fork] "i" (_TIF_FORK), \
11935 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
11936 + [thread_info] "m" (per_cpu_var(current_tinfo)), \
11937 [current_task] "m" (per_cpu_var(current_task)) \
11938 __switch_canary_iparam \
11939 : "memory", "cc" __EXTRA_CLOBBER)
11940 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
11941 {
11942 unsigned long __limit;
11943 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
11944 - return __limit + 1;
11945 + return __limit;
11946 }
11947
11948 static inline void native_clts(void)
11949 @@ -340,12 +340,12 @@ void enable_hlt(void);
11950
11951 void cpu_idle_wait(void);
11952
11953 -extern unsigned long arch_align_stack(unsigned long sp);
11954 +#define arch_align_stack(x) ((x) & ~0xfUL)
11955 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
11956
11957 void default_idle(void);
11958
11959 -void stop_this_cpu(void *dummy);
11960 +void stop_this_cpu(void *dummy) __noreturn;
11961
11962 /*
11963 * Force strict CPU ordering.
11964 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
11965 index 19c3ce4..8962535 100644
11966 --- a/arch/x86/include/asm/thread_info.h
11967 +++ b/arch/x86/include/asm/thread_info.h
11968 @@ -10,6 +10,7 @@
11969 #include <linux/compiler.h>
11970 #include <asm/page.h>
11971 #include <asm/types.h>
11972 +#include <asm/percpu.h>
11973
11974 /*
11975 * low level task data that entry.S needs immediate access to
11976 @@ -24,7 +25,6 @@ struct exec_domain;
11977 #include <asm/atomic.h>
11978
11979 struct thread_info {
11980 - struct task_struct *task; /* main task structure */
11981 struct exec_domain *exec_domain; /* execution domain */
11982 __u32 flags; /* low level flags */
11983 __u32 status; /* thread synchronous flags */
11984 @@ -34,18 +34,12 @@ struct thread_info {
11985 mm_segment_t addr_limit;
11986 struct restart_block restart_block;
11987 void __user *sysenter_return;
11988 -#ifdef CONFIG_X86_32
11989 - unsigned long previous_esp; /* ESP of the previous stack in
11990 - case of nested (IRQ) stacks
11991 - */
11992 - __u8 supervisor_stack[0];
11993 -#endif
11994 + unsigned long lowest_stack;
11995 int uaccess_err;
11996 };
11997
11998 -#define INIT_THREAD_INFO(tsk) \
11999 +#define INIT_THREAD_INFO \
12000 { \
12001 - .task = &tsk, \
12002 .exec_domain = &default_exec_domain, \
12003 .flags = 0, \
12004 .cpu = 0, \
12005 @@ -56,7 +50,7 @@ struct thread_info {
12006 }, \
12007 }
12008
12009 -#define init_thread_info (init_thread_union.thread_info)
12010 +#define init_thread_info (init_thread_union.stack)
12011 #define init_stack (init_thread_union.stack)
12012
12013 #else /* !__ASSEMBLY__ */
12014 @@ -163,45 +157,40 @@ struct thread_info {
12015 #define alloc_thread_info(tsk) \
12016 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
12017
12018 -#ifdef CONFIG_X86_32
12019 -
12020 -#define STACK_WARN (THREAD_SIZE/8)
12021 -/*
12022 - * macros/functions for gaining access to the thread information structure
12023 - *
12024 - * preempt_count needs to be 1 initially, until the scheduler is functional.
12025 - */
12026 -#ifndef __ASSEMBLY__
12027 -
12028 -
12029 -/* how to get the current stack pointer from C */
12030 -register unsigned long current_stack_pointer asm("esp") __used;
12031 -
12032 -/* how to get the thread information struct from C */
12033 -static inline struct thread_info *current_thread_info(void)
12034 -{
12035 - return (struct thread_info *)
12036 - (current_stack_pointer & ~(THREAD_SIZE - 1));
12037 -}
12038 -
12039 -#else /* !__ASSEMBLY__ */
12040 -
12041 +#ifdef __ASSEMBLY__
12042 /* how to get the thread information struct from ASM */
12043 #define GET_THREAD_INFO(reg) \
12044 - movl $-THREAD_SIZE, reg; \
12045 - andl %esp, reg
12046 + mov PER_CPU_VAR(current_tinfo), reg
12047
12048 /* use this one if reg already contains %esp */
12049 -#define GET_THREAD_INFO_WITH_ESP(reg) \
12050 - andl $-THREAD_SIZE, reg
12051 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
12052 +#else
12053 +/* how to get the thread information struct from C */
12054 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
12055 +
12056 +static __always_inline struct thread_info *current_thread_info(void)
12057 +{
12058 + return percpu_read_stable(current_tinfo);
12059 +}
12060 +#endif
12061 +
12062 +#ifdef CONFIG_X86_32
12063 +
12064 +#define STACK_WARN (THREAD_SIZE/8)
12065 +/*
12066 + * macros/functions for gaining access to the thread information structure
12067 + *
12068 + * preempt_count needs to be 1 initially, until the scheduler is functional.
12069 + */
12070 +#ifndef __ASSEMBLY__
12071 +
12072 +/* how to get the current stack pointer from C */
12073 +register unsigned long current_stack_pointer asm("esp") __used;
12074
12075 #endif
12076
12077 #else /* X86_32 */
12078
12079 -#include <asm/percpu.h>
12080 -#define KERNEL_STACK_OFFSET (5*8)
12081 -
12082 /*
12083 * macros/functions for gaining access to the thread information structure
12084 * preempt_count needs to be 1 initially, until the scheduler is functional.
12085 @@ -209,21 +198,8 @@ static inline struct thread_info *current_thread_info(void)
12086 #ifndef __ASSEMBLY__
12087 DECLARE_PER_CPU(unsigned long, kernel_stack);
12088
12089 -static inline struct thread_info *current_thread_info(void)
12090 -{
12091 - struct thread_info *ti;
12092 - ti = (void *)(percpu_read_stable(kernel_stack) +
12093 - KERNEL_STACK_OFFSET - THREAD_SIZE);
12094 - return ti;
12095 -}
12096 -
12097 -#else /* !__ASSEMBLY__ */
12098 -
12099 -/* how to get the thread information struct from ASM */
12100 -#define GET_THREAD_INFO(reg) \
12101 - movq PER_CPU_VAR(kernel_stack),reg ; \
12102 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
12103 -
12104 +/* how to get the current stack pointer from C */
12105 +register unsigned long current_stack_pointer asm("rsp") __used;
12106 #endif
12107
12108 #endif /* !X86_32 */
12109 @@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
12110 extern void free_thread_info(struct thread_info *ti);
12111 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
12112 #define arch_task_cache_init arch_task_cache_init
12113 +
12114 +#define __HAVE_THREAD_FUNCTIONS
12115 +#define task_thread_info(task) (&(task)->tinfo)
12116 +#define task_stack_page(task) ((task)->stack)
12117 +#define setup_thread_stack(p, org) do {} while (0)
12118 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12119 +
12120 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
12121 +extern struct task_struct *alloc_task_struct(void);
12122 +extern void free_task_struct(struct task_struct *);
12123 +
12124 #endif
12125 #endif /* _ASM_X86_THREAD_INFO_H */
12126 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12127 index 61c5874..8a046e9 100644
12128 --- a/arch/x86/include/asm/uaccess.h
12129 +++ b/arch/x86/include/asm/uaccess.h
12130 @@ -8,12 +8,15 @@
12131 #include <linux/thread_info.h>
12132 #include <linux/prefetch.h>
12133 #include <linux/string.h>
12134 +#include <linux/sched.h>
12135 #include <asm/asm.h>
12136 #include <asm/page.h>
12137
12138 #define VERIFY_READ 0
12139 #define VERIFY_WRITE 1
12140
12141 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
12142 +
12143 /*
12144 * The fs value determines whether argument validity checking should be
12145 * performed or not. If get_fs() == USER_DS, checking is performed, with
12146 @@ -29,7 +32,12 @@
12147
12148 #define get_ds() (KERNEL_DS)
12149 #define get_fs() (current_thread_info()->addr_limit)
12150 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12151 +void __set_fs(mm_segment_t x);
12152 +void set_fs(mm_segment_t x);
12153 +#else
12154 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12155 +#endif
12156
12157 #define segment_eq(a, b) ((a).seg == (b).seg)
12158
12159 @@ -77,7 +85,33 @@
12160 * checks that the pointer is in the user space range - after calling
12161 * this function, memory access functions may still return -EFAULT.
12162 */
12163 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12164 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12165 +#define access_ok(type, addr, size) \
12166 +({ \
12167 + long __size = size; \
12168 + unsigned long __addr = (unsigned long)addr; \
12169 + unsigned long __addr_ao = __addr & PAGE_MASK; \
12170 + unsigned long __end_ao = __addr + __size - 1; \
12171 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12172 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12173 + while(__addr_ao <= __end_ao) { \
12174 + char __c_ao; \
12175 + __addr_ao += PAGE_SIZE; \
12176 + if (__size > PAGE_SIZE) \
12177 + cond_resched(); \
12178 + if (__get_user(__c_ao, (char __user *)__addr)) \
12179 + break; \
12180 + if (type != VERIFY_WRITE) { \
12181 + __addr = __addr_ao; \
12182 + continue; \
12183 + } \
12184 + if (__put_user(__c_ao, (char __user *)__addr)) \
12185 + break; \
12186 + __addr = __addr_ao; \
12187 + } \
12188 + } \
12189 + __ret_ao; \
12190 +})
12191
12192 /*
12193 * The exception table consists of pairs of addresses: the first is the
12194 @@ -183,12 +217,20 @@ extern int __get_user_bad(void);
12195 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12196 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12197
12198 -
12199 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12200 +#define __copyuser_seg "gs;"
12201 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12202 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12203 +#else
12204 +#define __copyuser_seg
12205 +#define __COPYUSER_SET_ES
12206 +#define __COPYUSER_RESTORE_ES
12207 +#endif
12208
12209 #ifdef CONFIG_X86_32
12210 #define __put_user_asm_u64(x, addr, err, errret) \
12211 - asm volatile("1: movl %%eax,0(%2)\n" \
12212 - "2: movl %%edx,4(%2)\n" \
12213 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12214 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12215 "3:\n" \
12216 ".section .fixup,\"ax\"\n" \
12217 "4: movl %3,%0\n" \
12218 @@ -200,8 +242,8 @@ extern int __get_user_bad(void);
12219 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12220
12221 #define __put_user_asm_ex_u64(x, addr) \
12222 - asm volatile("1: movl %%eax,0(%1)\n" \
12223 - "2: movl %%edx,4(%1)\n" \
12224 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12225 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12226 "3:\n" \
12227 _ASM_EXTABLE(1b, 2b - 1b) \
12228 _ASM_EXTABLE(2b, 3b - 2b) \
12229 @@ -253,7 +295,7 @@ extern void __put_user_8(void);
12230 __typeof__(*(ptr)) __pu_val; \
12231 __chk_user_ptr(ptr); \
12232 might_fault(); \
12233 - __pu_val = x; \
12234 + __pu_val = (x); \
12235 switch (sizeof(*(ptr))) { \
12236 case 1: \
12237 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12238 @@ -374,7 +416,7 @@ do { \
12239 } while (0)
12240
12241 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12242 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12243 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12244 "2:\n" \
12245 ".section .fixup,\"ax\"\n" \
12246 "3: mov %3,%0\n" \
12247 @@ -382,7 +424,7 @@ do { \
12248 " jmp 2b\n" \
12249 ".previous\n" \
12250 _ASM_EXTABLE(1b, 3b) \
12251 - : "=r" (err), ltype(x) \
12252 + : "=r" (err), ltype (x) \
12253 : "m" (__m(addr)), "i" (errret), "0" (err))
12254
12255 #define __get_user_size_ex(x, ptr, size) \
12256 @@ -407,7 +449,7 @@ do { \
12257 } while (0)
12258
12259 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12260 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12261 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12262 "2:\n" \
12263 _ASM_EXTABLE(1b, 2b - 1b) \
12264 : ltype(x) : "m" (__m(addr)))
12265 @@ -424,13 +466,24 @@ do { \
12266 int __gu_err; \
12267 unsigned long __gu_val; \
12268 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12269 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
12270 + (x) = (__typeof__(*(ptr)))__gu_val; \
12271 __gu_err; \
12272 })
12273
12274 /* FIXME: this hack is definitely wrong -AK */
12275 struct __large_struct { unsigned long buf[100]; };
12276 -#define __m(x) (*(struct __large_struct __user *)(x))
12277 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12278 +#define ____m(x) \
12279 +({ \
12280 + unsigned long ____x = (unsigned long)(x); \
12281 + if (____x < PAX_USER_SHADOW_BASE) \
12282 + ____x += PAX_USER_SHADOW_BASE; \
12283 + (void __user *)____x; \
12284 +})
12285 +#else
12286 +#define ____m(x) (x)
12287 +#endif
12288 +#define __m(x) (*(struct __large_struct __user *)____m(x))
12289
12290 /*
12291 * Tell gcc we read from memory instead of writing: this is because
12292 @@ -438,7 +491,7 @@ struct __large_struct { unsigned long buf[100]; };
12293 * aliasing issues.
12294 */
12295 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12296 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12297 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12298 "2:\n" \
12299 ".section .fixup,\"ax\"\n" \
12300 "3: mov %3,%0\n" \
12301 @@ -446,10 +499,10 @@ struct __large_struct { unsigned long buf[100]; };
12302 ".previous\n" \
12303 _ASM_EXTABLE(1b, 3b) \
12304 : "=r"(err) \
12305 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12306 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12307
12308 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12309 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12310 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12311 "2:\n" \
12312 _ASM_EXTABLE(1b, 2b - 1b) \
12313 : : ltype(x), "m" (__m(addr)))
12314 @@ -488,8 +541,12 @@ struct __large_struct { unsigned long buf[100]; };
12315 * On error, the variable @x is set to zero.
12316 */
12317
12318 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12319 +#define __get_user(x, ptr) get_user((x), (ptr))
12320 +#else
12321 #define __get_user(x, ptr) \
12322 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12323 +#endif
12324
12325 /**
12326 * __put_user: - Write a simple value into user space, with less checking.
12327 @@ -511,8 +568,12 @@ struct __large_struct { unsigned long buf[100]; };
12328 * Returns zero on success, or -EFAULT on error.
12329 */
12330
12331 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12332 +#define __put_user(x, ptr) put_user((x), (ptr))
12333 +#else
12334 #define __put_user(x, ptr) \
12335 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12336 +#endif
12337
12338 #define __get_user_unaligned __get_user
12339 #define __put_user_unaligned __put_user
12340 @@ -530,7 +591,7 @@ struct __large_struct { unsigned long buf[100]; };
12341 #define get_user_ex(x, ptr) do { \
12342 unsigned long __gue_val; \
12343 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12344 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
12345 + (x) = (__typeof__(*(ptr)))__gue_val; \
12346 } while (0)
12347
12348 #ifdef CONFIG_X86_WP_WORKS_OK
12349 @@ -567,6 +628,7 @@ extern struct movsl_mask {
12350
12351 #define ARCH_HAS_NOCACHE_UACCESS 1
12352
12353 +#define ARCH_HAS_SORT_EXTABLE
12354 #ifdef CONFIG_X86_32
12355 # include "uaccess_32.h"
12356 #else
12357 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12358 index 632fb44..e30e334 100644
12359 --- a/arch/x86/include/asm/uaccess_32.h
12360 +++ b/arch/x86/include/asm/uaccess_32.h
12361 @@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12362 static __always_inline unsigned long __must_check
12363 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12364 {
12365 + pax_track_stack();
12366 +
12367 + if ((long)n < 0)
12368 + return n;
12369 +
12370 if (__builtin_constant_p(n)) {
12371 unsigned long ret;
12372
12373 @@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12374 return ret;
12375 }
12376 }
12377 + if (!__builtin_constant_p(n))
12378 + check_object_size(from, n, true);
12379 return __copy_to_user_ll(to, from, n);
12380 }
12381
12382 @@ -83,12 +90,16 @@ static __always_inline unsigned long __must_check
12383 __copy_to_user(void __user *to, const void *from, unsigned long n)
12384 {
12385 might_fault();
12386 +
12387 return __copy_to_user_inatomic(to, from, n);
12388 }
12389
12390 static __always_inline unsigned long
12391 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12392 {
12393 + if ((long)n < 0)
12394 + return n;
12395 +
12396 /* Avoid zeroing the tail if the copy fails..
12397 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12398 * but as the zeroing behaviour is only significant when n is not
12399 @@ -138,6 +149,12 @@ static __always_inline unsigned long
12400 __copy_from_user(void *to, const void __user *from, unsigned long n)
12401 {
12402 might_fault();
12403 +
12404 + pax_track_stack();
12405 +
12406 + if ((long)n < 0)
12407 + return n;
12408 +
12409 if (__builtin_constant_p(n)) {
12410 unsigned long ret;
12411
12412 @@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
12413 return ret;
12414 }
12415 }
12416 + if (!__builtin_constant_p(n))
12417 + check_object_size(to, n, false);
12418 return __copy_from_user_ll(to, from, n);
12419 }
12420
12421 @@ -160,6 +179,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
12422 const void __user *from, unsigned long n)
12423 {
12424 might_fault();
12425 +
12426 + if ((long)n < 0)
12427 + return n;
12428 +
12429 if (__builtin_constant_p(n)) {
12430 unsigned long ret;
12431
12432 @@ -182,14 +205,62 @@ static __always_inline unsigned long
12433 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
12434 unsigned long n)
12435 {
12436 - return __copy_from_user_ll_nocache_nozero(to, from, n);
12437 + if ((long)n < 0)
12438 + return n;
12439 +
12440 + return __copy_from_user_ll_nocache_nozero(to, from, n);
12441 +}
12442 +
12443 +/**
12444 + * copy_to_user: - Copy a block of data into user space.
12445 + * @to: Destination address, in user space.
12446 + * @from: Source address, in kernel space.
12447 + * @n: Number of bytes to copy.
12448 + *
12449 + * Context: User context only. This function may sleep.
12450 + *
12451 + * Copy data from kernel space to user space.
12452 + *
12453 + * Returns number of bytes that could not be copied.
12454 + * On success, this will be zero.
12455 + */
12456 +static __always_inline unsigned long __must_check
12457 +copy_to_user(void __user *to, const void *from, unsigned long n)
12458 +{
12459 + if (access_ok(VERIFY_WRITE, to, n))
12460 + n = __copy_to_user(to, from, n);
12461 + return n;
12462 +}
12463 +
12464 +/**
12465 + * copy_from_user: - Copy a block of data from user space.
12466 + * @to: Destination address, in kernel space.
12467 + * @from: Source address, in user space.
12468 + * @n: Number of bytes to copy.
12469 + *
12470 + * Context: User context only. This function may sleep.
12471 + *
12472 + * Copy data from user space to kernel space.
12473 + *
12474 + * Returns number of bytes that could not be copied.
12475 + * On success, this will be zero.
12476 + *
12477 + * If some data could not be copied, this function will pad the copied
12478 + * data to the requested size using zero bytes.
12479 + */
12480 +static __always_inline unsigned long __must_check
12481 +copy_from_user(void *to, const void __user *from, unsigned long n)
12482 +{
12483 + if (access_ok(VERIFY_READ, from, n))
12484 + n = __copy_from_user(to, from, n);
12485 + else if ((long)n > 0) {
12486 + if (!__builtin_constant_p(n))
12487 + check_object_size(to, n, false);
12488 + memset(to, 0, n);
12489 + }
12490 + return n;
12491 }
12492
12493 -unsigned long __must_check copy_to_user(void __user *to,
12494 - const void *from, unsigned long n);
12495 -unsigned long __must_check copy_from_user(void *to,
12496 - const void __user *from,
12497 - unsigned long n);
12498 long __must_check strncpy_from_user(char *dst, const char __user *src,
12499 long count);
12500 long __must_check __strncpy_from_user(char *dst,
12501 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
12502 index db24b21..f595ae7 100644
12503 --- a/arch/x86/include/asm/uaccess_64.h
12504 +++ b/arch/x86/include/asm/uaccess_64.h
12505 @@ -9,6 +9,9 @@
12506 #include <linux/prefetch.h>
12507 #include <linux/lockdep.h>
12508 #include <asm/page.h>
12509 +#include <asm/pgtable.h>
12510 +
12511 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
12512
12513 /*
12514 * Copy To/From Userspace
12515 @@ -16,116 +19,205 @@
12516
12517 /* Handles exceptions in both to and from, but doesn't do access_ok */
12518 __must_check unsigned long
12519 -copy_user_generic(void *to, const void *from, unsigned len);
12520 +copy_user_generic(void *to, const void *from, unsigned long len);
12521
12522 __must_check unsigned long
12523 -copy_to_user(void __user *to, const void *from, unsigned len);
12524 -__must_check unsigned long
12525 -copy_from_user(void *to, const void __user *from, unsigned len);
12526 -__must_check unsigned long
12527 -copy_in_user(void __user *to, const void __user *from, unsigned len);
12528 +copy_in_user(void __user *to, const void __user *from, unsigned long len);
12529
12530 static __always_inline __must_check
12531 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
12532 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
12533 {
12534 - int ret = 0;
12535 + unsigned ret = 0;
12536
12537 might_fault();
12538 - if (!__builtin_constant_p(size))
12539 - return copy_user_generic(dst, (__force void *)src, size);
12540 +
12541 + if (size > INT_MAX)
12542 + return size;
12543 +
12544 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12545 + if (!__access_ok(VERIFY_READ, src, size))
12546 + return size;
12547 +#endif
12548 +
12549 + if (!__builtin_constant_p(size)) {
12550 + check_object_size(dst, size, false);
12551 +
12552 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12553 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12554 + src += PAX_USER_SHADOW_BASE;
12555 +#endif
12556 +
12557 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
12558 + }
12559 switch (size) {
12560 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
12561 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
12562 ret, "b", "b", "=q", 1);
12563 return ret;
12564 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
12565 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
12566 ret, "w", "w", "=r", 2);
12567 return ret;
12568 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
12569 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
12570 ret, "l", "k", "=r", 4);
12571 return ret;
12572 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
12573 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12574 ret, "q", "", "=r", 8);
12575 return ret;
12576 case 10:
12577 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12578 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12579 ret, "q", "", "=r", 10);
12580 if (unlikely(ret))
12581 return ret;
12582 __get_user_asm(*(u16 *)(8 + (char *)dst),
12583 - (u16 __user *)(8 + (char __user *)src),
12584 + (const u16 __user *)(8 + (const char __user *)src),
12585 ret, "w", "w", "=r", 2);
12586 return ret;
12587 case 16:
12588 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12589 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12590 ret, "q", "", "=r", 16);
12591 if (unlikely(ret))
12592 return ret;
12593 __get_user_asm(*(u64 *)(8 + (char *)dst),
12594 - (u64 __user *)(8 + (char __user *)src),
12595 + (const u64 __user *)(8 + (const char __user *)src),
12596 ret, "q", "", "=r", 8);
12597 return ret;
12598 default:
12599 - return copy_user_generic(dst, (__force void *)src, size);
12600 +
12601 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12602 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12603 + src += PAX_USER_SHADOW_BASE;
12604 +#endif
12605 +
12606 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
12607 }
12608 }
12609
12610 static __always_inline __must_check
12611 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
12612 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
12613 {
12614 - int ret = 0;
12615 + unsigned ret = 0;
12616
12617 might_fault();
12618 - if (!__builtin_constant_p(size))
12619 - return copy_user_generic((__force void *)dst, src, size);
12620 +
12621 + pax_track_stack();
12622 +
12623 + if (size > INT_MAX)
12624 + return size;
12625 +
12626 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12627 + if (!__access_ok(VERIFY_WRITE, dst, size))
12628 + return size;
12629 +#endif
12630 +
12631 + if (!__builtin_constant_p(size)) {
12632 + check_object_size(src, size, true);
12633 +
12634 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12635 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12636 + dst += PAX_USER_SHADOW_BASE;
12637 +#endif
12638 +
12639 + return copy_user_generic((__force_kernel void *)dst, src, size);
12640 + }
12641 switch (size) {
12642 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
12643 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
12644 ret, "b", "b", "iq", 1);
12645 return ret;
12646 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
12647 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
12648 ret, "w", "w", "ir", 2);
12649 return ret;
12650 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
12651 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
12652 ret, "l", "k", "ir", 4);
12653 return ret;
12654 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
12655 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12656 ret, "q", "", "er", 8);
12657 return ret;
12658 case 10:
12659 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12660 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12661 ret, "q", "", "er", 10);
12662 if (unlikely(ret))
12663 return ret;
12664 asm("":::"memory");
12665 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
12666 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
12667 ret, "w", "w", "ir", 2);
12668 return ret;
12669 case 16:
12670 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12671 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12672 ret, "q", "", "er", 16);
12673 if (unlikely(ret))
12674 return ret;
12675 asm("":::"memory");
12676 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
12677 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
12678 ret, "q", "", "er", 8);
12679 return ret;
12680 default:
12681 - return copy_user_generic((__force void *)dst, src, size);
12682 +
12683 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12684 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12685 + dst += PAX_USER_SHADOW_BASE;
12686 +#endif
12687 +
12688 + return copy_user_generic((__force_kernel void *)dst, src, size);
12689 + }
12690 +}
12691 +
12692 +static __always_inline __must_check
12693 +unsigned long copy_to_user(void __user *to, const void *from, unsigned long len)
12694 +{
12695 + if (access_ok(VERIFY_WRITE, to, len))
12696 + len = __copy_to_user(to, from, len);
12697 + return len;
12698 +}
12699 +
12700 +static __always_inline __must_check
12701 +unsigned long copy_from_user(void *to, const void __user *from, unsigned long len)
12702 +{
12703 + might_fault();
12704 +
12705 + if (access_ok(VERIFY_READ, from, len))
12706 + len = __copy_from_user(to, from, len);
12707 + else if (len < INT_MAX) {
12708 + if (!__builtin_constant_p(len))
12709 + check_object_size(to, len, false);
12710 + memset(to, 0, len);
12711 }
12712 + return len;
12713 }
12714
12715 static __always_inline __must_check
12716 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12717 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
12718 {
12719 - int ret = 0;
12720 + unsigned ret = 0;
12721
12722 might_fault();
12723 - if (!__builtin_constant_p(size))
12724 - return copy_user_generic((__force void *)dst,
12725 - (__force void *)src, size);
12726 +
12727 + pax_track_stack();
12728 +
12729 + if (size > INT_MAX)
12730 + return size;
12731 +
12732 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12733 + if (!__access_ok(VERIFY_READ, src, size))
12734 + return size;
12735 + if (!__access_ok(VERIFY_WRITE, dst, size))
12736 + return size;
12737 +#endif
12738 +
12739 + if (!__builtin_constant_p(size)) {
12740 +
12741 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12742 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12743 + src += PAX_USER_SHADOW_BASE;
12744 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12745 + dst += PAX_USER_SHADOW_BASE;
12746 +#endif
12747 +
12748 + return copy_user_generic((__force_kernel void *)dst,
12749 + (__force_kernel const void *)src, size);
12750 + }
12751 switch (size) {
12752 case 1: {
12753 u8 tmp;
12754 - __get_user_asm(tmp, (u8 __user *)src,
12755 + __get_user_asm(tmp, (const u8 __user *)src,
12756 ret, "b", "b", "=q", 1);
12757 if (likely(!ret))
12758 __put_user_asm(tmp, (u8 __user *)dst,
12759 @@ -134,7 +226,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12760 }
12761 case 2: {
12762 u16 tmp;
12763 - __get_user_asm(tmp, (u16 __user *)src,
12764 + __get_user_asm(tmp, (const u16 __user *)src,
12765 ret, "w", "w", "=r", 2);
12766 if (likely(!ret))
12767 __put_user_asm(tmp, (u16 __user *)dst,
12768 @@ -144,7 +236,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12769
12770 case 4: {
12771 u32 tmp;
12772 - __get_user_asm(tmp, (u32 __user *)src,
12773 + __get_user_asm(tmp, (const u32 __user *)src,
12774 ret, "l", "k", "=r", 4);
12775 if (likely(!ret))
12776 __put_user_asm(tmp, (u32 __user *)dst,
12777 @@ -153,7 +245,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12778 }
12779 case 8: {
12780 u64 tmp;
12781 - __get_user_asm(tmp, (u64 __user *)src,
12782 + __get_user_asm(tmp, (const u64 __user *)src,
12783 ret, "q", "", "=r", 8);
12784 if (likely(!ret))
12785 __put_user_asm(tmp, (u64 __user *)dst,
12786 @@ -161,8 +253,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12787 return ret;
12788 }
12789 default:
12790 - return copy_user_generic((__force void *)dst,
12791 - (__force void *)src, size);
12792 +
12793 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12794 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12795 + src += PAX_USER_SHADOW_BASE;
12796 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12797 + dst += PAX_USER_SHADOW_BASE;
12798 +#endif
12799 +
12800 + return copy_user_generic((__force_kernel void *)dst,
12801 + (__force_kernel const void *)src, size);
12802 }
12803 }
12804
12805 @@ -176,33 +276,75 @@ __must_check long strlen_user(const char __user *str);
12806 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
12807 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
12808
12809 -__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
12810 - unsigned size);
12811 +static __must_check __always_inline unsigned long
12812 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
12813 +{
12814 + pax_track_stack();
12815 +
12816 + if (size > INT_MAX)
12817 + return size;
12818 +
12819 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12820 + if (!__access_ok(VERIFY_READ, src, size))
12821 + return size;
12822
12823 -static __must_check __always_inline int
12824 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
12825 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12826 + src += PAX_USER_SHADOW_BASE;
12827 +#endif
12828 +
12829 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
12830 +}
12831 +
12832 +static __must_check __always_inline unsigned long
12833 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
12834 {
12835 - return copy_user_generic((__force void *)dst, src, size);
12836 + if (size > INT_MAX)
12837 + return size;
12838 +
12839 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12840 + if (!__access_ok(VERIFY_WRITE, dst, size))
12841 + return size;
12842 +
12843 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12844 + dst += PAX_USER_SHADOW_BASE;
12845 +#endif
12846 +
12847 + return copy_user_generic((__force_kernel void *)dst, src, size);
12848 }
12849
12850 -extern long __copy_user_nocache(void *dst, const void __user *src,
12851 - unsigned size, int zerorest);
12852 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
12853 + unsigned long size, int zerorest);
12854
12855 -static inline int
12856 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
12857 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
12858 {
12859 might_sleep();
12860 +
12861 + if (size > INT_MAX)
12862 + return size;
12863 +
12864 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12865 + if (!__access_ok(VERIFY_READ, src, size))
12866 + return size;
12867 +#endif
12868 +
12869 return __copy_user_nocache(dst, src, size, 1);
12870 }
12871
12872 -static inline int
12873 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
12874 - unsigned size)
12875 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
12876 + unsigned long size)
12877 {
12878 + if (size > INT_MAX)
12879 + return size;
12880 +
12881 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12882 + if (!__access_ok(VERIFY_READ, src, size))
12883 + return size;
12884 +#endif
12885 +
12886 return __copy_user_nocache(dst, src, size, 0);
12887 }
12888
12889 -unsigned long
12890 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
12891 +extern unsigned long
12892 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
12893
12894 #endif /* _ASM_X86_UACCESS_64_H */
12895 diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
12896 index 9064052..786cfbc 100644
12897 --- a/arch/x86/include/asm/vdso.h
12898 +++ b/arch/x86/include/asm/vdso.h
12899 @@ -25,7 +25,7 @@ extern const char VDSO32_PRELINK[];
12900 #define VDSO32_SYMBOL(base, name) \
12901 ({ \
12902 extern const char VDSO32_##name[]; \
12903 - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
12904 + (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
12905 })
12906 #endif
12907
12908 diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
12909 index 3d61e20..9507180 100644
12910 --- a/arch/x86/include/asm/vgtod.h
12911 +++ b/arch/x86/include/asm/vgtod.h
12912 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
12913 int sysctl_enabled;
12914 struct timezone sys_tz;
12915 struct { /* extract of a clocksource struct */
12916 + char name[8];
12917 cycle_t (*vread)(void);
12918 cycle_t cycle_last;
12919 cycle_t mask;
12920 diff --git a/arch/x86/include/asm/vmi.h b/arch/x86/include/asm/vmi.h
12921 index 61e08c0..b0da582 100644
12922 --- a/arch/x86/include/asm/vmi.h
12923 +++ b/arch/x86/include/asm/vmi.h
12924 @@ -191,6 +191,7 @@ struct vrom_header {
12925 u8 reserved[96]; /* Reserved for headers */
12926 char vmi_init[8]; /* VMI_Init jump point */
12927 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
12928 + char rom_data[8048]; /* rest of the option ROM */
12929 } __attribute__((packed));
12930
12931 struct pnp_header {
12932 diff --git a/arch/x86/include/asm/vmi_time.h b/arch/x86/include/asm/vmi_time.h
12933 index c6e0bee..fcb9f74 100644
12934 --- a/arch/x86/include/asm/vmi_time.h
12935 +++ b/arch/x86/include/asm/vmi_time.h
12936 @@ -43,7 +43,7 @@ extern struct vmi_timer_ops {
12937 int (*wallclock_updated)(void);
12938 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
12939 void (*cancel_alarm)(u32 flags);
12940 -} vmi_timer_ops;
12941 +} __no_const vmi_timer_ops;
12942
12943 /* Prototypes */
12944 extern void __init vmi_time_init(void);
12945 diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
12946 index d0983d2..1f7c9e9 100644
12947 --- a/arch/x86/include/asm/vsyscall.h
12948 +++ b/arch/x86/include/asm/vsyscall.h
12949 @@ -15,9 +15,10 @@ enum vsyscall_num {
12950
12951 #ifdef __KERNEL__
12952 #include <linux/seqlock.h>
12953 +#include <linux/getcpu.h>
12954 +#include <linux/time.h>
12955
12956 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
12957 -#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
12958
12959 /* Definitions for CONFIG_GENERIC_TIME definitions */
12960 #define __section_vsyscall_gtod_data __attribute__ \
12961 @@ -31,7 +32,6 @@ enum vsyscall_num {
12962 #define VGETCPU_LSL 2
12963
12964 extern int __vgetcpu_mode;
12965 -extern volatile unsigned long __jiffies;
12966
12967 /* kernel space (writeable) */
12968 extern int vgetcpu_mode;
12969 @@ -39,6 +39,9 @@ extern struct timezone sys_tz;
12970
12971 extern void map_vsyscall(void);
12972
12973 +extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
12974 +extern time_t vtime(time_t *t);
12975 +extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
12976 #endif /* __KERNEL__ */
12977
12978 #endif /* _ASM_X86_VSYSCALL_H */
12979 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
12980 index 2c756fd..3377e37 100644
12981 --- a/arch/x86/include/asm/x86_init.h
12982 +++ b/arch/x86/include/asm/x86_init.h
12983 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
12984 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
12985 void (*find_smp_config)(unsigned int reserve);
12986 void (*get_smp_config)(unsigned int early);
12987 -};
12988 +} __no_const;
12989
12990 /**
12991 * struct x86_init_resources - platform specific resource related ops
12992 @@ -42,7 +42,7 @@ struct x86_init_resources {
12993 void (*probe_roms)(void);
12994 void (*reserve_resources)(void);
12995 char *(*memory_setup)(void);
12996 -};
12997 +} __no_const;
12998
12999 /**
13000 * struct x86_init_irqs - platform specific interrupt setup
13001 @@ -55,7 +55,7 @@ struct x86_init_irqs {
13002 void (*pre_vector_init)(void);
13003 void (*intr_init)(void);
13004 void (*trap_init)(void);
13005 -};
13006 +} __no_const;
13007
13008 /**
13009 * struct x86_init_oem - oem platform specific customizing functions
13010 @@ -65,7 +65,7 @@ struct x86_init_irqs {
13011 struct x86_init_oem {
13012 void (*arch_setup)(void);
13013 void (*banner)(void);
13014 -};
13015 +} __no_const;
13016
13017 /**
13018 * struct x86_init_paging - platform specific paging functions
13019 @@ -75,7 +75,7 @@ struct x86_init_oem {
13020 struct x86_init_paging {
13021 void (*pagetable_setup_start)(pgd_t *base);
13022 void (*pagetable_setup_done)(pgd_t *base);
13023 -};
13024 +} __no_const;
13025
13026 /**
13027 * struct x86_init_timers - platform specific timer setup
13028 @@ -88,7 +88,7 @@ struct x86_init_timers {
13029 void (*setup_percpu_clockev)(void);
13030 void (*tsc_pre_init)(void);
13031 void (*timer_init)(void);
13032 -};
13033 +} __no_const;
13034
13035 /**
13036 * struct x86_init_ops - functions for platform specific setup
13037 @@ -101,7 +101,7 @@ struct x86_init_ops {
13038 struct x86_init_oem oem;
13039 struct x86_init_paging paging;
13040 struct x86_init_timers timers;
13041 -};
13042 +} __no_const;
13043
13044 /**
13045 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
13046 @@ -109,7 +109,7 @@ struct x86_init_ops {
13047 */
13048 struct x86_cpuinit_ops {
13049 void (*setup_percpu_clockev)(void);
13050 -};
13051 +} __no_const;
13052
13053 /**
13054 * struct x86_platform_ops - platform specific runtime functions
13055 @@ -121,7 +121,7 @@ struct x86_platform_ops {
13056 unsigned long (*calibrate_tsc)(void);
13057 unsigned long (*get_wallclock)(void);
13058 int (*set_wallclock)(unsigned long nowtime);
13059 -};
13060 +} __no_const;
13061
13062 extern struct x86_init_ops x86_init;
13063 extern struct x86_cpuinit_ops x86_cpuinit;
13064 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
13065 index 727acc1..554f3eb 100644
13066 --- a/arch/x86/include/asm/xsave.h
13067 +++ b/arch/x86/include/asm/xsave.h
13068 @@ -56,6 +56,12 @@ static inline int xrstor_checking(struct xsave_struct *fx)
13069 static inline int xsave_user(struct xsave_struct __user *buf)
13070 {
13071 int err;
13072 +
13073 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13074 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
13075 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
13076 +#endif
13077 +
13078 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
13079 "2:\n"
13080 ".section .fixup,\"ax\"\n"
13081 @@ -78,10 +84,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13082 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
13083 {
13084 int err;
13085 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
13086 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
13087 u32 lmask = mask;
13088 u32 hmask = mask >> 32;
13089
13090 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13091 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13092 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13093 +#endif
13094 +
13095 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13096 "2:\n"
13097 ".section .fixup,\"ax\"\n"
13098 diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
13099 index 6a564ac..9b1340c 100644
13100 --- a/arch/x86/kernel/acpi/realmode/Makefile
13101 +++ b/arch/x86/kernel/acpi/realmode/Makefile
13102 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
13103 $(call cc-option, -fno-stack-protector) \
13104 $(call cc-option, -mpreferred-stack-boundary=2)
13105 KBUILD_CFLAGS += $(call cc-option, -m32)
13106 +ifdef CONSTIFY_PLUGIN
13107 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13108 +endif
13109 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13110 GCOV_PROFILE := n
13111
13112 diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13113 index 580b4e2..d4129e4 100644
13114 --- a/arch/x86/kernel/acpi/realmode/wakeup.S
13115 +++ b/arch/x86/kernel/acpi/realmode/wakeup.S
13116 @@ -91,6 +91,9 @@ _start:
13117 /* Do any other stuff... */
13118
13119 #ifndef CONFIG_64BIT
13120 + /* Recheck NX bit overrides (64bit path does this in trampoline) */
13121 + call verify_cpu
13122 +
13123 /* This could also be done in C code... */
13124 movl pmode_cr3, %eax
13125 movl %eax, %cr3
13126 @@ -104,7 +107,7 @@ _start:
13127 movl %eax, %ecx
13128 orl %edx, %ecx
13129 jz 1f
13130 - movl $0xc0000080, %ecx
13131 + mov $MSR_EFER, %ecx
13132 wrmsr
13133 1:
13134
13135 @@ -114,6 +117,7 @@ _start:
13136 movl pmode_cr0, %eax
13137 movl %eax, %cr0
13138 jmp pmode_return
13139 +# include "../../verify_cpu.S"
13140 #else
13141 pushw $0
13142 pushw trampoline_segment
13143 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13144 index ca93638..7042f24 100644
13145 --- a/arch/x86/kernel/acpi/sleep.c
13146 +++ b/arch/x86/kernel/acpi/sleep.c
13147 @@ -11,11 +11,12 @@
13148 #include <linux/cpumask.h>
13149 #include <asm/segment.h>
13150 #include <asm/desc.h>
13151 +#include <asm/e820.h>
13152
13153 #include "realmode/wakeup.h"
13154 #include "sleep.h"
13155
13156 -unsigned long acpi_wakeup_address;
13157 +unsigned long acpi_wakeup_address = 0x2000;
13158 unsigned long acpi_realmode_flags;
13159
13160 /* address in low memory of the wakeup routine. */
13161 @@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
13162 #else /* CONFIG_64BIT */
13163 header->trampoline_segment = setup_trampoline() >> 4;
13164 #ifdef CONFIG_SMP
13165 - stack_start.sp = temp_stack + sizeof(temp_stack);
13166 + stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13167 +
13168 + pax_open_kernel();
13169 early_gdt_descr.address =
13170 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13171 + pax_close_kernel();
13172 +
13173 initial_gs = per_cpu_offset(smp_processor_id());
13174 #endif
13175 initial_code = (unsigned long)wakeup_long64;
13176 @@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
13177 return;
13178 }
13179
13180 - acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
13181 -
13182 - if (!acpi_realmode) {
13183 - printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
13184 - return;
13185 - }
13186 -
13187 - acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
13188 + reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
13189 + acpi_realmode = (unsigned long)__va(acpi_wakeup_address);;
13190 }
13191
13192
13193 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13194 index 8ded418..079961e 100644
13195 --- a/arch/x86/kernel/acpi/wakeup_32.S
13196 +++ b/arch/x86/kernel/acpi/wakeup_32.S
13197 @@ -30,13 +30,11 @@ wakeup_pmode_return:
13198 # and restore the stack ... but you need gdt for this to work
13199 movl saved_context_esp, %esp
13200
13201 - movl %cs:saved_magic, %eax
13202 - cmpl $0x12345678, %eax
13203 + cmpl $0x12345678, saved_magic
13204 jne bogus_magic
13205
13206 # jump to place where we left off
13207 - movl saved_eip, %eax
13208 - jmp *%eax
13209 + jmp *(saved_eip)
13210
13211 bogus_magic:
13212 jmp bogus_magic
13213 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13214 index de7353c..075da5f 100644
13215 --- a/arch/x86/kernel/alternative.c
13216 +++ b/arch/x86/kernel/alternative.c
13217 @@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13218
13219 BUG_ON(p->len > MAX_PATCH_LEN);
13220 /* prep the buffer with the original instructions */
13221 - memcpy(insnbuf, p->instr, p->len);
13222 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13223 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13224 (unsigned long)p->instr, p->len);
13225
13226 @@ -475,7 +475,7 @@ void __init alternative_instructions(void)
13227 if (smp_alt_once)
13228 free_init_pages("SMP alternatives",
13229 (unsigned long)__smp_locks,
13230 - (unsigned long)__smp_locks_end);
13231 + PAGE_ALIGN((unsigned long)__smp_locks_end));
13232
13233 restart_nmi();
13234 }
13235 @@ -492,13 +492,17 @@ void __init alternative_instructions(void)
13236 * instructions. And on the local CPU you need to be protected again NMI or MCE
13237 * handlers seeing an inconsistent instruction while you patch.
13238 */
13239 -static void *__init_or_module text_poke_early(void *addr, const void *opcode,
13240 +static void *__kprobes text_poke_early(void *addr, const void *opcode,
13241 size_t len)
13242 {
13243 unsigned long flags;
13244 local_irq_save(flags);
13245 - memcpy(addr, opcode, len);
13246 +
13247 + pax_open_kernel();
13248 + memcpy(ktla_ktva(addr), opcode, len);
13249 sync_core();
13250 + pax_close_kernel();
13251 +
13252 local_irq_restore(flags);
13253 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13254 that causes hangs on some VIA CPUs. */
13255 @@ -520,35 +524,21 @@ static void *__init_or_module text_poke_early(void *addr, const void *opcode,
13256 */
13257 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13258 {
13259 - unsigned long flags;
13260 - char *vaddr;
13261 + unsigned char *vaddr = ktla_ktva(addr);
13262 struct page *pages[2];
13263 - int i;
13264 + size_t i;
13265
13266 if (!core_kernel_text((unsigned long)addr)) {
13267 - pages[0] = vmalloc_to_page(addr);
13268 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13269 + pages[0] = vmalloc_to_page(vaddr);
13270 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13271 } else {
13272 - pages[0] = virt_to_page(addr);
13273 + pages[0] = virt_to_page(vaddr);
13274 WARN_ON(!PageReserved(pages[0]));
13275 - pages[1] = virt_to_page(addr + PAGE_SIZE);
13276 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13277 }
13278 BUG_ON(!pages[0]);
13279 - local_irq_save(flags);
13280 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13281 - if (pages[1])
13282 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13283 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13284 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13285 - clear_fixmap(FIX_TEXT_POKE0);
13286 - if (pages[1])
13287 - clear_fixmap(FIX_TEXT_POKE1);
13288 - local_flush_tlb();
13289 - sync_core();
13290 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
13291 - that causes hangs on some VIA CPUs. */
13292 + text_poke_early(addr, opcode, len);
13293 for (i = 0; i < len; i++)
13294 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13295 - local_irq_restore(flags);
13296 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13297 return addr;
13298 }
13299 diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
13300 index 3a44b75..1601800 100644
13301 --- a/arch/x86/kernel/amd_iommu.c
13302 +++ b/arch/x86/kernel/amd_iommu.c
13303 @@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(void)
13304 }
13305 }
13306
13307 -static struct dma_map_ops amd_iommu_dma_ops = {
13308 +static const struct dma_map_ops amd_iommu_dma_ops = {
13309 .alloc_coherent = alloc_coherent,
13310 .free_coherent = free_coherent,
13311 .map_page = map_page,
13312 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13313 index 1d2d670..8e3f477 100644
13314 --- a/arch/x86/kernel/apic/apic.c
13315 +++ b/arch/x86/kernel/apic/apic.c
13316 @@ -170,7 +170,7 @@ int first_system_vector = 0xfe;
13317 /*
13318 * Debug level, exported for io_apic.c
13319 */
13320 -unsigned int apic_verbosity;
13321 +int apic_verbosity;
13322
13323 int pic_mode;
13324
13325 @@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs *regs)
13326 apic_write(APIC_ESR, 0);
13327 v1 = apic_read(APIC_ESR);
13328 ack_APIC_irq();
13329 - atomic_inc(&irq_err_count);
13330 + atomic_inc_unchecked(&irq_err_count);
13331
13332 /*
13333 * Here is what the APIC error bits mean:
13334 @@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(void)
13335 u16 *bios_cpu_apicid;
13336 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
13337
13338 + pax_track_stack();
13339 +
13340 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
13341 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
13342
13343 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13344 index 8928d97..f799cea 100644
13345 --- a/arch/x86/kernel/apic/io_apic.c
13346 +++ b/arch/x86/kernel/apic/io_apic.c
13347 @@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapic_entries(void)
13348 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
13349 GFP_ATOMIC);
13350 if (!ioapic_entries)
13351 - return 0;
13352 + return NULL;
13353
13354 for (apic = 0; apic < nr_ioapics; apic++) {
13355 ioapic_entries[apic] =
13356 @@ -733,7 +733,7 @@ nomem:
13357 kfree(ioapic_entries[apic]);
13358 kfree(ioapic_entries);
13359
13360 - return 0;
13361 + return NULL;
13362 }
13363
13364 /*
13365 @@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
13366 }
13367 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
13368
13369 -void lock_vector_lock(void)
13370 +void lock_vector_lock(void) __acquires(vector_lock)
13371 {
13372 /* Used to the online set of cpus does not change
13373 * during assign_irq_vector.
13374 @@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
13375 spin_lock(&vector_lock);
13376 }
13377
13378 -void unlock_vector_lock(void)
13379 +void unlock_vector_lock(void) __releases(vector_lock)
13380 {
13381 spin_unlock(&vector_lock);
13382 }
13383 @@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int irq)
13384 ack_APIC_irq();
13385 }
13386
13387 -atomic_t irq_mis_count;
13388 +atomic_unchecked_t irq_mis_count;
13389
13390 static void ack_apic_level(unsigned int irq)
13391 {
13392 @@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int irq)
13393
13394 /* Tail end of version 0x11 I/O APIC bug workaround */
13395 if (!(v & (1 << (i & 0x1f)))) {
13396 - atomic_inc(&irq_mis_count);
13397 + atomic_inc_unchecked(&irq_mis_count);
13398 spin_lock(&ioapic_lock);
13399 __mask_and_edge_IO_APIC_irq(cfg);
13400 __unmask_and_level_IO_APIC_irq(cfg);
13401 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
13402 index 151ace6..f317474 100644
13403 --- a/arch/x86/kernel/apm_32.c
13404 +++ b/arch/x86/kernel/apm_32.c
13405 @@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
13406 * This is for buggy BIOS's that refer to (real mode) segment 0x40
13407 * even though they are called in protected mode.
13408 */
13409 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
13410 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
13411 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
13412
13413 static const char driver_version[] = "1.16ac"; /* no spaces */
13414 @@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
13415 BUG_ON(cpu != 0);
13416 gdt = get_cpu_gdt_table(cpu);
13417 save_desc_40 = gdt[0x40 / 8];
13418 +
13419 + pax_open_kernel();
13420 gdt[0x40 / 8] = bad_bios_desc;
13421 + pax_close_kernel();
13422
13423 apm_irq_save(flags);
13424 APM_DO_SAVE_SEGS;
13425 @@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
13426 &call->esi);
13427 APM_DO_RESTORE_SEGS;
13428 apm_irq_restore(flags);
13429 +
13430 + pax_open_kernel();
13431 gdt[0x40 / 8] = save_desc_40;
13432 + pax_close_kernel();
13433 +
13434 put_cpu();
13435
13436 return call->eax & 0xff;
13437 @@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
13438 BUG_ON(cpu != 0);
13439 gdt = get_cpu_gdt_table(cpu);
13440 save_desc_40 = gdt[0x40 / 8];
13441 +
13442 + pax_open_kernel();
13443 gdt[0x40 / 8] = bad_bios_desc;
13444 + pax_close_kernel();
13445
13446 apm_irq_save(flags);
13447 APM_DO_SAVE_SEGS;
13448 @@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
13449 &call->eax);
13450 APM_DO_RESTORE_SEGS;
13451 apm_irq_restore(flags);
13452 +
13453 + pax_open_kernel();
13454 gdt[0x40 / 8] = save_desc_40;
13455 + pax_close_kernel();
13456 +
13457 put_cpu();
13458 return error;
13459 }
13460 @@ -975,7 +989,7 @@ recalc:
13461
13462 static void apm_power_off(void)
13463 {
13464 - unsigned char po_bios_call[] = {
13465 + const unsigned char po_bios_call[] = {
13466 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
13467 0x8e, 0xd0, /* movw ax,ss */
13468 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
13469 @@ -2357,12 +2371,15 @@ static int __init apm_init(void)
13470 * code to that CPU.
13471 */
13472 gdt = get_cpu_gdt_table(0);
13473 +
13474 + pax_open_kernel();
13475 set_desc_base(&gdt[APM_CS >> 3],
13476 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
13477 set_desc_base(&gdt[APM_CS_16 >> 3],
13478 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
13479 set_desc_base(&gdt[APM_DS >> 3],
13480 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
13481 + pax_close_kernel();
13482
13483 proc_create("apm", 0, NULL, &apm_file_ops);
13484
13485 diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
13486 index dfdbf64..9b2b6ce 100644
13487 --- a/arch/x86/kernel/asm-offsets_32.c
13488 +++ b/arch/x86/kernel/asm-offsets_32.c
13489 @@ -51,7 +51,6 @@ void foo(void)
13490 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
13491 BLANK();
13492
13493 - OFFSET(TI_task, thread_info, task);
13494 OFFSET(TI_exec_domain, thread_info, exec_domain);
13495 OFFSET(TI_flags, thread_info, flags);
13496 OFFSET(TI_status, thread_info, status);
13497 @@ -60,6 +59,8 @@ void foo(void)
13498 OFFSET(TI_restart_block, thread_info, restart_block);
13499 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
13500 OFFSET(TI_cpu, thread_info, cpu);
13501 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
13502 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13503 BLANK();
13504
13505 OFFSET(GDS_size, desc_ptr, size);
13506 @@ -99,6 +100,7 @@ void foo(void)
13507
13508 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13509 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
13510 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13511 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
13512 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
13513 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
13514 @@ -115,6 +117,11 @@ void foo(void)
13515 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
13516 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13517 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13518 +
13519 +#ifdef CONFIG_PAX_KERNEXEC
13520 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13521 +#endif
13522 +
13523 #endif
13524
13525 #ifdef CONFIG_XEN
13526 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
13527 index 4a6aeed..371de20 100644
13528 --- a/arch/x86/kernel/asm-offsets_64.c
13529 +++ b/arch/x86/kernel/asm-offsets_64.c
13530 @@ -44,6 +44,8 @@ int main(void)
13531 ENTRY(addr_limit);
13532 ENTRY(preempt_count);
13533 ENTRY(status);
13534 + ENTRY(lowest_stack);
13535 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13536 #ifdef CONFIG_IA32_EMULATION
13537 ENTRY(sysenter_return);
13538 #endif
13539 @@ -63,6 +65,18 @@ int main(void)
13540 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13541 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
13542 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
13543 +
13544 +#ifdef CONFIG_PAX_KERNEXEC
13545 + OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13546 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13547 +#endif
13548 +
13549 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13550 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
13551 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
13552 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
13553 +#endif
13554 +
13555 #endif
13556
13557
13558 @@ -115,6 +129,7 @@ int main(void)
13559 ENTRY(cr8);
13560 BLANK();
13561 #undef ENTRY
13562 + DEFINE(TSS_size, sizeof(struct tss_struct));
13563 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
13564 BLANK();
13565 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
13566 @@ -130,6 +145,7 @@ int main(void)
13567
13568 BLANK();
13569 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13570 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13571 #ifdef CONFIG_XEN
13572 BLANK();
13573 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
13574 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
13575 index ff502cc..dc5133e 100644
13576 --- a/arch/x86/kernel/cpu/Makefile
13577 +++ b/arch/x86/kernel/cpu/Makefile
13578 @@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
13579 CFLAGS_REMOVE_common.o = -pg
13580 endif
13581
13582 -# Make sure load_percpu_segment has no stackprotector
13583 -nostackp := $(call cc-option, -fno-stack-protector)
13584 -CFLAGS_common.o := $(nostackp)
13585 -
13586 obj-y := intel_cacheinfo.o addon_cpuid_features.o
13587 obj-y += proc.o capflags.o powerflags.o common.o
13588 obj-y += vmware.o hypervisor.o sched.o
13589 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
13590 index 6e082dc..a0b5f36 100644
13591 --- a/arch/x86/kernel/cpu/amd.c
13592 +++ b/arch/x86/kernel/cpu/amd.c
13593 @@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
13594 unsigned int size)
13595 {
13596 /* AMD errata T13 (order #21922) */
13597 - if ((c->x86 == 6)) {
13598 + if (c->x86 == 6) {
13599 /* Duron Rev A0 */
13600 if (c->x86_model == 3 && c->x86_mask == 0)
13601 size = 64;
13602 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
13603 index 4e34d10..ba6bc97 100644
13604 --- a/arch/x86/kernel/cpu/common.c
13605 +++ b/arch/x86/kernel/cpu/common.c
13606 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
13607
13608 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
13609
13610 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
13611 -#ifdef CONFIG_X86_64
13612 - /*
13613 - * We need valid kernel segments for data and code in long mode too
13614 - * IRET will check the segment types kkeil 2000/10/28
13615 - * Also sysret mandates a special GDT layout
13616 - *
13617 - * TLS descriptors are currently at a different place compared to i386.
13618 - * Hopefully nobody expects them at a fixed place (Wine?)
13619 - */
13620 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
13621 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
13622 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
13623 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
13624 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
13625 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
13626 -#else
13627 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
13628 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13629 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
13630 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
13631 - /*
13632 - * Segments used for calling PnP BIOS have byte granularity.
13633 - * They code segments and data segments have fixed 64k limits,
13634 - * the transfer segment sizes are set at run time.
13635 - */
13636 - /* 32-bit code */
13637 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13638 - /* 16-bit code */
13639 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13640 - /* 16-bit data */
13641 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
13642 - /* 16-bit data */
13643 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
13644 - /* 16-bit data */
13645 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
13646 - /*
13647 - * The APM segments have byte granularity and their bases
13648 - * are set at run time. All have 64k limits.
13649 - */
13650 - /* 32-bit code */
13651 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13652 - /* 16-bit code */
13653 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13654 - /* data */
13655 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
13656 -
13657 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13658 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13659 - GDT_STACK_CANARY_INIT
13660 -#endif
13661 -} };
13662 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
13663 -
13664 static int __init x86_xsave_setup(char *s)
13665 {
13666 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
13667 @@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
13668 {
13669 struct desc_ptr gdt_descr;
13670
13671 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
13672 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
13673 gdt_descr.size = GDT_SIZE - 1;
13674 load_gdt(&gdt_descr);
13675 /* Reload the per-cpu base */
13676 @@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
13677 /* Filter out anything that depends on CPUID levels we don't have */
13678 filter_cpuid_features(c, true);
13679
13680 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
13681 + setup_clear_cpu_cap(X86_FEATURE_SEP);
13682 +#endif
13683 +
13684 /* If the model name is still unset, do table lookup. */
13685 if (!c->x86_model_id[0]) {
13686 const char *p;
13687 @@ -980,6 +930,9 @@ static __init int setup_disablecpuid(char *arg)
13688 }
13689 __setup("clearcpuid=", setup_disablecpuid);
13690
13691 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
13692 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
13693 +
13694 #ifdef CONFIG_X86_64
13695 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
13696
13697 @@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
13698 EXPORT_PER_CPU_SYMBOL(current_task);
13699
13700 DEFINE_PER_CPU(unsigned long, kernel_stack) =
13701 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
13702 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
13703 EXPORT_PER_CPU_SYMBOL(kernel_stack);
13704
13705 DEFINE_PER_CPU(char *, irq_stack_ptr) =
13706 @@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
13707 {
13708 memset(regs, 0, sizeof(struct pt_regs));
13709 regs->fs = __KERNEL_PERCPU;
13710 - regs->gs = __KERNEL_STACK_CANARY;
13711 + savesegment(gs, regs->gs);
13712
13713 return regs;
13714 }
13715 @@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
13716 int i;
13717
13718 cpu = stack_smp_processor_id();
13719 - t = &per_cpu(init_tss, cpu);
13720 + t = init_tss + cpu;
13721 orig_ist = &per_cpu(orig_ist, cpu);
13722
13723 #ifdef CONFIG_NUMA
13724 @@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
13725 switch_to_new_gdt(cpu);
13726 loadsegment(fs, 0);
13727
13728 - load_idt((const struct desc_ptr *)&idt_descr);
13729 + load_idt(&idt_descr);
13730
13731 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
13732 syscall_init();
13733 @@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
13734 wrmsrl(MSR_KERNEL_GS_BASE, 0);
13735 barrier();
13736
13737 - check_efer();
13738 if (cpu != 0)
13739 enable_x2apic();
13740
13741 @@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
13742 {
13743 int cpu = smp_processor_id();
13744 struct task_struct *curr = current;
13745 - struct tss_struct *t = &per_cpu(init_tss, cpu);
13746 + struct tss_struct *t = init_tss + cpu;
13747 struct thread_struct *thread = &curr->thread;
13748
13749 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
13750 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
13751 index 6a77cca..4f4fca0 100644
13752 --- a/arch/x86/kernel/cpu/intel.c
13753 +++ b/arch/x86/kernel/cpu/intel.c
13754 @@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug(void)
13755 * Update the IDT descriptor and reload the IDT so that
13756 * it uses the read-only mapped virtual address.
13757 */
13758 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
13759 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
13760 load_idt(&idt_descr);
13761 }
13762 #endif
13763 diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
13764 index 417990f..96dc36b 100644
13765 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c
13766 +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
13767 @@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
13768 return ret;
13769 }
13770
13771 -static struct sysfs_ops sysfs_ops = {
13772 +static const struct sysfs_ops sysfs_ops = {
13773 .show = show,
13774 .store = store,
13775 };
13776 diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
13777 index 472763d..9831e11 100644
13778 --- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
13779 +++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
13780 @@ -211,7 +211,9 @@ static ssize_t mce_write(struct file *filp, const char __user *ubuf,
13781 static int inject_init(void)
13782 {
13783 printk(KERN_INFO "Machine check injector initialized\n");
13784 - mce_chrdev_ops.write = mce_write;
13785 + pax_open_kernel();
13786 + *(void **)&mce_chrdev_ops.write = mce_write;
13787 + pax_close_kernel();
13788 register_die_notifier(&mce_raise_nb);
13789 return 0;
13790 }
13791 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
13792 index 0f16a2b..21740f5 100644
13793 --- a/arch/x86/kernel/cpu/mcheck/mce.c
13794 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
13795 @@ -43,6 +43,7 @@
13796 #include <asm/ipi.h>
13797 #include <asm/mce.h>
13798 #include <asm/msr.h>
13799 +#include <asm/local.h>
13800
13801 #include "mce-internal.h"
13802
13803 @@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
13804 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
13805 m->cs, m->ip);
13806
13807 - if (m->cs == __KERNEL_CS)
13808 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
13809 print_symbol("{%s}", m->ip);
13810 pr_cont("\n");
13811 }
13812 @@ -221,10 +222,10 @@ static void print_mce_tail(void)
13813
13814 #define PANIC_TIMEOUT 5 /* 5 seconds */
13815
13816 -static atomic_t mce_paniced;
13817 +static atomic_unchecked_t mce_paniced;
13818
13819 static int fake_panic;
13820 -static atomic_t mce_fake_paniced;
13821 +static atomic_unchecked_t mce_fake_paniced;
13822
13823 /* Panic in progress. Enable interrupts and wait for final IPI */
13824 static void wait_for_panic(void)
13825 @@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
13826 /*
13827 * Make sure only one CPU runs in machine check panic
13828 */
13829 - if (atomic_inc_return(&mce_paniced) > 1)
13830 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
13831 wait_for_panic();
13832 barrier();
13833
13834 @@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
13835 console_verbose();
13836 } else {
13837 /* Don't log too much for fake panic */
13838 - if (atomic_inc_return(&mce_fake_paniced) > 1)
13839 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
13840 return;
13841 }
13842 print_mce_head();
13843 @@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
13844 * might have been modified by someone else.
13845 */
13846 rmb();
13847 - if (atomic_read(&mce_paniced))
13848 + if (atomic_read_unchecked(&mce_paniced))
13849 wait_for_panic();
13850 if (!monarch_timeout)
13851 goto out;
13852 @@ -1394,7 +1395,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
13853 }
13854
13855 /* Call the installed machine check handler for this CPU setup. */
13856 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
13857 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
13858 unexpected_machine_check;
13859
13860 /*
13861 @@ -1416,7 +1417,9 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
13862 return;
13863 }
13864
13865 + pax_open_kernel();
13866 machine_check_vector = do_machine_check;
13867 + pax_close_kernel();
13868
13869 mce_init();
13870 mce_cpu_features(c);
13871 @@ -1429,14 +1432,14 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
13872 */
13873
13874 static DEFINE_SPINLOCK(mce_state_lock);
13875 -static int open_count; /* #times opened */
13876 +static local_t open_count; /* #times opened */
13877 static int open_exclu; /* already open exclusive? */
13878
13879 static int mce_open(struct inode *inode, struct file *file)
13880 {
13881 spin_lock(&mce_state_lock);
13882
13883 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
13884 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
13885 spin_unlock(&mce_state_lock);
13886
13887 return -EBUSY;
13888 @@ -1444,7 +1447,7 @@ static int mce_open(struct inode *inode, struct file *file)
13889
13890 if (file->f_flags & O_EXCL)
13891 open_exclu = 1;
13892 - open_count++;
13893 + local_inc(&open_count);
13894
13895 spin_unlock(&mce_state_lock);
13896
13897 @@ -1455,7 +1458,7 @@ static int mce_release(struct inode *inode, struct file *file)
13898 {
13899 spin_lock(&mce_state_lock);
13900
13901 - open_count--;
13902 + local_dec(&open_count);
13903 open_exclu = 0;
13904
13905 spin_unlock(&mce_state_lock);
13906 @@ -2082,7 +2085,7 @@ struct dentry *mce_get_debugfs_dir(void)
13907 static void mce_reset(void)
13908 {
13909 cpu_missing = 0;
13910 - atomic_set(&mce_fake_paniced, 0);
13911 + atomic_set_unchecked(&mce_fake_paniced, 0);
13912 atomic_set(&mce_executing, 0);
13913 atomic_set(&mce_callin, 0);
13914 atomic_set(&global_nwo, 0);
13915 diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
13916 index ef3cd31..9d2f6ab 100644
13917 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
13918 +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
13919 @@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
13920 return ret;
13921 }
13922
13923 -static struct sysfs_ops threshold_ops = {
13924 +static const struct sysfs_ops threshold_ops = {
13925 .show = show,
13926 .store = store,
13927 };
13928 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
13929 index 5c0e653..1e82c7c 100644
13930 --- a/arch/x86/kernel/cpu/mcheck/p5.c
13931 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
13932 @@ -50,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
13933 if (!cpu_has(c, X86_FEATURE_MCE))
13934 return;
13935
13936 + pax_open_kernel();
13937 machine_check_vector = pentium_machine_check;
13938 + pax_close_kernel();
13939 /* Make sure the vector pointer is visible before we enable MCEs: */
13940 wmb();
13941
13942 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
13943 index 54060f5..e6ba93d 100644
13944 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
13945 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
13946 @@ -24,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
13947 {
13948 u32 lo, hi;
13949
13950 + pax_open_kernel();
13951 machine_check_vector = winchip_machine_check;
13952 + pax_close_kernel();
13953 /* Make sure the vector pointer is visible before we enable MCEs: */
13954 wmb();
13955
13956 diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c
13957 index 33af141..92ba9cd 100644
13958 --- a/arch/x86/kernel/cpu/mtrr/amd.c
13959 +++ b/arch/x86/kernel/cpu/mtrr/amd.c
13960 @@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
13961 return 0;
13962 }
13963
13964 -static struct mtrr_ops amd_mtrr_ops = {
13965 +static const struct mtrr_ops amd_mtrr_ops = {
13966 .vendor = X86_VENDOR_AMD,
13967 .set = amd_set_mtrr,
13968 .get = amd_get_mtrr,
13969 diff --git a/arch/x86/kernel/cpu/mtrr/centaur.c b/arch/x86/kernel/cpu/mtrr/centaur.c
13970 index de89f14..316fe3e 100644
13971 --- a/arch/x86/kernel/cpu/mtrr/centaur.c
13972 +++ b/arch/x86/kernel/cpu/mtrr/centaur.c
13973 @@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t
13974 return 0;
13975 }
13976
13977 -static struct mtrr_ops centaur_mtrr_ops = {
13978 +static const struct mtrr_ops centaur_mtrr_ops = {
13979 .vendor = X86_VENDOR_CENTAUR,
13980 .set = centaur_set_mcr,
13981 .get = centaur_get_mcr,
13982 diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
13983 index 228d982..68a3343 100644
13984 --- a/arch/x86/kernel/cpu/mtrr/cyrix.c
13985 +++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
13986 @@ -265,7 +265,7 @@ static void cyrix_set_all(void)
13987 post_set();
13988 }
13989
13990 -static struct mtrr_ops cyrix_mtrr_ops = {
13991 +static const struct mtrr_ops cyrix_mtrr_ops = {
13992 .vendor = X86_VENDOR_CYRIX,
13993 .set_all = cyrix_set_all,
13994 .set = cyrix_set_arr,
13995 diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
13996 index 55da0c5..4d75584 100644
13997 --- a/arch/x86/kernel/cpu/mtrr/generic.c
13998 +++ b/arch/x86/kernel/cpu/mtrr/generic.c
13999 @@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
14000 /*
14001 * Generic structure...
14002 */
14003 -struct mtrr_ops generic_mtrr_ops = {
14004 +const struct mtrr_ops generic_mtrr_ops = {
14005 .use_intel_if = 1,
14006 .set_all = generic_set_all,
14007 .get = generic_get_mtrr,
14008 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
14009 index fd60f09..c94ef52 100644
14010 --- a/arch/x86/kernel/cpu/mtrr/main.c
14011 +++ b/arch/x86/kernel/cpu/mtrr/main.c
14012 @@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
14013 u64 size_or_mask, size_and_mask;
14014 static bool mtrr_aps_delayed_init;
14015
14016 -static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
14017 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
14018
14019 -struct mtrr_ops *mtrr_if;
14020 +const struct mtrr_ops *mtrr_if;
14021
14022 static void set_mtrr(unsigned int reg, unsigned long base,
14023 unsigned long size, mtrr_type type);
14024
14025 -void set_mtrr_ops(struct mtrr_ops *ops)
14026 +void set_mtrr_ops(const struct mtrr_ops *ops)
14027 {
14028 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
14029 mtrr_ops[ops->vendor] = ops;
14030 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
14031 index a501dee..816c719 100644
14032 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
14033 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
14034 @@ -25,14 +25,14 @@ struct mtrr_ops {
14035 int (*validate_add_page)(unsigned long base, unsigned long size,
14036 unsigned int type);
14037 int (*have_wrcomb)(void);
14038 -};
14039 +} __do_const;
14040
14041 extern int generic_get_free_region(unsigned long base, unsigned long size,
14042 int replace_reg);
14043 extern int generic_validate_add_page(unsigned long base, unsigned long size,
14044 unsigned int type);
14045
14046 -extern struct mtrr_ops generic_mtrr_ops;
14047 +extern const struct mtrr_ops generic_mtrr_ops;
14048
14049 extern int positive_have_wrcomb(void);
14050
14051 @@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int index,
14052 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
14053 void get_mtrr_state(void);
14054
14055 -extern void set_mtrr_ops(struct mtrr_ops *ops);
14056 +extern void set_mtrr_ops(const struct mtrr_ops *ops);
14057
14058 extern u64 size_or_mask, size_and_mask;
14059 -extern struct mtrr_ops *mtrr_if;
14060 +extern const struct mtrr_ops *mtrr_if;
14061
14062 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
14063 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
14064 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
14065 index 0ff02ca..fc49a60 100644
14066 --- a/arch/x86/kernel/cpu/perf_event.c
14067 +++ b/arch/x86/kernel/cpu/perf_event.c
14068 @@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event *event,
14069 * count to the generic event atomically:
14070 */
14071 again:
14072 - prev_raw_count = atomic64_read(&hwc->prev_count);
14073 + prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
14074 rdmsrl(hwc->event_base + idx, new_raw_count);
14075
14076 - if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
14077 + if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
14078 new_raw_count) != prev_raw_count)
14079 goto again;
14080
14081 @@ -741,7 +741,7 @@ again:
14082 delta = (new_raw_count << shift) - (prev_raw_count << shift);
14083 delta >>= shift;
14084
14085 - atomic64_add(delta, &event->count);
14086 + atomic64_add_unchecked(delta, &event->count);
14087 atomic64_sub(delta, &hwc->period_left);
14088
14089 return new_raw_count;
14090 @@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_event *event,
14091 * The hw event starts counting from this event offset,
14092 * mark it to be able to extra future deltas:
14093 */
14094 - atomic64_set(&hwc->prev_count, (u64)-left);
14095 + atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
14096
14097 err = checking_wrmsrl(hwc->event_base + idx,
14098 (u64)(-left) & x86_pmu.event_mask);
14099 @@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
14100 break;
14101
14102 callchain_store(entry, frame.return_address);
14103 - fp = frame.next_frame;
14104 + fp = (__force const void __user *)frame.next_frame;
14105 }
14106 }
14107
14108 diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
14109 index 898df97..9e82503 100644
14110 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c
14111 +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
14112 @@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
14113
14114 /* Interface defining a CPU specific perfctr watchdog */
14115 struct wd_ops {
14116 - int (*reserve)(void);
14117 - void (*unreserve)(void);
14118 - int (*setup)(unsigned nmi_hz);
14119 - void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
14120 - void (*stop)(void);
14121 + int (* const reserve)(void);
14122 + void (* const unreserve)(void);
14123 + int (* const setup)(unsigned nmi_hz);
14124 + void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
14125 + void (* const stop)(void);
14126 unsigned perfctr;
14127 unsigned evntsel;
14128 u64 checkbit;
14129 @@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
14130 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
14131 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
14132
14133 +/* cannot be const */
14134 static struct wd_ops intel_arch_wd_ops;
14135
14136 static int setup_intel_arch_watchdog(unsigned nmi_hz)
14137 @@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
14138 return 1;
14139 }
14140
14141 +/* cannot be const */
14142 static struct wd_ops intel_arch_wd_ops __read_mostly = {
14143 .reserve = single_msr_reserve,
14144 .unreserve = single_msr_unreserve,
14145 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
14146 index ff95824..2ffdcb5 100644
14147 --- a/arch/x86/kernel/crash.c
14148 +++ b/arch/x86/kernel/crash.c
14149 @@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu, struct die_args *args)
14150 regs = args->regs;
14151
14152 #ifdef CONFIG_X86_32
14153 - if (!user_mode_vm(regs)) {
14154 + if (!user_mode(regs)) {
14155 crash_fixup_ss_esp(&fixed_regs, regs);
14156 regs = &fixed_regs;
14157 }
14158 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
14159 index 37250fe..bf2ec74 100644
14160 --- a/arch/x86/kernel/doublefault_32.c
14161 +++ b/arch/x86/kernel/doublefault_32.c
14162 @@ -11,7 +11,7 @@
14163
14164 #define DOUBLEFAULT_STACKSIZE (1024)
14165 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
14166 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
14167 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
14168
14169 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
14170
14171 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
14172 unsigned long gdt, tss;
14173
14174 store_gdt(&gdt_desc);
14175 - gdt = gdt_desc.address;
14176 + gdt = (unsigned long)gdt_desc.address;
14177
14178 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
14179
14180 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
14181 /* 0x2 bit is always set */
14182 .flags = X86_EFLAGS_SF | 0x2,
14183 .sp = STACK_START,
14184 - .es = __USER_DS,
14185 + .es = __KERNEL_DS,
14186 .cs = __KERNEL_CS,
14187 .ss = __KERNEL_DS,
14188 - .ds = __USER_DS,
14189 + .ds = __KERNEL_DS,
14190 .fs = __KERNEL_PERCPU,
14191
14192 .__cr3 = __pa_nodebug(swapper_pg_dir),
14193 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
14194 index 2d8a371..4fa6ae6 100644
14195 --- a/arch/x86/kernel/dumpstack.c
14196 +++ b/arch/x86/kernel/dumpstack.c
14197 @@ -2,6 +2,9 @@
14198 * Copyright (C) 1991, 1992 Linus Torvalds
14199 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
14200 */
14201 +#ifdef CONFIG_GRKERNSEC_HIDESYM
14202 +#define __INCLUDED_BY_HIDESYM 1
14203 +#endif
14204 #include <linux/kallsyms.h>
14205 #include <linux/kprobes.h>
14206 #include <linux/uaccess.h>
14207 @@ -28,7 +31,7 @@ static int die_counter;
14208
14209 void printk_address(unsigned long address, int reliable)
14210 {
14211 - printk(" [<%p>] %s%pS\n", (void *) address,
14212 + printk(" [<%p>] %s%pA\n", (void *) address,
14213 reliable ? "" : "? ", (void *) address);
14214 }
14215
14216 @@ -36,9 +39,8 @@ void printk_address(unsigned long address, int reliable)
14217 static void
14218 print_ftrace_graph_addr(unsigned long addr, void *data,
14219 const struct stacktrace_ops *ops,
14220 - struct thread_info *tinfo, int *graph)
14221 + struct task_struct *task, int *graph)
14222 {
14223 - struct task_struct *task = tinfo->task;
14224 unsigned long ret_addr;
14225 int index = task->curr_ret_stack;
14226
14227 @@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14228 static inline void
14229 print_ftrace_graph_addr(unsigned long addr, void *data,
14230 const struct stacktrace_ops *ops,
14231 - struct thread_info *tinfo, int *graph)
14232 + struct task_struct *task, int *graph)
14233 { }
14234 #endif
14235
14236 @@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14237 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
14238 */
14239
14240 -static inline int valid_stack_ptr(struct thread_info *tinfo,
14241 - void *p, unsigned int size, void *end)
14242 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
14243 {
14244 - void *t = tinfo;
14245 if (end) {
14246 if (p < end && p >= (end-THREAD_SIZE))
14247 return 1;
14248 @@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
14249 }
14250
14251 unsigned long
14252 -print_context_stack(struct thread_info *tinfo,
14253 +print_context_stack(struct task_struct *task, void *stack_start,
14254 unsigned long *stack, unsigned long bp,
14255 const struct stacktrace_ops *ops, void *data,
14256 unsigned long *end, int *graph)
14257 {
14258 struct stack_frame *frame = (struct stack_frame *)bp;
14259
14260 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
14261 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
14262 unsigned long addr;
14263
14264 addr = *stack;
14265 @@ -103,7 +103,7 @@ print_context_stack(struct thread_info *tinfo,
14266 } else {
14267 ops->address(data, addr, 0);
14268 }
14269 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14270 + print_ftrace_graph_addr(addr, data, ops, task, graph);
14271 }
14272 stack++;
14273 }
14274 @@ -180,7 +180,7 @@ void dump_stack(void)
14275 #endif
14276
14277 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
14278 - current->pid, current->comm, print_tainted(),
14279 + task_pid_nr(current), current->comm, print_tainted(),
14280 init_utsname()->release,
14281 (int)strcspn(init_utsname()->version, " "),
14282 init_utsname()->version);
14283 @@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
14284 return flags;
14285 }
14286
14287 +extern void gr_handle_kernel_exploit(void);
14288 +
14289 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14290 {
14291 if (regs && kexec_should_crash(current))
14292 @@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14293 panic("Fatal exception in interrupt");
14294 if (panic_on_oops)
14295 panic("Fatal exception");
14296 - do_exit(signr);
14297 +
14298 + gr_handle_kernel_exploit();
14299 +
14300 + do_group_exit(signr);
14301 }
14302
14303 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14304 @@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs *regs, long err)
14305 unsigned long flags = oops_begin();
14306 int sig = SIGSEGV;
14307
14308 - if (!user_mode_vm(regs))
14309 + if (!user_mode(regs))
14310 report_bug(regs->ip, regs);
14311
14312 if (__die(str, regs, err))
14313 diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
14314 index 81086c2..13e8b17 100644
14315 --- a/arch/x86/kernel/dumpstack.h
14316 +++ b/arch/x86/kernel/dumpstack.h
14317 @@ -15,7 +15,7 @@
14318 #endif
14319
14320 extern unsigned long
14321 -print_context_stack(struct thread_info *tinfo,
14322 +print_context_stack(struct task_struct *task, void *stack_start,
14323 unsigned long *stack, unsigned long bp,
14324 const struct stacktrace_ops *ops, void *data,
14325 unsigned long *end, int *graph);
14326 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
14327 index f7dd2a7..504f53b 100644
14328 --- a/arch/x86/kernel/dumpstack_32.c
14329 +++ b/arch/x86/kernel/dumpstack_32.c
14330 @@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14331 #endif
14332
14333 for (;;) {
14334 - struct thread_info *context;
14335 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14336 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14337
14338 - context = (struct thread_info *)
14339 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14340 - bp = print_context_stack(context, stack, bp, ops,
14341 - data, NULL, &graph);
14342 -
14343 - stack = (unsigned long *)context->previous_esp;
14344 - if (!stack)
14345 + if (stack_start == task_stack_page(task))
14346 break;
14347 + stack = *(unsigned long **)stack_start;
14348 if (ops->stack(data, "IRQ") < 0)
14349 break;
14350 touch_nmi_watchdog();
14351 @@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs)
14352 * When in-kernel, we also print out the stack and code at the
14353 * time of the fault..
14354 */
14355 - if (!user_mode_vm(regs)) {
14356 + if (!user_mode(regs)) {
14357 unsigned int code_prologue = code_bytes * 43 / 64;
14358 unsigned int code_len = code_bytes;
14359 unsigned char c;
14360 u8 *ip;
14361 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14362
14363 printk(KERN_EMERG "Stack:\n");
14364 show_stack_log_lvl(NULL, regs, &regs->sp,
14365 @@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs)
14366
14367 printk(KERN_EMERG "Code: ");
14368
14369 - ip = (u8 *)regs->ip - code_prologue;
14370 + ip = (u8 *)regs->ip - code_prologue + cs_base;
14371 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14372 /* try starting at IP */
14373 - ip = (u8 *)regs->ip;
14374 + ip = (u8 *)regs->ip + cs_base;
14375 code_len = code_len - code_prologue + 1;
14376 }
14377 for (i = 0; i < code_len; i++, ip++) {
14378 @@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs)
14379 printk(" Bad EIP value.");
14380 break;
14381 }
14382 - if (ip == (u8 *)regs->ip)
14383 + if (ip == (u8 *)regs->ip + cs_base)
14384 printk("<%02x> ", c);
14385 else
14386 printk("%02x ", c);
14387 @@ -145,10 +142,23 @@ void show_registers(struct pt_regs *regs)
14388 printk("\n");
14389 }
14390
14391 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14392 +void pax_check_alloca(unsigned long size)
14393 +{
14394 + unsigned long sp = (unsigned long)&sp, stack_left;
14395 +
14396 + /* all kernel stacks are of the same size */
14397 + stack_left = sp & (THREAD_SIZE - 1);
14398 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14399 +}
14400 +EXPORT_SYMBOL(pax_check_alloca);
14401 +#endif
14402 +
14403 int is_valid_bugaddr(unsigned long ip)
14404 {
14405 unsigned short ud2;
14406
14407 + ip = ktla_ktva(ip);
14408 if (ip < PAGE_OFFSET)
14409 return 0;
14410 if (probe_kernel_address((unsigned short *)ip, ud2))
14411 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14412 index a071e6b..36cd585 100644
14413 --- a/arch/x86/kernel/dumpstack_64.c
14414 +++ b/arch/x86/kernel/dumpstack_64.c
14415 @@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14416 unsigned long *irq_stack_end =
14417 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14418 unsigned used = 0;
14419 - struct thread_info *tinfo;
14420 int graph = 0;
14421 + void *stack_start;
14422
14423 if (!task)
14424 task = current;
14425 @@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14426 * current stack address. If the stacks consist of nested
14427 * exceptions
14428 */
14429 - tinfo = task_thread_info(task);
14430 for (;;) {
14431 char *id;
14432 unsigned long *estack_end;
14433 +
14434 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14435 &used, &id);
14436
14437 @@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14438 if (ops->stack(data, id) < 0)
14439 break;
14440
14441 - bp = print_context_stack(tinfo, stack, bp, ops,
14442 + bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14443 data, estack_end, &graph);
14444 ops->stack(data, "<EOE>");
14445 /*
14446 @@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14447 if (stack >= irq_stack && stack < irq_stack_end) {
14448 if (ops->stack(data, "IRQ") < 0)
14449 break;
14450 - bp = print_context_stack(tinfo, stack, bp,
14451 + bp = print_context_stack(task, irq_stack, stack, bp,
14452 ops, data, irq_stack_end, &graph);
14453 /*
14454 * We link to the next stack (which would be
14455 @@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14456 /*
14457 * This handles the process stack:
14458 */
14459 - bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14460 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14461 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14462 put_cpu();
14463 }
14464 EXPORT_SYMBOL(dump_trace);
14465 @@ -304,3 +305,50 @@ int is_valid_bugaddr(unsigned long ip)
14466 return ud2 == 0x0b0f;
14467 }
14468
14469 +
14470 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14471 +void pax_check_alloca(unsigned long size)
14472 +{
14473 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14474 + unsigned cpu, used;
14475 + char *id;
14476 +
14477 + /* check the process stack first */
14478 + stack_start = (unsigned long)task_stack_page(current);
14479 + stack_end = stack_start + THREAD_SIZE;
14480 + if (likely(stack_start <= sp && sp < stack_end)) {
14481 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
14482 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14483 + return;
14484 + }
14485 +
14486 + cpu = get_cpu();
14487 +
14488 + /* check the irq stacks */
14489 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14490 + stack_start = stack_end - IRQ_STACK_SIZE;
14491 + if (stack_start <= sp && sp < stack_end) {
14492 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14493 + put_cpu();
14494 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14495 + return;
14496 + }
14497 +
14498 + /* check the exception stacks */
14499 + used = 0;
14500 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14501 + stack_start = stack_end - EXCEPTION_STKSZ;
14502 + if (stack_end && stack_start <= sp && sp < stack_end) {
14503 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14504 + put_cpu();
14505 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14506 + return;
14507 + }
14508 +
14509 + put_cpu();
14510 +
14511 + /* unknown stack */
14512 + BUG();
14513 +}
14514 +EXPORT_SYMBOL(pax_check_alloca);
14515 +#endif
14516 diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
14517 index a89739a..95e0c48 100644
14518 --- a/arch/x86/kernel/e820.c
14519 +++ b/arch/x86/kernel/e820.c
14520 @@ -733,7 +733,7 @@ struct early_res {
14521 };
14522 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
14523 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
14524 - {}
14525 + { 0, 0, {0}, 0 }
14526 };
14527
14528 static int __init find_overlapped_early(u64 start, u64 end)
14529 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14530 index b9c830c..1e41a96 100644
14531 --- a/arch/x86/kernel/early_printk.c
14532 +++ b/arch/x86/kernel/early_printk.c
14533 @@ -7,6 +7,7 @@
14534 #include <linux/pci_regs.h>
14535 #include <linux/pci_ids.h>
14536 #include <linux/errno.h>
14537 +#include <linux/sched.h>
14538 #include <asm/io.h>
14539 #include <asm/processor.h>
14540 #include <asm/fcntl.h>
14541 @@ -170,6 +171,8 @@ asmlinkage void early_printk(const char *fmt, ...)
14542 int n;
14543 va_list ap;
14544
14545 + pax_track_stack();
14546 +
14547 va_start(ap, fmt);
14548 n = vscnprintf(buf, sizeof(buf), fmt, ap);
14549 early_console->write(early_console, buf, n);
14550 diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c
14551 index 5cab48e..b025f9b 100644
14552 --- a/arch/x86/kernel/efi_32.c
14553 +++ b/arch/x86/kernel/efi_32.c
14554 @@ -38,70 +38,56 @@
14555 */
14556
14557 static unsigned long efi_rt_eflags;
14558 -static pgd_t efi_bak_pg_dir_pointer[2];
14559 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
14560
14561 -void efi_call_phys_prelog(void)
14562 +void __init efi_call_phys_prelog(void)
14563 {
14564 - unsigned long cr4;
14565 - unsigned long temp;
14566 struct desc_ptr gdt_descr;
14567
14568 +#ifdef CONFIG_PAX_KERNEXEC
14569 + struct desc_struct d;
14570 +#endif
14571 +
14572 local_irq_save(efi_rt_eflags);
14573
14574 - /*
14575 - * If I don't have PAE, I should just duplicate two entries in page
14576 - * directory. If I have PAE, I just need to duplicate one entry in
14577 - * page directory.
14578 - */
14579 - cr4 = read_cr4_safe();
14580 -
14581 - if (cr4 & X86_CR4_PAE) {
14582 - efi_bak_pg_dir_pointer[0].pgd =
14583 - swapper_pg_dir[pgd_index(0)].pgd;
14584 - swapper_pg_dir[0].pgd =
14585 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
14586 - } else {
14587 - efi_bak_pg_dir_pointer[0].pgd =
14588 - swapper_pg_dir[pgd_index(0)].pgd;
14589 - efi_bak_pg_dir_pointer[1].pgd =
14590 - swapper_pg_dir[pgd_index(0x400000)].pgd;
14591 - swapper_pg_dir[pgd_index(0)].pgd =
14592 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
14593 - temp = PAGE_OFFSET + 0x400000;
14594 - swapper_pg_dir[pgd_index(0x400000)].pgd =
14595 - swapper_pg_dir[pgd_index(temp)].pgd;
14596 - }
14597 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
14598 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
14599 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
14600
14601 /*
14602 * After the lock is released, the original page table is restored.
14603 */
14604 __flush_tlb_all();
14605
14606 +#ifdef CONFIG_PAX_KERNEXEC
14607 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
14608 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
14609 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
14610 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
14611 +#endif
14612 +
14613 gdt_descr.address = __pa(get_cpu_gdt_table(0));
14614 gdt_descr.size = GDT_SIZE - 1;
14615 load_gdt(&gdt_descr);
14616 }
14617
14618 -void efi_call_phys_epilog(void)
14619 +void __init efi_call_phys_epilog(void)
14620 {
14621 - unsigned long cr4;
14622 struct desc_ptr gdt_descr;
14623
14624 +#ifdef CONFIG_PAX_KERNEXEC
14625 + struct desc_struct d;
14626 +
14627 + memset(&d, 0, sizeof d);
14628 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
14629 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
14630 +#endif
14631 +
14632 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
14633 gdt_descr.size = GDT_SIZE - 1;
14634 load_gdt(&gdt_descr);
14635
14636 - cr4 = read_cr4_safe();
14637 -
14638 - if (cr4 & X86_CR4_PAE) {
14639 - swapper_pg_dir[pgd_index(0)].pgd =
14640 - efi_bak_pg_dir_pointer[0].pgd;
14641 - } else {
14642 - swapper_pg_dir[pgd_index(0)].pgd =
14643 - efi_bak_pg_dir_pointer[0].pgd;
14644 - swapper_pg_dir[pgd_index(0x400000)].pgd =
14645 - efi_bak_pg_dir_pointer[1].pgd;
14646 - }
14647 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
14648
14649 /*
14650 * After the lock is released, the original page table is restored.
14651 diff --git a/arch/x86/kernel/efi_stub_32.S b/arch/x86/kernel/efi_stub_32.S
14652 index fbe66e6..c5c0dd2 100644
14653 --- a/arch/x86/kernel/efi_stub_32.S
14654 +++ b/arch/x86/kernel/efi_stub_32.S
14655 @@ -6,7 +6,9 @@
14656 */
14657
14658 #include <linux/linkage.h>
14659 +#include <linux/init.h>
14660 #include <asm/page_types.h>
14661 +#include <asm/segment.h>
14662
14663 /*
14664 * efi_call_phys(void *, ...) is a function with variable parameters.
14665 @@ -20,7 +22,7 @@
14666 * service functions will comply with gcc calling convention, too.
14667 */
14668
14669 -.text
14670 +__INIT
14671 ENTRY(efi_call_phys)
14672 /*
14673 * 0. The function can only be called in Linux kernel. So CS has been
14674 @@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
14675 * The mapping of lower virtual memory has been created in prelog and
14676 * epilog.
14677 */
14678 - movl $1f, %edx
14679 - subl $__PAGE_OFFSET, %edx
14680 - jmp *%edx
14681 + movl $(__KERNEXEC_EFI_DS), %edx
14682 + mov %edx, %ds
14683 + mov %edx, %es
14684 + mov %edx, %ss
14685 + ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
14686 1:
14687
14688 /*
14689 @@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
14690 * parameter 2, ..., param n. To make things easy, we save the return
14691 * address of efi_call_phys in a global variable.
14692 */
14693 - popl %edx
14694 - movl %edx, saved_return_addr
14695 - /* get the function pointer into ECX*/
14696 - popl %ecx
14697 - movl %ecx, efi_rt_function_ptr
14698 - movl $2f, %edx
14699 - subl $__PAGE_OFFSET, %edx
14700 - pushl %edx
14701 + popl (saved_return_addr)
14702 + popl (efi_rt_function_ptr)
14703
14704 /*
14705 * 3. Clear PG bit in %CR0.
14706 @@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
14707 /*
14708 * 5. Call the physical function.
14709 */
14710 - jmp *%ecx
14711 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
14712
14713 -2:
14714 /*
14715 * 6. After EFI runtime service returns, control will return to
14716 * following instruction. We'd better readjust stack pointer first.
14717 @@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
14718 movl %cr0, %edx
14719 orl $0x80000000, %edx
14720 movl %edx, %cr0
14721 - jmp 1f
14722 -1:
14723 +
14724 /*
14725 * 8. Now restore the virtual mode from flat mode by
14726 * adding EIP with PAGE_OFFSET.
14727 */
14728 - movl $1f, %edx
14729 - jmp *%edx
14730 + ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
14731 1:
14732 + movl $(__KERNEL_DS), %edx
14733 + mov %edx, %ds
14734 + mov %edx, %es
14735 + mov %edx, %ss
14736
14737 /*
14738 * 9. Balance the stack. And because EAX contain the return value,
14739 * we'd better not clobber it.
14740 */
14741 - leal efi_rt_function_ptr, %edx
14742 - movl (%edx), %ecx
14743 - pushl %ecx
14744 + pushl (efi_rt_function_ptr)
14745
14746 /*
14747 - * 10. Push the saved return address onto the stack and return.
14748 + * 10. Return to the saved return address.
14749 */
14750 - leal saved_return_addr, %edx
14751 - movl (%edx), %ecx
14752 - pushl %ecx
14753 - ret
14754 + jmpl *(saved_return_addr)
14755 ENDPROC(efi_call_phys)
14756 .previous
14757
14758 -.data
14759 +__INITDATA
14760 saved_return_addr:
14761 .long 0
14762 efi_rt_function_ptr:
14763 diff --git a/arch/x86/kernel/efi_stub_64.S b/arch/x86/kernel/efi_stub_64.S
14764 index 4c07cca..2c8427d 100644
14765 --- a/arch/x86/kernel/efi_stub_64.S
14766 +++ b/arch/x86/kernel/efi_stub_64.S
14767 @@ -7,6 +7,7 @@
14768 */
14769
14770 #include <linux/linkage.h>
14771 +#include <asm/alternative-asm.h>
14772
14773 #define SAVE_XMM \
14774 mov %rsp, %rax; \
14775 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
14776 call *%rdi
14777 addq $32, %rsp
14778 RESTORE_XMM
14779 + pax_force_retaddr 0, 1
14780 ret
14781 ENDPROC(efi_call0)
14782
14783 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
14784 call *%rdi
14785 addq $32, %rsp
14786 RESTORE_XMM
14787 + pax_force_retaddr 0, 1
14788 ret
14789 ENDPROC(efi_call1)
14790
14791 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
14792 call *%rdi
14793 addq $32, %rsp
14794 RESTORE_XMM
14795 + pax_force_retaddr 0, 1
14796 ret
14797 ENDPROC(efi_call2)
14798
14799 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
14800 call *%rdi
14801 addq $32, %rsp
14802 RESTORE_XMM
14803 + pax_force_retaddr 0, 1
14804 ret
14805 ENDPROC(efi_call3)
14806
14807 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
14808 call *%rdi
14809 addq $32, %rsp
14810 RESTORE_XMM
14811 + pax_force_retaddr 0, 1
14812 ret
14813 ENDPROC(efi_call4)
14814
14815 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
14816 call *%rdi
14817 addq $48, %rsp
14818 RESTORE_XMM
14819 + pax_force_retaddr 0, 1
14820 ret
14821 ENDPROC(efi_call5)
14822
14823 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
14824 call *%rdi
14825 addq $48, %rsp
14826 RESTORE_XMM
14827 + pax_force_retaddr 0, 1
14828 ret
14829 ENDPROC(efi_call6)
14830 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
14831 index c097e7d..c689cf4 100644
14832 --- a/arch/x86/kernel/entry_32.S
14833 +++ b/arch/x86/kernel/entry_32.S
14834 @@ -185,13 +185,146 @@
14835 /*CFI_REL_OFFSET gs, PT_GS*/
14836 .endm
14837 .macro SET_KERNEL_GS reg
14838 +
14839 +#ifdef CONFIG_CC_STACKPROTECTOR
14840 movl $(__KERNEL_STACK_CANARY), \reg
14841 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14842 + movl $(__USER_DS), \reg
14843 +#else
14844 + xorl \reg, \reg
14845 +#endif
14846 +
14847 movl \reg, %gs
14848 .endm
14849
14850 #endif /* CONFIG_X86_32_LAZY_GS */
14851
14852 -.macro SAVE_ALL
14853 +.macro pax_enter_kernel
14854 +#ifdef CONFIG_PAX_KERNEXEC
14855 + call pax_enter_kernel
14856 +#endif
14857 +.endm
14858 +
14859 +.macro pax_exit_kernel
14860 +#ifdef CONFIG_PAX_KERNEXEC
14861 + call pax_exit_kernel
14862 +#endif
14863 +.endm
14864 +
14865 +#ifdef CONFIG_PAX_KERNEXEC
14866 +ENTRY(pax_enter_kernel)
14867 +#ifdef CONFIG_PARAVIRT
14868 + pushl %eax
14869 + pushl %ecx
14870 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
14871 + mov %eax, %esi
14872 +#else
14873 + mov %cr0, %esi
14874 +#endif
14875 + bts $16, %esi
14876 + jnc 1f
14877 + mov %cs, %esi
14878 + cmp $__KERNEL_CS, %esi
14879 + jz 3f
14880 + ljmp $__KERNEL_CS, $3f
14881 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
14882 +2:
14883 +#ifdef CONFIG_PARAVIRT
14884 + mov %esi, %eax
14885 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14886 +#else
14887 + mov %esi, %cr0
14888 +#endif
14889 +3:
14890 +#ifdef CONFIG_PARAVIRT
14891 + popl %ecx
14892 + popl %eax
14893 +#endif
14894 + ret
14895 +ENDPROC(pax_enter_kernel)
14896 +
14897 +ENTRY(pax_exit_kernel)
14898 +#ifdef CONFIG_PARAVIRT
14899 + pushl %eax
14900 + pushl %ecx
14901 +#endif
14902 + mov %cs, %esi
14903 + cmp $__KERNEXEC_KERNEL_CS, %esi
14904 + jnz 2f
14905 +#ifdef CONFIG_PARAVIRT
14906 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
14907 + mov %eax, %esi
14908 +#else
14909 + mov %cr0, %esi
14910 +#endif
14911 + btr $16, %esi
14912 + ljmp $__KERNEL_CS, $1f
14913 +1:
14914 +#ifdef CONFIG_PARAVIRT
14915 + mov %esi, %eax
14916 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
14917 +#else
14918 + mov %esi, %cr0
14919 +#endif
14920 +2:
14921 +#ifdef CONFIG_PARAVIRT
14922 + popl %ecx
14923 + popl %eax
14924 +#endif
14925 + ret
14926 +ENDPROC(pax_exit_kernel)
14927 +#endif
14928 +
14929 +.macro pax_erase_kstack
14930 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14931 + call pax_erase_kstack
14932 +#endif
14933 +.endm
14934 +
14935 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14936 +/*
14937 + * ebp: thread_info
14938 + * ecx, edx: can be clobbered
14939 + */
14940 +ENTRY(pax_erase_kstack)
14941 + pushl %edi
14942 + pushl %eax
14943 +
14944 + mov TI_lowest_stack(%ebp), %edi
14945 + mov $-0xBEEF, %eax
14946 + std
14947 +
14948 +1: mov %edi, %ecx
14949 + and $THREAD_SIZE_asm - 1, %ecx
14950 + shr $2, %ecx
14951 + repne scasl
14952 + jecxz 2f
14953 +
14954 + cmp $2*16, %ecx
14955 + jc 2f
14956 +
14957 + mov $2*16, %ecx
14958 + repe scasl
14959 + jecxz 2f
14960 + jne 1b
14961 +
14962 +2: cld
14963 + mov %esp, %ecx
14964 + sub %edi, %ecx
14965 + shr $2, %ecx
14966 + rep stosl
14967 +
14968 + mov TI_task_thread_sp0(%ebp), %edi
14969 + sub $128, %edi
14970 + mov %edi, TI_lowest_stack(%ebp)
14971 +
14972 + popl %eax
14973 + popl %edi
14974 + ret
14975 +ENDPROC(pax_erase_kstack)
14976 +#endif
14977 +
14978 +.macro __SAVE_ALL _DS
14979 cld
14980 PUSH_GS
14981 pushl %fs
14982 @@ -224,7 +357,7 @@
14983 pushl %ebx
14984 CFI_ADJUST_CFA_OFFSET 4
14985 CFI_REL_OFFSET ebx, 0
14986 - movl $(__USER_DS), %edx
14987 + movl $\_DS, %edx
14988 movl %edx, %ds
14989 movl %edx, %es
14990 movl $(__KERNEL_PERCPU), %edx
14991 @@ -232,6 +365,15 @@
14992 SET_KERNEL_GS %edx
14993 .endm
14994
14995 +.macro SAVE_ALL
14996 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
14997 + __SAVE_ALL __KERNEL_DS
14998 + pax_enter_kernel
14999 +#else
15000 + __SAVE_ALL __USER_DS
15001 +#endif
15002 +.endm
15003 +
15004 .macro RESTORE_INT_REGS
15005 popl %ebx
15006 CFI_ADJUST_CFA_OFFSET -4
15007 @@ -331,7 +473,7 @@ ENTRY(ret_from_fork)
15008 CFI_ADJUST_CFA_OFFSET -4
15009 jmp syscall_exit
15010 CFI_ENDPROC
15011 -END(ret_from_fork)
15012 +ENDPROC(ret_from_fork)
15013
15014 /*
15015 * Return to user mode is not as complex as all this looks,
15016 @@ -352,7 +494,15 @@ check_userspace:
15017 movb PT_CS(%esp), %al
15018 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
15019 cmpl $USER_RPL, %eax
15020 +
15021 +#ifdef CONFIG_PAX_KERNEXEC
15022 + jae resume_userspace
15023 +
15024 + PAX_EXIT_KERNEL
15025 + jmp resume_kernel
15026 +#else
15027 jb resume_kernel # not returning to v8086 or userspace
15028 +#endif
15029
15030 ENTRY(resume_userspace)
15031 LOCKDEP_SYS_EXIT
15032 @@ -364,8 +514,8 @@ ENTRY(resume_userspace)
15033 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
15034 # int/exception return?
15035 jne work_pending
15036 - jmp restore_all
15037 -END(ret_from_exception)
15038 + jmp restore_all_pax
15039 +ENDPROC(ret_from_exception)
15040
15041 #ifdef CONFIG_PREEMPT
15042 ENTRY(resume_kernel)
15043 @@ -380,7 +530,7 @@ need_resched:
15044 jz restore_all
15045 call preempt_schedule_irq
15046 jmp need_resched
15047 -END(resume_kernel)
15048 +ENDPROC(resume_kernel)
15049 #endif
15050 CFI_ENDPROC
15051
15052 @@ -414,25 +564,36 @@ sysenter_past_esp:
15053 /*CFI_REL_OFFSET cs, 0*/
15054 /*
15055 * Push current_thread_info()->sysenter_return to the stack.
15056 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
15057 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
15058 */
15059 - pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
15060 + pushl $0
15061 CFI_ADJUST_CFA_OFFSET 4
15062 CFI_REL_OFFSET eip, 0
15063
15064 pushl %eax
15065 CFI_ADJUST_CFA_OFFSET 4
15066 SAVE_ALL
15067 + GET_THREAD_INFO(%ebp)
15068 + movl TI_sysenter_return(%ebp),%ebp
15069 + movl %ebp,PT_EIP(%esp)
15070 ENABLE_INTERRUPTS(CLBR_NONE)
15071
15072 /*
15073 * Load the potential sixth argument from user stack.
15074 * Careful about security.
15075 */
15076 + movl PT_OLDESP(%esp),%ebp
15077 +
15078 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15079 + mov PT_OLDSS(%esp),%ds
15080 +1: movl %ds:(%ebp),%ebp
15081 + push %ss
15082 + pop %ds
15083 +#else
15084 cmpl $__PAGE_OFFSET-3,%ebp
15085 jae syscall_fault
15086 1: movl (%ebp),%ebp
15087 +#endif
15088 +
15089 movl %ebp,PT_EBP(%esp)
15090 .section __ex_table,"a"
15091 .align 4
15092 @@ -455,12 +616,24 @@ sysenter_do_call:
15093 testl $_TIF_ALLWORK_MASK, %ecx
15094 jne sysexit_audit
15095 sysenter_exit:
15096 +
15097 +#ifdef CONFIG_PAX_RANDKSTACK
15098 + pushl_cfi %eax
15099 + movl %esp, %eax
15100 + call pax_randomize_kstack
15101 + popl_cfi %eax
15102 +#endif
15103 +
15104 + pax_erase_kstack
15105 +
15106 /* if something modifies registers it must also disable sysexit */
15107 movl PT_EIP(%esp), %edx
15108 movl PT_OLDESP(%esp), %ecx
15109 xorl %ebp,%ebp
15110 TRACE_IRQS_ON
15111 1: mov PT_FS(%esp), %fs
15112 +2: mov PT_DS(%esp), %ds
15113 +3: mov PT_ES(%esp), %es
15114 PTGS_TO_GS
15115 ENABLE_INTERRUPTS_SYSEXIT
15116
15117 @@ -477,6 +650,9 @@ sysenter_audit:
15118 movl %eax,%edx /* 2nd arg: syscall number */
15119 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
15120 call audit_syscall_entry
15121 +
15122 + pax_erase_kstack
15123 +
15124 pushl %ebx
15125 CFI_ADJUST_CFA_OFFSET 4
15126 movl PT_EAX(%esp),%eax /* reload syscall number */
15127 @@ -504,11 +680,17 @@ sysexit_audit:
15128
15129 CFI_ENDPROC
15130 .pushsection .fixup,"ax"
15131 -2: movl $0,PT_FS(%esp)
15132 +4: movl $0,PT_FS(%esp)
15133 + jmp 1b
15134 +5: movl $0,PT_DS(%esp)
15135 + jmp 1b
15136 +6: movl $0,PT_ES(%esp)
15137 jmp 1b
15138 .section __ex_table,"a"
15139 .align 4
15140 - .long 1b,2b
15141 + .long 1b,4b
15142 + .long 2b,5b
15143 + .long 3b,6b
15144 .popsection
15145 PTGS_TO_GS_EX
15146 ENDPROC(ia32_sysenter_target)
15147 @@ -538,6 +720,15 @@ syscall_exit:
15148 testl $_TIF_ALLWORK_MASK, %ecx # current->work
15149 jne syscall_exit_work
15150
15151 +restore_all_pax:
15152 +
15153 +#ifdef CONFIG_PAX_RANDKSTACK
15154 + movl %esp, %eax
15155 + call pax_randomize_kstack
15156 +#endif
15157 +
15158 + pax_erase_kstack
15159 +
15160 restore_all:
15161 TRACE_IRQS_IRET
15162 restore_all_notrace:
15163 @@ -602,10 +793,29 @@ ldt_ss:
15164 mov PT_OLDESP(%esp), %eax /* load userspace esp */
15165 mov %dx, %ax /* eax: new kernel esp */
15166 sub %eax, %edx /* offset (low word is 0) */
15167 - PER_CPU(gdt_page, %ebx)
15168 +#ifdef CONFIG_SMP
15169 + movl PER_CPU_VAR(cpu_number), %ebx
15170 + shll $PAGE_SHIFT_asm, %ebx
15171 + addl $cpu_gdt_table, %ebx
15172 +#else
15173 + movl $cpu_gdt_table, %ebx
15174 +#endif
15175 shr $16, %edx
15176 +
15177 +#ifdef CONFIG_PAX_KERNEXEC
15178 + mov %cr0, %esi
15179 + btr $16, %esi
15180 + mov %esi, %cr0
15181 +#endif
15182 +
15183 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
15184 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
15185 +
15186 +#ifdef CONFIG_PAX_KERNEXEC
15187 + bts $16, %esi
15188 + mov %esi, %cr0
15189 +#endif
15190 +
15191 pushl $__ESPFIX_SS
15192 CFI_ADJUST_CFA_OFFSET 4
15193 push %eax /* new kernel esp */
15194 @@ -636,36 +846,30 @@ work_resched:
15195 movl TI_flags(%ebp), %ecx
15196 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
15197 # than syscall tracing?
15198 - jz restore_all
15199 + jz restore_all_pax
15200 testb $_TIF_NEED_RESCHED, %cl
15201 jnz work_resched
15202
15203 work_notifysig: # deal with pending signals and
15204 # notify-resume requests
15205 + movl %esp, %eax
15206 #ifdef CONFIG_VM86
15207 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
15208 - movl %esp, %eax
15209 - jne work_notifysig_v86 # returning to kernel-space or
15210 + jz 1f # returning to kernel-space or
15211 # vm86-space
15212 - xorl %edx, %edx
15213 - call do_notify_resume
15214 - jmp resume_userspace_sig
15215
15216 - ALIGN
15217 -work_notifysig_v86:
15218 pushl %ecx # save ti_flags for do_notify_resume
15219 CFI_ADJUST_CFA_OFFSET 4
15220 call save_v86_state # %eax contains pt_regs pointer
15221 popl %ecx
15222 CFI_ADJUST_CFA_OFFSET -4
15223 movl %eax, %esp
15224 -#else
15225 - movl %esp, %eax
15226 +1:
15227 #endif
15228 xorl %edx, %edx
15229 call do_notify_resume
15230 jmp resume_userspace_sig
15231 -END(work_pending)
15232 +ENDPROC(work_pending)
15233
15234 # perform syscall exit tracing
15235 ALIGN
15236 @@ -673,11 +877,14 @@ syscall_trace_entry:
15237 movl $-ENOSYS,PT_EAX(%esp)
15238 movl %esp, %eax
15239 call syscall_trace_enter
15240 +
15241 + pax_erase_kstack
15242 +
15243 /* What it returned is what we'll actually use. */
15244 cmpl $(nr_syscalls), %eax
15245 jnae syscall_call
15246 jmp syscall_exit
15247 -END(syscall_trace_entry)
15248 +ENDPROC(syscall_trace_entry)
15249
15250 # perform syscall exit tracing
15251 ALIGN
15252 @@ -690,20 +897,24 @@ syscall_exit_work:
15253 movl %esp, %eax
15254 call syscall_trace_leave
15255 jmp resume_userspace
15256 -END(syscall_exit_work)
15257 +ENDPROC(syscall_exit_work)
15258 CFI_ENDPROC
15259
15260 RING0_INT_FRAME # can't unwind into user space anyway
15261 syscall_fault:
15262 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15263 + push %ss
15264 + pop %ds
15265 +#endif
15266 GET_THREAD_INFO(%ebp)
15267 movl $-EFAULT,PT_EAX(%esp)
15268 jmp resume_userspace
15269 -END(syscall_fault)
15270 +ENDPROC(syscall_fault)
15271
15272 syscall_badsys:
15273 movl $-ENOSYS,PT_EAX(%esp)
15274 jmp resume_userspace
15275 -END(syscall_badsys)
15276 +ENDPROC(syscall_badsys)
15277 CFI_ENDPROC
15278
15279 /*
15280 @@ -726,6 +937,33 @@ PTREGSCALL(rt_sigreturn)
15281 PTREGSCALL(vm86)
15282 PTREGSCALL(vm86old)
15283
15284 + ALIGN;
15285 +ENTRY(kernel_execve)
15286 + push %ebp
15287 + sub $PT_OLDSS+4,%esp
15288 + push %edi
15289 + push %ecx
15290 + push %eax
15291 + lea 3*4(%esp),%edi
15292 + mov $PT_OLDSS/4+1,%ecx
15293 + xorl %eax,%eax
15294 + rep stosl
15295 + pop %eax
15296 + pop %ecx
15297 + pop %edi
15298 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
15299 + mov %eax,PT_EBX(%esp)
15300 + mov %edx,PT_ECX(%esp)
15301 + mov %ecx,PT_EDX(%esp)
15302 + mov %esp,%eax
15303 + call sys_execve
15304 + GET_THREAD_INFO(%ebp)
15305 + test %eax,%eax
15306 + jz syscall_exit
15307 + add $PT_OLDSS+4,%esp
15308 + pop %ebp
15309 + ret
15310 +
15311 .macro FIXUP_ESPFIX_STACK
15312 /*
15313 * Switch back for ESPFIX stack to the normal zerobased stack
15314 @@ -735,7 +973,13 @@ PTREGSCALL(vm86old)
15315 * normal stack and adjusts ESP with the matching offset.
15316 */
15317 /* fixup the stack */
15318 - PER_CPU(gdt_page, %ebx)
15319 +#ifdef CONFIG_SMP
15320 + movl PER_CPU_VAR(cpu_number), %ebx
15321 + shll $PAGE_SHIFT_asm, %ebx
15322 + addl $cpu_gdt_table, %ebx
15323 +#else
15324 + movl $cpu_gdt_table, %ebx
15325 +#endif
15326 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
15327 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
15328 shl $16, %eax
15329 @@ -793,7 +1037,7 @@ vector=vector+1
15330 .endr
15331 2: jmp common_interrupt
15332 .endr
15333 -END(irq_entries_start)
15334 +ENDPROC(irq_entries_start)
15335
15336 .previous
15337 END(interrupt)
15338 @@ -840,7 +1084,7 @@ ENTRY(coprocessor_error)
15339 CFI_ADJUST_CFA_OFFSET 4
15340 jmp error_code
15341 CFI_ENDPROC
15342 -END(coprocessor_error)
15343 +ENDPROC(coprocessor_error)
15344
15345 ENTRY(simd_coprocessor_error)
15346 RING0_INT_FRAME
15347 @@ -850,7 +1094,7 @@ ENTRY(simd_coprocessor_error)
15348 CFI_ADJUST_CFA_OFFSET 4
15349 jmp error_code
15350 CFI_ENDPROC
15351 -END(simd_coprocessor_error)
15352 +ENDPROC(simd_coprocessor_error)
15353
15354 ENTRY(device_not_available)
15355 RING0_INT_FRAME
15356 @@ -860,7 +1104,7 @@ ENTRY(device_not_available)
15357 CFI_ADJUST_CFA_OFFSET 4
15358 jmp error_code
15359 CFI_ENDPROC
15360 -END(device_not_available)
15361 +ENDPROC(device_not_available)
15362
15363 #ifdef CONFIG_PARAVIRT
15364 ENTRY(native_iret)
15365 @@ -869,12 +1113,12 @@ ENTRY(native_iret)
15366 .align 4
15367 .long native_iret, iret_exc
15368 .previous
15369 -END(native_iret)
15370 +ENDPROC(native_iret)
15371
15372 ENTRY(native_irq_enable_sysexit)
15373 sti
15374 sysexit
15375 -END(native_irq_enable_sysexit)
15376 +ENDPROC(native_irq_enable_sysexit)
15377 #endif
15378
15379 ENTRY(overflow)
15380 @@ -885,7 +1129,7 @@ ENTRY(overflow)
15381 CFI_ADJUST_CFA_OFFSET 4
15382 jmp error_code
15383 CFI_ENDPROC
15384 -END(overflow)
15385 +ENDPROC(overflow)
15386
15387 ENTRY(bounds)
15388 RING0_INT_FRAME
15389 @@ -895,7 +1139,7 @@ ENTRY(bounds)
15390 CFI_ADJUST_CFA_OFFSET 4
15391 jmp error_code
15392 CFI_ENDPROC
15393 -END(bounds)
15394 +ENDPROC(bounds)
15395
15396 ENTRY(invalid_op)
15397 RING0_INT_FRAME
15398 @@ -905,7 +1149,7 @@ ENTRY(invalid_op)
15399 CFI_ADJUST_CFA_OFFSET 4
15400 jmp error_code
15401 CFI_ENDPROC
15402 -END(invalid_op)
15403 +ENDPROC(invalid_op)
15404
15405 ENTRY(coprocessor_segment_overrun)
15406 RING0_INT_FRAME
15407 @@ -915,7 +1159,7 @@ ENTRY(coprocessor_segment_overrun)
15408 CFI_ADJUST_CFA_OFFSET 4
15409 jmp error_code
15410 CFI_ENDPROC
15411 -END(coprocessor_segment_overrun)
15412 +ENDPROC(coprocessor_segment_overrun)
15413
15414 ENTRY(invalid_TSS)
15415 RING0_EC_FRAME
15416 @@ -923,7 +1167,7 @@ ENTRY(invalid_TSS)
15417 CFI_ADJUST_CFA_OFFSET 4
15418 jmp error_code
15419 CFI_ENDPROC
15420 -END(invalid_TSS)
15421 +ENDPROC(invalid_TSS)
15422
15423 ENTRY(segment_not_present)
15424 RING0_EC_FRAME
15425 @@ -931,7 +1175,7 @@ ENTRY(segment_not_present)
15426 CFI_ADJUST_CFA_OFFSET 4
15427 jmp error_code
15428 CFI_ENDPROC
15429 -END(segment_not_present)
15430 +ENDPROC(segment_not_present)
15431
15432 ENTRY(stack_segment)
15433 RING0_EC_FRAME
15434 @@ -939,7 +1183,7 @@ ENTRY(stack_segment)
15435 CFI_ADJUST_CFA_OFFSET 4
15436 jmp error_code
15437 CFI_ENDPROC
15438 -END(stack_segment)
15439 +ENDPROC(stack_segment)
15440
15441 ENTRY(alignment_check)
15442 RING0_EC_FRAME
15443 @@ -947,7 +1191,7 @@ ENTRY(alignment_check)
15444 CFI_ADJUST_CFA_OFFSET 4
15445 jmp error_code
15446 CFI_ENDPROC
15447 -END(alignment_check)
15448 +ENDPROC(alignment_check)
15449
15450 ENTRY(divide_error)
15451 RING0_INT_FRAME
15452 @@ -957,7 +1201,7 @@ ENTRY(divide_error)
15453 CFI_ADJUST_CFA_OFFSET 4
15454 jmp error_code
15455 CFI_ENDPROC
15456 -END(divide_error)
15457 +ENDPROC(divide_error)
15458
15459 #ifdef CONFIG_X86_MCE
15460 ENTRY(machine_check)
15461 @@ -968,7 +1212,7 @@ ENTRY(machine_check)
15462 CFI_ADJUST_CFA_OFFSET 4
15463 jmp error_code
15464 CFI_ENDPROC
15465 -END(machine_check)
15466 +ENDPROC(machine_check)
15467 #endif
15468
15469 ENTRY(spurious_interrupt_bug)
15470 @@ -979,7 +1223,7 @@ ENTRY(spurious_interrupt_bug)
15471 CFI_ADJUST_CFA_OFFSET 4
15472 jmp error_code
15473 CFI_ENDPROC
15474 -END(spurious_interrupt_bug)
15475 +ENDPROC(spurious_interrupt_bug)
15476
15477 ENTRY(kernel_thread_helper)
15478 pushl $0 # fake return address for unwinder
15479 @@ -1095,7 +1339,7 @@ ENDPROC(xen_failsafe_callback)
15480
15481 ENTRY(mcount)
15482 ret
15483 -END(mcount)
15484 +ENDPROC(mcount)
15485
15486 ENTRY(ftrace_caller)
15487 cmpl $0, function_trace_stop
15488 @@ -1124,7 +1368,7 @@ ftrace_graph_call:
15489 .globl ftrace_stub
15490 ftrace_stub:
15491 ret
15492 -END(ftrace_caller)
15493 +ENDPROC(ftrace_caller)
15494
15495 #else /* ! CONFIG_DYNAMIC_FTRACE */
15496
15497 @@ -1160,7 +1404,7 @@ trace:
15498 popl %ecx
15499 popl %eax
15500 jmp ftrace_stub
15501 -END(mcount)
15502 +ENDPROC(mcount)
15503 #endif /* CONFIG_DYNAMIC_FTRACE */
15504 #endif /* CONFIG_FUNCTION_TRACER */
15505
15506 @@ -1181,7 +1425,7 @@ ENTRY(ftrace_graph_caller)
15507 popl %ecx
15508 popl %eax
15509 ret
15510 -END(ftrace_graph_caller)
15511 +ENDPROC(ftrace_graph_caller)
15512
15513 .globl return_to_handler
15514 return_to_handler:
15515 @@ -1198,7 +1442,6 @@ return_to_handler:
15516 ret
15517 #endif
15518
15519 -.section .rodata,"a"
15520 #include "syscall_table_32.S"
15521
15522 syscall_table_size=(.-sys_call_table)
15523 @@ -1255,15 +1498,18 @@ error_code:
15524 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
15525 REG_TO_PTGS %ecx
15526 SET_KERNEL_GS %ecx
15527 - movl $(__USER_DS), %ecx
15528 + movl $(__KERNEL_DS), %ecx
15529 movl %ecx, %ds
15530 movl %ecx, %es
15531 +
15532 + pax_enter_kernel
15533 +
15534 TRACE_IRQS_OFF
15535 movl %esp,%eax # pt_regs pointer
15536 call *%edi
15537 jmp ret_from_exception
15538 CFI_ENDPROC
15539 -END(page_fault)
15540 +ENDPROC(page_fault)
15541
15542 /*
15543 * Debug traps and NMI can happen at the one SYSENTER instruction
15544 @@ -1309,7 +1555,7 @@ debug_stack_correct:
15545 call do_debug
15546 jmp ret_from_exception
15547 CFI_ENDPROC
15548 -END(debug)
15549 +ENDPROC(debug)
15550
15551 /*
15552 * NMI is doubly nasty. It can happen _while_ we're handling
15553 @@ -1351,6 +1597,9 @@ nmi_stack_correct:
15554 xorl %edx,%edx # zero error code
15555 movl %esp,%eax # pt_regs pointer
15556 call do_nmi
15557 +
15558 + pax_exit_kernel
15559 +
15560 jmp restore_all_notrace
15561 CFI_ENDPROC
15562
15563 @@ -1391,12 +1640,15 @@ nmi_espfix_stack:
15564 FIXUP_ESPFIX_STACK # %eax == %esp
15565 xorl %edx,%edx # zero error code
15566 call do_nmi
15567 +
15568 + pax_exit_kernel
15569 +
15570 RESTORE_REGS
15571 lss 12+4(%esp), %esp # back to espfix stack
15572 CFI_ADJUST_CFA_OFFSET -24
15573 jmp irq_return
15574 CFI_ENDPROC
15575 -END(nmi)
15576 +ENDPROC(nmi)
15577
15578 ENTRY(int3)
15579 RING0_INT_FRAME
15580 @@ -1409,7 +1661,7 @@ ENTRY(int3)
15581 call do_int3
15582 jmp ret_from_exception
15583 CFI_ENDPROC
15584 -END(int3)
15585 +ENDPROC(int3)
15586
15587 ENTRY(general_protection)
15588 RING0_EC_FRAME
15589 @@ -1417,7 +1669,7 @@ ENTRY(general_protection)
15590 CFI_ADJUST_CFA_OFFSET 4
15591 jmp error_code
15592 CFI_ENDPROC
15593 -END(general_protection)
15594 +ENDPROC(general_protection)
15595
15596 /*
15597 * End of kprobes section
15598 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
15599 index 34a56a9..4aa5c8b 100644
15600 --- a/arch/x86/kernel/entry_64.S
15601 +++ b/arch/x86/kernel/entry_64.S
15602 @@ -53,6 +53,8 @@
15603 #include <asm/paravirt.h>
15604 #include <asm/ftrace.h>
15605 #include <asm/percpu.h>
15606 +#include <asm/pgtable.h>
15607 +#include <asm/alternative-asm.h>
15608
15609 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15610 #include <linux/elf-em.h>
15611 @@ -64,8 +66,9 @@
15612 #ifdef CONFIG_FUNCTION_TRACER
15613 #ifdef CONFIG_DYNAMIC_FTRACE
15614 ENTRY(mcount)
15615 + pax_force_retaddr
15616 retq
15617 -END(mcount)
15618 +ENDPROC(mcount)
15619
15620 ENTRY(ftrace_caller)
15621 cmpl $0, function_trace_stop
15622 @@ -88,8 +91,9 @@ GLOBAL(ftrace_graph_call)
15623 #endif
15624
15625 GLOBAL(ftrace_stub)
15626 + pax_force_retaddr
15627 retq
15628 -END(ftrace_caller)
15629 +ENDPROC(ftrace_caller)
15630
15631 #else /* ! CONFIG_DYNAMIC_FTRACE */
15632 ENTRY(mcount)
15633 @@ -108,6 +112,7 @@ ENTRY(mcount)
15634 #endif
15635
15636 GLOBAL(ftrace_stub)
15637 + pax_force_retaddr
15638 retq
15639
15640 trace:
15641 @@ -117,12 +122,13 @@ trace:
15642 movq 8(%rbp), %rsi
15643 subq $MCOUNT_INSN_SIZE, %rdi
15644
15645 + pax_force_fptr ftrace_trace_function
15646 call *ftrace_trace_function
15647
15648 MCOUNT_RESTORE_FRAME
15649
15650 jmp ftrace_stub
15651 -END(mcount)
15652 +ENDPROC(mcount)
15653 #endif /* CONFIG_DYNAMIC_FTRACE */
15654 #endif /* CONFIG_FUNCTION_TRACER */
15655
15656 @@ -142,8 +148,9 @@ ENTRY(ftrace_graph_caller)
15657
15658 MCOUNT_RESTORE_FRAME
15659
15660 + pax_force_retaddr
15661 retq
15662 -END(ftrace_graph_caller)
15663 +ENDPROC(ftrace_graph_caller)
15664
15665 GLOBAL(return_to_handler)
15666 subq $24, %rsp
15667 @@ -159,6 +166,7 @@ GLOBAL(return_to_handler)
15668 movq 8(%rsp), %rdx
15669 movq (%rsp), %rax
15670 addq $16, %rsp
15671 + pax_force_retaddr
15672 retq
15673 #endif
15674
15675 @@ -174,6 +182,282 @@ ENTRY(native_usergs_sysret64)
15676 ENDPROC(native_usergs_sysret64)
15677 #endif /* CONFIG_PARAVIRT */
15678
15679 + .macro ljmpq sel, off
15680 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15681 + .byte 0x48; ljmp *1234f(%rip)
15682 + .pushsection .rodata
15683 + .align 16
15684 + 1234: .quad \off; .word \sel
15685 + .popsection
15686 +#else
15687 + pushq $\sel
15688 + pushq $\off
15689 + lretq
15690 +#endif
15691 + .endm
15692 +
15693 + .macro pax_enter_kernel
15694 + pax_set_fptr_mask
15695 +#ifdef CONFIG_PAX_KERNEXEC
15696 + call pax_enter_kernel
15697 +#endif
15698 + .endm
15699 +
15700 + .macro pax_exit_kernel
15701 +#ifdef CONFIG_PAX_KERNEXEC
15702 + call pax_exit_kernel
15703 +#endif
15704 + .endm
15705 +
15706 +#ifdef CONFIG_PAX_KERNEXEC
15707 +ENTRY(pax_enter_kernel)
15708 + pushq %rdi
15709 +
15710 +#ifdef CONFIG_PARAVIRT
15711 + PV_SAVE_REGS(CLBR_RDI)
15712 +#endif
15713 +
15714 + GET_CR0_INTO_RDI
15715 + bts $16,%rdi
15716 + jnc 3f
15717 + mov %cs,%edi
15718 + cmp $__KERNEL_CS,%edi
15719 + jnz 2f
15720 +1:
15721 +
15722 +#ifdef CONFIG_PARAVIRT
15723 + PV_RESTORE_REGS(CLBR_RDI)
15724 +#endif
15725 +
15726 + popq %rdi
15727 + pax_force_retaddr
15728 + retq
15729 +
15730 +2: ljmpq __KERNEL_CS,1f
15731 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
15732 +4: SET_RDI_INTO_CR0
15733 + jmp 1b
15734 +ENDPROC(pax_enter_kernel)
15735 +
15736 +ENTRY(pax_exit_kernel)
15737 + pushq %rdi
15738 +
15739 +#ifdef CONFIG_PARAVIRT
15740 + PV_SAVE_REGS(CLBR_RDI)
15741 +#endif
15742 +
15743 + mov %cs,%rdi
15744 + cmp $__KERNEXEC_KERNEL_CS,%edi
15745 + jz 2f
15746 +1:
15747 +
15748 +#ifdef CONFIG_PARAVIRT
15749 + PV_RESTORE_REGS(CLBR_RDI);
15750 +#endif
15751 +
15752 + popq %rdi
15753 + pax_force_retaddr
15754 + retq
15755 +
15756 +2: GET_CR0_INTO_RDI
15757 + btr $16,%rdi
15758 + ljmpq __KERNEL_CS,3f
15759 +3: SET_RDI_INTO_CR0
15760 + jmp 1b
15761 +#ifdef CONFIG_PARAVIRT
15762 + PV_RESTORE_REGS(CLBR_RDI);
15763 +#endif
15764 +
15765 + popq %rdi
15766 + pax_force_retaddr
15767 + retq
15768 +ENDPROC(pax_exit_kernel)
15769 +#endif
15770 +
15771 + .macro pax_enter_kernel_user
15772 + pax_set_fptr_mask
15773 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15774 + call pax_enter_kernel_user
15775 +#endif
15776 + .endm
15777 +
15778 + .macro pax_exit_kernel_user
15779 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15780 + call pax_exit_kernel_user
15781 +#endif
15782 +#ifdef CONFIG_PAX_RANDKSTACK
15783 + pushq %rax
15784 + call pax_randomize_kstack
15785 + popq %rax
15786 +#endif
15787 + .endm
15788 +
15789 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15790 +ENTRY(pax_enter_kernel_user)
15791 + pushq %rdi
15792 + pushq %rbx
15793 +
15794 +#ifdef CONFIG_PARAVIRT
15795 + PV_SAVE_REGS(CLBR_RDI)
15796 +#endif
15797 +
15798 + GET_CR3_INTO_RDI
15799 + mov %rdi,%rbx
15800 + add $__START_KERNEL_map,%rbx
15801 + sub phys_base(%rip),%rbx
15802 +
15803 +#ifdef CONFIG_PARAVIRT
15804 + pushq %rdi
15805 + cmpl $0, pv_info+PARAVIRT_enabled
15806 + jz 1f
15807 + i = 0
15808 + .rept USER_PGD_PTRS
15809 + mov i*8(%rbx),%rsi
15810 + mov $0,%sil
15811 + lea i*8(%rbx),%rdi
15812 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15813 + i = i + 1
15814 + .endr
15815 + jmp 2f
15816 +1:
15817 +#endif
15818 +
15819 + i = 0
15820 + .rept USER_PGD_PTRS
15821 + movb $0,i*8(%rbx)
15822 + i = i + 1
15823 + .endr
15824 +
15825 +#ifdef CONFIG_PARAVIRT
15826 +2: popq %rdi
15827 +#endif
15828 + SET_RDI_INTO_CR3
15829 +
15830 +#ifdef CONFIG_PAX_KERNEXEC
15831 + GET_CR0_INTO_RDI
15832 + bts $16,%rdi
15833 + SET_RDI_INTO_CR0
15834 +#endif
15835 +
15836 +#ifdef CONFIG_PARAVIRT
15837 + PV_RESTORE_REGS(CLBR_RDI)
15838 +#endif
15839 +
15840 + popq %rbx
15841 + popq %rdi
15842 + pax_force_retaddr
15843 + retq
15844 +ENDPROC(pax_enter_kernel_user)
15845 +
15846 +ENTRY(pax_exit_kernel_user)
15847 + push %rdi
15848 +
15849 +#ifdef CONFIG_PARAVIRT
15850 + pushq %rbx
15851 + PV_SAVE_REGS(CLBR_RDI)
15852 +#endif
15853 +
15854 +#ifdef CONFIG_PAX_KERNEXEC
15855 + GET_CR0_INTO_RDI
15856 + btr $16,%rdi
15857 + SET_RDI_INTO_CR0
15858 +#endif
15859 +
15860 + GET_CR3_INTO_RDI
15861 + add $__START_KERNEL_map,%rdi
15862 + sub phys_base(%rip),%rdi
15863 +
15864 +#ifdef CONFIG_PARAVIRT
15865 + cmpl $0, pv_info+PARAVIRT_enabled
15866 + jz 1f
15867 + mov %rdi,%rbx
15868 + i = 0
15869 + .rept USER_PGD_PTRS
15870 + mov i*8(%rbx),%rsi
15871 + mov $0x67,%sil
15872 + lea i*8(%rbx),%rdi
15873 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15874 + i = i + 1
15875 + .endr
15876 + jmp 2f
15877 +1:
15878 +#endif
15879 +
15880 + i = 0
15881 + .rept USER_PGD_PTRS
15882 + movb $0x67,i*8(%rdi)
15883 + i = i + 1
15884 + .endr
15885 +
15886 +#ifdef CONFIG_PARAVIRT
15887 +2: PV_RESTORE_REGS(CLBR_RDI)
15888 + popq %rbx
15889 +#endif
15890 +
15891 + popq %rdi
15892 + pax_force_retaddr
15893 + retq
15894 +ENDPROC(pax_exit_kernel_user)
15895 +#endif
15896 +
15897 +.macro pax_erase_kstack
15898 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15899 + call pax_erase_kstack
15900 +#endif
15901 +.endm
15902 +
15903 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15904 +/*
15905 + * r11: thread_info
15906 + * rcx, rdx: can be clobbered
15907 + */
15908 +ENTRY(pax_erase_kstack)
15909 + pushq %rdi
15910 + pushq %rax
15911 + pushq %r11
15912 +
15913 + GET_THREAD_INFO(%r11)
15914 + mov TI_lowest_stack(%r11), %rdi
15915 + mov $-0xBEEF, %rax
15916 + std
15917 +
15918 +1: mov %edi, %ecx
15919 + and $THREAD_SIZE_asm - 1, %ecx
15920 + shr $3, %ecx
15921 + repne scasq
15922 + jecxz 2f
15923 +
15924 + cmp $2*8, %ecx
15925 + jc 2f
15926 +
15927 + mov $2*8, %ecx
15928 + repe scasq
15929 + jecxz 2f
15930 + jne 1b
15931 +
15932 +2: cld
15933 + mov %esp, %ecx
15934 + sub %edi, %ecx
15935 +
15936 + cmp $THREAD_SIZE_asm, %rcx
15937 + jb 3f
15938 + ud2
15939 +3:
15940 +
15941 + shr $3, %ecx
15942 + rep stosq
15943 +
15944 + mov TI_task_thread_sp0(%r11), %rdi
15945 + sub $256, %rdi
15946 + mov %rdi, TI_lowest_stack(%r11)
15947 +
15948 + popq %r11
15949 + popq %rax
15950 + popq %rdi
15951 + pax_force_retaddr
15952 + ret
15953 +ENDPROC(pax_erase_kstack)
15954 +#endif
15955
15956 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
15957 #ifdef CONFIG_TRACE_IRQFLAGS
15958 @@ -233,8 +517,8 @@ ENDPROC(native_usergs_sysret64)
15959 .endm
15960
15961 .macro UNFAKE_STACK_FRAME
15962 - addq $8*6, %rsp
15963 - CFI_ADJUST_CFA_OFFSET -(6*8)
15964 + addq $8*6 + ARG_SKIP, %rsp
15965 + CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
15966 .endm
15967
15968 /*
15969 @@ -317,7 +601,7 @@ ENTRY(save_args)
15970 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
15971 movq_cfi rbp, 8 /* push %rbp */
15972 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
15973 - testl $3, CS(%rdi)
15974 + testb $3, CS(%rdi)
15975 je 1f
15976 SWAPGS
15977 /*
15978 @@ -337,9 +621,10 @@ ENTRY(save_args)
15979 * We entered an interrupt context - irqs are off:
15980 */
15981 2: TRACE_IRQS_OFF
15982 + pax_force_retaddr
15983 ret
15984 CFI_ENDPROC
15985 -END(save_args)
15986 +ENDPROC(save_args)
15987
15988 ENTRY(save_rest)
15989 PARTIAL_FRAME 1 REST_SKIP+8
15990 @@ -352,9 +637,10 @@ ENTRY(save_rest)
15991 movq_cfi r15, R15+16
15992 movq %r11, 8(%rsp) /* return address */
15993 FIXUP_TOP_OF_STACK %r11, 16
15994 + pax_force_retaddr
15995 ret
15996 CFI_ENDPROC
15997 -END(save_rest)
15998 +ENDPROC(save_rest)
15999
16000 /* save complete stack frame */
16001 .pushsection .kprobes.text, "ax"
16002 @@ -383,9 +669,10 @@ ENTRY(save_paranoid)
16003 js 1f /* negative -> in kernel */
16004 SWAPGS
16005 xorl %ebx,%ebx
16006 -1: ret
16007 +1: pax_force_retaddr_bts
16008 + ret
16009 CFI_ENDPROC
16010 -END(save_paranoid)
16011 +ENDPROC(save_paranoid)
16012 .popsection
16013
16014 /*
16015 @@ -409,7 +696,7 @@ ENTRY(ret_from_fork)
16016
16017 RESTORE_REST
16018
16019 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16020 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16021 je int_ret_from_sys_call
16022
16023 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
16024 @@ -419,7 +706,7 @@ ENTRY(ret_from_fork)
16025 jmp ret_from_sys_call # go to the SYSRET fastpath
16026
16027 CFI_ENDPROC
16028 -END(ret_from_fork)
16029 +ENDPROC(ret_from_fork)
16030
16031 /*
16032 * System call entry. Upto 6 arguments in registers are supported.
16033 @@ -455,7 +742,7 @@ END(ret_from_fork)
16034 ENTRY(system_call)
16035 CFI_STARTPROC simple
16036 CFI_SIGNAL_FRAME
16037 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
16038 + CFI_DEF_CFA rsp,0
16039 CFI_REGISTER rip,rcx
16040 /*CFI_REGISTER rflags,r11*/
16041 SWAPGS_UNSAFE_STACK
16042 @@ -468,12 +755,13 @@ ENTRY(system_call_after_swapgs)
16043
16044 movq %rsp,PER_CPU_VAR(old_rsp)
16045 movq PER_CPU_VAR(kernel_stack),%rsp
16046 + SAVE_ARGS 8*6,1
16047 + pax_enter_kernel_user
16048 /*
16049 * No need to follow this irqs off/on section - it's straight
16050 * and short:
16051 */
16052 ENABLE_INTERRUPTS(CLBR_NONE)
16053 - SAVE_ARGS 8,1
16054 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
16055 movq %rcx,RIP-ARGOFFSET(%rsp)
16056 CFI_REL_OFFSET rip,RIP-ARGOFFSET
16057 @@ -483,7 +771,7 @@ ENTRY(system_call_after_swapgs)
16058 system_call_fastpath:
16059 cmpq $__NR_syscall_max,%rax
16060 ja badsys
16061 - movq %r10,%rcx
16062 + movq R10-ARGOFFSET(%rsp),%rcx
16063 call *sys_call_table(,%rax,8) # XXX: rip relative
16064 movq %rax,RAX-ARGOFFSET(%rsp)
16065 /*
16066 @@ -502,6 +790,8 @@ sysret_check:
16067 andl %edi,%edx
16068 jnz sysret_careful
16069 CFI_REMEMBER_STATE
16070 + pax_exit_kernel_user
16071 + pax_erase_kstack
16072 /*
16073 * sysretq will re-enable interrupts:
16074 */
16075 @@ -555,14 +845,18 @@ badsys:
16076 * jump back to the normal fast path.
16077 */
16078 auditsys:
16079 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
16080 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
16081 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
16082 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
16083 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
16084 movq %rax,%rsi /* 2nd arg: syscall number */
16085 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
16086 call audit_syscall_entry
16087 +
16088 + pax_erase_kstack
16089 +
16090 LOAD_ARGS 0 /* reload call-clobbered registers */
16091 + pax_set_fptr_mask
16092 jmp system_call_fastpath
16093
16094 /*
16095 @@ -592,16 +886,20 @@ tracesys:
16096 FIXUP_TOP_OF_STACK %rdi
16097 movq %rsp,%rdi
16098 call syscall_trace_enter
16099 +
16100 + pax_erase_kstack
16101 +
16102 /*
16103 * Reload arg registers from stack in case ptrace changed them.
16104 * We don't reload %rax because syscall_trace_enter() returned
16105 * the value it wants us to use in the table lookup.
16106 */
16107 LOAD_ARGS ARGOFFSET, 1
16108 + pax_set_fptr_mask
16109 RESTORE_REST
16110 cmpq $__NR_syscall_max,%rax
16111 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
16112 - movq %r10,%rcx /* fixup for C */
16113 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
16114 call *sys_call_table(,%rax,8)
16115 movq %rax,RAX-ARGOFFSET(%rsp)
16116 /* Use IRET because user could have changed frame */
16117 @@ -613,7 +911,7 @@ tracesys:
16118 GLOBAL(int_ret_from_sys_call)
16119 DISABLE_INTERRUPTS(CLBR_NONE)
16120 TRACE_IRQS_OFF
16121 - testl $3,CS-ARGOFFSET(%rsp)
16122 + testb $3,CS-ARGOFFSET(%rsp)
16123 je retint_restore_args
16124 movl $_TIF_ALLWORK_MASK,%edi
16125 /* edi: mask to check */
16126 @@ -674,7 +972,7 @@ int_restore_rest:
16127 TRACE_IRQS_OFF
16128 jmp int_with_check
16129 CFI_ENDPROC
16130 -END(system_call)
16131 +ENDPROC(system_call)
16132
16133 /*
16134 * Certain special system calls that need to save a complete full stack frame.
16135 @@ -690,7 +988,7 @@ ENTRY(\label)
16136 call \func
16137 jmp ptregscall_common
16138 CFI_ENDPROC
16139 -END(\label)
16140 +ENDPROC(\label)
16141 .endm
16142
16143 PTREGSCALL stub_clone, sys_clone, %r8
16144 @@ -708,9 +1006,10 @@ ENTRY(ptregscall_common)
16145 movq_cfi_restore R12+8, r12
16146 movq_cfi_restore RBP+8, rbp
16147 movq_cfi_restore RBX+8, rbx
16148 + pax_force_retaddr
16149 ret $REST_SKIP /* pop extended registers */
16150 CFI_ENDPROC
16151 -END(ptregscall_common)
16152 +ENDPROC(ptregscall_common)
16153
16154 ENTRY(stub_execve)
16155 CFI_STARTPROC
16156 @@ -726,7 +1025,7 @@ ENTRY(stub_execve)
16157 RESTORE_REST
16158 jmp int_ret_from_sys_call
16159 CFI_ENDPROC
16160 -END(stub_execve)
16161 +ENDPROC(stub_execve)
16162
16163 /*
16164 * sigreturn is special because it needs to restore all registers on return.
16165 @@ -744,7 +1043,7 @@ ENTRY(stub_rt_sigreturn)
16166 RESTORE_REST
16167 jmp int_ret_from_sys_call
16168 CFI_ENDPROC
16169 -END(stub_rt_sigreturn)
16170 +ENDPROC(stub_rt_sigreturn)
16171
16172 /*
16173 * Build the entry stubs and pointer table with some assembler magic.
16174 @@ -780,7 +1079,7 @@ vector=vector+1
16175 2: jmp common_interrupt
16176 .endr
16177 CFI_ENDPROC
16178 -END(irq_entries_start)
16179 +ENDPROC(irq_entries_start)
16180
16181 .previous
16182 END(interrupt)
16183 @@ -800,6 +1099,16 @@ END(interrupt)
16184 CFI_ADJUST_CFA_OFFSET 10*8
16185 call save_args
16186 PARTIAL_FRAME 0
16187 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16188 + testb $3, CS(%rdi)
16189 + jnz 1f
16190 + pax_enter_kernel
16191 + jmp 2f
16192 +1: pax_enter_kernel_user
16193 +2:
16194 +#else
16195 + pax_enter_kernel
16196 +#endif
16197 call \func
16198 .endm
16199
16200 @@ -822,7 +1131,7 @@ ret_from_intr:
16201 CFI_ADJUST_CFA_OFFSET -8
16202 exit_intr:
16203 GET_THREAD_INFO(%rcx)
16204 - testl $3,CS-ARGOFFSET(%rsp)
16205 + testb $3,CS-ARGOFFSET(%rsp)
16206 je retint_kernel
16207
16208 /* Interrupt came from user space */
16209 @@ -844,12 +1153,16 @@ retint_swapgs: /* return to user-space */
16210 * The iretq could re-enable interrupts:
16211 */
16212 DISABLE_INTERRUPTS(CLBR_ANY)
16213 + pax_exit_kernel_user
16214 + pax_erase_kstack
16215 TRACE_IRQS_IRETQ
16216 SWAPGS
16217 jmp restore_args
16218
16219 retint_restore_args: /* return to kernel space */
16220 DISABLE_INTERRUPTS(CLBR_ANY)
16221 + pax_exit_kernel
16222 + pax_force_retaddr RIP-ARGOFFSET
16223 /*
16224 * The iretq could re-enable interrupts:
16225 */
16226 @@ -940,7 +1253,7 @@ ENTRY(retint_kernel)
16227 #endif
16228
16229 CFI_ENDPROC
16230 -END(common_interrupt)
16231 +ENDPROC(common_interrupt)
16232
16233 /*
16234 * APIC interrupts.
16235 @@ -953,7 +1266,7 @@ ENTRY(\sym)
16236 interrupt \do_sym
16237 jmp ret_from_intr
16238 CFI_ENDPROC
16239 -END(\sym)
16240 +ENDPROC(\sym)
16241 .endm
16242
16243 #ifdef CONFIG_SMP
16244 @@ -1032,12 +1345,22 @@ ENTRY(\sym)
16245 CFI_ADJUST_CFA_OFFSET 15*8
16246 call error_entry
16247 DEFAULT_FRAME 0
16248 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16249 + testb $3, CS(%rsp)
16250 + jnz 1f
16251 + pax_enter_kernel
16252 + jmp 2f
16253 +1: pax_enter_kernel_user
16254 +2:
16255 +#else
16256 + pax_enter_kernel
16257 +#endif
16258 movq %rsp,%rdi /* pt_regs pointer */
16259 xorl %esi,%esi /* no error code */
16260 call \do_sym
16261 jmp error_exit /* %ebx: no swapgs flag */
16262 CFI_ENDPROC
16263 -END(\sym)
16264 +ENDPROC(\sym)
16265 .endm
16266
16267 .macro paranoidzeroentry sym do_sym
16268 @@ -1049,12 +1372,22 @@ ENTRY(\sym)
16269 subq $15*8, %rsp
16270 call save_paranoid
16271 TRACE_IRQS_OFF
16272 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16273 + testb $3, CS(%rsp)
16274 + jnz 1f
16275 + pax_enter_kernel
16276 + jmp 2f
16277 +1: pax_enter_kernel_user
16278 +2:
16279 +#else
16280 + pax_enter_kernel
16281 +#endif
16282 movq %rsp,%rdi /* pt_regs pointer */
16283 xorl %esi,%esi /* no error code */
16284 call \do_sym
16285 jmp paranoid_exit /* %ebx: no swapgs flag */
16286 CFI_ENDPROC
16287 -END(\sym)
16288 +ENDPROC(\sym)
16289 .endm
16290
16291 .macro paranoidzeroentry_ist sym do_sym ist
16292 @@ -1066,15 +1399,30 @@ ENTRY(\sym)
16293 subq $15*8, %rsp
16294 call save_paranoid
16295 TRACE_IRQS_OFF
16296 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16297 + testb $3, CS(%rsp)
16298 + jnz 1f
16299 + pax_enter_kernel
16300 + jmp 2f
16301 +1: pax_enter_kernel_user
16302 +2:
16303 +#else
16304 + pax_enter_kernel
16305 +#endif
16306 movq %rsp,%rdi /* pt_regs pointer */
16307 xorl %esi,%esi /* no error code */
16308 - PER_CPU(init_tss, %rbp)
16309 +#ifdef CONFIG_SMP
16310 + imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
16311 + lea init_tss(%rbp), %rbp
16312 +#else
16313 + lea init_tss(%rip), %rbp
16314 +#endif
16315 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
16316 call \do_sym
16317 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
16318 jmp paranoid_exit /* %ebx: no swapgs flag */
16319 CFI_ENDPROC
16320 -END(\sym)
16321 +ENDPROC(\sym)
16322 .endm
16323
16324 .macro errorentry sym do_sym
16325 @@ -1085,13 +1433,23 @@ ENTRY(\sym)
16326 CFI_ADJUST_CFA_OFFSET 15*8
16327 call error_entry
16328 DEFAULT_FRAME 0
16329 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16330 + testb $3, CS(%rsp)
16331 + jnz 1f
16332 + pax_enter_kernel
16333 + jmp 2f
16334 +1: pax_enter_kernel_user
16335 +2:
16336 +#else
16337 + pax_enter_kernel
16338 +#endif
16339 movq %rsp,%rdi /* pt_regs pointer */
16340 movq ORIG_RAX(%rsp),%rsi /* get error code */
16341 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16342 call \do_sym
16343 jmp error_exit /* %ebx: no swapgs flag */
16344 CFI_ENDPROC
16345 -END(\sym)
16346 +ENDPROC(\sym)
16347 .endm
16348
16349 /* error code is on the stack already */
16350 @@ -1104,13 +1462,23 @@ ENTRY(\sym)
16351 call save_paranoid
16352 DEFAULT_FRAME 0
16353 TRACE_IRQS_OFF
16354 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16355 + testb $3, CS(%rsp)
16356 + jnz 1f
16357 + pax_enter_kernel
16358 + jmp 2f
16359 +1: pax_enter_kernel_user
16360 +2:
16361 +#else
16362 + pax_enter_kernel
16363 +#endif
16364 movq %rsp,%rdi /* pt_regs pointer */
16365 movq ORIG_RAX(%rsp),%rsi /* get error code */
16366 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16367 call \do_sym
16368 jmp paranoid_exit /* %ebx: no swapgs flag */
16369 CFI_ENDPROC
16370 -END(\sym)
16371 +ENDPROC(\sym)
16372 .endm
16373
16374 zeroentry divide_error do_divide_error
16375 @@ -1141,9 +1509,10 @@ gs_change:
16376 SWAPGS
16377 popf
16378 CFI_ADJUST_CFA_OFFSET -8
16379 + pax_force_retaddr
16380 ret
16381 CFI_ENDPROC
16382 -END(native_load_gs_index)
16383 +ENDPROC(native_load_gs_index)
16384
16385 .section __ex_table,"a"
16386 .align 8
16387 @@ -1193,11 +1562,12 @@ ENTRY(kernel_thread)
16388 * of hacks for example to fork off the per-CPU idle tasks.
16389 * [Hopefully no generic code relies on the reschedule -AK]
16390 */
16391 - RESTORE_ALL
16392 + RESTORE_REST
16393 UNFAKE_STACK_FRAME
16394 + pax_force_retaddr
16395 ret
16396 CFI_ENDPROC
16397 -END(kernel_thread)
16398 +ENDPROC(kernel_thread)
16399
16400 ENTRY(child_rip)
16401 pushq $0 # fake return address
16402 @@ -1208,13 +1578,14 @@ ENTRY(child_rip)
16403 */
16404 movq %rdi, %rax
16405 movq %rsi, %rdi
16406 + pax_force_fptr %rax
16407 call *%rax
16408 # exit
16409 mov %eax, %edi
16410 call do_exit
16411 ud2 # padding for call trace
16412 CFI_ENDPROC
16413 -END(child_rip)
16414 +ENDPROC(child_rip)
16415
16416 /*
16417 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
16418 @@ -1241,11 +1612,11 @@ ENTRY(kernel_execve)
16419 RESTORE_REST
16420 testq %rax,%rax
16421 je int_ret_from_sys_call
16422 - RESTORE_ARGS
16423 UNFAKE_STACK_FRAME
16424 + pax_force_retaddr
16425 ret
16426 CFI_ENDPROC
16427 -END(kernel_execve)
16428 +ENDPROC(kernel_execve)
16429
16430 /* Call softirq on interrupt stack. Interrupts are off. */
16431 ENTRY(call_softirq)
16432 @@ -1263,9 +1634,10 @@ ENTRY(call_softirq)
16433 CFI_DEF_CFA_REGISTER rsp
16434 CFI_ADJUST_CFA_OFFSET -8
16435 decl PER_CPU_VAR(irq_count)
16436 + pax_force_retaddr
16437 ret
16438 CFI_ENDPROC
16439 -END(call_softirq)
16440 +ENDPROC(call_softirq)
16441
16442 #ifdef CONFIG_XEN
16443 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
16444 @@ -1303,7 +1675,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
16445 decl PER_CPU_VAR(irq_count)
16446 jmp error_exit
16447 CFI_ENDPROC
16448 -END(xen_do_hypervisor_callback)
16449 +ENDPROC(xen_do_hypervisor_callback)
16450
16451 /*
16452 * Hypervisor uses this for application faults while it executes.
16453 @@ -1362,7 +1734,7 @@ ENTRY(xen_failsafe_callback)
16454 SAVE_ALL
16455 jmp error_exit
16456 CFI_ENDPROC
16457 -END(xen_failsafe_callback)
16458 +ENDPROC(xen_failsafe_callback)
16459
16460 #endif /* CONFIG_XEN */
16461
16462 @@ -1405,16 +1777,31 @@ ENTRY(paranoid_exit)
16463 TRACE_IRQS_OFF
16464 testl %ebx,%ebx /* swapgs needed? */
16465 jnz paranoid_restore
16466 - testl $3,CS(%rsp)
16467 + testb $3,CS(%rsp)
16468 jnz paranoid_userspace
16469 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16470 + pax_exit_kernel
16471 + TRACE_IRQS_IRETQ 0
16472 + SWAPGS_UNSAFE_STACK
16473 + RESTORE_ALL 8
16474 + pax_force_retaddr_bts
16475 + jmp irq_return
16476 +#endif
16477 paranoid_swapgs:
16478 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16479 + pax_exit_kernel_user
16480 +#else
16481 + pax_exit_kernel
16482 +#endif
16483 TRACE_IRQS_IRETQ 0
16484 SWAPGS_UNSAFE_STACK
16485 RESTORE_ALL 8
16486 jmp irq_return
16487 paranoid_restore:
16488 + pax_exit_kernel
16489 TRACE_IRQS_IRETQ 0
16490 RESTORE_ALL 8
16491 + pax_force_retaddr_bts
16492 jmp irq_return
16493 paranoid_userspace:
16494 GET_THREAD_INFO(%rcx)
16495 @@ -1443,7 +1830,7 @@ paranoid_schedule:
16496 TRACE_IRQS_OFF
16497 jmp paranoid_userspace
16498 CFI_ENDPROC
16499 -END(paranoid_exit)
16500 +ENDPROC(paranoid_exit)
16501
16502 /*
16503 * Exception entry point. This expects an error code/orig_rax on the stack.
16504 @@ -1470,12 +1857,13 @@ ENTRY(error_entry)
16505 movq_cfi r14, R14+8
16506 movq_cfi r15, R15+8
16507 xorl %ebx,%ebx
16508 - testl $3,CS+8(%rsp)
16509 + testb $3,CS+8(%rsp)
16510 je error_kernelspace
16511 error_swapgs:
16512 SWAPGS
16513 error_sti:
16514 TRACE_IRQS_OFF
16515 + pax_force_retaddr_bts
16516 ret
16517 CFI_ENDPROC
16518
16519 @@ -1497,7 +1885,7 @@ error_kernelspace:
16520 cmpq $gs_change,RIP+8(%rsp)
16521 je error_swapgs
16522 jmp error_sti
16523 -END(error_entry)
16524 +ENDPROC(error_entry)
16525
16526
16527 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
16528 @@ -1517,7 +1905,7 @@ ENTRY(error_exit)
16529 jnz retint_careful
16530 jmp retint_swapgs
16531 CFI_ENDPROC
16532 -END(error_exit)
16533 +ENDPROC(error_exit)
16534
16535
16536 /* runs on exception stack */
16537 @@ -1529,6 +1917,16 @@ ENTRY(nmi)
16538 CFI_ADJUST_CFA_OFFSET 15*8
16539 call save_paranoid
16540 DEFAULT_FRAME 0
16541 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16542 + testb $3, CS(%rsp)
16543 + jnz 1f
16544 + pax_enter_kernel
16545 + jmp 2f
16546 +1: pax_enter_kernel_user
16547 +2:
16548 +#else
16549 + pax_enter_kernel
16550 +#endif
16551 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
16552 movq %rsp,%rdi
16553 movq $-1,%rsi
16554 @@ -1539,12 +1937,28 @@ ENTRY(nmi)
16555 DISABLE_INTERRUPTS(CLBR_NONE)
16556 testl %ebx,%ebx /* swapgs needed? */
16557 jnz nmi_restore
16558 - testl $3,CS(%rsp)
16559 + testb $3,CS(%rsp)
16560 jnz nmi_userspace
16561 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16562 + pax_exit_kernel
16563 + SWAPGS_UNSAFE_STACK
16564 + RESTORE_ALL 8
16565 + pax_force_retaddr_bts
16566 + jmp irq_return
16567 +#endif
16568 nmi_swapgs:
16569 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16570 + pax_exit_kernel_user
16571 +#else
16572 + pax_exit_kernel
16573 +#endif
16574 SWAPGS_UNSAFE_STACK
16575 + RESTORE_ALL 8
16576 + jmp irq_return
16577 nmi_restore:
16578 + pax_exit_kernel
16579 RESTORE_ALL 8
16580 + pax_force_retaddr_bts
16581 jmp irq_return
16582 nmi_userspace:
16583 GET_THREAD_INFO(%rcx)
16584 @@ -1573,14 +1987,14 @@ nmi_schedule:
16585 jmp paranoid_exit
16586 CFI_ENDPROC
16587 #endif
16588 -END(nmi)
16589 +ENDPROC(nmi)
16590
16591 ENTRY(ignore_sysret)
16592 CFI_STARTPROC
16593 mov $-ENOSYS,%eax
16594 sysret
16595 CFI_ENDPROC
16596 -END(ignore_sysret)
16597 +ENDPROC(ignore_sysret)
16598
16599 /*
16600 * End of kprobes section
16601 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
16602 index 9dbb527..7b3615a 100644
16603 --- a/arch/x86/kernel/ftrace.c
16604 +++ b/arch/x86/kernel/ftrace.c
16605 @@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the IP to write to */
16606 static void *mod_code_newcode; /* holds the text to write to the IP */
16607
16608 static unsigned nmi_wait_count;
16609 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
16610 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
16611
16612 int ftrace_arch_read_dyn_info(char *buf, int size)
16613 {
16614 @@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
16615
16616 r = snprintf(buf, size, "%u %u",
16617 nmi_wait_count,
16618 - atomic_read(&nmi_update_count));
16619 + atomic_read_unchecked(&nmi_update_count));
16620 return r;
16621 }
16622
16623 @@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
16624 {
16625 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
16626 smp_rmb();
16627 + pax_open_kernel();
16628 ftrace_mod_code();
16629 - atomic_inc(&nmi_update_count);
16630 + pax_close_kernel();
16631 + atomic_inc_unchecked(&nmi_update_count);
16632 }
16633 /* Must have previous changes seen before executions */
16634 smp_mb();
16635 @@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
16636
16637
16638
16639 -static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
16640 +static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
16641
16642 static unsigned char *ftrace_nop_replace(void)
16643 {
16644 @@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
16645 {
16646 unsigned char replaced[MCOUNT_INSN_SIZE];
16647
16648 + ip = ktla_ktva(ip);
16649 +
16650 /*
16651 * Note: Due to modules and __init, code can
16652 * disappear and change, we need to protect against faulting
16653 @@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16654 unsigned char old[MCOUNT_INSN_SIZE], *new;
16655 int ret;
16656
16657 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16658 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16659 new = ftrace_call_replace(ip, (unsigned long)func);
16660 ret = ftrace_modify_code(ip, old, new);
16661
16662 @@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *data)
16663 switch (faulted) {
16664 case 0:
16665 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
16666 - memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
16667 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
16668 break;
16669 case 1:
16670 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
16671 - memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
16672 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
16673 break;
16674 case 2:
16675 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
16676 - memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
16677 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
16678 break;
16679 }
16680
16681 @@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16682 {
16683 unsigned char code[MCOUNT_INSN_SIZE];
16684
16685 + ip = ktla_ktva(ip);
16686 +
16687 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16688 return -EFAULT;
16689
16690 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16691 index 4f8e250..df24706 100644
16692 --- a/arch/x86/kernel/head32.c
16693 +++ b/arch/x86/kernel/head32.c
16694 @@ -16,6 +16,7 @@
16695 #include <asm/apic.h>
16696 #include <asm/io_apic.h>
16697 #include <asm/bios_ebda.h>
16698 +#include <asm/boot.h>
16699
16700 static void __init i386_default_early_setup(void)
16701 {
16702 @@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
16703 {
16704 reserve_trampoline_memory();
16705
16706 - reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
16707 + reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
16708
16709 #ifdef CONFIG_BLK_DEV_INITRD
16710 /* Reserve INITRD */
16711 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16712 index 34c3308..6fc4e76 100644
16713 --- a/arch/x86/kernel/head_32.S
16714 +++ b/arch/x86/kernel/head_32.S
16715 @@ -19,10 +19,17 @@
16716 #include <asm/setup.h>
16717 #include <asm/processor-flags.h>
16718 #include <asm/percpu.h>
16719 +#include <asm/msr-index.h>
16720
16721 /* Physical address */
16722 #define pa(X) ((X) - __PAGE_OFFSET)
16723
16724 +#ifdef CONFIG_PAX_KERNEXEC
16725 +#define ta(X) (X)
16726 +#else
16727 +#define ta(X) ((X) - __PAGE_OFFSET)
16728 +#endif
16729 +
16730 /*
16731 * References to members of the new_cpu_data structure.
16732 */
16733 @@ -52,11 +59,7 @@
16734 * and small than max_low_pfn, otherwise will waste some page table entries
16735 */
16736
16737 -#if PTRS_PER_PMD > 1
16738 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
16739 -#else
16740 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
16741 -#endif
16742 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
16743
16744 /* Enough space to fit pagetables for the low memory linear map */
16745 MAPPING_BEYOND_END = \
16746 @@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm
16747 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16748
16749 /*
16750 + * Real beginning of normal "text" segment
16751 + */
16752 +ENTRY(stext)
16753 +ENTRY(_stext)
16754 +
16755 +/*
16756 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
16757 * %esi points to the real-mode code as a 32-bit pointer.
16758 * CS and DS must be 4 GB flat segments, but we don't depend on
16759 @@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16760 * can.
16761 */
16762 __HEAD
16763 +
16764 +#ifdef CONFIG_PAX_KERNEXEC
16765 + jmp startup_32
16766 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
16767 +.fill PAGE_SIZE-5,1,0xcc
16768 +#endif
16769 +
16770 ENTRY(startup_32)
16771 + movl pa(stack_start),%ecx
16772 +
16773 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
16774 us to not reload segments */
16775 testb $(1<<6), BP_loadflags(%esi)
16776 @@ -95,7 +113,60 @@ ENTRY(startup_32)
16777 movl %eax,%es
16778 movl %eax,%fs
16779 movl %eax,%gs
16780 + movl %eax,%ss
16781 2:
16782 + leal -__PAGE_OFFSET(%ecx),%esp
16783 +
16784 +#ifdef CONFIG_SMP
16785 + movl $pa(cpu_gdt_table),%edi
16786 + movl $__per_cpu_load,%eax
16787 + movw %ax,__KERNEL_PERCPU + 2(%edi)
16788 + rorl $16,%eax
16789 + movb %al,__KERNEL_PERCPU + 4(%edi)
16790 + movb %ah,__KERNEL_PERCPU + 7(%edi)
16791 + movl $__per_cpu_end - 1,%eax
16792 + subl $__per_cpu_start,%eax
16793 + movw %ax,__KERNEL_PERCPU + 0(%edi)
16794 +#endif
16795 +
16796 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16797 + movl $NR_CPUS,%ecx
16798 + movl $pa(cpu_gdt_table),%edi
16799 +1:
16800 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
16801 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
16802 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
16803 + addl $PAGE_SIZE_asm,%edi
16804 + loop 1b
16805 +#endif
16806 +
16807 +#ifdef CONFIG_PAX_KERNEXEC
16808 + movl $pa(boot_gdt),%edi
16809 + movl $__LOAD_PHYSICAL_ADDR,%eax
16810 + movw %ax,__BOOT_CS + 2(%edi)
16811 + rorl $16,%eax
16812 + movb %al,__BOOT_CS + 4(%edi)
16813 + movb %ah,__BOOT_CS + 7(%edi)
16814 + rorl $16,%eax
16815 +
16816 + ljmp $(__BOOT_CS),$1f
16817 +1:
16818 +
16819 + movl $NR_CPUS,%ecx
16820 + movl $pa(cpu_gdt_table),%edi
16821 + addl $__PAGE_OFFSET,%eax
16822 +1:
16823 + movw %ax,__KERNEL_CS + 2(%edi)
16824 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
16825 + rorl $16,%eax
16826 + movb %al,__KERNEL_CS + 4(%edi)
16827 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
16828 + movb %ah,__KERNEL_CS + 7(%edi)
16829 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
16830 + rorl $16,%eax
16831 + addl $PAGE_SIZE_asm,%edi
16832 + loop 1b
16833 +#endif
16834
16835 /*
16836 * Clear BSS first so that there are no surprises...
16837 @@ -140,9 +211,7 @@ ENTRY(startup_32)
16838 cmpl $num_subarch_entries, %eax
16839 jae bad_subarch
16840
16841 - movl pa(subarch_entries)(,%eax,4), %eax
16842 - subl $__PAGE_OFFSET, %eax
16843 - jmp *%eax
16844 + jmp *pa(subarch_entries)(,%eax,4)
16845
16846 bad_subarch:
16847 WEAK(lguest_entry)
16848 @@ -154,10 +223,10 @@ WEAK(xen_entry)
16849 __INITDATA
16850
16851 subarch_entries:
16852 - .long default_entry /* normal x86/PC */
16853 - .long lguest_entry /* lguest hypervisor */
16854 - .long xen_entry /* Xen hypervisor */
16855 - .long default_entry /* Moorestown MID */
16856 + .long ta(default_entry) /* normal x86/PC */
16857 + .long ta(lguest_entry) /* lguest hypervisor */
16858 + .long ta(xen_entry) /* Xen hypervisor */
16859 + .long ta(default_entry) /* Moorestown MID */
16860 num_subarch_entries = (. - subarch_entries) / 4
16861 .previous
16862 #endif /* CONFIG_PARAVIRT */
16863 @@ -218,8 +287,11 @@ default_entry:
16864 movl %eax, pa(max_pfn_mapped)
16865
16866 /* Do early initialization of the fixmap area */
16867 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
16868 - movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
16869 +#ifdef CONFIG_COMPAT_VDSO
16870 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
16871 +#else
16872 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
16873 +#endif
16874 #else /* Not PAE */
16875
16876 page_pde_offset = (__PAGE_OFFSET >> 20);
16877 @@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16878 movl %eax, pa(max_pfn_mapped)
16879
16880 /* Do early initialization of the fixmap area */
16881 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
16882 - movl %eax,pa(swapper_pg_dir+0xffc)
16883 +#ifdef CONFIG_COMPAT_VDSO
16884 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
16885 +#else
16886 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
16887 +#endif
16888 #endif
16889 jmp 3f
16890 /*
16891 @@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
16892 movl %eax,%es
16893 movl %eax,%fs
16894 movl %eax,%gs
16895 + movl pa(stack_start),%ecx
16896 + movl %eax,%ss
16897 + leal -__PAGE_OFFSET(%ecx),%esp
16898 #endif /* CONFIG_SMP */
16899 3:
16900
16901 @@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
16902 orl %edx,%eax
16903 movl %eax,%cr4
16904
16905 +#ifdef CONFIG_X86_PAE
16906 btl $5, %eax # check if PAE is enabled
16907 jnc 6f
16908
16909 @@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
16910 cpuid
16911 cmpl $0x80000000, %eax
16912 jbe 6f
16913 +
16914 + /* Clear bogus XD_DISABLE bits */
16915 + call verify_cpu
16916 +
16917 mov $0x80000001, %eax
16918 cpuid
16919 /* Execute Disable bit supported? */
16920 @@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
16921 jnc 6f
16922
16923 /* Setup EFER (Extended Feature Enable Register) */
16924 - movl $0xc0000080, %ecx
16925 + movl $MSR_EFER, %ecx
16926 rdmsr
16927
16928 btsl $11, %eax
16929 /* Make changes effective */
16930 wrmsr
16931
16932 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
16933 + movl $1,pa(nx_enabled)
16934 +#endif
16935 +
16936 6:
16937
16938 /*
16939 @@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
16940 movl %eax,%cr0 /* ..and set paging (PG) bit */
16941 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
16942 1:
16943 - /* Set up the stack pointer */
16944 - lss stack_start,%esp
16945 + /* Shift the stack pointer to a virtual address */
16946 + addl $__PAGE_OFFSET, %esp
16947
16948 /*
16949 * Initialize eflags. Some BIOS's leave bits like NT set. This would
16950 @@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
16951
16952 #ifdef CONFIG_SMP
16953 cmpb $0, ready
16954 - jz 1f /* Initial CPU cleans BSS */
16955 - jmp checkCPUtype
16956 -1:
16957 + jnz checkCPUtype
16958 #endif /* CONFIG_SMP */
16959
16960 /*
16961 @@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
16962 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
16963 movl %eax,%ss # after changing gdt.
16964
16965 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
16966 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
16967 movl %eax,%ds
16968 movl %eax,%es
16969
16970 @@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
16971 */
16972 cmpb $0,ready
16973 jne 1f
16974 - movl $per_cpu__gdt_page,%eax
16975 + movl $cpu_gdt_table,%eax
16976 movl $per_cpu__stack_canary,%ecx
16977 +#ifdef CONFIG_SMP
16978 + addl $__per_cpu_load,%ecx
16979 +#endif
16980 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
16981 shrl $16, %ecx
16982 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
16983 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
16984 1:
16985 -#endif
16986 movl $(__KERNEL_STACK_CANARY),%eax
16987 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
16988 + movl $(__USER_DS),%eax
16989 +#else
16990 + xorl %eax,%eax
16991 +#endif
16992 movl %eax,%gs
16993
16994 xorl %eax,%eax # Clear LDT
16995 @@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
16996
16997 cld # gcc2 wants the direction flag cleared at all times
16998 pushl $0 # fake return address for unwinder
16999 -#ifdef CONFIG_SMP
17000 - movb ready, %cl
17001 movb $1, ready
17002 - cmpb $0,%cl # the first CPU calls start_kernel
17003 - je 1f
17004 - movl (stack_start), %esp
17005 -1:
17006 -#endif /* CONFIG_SMP */
17007 jmp *(initial_code)
17008
17009 /*
17010 @@ -546,22 +631,22 @@ early_page_fault:
17011 jmp early_fault
17012
17013 early_fault:
17014 - cld
17015 #ifdef CONFIG_PRINTK
17016 + cmpl $1,%ss:early_recursion_flag
17017 + je hlt_loop
17018 + incl %ss:early_recursion_flag
17019 + cld
17020 pusha
17021 movl $(__KERNEL_DS),%eax
17022 movl %eax,%ds
17023 movl %eax,%es
17024 - cmpl $2,early_recursion_flag
17025 - je hlt_loop
17026 - incl early_recursion_flag
17027 movl %cr2,%eax
17028 pushl %eax
17029 pushl %edx /* trapno */
17030 pushl $fault_msg
17031 call printk
17032 +; call dump_stack
17033 #endif
17034 - call dump_stack
17035 hlt_loop:
17036 hlt
17037 jmp hlt_loop
17038 @@ -569,8 +654,11 @@ hlt_loop:
17039 /* This is the default interrupt "handler" :-) */
17040 ALIGN
17041 ignore_int:
17042 - cld
17043 #ifdef CONFIG_PRINTK
17044 + cmpl $2,%ss:early_recursion_flag
17045 + je hlt_loop
17046 + incl %ss:early_recursion_flag
17047 + cld
17048 pushl %eax
17049 pushl %ecx
17050 pushl %edx
17051 @@ -579,9 +667,6 @@ ignore_int:
17052 movl $(__KERNEL_DS),%eax
17053 movl %eax,%ds
17054 movl %eax,%es
17055 - cmpl $2,early_recursion_flag
17056 - je hlt_loop
17057 - incl early_recursion_flag
17058 pushl 16(%esp)
17059 pushl 24(%esp)
17060 pushl 32(%esp)
17061 @@ -600,6 +685,8 @@ ignore_int:
17062 #endif
17063 iret
17064
17065 +#include "verify_cpu.S"
17066 +
17067 __REFDATA
17068 .align 4
17069 ENTRY(initial_code)
17070 @@ -610,31 +697,47 @@ ENTRY(initial_page_table)
17071 /*
17072 * BSS section
17073 */
17074 -__PAGE_ALIGNED_BSS
17075 - .align PAGE_SIZE_asm
17076 #ifdef CONFIG_X86_PAE
17077 +.section .swapper_pg_pmd,"a",@progbits
17078 swapper_pg_pmd:
17079 .fill 1024*KPMDS,4,0
17080 #else
17081 +.section .swapper_pg_dir,"a",@progbits
17082 ENTRY(swapper_pg_dir)
17083 .fill 1024,4,0
17084 #endif
17085 +.section .swapper_pg_fixmap,"a",@progbits
17086 swapper_pg_fixmap:
17087 .fill 1024,4,0
17088 #ifdef CONFIG_X86_TRAMPOLINE
17089 +.section .trampoline_pg_dir,"a",@progbits
17090 ENTRY(trampoline_pg_dir)
17091 +#ifdef CONFIG_X86_PAE
17092 + .fill 4,8,0
17093 +#else
17094 .fill 1024,4,0
17095 #endif
17096 +#endif
17097 +
17098 +.section .empty_zero_page,"a",@progbits
17099 ENTRY(empty_zero_page)
17100 .fill 4096,1,0
17101
17102 /*
17103 + * The IDT has to be page-aligned to simplify the Pentium
17104 + * F0 0F bug workaround.. We have a special link segment
17105 + * for this.
17106 + */
17107 +.section .idt,"a",@progbits
17108 +ENTRY(idt_table)
17109 + .fill 256,8,0
17110 +
17111 +/*
17112 * This starts the data section.
17113 */
17114 #ifdef CONFIG_X86_PAE
17115 -__PAGE_ALIGNED_DATA
17116 - /* Page-aligned for the benefit of paravirt? */
17117 - .align PAGE_SIZE_asm
17118 +.section .swapper_pg_dir,"a",@progbits
17119 +
17120 ENTRY(swapper_pg_dir)
17121 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
17122 # if KPMDS == 3
17123 @@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
17124 # error "Kernel PMDs should be 1, 2 or 3"
17125 # endif
17126 .align PAGE_SIZE_asm /* needs to be page-sized too */
17127 +
17128 +#ifdef CONFIG_PAX_PER_CPU_PGD
17129 +ENTRY(cpu_pgd)
17130 + .rept NR_CPUS
17131 + .fill 4,8,0
17132 + .endr
17133 +#endif
17134 +
17135 #endif
17136
17137 .data
17138 +.balign 4
17139 ENTRY(stack_start)
17140 - .long init_thread_union+THREAD_SIZE
17141 - .long __BOOT_DS
17142 + .long init_thread_union+THREAD_SIZE-8
17143
17144 ready: .byte 0
17145
17146 +.section .rodata,"a",@progbits
17147 early_recursion_flag:
17148 .long 0
17149
17150 @@ -697,7 +809,7 @@ fault_msg:
17151 .word 0 # 32 bit align gdt_desc.address
17152 boot_gdt_descr:
17153 .word __BOOT_DS+7
17154 - .long boot_gdt - __PAGE_OFFSET
17155 + .long pa(boot_gdt)
17156
17157 .word 0 # 32-bit align idt_desc.address
17158 idt_descr:
17159 @@ -708,7 +820,7 @@ idt_descr:
17160 .word 0 # 32 bit align gdt_desc.address
17161 ENTRY(early_gdt_descr)
17162 .word GDT_ENTRIES*8-1
17163 - .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
17164 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
17165
17166 /*
17167 * The boot_gdt must mirror the equivalent in setup.S and is
17168 @@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
17169 .align L1_CACHE_BYTES
17170 ENTRY(boot_gdt)
17171 .fill GDT_ENTRY_BOOT_CS,8,0
17172 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
17173 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
17174 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
17175 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
17176 +
17177 + .align PAGE_SIZE_asm
17178 +ENTRY(cpu_gdt_table)
17179 + .rept NR_CPUS
17180 + .quad 0x0000000000000000 /* NULL descriptor */
17181 + .quad 0x0000000000000000 /* 0x0b reserved */
17182 + .quad 0x0000000000000000 /* 0x13 reserved */
17183 + .quad 0x0000000000000000 /* 0x1b reserved */
17184 +
17185 +#ifdef CONFIG_PAX_KERNEXEC
17186 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
17187 +#else
17188 + .quad 0x0000000000000000 /* 0x20 unused */
17189 +#endif
17190 +
17191 + .quad 0x0000000000000000 /* 0x28 unused */
17192 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
17193 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
17194 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
17195 + .quad 0x0000000000000000 /* 0x4b reserved */
17196 + .quad 0x0000000000000000 /* 0x53 reserved */
17197 + .quad 0x0000000000000000 /* 0x5b reserved */
17198 +
17199 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
17200 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
17201 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
17202 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
17203 +
17204 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
17205 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
17206 +
17207 + /*
17208 + * Segments used for calling PnP BIOS have byte granularity.
17209 + * The code segments and data segments have fixed 64k limits,
17210 + * the transfer segment sizes are set at run time.
17211 + */
17212 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
17213 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
17214 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
17215 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
17216 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
17217 +
17218 + /*
17219 + * The APM segments have byte granularity and their bases
17220 + * are set at run time. All have 64k limits.
17221 + */
17222 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
17223 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
17224 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
17225 +
17226 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
17227 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
17228 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
17229 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
17230 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
17231 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
17232 +
17233 + /* Be sure this is zeroed to avoid false validations in Xen */
17234 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
17235 + .endr
17236 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
17237 index 780cd92..758b2a6 100644
17238 --- a/arch/x86/kernel/head_64.S
17239 +++ b/arch/x86/kernel/head_64.S
17240 @@ -19,6 +19,8 @@
17241 #include <asm/cache.h>
17242 #include <asm/processor-flags.h>
17243 #include <asm/percpu.h>
17244 +#include <asm/cpufeature.h>
17245 +#include <asm/alternative-asm.h>
17246
17247 #ifdef CONFIG_PARAVIRT
17248 #include <asm/asm-offsets.h>
17249 @@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
17250 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
17251 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
17252 L3_START_KERNEL = pud_index(__START_KERNEL_map)
17253 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
17254 +L3_VMALLOC_START = pud_index(VMALLOC_START)
17255 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
17256 +L3_VMALLOC_END = pud_index(VMALLOC_END)
17257 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
17258 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
17259
17260 .text
17261 __HEAD
17262 @@ -85,35 +93,23 @@ startup_64:
17263 */
17264 addq %rbp, init_level4_pgt + 0(%rip)
17265 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
17266 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
17267 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
17268 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
17269 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
17270
17271 addq %rbp, level3_ident_pgt + 0(%rip)
17272 +#ifndef CONFIG_XEN
17273 + addq %rbp, level3_ident_pgt + 8(%rip)
17274 +#endif
17275
17276 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
17277 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
17278 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
17279 +
17280 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
17281 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
17282
17283 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
17284 -
17285 - /* Add an Identity mapping if I am above 1G */
17286 - leaq _text(%rip), %rdi
17287 - andq $PMD_PAGE_MASK, %rdi
17288 -
17289 - movq %rdi, %rax
17290 - shrq $PUD_SHIFT, %rax
17291 - andq $(PTRS_PER_PUD - 1), %rax
17292 - jz ident_complete
17293 -
17294 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
17295 - leaq level3_ident_pgt(%rip), %rbx
17296 - movq %rdx, 0(%rbx, %rax, 8)
17297 -
17298 - movq %rdi, %rax
17299 - shrq $PMD_SHIFT, %rax
17300 - andq $(PTRS_PER_PMD - 1), %rax
17301 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
17302 - leaq level2_spare_pgt(%rip), %rbx
17303 - movq %rdx, 0(%rbx, %rax, 8)
17304 -ident_complete:
17305 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
17306
17307 /*
17308 * Fixup the kernel text+data virtual addresses. Note that
17309 @@ -161,8 +157,8 @@ ENTRY(secondary_startup_64)
17310 * after the boot processor executes this code.
17311 */
17312
17313 - /* Enable PAE mode and PGE */
17314 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
17315 + /* Enable PAE mode and PSE/PGE */
17316 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17317 movq %rax, %cr4
17318
17319 /* Setup early boot stage 4 level pagetables. */
17320 @@ -184,9 +180,16 @@ ENTRY(secondary_startup_64)
17321 movl $MSR_EFER, %ecx
17322 rdmsr
17323 btsl $_EFER_SCE, %eax /* Enable System Call */
17324 - btl $20,%edi /* No Execute supported? */
17325 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
17326 jnc 1f
17327 btsl $_EFER_NX, %eax
17328 + leaq init_level4_pgt(%rip), %rdi
17329 +#ifndef CONFIG_EFI
17330 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
17331 +#endif
17332 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
17333 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
17334 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
17335 1: wrmsr /* Make changes effective */
17336
17337 /* Setup cr0 */
17338 @@ -249,6 +252,7 @@ ENTRY(secondary_startup_64)
17339 * jump. In addition we need to ensure %cs is set so we make this
17340 * a far return.
17341 */
17342 + pax_set_fptr_mask
17343 movq initial_code(%rip),%rax
17344 pushq $0 # fake return address to stop unwinder
17345 pushq $__KERNEL_CS # set correct cs
17346 @@ -262,16 +266,16 @@ ENTRY(secondary_startup_64)
17347 .quad x86_64_start_kernel
17348 ENTRY(initial_gs)
17349 .quad INIT_PER_CPU_VAR(irq_stack_union)
17350 - __FINITDATA
17351
17352 ENTRY(stack_start)
17353 .quad init_thread_union+THREAD_SIZE-8
17354 .word 0
17355 + __FINITDATA
17356
17357 bad_address:
17358 jmp bad_address
17359
17360 - .section ".init.text","ax"
17361 + __INIT
17362 #ifdef CONFIG_EARLY_PRINTK
17363 .globl early_idt_handlers
17364 early_idt_handlers:
17365 @@ -316,18 +320,23 @@ ENTRY(early_idt_handler)
17366 #endif /* EARLY_PRINTK */
17367 1: hlt
17368 jmp 1b
17369 + .previous
17370
17371 #ifdef CONFIG_EARLY_PRINTK
17372 + __INITDATA
17373 early_recursion_flag:
17374 .long 0
17375 + .previous
17376
17377 + .section .rodata,"a",@progbits
17378 early_idt_msg:
17379 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
17380 early_idt_ripmsg:
17381 .asciz "RIP %s\n"
17382 + .previous
17383 #endif /* CONFIG_EARLY_PRINTK */
17384 - .previous
17385
17386 + .section .rodata,"a",@progbits
17387 #define NEXT_PAGE(name) \
17388 .balign PAGE_SIZE; \
17389 ENTRY(name)
17390 @@ -350,13 +359,41 @@ NEXT_PAGE(init_level4_pgt)
17391 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17392 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
17393 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17394 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
17395 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
17396 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
17397 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
17398 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
17399 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17400 .org init_level4_pgt + L4_START_KERNEL*8, 0
17401 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
17402 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
17403
17404 +#ifdef CONFIG_PAX_PER_CPU_PGD
17405 +NEXT_PAGE(cpu_pgd)
17406 + .rept NR_CPUS
17407 + .fill 512,8,0
17408 + .endr
17409 +#endif
17410 +
17411 NEXT_PAGE(level3_ident_pgt)
17412 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17413 +#ifdef CONFIG_XEN
17414 .fill 511,8,0
17415 +#else
17416 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
17417 + .fill 510,8,0
17418 +#endif
17419 +
17420 +NEXT_PAGE(level3_vmalloc_start_pgt)
17421 + .fill 512,8,0
17422 +
17423 +NEXT_PAGE(level3_vmalloc_end_pgt)
17424 + .fill 512,8,0
17425 +
17426 +NEXT_PAGE(level3_vmemmap_pgt)
17427 + .fill L3_VMEMMAP_START,8,0
17428 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17429
17430 NEXT_PAGE(level3_kernel_pgt)
17431 .fill L3_START_KERNEL,8,0
17432 @@ -364,20 +401,23 @@ NEXT_PAGE(level3_kernel_pgt)
17433 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
17434 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17435
17436 +NEXT_PAGE(level2_vmemmap_pgt)
17437 + .fill 512,8,0
17438 +
17439 NEXT_PAGE(level2_fixmap_pgt)
17440 - .fill 506,8,0
17441 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17442 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
17443 - .fill 5,8,0
17444 + .fill 507,8,0
17445 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
17446 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
17447 + .fill 4,8,0
17448
17449 -NEXT_PAGE(level1_fixmap_pgt)
17450 +NEXT_PAGE(level1_vsyscall_pgt)
17451 .fill 512,8,0
17452
17453 -NEXT_PAGE(level2_ident_pgt)
17454 - /* Since I easily can, map the first 1G.
17455 + /* Since I easily can, map the first 2G.
17456 * Don't set NX because code runs from these pages.
17457 */
17458 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
17459 +NEXT_PAGE(level2_ident_pgt)
17460 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
17461
17462 NEXT_PAGE(level2_kernel_pgt)
17463 /*
17464 @@ -390,33 +430,55 @@ NEXT_PAGE(level2_kernel_pgt)
17465 * If you want to increase this then increase MODULES_VADDR
17466 * too.)
17467 */
17468 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
17469 - KERNEL_IMAGE_SIZE/PMD_SIZE)
17470 -
17471 -NEXT_PAGE(level2_spare_pgt)
17472 - .fill 512, 8, 0
17473 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
17474
17475 #undef PMDS
17476 #undef NEXT_PAGE
17477
17478 - .data
17479 + .align PAGE_SIZE
17480 +ENTRY(cpu_gdt_table)
17481 + .rept NR_CPUS
17482 + .quad 0x0000000000000000 /* NULL descriptor */
17483 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
17484 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
17485 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
17486 + .quad 0x00cffb000000ffff /* __USER32_CS */
17487 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
17488 + .quad 0x00affb000000ffff /* __USER_CS */
17489 +
17490 +#ifdef CONFIG_PAX_KERNEXEC
17491 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
17492 +#else
17493 + .quad 0x0 /* unused */
17494 +#endif
17495 +
17496 + .quad 0,0 /* TSS */
17497 + .quad 0,0 /* LDT */
17498 + .quad 0,0,0 /* three TLS descriptors */
17499 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
17500 + /* asm/segment.h:GDT_ENTRIES must match this */
17501 +
17502 + /* zero the remaining page */
17503 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
17504 + .endr
17505 +
17506 .align 16
17507 .globl early_gdt_descr
17508 early_gdt_descr:
17509 .word GDT_ENTRIES*8-1
17510 early_gdt_descr_base:
17511 - .quad INIT_PER_CPU_VAR(gdt_page)
17512 + .quad cpu_gdt_table
17513
17514 ENTRY(phys_base)
17515 /* This must match the first entry in level2_kernel_pgt */
17516 .quad 0x0000000000000000
17517
17518 #include "../../x86/xen/xen-head.S"
17519 -
17520 - .section .bss, "aw", @nobits
17521 +
17522 + .section .rodata,"a",@progbits
17523 .align L1_CACHE_BYTES
17524 ENTRY(idt_table)
17525 - .skip IDT_ENTRIES * 16
17526 + .fill 512,8,0
17527
17528 __PAGE_ALIGNED_BSS
17529 .align PAGE_SIZE
17530 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
17531 index 9c3bd4a..e1d9b35 100644
17532 --- a/arch/x86/kernel/i386_ksyms_32.c
17533 +++ b/arch/x86/kernel/i386_ksyms_32.c
17534 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
17535 EXPORT_SYMBOL(cmpxchg8b_emu);
17536 #endif
17537
17538 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
17539 +
17540 /* Networking helper routines. */
17541 EXPORT_SYMBOL(csum_partial_copy_generic);
17542 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
17543 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
17544
17545 EXPORT_SYMBOL(__get_user_1);
17546 EXPORT_SYMBOL(__get_user_2);
17547 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
17548
17549 EXPORT_SYMBOL(csum_partial);
17550 EXPORT_SYMBOL(empty_zero_page);
17551 +
17552 +#ifdef CONFIG_PAX_KERNEXEC
17553 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
17554 +#endif
17555 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
17556 index df89102..a244320 100644
17557 --- a/arch/x86/kernel/i8259.c
17558 +++ b/arch/x86/kernel/i8259.c
17559 @@ -208,7 +208,7 @@ spurious_8259A_irq:
17560 "spurious 8259A interrupt: IRQ%d.\n", irq);
17561 spurious_irq_mask |= irqmask;
17562 }
17563 - atomic_inc(&irq_err_count);
17564 + atomic_inc_unchecked(&irq_err_count);
17565 /*
17566 * Theoretically we do not have to handle this IRQ,
17567 * but in Linux this does not cause problems and is
17568 diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
17569 index 3a54dcb..1c22348 100644
17570 --- a/arch/x86/kernel/init_task.c
17571 +++ b/arch/x86/kernel/init_task.c
17572 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
17573 * way process stacks are handled. This is done by having a special
17574 * "init_task" linker map entry..
17575 */
17576 -union thread_union init_thread_union __init_task_data =
17577 - { INIT_THREAD_INFO(init_task) };
17578 +union thread_union init_thread_union __init_task_data;
17579
17580 /*
17581 * Initial task structure.
17582 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
17583 * section. Since TSS's are completely CPU-local, we want them
17584 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
17585 */
17586 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
17587 -
17588 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
17589 +EXPORT_SYMBOL(init_tss);
17590 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
17591 index 99c4d30..74c84e9 100644
17592 --- a/arch/x86/kernel/ioport.c
17593 +++ b/arch/x86/kernel/ioport.c
17594 @@ -6,6 +6,7 @@
17595 #include <linux/sched.h>
17596 #include <linux/kernel.h>
17597 #include <linux/capability.h>
17598 +#include <linux/security.h>
17599 #include <linux/errno.h>
17600 #include <linux/types.h>
17601 #include <linux/ioport.h>
17602 @@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17603
17604 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
17605 return -EINVAL;
17606 +#ifdef CONFIG_GRKERNSEC_IO
17607 + if (turn_on && grsec_disable_privio) {
17608 + gr_handle_ioperm();
17609 + return -EPERM;
17610 + }
17611 +#endif
17612 if (turn_on && !capable(CAP_SYS_RAWIO))
17613 return -EPERM;
17614
17615 @@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17616 * because the ->io_bitmap_max value must match the bitmap
17617 * contents:
17618 */
17619 - tss = &per_cpu(init_tss, get_cpu());
17620 + tss = init_tss + get_cpu();
17621
17622 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
17623
17624 @@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, struct pt_regs *regs)
17625 return -EINVAL;
17626 /* Trying to gain more privileges? */
17627 if (level > old) {
17628 +#ifdef CONFIG_GRKERNSEC_IO
17629 + if (grsec_disable_privio) {
17630 + gr_handle_iopl();
17631 + return -EPERM;
17632 + }
17633 +#endif
17634 if (!capable(CAP_SYS_RAWIO))
17635 return -EPERM;
17636 }
17637 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
17638 index 04bbd52..83a07d9 100644
17639 --- a/arch/x86/kernel/irq.c
17640 +++ b/arch/x86/kernel/irq.c
17641 @@ -15,7 +15,7 @@
17642 #include <asm/mce.h>
17643 #include <asm/hw_irq.h>
17644
17645 -atomic_t irq_err_count;
17646 +atomic_unchecked_t irq_err_count;
17647
17648 /* Function pointer for generic interrupt vector handling */
17649 void (*generic_interrupt_extension)(void) = NULL;
17650 @@ -114,9 +114,9 @@ static int show_other_interrupts(struct seq_file *p, int prec)
17651 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
17652 seq_printf(p, " Machine check polls\n");
17653 #endif
17654 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
17655 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
17656 #if defined(CONFIG_X86_IO_APIC)
17657 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
17658 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
17659 #endif
17660 return 0;
17661 }
17662 @@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
17663
17664 u64 arch_irq_stat(void)
17665 {
17666 - u64 sum = atomic_read(&irq_err_count);
17667 + u64 sum = atomic_read_unchecked(&irq_err_count);
17668
17669 #ifdef CONFIG_X86_IO_APIC
17670 - sum += atomic_read(&irq_mis_count);
17671 + sum += atomic_read_unchecked(&irq_mis_count);
17672 #endif
17673 return sum;
17674 }
17675 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
17676 index 7d35d0f..03f1d52 100644
17677 --- a/arch/x86/kernel/irq_32.c
17678 +++ b/arch/x86/kernel/irq_32.c
17679 @@ -35,7 +35,7 @@ static int check_stack_overflow(void)
17680 __asm__ __volatile__("andl %%esp,%0" :
17681 "=r" (sp) : "0" (THREAD_SIZE - 1));
17682
17683 - return sp < (sizeof(struct thread_info) + STACK_WARN);
17684 + return sp < STACK_WARN;
17685 }
17686
17687 static void print_stack_overflow(void)
17688 @@ -54,9 +54,9 @@ static inline void print_stack_overflow(void) { }
17689 * per-CPU IRQ handling contexts (thread information and stack)
17690 */
17691 union irq_ctx {
17692 - struct thread_info tinfo;
17693 - u32 stack[THREAD_SIZE/sizeof(u32)];
17694 -} __attribute__((aligned(PAGE_SIZE)));
17695 + unsigned long previous_esp;
17696 + u32 stack[THREAD_SIZE/sizeof(u32)];
17697 +} __attribute__((aligned(THREAD_SIZE)));
17698
17699 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
17700 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
17701 @@ -78,10 +78,9 @@ static void call_on_stack(void *func, void *stack)
17702 static inline int
17703 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17704 {
17705 - union irq_ctx *curctx, *irqctx;
17706 + union irq_ctx *irqctx;
17707 u32 *isp, arg1, arg2;
17708
17709 - curctx = (union irq_ctx *) current_thread_info();
17710 irqctx = __get_cpu_var(hardirq_ctx);
17711
17712 /*
17713 @@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17714 * handler) we can't do that and just have to keep using the
17715 * current stack (which is the irq stack already after all)
17716 */
17717 - if (unlikely(curctx == irqctx))
17718 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
17719 return 0;
17720
17721 /* build the stack frame on the IRQ stack */
17722 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17723 - irqctx->tinfo.task = curctx->tinfo.task;
17724 - irqctx->tinfo.previous_esp = current_stack_pointer;
17725 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17726 + irqctx->previous_esp = current_stack_pointer;
17727
17728 - /*
17729 - * Copy the softirq bits in preempt_count so that the
17730 - * softirq checks work in the hardirq context.
17731 - */
17732 - irqctx->tinfo.preempt_count =
17733 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
17734 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
17735 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17736 + __set_fs(MAKE_MM_SEG(0));
17737 +#endif
17738
17739 if (unlikely(overflow))
17740 call_on_stack(print_stack_overflow, isp);
17741 @@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17742 : "0" (irq), "1" (desc), "2" (isp),
17743 "D" (desc->handle_irq)
17744 : "memory", "cc", "ecx");
17745 +
17746 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17747 + __set_fs(current_thread_info()->addr_limit);
17748 +#endif
17749 +
17750 return 1;
17751 }
17752
17753 @@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17754 */
17755 void __cpuinit irq_ctx_init(int cpu)
17756 {
17757 - union irq_ctx *irqctx;
17758 -
17759 if (per_cpu(hardirq_ctx, cpu))
17760 return;
17761
17762 - irqctx = &per_cpu(hardirq_stack, cpu);
17763 - irqctx->tinfo.task = NULL;
17764 - irqctx->tinfo.exec_domain = NULL;
17765 - irqctx->tinfo.cpu = cpu;
17766 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
17767 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17768 -
17769 - per_cpu(hardirq_ctx, cpu) = irqctx;
17770 -
17771 - irqctx = &per_cpu(softirq_stack, cpu);
17772 - irqctx->tinfo.task = NULL;
17773 - irqctx->tinfo.exec_domain = NULL;
17774 - irqctx->tinfo.cpu = cpu;
17775 - irqctx->tinfo.preempt_count = 0;
17776 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17777 -
17778 - per_cpu(softirq_ctx, cpu) = irqctx;
17779 + per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
17780 + per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
17781
17782 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
17783 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
17784 @@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
17785 asmlinkage void do_softirq(void)
17786 {
17787 unsigned long flags;
17788 - struct thread_info *curctx;
17789 union irq_ctx *irqctx;
17790 u32 *isp;
17791
17792 @@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
17793 local_irq_save(flags);
17794
17795 if (local_softirq_pending()) {
17796 - curctx = current_thread_info();
17797 irqctx = __get_cpu_var(softirq_ctx);
17798 - irqctx->tinfo.task = curctx->task;
17799 - irqctx->tinfo.previous_esp = current_stack_pointer;
17800 + irqctx->previous_esp = current_stack_pointer;
17801
17802 /* build the stack frame on the softirq stack */
17803 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17804 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17805 +
17806 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17807 + __set_fs(MAKE_MM_SEG(0));
17808 +#endif
17809
17810 call_on_stack(__do_softirq, isp);
17811 +
17812 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17813 + __set_fs(current_thread_info()->addr_limit);
17814 +#endif
17815 +
17816 /*
17817 * Shouldnt happen, we returned above if in_interrupt():
17818 */
17819 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
17820 index 8d82a77..0baf312 100644
17821 --- a/arch/x86/kernel/kgdb.c
17822 +++ b/arch/x86/kernel/kgdb.c
17823 @@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
17824
17825 /* clear the trace bit */
17826 linux_regs->flags &= ~X86_EFLAGS_TF;
17827 - atomic_set(&kgdb_cpu_doing_single_step, -1);
17828 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
17829
17830 /* set the trace bit if we're stepping */
17831 if (remcomInBuffer[0] == 's') {
17832 linux_regs->flags |= X86_EFLAGS_TF;
17833 kgdb_single_step = 1;
17834 - atomic_set(&kgdb_cpu_doing_single_step,
17835 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
17836 raw_smp_processor_id());
17837 }
17838
17839 @@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
17840 break;
17841
17842 case DIE_DEBUG:
17843 - if (atomic_read(&kgdb_cpu_doing_single_step) ==
17844 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
17845 raw_smp_processor_id()) {
17846 if (user_mode(regs))
17847 return single_step_cont(regs, args);
17848 @@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
17849 return instruction_pointer(regs);
17850 }
17851
17852 -struct kgdb_arch arch_kgdb_ops = {
17853 +const struct kgdb_arch arch_kgdb_ops = {
17854 /* Breakpoint instruction: */
17855 .gdb_bpt_instr = { 0xcc },
17856 .flags = KGDB_HW_BREAKPOINT,
17857 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
17858 index 7a67820..8d15b75 100644
17859 --- a/arch/x86/kernel/kprobes.c
17860 +++ b/arch/x86/kernel/kprobes.c
17861 @@ -168,9 +168,13 @@ static void __kprobes set_jmp_op(void *from, void *to)
17862 char op;
17863 s32 raddr;
17864 } __attribute__((packed)) * jop;
17865 - jop = (struct __arch_jmp_op *)from;
17866 +
17867 + jop = (struct __arch_jmp_op *)(ktla_ktva(from));
17868 +
17869 + pax_open_kernel();
17870 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
17871 jop->op = RELATIVEJUMP_INSTRUCTION;
17872 + pax_close_kernel();
17873 }
17874
17875 /*
17876 @@ -195,7 +199,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
17877 kprobe_opcode_t opcode;
17878 kprobe_opcode_t *orig_opcodes = opcodes;
17879
17880 - if (search_exception_tables((unsigned long)opcodes))
17881 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
17882 return 0; /* Page fault may occur on this address. */
17883
17884 retry:
17885 @@ -339,7 +343,9 @@ static void __kprobes fix_riprel(struct kprobe *p)
17886 disp = (u8 *) p->addr + *((s32 *) insn) -
17887 (u8 *) p->ainsn.insn;
17888 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
17889 + pax_open_kernel();
17890 *(s32 *)insn = (s32) disp;
17891 + pax_close_kernel();
17892 }
17893 }
17894 #endif
17895 @@ -347,16 +353,18 @@ static void __kprobes fix_riprel(struct kprobe *p)
17896
17897 static void __kprobes arch_copy_kprobe(struct kprobe *p)
17898 {
17899 - memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
17900 + pax_open_kernel();
17901 + memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
17902 + pax_close_kernel();
17903
17904 fix_riprel(p);
17905
17906 - if (can_boost(p->addr))
17907 + if (can_boost(ktla_ktva(p->addr)))
17908 p->ainsn.boostable = 0;
17909 else
17910 p->ainsn.boostable = -1;
17911
17912 - p->opcode = *p->addr;
17913 + p->opcode = *(ktla_ktva(p->addr));
17914 }
17915
17916 int __kprobes arch_prepare_kprobe(struct kprobe *p)
17917 @@ -434,7 +442,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
17918 if (p->opcode == BREAKPOINT_INSTRUCTION)
17919 regs->ip = (unsigned long)p->addr;
17920 else
17921 - regs->ip = (unsigned long)p->ainsn.insn;
17922 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17923 }
17924
17925 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
17926 @@ -455,7 +463,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
17927 if (p->ainsn.boostable == 1 && !p->post_handler) {
17928 /* Boost up -- we can execute copied instructions directly */
17929 reset_current_kprobe();
17930 - regs->ip = (unsigned long)p->ainsn.insn;
17931 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17932 preempt_enable_no_resched();
17933 return;
17934 }
17935 @@ -525,7 +533,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
17936 struct kprobe_ctlblk *kcb;
17937
17938 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
17939 - if (*addr != BREAKPOINT_INSTRUCTION) {
17940 + if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
17941 /*
17942 * The breakpoint instruction was removed right
17943 * after we hit it. Another cpu has removed
17944 @@ -637,6 +645,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
17945 /* Skip orig_ax, ip, cs */
17946 " addq $24, %rsp\n"
17947 " popfq\n"
17948 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17949 + " btsq $63,(%rsp)\n"
17950 +#endif
17951 #else
17952 " pushf\n"
17953 /*
17954 @@ -777,7 +788,7 @@ static void __kprobes resume_execution(struct kprobe *p,
17955 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
17956 {
17957 unsigned long *tos = stack_addr(regs);
17958 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
17959 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
17960 unsigned long orig_ip = (unsigned long)p->addr;
17961 kprobe_opcode_t *insn = p->ainsn.insn;
17962
17963 @@ -960,7 +971,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
17964 struct die_args *args = data;
17965 int ret = NOTIFY_DONE;
17966
17967 - if (args->regs && user_mode_vm(args->regs))
17968 + if (args->regs && user_mode(args->regs))
17969 return ret;
17970
17971 switch (val) {
17972 diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
17973 index 63b0ec8..6d92227 100644
17974 --- a/arch/x86/kernel/kvm.c
17975 +++ b/arch/x86/kernel/kvm.c
17976 @@ -216,6 +216,7 @@ static void __init paravirt_ops_setup(void)
17977 pv_mmu_ops.set_pud = kvm_set_pud;
17978 #if PAGETABLE_LEVELS == 4
17979 pv_mmu_ops.set_pgd = kvm_set_pgd;
17980 + pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
17981 #endif
17982 #endif
17983 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
17984 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
17985 index ec6ef60..ab2c824 100644
17986 --- a/arch/x86/kernel/ldt.c
17987 +++ b/arch/x86/kernel/ldt.c
17988 @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
17989 if (reload) {
17990 #ifdef CONFIG_SMP
17991 preempt_disable();
17992 - load_LDT(pc);
17993 + load_LDT_nolock(pc);
17994 if (!cpumask_equal(mm_cpumask(current->mm),
17995 cpumask_of(smp_processor_id())))
17996 smp_call_function(flush_ldt, current->mm, 1);
17997 preempt_enable();
17998 #else
17999 - load_LDT(pc);
18000 + load_LDT_nolock(pc);
18001 #endif
18002 }
18003 if (oldsize) {
18004 @@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
18005 return err;
18006
18007 for (i = 0; i < old->size; i++)
18008 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
18009 + write_ldt_entry(new->ldt, i, old->ldt + i);
18010 return 0;
18011 }
18012
18013 @@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
18014 retval = copy_ldt(&mm->context, &old_mm->context);
18015 mutex_unlock(&old_mm->context.lock);
18016 }
18017 +
18018 + if (tsk == current) {
18019 + mm->context.vdso = 0;
18020 +
18021 +#ifdef CONFIG_X86_32
18022 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18023 + mm->context.user_cs_base = 0UL;
18024 + mm->context.user_cs_limit = ~0UL;
18025 +
18026 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
18027 + cpus_clear(mm->context.cpu_user_cs_mask);
18028 +#endif
18029 +
18030 +#endif
18031 +#endif
18032 +
18033 + }
18034 +
18035 return retval;
18036 }
18037
18038 @@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
18039 }
18040 }
18041
18042 +#ifdef CONFIG_PAX_SEGMEXEC
18043 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
18044 + error = -EINVAL;
18045 + goto out_unlock;
18046 + }
18047 +#endif
18048 +
18049 fill_ldt(&ldt, &ldt_info);
18050 if (oldmode)
18051 ldt.avl = 0;
18052 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
18053 index c1c429d..f02eaf9 100644
18054 --- a/arch/x86/kernel/machine_kexec_32.c
18055 +++ b/arch/x86/kernel/machine_kexec_32.c
18056 @@ -26,7 +26,7 @@
18057 #include <asm/system.h>
18058 #include <asm/cacheflush.h>
18059
18060 -static void set_idt(void *newidt, __u16 limit)
18061 +static void set_idt(struct desc_struct *newidt, __u16 limit)
18062 {
18063 struct desc_ptr curidt;
18064
18065 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
18066 }
18067
18068
18069 -static void set_gdt(void *newgdt, __u16 limit)
18070 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
18071 {
18072 struct desc_ptr curgdt;
18073
18074 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
18075 }
18076
18077 control_page = page_address(image->control_code_page);
18078 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
18079 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
18080
18081 relocate_kernel_ptr = control_page;
18082 page_list[PA_CONTROL_PAGE] = __pa(control_page);
18083 diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
18084 index 1e47679..e73449d 100644
18085 --- a/arch/x86/kernel/microcode_amd.c
18086 +++ b/arch/x86/kernel/microcode_amd.c
18087 @@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int cpu)
18088 uci->mc = NULL;
18089 }
18090
18091 -static struct microcode_ops microcode_amd_ops = {
18092 +static const struct microcode_ops microcode_amd_ops = {
18093 .request_microcode_user = request_microcode_user,
18094 .request_microcode_fw = request_microcode_fw,
18095 .collect_cpu_info = collect_cpu_info_amd,
18096 @@ -372,7 +372,7 @@ static struct microcode_ops microcode_amd_ops = {
18097 .microcode_fini_cpu = microcode_fini_cpu_amd,
18098 };
18099
18100 -struct microcode_ops * __init init_amd_microcode(void)
18101 +const struct microcode_ops * __init init_amd_microcode(void)
18102 {
18103 return &microcode_amd_ops;
18104 }
18105 diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
18106 index 378e9a8..b5a6ea9 100644
18107 --- a/arch/x86/kernel/microcode_core.c
18108 +++ b/arch/x86/kernel/microcode_core.c
18109 @@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
18110
18111 #define MICROCODE_VERSION "2.00"
18112
18113 -static struct microcode_ops *microcode_ops;
18114 +static const struct microcode_ops *microcode_ops;
18115
18116 /*
18117 * Synchronization.
18118 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
18119 index 0d334dd..14cedaf 100644
18120 --- a/arch/x86/kernel/microcode_intel.c
18121 +++ b/arch/x86/kernel/microcode_intel.c
18122 @@ -443,13 +443,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
18123
18124 static int get_ucode_user(void *to, const void *from, size_t n)
18125 {
18126 - return copy_from_user(to, from, n);
18127 + return copy_from_user(to, (const void __force_user *)from, n);
18128 }
18129
18130 static enum ucode_state
18131 request_microcode_user(int cpu, const void __user *buf, size_t size)
18132 {
18133 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
18134 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
18135 }
18136
18137 static void microcode_fini_cpu(int cpu)
18138 @@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
18139 uci->mc = NULL;
18140 }
18141
18142 -static struct microcode_ops microcode_intel_ops = {
18143 +static const struct microcode_ops microcode_intel_ops = {
18144 .request_microcode_user = request_microcode_user,
18145 .request_microcode_fw = request_microcode_fw,
18146 .collect_cpu_info = collect_cpu_info,
18147 @@ -468,7 +468,7 @@ static struct microcode_ops microcode_intel_ops = {
18148 .microcode_fini_cpu = microcode_fini_cpu,
18149 };
18150
18151 -struct microcode_ops * __init init_intel_microcode(void)
18152 +const struct microcode_ops * __init init_intel_microcode(void)
18153 {
18154 return &microcode_intel_ops;
18155 }
18156 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
18157 index 89f386f..9028f51 100644
18158 --- a/arch/x86/kernel/module.c
18159 +++ b/arch/x86/kernel/module.c
18160 @@ -34,7 +34,7 @@
18161 #define DEBUGP(fmt...)
18162 #endif
18163
18164 -void *module_alloc(unsigned long size)
18165 +static void *__module_alloc(unsigned long size, pgprot_t prot)
18166 {
18167 struct vm_struct *area;
18168
18169 @@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
18170 if (!area)
18171 return NULL;
18172
18173 - return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
18174 - PAGE_KERNEL_EXEC);
18175 + return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
18176 +}
18177 +
18178 +void *module_alloc(unsigned long size)
18179 +{
18180 +
18181 +#ifdef CONFIG_PAX_KERNEXEC
18182 + return __module_alloc(size, PAGE_KERNEL);
18183 +#else
18184 + return __module_alloc(size, PAGE_KERNEL_EXEC);
18185 +#endif
18186 +
18187 }
18188
18189 /* Free memory returned from module_alloc */
18190 @@ -58,6 +68,40 @@ void module_free(struct module *mod, void *module_region)
18191 vfree(module_region);
18192 }
18193
18194 +#ifdef CONFIG_PAX_KERNEXEC
18195 +#ifdef CONFIG_X86_32
18196 +void *module_alloc_exec(unsigned long size)
18197 +{
18198 + struct vm_struct *area;
18199 +
18200 + if (size == 0)
18201 + return NULL;
18202 +
18203 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
18204 + return area ? area->addr : NULL;
18205 +}
18206 +EXPORT_SYMBOL(module_alloc_exec);
18207 +
18208 +void module_free_exec(struct module *mod, void *module_region)
18209 +{
18210 + vunmap(module_region);
18211 +}
18212 +EXPORT_SYMBOL(module_free_exec);
18213 +#else
18214 +void module_free_exec(struct module *mod, void *module_region)
18215 +{
18216 + module_free(mod, module_region);
18217 +}
18218 +EXPORT_SYMBOL(module_free_exec);
18219 +
18220 +void *module_alloc_exec(unsigned long size)
18221 +{
18222 + return __module_alloc(size, PAGE_KERNEL_RX);
18223 +}
18224 +EXPORT_SYMBOL(module_alloc_exec);
18225 +#endif
18226 +#endif
18227 +
18228 /* We don't need anything special. */
18229 int module_frob_arch_sections(Elf_Ehdr *hdr,
18230 Elf_Shdr *sechdrs,
18231 @@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18232 unsigned int i;
18233 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
18234 Elf32_Sym *sym;
18235 - uint32_t *location;
18236 + uint32_t *plocation, location;
18237
18238 DEBUGP("Applying relocate section %u to %u\n", relsec,
18239 sechdrs[relsec].sh_info);
18240 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
18241 /* This is where to make the change */
18242 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
18243 - + rel[i].r_offset;
18244 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
18245 + location = (uint32_t)plocation;
18246 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
18247 + plocation = ktla_ktva((void *)plocation);
18248 /* This is the symbol it is referring to. Note that all
18249 undefined symbols have been resolved. */
18250 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
18251 @@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18252 switch (ELF32_R_TYPE(rel[i].r_info)) {
18253 case R_386_32:
18254 /* We add the value into the location given */
18255 - *location += sym->st_value;
18256 + pax_open_kernel();
18257 + *plocation += sym->st_value;
18258 + pax_close_kernel();
18259 break;
18260 case R_386_PC32:
18261 /* Add the value, subtract its postition */
18262 - *location += sym->st_value - (uint32_t)location;
18263 + pax_open_kernel();
18264 + *plocation += sym->st_value - location;
18265 + pax_close_kernel();
18266 break;
18267 default:
18268 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
18269 @@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
18270 case R_X86_64_NONE:
18271 break;
18272 case R_X86_64_64:
18273 + pax_open_kernel();
18274 *(u64 *)loc = val;
18275 + pax_close_kernel();
18276 break;
18277 case R_X86_64_32:
18278 + pax_open_kernel();
18279 *(u32 *)loc = val;
18280 + pax_close_kernel();
18281 if (val != *(u32 *)loc)
18282 goto overflow;
18283 break;
18284 case R_X86_64_32S:
18285 + pax_open_kernel();
18286 *(s32 *)loc = val;
18287 + pax_close_kernel();
18288 if ((s64)val != *(s32 *)loc)
18289 goto overflow;
18290 break;
18291 case R_X86_64_PC32:
18292 val -= (u64)loc;
18293 + pax_open_kernel();
18294 *(u32 *)loc = val;
18295 + pax_close_kernel();
18296 +
18297 #if 0
18298 if ((s64)val != *(s32 *)loc)
18299 goto overflow;
18300 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
18301 index 3a7c5a4..9191528 100644
18302 --- a/arch/x86/kernel/paravirt-spinlocks.c
18303 +++ b/arch/x86/kernel/paravirt-spinlocks.c
18304 @@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
18305 __raw_spin_lock(lock);
18306 }
18307
18308 -struct pv_lock_ops pv_lock_ops = {
18309 +struct pv_lock_ops pv_lock_ops __read_only = {
18310 #ifdef CONFIG_SMP
18311 .spin_is_locked = __ticket_spin_is_locked,
18312 .spin_is_contended = __ticket_spin_is_contended,
18313 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
18314 index 1b1739d..dea6077 100644
18315 --- a/arch/x86/kernel/paravirt.c
18316 +++ b/arch/x86/kernel/paravirt.c
18317 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
18318 {
18319 return x;
18320 }
18321 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18322 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
18323 +#endif
18324
18325 void __init default_banner(void)
18326 {
18327 @@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
18328 * corresponding structure. */
18329 static void *get_call_destination(u8 type)
18330 {
18331 - struct paravirt_patch_template tmpl = {
18332 + const struct paravirt_patch_template tmpl = {
18333 .pv_init_ops = pv_init_ops,
18334 .pv_time_ops = pv_time_ops,
18335 .pv_cpu_ops = pv_cpu_ops,
18336 @@ -133,6 +136,8 @@ static void *get_call_destination(u8 type)
18337 .pv_lock_ops = pv_lock_ops,
18338 #endif
18339 };
18340 +
18341 + pax_track_stack();
18342 return *((void **)&tmpl + type);
18343 }
18344
18345 @@ -145,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
18346 if (opfunc == NULL)
18347 /* If there's no function, patch it with a ud2a (BUG) */
18348 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
18349 - else if (opfunc == _paravirt_nop)
18350 + else if (opfunc == (void *)_paravirt_nop)
18351 /* If the operation is a nop, then nop the callsite */
18352 ret = paravirt_patch_nop();
18353
18354 /* identity functions just return their single argument */
18355 - else if (opfunc == _paravirt_ident_32)
18356 + else if (opfunc == (void *)_paravirt_ident_32)
18357 ret = paravirt_patch_ident_32(insnbuf, len);
18358 - else if (opfunc == _paravirt_ident_64)
18359 + else if (opfunc == (void *)_paravirt_ident_64)
18360 ret = paravirt_patch_ident_64(insnbuf, len);
18361 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18362 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
18363 + ret = paravirt_patch_ident_64(insnbuf, len);
18364 +#endif
18365
18366 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
18367 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
18368 @@ -178,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
18369 if (insn_len > len || start == NULL)
18370 insn_len = len;
18371 else
18372 - memcpy(insnbuf, start, insn_len);
18373 + memcpy(insnbuf, ktla_ktva(start), insn_len);
18374
18375 return insn_len;
18376 }
18377 @@ -294,22 +303,22 @@ void arch_flush_lazy_mmu_mode(void)
18378 preempt_enable();
18379 }
18380
18381 -struct pv_info pv_info = {
18382 +struct pv_info pv_info __read_only = {
18383 .name = "bare hardware",
18384 .paravirt_enabled = 0,
18385 .kernel_rpl = 0,
18386 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
18387 };
18388
18389 -struct pv_init_ops pv_init_ops = {
18390 +struct pv_init_ops pv_init_ops __read_only = {
18391 .patch = native_patch,
18392 };
18393
18394 -struct pv_time_ops pv_time_ops = {
18395 +struct pv_time_ops pv_time_ops __read_only = {
18396 .sched_clock = native_sched_clock,
18397 };
18398
18399 -struct pv_irq_ops pv_irq_ops = {
18400 +struct pv_irq_ops pv_irq_ops __read_only = {
18401 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
18402 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
18403 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
18404 @@ -321,7 +330,7 @@ struct pv_irq_ops pv_irq_ops = {
18405 #endif
18406 };
18407
18408 -struct pv_cpu_ops pv_cpu_ops = {
18409 +struct pv_cpu_ops pv_cpu_ops __read_only = {
18410 .cpuid = native_cpuid,
18411 .get_debugreg = native_get_debugreg,
18412 .set_debugreg = native_set_debugreg,
18413 @@ -382,21 +391,26 @@ struct pv_cpu_ops pv_cpu_ops = {
18414 .end_context_switch = paravirt_nop,
18415 };
18416
18417 -struct pv_apic_ops pv_apic_ops = {
18418 +struct pv_apic_ops pv_apic_ops __read_only = {
18419 #ifdef CONFIG_X86_LOCAL_APIC
18420 .startup_ipi_hook = paravirt_nop,
18421 #endif
18422 };
18423
18424 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
18425 +#ifdef CONFIG_X86_32
18426 +#ifdef CONFIG_X86_PAE
18427 +/* 64-bit pagetable entries */
18428 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
18429 +#else
18430 /* 32-bit pagetable entries */
18431 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
18432 +#endif
18433 #else
18434 /* 64-bit pagetable entries */
18435 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
18436 #endif
18437
18438 -struct pv_mmu_ops pv_mmu_ops = {
18439 +struct pv_mmu_ops pv_mmu_ops __read_only = {
18440
18441 .read_cr2 = native_read_cr2,
18442 .write_cr2 = native_write_cr2,
18443 @@ -448,6 +462,7 @@ struct pv_mmu_ops pv_mmu_ops = {
18444 .make_pud = PTE_IDENT,
18445
18446 .set_pgd = native_set_pgd,
18447 + .set_pgd_batched = native_set_pgd_batched,
18448 #endif
18449 #endif /* PAGETABLE_LEVELS >= 3 */
18450
18451 @@ -467,6 +482,12 @@ struct pv_mmu_ops pv_mmu_ops = {
18452 },
18453
18454 .set_fixmap = native_set_fixmap,
18455 +
18456 +#ifdef CONFIG_PAX_KERNEXEC
18457 + .pax_open_kernel = native_pax_open_kernel,
18458 + .pax_close_kernel = native_pax_close_kernel,
18459 +#endif
18460 +
18461 };
18462
18463 EXPORT_SYMBOL_GPL(pv_time_ops);
18464 diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
18465 index 1a2d4b1..6a0dd55 100644
18466 --- a/arch/x86/kernel/pci-calgary_64.c
18467 +++ b/arch/x86/kernel/pci-calgary_64.c
18468 @@ -477,7 +477,7 @@ static void calgary_free_coherent(struct device *dev, size_t size,
18469 free_pages((unsigned long)vaddr, get_order(size));
18470 }
18471
18472 -static struct dma_map_ops calgary_dma_ops = {
18473 +static const struct dma_map_ops calgary_dma_ops = {
18474 .alloc_coherent = calgary_alloc_coherent,
18475 .free_coherent = calgary_free_coherent,
18476 .map_sg = calgary_map_sg,
18477 diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
18478 index 6ac3931..42b4414 100644
18479 --- a/arch/x86/kernel/pci-dma.c
18480 +++ b/arch/x86/kernel/pci-dma.c
18481 @@ -14,7 +14,7 @@
18482
18483 static int forbid_dac __read_mostly;
18484
18485 -struct dma_map_ops *dma_ops;
18486 +const struct dma_map_ops *dma_ops;
18487 EXPORT_SYMBOL(dma_ops);
18488
18489 static int iommu_sac_force __read_mostly;
18490 @@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
18491
18492 int dma_supported(struct device *dev, u64 mask)
18493 {
18494 - struct dma_map_ops *ops = get_dma_ops(dev);
18495 + const struct dma_map_ops *ops = get_dma_ops(dev);
18496
18497 #ifdef CONFIG_PCI
18498 if (mask > 0xffffffff && forbid_dac > 0) {
18499 diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
18500 index 1c76691..e3632db 100644
18501 --- a/arch/x86/kernel/pci-gart_64.c
18502 +++ b/arch/x86/kernel/pci-gart_64.c
18503 @@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
18504 return -1;
18505 }
18506
18507 -static struct dma_map_ops gart_dma_ops = {
18508 +static const struct dma_map_ops gart_dma_ops = {
18509 .map_sg = gart_map_sg,
18510 .unmap_sg = gart_unmap_sg,
18511 .map_page = gart_map_page,
18512 diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
18513 index a3933d4..c898869 100644
18514 --- a/arch/x86/kernel/pci-nommu.c
18515 +++ b/arch/x86/kernel/pci-nommu.c
18516 @@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(struct device *dev,
18517 flush_write_buffers();
18518 }
18519
18520 -struct dma_map_ops nommu_dma_ops = {
18521 +const struct dma_map_ops nommu_dma_ops = {
18522 .alloc_coherent = dma_generic_alloc_coherent,
18523 .free_coherent = nommu_free_coherent,
18524 .map_sg = nommu_map_sg,
18525 diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
18526 index aaa6b78..4de1881 100644
18527 --- a/arch/x86/kernel/pci-swiotlb.c
18528 +++ b/arch/x86/kernel/pci-swiotlb.c
18529 @@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
18530 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
18531 }
18532
18533 -static struct dma_map_ops swiotlb_dma_ops = {
18534 +static const struct dma_map_ops swiotlb_dma_ops = {
18535 .mapping_error = swiotlb_dma_mapping_error,
18536 .alloc_coherent = x86_swiotlb_alloc_coherent,
18537 .free_coherent = swiotlb_free_coherent,
18538 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
18539 index fc6c84d..0312ca2 100644
18540 --- a/arch/x86/kernel/process.c
18541 +++ b/arch/x86/kernel/process.c
18542 @@ -51,16 +51,33 @@ void free_thread_xstate(struct task_struct *tsk)
18543
18544 void free_thread_info(struct thread_info *ti)
18545 {
18546 - free_thread_xstate(ti->task);
18547 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
18548 }
18549
18550 +static struct kmem_cache *task_struct_cachep;
18551 +
18552 void arch_task_cache_init(void)
18553 {
18554 - task_xstate_cachep =
18555 - kmem_cache_create("task_xstate", xstate_size,
18556 + /* create a slab on which task_structs can be allocated */
18557 + task_struct_cachep =
18558 + kmem_cache_create("task_struct", sizeof(struct task_struct),
18559 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
18560 +
18561 + task_xstate_cachep =
18562 + kmem_cache_create("task_xstate", xstate_size,
18563 __alignof__(union thread_xstate),
18564 - SLAB_PANIC | SLAB_NOTRACK, NULL);
18565 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
18566 +}
18567 +
18568 +struct task_struct *alloc_task_struct(void)
18569 +{
18570 + return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
18571 +}
18572 +
18573 +void free_task_struct(struct task_struct *task)
18574 +{
18575 + free_thread_xstate(task);
18576 + kmem_cache_free(task_struct_cachep, task);
18577 }
18578
18579 /*
18580 @@ -73,7 +90,7 @@ void exit_thread(void)
18581 unsigned long *bp = t->io_bitmap_ptr;
18582
18583 if (bp) {
18584 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
18585 + struct tss_struct *tss = init_tss + get_cpu();
18586
18587 t->io_bitmap_ptr = NULL;
18588 clear_thread_flag(TIF_IO_BITMAP);
18589 @@ -93,6 +110,9 @@ void flush_thread(void)
18590
18591 clear_tsk_thread_flag(tsk, TIF_DEBUG);
18592
18593 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18594 + loadsegment(gs, 0);
18595 +#endif
18596 tsk->thread.debugreg0 = 0;
18597 tsk->thread.debugreg1 = 0;
18598 tsk->thread.debugreg2 = 0;
18599 @@ -307,7 +327,7 @@ void default_idle(void)
18600 EXPORT_SYMBOL(default_idle);
18601 #endif
18602
18603 -void stop_this_cpu(void *dummy)
18604 +__noreturn void stop_this_cpu(void *dummy)
18605 {
18606 local_irq_disable();
18607 /*
18608 @@ -568,16 +588,38 @@ static int __init idle_setup(char *str)
18609 }
18610 early_param("idle", idle_setup);
18611
18612 -unsigned long arch_align_stack(unsigned long sp)
18613 +#ifdef CONFIG_PAX_RANDKSTACK
18614 +void pax_randomize_kstack(struct pt_regs *regs)
18615 {
18616 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
18617 - sp -= get_random_int() % 8192;
18618 - return sp & ~0xf;
18619 -}
18620 + struct thread_struct *thread = &current->thread;
18621 + unsigned long time;
18622
18623 -unsigned long arch_randomize_brk(struct mm_struct *mm)
18624 -{
18625 - unsigned long range_end = mm->brk + 0x02000000;
18626 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
18627 + if (!randomize_va_space)
18628 + return;
18629 +
18630 + if (v8086_mode(regs))
18631 + return;
18632 +
18633 + rdtscl(time);
18634 +
18635 + /* P4 seems to return a 0 LSB, ignore it */
18636 +#ifdef CONFIG_MPENTIUM4
18637 + time &= 0x3EUL;
18638 + time <<= 2;
18639 +#elif defined(CONFIG_X86_64)
18640 + time &= 0xFUL;
18641 + time <<= 4;
18642 +#else
18643 + time &= 0x1FUL;
18644 + time <<= 3;
18645 +#endif
18646 +
18647 + thread->sp0 ^= time;
18648 + load_sp0(init_tss + smp_processor_id(), thread);
18649 +
18650 +#ifdef CONFIG_X86_64
18651 + percpu_write(kernel_stack, thread->sp0);
18652 +#endif
18653 }
18654 +#endif
18655
18656 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
18657 index c40c432..6e1df72 100644
18658 --- a/arch/x86/kernel/process_32.c
18659 +++ b/arch/x86/kernel/process_32.c
18660 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
18661 unsigned long thread_saved_pc(struct task_struct *tsk)
18662 {
18663 return ((unsigned long *)tsk->thread.sp)[3];
18664 +//XXX return tsk->thread.eip;
18665 }
18666
18667 #ifndef CONFIG_SMP
18668 @@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, int all)
18669 unsigned short ss, gs;
18670 const char *board;
18671
18672 - if (user_mode_vm(regs)) {
18673 + if (user_mode(regs)) {
18674 sp = regs->sp;
18675 ss = regs->ss & 0xffff;
18676 - gs = get_user_gs(regs);
18677 } else {
18678 sp = (unsigned long) (&regs->sp);
18679 savesegment(ss, ss);
18680 - savesegment(gs, gs);
18681 }
18682 + gs = get_user_gs(regs);
18683
18684 printk("\n");
18685
18686 @@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
18687 regs.bx = (unsigned long) fn;
18688 regs.dx = (unsigned long) arg;
18689
18690 - regs.ds = __USER_DS;
18691 - regs.es = __USER_DS;
18692 + regs.ds = __KERNEL_DS;
18693 + regs.es = __KERNEL_DS;
18694 regs.fs = __KERNEL_PERCPU;
18695 - regs.gs = __KERNEL_STACK_CANARY;
18696 + savesegment(gs, regs.gs);
18697 regs.orig_ax = -1;
18698 regs.ip = (unsigned long) kernel_thread_helper;
18699 regs.cs = __KERNEL_CS | get_kernel_rpl();
18700 @@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18701 struct task_struct *tsk;
18702 int err;
18703
18704 - childregs = task_pt_regs(p);
18705 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
18706 *childregs = *regs;
18707 childregs->ax = 0;
18708 childregs->sp = sp;
18709
18710 p->thread.sp = (unsigned long) childregs;
18711 p->thread.sp0 = (unsigned long) (childregs+1);
18712 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18713
18714 p->thread.ip = (unsigned long) ret_from_fork;
18715
18716 @@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18717 struct thread_struct *prev = &prev_p->thread,
18718 *next = &next_p->thread;
18719 int cpu = smp_processor_id();
18720 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
18721 + struct tss_struct *tss = init_tss + cpu;
18722 bool preload_fpu;
18723
18724 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
18725 @@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18726 */
18727 lazy_save_gs(prev->gs);
18728
18729 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18730 + __set_fs(task_thread_info(next_p)->addr_limit);
18731 +#endif
18732 +
18733 /*
18734 * Load the per-thread Thread-Local Storage descriptor.
18735 */
18736 @@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18737 */
18738 arch_end_context_switch(next_p);
18739
18740 + percpu_write(current_task, next_p);
18741 + percpu_write(current_tinfo, &next_p->tinfo);
18742 +
18743 if (preload_fpu)
18744 __math_state_restore();
18745
18746 @@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18747 if (prev->gs | next->gs)
18748 lazy_load_gs(next->gs);
18749
18750 - percpu_write(current_task, next_p);
18751 -
18752 return prev_p;
18753 }
18754
18755 @@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_struct *p)
18756 } while (count++ < 16);
18757 return 0;
18758 }
18759 -
18760 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
18761 index 39493bc..196816d 100644
18762 --- a/arch/x86/kernel/process_64.c
18763 +++ b/arch/x86/kernel/process_64.c
18764 @@ -91,7 +91,7 @@ static void __exit_idle(void)
18765 void exit_idle(void)
18766 {
18767 /* idle loop has pid 0 */
18768 - if (current->pid)
18769 + if (task_pid_nr(current))
18770 return;
18771 __exit_idle();
18772 }
18773 @@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, int all)
18774 if (!board)
18775 board = "";
18776 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
18777 - current->pid, current->comm, print_tainted(),
18778 + task_pid_nr(current), current->comm, print_tainted(),
18779 init_utsname()->release,
18780 (int)strcspn(init_utsname()->version, " "),
18781 init_utsname()->version, board);
18782 @@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18783 struct pt_regs *childregs;
18784 struct task_struct *me = current;
18785
18786 - childregs = ((struct pt_regs *)
18787 - (THREAD_SIZE + task_stack_page(p))) - 1;
18788 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
18789 *childregs = *regs;
18790
18791 childregs->ax = 0;
18792 @@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18793 p->thread.sp = (unsigned long) childregs;
18794 p->thread.sp0 = (unsigned long) (childregs+1);
18795 p->thread.usersp = me->thread.usersp;
18796 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18797
18798 set_tsk_thread_flag(p, TIF_FORK);
18799
18800 @@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18801 struct thread_struct *prev = &prev_p->thread;
18802 struct thread_struct *next = &next_p->thread;
18803 int cpu = smp_processor_id();
18804 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
18805 + struct tss_struct *tss = init_tss + cpu;
18806 unsigned fsindex, gsindex;
18807 bool preload_fpu;
18808
18809 @@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18810 prev->usersp = percpu_read(old_rsp);
18811 percpu_write(old_rsp, next->usersp);
18812 percpu_write(current_task, next_p);
18813 + percpu_write(current_tinfo, &next_p->tinfo);
18814
18815 - percpu_write(kernel_stack,
18816 - (unsigned long)task_stack_page(next_p) +
18817 - THREAD_SIZE - KERNEL_STACK_OFFSET);
18818 + percpu_write(kernel_stack, next->sp0);
18819
18820 /*
18821 * Now maybe reload the debug registers and handle I/O bitmaps
18822 @@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_struct *p)
18823 if (!p || p == current || p->state == TASK_RUNNING)
18824 return 0;
18825 stack = (unsigned long)task_stack_page(p);
18826 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
18827 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
18828 return 0;
18829 fp = *(u64 *)(p->thread.sp);
18830 do {
18831 - if (fp < (unsigned long)stack ||
18832 - fp >= (unsigned long)stack+THREAD_SIZE)
18833 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
18834 return 0;
18835 ip = *(u64 *)(fp+8);
18836 if (!in_sched_functions(ip))
18837 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
18838 index c06acdd..3f5fff5 100644
18839 --- a/arch/x86/kernel/ptrace.c
18840 +++ b/arch/x86/kernel/ptrace.c
18841 @@ -925,7 +925,7 @@ static const struct user_regset_view user_x86_32_view; /* Initialized below. */
18842 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18843 {
18844 int ret;
18845 - unsigned long __user *datap = (unsigned long __user *)data;
18846 + unsigned long __user *datap = (__force unsigned long __user *)data;
18847
18848 switch (request) {
18849 /* read the word at location addr in the USER area. */
18850 @@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18851 if (addr < 0)
18852 return -EIO;
18853 ret = do_get_thread_area(child, addr,
18854 - (struct user_desc __user *) data);
18855 + (__force struct user_desc __user *) data);
18856 break;
18857
18858 case PTRACE_SET_THREAD_AREA:
18859 if (addr < 0)
18860 return -EIO;
18861 ret = do_set_thread_area(child, addr,
18862 - (struct user_desc __user *) data, 0);
18863 + (__force struct user_desc __user *) data, 0);
18864 break;
18865 #endif
18866
18867 @@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18868 #ifdef CONFIG_X86_PTRACE_BTS
18869 case PTRACE_BTS_CONFIG:
18870 ret = ptrace_bts_config
18871 - (child, data, (struct ptrace_bts_config __user *)addr);
18872 + (child, data, (__force struct ptrace_bts_config __user *)addr);
18873 break;
18874
18875 case PTRACE_BTS_STATUS:
18876 ret = ptrace_bts_status
18877 - (child, data, (struct ptrace_bts_config __user *)addr);
18878 + (child, data, (__force struct ptrace_bts_config __user *)addr);
18879 break;
18880
18881 case PTRACE_BTS_SIZE:
18882 @@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18883
18884 case PTRACE_BTS_GET:
18885 ret = ptrace_bts_read_record
18886 - (child, data, (struct bts_struct __user *) addr);
18887 + (child, data, (__force struct bts_struct __user *) addr);
18888 break;
18889
18890 case PTRACE_BTS_CLEAR:
18891 @@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18892
18893 case PTRACE_BTS_DRAIN:
18894 ret = ptrace_bts_drain
18895 - (child, data, (struct bts_struct __user *) addr);
18896 + (child, data, (__force struct bts_struct __user *) addr);
18897 break;
18898 #endif /* CONFIG_X86_PTRACE_BTS */
18899
18900 @@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
18901 info.si_code = si_code;
18902
18903 /* User-mode ip? */
18904 - info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
18905 + info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
18906
18907 /* Send us the fake SIGTRAP */
18908 force_sig_info(SIGTRAP, &info, tsk);
18909 @@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
18910 * We must return the syscall number to actually look up in the table.
18911 * This can be -1L to skip running any syscall at all.
18912 */
18913 -asmregparm long syscall_trace_enter(struct pt_regs *regs)
18914 +long syscall_trace_enter(struct pt_regs *regs)
18915 {
18916 long ret = 0;
18917
18918 @@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
18919 return ret ?: regs->orig_ax;
18920 }
18921
18922 -asmregparm void syscall_trace_leave(struct pt_regs *regs)
18923 +void syscall_trace_leave(struct pt_regs *regs)
18924 {
18925 if (unlikely(current->audit_context))
18926 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
18927 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
18928 index cf98100..e76e03d 100644
18929 --- a/arch/x86/kernel/reboot.c
18930 +++ b/arch/x86/kernel/reboot.c
18931 @@ -33,7 +33,7 @@ void (*pm_power_off)(void);
18932 EXPORT_SYMBOL(pm_power_off);
18933
18934 static const struct desc_ptr no_idt = {};
18935 -static int reboot_mode;
18936 +static unsigned short reboot_mode;
18937 enum reboot_type reboot_type = BOOT_KBD;
18938 int reboot_force;
18939
18940 @@ -292,12 +292,12 @@ core_initcall(reboot_init);
18941 controller to pulse the CPU reset line, which is more thorough, but
18942 doesn't work with at least one type of 486 motherboard. It is easy
18943 to stop this code working; hence the copious comments. */
18944 -static const unsigned long long
18945 -real_mode_gdt_entries [3] =
18946 +static struct desc_struct
18947 +real_mode_gdt_entries [3] __read_only =
18948 {
18949 - 0x0000000000000000ULL, /* Null descriptor */
18950 - 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
18951 - 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
18952 + GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
18953 + GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
18954 + GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
18955 };
18956
18957 static const struct desc_ptr
18958 @@ -346,7 +346,7 @@ static const unsigned char jump_to_bios [] =
18959 * specified by the code and length parameters.
18960 * We assume that length will aways be less that 100!
18961 */
18962 -void machine_real_restart(const unsigned char *code, int length)
18963 +__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
18964 {
18965 local_irq_disable();
18966
18967 @@ -366,8 +366,8 @@ void machine_real_restart(const unsigned char *code, int length)
18968 /* Remap the kernel at virtual address zero, as well as offset zero
18969 from the kernel segment. This assumes the kernel segment starts at
18970 virtual address PAGE_OFFSET. */
18971 - memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
18972 - sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
18973 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
18974 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
18975
18976 /*
18977 * Use `swapper_pg_dir' as our page directory.
18978 @@ -379,16 +379,15 @@ void machine_real_restart(const unsigned char *code, int length)
18979 boot)". This seems like a fairly standard thing that gets set by
18980 REBOOT.COM programs, and the previous reset routine did this
18981 too. */
18982 - *((unsigned short *)0x472) = reboot_mode;
18983 + *(unsigned short *)(__va(0x472)) = reboot_mode;
18984
18985 /* For the switch to real mode, copy some code to low memory. It has
18986 to be in the first 64k because it is running in 16-bit mode, and it
18987 has to have the same physical and virtual address, because it turns
18988 off paging. Copy it near the end of the first page, out of the way
18989 of BIOS variables. */
18990 - memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
18991 - real_mode_switch, sizeof (real_mode_switch));
18992 - memcpy((void *)(0x1000 - 100), code, length);
18993 + memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
18994 + memcpy(__va(0x1000 - 100), code, length);
18995
18996 /* Set up the IDT for real mode. */
18997 load_idt(&real_mode_idt);
18998 @@ -416,6 +415,7 @@ void machine_real_restart(const unsigned char *code, int length)
18999 __asm__ __volatile__ ("ljmp $0x0008,%0"
19000 :
19001 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
19002 + do { } while (1);
19003 }
19004 #ifdef CONFIG_APM_MODULE
19005 EXPORT_SYMBOL(machine_real_restart);
19006 @@ -544,7 +544,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
19007 {
19008 }
19009
19010 -static void native_machine_emergency_restart(void)
19011 +__noreturn static void native_machine_emergency_restart(void)
19012 {
19013 int i;
19014
19015 @@ -659,13 +659,13 @@ void native_machine_shutdown(void)
19016 #endif
19017 }
19018
19019 -static void __machine_emergency_restart(int emergency)
19020 +static __noreturn void __machine_emergency_restart(int emergency)
19021 {
19022 reboot_emergency = emergency;
19023 machine_ops.emergency_restart();
19024 }
19025
19026 -static void native_machine_restart(char *__unused)
19027 +static __noreturn void native_machine_restart(char *__unused)
19028 {
19029 printk("machine restart\n");
19030
19031 @@ -674,7 +674,7 @@ static void native_machine_restart(char *__unused)
19032 __machine_emergency_restart(0);
19033 }
19034
19035 -static void native_machine_halt(void)
19036 +static __noreturn void native_machine_halt(void)
19037 {
19038 /* stop other cpus and apics */
19039 machine_shutdown();
19040 @@ -685,7 +685,7 @@ static void native_machine_halt(void)
19041 stop_this_cpu(NULL);
19042 }
19043
19044 -static void native_machine_power_off(void)
19045 +__noreturn static void native_machine_power_off(void)
19046 {
19047 if (pm_power_off) {
19048 if (!reboot_force)
19049 @@ -694,6 +694,7 @@ static void native_machine_power_off(void)
19050 }
19051 /* a fallback in case there is no PM info available */
19052 tboot_shutdown(TB_SHUTDOWN_HALT);
19053 + do { } while (1);
19054 }
19055
19056 struct machine_ops machine_ops = {
19057 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
19058 index 7a6f3b3..976a959 100644
19059 --- a/arch/x86/kernel/relocate_kernel_64.S
19060 +++ b/arch/x86/kernel/relocate_kernel_64.S
19061 @@ -11,6 +11,7 @@
19062 #include <asm/kexec.h>
19063 #include <asm/processor-flags.h>
19064 #include <asm/pgtable_types.h>
19065 +#include <asm/alternative-asm.h>
19066
19067 /*
19068 * Must be relocatable PIC code callable as a C function
19069 @@ -167,6 +168,7 @@ identity_mapped:
19070 xorq %r14, %r14
19071 xorq %r15, %r15
19072
19073 + pax_force_retaddr 0, 1
19074 ret
19075
19076 1:
19077 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
19078 index 5449a26..0b6c759 100644
19079 --- a/arch/x86/kernel/setup.c
19080 +++ b/arch/x86/kernel/setup.c
19081 @@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
19082
19083 if (!boot_params.hdr.root_flags)
19084 root_mountflags &= ~MS_RDONLY;
19085 - init_mm.start_code = (unsigned long) _text;
19086 - init_mm.end_code = (unsigned long) _etext;
19087 + init_mm.start_code = ktla_ktva((unsigned long) _text);
19088 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
19089 init_mm.end_data = (unsigned long) _edata;
19090 init_mm.brk = _brk_end;
19091
19092 - code_resource.start = virt_to_phys(_text);
19093 - code_resource.end = virt_to_phys(_etext)-1;
19094 - data_resource.start = virt_to_phys(_etext);
19095 + code_resource.start = virt_to_phys(ktla_ktva(_text));
19096 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
19097 + data_resource.start = virt_to_phys(_sdata);
19098 data_resource.end = virt_to_phys(_edata)-1;
19099 bss_resource.start = virt_to_phys(&__bss_start);
19100 bss_resource.end = virt_to_phys(&__bss_stop)-1;
19101 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
19102 index d559af9..524c6ad 100644
19103 --- a/arch/x86/kernel/setup_percpu.c
19104 +++ b/arch/x86/kernel/setup_percpu.c
19105 @@ -25,19 +25,17 @@
19106 # define DBG(x...)
19107 #endif
19108
19109 -DEFINE_PER_CPU(int, cpu_number);
19110 +#ifdef CONFIG_SMP
19111 +DEFINE_PER_CPU(unsigned int, cpu_number);
19112 EXPORT_PER_CPU_SYMBOL(cpu_number);
19113 +#endif
19114
19115 -#ifdef CONFIG_X86_64
19116 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
19117 -#else
19118 -#define BOOT_PERCPU_OFFSET 0
19119 -#endif
19120
19121 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
19122 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
19123
19124 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
19125 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
19126 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
19127 };
19128 EXPORT_SYMBOL(__per_cpu_offset);
19129 @@ -159,10 +157,10 @@ static inline void setup_percpu_segment(int cpu)
19130 {
19131 #ifdef CONFIG_X86_32
19132 struct desc_struct gdt;
19133 + unsigned long base = per_cpu_offset(cpu);
19134
19135 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
19136 - 0x2 | DESCTYPE_S, 0x8);
19137 - gdt.s = 1;
19138 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
19139 + 0x83 | DESCTYPE_S, 0xC);
19140 write_gdt_entry(get_cpu_gdt_table(cpu),
19141 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
19142 #endif
19143 @@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
19144 /* alrighty, percpu areas up and running */
19145 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
19146 for_each_possible_cpu(cpu) {
19147 +#ifdef CONFIG_CC_STACKPROTECTOR
19148 +#ifdef CONFIG_X86_32
19149 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
19150 +#endif
19151 +#endif
19152 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
19153 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
19154 per_cpu(cpu_number, cpu) = cpu;
19155 @@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
19156 early_per_cpu_map(x86_cpu_to_node_map, cpu);
19157 #endif
19158 #endif
19159 +#ifdef CONFIG_CC_STACKPROTECTOR
19160 +#ifdef CONFIG_X86_32
19161 + if (!cpu)
19162 + per_cpu(stack_canary.canary, cpu) = canary;
19163 +#endif
19164 +#endif
19165 /*
19166 * Up to this point, the boot CPU has been using .data.init
19167 * area. Reload any changed state for the boot CPU.
19168 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
19169 index 6a44a76..a9287a1 100644
19170 --- a/arch/x86/kernel/signal.c
19171 +++ b/arch/x86/kernel/signal.c
19172 @@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsigned long sp)
19173 * Align the stack pointer according to the i386 ABI,
19174 * i.e. so that on function entry ((sp + 4) & 15) == 0.
19175 */
19176 - sp = ((sp + 4) & -16ul) - 4;
19177 + sp = ((sp - 12) & -16ul) - 4;
19178 #else /* !CONFIG_X86_32 */
19179 sp = round_down(sp, 16) - 8;
19180 #endif
19181 @@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
19182 * Return an always-bogus address instead so we will die with SIGSEGV.
19183 */
19184 if (onsigstack && !likely(on_sig_stack(sp)))
19185 - return (void __user *)-1L;
19186 + return (__force void __user *)-1L;
19187
19188 /* save i387 state */
19189 if (used_math() && save_i387_xstate(*fpstate) < 0)
19190 - return (void __user *)-1L;
19191 + return (__force void __user *)-1L;
19192
19193 return (void __user *)sp;
19194 }
19195 @@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19196 }
19197
19198 if (current->mm->context.vdso)
19199 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19200 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19201 else
19202 - restorer = &frame->retcode;
19203 + restorer = (void __user *)&frame->retcode;
19204 if (ka->sa.sa_flags & SA_RESTORER)
19205 restorer = ka->sa.sa_restorer;
19206
19207 @@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19208 * reasons and because gdb uses it as a signature to notice
19209 * signal handler stack frames.
19210 */
19211 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
19212 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
19213
19214 if (err)
19215 return -EFAULT;
19216 @@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19217 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
19218
19219 /* Set up to return from userspace. */
19220 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19221 + if (current->mm->context.vdso)
19222 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19223 + else
19224 + restorer = (void __user *)&frame->retcode;
19225 if (ka->sa.sa_flags & SA_RESTORER)
19226 restorer = ka->sa.sa_restorer;
19227 put_user_ex(restorer, &frame->pretcode);
19228 @@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19229 * reasons and because gdb uses it as a signature to notice
19230 * signal handler stack frames.
19231 */
19232 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
19233 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
19234 } put_user_catch(err);
19235
19236 if (err)
19237 @@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *regs)
19238 int signr;
19239 sigset_t *oldset;
19240
19241 + pax_track_stack();
19242 +
19243 /*
19244 * We want the common case to go fast, which is why we may in certain
19245 * cases get here from kernel mode. Just return without doing anything
19246 @@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *regs)
19247 * X86_32: vm86 regs switched out by assembly code before reaching
19248 * here, so testing against kernel CS suffices.
19249 */
19250 - if (!user_mode(regs))
19251 + if (!user_mode_novm(regs))
19252 return;
19253
19254 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
19255 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
19256 index 7e8e905..64d5c32 100644
19257 --- a/arch/x86/kernel/smpboot.c
19258 +++ b/arch/x86/kernel/smpboot.c
19259 @@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
19260 */
19261 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
19262
19263 -void cpu_hotplug_driver_lock()
19264 +void cpu_hotplug_driver_lock(void)
19265 {
19266 - mutex_lock(&x86_cpu_hotplug_driver_mutex);
19267 + mutex_lock(&x86_cpu_hotplug_driver_mutex);
19268 }
19269
19270 -void cpu_hotplug_driver_unlock()
19271 +void cpu_hotplug_driver_unlock(void)
19272 {
19273 - mutex_unlock(&x86_cpu_hotplug_driver_mutex);
19274 + mutex_unlock(&x86_cpu_hotplug_driver_mutex);
19275 }
19276
19277 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
19278 @@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
19279 * target processor state.
19280 */
19281 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
19282 - (unsigned long)stack_start.sp);
19283 + stack_start);
19284
19285 /*
19286 * Run STARTUP IPI loop.
19287 @@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
19288 set_idle_for_cpu(cpu, c_idle.idle);
19289 do_rest:
19290 per_cpu(current_task, cpu) = c_idle.idle;
19291 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
19292 #ifdef CONFIG_X86_32
19293 /* Stack for startup_32 can be just as for start_secondary onwards */
19294 irq_ctx_init(cpu);
19295 @@ -750,13 +751,15 @@ do_rest:
19296 #else
19297 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
19298 initial_gs = per_cpu_offset(cpu);
19299 - per_cpu(kernel_stack, cpu) =
19300 - (unsigned long)task_stack_page(c_idle.idle) -
19301 - KERNEL_STACK_OFFSET + THREAD_SIZE;
19302 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
19303 #endif
19304 +
19305 + pax_open_kernel();
19306 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
19307 + pax_close_kernel();
19308 +
19309 initial_code = (unsigned long)start_secondary;
19310 - stack_start.sp = (void *) c_idle.idle->thread.sp;
19311 + stack_start = c_idle.idle->thread.sp;
19312
19313 /* start_ip had better be page-aligned! */
19314 start_ip = setup_trampoline();
19315 @@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
19316
19317 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
19318
19319 +#ifdef CONFIG_PAX_PER_CPU_PGD
19320 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
19321 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19322 + KERNEL_PGD_PTRS);
19323 +#endif
19324 +
19325 err = do_boot_cpu(apicid, cpu);
19326
19327 if (err) {
19328 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
19329 index 3149032..14f1053 100644
19330 --- a/arch/x86/kernel/step.c
19331 +++ b/arch/x86/kernel/step.c
19332 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19333 struct desc_struct *desc;
19334 unsigned long base;
19335
19336 - seg &= ~7UL;
19337 + seg >>= 3;
19338
19339 mutex_lock(&child->mm->context.lock);
19340 - if (unlikely((seg >> 3) >= child->mm->context.size))
19341 + if (unlikely(seg >= child->mm->context.size))
19342 addr = -1L; /* bogus selector, access would fault */
19343 else {
19344 desc = child->mm->context.ldt + seg;
19345 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19346 addr += base;
19347 }
19348 mutex_unlock(&child->mm->context.lock);
19349 - }
19350 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
19351 + addr = ktla_ktva(addr);
19352
19353 return addr;
19354 }
19355 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19356 unsigned char opcode[15];
19357 unsigned long addr = convert_ip_to_linear(child, regs);
19358
19359 + if (addr == -EINVAL)
19360 + return 0;
19361 +
19362 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
19363 for (i = 0; i < copied; i++) {
19364 switch (opcode[i]) {
19365 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19366
19367 #ifdef CONFIG_X86_64
19368 case 0x40 ... 0x4f:
19369 - if (regs->cs != __USER_CS)
19370 + if ((regs->cs & 0xffff) != __USER_CS)
19371 /* 32-bit mode: register increment */
19372 return 0;
19373 /* 64-bit mode: REX prefix */
19374 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
19375 index dee1ff7..a397f7f 100644
19376 --- a/arch/x86/kernel/sys_i386_32.c
19377 +++ b/arch/x86/kernel/sys_i386_32.c
19378 @@ -24,6 +24,21 @@
19379
19380 #include <asm/syscalls.h>
19381
19382 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
19383 +{
19384 + unsigned long pax_task_size = TASK_SIZE;
19385 +
19386 +#ifdef CONFIG_PAX_SEGMEXEC
19387 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
19388 + pax_task_size = SEGMEXEC_TASK_SIZE;
19389 +#endif
19390 +
19391 + if (len > pax_task_size || addr > pax_task_size - len)
19392 + return -EINVAL;
19393 +
19394 + return 0;
19395 +}
19396 +
19397 /*
19398 * Perform the select(nd, in, out, ex, tv) and mmap() system
19399 * calls. Linux/i386 didn't use to be able to handle more than
19400 @@ -58,6 +73,212 @@ out:
19401 return err;
19402 }
19403
19404 +unsigned long
19405 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
19406 + unsigned long len, unsigned long pgoff, unsigned long flags)
19407 +{
19408 + struct mm_struct *mm = current->mm;
19409 + struct vm_area_struct *vma;
19410 + unsigned long start_addr, pax_task_size = TASK_SIZE;
19411 +
19412 +#ifdef CONFIG_PAX_SEGMEXEC
19413 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19414 + pax_task_size = SEGMEXEC_TASK_SIZE;
19415 +#endif
19416 +
19417 + pax_task_size -= PAGE_SIZE;
19418 +
19419 + if (len > pax_task_size)
19420 + return -ENOMEM;
19421 +
19422 + if (flags & MAP_FIXED)
19423 + return addr;
19424 +
19425 +#ifdef CONFIG_PAX_RANDMMAP
19426 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19427 +#endif
19428 +
19429 + if (addr) {
19430 + addr = PAGE_ALIGN(addr);
19431 + if (pax_task_size - len >= addr) {
19432 + vma = find_vma(mm, addr);
19433 + if (check_heap_stack_gap(vma, addr, len))
19434 + return addr;
19435 + }
19436 + }
19437 + if (len > mm->cached_hole_size) {
19438 + start_addr = addr = mm->free_area_cache;
19439 + } else {
19440 + start_addr = addr = mm->mmap_base;
19441 + mm->cached_hole_size = 0;
19442 + }
19443 +
19444 +#ifdef CONFIG_PAX_PAGEEXEC
19445 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
19446 + start_addr = 0x00110000UL;
19447 +
19448 +#ifdef CONFIG_PAX_RANDMMAP
19449 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19450 + start_addr += mm->delta_mmap & 0x03FFF000UL;
19451 +#endif
19452 +
19453 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
19454 + start_addr = addr = mm->mmap_base;
19455 + else
19456 + addr = start_addr;
19457 + }
19458 +#endif
19459 +
19460 +full_search:
19461 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19462 + /* At this point: (!vma || addr < vma->vm_end). */
19463 + if (pax_task_size - len < addr) {
19464 + /*
19465 + * Start a new search - just in case we missed
19466 + * some holes.
19467 + */
19468 + if (start_addr != mm->mmap_base) {
19469 + start_addr = addr = mm->mmap_base;
19470 + mm->cached_hole_size = 0;
19471 + goto full_search;
19472 + }
19473 + return -ENOMEM;
19474 + }
19475 + if (check_heap_stack_gap(vma, addr, len))
19476 + break;
19477 + if (addr + mm->cached_hole_size < vma->vm_start)
19478 + mm->cached_hole_size = vma->vm_start - addr;
19479 + addr = vma->vm_end;
19480 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
19481 + start_addr = addr = mm->mmap_base;
19482 + mm->cached_hole_size = 0;
19483 + goto full_search;
19484 + }
19485 + }
19486 +
19487 + /*
19488 + * Remember the place where we stopped the search:
19489 + */
19490 + mm->free_area_cache = addr + len;
19491 + return addr;
19492 +}
19493 +
19494 +unsigned long
19495 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19496 + const unsigned long len, const unsigned long pgoff,
19497 + const unsigned long flags)
19498 +{
19499 + struct vm_area_struct *vma;
19500 + struct mm_struct *mm = current->mm;
19501 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
19502 +
19503 +#ifdef CONFIG_PAX_SEGMEXEC
19504 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19505 + pax_task_size = SEGMEXEC_TASK_SIZE;
19506 +#endif
19507 +
19508 + pax_task_size -= PAGE_SIZE;
19509 +
19510 + /* requested length too big for entire address space */
19511 + if (len > pax_task_size)
19512 + return -ENOMEM;
19513 +
19514 + if (flags & MAP_FIXED)
19515 + return addr;
19516 +
19517 +#ifdef CONFIG_PAX_PAGEEXEC
19518 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
19519 + goto bottomup;
19520 +#endif
19521 +
19522 +#ifdef CONFIG_PAX_RANDMMAP
19523 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19524 +#endif
19525 +
19526 + /* requesting a specific address */
19527 + if (addr) {
19528 + addr = PAGE_ALIGN(addr);
19529 + if (pax_task_size - len >= addr) {
19530 + vma = find_vma(mm, addr);
19531 + if (check_heap_stack_gap(vma, addr, len))
19532 + return addr;
19533 + }
19534 + }
19535 +
19536 + /* check if free_area_cache is useful for us */
19537 + if (len <= mm->cached_hole_size) {
19538 + mm->cached_hole_size = 0;
19539 + mm->free_area_cache = mm->mmap_base;
19540 + }
19541 +
19542 + /* either no address requested or can't fit in requested address hole */
19543 + addr = mm->free_area_cache;
19544 +
19545 + /* make sure it can fit in the remaining address space */
19546 + if (addr > len) {
19547 + vma = find_vma(mm, addr-len);
19548 + if (check_heap_stack_gap(vma, addr - len, len))
19549 + /* remember the address as a hint for next time */
19550 + return (mm->free_area_cache = addr-len);
19551 + }
19552 +
19553 + if (mm->mmap_base < len)
19554 + goto bottomup;
19555 +
19556 + addr = mm->mmap_base-len;
19557 +
19558 + do {
19559 + /*
19560 + * Lookup failure means no vma is above this address,
19561 + * else if new region fits below vma->vm_start,
19562 + * return with success:
19563 + */
19564 + vma = find_vma(mm, addr);
19565 + if (check_heap_stack_gap(vma, addr, len))
19566 + /* remember the address as a hint for next time */
19567 + return (mm->free_area_cache = addr);
19568 +
19569 + /* remember the largest hole we saw so far */
19570 + if (addr + mm->cached_hole_size < vma->vm_start)
19571 + mm->cached_hole_size = vma->vm_start - addr;
19572 +
19573 + /* try just below the current vma->vm_start */
19574 + addr = skip_heap_stack_gap(vma, len);
19575 + } while (!IS_ERR_VALUE(addr));
19576 +
19577 +bottomup:
19578 + /*
19579 + * A failed mmap() very likely causes application failure,
19580 + * so fall back to the bottom-up function here. This scenario
19581 + * can happen with large stack limits and large mmap()
19582 + * allocations.
19583 + */
19584 +
19585 +#ifdef CONFIG_PAX_SEGMEXEC
19586 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19587 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19588 + else
19589 +#endif
19590 +
19591 + mm->mmap_base = TASK_UNMAPPED_BASE;
19592 +
19593 +#ifdef CONFIG_PAX_RANDMMAP
19594 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19595 + mm->mmap_base += mm->delta_mmap;
19596 +#endif
19597 +
19598 + mm->free_area_cache = mm->mmap_base;
19599 + mm->cached_hole_size = ~0UL;
19600 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19601 + /*
19602 + * Restore the topdown base:
19603 + */
19604 + mm->mmap_base = base;
19605 + mm->free_area_cache = base;
19606 + mm->cached_hole_size = ~0UL;
19607 +
19608 + return addr;
19609 +}
19610
19611 struct sel_arg_struct {
19612 unsigned long n;
19613 @@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
19614 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
19615 case SEMTIMEDOP:
19616 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
19617 - (const struct timespec __user *)fifth);
19618 + (__force const struct timespec __user *)fifth);
19619
19620 case SEMGET:
19621 return sys_semget(first, second, third);
19622 @@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
19623 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
19624 if (ret)
19625 return ret;
19626 - return put_user(raddr, (ulong __user *) third);
19627 + return put_user(raddr, (__force ulong __user *) third);
19628 }
19629 case 1: /* iBCS2 emulator entry point */
19630 if (!segment_eq(get_fs(), get_ds()))
19631 @@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldold_utsname __user *name)
19632
19633 return error;
19634 }
19635 -
19636 -
19637 -/*
19638 - * Do a system call from kernel instead of calling sys_execve so we
19639 - * end up with proper pt_regs.
19640 - */
19641 -int kernel_execve(const char *filename, char *const argv[], char *const envp[])
19642 -{
19643 - long __res;
19644 - asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
19645 - : "=a" (__res)
19646 - : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
19647 - return __res;
19648 -}
19649 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
19650 index 8aa2057..b604bc1 100644
19651 --- a/arch/x86/kernel/sys_x86_64.c
19652 +++ b/arch/x86/kernel/sys_x86_64.c
19653 @@ -32,8 +32,8 @@ out:
19654 return error;
19655 }
19656
19657 -static void find_start_end(unsigned long flags, unsigned long *begin,
19658 - unsigned long *end)
19659 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
19660 + unsigned long *begin, unsigned long *end)
19661 {
19662 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
19663 unsigned long new_begin;
19664 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
19665 *begin = new_begin;
19666 }
19667 } else {
19668 - *begin = TASK_UNMAPPED_BASE;
19669 + *begin = mm->mmap_base;
19670 *end = TASK_SIZE;
19671 }
19672 }
19673 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
19674 if (flags & MAP_FIXED)
19675 return addr;
19676
19677 - find_start_end(flags, &begin, &end);
19678 + find_start_end(mm, flags, &begin, &end);
19679
19680 if (len > end)
19681 return -ENOMEM;
19682
19683 +#ifdef CONFIG_PAX_RANDMMAP
19684 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19685 +#endif
19686 +
19687 if (addr) {
19688 addr = PAGE_ALIGN(addr);
19689 vma = find_vma(mm, addr);
19690 - if (end - len >= addr &&
19691 - (!vma || addr + len <= vma->vm_start))
19692 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
19693 return addr;
19694 }
19695 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
19696 @@ -106,7 +109,7 @@ full_search:
19697 }
19698 return -ENOMEM;
19699 }
19700 - if (!vma || addr + len <= vma->vm_start) {
19701 + if (check_heap_stack_gap(vma, addr, len)) {
19702 /*
19703 * Remember the place where we stopped the search:
19704 */
19705 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19706 {
19707 struct vm_area_struct *vma;
19708 struct mm_struct *mm = current->mm;
19709 - unsigned long addr = addr0;
19710 + unsigned long base = mm->mmap_base, addr = addr0;
19711
19712 /* requested length too big for entire address space */
19713 if (len > TASK_SIZE)
19714 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19715 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
19716 goto bottomup;
19717
19718 +#ifdef CONFIG_PAX_RANDMMAP
19719 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19720 +#endif
19721 +
19722 /* requesting a specific address */
19723 if (addr) {
19724 addr = PAGE_ALIGN(addr);
19725 - vma = find_vma(mm, addr);
19726 - if (TASK_SIZE - len >= addr &&
19727 - (!vma || addr + len <= vma->vm_start))
19728 - return addr;
19729 + if (TASK_SIZE - len >= addr) {
19730 + vma = find_vma(mm, addr);
19731 + if (check_heap_stack_gap(vma, addr, len))
19732 + return addr;
19733 + }
19734 }
19735
19736 /* check if free_area_cache is useful for us */
19737 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19738 /* make sure it can fit in the remaining address space */
19739 if (addr > len) {
19740 vma = find_vma(mm, addr-len);
19741 - if (!vma || addr <= vma->vm_start)
19742 + if (check_heap_stack_gap(vma, addr - len, len))
19743 /* remember the address as a hint for next time */
19744 return mm->free_area_cache = addr-len;
19745 }
19746 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19747 * return with success:
19748 */
19749 vma = find_vma(mm, addr);
19750 - if (!vma || addr+len <= vma->vm_start)
19751 + if (check_heap_stack_gap(vma, addr, len))
19752 /* remember the address as a hint for next time */
19753 return mm->free_area_cache = addr;
19754
19755 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19756 mm->cached_hole_size = vma->vm_start - addr;
19757
19758 /* try just below the current vma->vm_start */
19759 - addr = vma->vm_start-len;
19760 - } while (len < vma->vm_start);
19761 + addr = skip_heap_stack_gap(vma, len);
19762 + } while (!IS_ERR_VALUE(addr));
19763
19764 bottomup:
19765 /*
19766 @@ -198,13 +206,21 @@ bottomup:
19767 * can happen with large stack limits and large mmap()
19768 * allocations.
19769 */
19770 + mm->mmap_base = TASK_UNMAPPED_BASE;
19771 +
19772 +#ifdef CONFIG_PAX_RANDMMAP
19773 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19774 + mm->mmap_base += mm->delta_mmap;
19775 +#endif
19776 +
19777 + mm->free_area_cache = mm->mmap_base;
19778 mm->cached_hole_size = ~0UL;
19779 - mm->free_area_cache = TASK_UNMAPPED_BASE;
19780 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19781 /*
19782 * Restore the topdown base:
19783 */
19784 - mm->free_area_cache = mm->mmap_base;
19785 + mm->mmap_base = base;
19786 + mm->free_area_cache = base;
19787 mm->cached_hole_size = ~0UL;
19788
19789 return addr;
19790 diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
19791 index 76d70a4..4c94a44 100644
19792 --- a/arch/x86/kernel/syscall_table_32.S
19793 +++ b/arch/x86/kernel/syscall_table_32.S
19794 @@ -1,3 +1,4 @@
19795 +.section .rodata,"a",@progbits
19796 ENTRY(sys_call_table)
19797 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
19798 .long sys_exit
19799 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
19800 index 46b8277..3349d55 100644
19801 --- a/arch/x86/kernel/tboot.c
19802 +++ b/arch/x86/kernel/tboot.c
19803 @@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
19804
19805 void tboot_shutdown(u32 shutdown_type)
19806 {
19807 - void (*shutdown)(void);
19808 + void (* __noreturn shutdown)(void);
19809
19810 if (!tboot_enabled())
19811 return;
19812 @@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
19813
19814 switch_to_tboot_pt();
19815
19816 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
19817 + shutdown = (void *)tboot->shutdown_entry;
19818 shutdown();
19819
19820 /* should not reach here */
19821 @@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
19822 tboot_shutdown(acpi_shutdown_map[sleep_state]);
19823 }
19824
19825 -static atomic_t ap_wfs_count;
19826 +static atomic_unchecked_t ap_wfs_count;
19827
19828 static int tboot_wait_for_aps(int num_aps)
19829 {
19830 @@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
19831 {
19832 switch (action) {
19833 case CPU_DYING:
19834 - atomic_inc(&ap_wfs_count);
19835 + atomic_inc_unchecked(&ap_wfs_count);
19836 if (num_online_cpus() == 1)
19837 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
19838 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
19839 return NOTIFY_BAD;
19840 break;
19841 }
19842 @@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
19843
19844 tboot_create_trampoline();
19845
19846 - atomic_set(&ap_wfs_count, 0);
19847 + atomic_set_unchecked(&ap_wfs_count, 0);
19848 register_hotcpu_notifier(&tboot_cpu_notifier);
19849 return 0;
19850 }
19851 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
19852 index be25734..87fe232 100644
19853 --- a/arch/x86/kernel/time.c
19854 +++ b/arch/x86/kernel/time.c
19855 @@ -26,17 +26,13 @@
19856 int timer_ack;
19857 #endif
19858
19859 -#ifdef CONFIG_X86_64
19860 -volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
19861 -#endif
19862 -
19863 unsigned long profile_pc(struct pt_regs *regs)
19864 {
19865 unsigned long pc = instruction_pointer(regs);
19866
19867 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
19868 + if (!user_mode(regs) && in_lock_functions(pc)) {
19869 #ifdef CONFIG_FRAME_POINTER
19870 - return *(unsigned long *)(regs->bp + sizeof(long));
19871 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
19872 #else
19873 unsigned long *sp =
19874 (unsigned long *)kernel_stack_pointer(regs);
19875 @@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
19876 * or above a saved flags. Eflags has bits 22-31 zero,
19877 * kernel addresses don't.
19878 */
19879 +
19880 +#ifdef CONFIG_PAX_KERNEXEC
19881 + return ktla_ktva(sp[0]);
19882 +#else
19883 if (sp[0] >> 22)
19884 return sp[0];
19885 if (sp[1] >> 22)
19886 return sp[1];
19887 #endif
19888 +
19889 +#endif
19890 }
19891 return pc;
19892 }
19893 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
19894 index 6bb7b85..dd853e1 100644
19895 --- a/arch/x86/kernel/tls.c
19896 +++ b/arch/x86/kernel/tls.c
19897 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
19898 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
19899 return -EINVAL;
19900
19901 +#ifdef CONFIG_PAX_SEGMEXEC
19902 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
19903 + return -EINVAL;
19904 +#endif
19905 +
19906 set_tls_desc(p, idx, &info, 1);
19907
19908 return 0;
19909 diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
19910 index 8508237..229b664 100644
19911 --- a/arch/x86/kernel/trampoline_32.S
19912 +++ b/arch/x86/kernel/trampoline_32.S
19913 @@ -32,6 +32,12 @@
19914 #include <asm/segment.h>
19915 #include <asm/page_types.h>
19916
19917 +#ifdef CONFIG_PAX_KERNEXEC
19918 +#define ta(X) (X)
19919 +#else
19920 +#define ta(X) ((X) - __PAGE_OFFSET)
19921 +#endif
19922 +
19923 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
19924 __CPUINITRODATA
19925 .code16
19926 @@ -60,7 +66,7 @@ r_base = .
19927 inc %ax # protected mode (PE) bit
19928 lmsw %ax # into protected mode
19929 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
19930 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
19931 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
19932
19933 # These need to be in the same 64K segment as the above;
19934 # hence we don't use the boot_gdt_descr defined in head.S
19935 diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
19936 index 3af2dff..ba8aa49 100644
19937 --- a/arch/x86/kernel/trampoline_64.S
19938 +++ b/arch/x86/kernel/trampoline_64.S
19939 @@ -91,7 +91,7 @@ startup_32:
19940 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
19941 movl %eax, %ds
19942
19943 - movl $X86_CR4_PAE, %eax
19944 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
19945 movl %eax, %cr4 # Enable PAE mode
19946
19947 # Setup trampoline 4 level pagetables
19948 @@ -127,7 +127,7 @@ startup_64:
19949 no_longmode:
19950 hlt
19951 jmp no_longmode
19952 -#include "verify_cpu_64.S"
19953 +#include "verify_cpu.S"
19954
19955 # Careful these need to be in the same 64K segment as the above;
19956 tidt:
19957 @@ -138,7 +138,7 @@ tidt:
19958 # so the kernel can live anywhere
19959 .balign 4
19960 tgdt:
19961 - .short tgdt_end - tgdt # gdt limit
19962 + .short tgdt_end - tgdt - 1 # gdt limit
19963 .long tgdt - r_base
19964 .short 0
19965 .quad 0x00cf9b000000ffff # __KERNEL32_CS
19966 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
19967 index 7e37dce..ec3f8e5 100644
19968 --- a/arch/x86/kernel/traps.c
19969 +++ b/arch/x86/kernel/traps.c
19970 @@ -69,12 +69,6 @@ asmlinkage int system_call(void);
19971
19972 /* Do we ignore FPU interrupts ? */
19973 char ignore_fpu_irq;
19974 -
19975 -/*
19976 - * The IDT has to be page-aligned to simplify the Pentium
19977 - * F0 0F bug workaround.
19978 - */
19979 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
19980 #endif
19981
19982 DECLARE_BITMAP(used_vectors, NR_VECTORS);
19983 @@ -112,19 +106,19 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
19984 static inline void
19985 die_if_kernel(const char *str, struct pt_regs *regs, long err)
19986 {
19987 - if (!user_mode_vm(regs))
19988 + if (!user_mode(regs))
19989 die(str, regs, err);
19990 }
19991 #endif
19992
19993 static void __kprobes
19994 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19995 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
19996 long error_code, siginfo_t *info)
19997 {
19998 struct task_struct *tsk = current;
19999
20000 #ifdef CONFIG_X86_32
20001 - if (regs->flags & X86_VM_MASK) {
20002 + if (v8086_mode(regs)) {
20003 /*
20004 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
20005 * On nmi (interrupt 2), do_trap should not be called.
20006 @@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
20007 }
20008 #endif
20009
20010 - if (!user_mode(regs))
20011 + if (!user_mode_novm(regs))
20012 goto kernel_trap;
20013
20014 #ifdef CONFIG_X86_32
20015 @@ -158,7 +152,7 @@ trap_signal:
20016 printk_ratelimit()) {
20017 printk(KERN_INFO
20018 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
20019 - tsk->comm, tsk->pid, str,
20020 + tsk->comm, task_pid_nr(tsk), str,
20021 regs->ip, regs->sp, error_code);
20022 print_vma_addr(" in ", regs->ip);
20023 printk("\n");
20024 @@ -175,8 +169,20 @@ kernel_trap:
20025 if (!fixup_exception(regs)) {
20026 tsk->thread.error_code = error_code;
20027 tsk->thread.trap_no = trapnr;
20028 +
20029 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20030 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
20031 + str = "PAX: suspicious stack segment fault";
20032 +#endif
20033 +
20034 die(str, regs, error_code);
20035 }
20036 +
20037 +#ifdef CONFIG_PAX_REFCOUNT
20038 + if (trapnr == 4)
20039 + pax_report_refcount_overflow(regs);
20040 +#endif
20041 +
20042 return;
20043
20044 #ifdef CONFIG_X86_32
20045 @@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
20046 conditional_sti(regs);
20047
20048 #ifdef CONFIG_X86_32
20049 - if (regs->flags & X86_VM_MASK)
20050 + if (v8086_mode(regs))
20051 goto gp_in_vm86;
20052 #endif
20053
20054 tsk = current;
20055 - if (!user_mode(regs))
20056 + if (!user_mode_novm(regs))
20057 goto gp_in_kernel;
20058
20059 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20060 + if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
20061 + struct mm_struct *mm = tsk->mm;
20062 + unsigned long limit;
20063 +
20064 + down_write(&mm->mmap_sem);
20065 + limit = mm->context.user_cs_limit;
20066 + if (limit < TASK_SIZE) {
20067 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
20068 + up_write(&mm->mmap_sem);
20069 + return;
20070 + }
20071 + up_write(&mm->mmap_sem);
20072 + }
20073 +#endif
20074 +
20075 tsk->thread.error_code = error_code;
20076 tsk->thread.trap_no = 13;
20077
20078 @@ -305,6 +327,13 @@ gp_in_kernel:
20079 if (notify_die(DIE_GPF, "general protection fault", regs,
20080 error_code, 13, SIGSEGV) == NOTIFY_STOP)
20081 return;
20082 +
20083 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20084 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
20085 + die("PAX: suspicious general protection fault", regs, error_code);
20086 + else
20087 +#endif
20088 +
20089 die("general protection fault", regs, error_code);
20090 }
20091
20092 @@ -435,6 +464,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
20093 dotraplinkage notrace __kprobes void
20094 do_nmi(struct pt_regs *regs, long error_code)
20095 {
20096 +
20097 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20098 + if (!user_mode(regs)) {
20099 + unsigned long cs = regs->cs & 0xFFFF;
20100 + unsigned long ip = ktva_ktla(regs->ip);
20101 +
20102 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
20103 + regs->ip = ip;
20104 + }
20105 +#endif
20106 +
20107 nmi_enter();
20108
20109 inc_irq_stat(__nmi_count);
20110 @@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20111 }
20112
20113 #ifdef CONFIG_X86_32
20114 - if (regs->flags & X86_VM_MASK)
20115 + if (v8086_mode(regs))
20116 goto debug_vm86;
20117 #endif
20118
20119 @@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20120 * kernel space (but re-enable TF when returning to user mode).
20121 */
20122 if (condition & DR_STEP) {
20123 - if (!user_mode(regs))
20124 + if (!user_mode_novm(regs))
20125 goto clear_TF_reenable;
20126 }
20127
20128 @@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
20129 * Handle strange cache flush from user space exception
20130 * in all other cases. This is undocumented behaviour.
20131 */
20132 - if (regs->flags & X86_VM_MASK) {
20133 + if (v8086_mode(regs)) {
20134 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
20135 return;
20136 }
20137 @@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
20138 void __math_state_restore(void)
20139 {
20140 struct thread_info *thread = current_thread_info();
20141 - struct task_struct *tsk = thread->task;
20142 + struct task_struct *tsk = current;
20143
20144 /*
20145 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
20146 @@ -825,8 +865,7 @@ void __math_state_restore(void)
20147 */
20148 asmlinkage void math_state_restore(void)
20149 {
20150 - struct thread_info *thread = current_thread_info();
20151 - struct task_struct *tsk = thread->task;
20152 + struct task_struct *tsk = current;
20153
20154 if (!tsk_used_math(tsk)) {
20155 local_irq_enable();
20156 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
20157 new file mode 100644
20158 index 0000000..50c5edd
20159 --- /dev/null
20160 +++ b/arch/x86/kernel/verify_cpu.S
20161 @@ -0,0 +1,140 @@
20162 +/*
20163 + *
20164 + * verify_cpu.S - Code for cpu long mode and SSE verification. This
20165 + * code has been borrowed from boot/setup.S and was introduced by
20166 + * Andi Kleen.
20167 + *
20168 + * Copyright (c) 2007 Andi Kleen (ak@suse.de)
20169 + * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
20170 + * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
20171 + * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
20172 + *
20173 + * This source code is licensed under the GNU General Public License,
20174 + * Version 2. See the file COPYING for more details.
20175 + *
20176 + * This is a common code for verification whether CPU supports
20177 + * long mode and SSE or not. It is not called directly instead this
20178 + * file is included at various places and compiled in that context.
20179 + * This file is expected to run in 32bit code. Currently:
20180 + *
20181 + * arch/x86/boot/compressed/head_64.S: Boot cpu verification
20182 + * arch/x86/kernel/trampoline_64.S: secondary processor verification
20183 + * arch/x86/kernel/head_32.S: processor startup
20184 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
20185 + *
20186 + * verify_cpu, returns the status of longmode and SSE in register %eax.
20187 + * 0: Success 1: Failure
20188 + *
20189 + * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
20190 + *
20191 + * The caller needs to check for the error code and take the action
20192 + * appropriately. Either display a message or halt.
20193 + */
20194 +
20195 +#include <asm/cpufeature.h>
20196 +#include <asm/msr-index.h>
20197 +
20198 +verify_cpu:
20199 + pushfl # Save caller passed flags
20200 + pushl $0 # Kill any dangerous flags
20201 + popfl
20202 +
20203 + pushfl # standard way to check for cpuid
20204 + popl %eax
20205 + movl %eax,%ebx
20206 + xorl $0x200000,%eax
20207 + pushl %eax
20208 + popfl
20209 + pushfl
20210 + popl %eax
20211 + cmpl %eax,%ebx
20212 + jz verify_cpu_no_longmode # cpu has no cpuid
20213 +
20214 + movl $0x0,%eax # See if cpuid 1 is implemented
20215 + cpuid
20216 + cmpl $0x1,%eax
20217 + jb verify_cpu_no_longmode # no cpuid 1
20218 +
20219 + xor %di,%di
20220 + cmpl $0x68747541,%ebx # AuthenticAMD
20221 + jnz verify_cpu_noamd
20222 + cmpl $0x69746e65,%edx
20223 + jnz verify_cpu_noamd
20224 + cmpl $0x444d4163,%ecx
20225 + jnz verify_cpu_noamd
20226 + mov $1,%di # cpu is from AMD
20227 + jmp verify_cpu_check
20228 +
20229 +verify_cpu_noamd:
20230 + cmpl $0x756e6547,%ebx # GenuineIntel?
20231 + jnz verify_cpu_check
20232 + cmpl $0x49656e69,%edx
20233 + jnz verify_cpu_check
20234 + cmpl $0x6c65746e,%ecx
20235 + jnz verify_cpu_check
20236 +
20237 + # only call IA32_MISC_ENABLE when:
20238 + # family > 6 || (family == 6 && model >= 0xd)
20239 + movl $0x1, %eax # check CPU family and model
20240 + cpuid
20241 + movl %eax, %ecx
20242 +
20243 + andl $0x0ff00f00, %eax # mask family and extended family
20244 + shrl $8, %eax
20245 + cmpl $6, %eax
20246 + ja verify_cpu_clear_xd # family > 6, ok
20247 + jb verify_cpu_check # family < 6, skip
20248 +
20249 + andl $0x000f00f0, %ecx # mask model and extended model
20250 + shrl $4, %ecx
20251 + cmpl $0xd, %ecx
20252 + jb verify_cpu_check # family == 6, model < 0xd, skip
20253 +
20254 +verify_cpu_clear_xd:
20255 + movl $MSR_IA32_MISC_ENABLE, %ecx
20256 + rdmsr
20257 + btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
20258 + jnc verify_cpu_check # only write MSR if bit was changed
20259 + wrmsr
20260 +
20261 +verify_cpu_check:
20262 + movl $0x1,%eax # Does the cpu have what it takes
20263 + cpuid
20264 + andl $REQUIRED_MASK0,%edx
20265 + xorl $REQUIRED_MASK0,%edx
20266 + jnz verify_cpu_no_longmode
20267 +
20268 + movl $0x80000000,%eax # See if extended cpuid is implemented
20269 + cpuid
20270 + cmpl $0x80000001,%eax
20271 + jb verify_cpu_no_longmode # no extended cpuid
20272 +
20273 + movl $0x80000001,%eax # Does the cpu have what it takes
20274 + cpuid
20275 + andl $REQUIRED_MASK1,%edx
20276 + xorl $REQUIRED_MASK1,%edx
20277 + jnz verify_cpu_no_longmode
20278 +
20279 +verify_cpu_sse_test:
20280 + movl $1,%eax
20281 + cpuid
20282 + andl $SSE_MASK,%edx
20283 + cmpl $SSE_MASK,%edx
20284 + je verify_cpu_sse_ok
20285 + test %di,%di
20286 + jz verify_cpu_no_longmode # only try to force SSE on AMD
20287 + movl $MSR_K7_HWCR,%ecx
20288 + rdmsr
20289 + btr $15,%eax # enable SSE
20290 + wrmsr
20291 + xor %di,%di # don't loop
20292 + jmp verify_cpu_sse_test # try again
20293 +
20294 +verify_cpu_no_longmode:
20295 + popfl # Restore caller passed flags
20296 + movl $1,%eax
20297 + ret
20298 +verify_cpu_sse_ok:
20299 + popfl # Restore caller passed flags
20300 + xorl %eax, %eax
20301 + ret
20302 diff --git a/arch/x86/kernel/verify_cpu_64.S b/arch/x86/kernel/verify_cpu_64.S
20303 deleted file mode 100644
20304 index 45b6f8a..0000000
20305 --- a/arch/x86/kernel/verify_cpu_64.S
20306 +++ /dev/null
20307 @@ -1,105 +0,0 @@
20308 -/*
20309 - *
20310 - * verify_cpu.S - Code for cpu long mode and SSE verification. This
20311 - * code has been borrowed from boot/setup.S and was introduced by
20312 - * Andi Kleen.
20313 - *
20314 - * Copyright (c) 2007 Andi Kleen (ak@suse.de)
20315 - * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
20316 - * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
20317 - *
20318 - * This source code is licensed under the GNU General Public License,
20319 - * Version 2. See the file COPYING for more details.
20320 - *
20321 - * This is a common code for verification whether CPU supports
20322 - * long mode and SSE or not. It is not called directly instead this
20323 - * file is included at various places and compiled in that context.
20324 - * Following are the current usage.
20325 - *
20326 - * This file is included by both 16bit and 32bit code.
20327 - *
20328 - * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
20329 - * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
20330 - * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
20331 - * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
20332 - *
20333 - * verify_cpu, returns the status of cpu check in register %eax.
20334 - * 0: Success 1: Failure
20335 - *
20336 - * The caller needs to check for the error code and take the action
20337 - * appropriately. Either display a message or halt.
20338 - */
20339 -
20340 -#include <asm/cpufeature.h>
20341 -
20342 -verify_cpu:
20343 - pushfl # Save caller passed flags
20344 - pushl $0 # Kill any dangerous flags
20345 - popfl
20346 -
20347 - pushfl # standard way to check for cpuid
20348 - popl %eax
20349 - movl %eax,%ebx
20350 - xorl $0x200000,%eax
20351 - pushl %eax
20352 - popfl
20353 - pushfl
20354 - popl %eax
20355 - cmpl %eax,%ebx
20356 - jz verify_cpu_no_longmode # cpu has no cpuid
20357 -
20358 - movl $0x0,%eax # See if cpuid 1 is implemented
20359 - cpuid
20360 - cmpl $0x1,%eax
20361 - jb verify_cpu_no_longmode # no cpuid 1
20362 -
20363 - xor %di,%di
20364 - cmpl $0x68747541,%ebx # AuthenticAMD
20365 - jnz verify_cpu_noamd
20366 - cmpl $0x69746e65,%edx
20367 - jnz verify_cpu_noamd
20368 - cmpl $0x444d4163,%ecx
20369 - jnz verify_cpu_noamd
20370 - mov $1,%di # cpu is from AMD
20371 -
20372 -verify_cpu_noamd:
20373 - movl $0x1,%eax # Does the cpu have what it takes
20374 - cpuid
20375 - andl $REQUIRED_MASK0,%edx
20376 - xorl $REQUIRED_MASK0,%edx
20377 - jnz verify_cpu_no_longmode
20378 -
20379 - movl $0x80000000,%eax # See if extended cpuid is implemented
20380 - cpuid
20381 - cmpl $0x80000001,%eax
20382 - jb verify_cpu_no_longmode # no extended cpuid
20383 -
20384 - movl $0x80000001,%eax # Does the cpu have what it takes
20385 - cpuid
20386 - andl $REQUIRED_MASK1,%edx
20387 - xorl $REQUIRED_MASK1,%edx
20388 - jnz verify_cpu_no_longmode
20389 -
20390 -verify_cpu_sse_test:
20391 - movl $1,%eax
20392 - cpuid
20393 - andl $SSE_MASK,%edx
20394 - cmpl $SSE_MASK,%edx
20395 - je verify_cpu_sse_ok
20396 - test %di,%di
20397 - jz verify_cpu_no_longmode # only try to force SSE on AMD
20398 - movl $0xc0010015,%ecx # HWCR
20399 - rdmsr
20400 - btr $15,%eax # enable SSE
20401 - wrmsr
20402 - xor %di,%di # don't loop
20403 - jmp verify_cpu_sse_test # try again
20404 -
20405 -verify_cpu_no_longmode:
20406 - popfl # Restore caller passed flags
20407 - movl $1,%eax
20408 - ret
20409 -verify_cpu_sse_ok:
20410 - popfl # Restore caller passed flags
20411 - xorl %eax, %eax
20412 - ret
20413 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
20414 index 9c4e625..c992817 100644
20415 --- a/arch/x86/kernel/vm86_32.c
20416 +++ b/arch/x86/kernel/vm86_32.c
20417 @@ -41,6 +41,7 @@
20418 #include <linux/ptrace.h>
20419 #include <linux/audit.h>
20420 #include <linux/stddef.h>
20421 +#include <linux/grsecurity.h>
20422
20423 #include <asm/uaccess.h>
20424 #include <asm/io.h>
20425 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
20426 do_exit(SIGSEGV);
20427 }
20428
20429 - tss = &per_cpu(init_tss, get_cpu());
20430 + tss = init_tss + get_cpu();
20431 current->thread.sp0 = current->thread.saved_sp0;
20432 current->thread.sysenter_cs = __KERNEL_CS;
20433 load_sp0(tss, &current->thread);
20434 @@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
20435 struct task_struct *tsk;
20436 int tmp, ret = -EPERM;
20437
20438 +#ifdef CONFIG_GRKERNSEC_VM86
20439 + if (!capable(CAP_SYS_RAWIO)) {
20440 + gr_handle_vm86();
20441 + goto out;
20442 + }
20443 +#endif
20444 +
20445 tsk = current;
20446 if (tsk->thread.saved_sp0)
20447 goto out;
20448 @@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
20449 int tmp, ret;
20450 struct vm86plus_struct __user *v86;
20451
20452 +#ifdef CONFIG_GRKERNSEC_VM86
20453 + if (!capable(CAP_SYS_RAWIO)) {
20454 + gr_handle_vm86();
20455 + ret = -EPERM;
20456 + goto out;
20457 + }
20458 +#endif
20459 +
20460 tsk = current;
20461 switch (regs->bx) {
20462 case VM86_REQUEST_IRQ:
20463 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
20464 tsk->thread.saved_fs = info->regs32->fs;
20465 tsk->thread.saved_gs = get_user_gs(info->regs32);
20466
20467 - tss = &per_cpu(init_tss, get_cpu());
20468 + tss = init_tss + get_cpu();
20469 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
20470 if (cpu_has_sep)
20471 tsk->thread.sysenter_cs = 0;
20472 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
20473 goto cannot_handle;
20474 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
20475 goto cannot_handle;
20476 - intr_ptr = (unsigned long __user *) (i << 2);
20477 + intr_ptr = (__force unsigned long __user *) (i << 2);
20478 if (get_user(segoffs, intr_ptr))
20479 goto cannot_handle;
20480 if ((segoffs >> 16) == BIOSSEG)
20481 diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
20482 index d430e4c..831f817 100644
20483 --- a/arch/x86/kernel/vmi_32.c
20484 +++ b/arch/x86/kernel/vmi_32.c
20485 @@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
20486 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
20487
20488 #define call_vrom_func(rom,func) \
20489 - (((VROMFUNC *)(rom->func))())
20490 + (((VROMFUNC *)(ktva_ktla(rom.func)))())
20491
20492 #define call_vrom_long_func(rom,func,arg) \
20493 - (((VROMLONGFUNC *)(rom->func)) (arg))
20494 +({\
20495 + u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
20496 + struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
20497 + __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
20498 + __reloc;\
20499 +})
20500
20501 -static struct vrom_header *vmi_rom;
20502 +static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
20503 static int disable_pge;
20504 static int disable_pse;
20505 static int disable_sep;
20506 @@ -76,10 +81,10 @@ static struct {
20507 void (*set_initial_ap_state)(int, int);
20508 void (*halt)(void);
20509 void (*set_lazy_mode)(int mode);
20510 -} vmi_ops;
20511 +} __no_const vmi_ops __read_only;
20512
20513 /* Cached VMI operations */
20514 -struct vmi_timer_ops vmi_timer_ops;
20515 +struct vmi_timer_ops vmi_timer_ops __read_only;
20516
20517 /*
20518 * VMI patching routines.
20519 @@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
20520 static inline void patch_offset(void *insnbuf,
20521 unsigned long ip, unsigned long dest)
20522 {
20523 - *(unsigned long *)(insnbuf+1) = dest-ip-5;
20524 + *(unsigned long *)(insnbuf+1) = dest-ip-5;
20525 }
20526
20527 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
20528 @@ -102,6 +107,7 @@ static unsigned patch_internal(int call, unsigned len, void *insnbuf,
20529 {
20530 u64 reloc;
20531 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
20532 +
20533 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
20534 switch(rel->type) {
20535 case VMI_RELOCATION_CALL_REL:
20536 @@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud_t pudval)
20537
20538 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
20539 {
20540 - const pte_t pte = { .pte = 0 };
20541 + const pte_t pte = __pte(0ULL);
20542 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
20543 }
20544
20545 static void vmi_pmd_clear(pmd_t *pmd)
20546 {
20547 - const pte_t pte = { .pte = 0 };
20548 + const pte_t pte = __pte(0ULL);
20549 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
20550 }
20551 #endif
20552 @@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
20553 ap.ss = __KERNEL_DS;
20554 ap.esp = (unsigned long) start_esp;
20555
20556 - ap.ds = __USER_DS;
20557 - ap.es = __USER_DS;
20558 + ap.ds = __KERNEL_DS;
20559 + ap.es = __KERNEL_DS;
20560 ap.fs = __KERNEL_PERCPU;
20561 - ap.gs = __KERNEL_STACK_CANARY;
20562 + savesegment(gs, ap.gs);
20563
20564 ap.eflags = 0;
20565
20566 @@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
20567 paravirt_leave_lazy_mmu();
20568 }
20569
20570 +#ifdef CONFIG_PAX_KERNEXEC
20571 +static unsigned long vmi_pax_open_kernel(void)
20572 +{
20573 + return 0;
20574 +}
20575 +
20576 +static unsigned long vmi_pax_close_kernel(void)
20577 +{
20578 + return 0;
20579 +}
20580 +#endif
20581 +
20582 static inline int __init check_vmi_rom(struct vrom_header *rom)
20583 {
20584 struct pci_header *pci;
20585 @@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(struct vrom_header *rom)
20586 return 0;
20587 if (rom->vrom_signature != VMI_SIGNATURE)
20588 return 0;
20589 + if (rom->rom_length * 512 > sizeof(*rom)) {
20590 + printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
20591 + return 0;
20592 + }
20593 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
20594 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
20595 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
20596 @@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(void)
20597 struct vrom_header *romstart;
20598 romstart = (struct vrom_header *)isa_bus_to_virt(base);
20599 if (check_vmi_rom(romstart)) {
20600 - vmi_rom = romstart;
20601 + vmi_rom = *romstart;
20602 return 1;
20603 }
20604 }
20605 @@ -836,6 +858,11 @@ static inline int __init activate_vmi(void)
20606
20607 para_fill(pv_irq_ops.safe_halt, Halt);
20608
20609 +#ifdef CONFIG_PAX_KERNEXEC
20610 + pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
20611 + pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
20612 +#endif
20613 +
20614 /*
20615 * Alternative instruction rewriting doesn't happen soon enough
20616 * to convert VMI_IRET to a call instead of a jump; so we have
20617 @@ -853,16 +880,16 @@ static inline int __init activate_vmi(void)
20618
20619 void __init vmi_init(void)
20620 {
20621 - if (!vmi_rom)
20622 + if (!vmi_rom.rom_signature)
20623 probe_vmi_rom();
20624 else
20625 - check_vmi_rom(vmi_rom);
20626 + check_vmi_rom(&vmi_rom);
20627
20628 /* In case probing for or validating the ROM failed, basil */
20629 - if (!vmi_rom)
20630 + if (!vmi_rom.rom_signature)
20631 return;
20632
20633 - reserve_top_address(-vmi_rom->virtual_top);
20634 + reserve_top_address(-vmi_rom.virtual_top);
20635
20636 #ifdef CONFIG_X86_IO_APIC
20637 /* This is virtual hardware; timer routing is wired correctly */
20638 @@ -874,7 +901,7 @@ void __init vmi_activate(void)
20639 {
20640 unsigned long flags;
20641
20642 - if (!vmi_rom)
20643 + if (!vmi_rom.rom_signature)
20644 return;
20645
20646 local_irq_save(flags);
20647 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
20648 index 3c68fe2..12c8280 100644
20649 --- a/arch/x86/kernel/vmlinux.lds.S
20650 +++ b/arch/x86/kernel/vmlinux.lds.S
20651 @@ -26,6 +26,13 @@
20652 #include <asm/page_types.h>
20653 #include <asm/cache.h>
20654 #include <asm/boot.h>
20655 +#include <asm/segment.h>
20656 +
20657 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20658 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
20659 +#else
20660 +#define __KERNEL_TEXT_OFFSET 0
20661 +#endif
20662
20663 #undef i386 /* in case the preprocessor is a 32bit one */
20664
20665 @@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
20666 #ifdef CONFIG_X86_32
20667 OUTPUT_ARCH(i386)
20668 ENTRY(phys_startup_32)
20669 -jiffies = jiffies_64;
20670 #else
20671 OUTPUT_ARCH(i386:x86-64)
20672 ENTRY(phys_startup_64)
20673 -jiffies_64 = jiffies;
20674 #endif
20675
20676 PHDRS {
20677 text PT_LOAD FLAGS(5); /* R_E */
20678 - data PT_LOAD FLAGS(7); /* RWE */
20679 +#ifdef CONFIG_X86_32
20680 + module PT_LOAD FLAGS(5); /* R_E */
20681 +#endif
20682 +#ifdef CONFIG_XEN
20683 + rodata PT_LOAD FLAGS(5); /* R_E */
20684 +#else
20685 + rodata PT_LOAD FLAGS(4); /* R__ */
20686 +#endif
20687 + data PT_LOAD FLAGS(6); /* RW_ */
20688 #ifdef CONFIG_X86_64
20689 user PT_LOAD FLAGS(5); /* R_E */
20690 +#endif
20691 + init.begin PT_LOAD FLAGS(6); /* RW_ */
20692 #ifdef CONFIG_SMP
20693 percpu PT_LOAD FLAGS(6); /* RW_ */
20694 #endif
20695 + text.init PT_LOAD FLAGS(5); /* R_E */
20696 + text.exit PT_LOAD FLAGS(5); /* R_E */
20697 init PT_LOAD FLAGS(7); /* RWE */
20698 -#endif
20699 note PT_NOTE FLAGS(0); /* ___ */
20700 }
20701
20702 SECTIONS
20703 {
20704 #ifdef CONFIG_X86_32
20705 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
20706 - phys_startup_32 = startup_32 - LOAD_OFFSET;
20707 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
20708 #else
20709 - . = __START_KERNEL;
20710 - phys_startup_64 = startup_64 - LOAD_OFFSET;
20711 + . = __START_KERNEL;
20712 #endif
20713
20714 /* Text and read-only data */
20715 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
20716 - _text = .;
20717 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20718 /* bootstrapping code */
20719 +#ifdef CONFIG_X86_32
20720 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20721 +#else
20722 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20723 +#endif
20724 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20725 + _text = .;
20726 HEAD_TEXT
20727 #ifdef CONFIG_X86_32
20728 . = ALIGN(PAGE_SIZE);
20729 @@ -82,28 +102,71 @@ SECTIONS
20730 IRQENTRY_TEXT
20731 *(.fixup)
20732 *(.gnu.warning)
20733 - /* End of text section */
20734 - _etext = .;
20735 } :text = 0x9090
20736
20737 - NOTES :text :note
20738 + . += __KERNEL_TEXT_OFFSET;
20739
20740 - EXCEPTION_TABLE(16) :text = 0x9090
20741 +#ifdef CONFIG_X86_32
20742 + . = ALIGN(PAGE_SIZE);
20743 + .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
20744 + *(.vmi.rom)
20745 + } :module
20746 +
20747 + . = ALIGN(PAGE_SIZE);
20748 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
20749 +
20750 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
20751 + MODULES_EXEC_VADDR = .;
20752 + BYTE(0)
20753 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
20754 + . = ALIGN(HPAGE_SIZE);
20755 + MODULES_EXEC_END = . - 1;
20756 +#endif
20757 +
20758 + } :module
20759 +#endif
20760 +
20761 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
20762 + /* End of text section */
20763 + _etext = . - __KERNEL_TEXT_OFFSET;
20764 + }
20765 +
20766 +#ifdef CONFIG_X86_32
20767 + . = ALIGN(PAGE_SIZE);
20768 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
20769 + *(.idt)
20770 + . = ALIGN(PAGE_SIZE);
20771 + *(.empty_zero_page)
20772 + *(.swapper_pg_fixmap)
20773 + *(.swapper_pg_pmd)
20774 + *(.swapper_pg_dir)
20775 + *(.trampoline_pg_dir)
20776 + } :rodata
20777 +#endif
20778 +
20779 + . = ALIGN(PAGE_SIZE);
20780 + NOTES :rodata :note
20781 +
20782 + EXCEPTION_TABLE(16) :rodata
20783
20784 RO_DATA(PAGE_SIZE)
20785
20786 /* Data */
20787 .data : AT(ADDR(.data) - LOAD_OFFSET) {
20788 +
20789 +#ifdef CONFIG_PAX_KERNEXEC
20790 + . = ALIGN(HPAGE_SIZE);
20791 +#else
20792 + . = ALIGN(PAGE_SIZE);
20793 +#endif
20794 +
20795 /* Start of data section */
20796 _sdata = .;
20797
20798 /* init_task */
20799 INIT_TASK_DATA(THREAD_SIZE)
20800
20801 -#ifdef CONFIG_X86_32
20802 - /* 32 bit has nosave before _edata */
20803 NOSAVE_DATA
20804 -#endif
20805
20806 PAGE_ALIGNED_DATA(PAGE_SIZE)
20807
20808 @@ -112,6 +175,8 @@ SECTIONS
20809 DATA_DATA
20810 CONSTRUCTORS
20811
20812 + jiffies = jiffies_64;
20813 +
20814 /* rarely changed data like cpu maps */
20815 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
20816
20817 @@ -166,12 +231,6 @@ SECTIONS
20818 }
20819 vgetcpu_mode = VVIRT(.vgetcpu_mode);
20820
20821 - . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
20822 - .jiffies : AT(VLOAD(.jiffies)) {
20823 - *(.jiffies)
20824 - }
20825 - jiffies = VVIRT(.jiffies);
20826 -
20827 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
20828 *(.vsyscall_3)
20829 }
20830 @@ -187,12 +246,19 @@ SECTIONS
20831 #endif /* CONFIG_X86_64 */
20832
20833 /* Init code and data - will be freed after init */
20834 - . = ALIGN(PAGE_SIZE);
20835 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
20836 + BYTE(0)
20837 +
20838 +#ifdef CONFIG_PAX_KERNEXEC
20839 + . = ALIGN(HPAGE_SIZE);
20840 +#else
20841 + . = ALIGN(PAGE_SIZE);
20842 +#endif
20843 +
20844 __init_begin = .; /* paired with __init_end */
20845 - }
20846 + } :init.begin
20847
20848 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
20849 +#ifdef CONFIG_SMP
20850 /*
20851 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
20852 * output PHDR, so the next output section - .init.text - should
20853 @@ -201,12 +267,27 @@ SECTIONS
20854 PERCPU_VADDR(0, :percpu)
20855 #endif
20856
20857 - INIT_TEXT_SECTION(PAGE_SIZE)
20858 -#ifdef CONFIG_X86_64
20859 - :init
20860 -#endif
20861 + . = ALIGN(PAGE_SIZE);
20862 + init_begin = .;
20863 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
20864 + VMLINUX_SYMBOL(_sinittext) = .;
20865 + INIT_TEXT
20866 + VMLINUX_SYMBOL(_einittext) = .;
20867 + . = ALIGN(PAGE_SIZE);
20868 + } :text.init
20869
20870 - INIT_DATA_SECTION(16)
20871 + /*
20872 + * .exit.text is discard at runtime, not link time, to deal with
20873 + * references from .altinstructions and .eh_frame
20874 + */
20875 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20876 + EXIT_TEXT
20877 + . = ALIGN(16);
20878 + } :text.exit
20879 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
20880 +
20881 + . = ALIGN(PAGE_SIZE);
20882 + INIT_DATA_SECTION(16) :init
20883
20884 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
20885 __x86_cpu_dev_start = .;
20886 @@ -232,19 +313,11 @@ SECTIONS
20887 *(.altinstr_replacement)
20888 }
20889
20890 - /*
20891 - * .exit.text is discard at runtime, not link time, to deal with
20892 - * references from .altinstructions and .eh_frame
20893 - */
20894 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
20895 - EXIT_TEXT
20896 - }
20897 -
20898 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
20899 EXIT_DATA
20900 }
20901
20902 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
20903 +#ifndef CONFIG_SMP
20904 PERCPU(PAGE_SIZE)
20905 #endif
20906
20907 @@ -267,12 +340,6 @@ SECTIONS
20908 . = ALIGN(PAGE_SIZE);
20909 }
20910
20911 -#ifdef CONFIG_X86_64
20912 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
20913 - NOSAVE_DATA
20914 - }
20915 -#endif
20916 -
20917 /* BSS */
20918 . = ALIGN(PAGE_SIZE);
20919 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
20920 @@ -288,6 +355,7 @@ SECTIONS
20921 __brk_base = .;
20922 . += 64 * 1024; /* 64k alignment slop space */
20923 *(.brk_reservation) /* areas brk users have reserved */
20924 + . = ALIGN(HPAGE_SIZE);
20925 __brk_limit = .;
20926 }
20927
20928 @@ -316,13 +384,12 @@ SECTIONS
20929 * for the boot processor.
20930 */
20931 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
20932 -INIT_PER_CPU(gdt_page);
20933 INIT_PER_CPU(irq_stack_union);
20934
20935 /*
20936 * Build-time check on the image size:
20937 */
20938 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
20939 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
20940 "kernel image bigger than KERNEL_IMAGE_SIZE");
20941
20942 #ifdef CONFIG_SMP
20943 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
20944 index 62f39d7..3bc46a1 100644
20945 --- a/arch/x86/kernel/vsyscall_64.c
20946 +++ b/arch/x86/kernel/vsyscall_64.c
20947 @@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
20948
20949 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
20950 /* copy vsyscall data */
20951 + strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
20952 vsyscall_gtod_data.clock.vread = clock->vread;
20953 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
20954 vsyscall_gtod_data.clock.mask = clock->mask;
20955 @@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
20956 We do this here because otherwise user space would do it on
20957 its own in a likely inferior way (no access to jiffies).
20958 If you don't like it pass NULL. */
20959 - if (tcache && tcache->blob[0] == (j = __jiffies)) {
20960 + if (tcache && tcache->blob[0] == (j = jiffies)) {
20961 p = tcache->blob[1];
20962 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
20963 /* Load per CPU data from RDTSCP */
20964 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
20965 index 3909e3b..5433a97 100644
20966 --- a/arch/x86/kernel/x8664_ksyms_64.c
20967 +++ b/arch/x86/kernel/x8664_ksyms_64.c
20968 @@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
20969
20970 EXPORT_SYMBOL(copy_user_generic);
20971 EXPORT_SYMBOL(__copy_user_nocache);
20972 -EXPORT_SYMBOL(copy_from_user);
20973 -EXPORT_SYMBOL(copy_to_user);
20974 EXPORT_SYMBOL(__copy_from_user_inatomic);
20975
20976 EXPORT_SYMBOL(copy_page);
20977 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
20978 index c5ee17e..d63218f 100644
20979 --- a/arch/x86/kernel/xsave.c
20980 +++ b/arch/x86/kernel/xsave.c
20981 @@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
20982 fx_sw_user->xstate_size > fx_sw_user->extended_size)
20983 return -1;
20984
20985 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
20986 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
20987 fx_sw_user->extended_size -
20988 FP_XSTATE_MAGIC2_SIZE));
20989 /*
20990 @@ -196,7 +196,7 @@ fx_only:
20991 * the other extended state.
20992 */
20993 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
20994 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
20995 + return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
20996 }
20997
20998 /*
20999 @@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf)
21000 if (task_thread_info(tsk)->status & TS_XSAVE)
21001 err = restore_user_xstate(buf);
21002 else
21003 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
21004 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
21005 buf);
21006 if (unlikely(err)) {
21007 /*
21008 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
21009 index 1350e43..a94b011 100644
21010 --- a/arch/x86/kvm/emulate.c
21011 +++ b/arch/x86/kvm/emulate.c
21012 @@ -81,8 +81,8 @@
21013 #define Src2CL (1<<29)
21014 #define Src2ImmByte (2<<29)
21015 #define Src2One (3<<29)
21016 -#define Src2Imm16 (4<<29)
21017 -#define Src2Mask (7<<29)
21018 +#define Src2Imm16 (4U<<29)
21019 +#define Src2Mask (7U<<29)
21020
21021 enum {
21022 Group1_80, Group1_81, Group1_82, Group1_83,
21023 @@ -411,6 +411,7 @@ static u32 group2_table[] = {
21024
21025 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
21026 do { \
21027 + unsigned long _tmp; \
21028 __asm__ __volatile__ ( \
21029 _PRE_EFLAGS("0", "4", "2") \
21030 _op _suffix " %"_x"3,%1; " \
21031 @@ -424,8 +425,6 @@ static u32 group2_table[] = {
21032 /* Raw emulation: instruction has two explicit operands. */
21033 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
21034 do { \
21035 - unsigned long _tmp; \
21036 - \
21037 switch ((_dst).bytes) { \
21038 case 2: \
21039 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
21040 @@ -441,7 +440,6 @@ static u32 group2_table[] = {
21041
21042 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
21043 do { \
21044 - unsigned long _tmp; \
21045 switch ((_dst).bytes) { \
21046 case 1: \
21047 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
21048 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
21049 index 8dfeaaa..4daa395 100644
21050 --- a/arch/x86/kvm/lapic.c
21051 +++ b/arch/x86/kvm/lapic.c
21052 @@ -52,7 +52,7 @@
21053 #define APIC_BUS_CYCLE_NS 1
21054
21055 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
21056 -#define apic_debug(fmt, arg...)
21057 +#define apic_debug(fmt, arg...) do {} while (0)
21058
21059 #define APIC_LVT_NUM 6
21060 /* 14 is the version for Xeon and Pentium 8.4.8*/
21061 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
21062 index 3bc2707..dd157e2 100644
21063 --- a/arch/x86/kvm/paging_tmpl.h
21064 +++ b/arch/x86/kvm/paging_tmpl.h
21065 @@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
21066 int level = PT_PAGE_TABLE_LEVEL;
21067 unsigned long mmu_seq;
21068
21069 + pax_track_stack();
21070 +
21071 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
21072 kvm_mmu_audit(vcpu, "pre page fault");
21073
21074 @@ -461,6 +463,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
21075 kvm_mmu_free_some_pages(vcpu);
21076 sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
21077 level, &write_pt, pfn);
21078 + (void)sptep;
21079 pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
21080 sptep, *sptep, write_pt);
21081
21082 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
21083 index 7c6e63e..c5d92c1 100644
21084 --- a/arch/x86/kvm/svm.c
21085 +++ b/arch/x86/kvm/svm.c
21086 @@ -2486,7 +2486,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
21087 int cpu = raw_smp_processor_id();
21088
21089 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
21090 +
21091 + pax_open_kernel();
21092 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
21093 + pax_close_kernel();
21094 +
21095 load_TR_desc();
21096 }
21097
21098 @@ -2947,7 +2951,7 @@ static bool svm_gb_page_enable(void)
21099 return true;
21100 }
21101
21102 -static struct kvm_x86_ops svm_x86_ops = {
21103 +static const struct kvm_x86_ops svm_x86_ops = {
21104 .cpu_has_kvm_support = has_svm,
21105 .disabled_by_bios = is_disabled,
21106 .hardware_setup = svm_hardware_setup,
21107 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
21108 index e6d925f..e7a4af8 100644
21109 --- a/arch/x86/kvm/vmx.c
21110 +++ b/arch/x86/kvm/vmx.c
21111 @@ -570,7 +570,11 @@ static void reload_tss(void)
21112
21113 kvm_get_gdt(&gdt);
21114 descs = (void *)gdt.base;
21115 +
21116 + pax_open_kernel();
21117 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
21118 + pax_close_kernel();
21119 +
21120 load_TR_desc();
21121 }
21122
21123 @@ -1410,8 +1414,11 @@ static __init int hardware_setup(void)
21124 if (!cpu_has_vmx_flexpriority())
21125 flexpriority_enabled = 0;
21126
21127 - if (!cpu_has_vmx_tpr_shadow())
21128 - kvm_x86_ops->update_cr8_intercept = NULL;
21129 + if (!cpu_has_vmx_tpr_shadow()) {
21130 + pax_open_kernel();
21131 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
21132 + pax_close_kernel();
21133 + }
21134
21135 if (enable_ept && !cpu_has_vmx_ept_2m_page())
21136 kvm_disable_largepages();
21137 @@ -2362,7 +2369,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
21138 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
21139
21140 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
21141 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
21142 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
21143 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
21144 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
21145 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
21146 @@ -3718,6 +3725,12 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21147 "jmp .Lkvm_vmx_return \n\t"
21148 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
21149 ".Lkvm_vmx_return: "
21150 +
21151 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21152 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
21153 + ".Lkvm_vmx_return2: "
21154 +#endif
21155 +
21156 /* Save guest registers, load host registers, keep flags */
21157 "xchg %0, (%%"R"sp) \n\t"
21158 "mov %%"R"ax, %c[rax](%0) \n\t"
21159 @@ -3764,8 +3777,13 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21160 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
21161 #endif
21162 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
21163 +
21164 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21165 + ,[cs]"i"(__KERNEL_CS)
21166 +#endif
21167 +
21168 : "cc", "memory"
21169 - , R"bx", R"di", R"si"
21170 + , R"ax", R"bx", R"di", R"si"
21171 #ifdef CONFIG_X86_64
21172 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
21173 #endif
21174 @@ -3782,7 +3800,16 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21175 if (vmx->rmode.irq.pending)
21176 fixup_rmode_irq(vmx);
21177
21178 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
21179 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
21180 +
21181 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21182 + loadsegment(fs, __KERNEL_PERCPU);
21183 +#endif
21184 +
21185 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21186 + __set_fs(current_thread_info()->addr_limit);
21187 +#endif
21188 +
21189 vmx->launched = 1;
21190
21191 vmx_complete_interrupts(vmx);
21192 @@ -3957,7 +3984,7 @@ static bool vmx_gb_page_enable(void)
21193 return false;
21194 }
21195
21196 -static struct kvm_x86_ops vmx_x86_ops = {
21197 +static const struct kvm_x86_ops vmx_x86_ops = {
21198 .cpu_has_kvm_support = cpu_has_kvm_support,
21199 .disabled_by_bios = vmx_disabled_by_bios,
21200 .hardware_setup = hardware_setup,
21201 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
21202 index df1cefb..5e882ad 100644
21203 --- a/arch/x86/kvm/x86.c
21204 +++ b/arch/x86/kvm/x86.c
21205 @@ -82,7 +82,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu);
21206 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
21207 struct kvm_cpuid_entry2 __user *entries);
21208
21209 -struct kvm_x86_ops *kvm_x86_ops;
21210 +const struct kvm_x86_ops *kvm_x86_ops;
21211 EXPORT_SYMBOL_GPL(kvm_x86_ops);
21212
21213 int ignore_msrs = 0;
21214 @@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
21215 struct kvm_cpuid2 *cpuid,
21216 struct kvm_cpuid_entry2 __user *entries)
21217 {
21218 - int r;
21219 + int r, i;
21220
21221 r = -E2BIG;
21222 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
21223 goto out;
21224 r = -EFAULT;
21225 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
21226 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21227 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21228 goto out;
21229 + for (i = 0; i < cpuid->nent; ++i) {
21230 + struct kvm_cpuid_entry2 cpuid_entry;
21231 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
21232 + goto out;
21233 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
21234 + }
21235 vcpu->arch.cpuid_nent = cpuid->nent;
21236 kvm_apic_set_version(vcpu);
21237 return 0;
21238 @@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
21239 struct kvm_cpuid2 *cpuid,
21240 struct kvm_cpuid_entry2 __user *entries)
21241 {
21242 - int r;
21243 + int r, i;
21244
21245 vcpu_load(vcpu);
21246 r = -E2BIG;
21247 if (cpuid->nent < vcpu->arch.cpuid_nent)
21248 goto out;
21249 r = -EFAULT;
21250 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
21251 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21252 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21253 goto out;
21254 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
21255 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
21256 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
21257 + goto out;
21258 + }
21259 return 0;
21260
21261 out:
21262 @@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
21263 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
21264 struct kvm_interrupt *irq)
21265 {
21266 - if (irq->irq < 0 || irq->irq >= 256)
21267 + if (irq->irq >= 256)
21268 return -EINVAL;
21269 if (irqchip_in_kernel(vcpu->kvm))
21270 return -ENXIO;
21271 @@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cpufreq_notifier_block = {
21272 .notifier_call = kvmclock_cpufreq_notifier
21273 };
21274
21275 -int kvm_arch_init(void *opaque)
21276 +int kvm_arch_init(const void *opaque)
21277 {
21278 int r, cpu;
21279 - struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
21280 + const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
21281
21282 if (kvm_x86_ops) {
21283 printk(KERN_ERR "kvm: already loaded the other module\n");
21284 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
21285 index 7e59dc1..b88c98f 100644
21286 --- a/arch/x86/lguest/boot.c
21287 +++ b/arch/x86/lguest/boot.c
21288 @@ -1172,9 +1172,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
21289 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
21290 * Launcher to reboot us.
21291 */
21292 -static void lguest_restart(char *reason)
21293 +static __noreturn void lguest_restart(char *reason)
21294 {
21295 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
21296 + BUG();
21297 }
21298
21299 /*G:050
21300 diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
21301 index 824fa0b..c619e96 100644
21302 --- a/arch/x86/lib/atomic64_32.c
21303 +++ b/arch/x86/lib/atomic64_32.c
21304 @@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val)
21305 }
21306 EXPORT_SYMBOL(atomic64_cmpxchg);
21307
21308 +u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
21309 +{
21310 + return cmpxchg8b(&ptr->counter, old_val, new_val);
21311 +}
21312 +EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
21313 +
21314 /**
21315 * atomic64_xchg - xchg atomic64 variable
21316 * @ptr: pointer to type atomic64_t
21317 @@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 new_val)
21318 EXPORT_SYMBOL(atomic64_xchg);
21319
21320 /**
21321 + * atomic64_xchg_unchecked - xchg atomic64 variable
21322 + * @ptr: pointer to type atomic64_unchecked_t
21323 + * @new_val: value to assign
21324 + *
21325 + * Atomically xchgs the value of @ptr to @new_val and returns
21326 + * the old value.
21327 + */
21328 +u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
21329 +{
21330 + /*
21331 + * Try first with a (possibly incorrect) assumption about
21332 + * what we have there. We'll do two loops most likely,
21333 + * but we'll get an ownership MESI transaction straight away
21334 + * instead of a read transaction followed by a
21335 + * flush-for-ownership transaction:
21336 + */
21337 + u64 old_val, real_val = 0;
21338 +
21339 + do {
21340 + old_val = real_val;
21341 +
21342 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
21343 +
21344 + } while (real_val != old_val);
21345 +
21346 + return old_val;
21347 +}
21348 +EXPORT_SYMBOL(atomic64_xchg_unchecked);
21349 +
21350 +/**
21351 * atomic64_set - set atomic64 variable
21352 * @ptr: pointer to type atomic64_t
21353 * @new_val: value to assign
21354 @@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 new_val)
21355 EXPORT_SYMBOL(atomic64_set);
21356
21357 /**
21358 -EXPORT_SYMBOL(atomic64_read);
21359 + * atomic64_unchecked_set - set atomic64 variable
21360 + * @ptr: pointer to type atomic64_unchecked_t
21361 + * @new_val: value to assign
21362 + *
21363 + * Atomically sets the value of @ptr to @new_val.
21364 + */
21365 +void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
21366 +{
21367 + atomic64_xchg_unchecked(ptr, new_val);
21368 +}
21369 +EXPORT_SYMBOL(atomic64_set_unchecked);
21370 +
21371 +/**
21372 * atomic64_add_return - add and return
21373 * @delta: integer value to add
21374 * @ptr: pointer to type atomic64_t
21375 @@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 delta, atomic64_t *ptr)
21376 }
21377 EXPORT_SYMBOL(atomic64_add_return);
21378
21379 +/**
21380 + * atomic64_add_return_unchecked - add and return
21381 + * @delta: integer value to add
21382 + * @ptr: pointer to type atomic64_unchecked_t
21383 + *
21384 + * Atomically adds @delta to @ptr and returns @delta + *@ptr
21385 + */
21386 +noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21387 +{
21388 + /*
21389 + * Try first with a (possibly incorrect) assumption about
21390 + * what we have there. We'll do two loops most likely,
21391 + * but we'll get an ownership MESI transaction straight away
21392 + * instead of a read transaction followed by a
21393 + * flush-for-ownership transaction:
21394 + */
21395 + u64 old_val, new_val, real_val = 0;
21396 +
21397 + do {
21398 + old_val = real_val;
21399 + new_val = old_val + delta;
21400 +
21401 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
21402 +
21403 + } while (real_val != old_val);
21404 +
21405 + return new_val;
21406 +}
21407 +EXPORT_SYMBOL(atomic64_add_return_unchecked);
21408 +
21409 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
21410 {
21411 return atomic64_add_return(-delta, ptr);
21412 }
21413 EXPORT_SYMBOL(atomic64_sub_return);
21414
21415 +u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21416 +{
21417 + return atomic64_add_return_unchecked(-delta, ptr);
21418 +}
21419 +EXPORT_SYMBOL(atomic64_sub_return_unchecked);
21420 +
21421 u64 atomic64_inc_return(atomic64_t *ptr)
21422 {
21423 return atomic64_add_return(1, ptr);
21424 }
21425 EXPORT_SYMBOL(atomic64_inc_return);
21426
21427 +u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
21428 +{
21429 + return atomic64_add_return_unchecked(1, ptr);
21430 +}
21431 +EXPORT_SYMBOL(atomic64_inc_return_unchecked);
21432 +
21433 u64 atomic64_dec_return(atomic64_t *ptr)
21434 {
21435 return atomic64_sub_return(1, ptr);
21436 }
21437 EXPORT_SYMBOL(atomic64_dec_return);
21438
21439 +u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
21440 +{
21441 + return atomic64_sub_return_unchecked(1, ptr);
21442 +}
21443 +EXPORT_SYMBOL(atomic64_dec_return_unchecked);
21444 +
21445 /**
21446 * atomic64_add - add integer to atomic64 variable
21447 * @delta: integer value to add
21448 @@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t *ptr)
21449 EXPORT_SYMBOL(atomic64_add);
21450
21451 /**
21452 + * atomic64_add_unchecked - add integer to atomic64 variable
21453 + * @delta: integer value to add
21454 + * @ptr: pointer to type atomic64_unchecked_t
21455 + *
21456 + * Atomically adds @delta to @ptr.
21457 + */
21458 +void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21459 +{
21460 + atomic64_add_return_unchecked(delta, ptr);
21461 +}
21462 +EXPORT_SYMBOL(atomic64_add_unchecked);
21463 +
21464 +/**
21465 * atomic64_sub - subtract the atomic64 variable
21466 * @delta: integer value to subtract
21467 * @ptr: pointer to type atomic64_t
21468 @@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t *ptr)
21469 EXPORT_SYMBOL(atomic64_sub);
21470
21471 /**
21472 + * atomic64_sub_unchecked - subtract the atomic64 variable
21473 + * @delta: integer value to subtract
21474 + * @ptr: pointer to type atomic64_unchecked_t
21475 + *
21476 + * Atomically subtracts @delta from @ptr.
21477 + */
21478 +void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21479 +{
21480 + atomic64_add_unchecked(-delta, ptr);
21481 +}
21482 +EXPORT_SYMBOL(atomic64_sub_unchecked);
21483 +
21484 +/**
21485 * atomic64_sub_and_test - subtract value from variable and test result
21486 * @delta: integer value to subtract
21487 * @ptr: pointer to type atomic64_t
21488 @@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
21489 EXPORT_SYMBOL(atomic64_inc);
21490
21491 /**
21492 + * atomic64_inc_unchecked - increment atomic64 variable
21493 + * @ptr: pointer to type atomic64_unchecked_t
21494 + *
21495 + * Atomically increments @ptr by 1.
21496 + */
21497 +void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
21498 +{
21499 + atomic64_add_unchecked(1, ptr);
21500 +}
21501 +EXPORT_SYMBOL(atomic64_inc_unchecked);
21502 +
21503 +/**
21504 * atomic64_dec - decrement atomic64 variable
21505 * @ptr: pointer to type atomic64_t
21506 *
21507 @@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
21508 EXPORT_SYMBOL(atomic64_dec);
21509
21510 /**
21511 + * atomic64_dec_unchecked - decrement atomic64 variable
21512 + * @ptr: pointer to type atomic64_unchecked_t
21513 + *
21514 + * Atomically decrements @ptr by 1.
21515 + */
21516 +void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
21517 +{
21518 + atomic64_sub_unchecked(1, ptr);
21519 +}
21520 +EXPORT_SYMBOL(atomic64_dec_unchecked);
21521 +
21522 +/**
21523 * atomic64_dec_and_test - decrement and test
21524 * @ptr: pointer to type atomic64_t
21525 *
21526 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
21527 index adbccd0..98f96c8 100644
21528 --- a/arch/x86/lib/checksum_32.S
21529 +++ b/arch/x86/lib/checksum_32.S
21530 @@ -28,7 +28,8 @@
21531 #include <linux/linkage.h>
21532 #include <asm/dwarf2.h>
21533 #include <asm/errno.h>
21534 -
21535 +#include <asm/segment.h>
21536 +
21537 /*
21538 * computes a partial checksum, e.g. for TCP/UDP fragments
21539 */
21540 @@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
21541
21542 #define ARGBASE 16
21543 #define FP 12
21544 -
21545 -ENTRY(csum_partial_copy_generic)
21546 +
21547 +ENTRY(csum_partial_copy_generic_to_user)
21548 CFI_STARTPROC
21549 +
21550 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21551 + pushl %gs
21552 + CFI_ADJUST_CFA_OFFSET 4
21553 + popl %es
21554 + CFI_ADJUST_CFA_OFFSET -4
21555 + jmp csum_partial_copy_generic
21556 +#endif
21557 +
21558 +ENTRY(csum_partial_copy_generic_from_user)
21559 +
21560 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21561 + pushl %gs
21562 + CFI_ADJUST_CFA_OFFSET 4
21563 + popl %ds
21564 + CFI_ADJUST_CFA_OFFSET -4
21565 +#endif
21566 +
21567 +ENTRY(csum_partial_copy_generic)
21568 subl $4,%esp
21569 CFI_ADJUST_CFA_OFFSET 4
21570 pushl %edi
21571 @@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
21572 jmp 4f
21573 SRC(1: movw (%esi), %bx )
21574 addl $2, %esi
21575 -DST( movw %bx, (%edi) )
21576 +DST( movw %bx, %es:(%edi) )
21577 addl $2, %edi
21578 addw %bx, %ax
21579 adcl $0, %eax
21580 @@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
21581 SRC(1: movl (%esi), %ebx )
21582 SRC( movl 4(%esi), %edx )
21583 adcl %ebx, %eax
21584 -DST( movl %ebx, (%edi) )
21585 +DST( movl %ebx, %es:(%edi) )
21586 adcl %edx, %eax
21587 -DST( movl %edx, 4(%edi) )
21588 +DST( movl %edx, %es:4(%edi) )
21589
21590 SRC( movl 8(%esi), %ebx )
21591 SRC( movl 12(%esi), %edx )
21592 adcl %ebx, %eax
21593 -DST( movl %ebx, 8(%edi) )
21594 +DST( movl %ebx, %es:8(%edi) )
21595 adcl %edx, %eax
21596 -DST( movl %edx, 12(%edi) )
21597 +DST( movl %edx, %es:12(%edi) )
21598
21599 SRC( movl 16(%esi), %ebx )
21600 SRC( movl 20(%esi), %edx )
21601 adcl %ebx, %eax
21602 -DST( movl %ebx, 16(%edi) )
21603 +DST( movl %ebx, %es:16(%edi) )
21604 adcl %edx, %eax
21605 -DST( movl %edx, 20(%edi) )
21606 +DST( movl %edx, %es:20(%edi) )
21607
21608 SRC( movl 24(%esi), %ebx )
21609 SRC( movl 28(%esi), %edx )
21610 adcl %ebx, %eax
21611 -DST( movl %ebx, 24(%edi) )
21612 +DST( movl %ebx, %es:24(%edi) )
21613 adcl %edx, %eax
21614 -DST( movl %edx, 28(%edi) )
21615 +DST( movl %edx, %es:28(%edi) )
21616
21617 lea 32(%esi), %esi
21618 lea 32(%edi), %edi
21619 @@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
21620 shrl $2, %edx # This clears CF
21621 SRC(3: movl (%esi), %ebx )
21622 adcl %ebx, %eax
21623 -DST( movl %ebx, (%edi) )
21624 +DST( movl %ebx, %es:(%edi) )
21625 lea 4(%esi), %esi
21626 lea 4(%edi), %edi
21627 dec %edx
21628 @@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
21629 jb 5f
21630 SRC( movw (%esi), %cx )
21631 leal 2(%esi), %esi
21632 -DST( movw %cx, (%edi) )
21633 +DST( movw %cx, %es:(%edi) )
21634 leal 2(%edi), %edi
21635 je 6f
21636 shll $16,%ecx
21637 SRC(5: movb (%esi), %cl )
21638 -DST( movb %cl, (%edi) )
21639 +DST( movb %cl, %es:(%edi) )
21640 6: addl %ecx, %eax
21641 adcl $0, %eax
21642 7:
21643 @@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
21644
21645 6001:
21646 movl ARGBASE+20(%esp), %ebx # src_err_ptr
21647 - movl $-EFAULT, (%ebx)
21648 + movl $-EFAULT, %ss:(%ebx)
21649
21650 # zero the complete destination - computing the rest
21651 # is too much work
21652 @@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
21653
21654 6002:
21655 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21656 - movl $-EFAULT,(%ebx)
21657 + movl $-EFAULT,%ss:(%ebx)
21658 jmp 5000b
21659
21660 .previous
21661
21662 + pushl %ss
21663 + CFI_ADJUST_CFA_OFFSET 4
21664 + popl %ds
21665 + CFI_ADJUST_CFA_OFFSET -4
21666 + pushl %ss
21667 + CFI_ADJUST_CFA_OFFSET 4
21668 + popl %es
21669 + CFI_ADJUST_CFA_OFFSET -4
21670 popl %ebx
21671 CFI_ADJUST_CFA_OFFSET -4
21672 CFI_RESTORE ebx
21673 @@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
21674 CFI_ADJUST_CFA_OFFSET -4
21675 ret
21676 CFI_ENDPROC
21677 -ENDPROC(csum_partial_copy_generic)
21678 +ENDPROC(csum_partial_copy_generic_to_user)
21679
21680 #else
21681
21682 /* Version for PentiumII/PPro */
21683
21684 #define ROUND1(x) \
21685 + nop; nop; nop; \
21686 SRC(movl x(%esi), %ebx ) ; \
21687 addl %ebx, %eax ; \
21688 - DST(movl %ebx, x(%edi) ) ;
21689 + DST(movl %ebx, %es:x(%edi)) ;
21690
21691 #define ROUND(x) \
21692 + nop; nop; nop; \
21693 SRC(movl x(%esi), %ebx ) ; \
21694 adcl %ebx, %eax ; \
21695 - DST(movl %ebx, x(%edi) ) ;
21696 + DST(movl %ebx, %es:x(%edi)) ;
21697
21698 #define ARGBASE 12
21699 -
21700 -ENTRY(csum_partial_copy_generic)
21701 +
21702 +ENTRY(csum_partial_copy_generic_to_user)
21703 CFI_STARTPROC
21704 +
21705 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21706 + pushl %gs
21707 + CFI_ADJUST_CFA_OFFSET 4
21708 + popl %es
21709 + CFI_ADJUST_CFA_OFFSET -4
21710 + jmp csum_partial_copy_generic
21711 +#endif
21712 +
21713 +ENTRY(csum_partial_copy_generic_from_user)
21714 +
21715 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21716 + pushl %gs
21717 + CFI_ADJUST_CFA_OFFSET 4
21718 + popl %ds
21719 + CFI_ADJUST_CFA_OFFSET -4
21720 +#endif
21721 +
21722 +ENTRY(csum_partial_copy_generic)
21723 pushl %ebx
21724 CFI_ADJUST_CFA_OFFSET 4
21725 CFI_REL_OFFSET ebx, 0
21726 @@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
21727 subl %ebx, %edi
21728 lea -1(%esi),%edx
21729 andl $-32,%edx
21730 - lea 3f(%ebx,%ebx), %ebx
21731 + lea 3f(%ebx,%ebx,2), %ebx
21732 testl %esi, %esi
21733 jmp *%ebx
21734 1: addl $64,%esi
21735 @@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
21736 jb 5f
21737 SRC( movw (%esi), %dx )
21738 leal 2(%esi), %esi
21739 -DST( movw %dx, (%edi) )
21740 +DST( movw %dx, %es:(%edi) )
21741 leal 2(%edi), %edi
21742 je 6f
21743 shll $16,%edx
21744 5:
21745 SRC( movb (%esi), %dl )
21746 -DST( movb %dl, (%edi) )
21747 +DST( movb %dl, %es:(%edi) )
21748 6: addl %edx, %eax
21749 adcl $0, %eax
21750 7:
21751 .section .fixup, "ax"
21752 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
21753 - movl $-EFAULT, (%ebx)
21754 + movl $-EFAULT, %ss:(%ebx)
21755 # zero the complete destination (computing the rest is too much work)
21756 movl ARGBASE+8(%esp),%edi # dst
21757 movl ARGBASE+12(%esp),%ecx # len
21758 @@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
21759 rep; stosb
21760 jmp 7b
21761 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21762 - movl $-EFAULT, (%ebx)
21763 + movl $-EFAULT, %ss:(%ebx)
21764 jmp 7b
21765 .previous
21766
21767 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21768 + pushl %ss
21769 + CFI_ADJUST_CFA_OFFSET 4
21770 + popl %ds
21771 + CFI_ADJUST_CFA_OFFSET -4
21772 + pushl %ss
21773 + CFI_ADJUST_CFA_OFFSET 4
21774 + popl %es
21775 + CFI_ADJUST_CFA_OFFSET -4
21776 +#endif
21777 +
21778 popl %esi
21779 CFI_ADJUST_CFA_OFFSET -4
21780 CFI_RESTORE esi
21781 @@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
21782 CFI_RESTORE ebx
21783 ret
21784 CFI_ENDPROC
21785 -ENDPROC(csum_partial_copy_generic)
21786 +ENDPROC(csum_partial_copy_generic_to_user)
21787
21788 #undef ROUND
21789 #undef ROUND1
21790 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
21791 index ebeafcc..1e3a402 100644
21792 --- a/arch/x86/lib/clear_page_64.S
21793 +++ b/arch/x86/lib/clear_page_64.S
21794 @@ -1,5 +1,6 @@
21795 #include <linux/linkage.h>
21796 #include <asm/dwarf2.h>
21797 +#include <asm/alternative-asm.h>
21798
21799 /*
21800 * Zero a page.
21801 @@ -10,6 +11,7 @@ ENTRY(clear_page_c)
21802 movl $4096/8,%ecx
21803 xorl %eax,%eax
21804 rep stosq
21805 + pax_force_retaddr
21806 ret
21807 CFI_ENDPROC
21808 ENDPROC(clear_page_c)
21809 @@ -33,6 +35,7 @@ ENTRY(clear_page)
21810 leaq 64(%rdi),%rdi
21811 jnz .Lloop
21812 nop
21813 + pax_force_retaddr
21814 ret
21815 CFI_ENDPROC
21816 .Lclear_page_end:
21817 @@ -43,7 +46,7 @@ ENDPROC(clear_page)
21818
21819 #include <asm/cpufeature.h>
21820
21821 - .section .altinstr_replacement,"ax"
21822 + .section .altinstr_replacement,"a"
21823 1: .byte 0xeb /* jmp <disp8> */
21824 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
21825 2:
21826 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
21827 index 727a5d4..333818a 100644
21828 --- a/arch/x86/lib/copy_page_64.S
21829 +++ b/arch/x86/lib/copy_page_64.S
21830 @@ -2,12 +2,14 @@
21831
21832 #include <linux/linkage.h>
21833 #include <asm/dwarf2.h>
21834 +#include <asm/alternative-asm.h>
21835
21836 ALIGN
21837 copy_page_c:
21838 CFI_STARTPROC
21839 movl $4096/8,%ecx
21840 rep movsq
21841 + pax_force_retaddr
21842 ret
21843 CFI_ENDPROC
21844 ENDPROC(copy_page_c)
21845 @@ -38,7 +40,7 @@ ENTRY(copy_page)
21846 movq 16 (%rsi), %rdx
21847 movq 24 (%rsi), %r8
21848 movq 32 (%rsi), %r9
21849 - movq 40 (%rsi), %r10
21850 + movq 40 (%rsi), %r13
21851 movq 48 (%rsi), %r11
21852 movq 56 (%rsi), %r12
21853
21854 @@ -49,7 +51,7 @@ ENTRY(copy_page)
21855 movq %rdx, 16 (%rdi)
21856 movq %r8, 24 (%rdi)
21857 movq %r9, 32 (%rdi)
21858 - movq %r10, 40 (%rdi)
21859 + movq %r13, 40 (%rdi)
21860 movq %r11, 48 (%rdi)
21861 movq %r12, 56 (%rdi)
21862
21863 @@ -68,7 +70,7 @@ ENTRY(copy_page)
21864 movq 16 (%rsi), %rdx
21865 movq 24 (%rsi), %r8
21866 movq 32 (%rsi), %r9
21867 - movq 40 (%rsi), %r10
21868 + movq 40 (%rsi), %r13
21869 movq 48 (%rsi), %r11
21870 movq 56 (%rsi), %r12
21871
21872 @@ -77,7 +79,7 @@ ENTRY(copy_page)
21873 movq %rdx, 16 (%rdi)
21874 movq %r8, 24 (%rdi)
21875 movq %r9, 32 (%rdi)
21876 - movq %r10, 40 (%rdi)
21877 + movq %r13, 40 (%rdi)
21878 movq %r11, 48 (%rdi)
21879 movq %r12, 56 (%rdi)
21880
21881 @@ -94,6 +96,7 @@ ENTRY(copy_page)
21882 CFI_RESTORE r13
21883 addq $3*8,%rsp
21884 CFI_ADJUST_CFA_OFFSET -3*8
21885 + pax_force_retaddr
21886 ret
21887 .Lcopy_page_end:
21888 CFI_ENDPROC
21889 @@ -104,7 +107,7 @@ ENDPROC(copy_page)
21890
21891 #include <asm/cpufeature.h>
21892
21893 - .section .altinstr_replacement,"ax"
21894 + .section .altinstr_replacement,"a"
21895 1: .byte 0xeb /* jmp <disp8> */
21896 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
21897 2:
21898 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
21899 index af8debd..40c75f3 100644
21900 --- a/arch/x86/lib/copy_user_64.S
21901 +++ b/arch/x86/lib/copy_user_64.S
21902 @@ -15,13 +15,15 @@
21903 #include <asm/asm-offsets.h>
21904 #include <asm/thread_info.h>
21905 #include <asm/cpufeature.h>
21906 +#include <asm/pgtable.h>
21907 +#include <asm/alternative-asm.h>
21908
21909 .macro ALTERNATIVE_JUMP feature,orig,alt
21910 0:
21911 .byte 0xe9 /* 32bit jump */
21912 .long \orig-1f /* by default jump to orig */
21913 1:
21914 - .section .altinstr_replacement,"ax"
21915 + .section .altinstr_replacement,"a"
21916 2: .byte 0xe9 /* near jump with 32bit immediate */
21917 .long \alt-1b /* offset */ /* or alternatively to alt */
21918 .previous
21919 @@ -64,55 +66,26 @@
21920 #endif
21921 .endm
21922
21923 -/* Standard copy_to_user with segment limit checking */
21924 -ENTRY(copy_to_user)
21925 - CFI_STARTPROC
21926 - GET_THREAD_INFO(%rax)
21927 - movq %rdi,%rcx
21928 - addq %rdx,%rcx
21929 - jc bad_to_user
21930 - cmpq TI_addr_limit(%rax),%rcx
21931 - ja bad_to_user
21932 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
21933 - CFI_ENDPROC
21934 -ENDPROC(copy_to_user)
21935 -
21936 -/* Standard copy_from_user with segment limit checking */
21937 -ENTRY(copy_from_user)
21938 - CFI_STARTPROC
21939 - GET_THREAD_INFO(%rax)
21940 - movq %rsi,%rcx
21941 - addq %rdx,%rcx
21942 - jc bad_from_user
21943 - cmpq TI_addr_limit(%rax),%rcx
21944 - ja bad_from_user
21945 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
21946 - CFI_ENDPROC
21947 -ENDPROC(copy_from_user)
21948 -
21949 ENTRY(copy_user_generic)
21950 CFI_STARTPROC
21951 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
21952 CFI_ENDPROC
21953 ENDPROC(copy_user_generic)
21954
21955 -ENTRY(__copy_from_user_inatomic)
21956 - CFI_STARTPROC
21957 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
21958 - CFI_ENDPROC
21959 -ENDPROC(__copy_from_user_inatomic)
21960 -
21961 .section .fixup,"ax"
21962 /* must zero dest */
21963 ENTRY(bad_from_user)
21964 bad_from_user:
21965 CFI_STARTPROC
21966 + testl %edx,%edx
21967 + js bad_to_user
21968 movl %edx,%ecx
21969 xorl %eax,%eax
21970 rep
21971 stosb
21972 bad_to_user:
21973 movl %edx,%eax
21974 + pax_force_retaddr
21975 ret
21976 CFI_ENDPROC
21977 ENDPROC(bad_from_user)
21978 @@ -142,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
21979 jz 17f
21980 1: movq (%rsi),%r8
21981 2: movq 1*8(%rsi),%r9
21982 -3: movq 2*8(%rsi),%r10
21983 +3: movq 2*8(%rsi),%rax
21984 4: movq 3*8(%rsi),%r11
21985 5: movq %r8,(%rdi)
21986 6: movq %r9,1*8(%rdi)
21987 -7: movq %r10,2*8(%rdi)
21988 +7: movq %rax,2*8(%rdi)
21989 8: movq %r11,3*8(%rdi)
21990 9: movq 4*8(%rsi),%r8
21991 10: movq 5*8(%rsi),%r9
21992 -11: movq 6*8(%rsi),%r10
21993 +11: movq 6*8(%rsi),%rax
21994 12: movq 7*8(%rsi),%r11
21995 13: movq %r8,4*8(%rdi)
21996 14: movq %r9,5*8(%rdi)
21997 -15: movq %r10,6*8(%rdi)
21998 +15: movq %rax,6*8(%rdi)
21999 16: movq %r11,7*8(%rdi)
22000 leaq 64(%rsi),%rsi
22001 leaq 64(%rdi),%rdi
22002 @@ -180,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
22003 decl %ecx
22004 jnz 21b
22005 23: xor %eax,%eax
22006 + pax_force_retaddr
22007 ret
22008
22009 .section .fixup,"ax"
22010 @@ -252,6 +226,7 @@ ENTRY(copy_user_generic_string)
22011 3: rep
22012 movsb
22013 4: xorl %eax,%eax
22014 + pax_force_retaddr
22015 ret
22016
22017 .section .fixup,"ax"
22018 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
22019 index cb0c112..e3a6895 100644
22020 --- a/arch/x86/lib/copy_user_nocache_64.S
22021 +++ b/arch/x86/lib/copy_user_nocache_64.S
22022 @@ -8,12 +8,14 @@
22023
22024 #include <linux/linkage.h>
22025 #include <asm/dwarf2.h>
22026 +#include <asm/alternative-asm.h>
22027
22028 #define FIX_ALIGNMENT 1
22029
22030 #include <asm/current.h>
22031 #include <asm/asm-offsets.h>
22032 #include <asm/thread_info.h>
22033 +#include <asm/pgtable.h>
22034
22035 .macro ALIGN_DESTINATION
22036 #ifdef FIX_ALIGNMENT
22037 @@ -50,6 +52,15 @@
22038 */
22039 ENTRY(__copy_user_nocache)
22040 CFI_STARTPROC
22041 +
22042 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22043 + mov $PAX_USER_SHADOW_BASE,%rcx
22044 + cmp %rcx,%rsi
22045 + jae 1f
22046 + add %rcx,%rsi
22047 +1:
22048 +#endif
22049 +
22050 cmpl $8,%edx
22051 jb 20f /* less then 8 bytes, go to byte copy loop */
22052 ALIGN_DESTINATION
22053 @@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
22054 jz 17f
22055 1: movq (%rsi),%r8
22056 2: movq 1*8(%rsi),%r9
22057 -3: movq 2*8(%rsi),%r10
22058 +3: movq 2*8(%rsi),%rax
22059 4: movq 3*8(%rsi),%r11
22060 5: movnti %r8,(%rdi)
22061 6: movnti %r9,1*8(%rdi)
22062 -7: movnti %r10,2*8(%rdi)
22063 +7: movnti %rax,2*8(%rdi)
22064 8: movnti %r11,3*8(%rdi)
22065 9: movq 4*8(%rsi),%r8
22066 10: movq 5*8(%rsi),%r9
22067 -11: movq 6*8(%rsi),%r10
22068 +11: movq 6*8(%rsi),%rax
22069 12: movq 7*8(%rsi),%r11
22070 13: movnti %r8,4*8(%rdi)
22071 14: movnti %r9,5*8(%rdi)
22072 -15: movnti %r10,6*8(%rdi)
22073 +15: movnti %rax,6*8(%rdi)
22074 16: movnti %r11,7*8(%rdi)
22075 leaq 64(%rsi),%rsi
22076 leaq 64(%rdi),%rdi
22077 @@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
22078 jnz 21b
22079 23: xorl %eax,%eax
22080 sfence
22081 + pax_force_retaddr
22082 ret
22083
22084 .section .fixup,"ax"
22085 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
22086 index f0dba36..48cb4d6 100644
22087 --- a/arch/x86/lib/csum-copy_64.S
22088 +++ b/arch/x86/lib/csum-copy_64.S
22089 @@ -8,6 +8,7 @@
22090 #include <linux/linkage.h>
22091 #include <asm/dwarf2.h>
22092 #include <asm/errno.h>
22093 +#include <asm/alternative-asm.h>
22094
22095 /*
22096 * Checksum copy with exception handling.
22097 @@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
22098 CFI_RESTORE rbp
22099 addq $7*8,%rsp
22100 CFI_ADJUST_CFA_OFFSET -7*8
22101 + pax_force_retaddr 0, 1
22102 ret
22103 CFI_RESTORE_STATE
22104
22105 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
22106 index 459b58a..9570bc7 100644
22107 --- a/arch/x86/lib/csum-wrappers_64.c
22108 +++ b/arch/x86/lib/csum-wrappers_64.c
22109 @@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
22110 len -= 2;
22111 }
22112 }
22113 - isum = csum_partial_copy_generic((__force const void *)src,
22114 +
22115 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22116 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
22117 + src += PAX_USER_SHADOW_BASE;
22118 +#endif
22119 +
22120 + isum = csum_partial_copy_generic((const void __force_kernel *)src,
22121 dst, len, isum, errp, NULL);
22122 if (unlikely(*errp))
22123 goto out_err;
22124 @@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
22125 }
22126
22127 *errp = 0;
22128 - return csum_partial_copy_generic(src, (void __force *)dst,
22129 +
22130 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22131 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
22132 + dst += PAX_USER_SHADOW_BASE;
22133 +#endif
22134 +
22135 + return csum_partial_copy_generic(src, (void __force_kernel *)dst,
22136 len, isum, NULL, errp);
22137 }
22138 EXPORT_SYMBOL(csum_partial_copy_to_user);
22139 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
22140 index 51f1504..ddac4c1 100644
22141 --- a/arch/x86/lib/getuser.S
22142 +++ b/arch/x86/lib/getuser.S
22143 @@ -33,15 +33,38 @@
22144 #include <asm/asm-offsets.h>
22145 #include <asm/thread_info.h>
22146 #include <asm/asm.h>
22147 +#include <asm/segment.h>
22148 +#include <asm/pgtable.h>
22149 +#include <asm/alternative-asm.h>
22150 +
22151 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22152 +#define __copyuser_seg gs;
22153 +#else
22154 +#define __copyuser_seg
22155 +#endif
22156
22157 .text
22158 ENTRY(__get_user_1)
22159 CFI_STARTPROC
22160 +
22161 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22162 GET_THREAD_INFO(%_ASM_DX)
22163 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22164 jae bad_get_user
22165 -1: movzb (%_ASM_AX),%edx
22166 +
22167 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22168 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22169 + cmp %_ASM_DX,%_ASM_AX
22170 + jae 1234f
22171 + add %_ASM_DX,%_ASM_AX
22172 +1234:
22173 +#endif
22174 +
22175 +#endif
22176 +
22177 +1: __copyuser_seg movzb (%_ASM_AX),%edx
22178 xor %eax,%eax
22179 + pax_force_retaddr
22180 ret
22181 CFI_ENDPROC
22182 ENDPROC(__get_user_1)
22183 @@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
22184 ENTRY(__get_user_2)
22185 CFI_STARTPROC
22186 add $1,%_ASM_AX
22187 +
22188 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22189 jc bad_get_user
22190 GET_THREAD_INFO(%_ASM_DX)
22191 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22192 jae bad_get_user
22193 -2: movzwl -1(%_ASM_AX),%edx
22194 +
22195 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22196 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22197 + cmp %_ASM_DX,%_ASM_AX
22198 + jae 1234f
22199 + add %_ASM_DX,%_ASM_AX
22200 +1234:
22201 +#endif
22202 +
22203 +#endif
22204 +
22205 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
22206 xor %eax,%eax
22207 + pax_force_retaddr
22208 ret
22209 CFI_ENDPROC
22210 ENDPROC(__get_user_2)
22211 @@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
22212 ENTRY(__get_user_4)
22213 CFI_STARTPROC
22214 add $3,%_ASM_AX
22215 +
22216 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22217 jc bad_get_user
22218 GET_THREAD_INFO(%_ASM_DX)
22219 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22220 jae bad_get_user
22221 -3: mov -3(%_ASM_AX),%edx
22222 +
22223 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22224 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22225 + cmp %_ASM_DX,%_ASM_AX
22226 + jae 1234f
22227 + add %_ASM_DX,%_ASM_AX
22228 +1234:
22229 +#endif
22230 +
22231 +#endif
22232 +
22233 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
22234 xor %eax,%eax
22235 + pax_force_retaddr
22236 ret
22237 CFI_ENDPROC
22238 ENDPROC(__get_user_4)
22239 @@ -80,8 +131,18 @@ ENTRY(__get_user_8)
22240 GET_THREAD_INFO(%_ASM_DX)
22241 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22242 jae bad_get_user
22243 +
22244 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22245 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22246 + cmp %_ASM_DX,%_ASM_AX
22247 + jae 1234f
22248 + add %_ASM_DX,%_ASM_AX
22249 +1234:
22250 +#endif
22251 +
22252 4: movq -7(%_ASM_AX),%_ASM_DX
22253 xor %eax,%eax
22254 + pax_force_retaddr
22255 ret
22256 CFI_ENDPROC
22257 ENDPROC(__get_user_8)
22258 @@ -91,6 +152,7 @@ bad_get_user:
22259 CFI_STARTPROC
22260 xor %edx,%edx
22261 mov $(-EFAULT),%_ASM_AX
22262 + pax_force_retaddr
22263 ret
22264 CFI_ENDPROC
22265 END(bad_get_user)
22266 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
22267 index 05a95e7..326f2fa 100644
22268 --- a/arch/x86/lib/iomap_copy_64.S
22269 +++ b/arch/x86/lib/iomap_copy_64.S
22270 @@ -17,6 +17,7 @@
22271
22272 #include <linux/linkage.h>
22273 #include <asm/dwarf2.h>
22274 +#include <asm/alternative-asm.h>
22275
22276 /*
22277 * override generic version in lib/iomap_copy.c
22278 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
22279 CFI_STARTPROC
22280 movl %edx,%ecx
22281 rep movsd
22282 + pax_force_retaddr
22283 ret
22284 CFI_ENDPROC
22285 ENDPROC(__iowrite32_copy)
22286 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
22287 index ad5441e..610e351 100644
22288 --- a/arch/x86/lib/memcpy_64.S
22289 +++ b/arch/x86/lib/memcpy_64.S
22290 @@ -4,6 +4,7 @@
22291
22292 #include <asm/cpufeature.h>
22293 #include <asm/dwarf2.h>
22294 +#include <asm/alternative-asm.h>
22295
22296 /*
22297 * memcpy - Copy a memory block.
22298 @@ -34,6 +35,7 @@ memcpy_c:
22299 rep movsq
22300 movl %edx, %ecx
22301 rep movsb
22302 + pax_force_retaddr
22303 ret
22304 CFI_ENDPROC
22305 ENDPROC(memcpy_c)
22306 @@ -118,6 +120,7 @@ ENTRY(memcpy)
22307 jnz .Lloop_1
22308
22309 .Lend:
22310 + pax_force_retaddr 0, 1
22311 ret
22312 CFI_ENDPROC
22313 ENDPROC(memcpy)
22314 @@ -128,7 +131,7 @@ ENDPROC(__memcpy)
22315 * It is also a lot simpler. Use this when possible:
22316 */
22317
22318 - .section .altinstr_replacement, "ax"
22319 + .section .altinstr_replacement, "a"
22320 1: .byte 0xeb /* jmp <disp8> */
22321 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
22322 2:
22323 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
22324 index 2c59481..7e9ba4e 100644
22325 --- a/arch/x86/lib/memset_64.S
22326 +++ b/arch/x86/lib/memset_64.S
22327 @@ -2,6 +2,7 @@
22328
22329 #include <linux/linkage.h>
22330 #include <asm/dwarf2.h>
22331 +#include <asm/alternative-asm.h>
22332
22333 /*
22334 * ISO C memset - set a memory block to a byte value.
22335 @@ -28,6 +29,7 @@ memset_c:
22336 movl %r8d,%ecx
22337 rep stosb
22338 movq %r9,%rax
22339 + pax_force_retaddr
22340 ret
22341 CFI_ENDPROC
22342 ENDPROC(memset_c)
22343 @@ -35,13 +37,13 @@ ENDPROC(memset_c)
22344 ENTRY(memset)
22345 ENTRY(__memset)
22346 CFI_STARTPROC
22347 - movq %rdi,%r10
22348 movq %rdx,%r11
22349
22350 /* expand byte value */
22351 movzbl %sil,%ecx
22352 movabs $0x0101010101010101,%rax
22353 mul %rcx /* with rax, clobbers rdx */
22354 + movq %rdi,%rdx
22355
22356 /* align dst */
22357 movl %edi,%r9d
22358 @@ -95,7 +97,8 @@ ENTRY(__memset)
22359 jnz .Lloop_1
22360
22361 .Lende:
22362 - movq %r10,%rax
22363 + movq %rdx,%rax
22364 + pax_force_retaddr
22365 ret
22366
22367 CFI_RESTORE_STATE
22368 @@ -118,7 +121,7 @@ ENDPROC(__memset)
22369
22370 #include <asm/cpufeature.h>
22371
22372 - .section .altinstr_replacement,"ax"
22373 + .section .altinstr_replacement,"a"
22374 1: .byte 0xeb /* jmp <disp8> */
22375 .byte (memset_c - memset) - (2f - 1b) /* offset */
22376 2:
22377 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
22378 index c9f2d9b..e7fd2c0 100644
22379 --- a/arch/x86/lib/mmx_32.c
22380 +++ b/arch/x86/lib/mmx_32.c
22381 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22382 {
22383 void *p;
22384 int i;
22385 + unsigned long cr0;
22386
22387 if (unlikely(in_interrupt()))
22388 return __memcpy(to, from, len);
22389 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22390 kernel_fpu_begin();
22391
22392 __asm__ __volatile__ (
22393 - "1: prefetch (%0)\n" /* This set is 28 bytes */
22394 - " prefetch 64(%0)\n"
22395 - " prefetch 128(%0)\n"
22396 - " prefetch 192(%0)\n"
22397 - " prefetch 256(%0)\n"
22398 + "1: prefetch (%1)\n" /* This set is 28 bytes */
22399 + " prefetch 64(%1)\n"
22400 + " prefetch 128(%1)\n"
22401 + " prefetch 192(%1)\n"
22402 + " prefetch 256(%1)\n"
22403 "2: \n"
22404 ".section .fixup, \"ax\"\n"
22405 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22406 + "3: \n"
22407 +
22408 +#ifdef CONFIG_PAX_KERNEXEC
22409 + " movl %%cr0, %0\n"
22410 + " movl %0, %%eax\n"
22411 + " andl $0xFFFEFFFF, %%eax\n"
22412 + " movl %%eax, %%cr0\n"
22413 +#endif
22414 +
22415 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22416 +
22417 +#ifdef CONFIG_PAX_KERNEXEC
22418 + " movl %0, %%cr0\n"
22419 +#endif
22420 +
22421 " jmp 2b\n"
22422 ".previous\n"
22423 _ASM_EXTABLE(1b, 3b)
22424 - : : "r" (from));
22425 + : "=&r" (cr0) : "r" (from) : "ax");
22426
22427 for ( ; i > 5; i--) {
22428 __asm__ __volatile__ (
22429 - "1: prefetch 320(%0)\n"
22430 - "2: movq (%0), %%mm0\n"
22431 - " movq 8(%0), %%mm1\n"
22432 - " movq 16(%0), %%mm2\n"
22433 - " movq 24(%0), %%mm3\n"
22434 - " movq %%mm0, (%1)\n"
22435 - " movq %%mm1, 8(%1)\n"
22436 - " movq %%mm2, 16(%1)\n"
22437 - " movq %%mm3, 24(%1)\n"
22438 - " movq 32(%0), %%mm0\n"
22439 - " movq 40(%0), %%mm1\n"
22440 - " movq 48(%0), %%mm2\n"
22441 - " movq 56(%0), %%mm3\n"
22442 - " movq %%mm0, 32(%1)\n"
22443 - " movq %%mm1, 40(%1)\n"
22444 - " movq %%mm2, 48(%1)\n"
22445 - " movq %%mm3, 56(%1)\n"
22446 + "1: prefetch 320(%1)\n"
22447 + "2: movq (%1), %%mm0\n"
22448 + " movq 8(%1), %%mm1\n"
22449 + " movq 16(%1), %%mm2\n"
22450 + " movq 24(%1), %%mm3\n"
22451 + " movq %%mm0, (%2)\n"
22452 + " movq %%mm1, 8(%2)\n"
22453 + " movq %%mm2, 16(%2)\n"
22454 + " movq %%mm3, 24(%2)\n"
22455 + " movq 32(%1), %%mm0\n"
22456 + " movq 40(%1), %%mm1\n"
22457 + " movq 48(%1), %%mm2\n"
22458 + " movq 56(%1), %%mm3\n"
22459 + " movq %%mm0, 32(%2)\n"
22460 + " movq %%mm1, 40(%2)\n"
22461 + " movq %%mm2, 48(%2)\n"
22462 + " movq %%mm3, 56(%2)\n"
22463 ".section .fixup, \"ax\"\n"
22464 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22465 + "3:\n"
22466 +
22467 +#ifdef CONFIG_PAX_KERNEXEC
22468 + " movl %%cr0, %0\n"
22469 + " movl %0, %%eax\n"
22470 + " andl $0xFFFEFFFF, %%eax\n"
22471 + " movl %%eax, %%cr0\n"
22472 +#endif
22473 +
22474 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22475 +
22476 +#ifdef CONFIG_PAX_KERNEXEC
22477 + " movl %0, %%cr0\n"
22478 +#endif
22479 +
22480 " jmp 2b\n"
22481 ".previous\n"
22482 _ASM_EXTABLE(1b, 3b)
22483 - : : "r" (from), "r" (to) : "memory");
22484 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22485
22486 from += 64;
22487 to += 64;
22488 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
22489 static void fast_copy_page(void *to, void *from)
22490 {
22491 int i;
22492 + unsigned long cr0;
22493
22494 kernel_fpu_begin();
22495
22496 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
22497 * but that is for later. -AV
22498 */
22499 __asm__ __volatile__(
22500 - "1: prefetch (%0)\n"
22501 - " prefetch 64(%0)\n"
22502 - " prefetch 128(%0)\n"
22503 - " prefetch 192(%0)\n"
22504 - " prefetch 256(%0)\n"
22505 + "1: prefetch (%1)\n"
22506 + " prefetch 64(%1)\n"
22507 + " prefetch 128(%1)\n"
22508 + " prefetch 192(%1)\n"
22509 + " prefetch 256(%1)\n"
22510 "2: \n"
22511 ".section .fixup, \"ax\"\n"
22512 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22513 + "3: \n"
22514 +
22515 +#ifdef CONFIG_PAX_KERNEXEC
22516 + " movl %%cr0, %0\n"
22517 + " movl %0, %%eax\n"
22518 + " andl $0xFFFEFFFF, %%eax\n"
22519 + " movl %%eax, %%cr0\n"
22520 +#endif
22521 +
22522 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22523 +
22524 +#ifdef CONFIG_PAX_KERNEXEC
22525 + " movl %0, %%cr0\n"
22526 +#endif
22527 +
22528 " jmp 2b\n"
22529 ".previous\n"
22530 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
22531 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22532
22533 for (i = 0; i < (4096-320)/64; i++) {
22534 __asm__ __volatile__ (
22535 - "1: prefetch 320(%0)\n"
22536 - "2: movq (%0), %%mm0\n"
22537 - " movntq %%mm0, (%1)\n"
22538 - " movq 8(%0), %%mm1\n"
22539 - " movntq %%mm1, 8(%1)\n"
22540 - " movq 16(%0), %%mm2\n"
22541 - " movntq %%mm2, 16(%1)\n"
22542 - " movq 24(%0), %%mm3\n"
22543 - " movntq %%mm3, 24(%1)\n"
22544 - " movq 32(%0), %%mm4\n"
22545 - " movntq %%mm4, 32(%1)\n"
22546 - " movq 40(%0), %%mm5\n"
22547 - " movntq %%mm5, 40(%1)\n"
22548 - " movq 48(%0), %%mm6\n"
22549 - " movntq %%mm6, 48(%1)\n"
22550 - " movq 56(%0), %%mm7\n"
22551 - " movntq %%mm7, 56(%1)\n"
22552 + "1: prefetch 320(%1)\n"
22553 + "2: movq (%1), %%mm0\n"
22554 + " movntq %%mm0, (%2)\n"
22555 + " movq 8(%1), %%mm1\n"
22556 + " movntq %%mm1, 8(%2)\n"
22557 + " movq 16(%1), %%mm2\n"
22558 + " movntq %%mm2, 16(%2)\n"
22559 + " movq 24(%1), %%mm3\n"
22560 + " movntq %%mm3, 24(%2)\n"
22561 + " movq 32(%1), %%mm4\n"
22562 + " movntq %%mm4, 32(%2)\n"
22563 + " movq 40(%1), %%mm5\n"
22564 + " movntq %%mm5, 40(%2)\n"
22565 + " movq 48(%1), %%mm6\n"
22566 + " movntq %%mm6, 48(%2)\n"
22567 + " movq 56(%1), %%mm7\n"
22568 + " movntq %%mm7, 56(%2)\n"
22569 ".section .fixup, \"ax\"\n"
22570 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22571 + "3:\n"
22572 +
22573 +#ifdef CONFIG_PAX_KERNEXEC
22574 + " movl %%cr0, %0\n"
22575 + " movl %0, %%eax\n"
22576 + " andl $0xFFFEFFFF, %%eax\n"
22577 + " movl %%eax, %%cr0\n"
22578 +#endif
22579 +
22580 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22581 +
22582 +#ifdef CONFIG_PAX_KERNEXEC
22583 + " movl %0, %%cr0\n"
22584 +#endif
22585 +
22586 " jmp 2b\n"
22587 ".previous\n"
22588 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
22589 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22590
22591 from += 64;
22592 to += 64;
22593 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
22594 static void fast_copy_page(void *to, void *from)
22595 {
22596 int i;
22597 + unsigned long cr0;
22598
22599 kernel_fpu_begin();
22600
22601 __asm__ __volatile__ (
22602 - "1: prefetch (%0)\n"
22603 - " prefetch 64(%0)\n"
22604 - " prefetch 128(%0)\n"
22605 - " prefetch 192(%0)\n"
22606 - " prefetch 256(%0)\n"
22607 + "1: prefetch (%1)\n"
22608 + " prefetch 64(%1)\n"
22609 + " prefetch 128(%1)\n"
22610 + " prefetch 192(%1)\n"
22611 + " prefetch 256(%1)\n"
22612 "2: \n"
22613 ".section .fixup, \"ax\"\n"
22614 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22615 + "3: \n"
22616 +
22617 +#ifdef CONFIG_PAX_KERNEXEC
22618 + " movl %%cr0, %0\n"
22619 + " movl %0, %%eax\n"
22620 + " andl $0xFFFEFFFF, %%eax\n"
22621 + " movl %%eax, %%cr0\n"
22622 +#endif
22623 +
22624 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22625 +
22626 +#ifdef CONFIG_PAX_KERNEXEC
22627 + " movl %0, %%cr0\n"
22628 +#endif
22629 +
22630 " jmp 2b\n"
22631 ".previous\n"
22632 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
22633 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22634
22635 for (i = 0; i < 4096/64; i++) {
22636 __asm__ __volatile__ (
22637 - "1: prefetch 320(%0)\n"
22638 - "2: movq (%0), %%mm0\n"
22639 - " movq 8(%0), %%mm1\n"
22640 - " movq 16(%0), %%mm2\n"
22641 - " movq 24(%0), %%mm3\n"
22642 - " movq %%mm0, (%1)\n"
22643 - " movq %%mm1, 8(%1)\n"
22644 - " movq %%mm2, 16(%1)\n"
22645 - " movq %%mm3, 24(%1)\n"
22646 - " movq 32(%0), %%mm0\n"
22647 - " movq 40(%0), %%mm1\n"
22648 - " movq 48(%0), %%mm2\n"
22649 - " movq 56(%0), %%mm3\n"
22650 - " movq %%mm0, 32(%1)\n"
22651 - " movq %%mm1, 40(%1)\n"
22652 - " movq %%mm2, 48(%1)\n"
22653 - " movq %%mm3, 56(%1)\n"
22654 + "1: prefetch 320(%1)\n"
22655 + "2: movq (%1), %%mm0\n"
22656 + " movq 8(%1), %%mm1\n"
22657 + " movq 16(%1), %%mm2\n"
22658 + " movq 24(%1), %%mm3\n"
22659 + " movq %%mm0, (%2)\n"
22660 + " movq %%mm1, 8(%2)\n"
22661 + " movq %%mm2, 16(%2)\n"
22662 + " movq %%mm3, 24(%2)\n"
22663 + " movq 32(%1), %%mm0\n"
22664 + " movq 40(%1), %%mm1\n"
22665 + " movq 48(%1), %%mm2\n"
22666 + " movq 56(%1), %%mm3\n"
22667 + " movq %%mm0, 32(%2)\n"
22668 + " movq %%mm1, 40(%2)\n"
22669 + " movq %%mm2, 48(%2)\n"
22670 + " movq %%mm3, 56(%2)\n"
22671 ".section .fixup, \"ax\"\n"
22672 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22673 + "3:\n"
22674 +
22675 +#ifdef CONFIG_PAX_KERNEXEC
22676 + " movl %%cr0, %0\n"
22677 + " movl %0, %%eax\n"
22678 + " andl $0xFFFEFFFF, %%eax\n"
22679 + " movl %%eax, %%cr0\n"
22680 +#endif
22681 +
22682 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22683 +
22684 +#ifdef CONFIG_PAX_KERNEXEC
22685 + " movl %0, %%cr0\n"
22686 +#endif
22687 +
22688 " jmp 2b\n"
22689 ".previous\n"
22690 _ASM_EXTABLE(1b, 3b)
22691 - : : "r" (from), "r" (to) : "memory");
22692 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22693
22694 from += 64;
22695 to += 64;
22696 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
22697 index 69fa106..adda88b 100644
22698 --- a/arch/x86/lib/msr-reg.S
22699 +++ b/arch/x86/lib/msr-reg.S
22700 @@ -3,6 +3,7 @@
22701 #include <asm/dwarf2.h>
22702 #include <asm/asm.h>
22703 #include <asm/msr.h>
22704 +#include <asm/alternative-asm.h>
22705
22706 #ifdef CONFIG_X86_64
22707 /*
22708 @@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
22709 CFI_STARTPROC
22710 pushq_cfi %rbx
22711 pushq_cfi %rbp
22712 - movq %rdi, %r10 /* Save pointer */
22713 + movq %rdi, %r9 /* Save pointer */
22714 xorl %r11d, %r11d /* Return value */
22715 movl (%rdi), %eax
22716 movl 4(%rdi), %ecx
22717 @@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
22718 movl 28(%rdi), %edi
22719 CFI_REMEMBER_STATE
22720 1: \op
22721 -2: movl %eax, (%r10)
22722 +2: movl %eax, (%r9)
22723 movl %r11d, %eax /* Return value */
22724 - movl %ecx, 4(%r10)
22725 - movl %edx, 8(%r10)
22726 - movl %ebx, 12(%r10)
22727 - movl %ebp, 20(%r10)
22728 - movl %esi, 24(%r10)
22729 - movl %edi, 28(%r10)
22730 + movl %ecx, 4(%r9)
22731 + movl %edx, 8(%r9)
22732 + movl %ebx, 12(%r9)
22733 + movl %ebp, 20(%r9)
22734 + movl %esi, 24(%r9)
22735 + movl %edi, 28(%r9)
22736 popq_cfi %rbp
22737 popq_cfi %rbx
22738 + pax_force_retaddr
22739 ret
22740 3:
22741 CFI_RESTORE_STATE
22742 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
22743 index 36b0d15..d381858 100644
22744 --- a/arch/x86/lib/putuser.S
22745 +++ b/arch/x86/lib/putuser.S
22746 @@ -15,7 +15,9 @@
22747 #include <asm/thread_info.h>
22748 #include <asm/errno.h>
22749 #include <asm/asm.h>
22750 -
22751 +#include <asm/segment.h>
22752 +#include <asm/pgtable.h>
22753 +#include <asm/alternative-asm.h>
22754
22755 /*
22756 * __put_user_X
22757 @@ -29,52 +31,119 @@
22758 * as they get called from within inline assembly.
22759 */
22760
22761 -#define ENTER CFI_STARTPROC ; \
22762 - GET_THREAD_INFO(%_ASM_BX)
22763 -#define EXIT ret ; \
22764 +#define ENTER CFI_STARTPROC
22765 +#define EXIT pax_force_retaddr; ret ; \
22766 CFI_ENDPROC
22767
22768 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22769 +#define _DEST %_ASM_CX,%_ASM_BX
22770 +#else
22771 +#define _DEST %_ASM_CX
22772 +#endif
22773 +
22774 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22775 +#define __copyuser_seg gs;
22776 +#else
22777 +#define __copyuser_seg
22778 +#endif
22779 +
22780 .text
22781 ENTRY(__put_user_1)
22782 ENTER
22783 +
22784 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22785 + GET_THREAD_INFO(%_ASM_BX)
22786 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
22787 jae bad_put_user
22788 -1: movb %al,(%_ASM_CX)
22789 +
22790 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22791 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22792 + cmp %_ASM_BX,%_ASM_CX
22793 + jb 1234f
22794 + xor %ebx,%ebx
22795 +1234:
22796 +#endif
22797 +
22798 +#endif
22799 +
22800 +1: __copyuser_seg movb %al,(_DEST)
22801 xor %eax,%eax
22802 EXIT
22803 ENDPROC(__put_user_1)
22804
22805 ENTRY(__put_user_2)
22806 ENTER
22807 +
22808 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22809 + GET_THREAD_INFO(%_ASM_BX)
22810 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22811 sub $1,%_ASM_BX
22812 cmp %_ASM_BX,%_ASM_CX
22813 jae bad_put_user
22814 -2: movw %ax,(%_ASM_CX)
22815 +
22816 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22817 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22818 + cmp %_ASM_BX,%_ASM_CX
22819 + jb 1234f
22820 + xor %ebx,%ebx
22821 +1234:
22822 +#endif
22823 +
22824 +#endif
22825 +
22826 +2: __copyuser_seg movw %ax,(_DEST)
22827 xor %eax,%eax
22828 EXIT
22829 ENDPROC(__put_user_2)
22830
22831 ENTRY(__put_user_4)
22832 ENTER
22833 +
22834 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22835 + GET_THREAD_INFO(%_ASM_BX)
22836 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22837 sub $3,%_ASM_BX
22838 cmp %_ASM_BX,%_ASM_CX
22839 jae bad_put_user
22840 -3: movl %eax,(%_ASM_CX)
22841 +
22842 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22843 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22844 + cmp %_ASM_BX,%_ASM_CX
22845 + jb 1234f
22846 + xor %ebx,%ebx
22847 +1234:
22848 +#endif
22849 +
22850 +#endif
22851 +
22852 +3: __copyuser_seg movl %eax,(_DEST)
22853 xor %eax,%eax
22854 EXIT
22855 ENDPROC(__put_user_4)
22856
22857 ENTRY(__put_user_8)
22858 ENTER
22859 +
22860 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22861 + GET_THREAD_INFO(%_ASM_BX)
22862 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22863 sub $7,%_ASM_BX
22864 cmp %_ASM_BX,%_ASM_CX
22865 jae bad_put_user
22866 -4: mov %_ASM_AX,(%_ASM_CX)
22867 +
22868 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22869 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22870 + cmp %_ASM_BX,%_ASM_CX
22871 + jb 1234f
22872 + xor %ebx,%ebx
22873 +1234:
22874 +#endif
22875 +
22876 +#endif
22877 +
22878 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
22879 #ifdef CONFIG_X86_32
22880 -5: movl %edx,4(%_ASM_CX)
22881 +5: __copyuser_seg movl %edx,4(_DEST)
22882 #endif
22883 xor %eax,%eax
22884 EXIT
22885 diff --git a/arch/x86/lib/rwlock_64.S b/arch/x86/lib/rwlock_64.S
22886 index 05ea55f..6345b9a 100644
22887 --- a/arch/x86/lib/rwlock_64.S
22888 +++ b/arch/x86/lib/rwlock_64.S
22889 @@ -2,6 +2,7 @@
22890
22891 #include <linux/linkage.h>
22892 #include <asm/rwlock.h>
22893 +#include <asm/asm.h>
22894 #include <asm/alternative-asm.h>
22895 #include <asm/dwarf2.h>
22896
22897 @@ -10,13 +11,34 @@ ENTRY(__write_lock_failed)
22898 CFI_STARTPROC
22899 LOCK_PREFIX
22900 addl $RW_LOCK_BIAS,(%rdi)
22901 +
22902 +#ifdef CONFIG_PAX_REFCOUNT
22903 + jno 1234f
22904 + LOCK_PREFIX
22905 + subl $RW_LOCK_BIAS,(%rdi)
22906 + int $4
22907 +1234:
22908 + _ASM_EXTABLE(1234b, 1234b)
22909 +#endif
22910 +
22911 1: rep
22912 nop
22913 cmpl $RW_LOCK_BIAS,(%rdi)
22914 jne 1b
22915 LOCK_PREFIX
22916 subl $RW_LOCK_BIAS,(%rdi)
22917 +
22918 +#ifdef CONFIG_PAX_REFCOUNT
22919 + jno 1234f
22920 + LOCK_PREFIX
22921 + addl $RW_LOCK_BIAS,(%rdi)
22922 + int $4
22923 +1234:
22924 + _ASM_EXTABLE(1234b, 1234b)
22925 +#endif
22926 +
22927 jnz __write_lock_failed
22928 + pax_force_retaddr
22929 ret
22930 CFI_ENDPROC
22931 END(__write_lock_failed)
22932 @@ -26,13 +48,34 @@ ENTRY(__read_lock_failed)
22933 CFI_STARTPROC
22934 LOCK_PREFIX
22935 incl (%rdi)
22936 +
22937 +#ifdef CONFIG_PAX_REFCOUNT
22938 + jno 1234f
22939 + LOCK_PREFIX
22940 + decl (%rdi)
22941 + int $4
22942 +1234:
22943 + _ASM_EXTABLE(1234b, 1234b)
22944 +#endif
22945 +
22946 1: rep
22947 nop
22948 cmpl $1,(%rdi)
22949 js 1b
22950 LOCK_PREFIX
22951 decl (%rdi)
22952 +
22953 +#ifdef CONFIG_PAX_REFCOUNT
22954 + jno 1234f
22955 + LOCK_PREFIX
22956 + incl (%rdi)
22957 + int $4
22958 +1234:
22959 + _ASM_EXTABLE(1234b, 1234b)
22960 +#endif
22961 +
22962 js __read_lock_failed
22963 + pax_force_retaddr
22964 ret
22965 CFI_ENDPROC
22966 END(__read_lock_failed)
22967 diff --git a/arch/x86/lib/rwsem_64.S b/arch/x86/lib/rwsem_64.S
22968 index 15acecf..f768b10 100644
22969 --- a/arch/x86/lib/rwsem_64.S
22970 +++ b/arch/x86/lib/rwsem_64.S
22971 @@ -48,6 +48,7 @@ ENTRY(call_rwsem_down_read_failed)
22972 call rwsem_down_read_failed
22973 popq %rdx
22974 restore_common_regs
22975 + pax_force_retaddr
22976 ret
22977 ENDPROC(call_rwsem_down_read_failed)
22978
22979 @@ -56,6 +57,7 @@ ENTRY(call_rwsem_down_write_failed)
22980 movq %rax,%rdi
22981 call rwsem_down_write_failed
22982 restore_common_regs
22983 + pax_force_retaddr
22984 ret
22985 ENDPROC(call_rwsem_down_write_failed)
22986
22987 @@ -66,7 +68,8 @@ ENTRY(call_rwsem_wake)
22988 movq %rax,%rdi
22989 call rwsem_wake
22990 restore_common_regs
22991 -1: ret
22992 +1: pax_force_retaddr
22993 + ret
22994 ENDPROC(call_rwsem_wake)
22995
22996 /* Fix up special calling conventions */
22997 @@ -77,5 +80,6 @@ ENTRY(call_rwsem_downgrade_wake)
22998 call rwsem_downgrade_wake
22999 popq %rdx
23000 restore_common_regs
23001 + pax_force_retaddr
23002 ret
23003 ENDPROC(call_rwsem_downgrade_wake)
23004 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
23005 index bf9a7d5..fb06ab5 100644
23006 --- a/arch/x86/lib/thunk_64.S
23007 +++ b/arch/x86/lib/thunk_64.S
23008 @@ -10,7 +10,8 @@
23009 #include <asm/dwarf2.h>
23010 #include <asm/calling.h>
23011 #include <asm/rwlock.h>
23012 -
23013 + #include <asm/alternative-asm.h>
23014 +
23015 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
23016 .macro thunk name,func
23017 .globl \name
23018 @@ -70,6 +71,7 @@
23019 SAVE_ARGS
23020 restore:
23021 RESTORE_ARGS
23022 + pax_force_retaddr
23023 ret
23024 CFI_ENDPROC
23025
23026 @@ -77,5 +79,6 @@ restore:
23027 SAVE_ARGS
23028 restore_norax:
23029 RESTORE_ARGS 1
23030 + pax_force_retaddr
23031 ret
23032 CFI_ENDPROC
23033 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
23034 index 1f118d4..ec4a953 100644
23035 --- a/arch/x86/lib/usercopy_32.c
23036 +++ b/arch/x86/lib/usercopy_32.c
23037 @@ -43,7 +43,7 @@ do { \
23038 __asm__ __volatile__( \
23039 " testl %1,%1\n" \
23040 " jz 2f\n" \
23041 - "0: lodsb\n" \
23042 + "0: "__copyuser_seg"lodsb\n" \
23043 " stosb\n" \
23044 " testb %%al,%%al\n" \
23045 " jz 1f\n" \
23046 @@ -128,10 +128,12 @@ do { \
23047 int __d0; \
23048 might_fault(); \
23049 __asm__ __volatile__( \
23050 + __COPYUSER_SET_ES \
23051 "0: rep; stosl\n" \
23052 " movl %2,%0\n" \
23053 "1: rep; stosb\n" \
23054 "2:\n" \
23055 + __COPYUSER_RESTORE_ES \
23056 ".section .fixup,\"ax\"\n" \
23057 "3: lea 0(%2,%0,4),%0\n" \
23058 " jmp 2b\n" \
23059 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
23060 might_fault();
23061
23062 __asm__ __volatile__(
23063 + __COPYUSER_SET_ES
23064 " testl %0, %0\n"
23065 " jz 3f\n"
23066 " andl %0,%%ecx\n"
23067 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
23068 " subl %%ecx,%0\n"
23069 " addl %0,%%eax\n"
23070 "1:\n"
23071 + __COPYUSER_RESTORE_ES
23072 ".section .fixup,\"ax\"\n"
23073 "2: xorl %%eax,%%eax\n"
23074 " jmp 1b\n"
23075 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
23076
23077 #ifdef CONFIG_X86_INTEL_USERCOPY
23078 static unsigned long
23079 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
23080 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
23081 {
23082 int d0, d1;
23083 __asm__ __volatile__(
23084 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23085 " .align 2,0x90\n"
23086 "3: movl 0(%4), %%eax\n"
23087 "4: movl 4(%4), %%edx\n"
23088 - "5: movl %%eax, 0(%3)\n"
23089 - "6: movl %%edx, 4(%3)\n"
23090 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
23091 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
23092 "7: movl 8(%4), %%eax\n"
23093 "8: movl 12(%4),%%edx\n"
23094 - "9: movl %%eax, 8(%3)\n"
23095 - "10: movl %%edx, 12(%3)\n"
23096 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
23097 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
23098 "11: movl 16(%4), %%eax\n"
23099 "12: movl 20(%4), %%edx\n"
23100 - "13: movl %%eax, 16(%3)\n"
23101 - "14: movl %%edx, 20(%3)\n"
23102 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
23103 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
23104 "15: movl 24(%4), %%eax\n"
23105 "16: movl 28(%4), %%edx\n"
23106 - "17: movl %%eax, 24(%3)\n"
23107 - "18: movl %%edx, 28(%3)\n"
23108 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
23109 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
23110 "19: movl 32(%4), %%eax\n"
23111 "20: movl 36(%4), %%edx\n"
23112 - "21: movl %%eax, 32(%3)\n"
23113 - "22: movl %%edx, 36(%3)\n"
23114 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
23115 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
23116 "23: movl 40(%4), %%eax\n"
23117 "24: movl 44(%4), %%edx\n"
23118 - "25: movl %%eax, 40(%3)\n"
23119 - "26: movl %%edx, 44(%3)\n"
23120 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
23121 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
23122 "27: movl 48(%4), %%eax\n"
23123 "28: movl 52(%4), %%edx\n"
23124 - "29: movl %%eax, 48(%3)\n"
23125 - "30: movl %%edx, 52(%3)\n"
23126 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
23127 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
23128 "31: movl 56(%4), %%eax\n"
23129 "32: movl 60(%4), %%edx\n"
23130 - "33: movl %%eax, 56(%3)\n"
23131 - "34: movl %%edx, 60(%3)\n"
23132 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
23133 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
23134 " addl $-64, %0\n"
23135 " addl $64, %4\n"
23136 " addl $64, %3\n"
23137 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23138 " shrl $2, %0\n"
23139 " andl $3, %%eax\n"
23140 " cld\n"
23141 + __COPYUSER_SET_ES
23142 "99: rep; movsl\n"
23143 "36: movl %%eax, %0\n"
23144 "37: rep; movsb\n"
23145 "100:\n"
23146 + __COPYUSER_RESTORE_ES
23147 + ".section .fixup,\"ax\"\n"
23148 + "101: lea 0(%%eax,%0,4),%0\n"
23149 + " jmp 100b\n"
23150 + ".previous\n"
23151 + ".section __ex_table,\"a\"\n"
23152 + " .align 4\n"
23153 + " .long 1b,100b\n"
23154 + " .long 2b,100b\n"
23155 + " .long 3b,100b\n"
23156 + " .long 4b,100b\n"
23157 + " .long 5b,100b\n"
23158 + " .long 6b,100b\n"
23159 + " .long 7b,100b\n"
23160 + " .long 8b,100b\n"
23161 + " .long 9b,100b\n"
23162 + " .long 10b,100b\n"
23163 + " .long 11b,100b\n"
23164 + " .long 12b,100b\n"
23165 + " .long 13b,100b\n"
23166 + " .long 14b,100b\n"
23167 + " .long 15b,100b\n"
23168 + " .long 16b,100b\n"
23169 + " .long 17b,100b\n"
23170 + " .long 18b,100b\n"
23171 + " .long 19b,100b\n"
23172 + " .long 20b,100b\n"
23173 + " .long 21b,100b\n"
23174 + " .long 22b,100b\n"
23175 + " .long 23b,100b\n"
23176 + " .long 24b,100b\n"
23177 + " .long 25b,100b\n"
23178 + " .long 26b,100b\n"
23179 + " .long 27b,100b\n"
23180 + " .long 28b,100b\n"
23181 + " .long 29b,100b\n"
23182 + " .long 30b,100b\n"
23183 + " .long 31b,100b\n"
23184 + " .long 32b,100b\n"
23185 + " .long 33b,100b\n"
23186 + " .long 34b,100b\n"
23187 + " .long 35b,100b\n"
23188 + " .long 36b,100b\n"
23189 + " .long 37b,100b\n"
23190 + " .long 99b,101b\n"
23191 + ".previous"
23192 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
23193 + : "1"(to), "2"(from), "0"(size)
23194 + : "eax", "edx", "memory");
23195 + return size;
23196 +}
23197 +
23198 +static unsigned long
23199 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
23200 +{
23201 + int d0, d1;
23202 + __asm__ __volatile__(
23203 + " .align 2,0x90\n"
23204 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
23205 + " cmpl $67, %0\n"
23206 + " jbe 3f\n"
23207 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
23208 + " .align 2,0x90\n"
23209 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
23210 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
23211 + "5: movl %%eax, 0(%3)\n"
23212 + "6: movl %%edx, 4(%3)\n"
23213 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
23214 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
23215 + "9: movl %%eax, 8(%3)\n"
23216 + "10: movl %%edx, 12(%3)\n"
23217 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
23218 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
23219 + "13: movl %%eax, 16(%3)\n"
23220 + "14: movl %%edx, 20(%3)\n"
23221 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
23222 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
23223 + "17: movl %%eax, 24(%3)\n"
23224 + "18: movl %%edx, 28(%3)\n"
23225 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
23226 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
23227 + "21: movl %%eax, 32(%3)\n"
23228 + "22: movl %%edx, 36(%3)\n"
23229 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
23230 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
23231 + "25: movl %%eax, 40(%3)\n"
23232 + "26: movl %%edx, 44(%3)\n"
23233 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
23234 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
23235 + "29: movl %%eax, 48(%3)\n"
23236 + "30: movl %%edx, 52(%3)\n"
23237 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
23238 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
23239 + "33: movl %%eax, 56(%3)\n"
23240 + "34: movl %%edx, 60(%3)\n"
23241 + " addl $-64, %0\n"
23242 + " addl $64, %4\n"
23243 + " addl $64, %3\n"
23244 + " cmpl $63, %0\n"
23245 + " ja 1b\n"
23246 + "35: movl %0, %%eax\n"
23247 + " shrl $2, %0\n"
23248 + " andl $3, %%eax\n"
23249 + " cld\n"
23250 + "99: rep; "__copyuser_seg" movsl\n"
23251 + "36: movl %%eax, %0\n"
23252 + "37: rep; "__copyuser_seg" movsb\n"
23253 + "100:\n"
23254 ".section .fixup,\"ax\"\n"
23255 "101: lea 0(%%eax,%0,4),%0\n"
23256 " jmp 100b\n"
23257 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23258 int d0, d1;
23259 __asm__ __volatile__(
23260 " .align 2,0x90\n"
23261 - "0: movl 32(%4), %%eax\n"
23262 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23263 " cmpl $67, %0\n"
23264 " jbe 2f\n"
23265 - "1: movl 64(%4), %%eax\n"
23266 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23267 " .align 2,0x90\n"
23268 - "2: movl 0(%4), %%eax\n"
23269 - "21: movl 4(%4), %%edx\n"
23270 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23271 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23272 " movl %%eax, 0(%3)\n"
23273 " movl %%edx, 4(%3)\n"
23274 - "3: movl 8(%4), %%eax\n"
23275 - "31: movl 12(%4),%%edx\n"
23276 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23277 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23278 " movl %%eax, 8(%3)\n"
23279 " movl %%edx, 12(%3)\n"
23280 - "4: movl 16(%4), %%eax\n"
23281 - "41: movl 20(%4), %%edx\n"
23282 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23283 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23284 " movl %%eax, 16(%3)\n"
23285 " movl %%edx, 20(%3)\n"
23286 - "10: movl 24(%4), %%eax\n"
23287 - "51: movl 28(%4), %%edx\n"
23288 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23289 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23290 " movl %%eax, 24(%3)\n"
23291 " movl %%edx, 28(%3)\n"
23292 - "11: movl 32(%4), %%eax\n"
23293 - "61: movl 36(%4), %%edx\n"
23294 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23295 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23296 " movl %%eax, 32(%3)\n"
23297 " movl %%edx, 36(%3)\n"
23298 - "12: movl 40(%4), %%eax\n"
23299 - "71: movl 44(%4), %%edx\n"
23300 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23301 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23302 " movl %%eax, 40(%3)\n"
23303 " movl %%edx, 44(%3)\n"
23304 - "13: movl 48(%4), %%eax\n"
23305 - "81: movl 52(%4), %%edx\n"
23306 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23307 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23308 " movl %%eax, 48(%3)\n"
23309 " movl %%edx, 52(%3)\n"
23310 - "14: movl 56(%4), %%eax\n"
23311 - "91: movl 60(%4), %%edx\n"
23312 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23313 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23314 " movl %%eax, 56(%3)\n"
23315 " movl %%edx, 60(%3)\n"
23316 " addl $-64, %0\n"
23317 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23318 " shrl $2, %0\n"
23319 " andl $3, %%eax\n"
23320 " cld\n"
23321 - "6: rep; movsl\n"
23322 + "6: rep; "__copyuser_seg" movsl\n"
23323 " movl %%eax,%0\n"
23324 - "7: rep; movsb\n"
23325 + "7: rep; "__copyuser_seg" movsb\n"
23326 "8:\n"
23327 ".section .fixup,\"ax\"\n"
23328 "9: lea 0(%%eax,%0,4),%0\n"
23329 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23330
23331 __asm__ __volatile__(
23332 " .align 2,0x90\n"
23333 - "0: movl 32(%4), %%eax\n"
23334 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23335 " cmpl $67, %0\n"
23336 " jbe 2f\n"
23337 - "1: movl 64(%4), %%eax\n"
23338 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23339 " .align 2,0x90\n"
23340 - "2: movl 0(%4), %%eax\n"
23341 - "21: movl 4(%4), %%edx\n"
23342 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23343 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23344 " movnti %%eax, 0(%3)\n"
23345 " movnti %%edx, 4(%3)\n"
23346 - "3: movl 8(%4), %%eax\n"
23347 - "31: movl 12(%4),%%edx\n"
23348 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23349 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23350 " movnti %%eax, 8(%3)\n"
23351 " movnti %%edx, 12(%3)\n"
23352 - "4: movl 16(%4), %%eax\n"
23353 - "41: movl 20(%4), %%edx\n"
23354 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23355 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23356 " movnti %%eax, 16(%3)\n"
23357 " movnti %%edx, 20(%3)\n"
23358 - "10: movl 24(%4), %%eax\n"
23359 - "51: movl 28(%4), %%edx\n"
23360 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23361 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23362 " movnti %%eax, 24(%3)\n"
23363 " movnti %%edx, 28(%3)\n"
23364 - "11: movl 32(%4), %%eax\n"
23365 - "61: movl 36(%4), %%edx\n"
23366 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23367 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23368 " movnti %%eax, 32(%3)\n"
23369 " movnti %%edx, 36(%3)\n"
23370 - "12: movl 40(%4), %%eax\n"
23371 - "71: movl 44(%4), %%edx\n"
23372 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23373 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23374 " movnti %%eax, 40(%3)\n"
23375 " movnti %%edx, 44(%3)\n"
23376 - "13: movl 48(%4), %%eax\n"
23377 - "81: movl 52(%4), %%edx\n"
23378 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23379 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23380 " movnti %%eax, 48(%3)\n"
23381 " movnti %%edx, 52(%3)\n"
23382 - "14: movl 56(%4), %%eax\n"
23383 - "91: movl 60(%4), %%edx\n"
23384 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23385 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23386 " movnti %%eax, 56(%3)\n"
23387 " movnti %%edx, 60(%3)\n"
23388 " addl $-64, %0\n"
23389 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23390 " shrl $2, %0\n"
23391 " andl $3, %%eax\n"
23392 " cld\n"
23393 - "6: rep; movsl\n"
23394 + "6: rep; "__copyuser_seg" movsl\n"
23395 " movl %%eax,%0\n"
23396 - "7: rep; movsb\n"
23397 + "7: rep; "__copyuser_seg" movsb\n"
23398 "8:\n"
23399 ".section .fixup,\"ax\"\n"
23400 "9: lea 0(%%eax,%0,4),%0\n"
23401 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
23402
23403 __asm__ __volatile__(
23404 " .align 2,0x90\n"
23405 - "0: movl 32(%4), %%eax\n"
23406 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23407 " cmpl $67, %0\n"
23408 " jbe 2f\n"
23409 - "1: movl 64(%4), %%eax\n"
23410 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23411 " .align 2,0x90\n"
23412 - "2: movl 0(%4), %%eax\n"
23413 - "21: movl 4(%4), %%edx\n"
23414 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23415 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23416 " movnti %%eax, 0(%3)\n"
23417 " movnti %%edx, 4(%3)\n"
23418 - "3: movl 8(%4), %%eax\n"
23419 - "31: movl 12(%4),%%edx\n"
23420 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23421 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23422 " movnti %%eax, 8(%3)\n"
23423 " movnti %%edx, 12(%3)\n"
23424 - "4: movl 16(%4), %%eax\n"
23425 - "41: movl 20(%4), %%edx\n"
23426 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23427 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23428 " movnti %%eax, 16(%3)\n"
23429 " movnti %%edx, 20(%3)\n"
23430 - "10: movl 24(%4), %%eax\n"
23431 - "51: movl 28(%4), %%edx\n"
23432 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23433 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23434 " movnti %%eax, 24(%3)\n"
23435 " movnti %%edx, 28(%3)\n"
23436 - "11: movl 32(%4), %%eax\n"
23437 - "61: movl 36(%4), %%edx\n"
23438 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23439 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23440 " movnti %%eax, 32(%3)\n"
23441 " movnti %%edx, 36(%3)\n"
23442 - "12: movl 40(%4), %%eax\n"
23443 - "71: movl 44(%4), %%edx\n"
23444 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23445 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23446 " movnti %%eax, 40(%3)\n"
23447 " movnti %%edx, 44(%3)\n"
23448 - "13: movl 48(%4), %%eax\n"
23449 - "81: movl 52(%4), %%edx\n"
23450 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23451 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23452 " movnti %%eax, 48(%3)\n"
23453 " movnti %%edx, 52(%3)\n"
23454 - "14: movl 56(%4), %%eax\n"
23455 - "91: movl 60(%4), %%edx\n"
23456 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23457 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23458 " movnti %%eax, 56(%3)\n"
23459 " movnti %%edx, 60(%3)\n"
23460 " addl $-64, %0\n"
23461 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
23462 " shrl $2, %0\n"
23463 " andl $3, %%eax\n"
23464 " cld\n"
23465 - "6: rep; movsl\n"
23466 + "6: rep; "__copyuser_seg" movsl\n"
23467 " movl %%eax,%0\n"
23468 - "7: rep; movsb\n"
23469 + "7: rep; "__copyuser_seg" movsb\n"
23470 "8:\n"
23471 ".section .fixup,\"ax\"\n"
23472 "9: lea 0(%%eax,%0,4),%0\n"
23473 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
23474 */
23475 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
23476 unsigned long size);
23477 -unsigned long __copy_user_intel(void __user *to, const void *from,
23478 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
23479 + unsigned long size);
23480 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
23481 unsigned long size);
23482 unsigned long __copy_user_zeroing_intel_nocache(void *to,
23483 const void __user *from, unsigned long size);
23484 #endif /* CONFIG_X86_INTEL_USERCOPY */
23485
23486 /* Generic arbitrary sized copy. */
23487 -#define __copy_user(to, from, size) \
23488 +#define __copy_user(to, from, size, prefix, set, restore) \
23489 do { \
23490 int __d0, __d1, __d2; \
23491 __asm__ __volatile__( \
23492 + set \
23493 " cmp $7,%0\n" \
23494 " jbe 1f\n" \
23495 " movl %1,%0\n" \
23496 " negl %0\n" \
23497 " andl $7,%0\n" \
23498 " subl %0,%3\n" \
23499 - "4: rep; movsb\n" \
23500 + "4: rep; "prefix"movsb\n" \
23501 " movl %3,%0\n" \
23502 " shrl $2,%0\n" \
23503 " andl $3,%3\n" \
23504 " .align 2,0x90\n" \
23505 - "0: rep; movsl\n" \
23506 + "0: rep; "prefix"movsl\n" \
23507 " movl %3,%0\n" \
23508 - "1: rep; movsb\n" \
23509 + "1: rep; "prefix"movsb\n" \
23510 "2:\n" \
23511 + restore \
23512 ".section .fixup,\"ax\"\n" \
23513 "5: addl %3,%0\n" \
23514 " jmp 2b\n" \
23515 @@ -682,14 +799,14 @@ do { \
23516 " negl %0\n" \
23517 " andl $7,%0\n" \
23518 " subl %0,%3\n" \
23519 - "4: rep; movsb\n" \
23520 + "4: rep; "__copyuser_seg"movsb\n" \
23521 " movl %3,%0\n" \
23522 " shrl $2,%0\n" \
23523 " andl $3,%3\n" \
23524 " .align 2,0x90\n" \
23525 - "0: rep; movsl\n" \
23526 + "0: rep; "__copyuser_seg"movsl\n" \
23527 " movl %3,%0\n" \
23528 - "1: rep; movsb\n" \
23529 + "1: rep; "__copyuser_seg"movsb\n" \
23530 "2:\n" \
23531 ".section .fixup,\"ax\"\n" \
23532 "5: addl %3,%0\n" \
23533 @@ -775,9 +892,9 @@ survive:
23534 }
23535 #endif
23536 if (movsl_is_ok(to, from, n))
23537 - __copy_user(to, from, n);
23538 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
23539 else
23540 - n = __copy_user_intel(to, from, n);
23541 + n = __generic_copy_to_user_intel(to, from, n);
23542 return n;
23543 }
23544 EXPORT_SYMBOL(__copy_to_user_ll);
23545 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
23546 unsigned long n)
23547 {
23548 if (movsl_is_ok(to, from, n))
23549 - __copy_user(to, from, n);
23550 + __copy_user(to, from, n, __copyuser_seg, "", "");
23551 else
23552 - n = __copy_user_intel((void __user *)to,
23553 - (const void *)from, n);
23554 + n = __generic_copy_from_user_intel(to, from, n);
23555 return n;
23556 }
23557 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
23558 @@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
23559 if (n > 64 && cpu_has_xmm2)
23560 n = __copy_user_intel_nocache(to, from, n);
23561 else
23562 - __copy_user(to, from, n);
23563 + __copy_user(to, from, n, __copyuser_seg, "", "");
23564 #else
23565 - __copy_user(to, from, n);
23566 + __copy_user(to, from, n, __copyuser_seg, "", "");
23567 #endif
23568 return n;
23569 }
23570 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
23571
23572 -/**
23573 - * copy_to_user: - Copy a block of data into user space.
23574 - * @to: Destination address, in user space.
23575 - * @from: Source address, in kernel space.
23576 - * @n: Number of bytes to copy.
23577 - *
23578 - * Context: User context only. This function may sleep.
23579 - *
23580 - * Copy data from kernel space to user space.
23581 - *
23582 - * Returns number of bytes that could not be copied.
23583 - * On success, this will be zero.
23584 - */
23585 -unsigned long
23586 -copy_to_user(void __user *to, const void *from, unsigned long n)
23587 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23588 +void __set_fs(mm_segment_t x)
23589 {
23590 - if (access_ok(VERIFY_WRITE, to, n))
23591 - n = __copy_to_user(to, from, n);
23592 - return n;
23593 + switch (x.seg) {
23594 + case 0:
23595 + loadsegment(gs, 0);
23596 + break;
23597 + case TASK_SIZE_MAX:
23598 + loadsegment(gs, __USER_DS);
23599 + break;
23600 + case -1UL:
23601 + loadsegment(gs, __KERNEL_DS);
23602 + break;
23603 + default:
23604 + BUG();
23605 + }
23606 + return;
23607 }
23608 -EXPORT_SYMBOL(copy_to_user);
23609 +EXPORT_SYMBOL(__set_fs);
23610
23611 -/**
23612 - * copy_from_user: - Copy a block of data from user space.
23613 - * @to: Destination address, in kernel space.
23614 - * @from: Source address, in user space.
23615 - * @n: Number of bytes to copy.
23616 - *
23617 - * Context: User context only. This function may sleep.
23618 - *
23619 - * Copy data from user space to kernel space.
23620 - *
23621 - * Returns number of bytes that could not be copied.
23622 - * On success, this will be zero.
23623 - *
23624 - * If some data could not be copied, this function will pad the copied
23625 - * data to the requested size using zero bytes.
23626 - */
23627 -unsigned long
23628 -copy_from_user(void *to, const void __user *from, unsigned long n)
23629 +void set_fs(mm_segment_t x)
23630 {
23631 - if (access_ok(VERIFY_READ, from, n))
23632 - n = __copy_from_user(to, from, n);
23633 - else
23634 - memset(to, 0, n);
23635 - return n;
23636 + current_thread_info()->addr_limit = x;
23637 + __set_fs(x);
23638 }
23639 -EXPORT_SYMBOL(copy_from_user);
23640 +EXPORT_SYMBOL(set_fs);
23641 +#endif
23642 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
23643 index b7c2849..8633ad8 100644
23644 --- a/arch/x86/lib/usercopy_64.c
23645 +++ b/arch/x86/lib/usercopy_64.c
23646 @@ -42,6 +42,12 @@ long
23647 __strncpy_from_user(char *dst, const char __user *src, long count)
23648 {
23649 long res;
23650 +
23651 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23652 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
23653 + src += PAX_USER_SHADOW_BASE;
23654 +#endif
23655 +
23656 __do_strncpy_from_user(dst, src, count, res);
23657 return res;
23658 }
23659 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
23660 {
23661 long __d0;
23662 might_fault();
23663 +
23664 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23665 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
23666 + addr += PAX_USER_SHADOW_BASE;
23667 +#endif
23668 +
23669 /* no memory constraint because it doesn't change any memory gcc knows
23670 about */
23671 asm volatile(
23672 @@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
23673 }
23674 EXPORT_SYMBOL(strlen_user);
23675
23676 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
23677 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
23678 {
23679 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23680 - return copy_user_generic((__force void *)to, (__force void *)from, len);
23681 - }
23682 - return len;
23683 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23684 +
23685 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23686 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
23687 + to += PAX_USER_SHADOW_BASE;
23688 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
23689 + from += PAX_USER_SHADOW_BASE;
23690 +#endif
23691 +
23692 + return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
23693 + }
23694 + return len;
23695 }
23696 EXPORT_SYMBOL(copy_in_user);
23697
23698 @@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
23699 * it is not necessary to optimize tail handling.
23700 */
23701 unsigned long
23702 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23703 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
23704 {
23705 char c;
23706 unsigned zero_len;
23707 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
23708 index 61b41ca..5fef66a 100644
23709 --- a/arch/x86/mm/extable.c
23710 +++ b/arch/x86/mm/extable.c
23711 @@ -1,14 +1,71 @@
23712 #include <linux/module.h>
23713 #include <linux/spinlock.h>
23714 +#include <linux/sort.h>
23715 #include <asm/uaccess.h>
23716 +#include <asm/pgtable.h>
23717
23718 +/*
23719 + * The exception table needs to be sorted so that the binary
23720 + * search that we use to find entries in it works properly.
23721 + * This is used both for the kernel exception table and for
23722 + * the exception tables of modules that get loaded.
23723 + */
23724 +static int cmp_ex(const void *a, const void *b)
23725 +{
23726 + const struct exception_table_entry *x = a, *y = b;
23727 +
23728 + /* avoid overflow */
23729 + if (x->insn > y->insn)
23730 + return 1;
23731 + if (x->insn < y->insn)
23732 + return -1;
23733 + return 0;
23734 +}
23735 +
23736 +static void swap_ex(void *a, void *b, int size)
23737 +{
23738 + struct exception_table_entry t, *x = a, *y = b;
23739 +
23740 + t = *x;
23741 +
23742 + pax_open_kernel();
23743 + *x = *y;
23744 + *y = t;
23745 + pax_close_kernel();
23746 +}
23747 +
23748 +void sort_extable(struct exception_table_entry *start,
23749 + struct exception_table_entry *finish)
23750 +{
23751 + sort(start, finish - start, sizeof(struct exception_table_entry),
23752 + cmp_ex, swap_ex);
23753 +}
23754 +
23755 +#ifdef CONFIG_MODULES
23756 +/*
23757 + * If the exception table is sorted, any referring to the module init
23758 + * will be at the beginning or the end.
23759 + */
23760 +void trim_init_extable(struct module *m)
23761 +{
23762 + /*trim the beginning*/
23763 + while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
23764 + m->extable++;
23765 + m->num_exentries--;
23766 + }
23767 + /*trim the end*/
23768 + while (m->num_exentries &&
23769 + within_module_init(m->extable[m->num_exentries-1].insn, m))
23770 + m->num_exentries--;
23771 +}
23772 +#endif /* CONFIG_MODULES */
23773
23774 int fixup_exception(struct pt_regs *regs)
23775 {
23776 const struct exception_table_entry *fixup;
23777
23778 #ifdef CONFIG_PNPBIOS
23779 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
23780 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
23781 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
23782 extern u32 pnp_bios_is_utter_crap;
23783 pnp_bios_is_utter_crap = 1;
23784 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
23785 index 8ac0d76..3f191dc 100644
23786 --- a/arch/x86/mm/fault.c
23787 +++ b/arch/x86/mm/fault.c
23788 @@ -11,10 +11,19 @@
23789 #include <linux/kprobes.h> /* __kprobes, ... */
23790 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
23791 #include <linux/perf_event.h> /* perf_sw_event */
23792 +#include <linux/unistd.h>
23793 +#include <linux/compiler.h>
23794
23795 #include <asm/traps.h> /* dotraplinkage, ... */
23796 #include <asm/pgalloc.h> /* pgd_*(), ... */
23797 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
23798 +#include <asm/vsyscall.h>
23799 +#include <asm/tlbflush.h>
23800 +
23801 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23802 +#include <asm/stacktrace.h>
23803 +#include "../kernel/dumpstack.h"
23804 +#endif
23805
23806 /*
23807 * Page fault error code bits:
23808 @@ -51,7 +60,7 @@ static inline int notify_page_fault(struct pt_regs *regs)
23809 int ret = 0;
23810
23811 /* kprobe_running() needs smp_processor_id() */
23812 - if (kprobes_built_in() && !user_mode_vm(regs)) {
23813 + if (kprobes_built_in() && !user_mode(regs)) {
23814 preempt_disable();
23815 if (kprobe_running() && kprobe_fault_handler(regs, 14))
23816 ret = 1;
23817 @@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
23818 return !instr_lo || (instr_lo>>1) == 1;
23819 case 0x00:
23820 /* Prefetch instruction is 0x0F0D or 0x0F18 */
23821 - if (probe_kernel_address(instr, opcode))
23822 + if (user_mode(regs)) {
23823 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23824 + return 0;
23825 + } else if (probe_kernel_address(instr, opcode))
23826 return 0;
23827
23828 *prefetch = (instr_lo == 0xF) &&
23829 @@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
23830 while (instr < max_instr) {
23831 unsigned char opcode;
23832
23833 - if (probe_kernel_address(instr, opcode))
23834 + if (user_mode(regs)) {
23835 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23836 + break;
23837 + } else if (probe_kernel_address(instr, opcode))
23838 break;
23839
23840 instr++;
23841 @@ -172,6 +187,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
23842 force_sig_info(si_signo, &info, tsk);
23843 }
23844
23845 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23846 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
23847 +#endif
23848 +
23849 +#ifdef CONFIG_PAX_EMUTRAMP
23850 +static int pax_handle_fetch_fault(struct pt_regs *regs);
23851 +#endif
23852 +
23853 +#ifdef CONFIG_PAX_PAGEEXEC
23854 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
23855 +{
23856 + pgd_t *pgd;
23857 + pud_t *pud;
23858 + pmd_t *pmd;
23859 +
23860 + pgd = pgd_offset(mm, address);
23861 + if (!pgd_present(*pgd))
23862 + return NULL;
23863 + pud = pud_offset(pgd, address);
23864 + if (!pud_present(*pud))
23865 + return NULL;
23866 + pmd = pmd_offset(pud, address);
23867 + if (!pmd_present(*pmd))
23868 + return NULL;
23869 + return pmd;
23870 +}
23871 +#endif
23872 +
23873 DEFINE_SPINLOCK(pgd_lock);
23874 LIST_HEAD(pgd_list);
23875
23876 @@ -224,11 +267,24 @@ void vmalloc_sync_all(void)
23877 address += PMD_SIZE) {
23878
23879 unsigned long flags;
23880 +
23881 +#ifdef CONFIG_PAX_PER_CPU_PGD
23882 + unsigned long cpu;
23883 +#else
23884 struct page *page;
23885 +#endif
23886
23887 spin_lock_irqsave(&pgd_lock, flags);
23888 +
23889 +#ifdef CONFIG_PAX_PER_CPU_PGD
23890 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
23891 + pgd_t *pgd = get_cpu_pgd(cpu);
23892 +#else
23893 list_for_each_entry(page, &pgd_list, lru) {
23894 - if (!vmalloc_sync_one(page_address(page), address))
23895 + pgd_t *pgd = page_address(page);
23896 +#endif
23897 +
23898 + if (!vmalloc_sync_one(pgd, address))
23899 break;
23900 }
23901 spin_unlock_irqrestore(&pgd_lock, flags);
23902 @@ -258,6 +314,11 @@ static noinline int vmalloc_fault(unsigned long address)
23903 * an interrupt in the middle of a task switch..
23904 */
23905 pgd_paddr = read_cr3();
23906 +
23907 +#ifdef CONFIG_PAX_PER_CPU_PGD
23908 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
23909 +#endif
23910 +
23911 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
23912 if (!pmd_k)
23913 return -1;
23914 @@ -332,15 +393,27 @@ void vmalloc_sync_all(void)
23915
23916 const pgd_t *pgd_ref = pgd_offset_k(address);
23917 unsigned long flags;
23918 +
23919 +#ifdef CONFIG_PAX_PER_CPU_PGD
23920 + unsigned long cpu;
23921 +#else
23922 struct page *page;
23923 +#endif
23924
23925 if (pgd_none(*pgd_ref))
23926 continue;
23927
23928 spin_lock_irqsave(&pgd_lock, flags);
23929 +
23930 +#ifdef CONFIG_PAX_PER_CPU_PGD
23931 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
23932 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
23933 +#else
23934 list_for_each_entry(page, &pgd_list, lru) {
23935 pgd_t *pgd;
23936 pgd = (pgd_t *)page_address(page) + pgd_index(address);
23937 +#endif
23938 +
23939 if (pgd_none(*pgd))
23940 set_pgd(pgd, *pgd_ref);
23941 else
23942 @@ -373,7 +446,14 @@ static noinline int vmalloc_fault(unsigned long address)
23943 * happen within a race in page table update. In the later
23944 * case just flush:
23945 */
23946 +
23947 +#ifdef CONFIG_PAX_PER_CPU_PGD
23948 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
23949 + pgd = pgd_offset_cpu(smp_processor_id(), address);
23950 +#else
23951 pgd = pgd_offset(current->active_mm, address);
23952 +#endif
23953 +
23954 pgd_ref = pgd_offset_k(address);
23955 if (pgd_none(*pgd_ref))
23956 return -1;
23957 @@ -535,7 +615,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
23958 static int is_errata100(struct pt_regs *regs, unsigned long address)
23959 {
23960 #ifdef CONFIG_X86_64
23961 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
23962 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
23963 return 1;
23964 #endif
23965 return 0;
23966 @@ -562,7 +642,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
23967 }
23968
23969 static const char nx_warning[] = KERN_CRIT
23970 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
23971 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
23972
23973 static void
23974 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23975 @@ -571,15 +651,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23976 if (!oops_may_print())
23977 return;
23978
23979 - if (error_code & PF_INSTR) {
23980 + if (nx_enabled && (error_code & PF_INSTR)) {
23981 unsigned int level;
23982
23983 pte_t *pte = lookup_address(address, &level);
23984
23985 if (pte && pte_present(*pte) && !pte_exec(*pte))
23986 - printk(nx_warning, current_uid());
23987 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
23988 }
23989
23990 +#ifdef CONFIG_PAX_KERNEXEC
23991 + if (init_mm.start_code <= address && address < init_mm.end_code) {
23992 + if (current->signal->curr_ip)
23993 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23994 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
23995 + else
23996 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23997 + current->comm, task_pid_nr(current), current_uid(), current_euid());
23998 + }
23999 +#endif
24000 +
24001 printk(KERN_ALERT "BUG: unable to handle kernel ");
24002 if (address < PAGE_SIZE)
24003 printk(KERN_CONT "NULL pointer dereference");
24004 @@ -705,6 +796,23 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
24005 {
24006 struct task_struct *tsk = current;
24007
24008 +#ifdef CONFIG_X86_64
24009 + struct mm_struct *mm = tsk->mm;
24010 +
24011 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
24012 + if (regs->ip == (unsigned long)vgettimeofday) {
24013 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
24014 + return;
24015 + } else if (regs->ip == (unsigned long)vtime) {
24016 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
24017 + return;
24018 + } else if (regs->ip == (unsigned long)vgetcpu) {
24019 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
24020 + return;
24021 + }
24022 + }
24023 +#endif
24024 +
24025 /* User mode accesses just cause a SIGSEGV */
24026 if (error_code & PF_USER) {
24027 /*
24028 @@ -722,6 +830,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
24029 if (is_errata100(regs, address))
24030 return;
24031
24032 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24033 + if (pax_is_fetch_fault(regs, error_code, address)) {
24034 +
24035 +#ifdef CONFIG_PAX_EMUTRAMP
24036 + switch (pax_handle_fetch_fault(regs)) {
24037 + case 2:
24038 + return;
24039 + }
24040 +#endif
24041 +
24042 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
24043 + do_group_exit(SIGKILL);
24044 + }
24045 +#endif
24046 +
24047 if (unlikely(show_unhandled_signals))
24048 show_signal_msg(regs, error_code, address, tsk);
24049
24050 @@ -818,7 +941,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
24051 if (fault & VM_FAULT_HWPOISON) {
24052 printk(KERN_ERR
24053 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
24054 - tsk->comm, tsk->pid, address);
24055 + tsk->comm, task_pid_nr(tsk), address);
24056 code = BUS_MCEERR_AR;
24057 }
24058 #endif
24059 @@ -857,6 +980,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
24060 return 1;
24061 }
24062
24063 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24064 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
24065 +{
24066 + pte_t *pte;
24067 + pmd_t *pmd;
24068 + spinlock_t *ptl;
24069 + unsigned char pte_mask;
24070 +
24071 + if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
24072 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
24073 + return 0;
24074 +
24075 + /* PaX: it's our fault, let's handle it if we can */
24076 +
24077 + /* PaX: take a look at read faults before acquiring any locks */
24078 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
24079 + /* instruction fetch attempt from a protected page in user mode */
24080 + up_read(&mm->mmap_sem);
24081 +
24082 +#ifdef CONFIG_PAX_EMUTRAMP
24083 + switch (pax_handle_fetch_fault(regs)) {
24084 + case 2:
24085 + return 1;
24086 + }
24087 +#endif
24088 +
24089 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
24090 + do_group_exit(SIGKILL);
24091 + }
24092 +
24093 + pmd = pax_get_pmd(mm, address);
24094 + if (unlikely(!pmd))
24095 + return 0;
24096 +
24097 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
24098 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
24099 + pte_unmap_unlock(pte, ptl);
24100 + return 0;
24101 + }
24102 +
24103 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
24104 + /* write attempt to a protected page in user mode */
24105 + pte_unmap_unlock(pte, ptl);
24106 + return 0;
24107 + }
24108 +
24109 +#ifdef CONFIG_SMP
24110 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
24111 +#else
24112 + if (likely(address > get_limit(regs->cs)))
24113 +#endif
24114 + {
24115 + set_pte(pte, pte_mkread(*pte));
24116 + __flush_tlb_one(address);
24117 + pte_unmap_unlock(pte, ptl);
24118 + up_read(&mm->mmap_sem);
24119 + return 1;
24120 + }
24121 +
24122 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
24123 +
24124 + /*
24125 + * PaX: fill DTLB with user rights and retry
24126 + */
24127 + __asm__ __volatile__ (
24128 + "orb %2,(%1)\n"
24129 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
24130 +/*
24131 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
24132 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
24133 + * page fault when examined during a TLB load attempt. this is true not only
24134 + * for PTEs holding a non-present entry but also present entries that will
24135 + * raise a page fault (such as those set up by PaX, or the copy-on-write
24136 + * mechanism). in effect it means that we do *not* need to flush the TLBs
24137 + * for our target pages since their PTEs are simply not in the TLBs at all.
24138 +
24139 + * the best thing in omitting it is that we gain around 15-20% speed in the
24140 + * fast path of the page fault handler and can get rid of tracing since we
24141 + * can no longer flush unintended entries.
24142 + */
24143 + "invlpg (%0)\n"
24144 +#endif
24145 + __copyuser_seg"testb $0,(%0)\n"
24146 + "xorb %3,(%1)\n"
24147 + :
24148 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
24149 + : "memory", "cc");
24150 + pte_unmap_unlock(pte, ptl);
24151 + up_read(&mm->mmap_sem);
24152 + return 1;
24153 +}
24154 +#endif
24155 +
24156 /*
24157 * Handle a spurious fault caused by a stale TLB entry.
24158 *
24159 @@ -923,6 +1139,9 @@ int show_unhandled_signals = 1;
24160 static inline int
24161 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
24162 {
24163 + if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
24164 + return 1;
24165 +
24166 if (write) {
24167 /* write, present and write, not present: */
24168 if (unlikely(!(vma->vm_flags & VM_WRITE)))
24169 @@ -956,16 +1175,30 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24170 {
24171 struct vm_area_struct *vma;
24172 struct task_struct *tsk;
24173 - unsigned long address;
24174 struct mm_struct *mm;
24175 int write;
24176 int fault;
24177
24178 - tsk = current;
24179 - mm = tsk->mm;
24180 -
24181 /* Get the faulting address: */
24182 - address = read_cr2();
24183 + unsigned long address = read_cr2();
24184 +
24185 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24186 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
24187 + if (!search_exception_tables(regs->ip)) {
24188 + bad_area_nosemaphore(regs, error_code, address);
24189 + return;
24190 + }
24191 + if (address < PAX_USER_SHADOW_BASE) {
24192 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
24193 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
24194 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
24195 + } else
24196 + address -= PAX_USER_SHADOW_BASE;
24197 + }
24198 +#endif
24199 +
24200 + tsk = current;
24201 + mm = tsk->mm;
24202
24203 /*
24204 * Detect and handle instructions that would cause a page fault for
24205 @@ -1026,7 +1259,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24206 * User-mode registers count as a user access even for any
24207 * potential system fault or CPU buglet:
24208 */
24209 - if (user_mode_vm(regs)) {
24210 + if (user_mode(regs)) {
24211 local_irq_enable();
24212 error_code |= PF_USER;
24213 } else {
24214 @@ -1080,6 +1313,11 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24215 might_sleep();
24216 }
24217
24218 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24219 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
24220 + return;
24221 +#endif
24222 +
24223 vma = find_vma(mm, address);
24224 if (unlikely(!vma)) {
24225 bad_area(regs, error_code, address);
24226 @@ -1091,18 +1329,24 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24227 bad_area(regs, error_code, address);
24228 return;
24229 }
24230 - if (error_code & PF_USER) {
24231 - /*
24232 - * Accessing the stack below %sp is always a bug.
24233 - * The large cushion allows instructions like enter
24234 - * and pusha to work. ("enter $65535, $31" pushes
24235 - * 32 pointers and then decrements %sp by 65535.)
24236 - */
24237 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
24238 - bad_area(regs, error_code, address);
24239 - return;
24240 - }
24241 + /*
24242 + * Accessing the stack below %sp is always a bug.
24243 + * The large cushion allows instructions like enter
24244 + * and pusha to work. ("enter $65535, $31" pushes
24245 + * 32 pointers and then decrements %sp by 65535.)
24246 + */
24247 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
24248 + bad_area(regs, error_code, address);
24249 + return;
24250 }
24251 +
24252 +#ifdef CONFIG_PAX_SEGMEXEC
24253 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
24254 + bad_area(regs, error_code, address);
24255 + return;
24256 + }
24257 +#endif
24258 +
24259 if (unlikely(expand_stack(vma, address))) {
24260 bad_area(regs, error_code, address);
24261 return;
24262 @@ -1146,3 +1390,240 @@ good_area:
24263
24264 up_read(&mm->mmap_sem);
24265 }
24266 +
24267 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24268 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
24269 +{
24270 + struct mm_struct *mm = current->mm;
24271 + unsigned long ip = regs->ip;
24272 +
24273 + if (v8086_mode(regs))
24274 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
24275 +
24276 +#ifdef CONFIG_PAX_PAGEEXEC
24277 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
24278 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
24279 + return true;
24280 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
24281 + return true;
24282 + return false;
24283 + }
24284 +#endif
24285 +
24286 +#ifdef CONFIG_PAX_SEGMEXEC
24287 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
24288 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
24289 + return true;
24290 + return false;
24291 + }
24292 +#endif
24293 +
24294 + return false;
24295 +}
24296 +#endif
24297 +
24298 +#ifdef CONFIG_PAX_EMUTRAMP
24299 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
24300 +{
24301 + int err;
24302 +
24303 + do { /* PaX: gcc trampoline emulation #1 */
24304 + unsigned char mov1, mov2;
24305 + unsigned short jmp;
24306 + unsigned int addr1, addr2;
24307 +
24308 +#ifdef CONFIG_X86_64
24309 + if ((regs->ip + 11) >> 32)
24310 + break;
24311 +#endif
24312 +
24313 + err = get_user(mov1, (unsigned char __user *)regs->ip);
24314 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24315 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
24316 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24317 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
24318 +
24319 + if (err)
24320 + break;
24321 +
24322 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
24323 + regs->cx = addr1;
24324 + regs->ax = addr2;
24325 + regs->ip = addr2;
24326 + return 2;
24327 + }
24328 + } while (0);
24329 +
24330 + do { /* PaX: gcc trampoline emulation #2 */
24331 + unsigned char mov, jmp;
24332 + unsigned int addr1, addr2;
24333 +
24334 +#ifdef CONFIG_X86_64
24335 + if ((regs->ip + 9) >> 32)
24336 + break;
24337 +#endif
24338 +
24339 + err = get_user(mov, (unsigned char __user *)regs->ip);
24340 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24341 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24342 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24343 +
24344 + if (err)
24345 + break;
24346 +
24347 + if (mov == 0xB9 && jmp == 0xE9) {
24348 + regs->cx = addr1;
24349 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24350 + return 2;
24351 + }
24352 + } while (0);
24353 +
24354 + return 1; /* PaX in action */
24355 +}
24356 +
24357 +#ifdef CONFIG_X86_64
24358 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
24359 +{
24360 + int err;
24361 +
24362 + do { /* PaX: gcc trampoline emulation #1 */
24363 + unsigned short mov1, mov2, jmp1;
24364 + unsigned char jmp2;
24365 + unsigned int addr1;
24366 + unsigned long addr2;
24367 +
24368 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24369 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
24370 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
24371 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
24372 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
24373 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
24374 +
24375 + if (err)
24376 + break;
24377 +
24378 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24379 + regs->r11 = addr1;
24380 + regs->r10 = addr2;
24381 + regs->ip = addr1;
24382 + return 2;
24383 + }
24384 + } while (0);
24385 +
24386 + do { /* PaX: gcc trampoline emulation #2 */
24387 + unsigned short mov1, mov2, jmp1;
24388 + unsigned char jmp2;
24389 + unsigned long addr1, addr2;
24390 +
24391 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24392 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24393 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24394 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24395 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
24396 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
24397 +
24398 + if (err)
24399 + break;
24400 +
24401 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24402 + regs->r11 = addr1;
24403 + regs->r10 = addr2;
24404 + regs->ip = addr1;
24405 + return 2;
24406 + }
24407 + } while (0);
24408 +
24409 + return 1; /* PaX in action */
24410 +}
24411 +#endif
24412 +
24413 +/*
24414 + * PaX: decide what to do with offenders (regs->ip = fault address)
24415 + *
24416 + * returns 1 when task should be killed
24417 + * 2 when gcc trampoline was detected
24418 + */
24419 +static int pax_handle_fetch_fault(struct pt_regs *regs)
24420 +{
24421 + if (v8086_mode(regs))
24422 + return 1;
24423 +
24424 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
24425 + return 1;
24426 +
24427 +#ifdef CONFIG_X86_32
24428 + return pax_handle_fetch_fault_32(regs);
24429 +#else
24430 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
24431 + return pax_handle_fetch_fault_32(regs);
24432 + else
24433 + return pax_handle_fetch_fault_64(regs);
24434 +#endif
24435 +}
24436 +#endif
24437 +
24438 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24439 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
24440 +{
24441 + long i;
24442 +
24443 + printk(KERN_ERR "PAX: bytes at PC: ");
24444 + for (i = 0; i < 20; i++) {
24445 + unsigned char c;
24446 + if (get_user(c, (unsigned char __force_user *)pc+i))
24447 + printk(KERN_CONT "?? ");
24448 + else
24449 + printk(KERN_CONT "%02x ", c);
24450 + }
24451 + printk("\n");
24452 +
24453 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
24454 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
24455 + unsigned long c;
24456 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
24457 +#ifdef CONFIG_X86_32
24458 + printk(KERN_CONT "???????? ");
24459 +#else
24460 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
24461 + printk(KERN_CONT "???????? ???????? ");
24462 + else
24463 + printk(KERN_CONT "???????????????? ");
24464 +#endif
24465 + } else {
24466 +#ifdef CONFIG_X86_64
24467 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
24468 + printk(KERN_CONT "%08x ", (unsigned int)c);
24469 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
24470 + } else
24471 +#endif
24472 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
24473 + }
24474 + }
24475 + printk("\n");
24476 +}
24477 +#endif
24478 +
24479 +/**
24480 + * probe_kernel_write(): safely attempt to write to a location
24481 + * @dst: address to write to
24482 + * @src: pointer to the data that shall be written
24483 + * @size: size of the data chunk
24484 + *
24485 + * Safely write to address @dst from the buffer at @src. If a kernel fault
24486 + * happens, handle that and return -EFAULT.
24487 + */
24488 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
24489 +{
24490 + long ret;
24491 + mm_segment_t old_fs = get_fs();
24492 +
24493 + set_fs(KERNEL_DS);
24494 + pagefault_disable();
24495 + pax_open_kernel();
24496 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
24497 + pax_close_kernel();
24498 + pagefault_enable();
24499 + set_fs(old_fs);
24500 +
24501 + return ret ? -EFAULT : 0;
24502 +}
24503 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
24504 index 71da1bc..7a16bf4 100644
24505 --- a/arch/x86/mm/gup.c
24506 +++ b/arch/x86/mm/gup.c
24507 @@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
24508 addr = start;
24509 len = (unsigned long) nr_pages << PAGE_SHIFT;
24510 end = start + len;
24511 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24512 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24513 (void __user *)start, len)))
24514 return 0;
24515
24516 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
24517 index 63a6ba6..79abd7a 100644
24518 --- a/arch/x86/mm/highmem_32.c
24519 +++ b/arch/x86/mm/highmem_32.c
24520 @@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
24521 idx = type + KM_TYPE_NR*smp_processor_id();
24522 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24523 BUG_ON(!pte_none(*(kmap_pte-idx)));
24524 +
24525 + pax_open_kernel();
24526 set_pte(kmap_pte-idx, mk_pte(page, prot));
24527 + pax_close_kernel();
24528
24529 return (void *)vaddr;
24530 }
24531 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
24532 index f46c340..6ff9a26 100644
24533 --- a/arch/x86/mm/hugetlbpage.c
24534 +++ b/arch/x86/mm/hugetlbpage.c
24535 @@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
24536 struct hstate *h = hstate_file(file);
24537 struct mm_struct *mm = current->mm;
24538 struct vm_area_struct *vma;
24539 - unsigned long start_addr;
24540 + unsigned long start_addr, pax_task_size = TASK_SIZE;
24541 +
24542 +#ifdef CONFIG_PAX_SEGMEXEC
24543 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24544 + pax_task_size = SEGMEXEC_TASK_SIZE;
24545 +#endif
24546 +
24547 + pax_task_size -= PAGE_SIZE;
24548
24549 if (len > mm->cached_hole_size) {
24550 - start_addr = mm->free_area_cache;
24551 + start_addr = mm->free_area_cache;
24552 } else {
24553 - start_addr = TASK_UNMAPPED_BASE;
24554 - mm->cached_hole_size = 0;
24555 + start_addr = mm->mmap_base;
24556 + mm->cached_hole_size = 0;
24557 }
24558
24559 full_search:
24560 @@ -281,26 +288,27 @@ full_search:
24561
24562 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
24563 /* At this point: (!vma || addr < vma->vm_end). */
24564 - if (TASK_SIZE - len < addr) {
24565 + if (pax_task_size - len < addr) {
24566 /*
24567 * Start a new search - just in case we missed
24568 * some holes.
24569 */
24570 - if (start_addr != TASK_UNMAPPED_BASE) {
24571 - start_addr = TASK_UNMAPPED_BASE;
24572 + if (start_addr != mm->mmap_base) {
24573 + start_addr = mm->mmap_base;
24574 mm->cached_hole_size = 0;
24575 goto full_search;
24576 }
24577 return -ENOMEM;
24578 }
24579 - if (!vma || addr + len <= vma->vm_start) {
24580 - mm->free_area_cache = addr + len;
24581 - return addr;
24582 - }
24583 + if (check_heap_stack_gap(vma, addr, len))
24584 + break;
24585 if (addr + mm->cached_hole_size < vma->vm_start)
24586 mm->cached_hole_size = vma->vm_start - addr;
24587 addr = ALIGN(vma->vm_end, huge_page_size(h));
24588 }
24589 +
24590 + mm->free_area_cache = addr + len;
24591 + return addr;
24592 }
24593
24594 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24595 @@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24596 {
24597 struct hstate *h = hstate_file(file);
24598 struct mm_struct *mm = current->mm;
24599 - struct vm_area_struct *vma, *prev_vma;
24600 - unsigned long base = mm->mmap_base, addr = addr0;
24601 + struct vm_area_struct *vma;
24602 + unsigned long base = mm->mmap_base, addr;
24603 unsigned long largest_hole = mm->cached_hole_size;
24604 - int first_time = 1;
24605
24606 /* don't allow allocations above current base */
24607 if (mm->free_area_cache > base)
24608 @@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24609 largest_hole = 0;
24610 mm->free_area_cache = base;
24611 }
24612 -try_again:
24613 +
24614 /* make sure it can fit in the remaining address space */
24615 if (mm->free_area_cache < len)
24616 goto fail;
24617
24618 /* either no address requested or cant fit in requested address hole */
24619 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
24620 + addr = (mm->free_area_cache - len);
24621 do {
24622 + addr &= huge_page_mask(h);
24623 + vma = find_vma(mm, addr);
24624 /*
24625 * Lookup failure means no vma is above this address,
24626 * i.e. return with success:
24627 - */
24628 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
24629 - return addr;
24630 -
24631 - /*
24632 * new region fits between prev_vma->vm_end and
24633 * vma->vm_start, use it:
24634 */
24635 - if (addr + len <= vma->vm_start &&
24636 - (!prev_vma || (addr >= prev_vma->vm_end))) {
24637 + if (check_heap_stack_gap(vma, addr, len)) {
24638 /* remember the address as a hint for next time */
24639 - mm->cached_hole_size = largest_hole;
24640 - return (mm->free_area_cache = addr);
24641 - } else {
24642 - /* pull free_area_cache down to the first hole */
24643 - if (mm->free_area_cache == vma->vm_end) {
24644 - mm->free_area_cache = vma->vm_start;
24645 - mm->cached_hole_size = largest_hole;
24646 - }
24647 + mm->cached_hole_size = largest_hole;
24648 + return (mm->free_area_cache = addr);
24649 + }
24650 + /* pull free_area_cache down to the first hole */
24651 + if (mm->free_area_cache == vma->vm_end) {
24652 + mm->free_area_cache = vma->vm_start;
24653 + mm->cached_hole_size = largest_hole;
24654 }
24655
24656 /* remember the largest hole we saw so far */
24657 if (addr + largest_hole < vma->vm_start)
24658 - largest_hole = vma->vm_start - addr;
24659 + largest_hole = vma->vm_start - addr;
24660
24661 /* try just below the current vma->vm_start */
24662 - addr = (vma->vm_start - len) & huge_page_mask(h);
24663 - } while (len <= vma->vm_start);
24664 + addr = skip_heap_stack_gap(vma, len);
24665 + } while (!IS_ERR_VALUE(addr));
24666
24667 fail:
24668 /*
24669 - * if hint left us with no space for the requested
24670 - * mapping then try again:
24671 - */
24672 - if (first_time) {
24673 - mm->free_area_cache = base;
24674 - largest_hole = 0;
24675 - first_time = 0;
24676 - goto try_again;
24677 - }
24678 - /*
24679 * A failed mmap() very likely causes application failure,
24680 * so fall back to the bottom-up function here. This scenario
24681 * can happen with large stack limits and large mmap()
24682 * allocations.
24683 */
24684 - mm->free_area_cache = TASK_UNMAPPED_BASE;
24685 +
24686 +#ifdef CONFIG_PAX_SEGMEXEC
24687 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24688 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
24689 + else
24690 +#endif
24691 +
24692 + mm->mmap_base = TASK_UNMAPPED_BASE;
24693 +
24694 +#ifdef CONFIG_PAX_RANDMMAP
24695 + if (mm->pax_flags & MF_PAX_RANDMMAP)
24696 + mm->mmap_base += mm->delta_mmap;
24697 +#endif
24698 +
24699 + mm->free_area_cache = mm->mmap_base;
24700 mm->cached_hole_size = ~0UL;
24701 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
24702 len, pgoff, flags);
24703 @@ -387,6 +393,7 @@ fail:
24704 /*
24705 * Restore the topdown base:
24706 */
24707 + mm->mmap_base = base;
24708 mm->free_area_cache = base;
24709 mm->cached_hole_size = ~0UL;
24710
24711 @@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24712 struct hstate *h = hstate_file(file);
24713 struct mm_struct *mm = current->mm;
24714 struct vm_area_struct *vma;
24715 + unsigned long pax_task_size = TASK_SIZE;
24716
24717 if (len & ~huge_page_mask(h))
24718 return -EINVAL;
24719 - if (len > TASK_SIZE)
24720 +
24721 +#ifdef CONFIG_PAX_SEGMEXEC
24722 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24723 + pax_task_size = SEGMEXEC_TASK_SIZE;
24724 +#endif
24725 +
24726 + pax_task_size -= PAGE_SIZE;
24727 +
24728 + if (len > pax_task_size)
24729 return -ENOMEM;
24730
24731 if (flags & MAP_FIXED) {
24732 @@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24733 if (addr) {
24734 addr = ALIGN(addr, huge_page_size(h));
24735 vma = find_vma(mm, addr);
24736 - if (TASK_SIZE - len >= addr &&
24737 - (!vma || addr + len <= vma->vm_start))
24738 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
24739 return addr;
24740 }
24741 if (mm->get_unmapped_area == arch_get_unmapped_area)
24742 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
24743 index 73ffd55..ad78676 100644
24744 --- a/arch/x86/mm/init.c
24745 +++ b/arch/x86/mm/init.c
24746 @@ -69,11 +69,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
24747 * cause a hotspot and fill up ZONE_DMA. The page tables
24748 * need roughly 0.5KB per GB.
24749 */
24750 -#ifdef CONFIG_X86_32
24751 - start = 0x7000;
24752 -#else
24753 - start = 0x8000;
24754 -#endif
24755 + start = 0x100000;
24756 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
24757 tables, PAGE_SIZE);
24758 if (e820_table_start == -1UL)
24759 @@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
24760 #endif
24761
24762 set_nx();
24763 - if (nx_enabled)
24764 + if (nx_enabled && cpu_has_nx)
24765 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
24766
24767 /* Enable PSE if available */
24768 @@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
24769 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
24770 * mmio resources as well as potential bios/acpi data regions.
24771 */
24772 +
24773 int devmem_is_allowed(unsigned long pagenr)
24774 {
24775 +#ifdef CONFIG_GRKERNSEC_KMEM
24776 + /* allow BDA */
24777 + if (!pagenr)
24778 + return 1;
24779 + /* allow EBDA */
24780 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
24781 + return 1;
24782 + /* allow ISA/video mem */
24783 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
24784 + return 1;
24785 + /* throw out everything else below 1MB */
24786 + if (pagenr <= 256)
24787 + return 0;
24788 +#else
24789 if (pagenr <= 256)
24790 return 1;
24791 +#endif
24792 +
24793 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
24794 return 0;
24795 if (!page_is_ram(pagenr))
24796 @@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
24797
24798 void free_initmem(void)
24799 {
24800 +
24801 +#ifdef CONFIG_PAX_KERNEXEC
24802 +#ifdef CONFIG_X86_32
24803 + /* PaX: limit KERNEL_CS to actual size */
24804 + unsigned long addr, limit;
24805 + struct desc_struct d;
24806 + int cpu;
24807 +
24808 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
24809 + limit = (limit - 1UL) >> PAGE_SHIFT;
24810 +
24811 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
24812 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
24813 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
24814 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
24815 + }
24816 +
24817 + /* PaX: make KERNEL_CS read-only */
24818 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
24819 + if (!paravirt_enabled())
24820 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
24821 +/*
24822 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
24823 + pgd = pgd_offset_k(addr);
24824 + pud = pud_offset(pgd, addr);
24825 + pmd = pmd_offset(pud, addr);
24826 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24827 + }
24828 +*/
24829 +#ifdef CONFIG_X86_PAE
24830 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
24831 +/*
24832 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
24833 + pgd = pgd_offset_k(addr);
24834 + pud = pud_offset(pgd, addr);
24835 + pmd = pmd_offset(pud, addr);
24836 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24837 + }
24838 +*/
24839 +#endif
24840 +
24841 +#ifdef CONFIG_MODULES
24842 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
24843 +#endif
24844 +
24845 +#else
24846 + pgd_t *pgd;
24847 + pud_t *pud;
24848 + pmd_t *pmd;
24849 + unsigned long addr, end;
24850 +
24851 + /* PaX: make kernel code/rodata read-only, rest non-executable */
24852 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
24853 + pgd = pgd_offset_k(addr);
24854 + pud = pud_offset(pgd, addr);
24855 + pmd = pmd_offset(pud, addr);
24856 + if (!pmd_present(*pmd))
24857 + continue;
24858 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
24859 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24860 + else
24861 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24862 + }
24863 +
24864 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
24865 + end = addr + KERNEL_IMAGE_SIZE;
24866 + for (; addr < end; addr += PMD_SIZE) {
24867 + pgd = pgd_offset_k(addr);
24868 + pud = pud_offset(pgd, addr);
24869 + pmd = pmd_offset(pud, addr);
24870 + if (!pmd_present(*pmd))
24871 + continue;
24872 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
24873 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24874 + }
24875 +#endif
24876 +
24877 + flush_tlb_all();
24878 +#endif
24879 +
24880 free_init_pages("unused kernel memory",
24881 (unsigned long)(&__init_begin),
24882 (unsigned long)(&__init_end));
24883 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
24884 index 30938c1..bda3d5d 100644
24885 --- a/arch/x86/mm/init_32.c
24886 +++ b/arch/x86/mm/init_32.c
24887 @@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
24888 }
24889
24890 /*
24891 - * Creates a middle page table and puts a pointer to it in the
24892 - * given global directory entry. This only returns the gd entry
24893 - * in non-PAE compilation mode, since the middle layer is folded.
24894 - */
24895 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
24896 -{
24897 - pud_t *pud;
24898 - pmd_t *pmd_table;
24899 -
24900 -#ifdef CONFIG_X86_PAE
24901 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
24902 - if (after_bootmem)
24903 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
24904 - else
24905 - pmd_table = (pmd_t *)alloc_low_page();
24906 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
24907 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
24908 - pud = pud_offset(pgd, 0);
24909 - BUG_ON(pmd_table != pmd_offset(pud, 0));
24910 -
24911 - return pmd_table;
24912 - }
24913 -#endif
24914 - pud = pud_offset(pgd, 0);
24915 - pmd_table = pmd_offset(pud, 0);
24916 -
24917 - return pmd_table;
24918 -}
24919 -
24920 -/*
24921 * Create a page table and place a pointer to it in a middle page
24922 * directory entry:
24923 */
24924 @@ -121,13 +91,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
24925 page_table = (pte_t *)alloc_low_page();
24926
24927 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
24928 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24929 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
24930 +#else
24931 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
24932 +#endif
24933 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
24934 }
24935
24936 return pte_offset_kernel(pmd, 0);
24937 }
24938
24939 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
24940 +{
24941 + pud_t *pud;
24942 + pmd_t *pmd_table;
24943 +
24944 + pud = pud_offset(pgd, 0);
24945 + pmd_table = pmd_offset(pud, 0);
24946 +
24947 + return pmd_table;
24948 +}
24949 +
24950 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
24951 {
24952 int pgd_idx = pgd_index(vaddr);
24953 @@ -201,6 +186,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24954 int pgd_idx, pmd_idx;
24955 unsigned long vaddr;
24956 pgd_t *pgd;
24957 + pud_t *pud;
24958 pmd_t *pmd;
24959 pte_t *pte = NULL;
24960
24961 @@ -210,8 +196,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24962 pgd = pgd_base + pgd_idx;
24963
24964 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
24965 - pmd = one_md_table_init(pgd);
24966 - pmd = pmd + pmd_index(vaddr);
24967 + pud = pud_offset(pgd, vaddr);
24968 + pmd = pmd_offset(pud, vaddr);
24969 +
24970 +#ifdef CONFIG_X86_PAE
24971 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24972 +#endif
24973 +
24974 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
24975 pmd++, pmd_idx++) {
24976 pte = page_table_kmap_check(one_page_table_init(pmd),
24977 @@ -223,11 +214,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24978 }
24979 }
24980
24981 -static inline int is_kernel_text(unsigned long addr)
24982 +static inline int is_kernel_text(unsigned long start, unsigned long end)
24983 {
24984 - if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
24985 - return 1;
24986 - return 0;
24987 + if ((start > ktla_ktva((unsigned long)_etext) ||
24988 + end <= ktla_ktva((unsigned long)_stext)) &&
24989 + (start > ktla_ktva((unsigned long)_einittext) ||
24990 + end <= ktla_ktva((unsigned long)_sinittext)) &&
24991 +
24992 +#ifdef CONFIG_ACPI_SLEEP
24993 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
24994 +#endif
24995 +
24996 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
24997 + return 0;
24998 + return 1;
24999 }
25000
25001 /*
25002 @@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned long start,
25003 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
25004 unsigned long start_pfn, end_pfn;
25005 pgd_t *pgd_base = swapper_pg_dir;
25006 - int pgd_idx, pmd_idx, pte_ofs;
25007 + unsigned int pgd_idx, pmd_idx, pte_ofs;
25008 unsigned long pfn;
25009 pgd_t *pgd;
25010 + pud_t *pud;
25011 pmd_t *pmd;
25012 pte_t *pte;
25013 unsigned pages_2m, pages_4k;
25014 @@ -278,8 +279,13 @@ repeat:
25015 pfn = start_pfn;
25016 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
25017 pgd = pgd_base + pgd_idx;
25018 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
25019 - pmd = one_md_table_init(pgd);
25020 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
25021 + pud = pud_offset(pgd, 0);
25022 + pmd = pmd_offset(pud, 0);
25023 +
25024 +#ifdef CONFIG_X86_PAE
25025 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
25026 +#endif
25027
25028 if (pfn >= end_pfn)
25029 continue;
25030 @@ -291,14 +297,13 @@ repeat:
25031 #endif
25032 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
25033 pmd++, pmd_idx++) {
25034 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
25035 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
25036
25037 /*
25038 * Map with big pages if possible, otherwise
25039 * create normal page tables:
25040 */
25041 if (use_pse) {
25042 - unsigned int addr2;
25043 pgprot_t prot = PAGE_KERNEL_LARGE;
25044 /*
25045 * first pass will use the same initial
25046 @@ -308,11 +313,7 @@ repeat:
25047 __pgprot(PTE_IDENT_ATTR |
25048 _PAGE_PSE);
25049
25050 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
25051 - PAGE_OFFSET + PAGE_SIZE-1;
25052 -
25053 - if (is_kernel_text(addr) ||
25054 - is_kernel_text(addr2))
25055 + if (is_kernel_text(address, address + PMD_SIZE))
25056 prot = PAGE_KERNEL_LARGE_EXEC;
25057
25058 pages_2m++;
25059 @@ -329,7 +330,7 @@ repeat:
25060 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
25061 pte += pte_ofs;
25062 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
25063 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
25064 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
25065 pgprot_t prot = PAGE_KERNEL;
25066 /*
25067 * first pass will use the same initial
25068 @@ -337,7 +338,7 @@ repeat:
25069 */
25070 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
25071
25072 - if (is_kernel_text(addr))
25073 + if (is_kernel_text(address, address + PAGE_SIZE))
25074 prot = PAGE_KERNEL_EXEC;
25075
25076 pages_4k++;
25077 @@ -489,7 +490,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
25078
25079 pud = pud_offset(pgd, va);
25080 pmd = pmd_offset(pud, va);
25081 - if (!pmd_present(*pmd))
25082 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
25083 break;
25084
25085 pte = pte_offset_kernel(pmd, va);
25086 @@ -541,9 +542,7 @@ void __init early_ioremap_page_table_range_init(void)
25087
25088 static void __init pagetable_init(void)
25089 {
25090 - pgd_t *pgd_base = swapper_pg_dir;
25091 -
25092 - permanent_kmaps_init(pgd_base);
25093 + permanent_kmaps_init(swapper_pg_dir);
25094 }
25095
25096 #ifdef CONFIG_ACPI_SLEEP
25097 @@ -551,12 +550,12 @@ static void __init pagetable_init(void)
25098 * ACPI suspend needs this for resume, because things like the intel-agp
25099 * driver might have split up a kernel 4MB mapping.
25100 */
25101 -char swsusp_pg_dir[PAGE_SIZE]
25102 +pgd_t swsusp_pg_dir[PTRS_PER_PGD]
25103 __attribute__ ((aligned(PAGE_SIZE)));
25104
25105 static inline void save_pg_dir(void)
25106 {
25107 - memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
25108 + clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
25109 }
25110 #else /* !CONFIG_ACPI_SLEEP */
25111 static inline void save_pg_dir(void)
25112 @@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
25113 flush_tlb_all();
25114 }
25115
25116 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25117 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25118 EXPORT_SYMBOL_GPL(__supported_pte_mask);
25119
25120 /* user-defined highmem size */
25121 @@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void)
25122 * Initialize the boot-time allocator (with low memory only):
25123 */
25124 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
25125 - bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
25126 + bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
25127 PAGE_SIZE);
25128 if (bootmap == -1L)
25129 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
25130 @@ -864,6 +863,12 @@ void __init mem_init(void)
25131
25132 pci_iommu_alloc();
25133
25134 +#ifdef CONFIG_PAX_PER_CPU_PGD
25135 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25136 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25137 + KERNEL_PGD_PTRS);
25138 +#endif
25139 +
25140 #ifdef CONFIG_FLATMEM
25141 BUG_ON(!mem_map);
25142 #endif
25143 @@ -881,7 +886,7 @@ void __init mem_init(void)
25144 set_highmem_pages_init();
25145
25146 codesize = (unsigned long) &_etext - (unsigned long) &_text;
25147 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
25148 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
25149 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
25150
25151 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
25152 @@ -923,10 +928,10 @@ void __init mem_init(void)
25153 ((unsigned long)&__init_end -
25154 (unsigned long)&__init_begin) >> 10,
25155
25156 - (unsigned long)&_etext, (unsigned long)&_edata,
25157 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
25158 + (unsigned long)&_sdata, (unsigned long)&_edata,
25159 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
25160
25161 - (unsigned long)&_text, (unsigned long)&_etext,
25162 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
25163 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
25164
25165 /*
25166 @@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
25167 if (!kernel_set_to_readonly)
25168 return;
25169
25170 + start = ktla_ktva(start);
25171 pr_debug("Set kernel text: %lx - %lx for read write\n",
25172 start, start+size);
25173
25174 @@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
25175 if (!kernel_set_to_readonly)
25176 return;
25177
25178 + start = ktla_ktva(start);
25179 pr_debug("Set kernel text: %lx - %lx for read only\n",
25180 start, start+size);
25181
25182 @@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
25183 unsigned long start = PFN_ALIGN(_text);
25184 unsigned long size = PFN_ALIGN(_etext) - start;
25185
25186 + start = ktla_ktva(start);
25187 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
25188 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
25189 size >> 10);
25190 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
25191 index 7d095ad..25d2549 100644
25192 --- a/arch/x86/mm/init_64.c
25193 +++ b/arch/x86/mm/init_64.c
25194 @@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
25195 pmd = fill_pmd(pud, vaddr);
25196 pte = fill_pte(pmd, vaddr);
25197
25198 + pax_open_kernel();
25199 set_pte(pte, new_pte);
25200 + pax_close_kernel();
25201
25202 /*
25203 * It's enough to flush this one mapping.
25204 @@ -223,14 +225,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
25205 pgd = pgd_offset_k((unsigned long)__va(phys));
25206 if (pgd_none(*pgd)) {
25207 pud = (pud_t *) spp_getpage();
25208 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
25209 - _PAGE_USER));
25210 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
25211 }
25212 pud = pud_offset(pgd, (unsigned long)__va(phys));
25213 if (pud_none(*pud)) {
25214 pmd = (pmd_t *) spp_getpage();
25215 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
25216 - _PAGE_USER));
25217 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
25218 }
25219 pmd = pmd_offset(pud, phys);
25220 BUG_ON(!pmd_none(*pmd));
25221 @@ -675,6 +675,12 @@ void __init mem_init(void)
25222
25223 pci_iommu_alloc();
25224
25225 +#ifdef CONFIG_PAX_PER_CPU_PGD
25226 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25227 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25228 + KERNEL_PGD_PTRS);
25229 +#endif
25230 +
25231 /* clear_bss() already clear the empty_zero_page */
25232
25233 reservedpages = 0;
25234 @@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
25235 static struct vm_area_struct gate_vma = {
25236 .vm_start = VSYSCALL_START,
25237 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
25238 - .vm_page_prot = PAGE_READONLY_EXEC,
25239 - .vm_flags = VM_READ | VM_EXEC
25240 + .vm_page_prot = PAGE_READONLY,
25241 + .vm_flags = VM_READ
25242 };
25243
25244 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
25245 @@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long addr)
25246
25247 const char *arch_vma_name(struct vm_area_struct *vma)
25248 {
25249 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
25250 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
25251 return "[vdso]";
25252 if (vma == &gate_vma)
25253 return "[vsyscall]";
25254 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
25255 index 84e236c..69bd3f6 100644
25256 --- a/arch/x86/mm/iomap_32.c
25257 +++ b/arch/x86/mm/iomap_32.c
25258 @@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
25259 debug_kmap_atomic(type);
25260 idx = type + KM_TYPE_NR * smp_processor_id();
25261 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25262 +
25263 + pax_open_kernel();
25264 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
25265 + pax_close_kernel();
25266 +
25267 arch_flush_lazy_mmu_mode();
25268
25269 return (void *)vaddr;
25270 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
25271 index 2feb9bd..3646202 100644
25272 --- a/arch/x86/mm/ioremap.c
25273 +++ b/arch/x86/mm/ioremap.c
25274 @@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
25275 * Second special case: Some BIOSen report the PC BIOS
25276 * area (640->1Mb) as ram even though it is not.
25277 */
25278 - if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
25279 - pagenr < (BIOS_END >> PAGE_SHIFT))
25280 + if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
25281 + pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
25282 return 0;
25283
25284 for (i = 0; i < e820.nr_map; i++) {
25285 @@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
25286 /*
25287 * Don't allow anybody to remap normal RAM that we're using..
25288 */
25289 - for (pfn = phys_addr >> PAGE_SHIFT;
25290 - (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
25291 - pfn++) {
25292 -
25293 + for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
25294 int is_ram = page_is_ram(pfn);
25295
25296 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
25297 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
25298 return NULL;
25299 WARN_ON_ONCE(is_ram);
25300 }
25301 @@ -407,7 +404,7 @@ static int __init early_ioremap_debug_setup(char *str)
25302 early_param("early_ioremap_debug", early_ioremap_debug_setup);
25303
25304 static __initdata int after_paging_init;
25305 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
25306 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
25307
25308 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
25309 {
25310 @@ -439,8 +436,7 @@ void __init early_ioremap_init(void)
25311 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
25312
25313 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
25314 - memset(bm_pte, 0, sizeof(bm_pte));
25315 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
25316 + pmd_populate_user(&init_mm, pmd, bm_pte);
25317
25318 /*
25319 * The boot-ioremap range spans multiple pmds, for which
25320 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
25321 index 8cc1833..1abbc5b 100644
25322 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
25323 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
25324 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
25325 * memory (e.g. tracked pages)? For now, we need this to avoid
25326 * invoking kmemcheck for PnP BIOS calls.
25327 */
25328 - if (regs->flags & X86_VM_MASK)
25329 + if (v8086_mode(regs))
25330 return false;
25331 - if (regs->cs != __KERNEL_CS)
25332 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
25333 return false;
25334
25335 pte = kmemcheck_pte_lookup(address);
25336 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
25337 index c8191de..2975082 100644
25338 --- a/arch/x86/mm/mmap.c
25339 +++ b/arch/x86/mm/mmap.c
25340 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size(void)
25341 * Leave an at least ~128 MB hole with possible stack randomization.
25342 */
25343 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
25344 -#define MAX_GAP (TASK_SIZE/6*5)
25345 +#define MAX_GAP (pax_task_size/6*5)
25346
25347 /*
25348 * True on X86_32 or when emulating IA32 on X86_64
25349 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
25350 return rnd << PAGE_SHIFT;
25351 }
25352
25353 -static unsigned long mmap_base(void)
25354 +static unsigned long mmap_base(struct mm_struct *mm)
25355 {
25356 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
25357 + unsigned long pax_task_size = TASK_SIZE;
25358 +
25359 +#ifdef CONFIG_PAX_SEGMEXEC
25360 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25361 + pax_task_size = SEGMEXEC_TASK_SIZE;
25362 +#endif
25363
25364 if (gap < MIN_GAP)
25365 gap = MIN_GAP;
25366 else if (gap > MAX_GAP)
25367 gap = MAX_GAP;
25368
25369 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
25370 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
25371 }
25372
25373 /*
25374 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
25375 * does, but not when emulating X86_32
25376 */
25377 -static unsigned long mmap_legacy_base(void)
25378 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
25379 {
25380 - if (mmap_is_ia32())
25381 + if (mmap_is_ia32()) {
25382 +
25383 +#ifdef CONFIG_PAX_SEGMEXEC
25384 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25385 + return SEGMEXEC_TASK_UNMAPPED_BASE;
25386 + else
25387 +#endif
25388 +
25389 return TASK_UNMAPPED_BASE;
25390 - else
25391 + } else
25392 return TASK_UNMAPPED_BASE + mmap_rnd();
25393 }
25394
25395 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(void)
25396 void arch_pick_mmap_layout(struct mm_struct *mm)
25397 {
25398 if (mmap_is_legacy()) {
25399 - mm->mmap_base = mmap_legacy_base();
25400 + mm->mmap_base = mmap_legacy_base(mm);
25401 +
25402 +#ifdef CONFIG_PAX_RANDMMAP
25403 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25404 + mm->mmap_base += mm->delta_mmap;
25405 +#endif
25406 +
25407 mm->get_unmapped_area = arch_get_unmapped_area;
25408 mm->unmap_area = arch_unmap_area;
25409 } else {
25410 - mm->mmap_base = mmap_base();
25411 + mm->mmap_base = mmap_base(mm);
25412 +
25413 +#ifdef CONFIG_PAX_RANDMMAP
25414 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25415 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
25416 +#endif
25417 +
25418 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
25419 mm->unmap_area = arch_unmap_area_topdown;
25420 }
25421 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
25422 index 132772a..b961f11 100644
25423 --- a/arch/x86/mm/mmio-mod.c
25424 +++ b/arch/x86/mm/mmio-mod.c
25425 @@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
25426 break;
25427 default:
25428 {
25429 - unsigned char *ip = (unsigned char *)instptr;
25430 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
25431 my_trace->opcode = MMIO_UNKNOWN_OP;
25432 my_trace->width = 0;
25433 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
25434 @@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
25435 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25436 void __iomem *addr)
25437 {
25438 - static atomic_t next_id;
25439 + static atomic_unchecked_t next_id;
25440 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
25441 /* These are page-unaligned. */
25442 struct mmiotrace_map map = {
25443 @@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25444 .private = trace
25445 },
25446 .phys = offset,
25447 - .id = atomic_inc_return(&next_id)
25448 + .id = atomic_inc_return_unchecked(&next_id)
25449 };
25450 map.map_id = trace->id;
25451
25452 diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
25453 index d253006..e56dd6a 100644
25454 --- a/arch/x86/mm/numa_32.c
25455 +++ b/arch/x86/mm/numa_32.c
25456 @@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
25457 }
25458 #endif
25459
25460 -extern unsigned long find_max_low_pfn(void);
25461 extern unsigned long highend_pfn, highstart_pfn;
25462
25463 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
25464 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
25465 index e1d1069..2251ff3 100644
25466 --- a/arch/x86/mm/pageattr-test.c
25467 +++ b/arch/x86/mm/pageattr-test.c
25468 @@ -36,7 +36,7 @@ enum {
25469
25470 static int pte_testbit(pte_t pte)
25471 {
25472 - return pte_flags(pte) & _PAGE_UNUSED1;
25473 + return pte_flags(pte) & _PAGE_CPA_TEST;
25474 }
25475
25476 struct split_state {
25477 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
25478 index dd38bfb..8c12306 100644
25479 --- a/arch/x86/mm/pageattr.c
25480 +++ b/arch/x86/mm/pageattr.c
25481 @@ -261,16 +261,17 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25482 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
25483 */
25484 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
25485 - pgprot_val(forbidden) |= _PAGE_NX;
25486 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25487
25488 /*
25489 * The kernel text needs to be executable for obvious reasons
25490 * Does not cover __inittext since that is gone later on. On
25491 * 64bit we do not enforce !NX on the low mapping
25492 */
25493 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
25494 - pgprot_val(forbidden) |= _PAGE_NX;
25495 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
25496 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25497
25498 +#ifdef CONFIG_DEBUG_RODATA
25499 /*
25500 * The .rodata section needs to be read-only. Using the pfn
25501 * catches all aliases.
25502 @@ -278,6 +279,14 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25503 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
25504 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
25505 pgprot_val(forbidden) |= _PAGE_RW;
25506 +#endif
25507 +
25508 +#ifdef CONFIG_PAX_KERNEXEC
25509 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
25510 + pgprot_val(forbidden) |= _PAGE_RW;
25511 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25512 + }
25513 +#endif
25514
25515 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
25516
25517 @@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
25518 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
25519 {
25520 /* change init_mm */
25521 + pax_open_kernel();
25522 set_pte_atomic(kpte, pte);
25523 +
25524 #ifdef CONFIG_X86_32
25525 if (!SHARED_KERNEL_PMD) {
25526 +
25527 +#ifdef CONFIG_PAX_PER_CPU_PGD
25528 + unsigned long cpu;
25529 +#else
25530 struct page *page;
25531 +#endif
25532
25533 +#ifdef CONFIG_PAX_PER_CPU_PGD
25534 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
25535 + pgd_t *pgd = get_cpu_pgd(cpu);
25536 +#else
25537 list_for_each_entry(page, &pgd_list, lru) {
25538 - pgd_t *pgd;
25539 + pgd_t *pgd = (pgd_t *)page_address(page);
25540 +#endif
25541 +
25542 pud_t *pud;
25543 pmd_t *pmd;
25544
25545 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
25546 + pgd += pgd_index(address);
25547 pud = pud_offset(pgd, address);
25548 pmd = pmd_offset(pud, address);
25549 set_pte_atomic((pte_t *)pmd, pte);
25550 }
25551 }
25552 #endif
25553 + pax_close_kernel();
25554 }
25555
25556 static int
25557 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
25558 index e78cd0e..de0a817 100644
25559 --- a/arch/x86/mm/pat.c
25560 +++ b/arch/x86/mm/pat.c
25561 @@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
25562
25563 conflict:
25564 printk(KERN_INFO "%s:%d conflicting memory types "
25565 - "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
25566 + "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
25567 new->end, cattr_name(new->type), cattr_name(entry->type));
25568 return -EBUSY;
25569 }
25570 @@ -559,7 +559,7 @@ unlock_ret:
25571
25572 if (err) {
25573 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
25574 - current->comm, current->pid, start, end);
25575 + current->comm, task_pid_nr(current), start, end);
25576 }
25577
25578 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
25579 @@ -689,8 +689,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25580 while (cursor < to) {
25581 if (!devmem_is_allowed(pfn)) {
25582 printk(KERN_INFO
25583 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25584 - current->comm, from, to);
25585 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
25586 + current->comm, from, to, cursor);
25587 return 0;
25588 }
25589 cursor += PAGE_SIZE;
25590 @@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
25591 printk(KERN_INFO
25592 "%s:%d ioremap_change_attr failed %s "
25593 "for %Lx-%Lx\n",
25594 - current->comm, current->pid,
25595 + current->comm, task_pid_nr(current),
25596 cattr_name(flags),
25597 base, (unsigned long long)(base + size));
25598 return -EINVAL;
25599 @@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25600 free_memtype(paddr, paddr + size);
25601 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
25602 " for %Lx-%Lx, got %s\n",
25603 - current->comm, current->pid,
25604 + current->comm, task_pid_nr(current),
25605 cattr_name(want_flags),
25606 (unsigned long long)paddr,
25607 (unsigned long long)(paddr + size),
25608 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
25609 index df3d5c8..c2223e1 100644
25610 --- a/arch/x86/mm/pf_in.c
25611 +++ b/arch/x86/mm/pf_in.c
25612 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
25613 int i;
25614 enum reason_type rv = OTHERS;
25615
25616 - p = (unsigned char *)ins_addr;
25617 + p = (unsigned char *)ktla_ktva(ins_addr);
25618 p += skip_prefix(p, &prf);
25619 p += get_opcode(p, &opcode);
25620
25621 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
25622 struct prefix_bits prf;
25623 int i;
25624
25625 - p = (unsigned char *)ins_addr;
25626 + p = (unsigned char *)ktla_ktva(ins_addr);
25627 p += skip_prefix(p, &prf);
25628 p += get_opcode(p, &opcode);
25629
25630 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
25631 struct prefix_bits prf;
25632 int i;
25633
25634 - p = (unsigned char *)ins_addr;
25635 + p = (unsigned char *)ktla_ktva(ins_addr);
25636 p += skip_prefix(p, &prf);
25637 p += get_opcode(p, &opcode);
25638
25639 @@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
25640 int i;
25641 unsigned long rv;
25642
25643 - p = (unsigned char *)ins_addr;
25644 + p = (unsigned char *)ktla_ktva(ins_addr);
25645 p += skip_prefix(p, &prf);
25646 p += get_opcode(p, &opcode);
25647 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
25648 @@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
25649 int i;
25650 unsigned long rv;
25651
25652 - p = (unsigned char *)ins_addr;
25653 + p = (unsigned char *)ktla_ktva(ins_addr);
25654 p += skip_prefix(p, &prf);
25655 p += get_opcode(p, &opcode);
25656 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
25657 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
25658 index e0e6fad..6b90017 100644
25659 --- a/arch/x86/mm/pgtable.c
25660 +++ b/arch/x86/mm/pgtable.c
25661 @@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *pgd)
25662 list_del(&page->lru);
25663 }
25664
25665 -#define UNSHARED_PTRS_PER_PGD \
25666 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25667 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25668 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
25669
25670 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
25671 +{
25672 + while (count--)
25673 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
25674 +}
25675 +#endif
25676 +
25677 +#ifdef CONFIG_PAX_PER_CPU_PGD
25678 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
25679 +{
25680 + while (count--)
25681 +
25682 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25683 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
25684 +#else
25685 + *dst++ = *src++;
25686 +#endif
25687 +
25688 +}
25689 +#endif
25690 +
25691 +#ifdef CONFIG_X86_64
25692 +#define pxd_t pud_t
25693 +#define pyd_t pgd_t
25694 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
25695 +#define pxd_free(mm, pud) pud_free((mm), (pud))
25696 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
25697 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
25698 +#define PYD_SIZE PGDIR_SIZE
25699 +#else
25700 +#define pxd_t pmd_t
25701 +#define pyd_t pud_t
25702 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
25703 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
25704 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
25705 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
25706 +#define PYD_SIZE PUD_SIZE
25707 +#endif
25708 +
25709 +#ifdef CONFIG_PAX_PER_CPU_PGD
25710 +static inline void pgd_ctor(pgd_t *pgd) {}
25711 +static inline void pgd_dtor(pgd_t *pgd) {}
25712 +#else
25713 static void pgd_ctor(pgd_t *pgd)
25714 {
25715 /* If the pgd points to a shared pagetable level (either the
25716 @@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
25717 pgd_list_del(pgd);
25718 spin_unlock_irqrestore(&pgd_lock, flags);
25719 }
25720 +#endif
25721
25722 /*
25723 * List of all pgd's needed for non-PAE so it can invalidate entries
25724 @@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
25725 * -- wli
25726 */
25727
25728 -#ifdef CONFIG_X86_PAE
25729 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25730 /*
25731 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
25732 * updating the top-level pagetable entries to guarantee the
25733 @@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
25734 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
25735 * and initialize the kernel pmds here.
25736 */
25737 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
25738 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25739
25740 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25741 {
25742 @@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25743 */
25744 flush_tlb_mm(mm);
25745 }
25746 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
25747 +#define PREALLOCATED_PXDS USER_PGD_PTRS
25748 #else /* !CONFIG_X86_PAE */
25749
25750 /* No need to prepopulate any pagetable entries in non-PAE modes. */
25751 -#define PREALLOCATED_PMDS 0
25752 +#define PREALLOCATED_PXDS 0
25753
25754 #endif /* CONFIG_X86_PAE */
25755
25756 -static void free_pmds(pmd_t *pmds[])
25757 +static void free_pxds(pxd_t *pxds[])
25758 {
25759 int i;
25760
25761 - for(i = 0; i < PREALLOCATED_PMDS; i++)
25762 - if (pmds[i])
25763 - free_page((unsigned long)pmds[i]);
25764 + for(i = 0; i < PREALLOCATED_PXDS; i++)
25765 + if (pxds[i])
25766 + free_page((unsigned long)pxds[i]);
25767 }
25768
25769 -static int preallocate_pmds(pmd_t *pmds[])
25770 +static int preallocate_pxds(pxd_t *pxds[])
25771 {
25772 int i;
25773 bool failed = false;
25774
25775 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
25776 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
25777 - if (pmd == NULL)
25778 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
25779 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
25780 + if (pxd == NULL)
25781 failed = true;
25782 - pmds[i] = pmd;
25783 + pxds[i] = pxd;
25784 }
25785
25786 if (failed) {
25787 - free_pmds(pmds);
25788 + free_pxds(pxds);
25789 return -ENOMEM;
25790 }
25791
25792 @@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[])
25793 * preallocate which never got a corresponding vma will need to be
25794 * freed manually.
25795 */
25796 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
25797 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
25798 {
25799 int i;
25800
25801 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
25802 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
25803 pgd_t pgd = pgdp[i];
25804
25805 if (pgd_val(pgd) != 0) {
25806 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
25807 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
25808
25809 - pgdp[i] = native_make_pgd(0);
25810 + set_pgd(pgdp + i, native_make_pgd(0));
25811
25812 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
25813 - pmd_free(mm, pmd);
25814 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
25815 + pxd_free(mm, pxd);
25816 }
25817 }
25818 }
25819
25820 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
25821 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
25822 {
25823 - pud_t *pud;
25824 + pyd_t *pyd;
25825 unsigned long addr;
25826 int i;
25827
25828 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
25829 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
25830 return;
25831
25832 - pud = pud_offset(pgd, 0);
25833 +#ifdef CONFIG_X86_64
25834 + pyd = pyd_offset(mm, 0L);
25835 +#else
25836 + pyd = pyd_offset(pgd, 0L);
25837 +#endif
25838
25839 - for (addr = i = 0; i < PREALLOCATED_PMDS;
25840 - i++, pud++, addr += PUD_SIZE) {
25841 - pmd_t *pmd = pmds[i];
25842 + for (addr = i = 0; i < PREALLOCATED_PXDS;
25843 + i++, pyd++, addr += PYD_SIZE) {
25844 + pxd_t *pxd = pxds[i];
25845
25846 if (i >= KERNEL_PGD_BOUNDARY)
25847 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25848 - sizeof(pmd_t) * PTRS_PER_PMD);
25849 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25850 + sizeof(pxd_t) * PTRS_PER_PMD);
25851
25852 - pud_populate(mm, pud, pmd);
25853 + pyd_populate(mm, pyd, pxd);
25854 }
25855 }
25856
25857 pgd_t *pgd_alloc(struct mm_struct *mm)
25858 {
25859 pgd_t *pgd;
25860 - pmd_t *pmds[PREALLOCATED_PMDS];
25861 + pxd_t *pxds[PREALLOCATED_PXDS];
25862 +
25863 unsigned long flags;
25864
25865 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
25866 @@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25867
25868 mm->pgd = pgd;
25869
25870 - if (preallocate_pmds(pmds) != 0)
25871 + if (preallocate_pxds(pxds) != 0)
25872 goto out_free_pgd;
25873
25874 if (paravirt_pgd_alloc(mm) != 0)
25875 - goto out_free_pmds;
25876 + goto out_free_pxds;
25877
25878 /*
25879 * Make sure that pre-populating the pmds is atomic with
25880 @@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25881 spin_lock_irqsave(&pgd_lock, flags);
25882
25883 pgd_ctor(pgd);
25884 - pgd_prepopulate_pmd(mm, pgd, pmds);
25885 + pgd_prepopulate_pxd(mm, pgd, pxds);
25886
25887 spin_unlock_irqrestore(&pgd_lock, flags);
25888
25889 return pgd;
25890
25891 -out_free_pmds:
25892 - free_pmds(pmds);
25893 +out_free_pxds:
25894 + free_pxds(pxds);
25895 out_free_pgd:
25896 free_page((unsigned long)pgd);
25897 out:
25898 @@ -287,7 +338,7 @@ out:
25899
25900 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
25901 {
25902 - pgd_mop_up_pmds(mm, pgd);
25903 + pgd_mop_up_pxds(mm, pgd);
25904 pgd_dtor(pgd);
25905 paravirt_pgd_free(mm, pgd);
25906 free_page((unsigned long)pgd);
25907 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
25908 index 46c8834..fcab43d 100644
25909 --- a/arch/x86/mm/pgtable_32.c
25910 +++ b/arch/x86/mm/pgtable_32.c
25911 @@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
25912 return;
25913 }
25914 pte = pte_offset_kernel(pmd, vaddr);
25915 +
25916 + pax_open_kernel();
25917 if (pte_val(pteval))
25918 set_pte_at(&init_mm, vaddr, pte, pteval);
25919 else
25920 pte_clear(&init_mm, vaddr, pte);
25921 + pax_close_kernel();
25922
25923 /*
25924 * It's enough to flush this one mapping.
25925 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
25926 index 513d8ed..978c161 100644
25927 --- a/arch/x86/mm/setup_nx.c
25928 +++ b/arch/x86/mm/setup_nx.c
25929 @@ -4,11 +4,10 @@
25930
25931 #include <asm/pgtable.h>
25932
25933 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25934 int nx_enabled;
25935
25936 -#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25937 -static int disable_nx __cpuinitdata;
25938 -
25939 +#ifndef CONFIG_PAX_PAGEEXEC
25940 /*
25941 * noexec = on|off
25942 *
25943 @@ -22,32 +21,26 @@ static int __init noexec_setup(char *str)
25944 if (!str)
25945 return -EINVAL;
25946 if (!strncmp(str, "on", 2)) {
25947 - __supported_pte_mask |= _PAGE_NX;
25948 - disable_nx = 0;
25949 + nx_enabled = 1;
25950 } else if (!strncmp(str, "off", 3)) {
25951 - disable_nx = 1;
25952 - __supported_pte_mask &= ~_PAGE_NX;
25953 + nx_enabled = 0;
25954 }
25955 return 0;
25956 }
25957 early_param("noexec", noexec_setup);
25958 #endif
25959 +#endif
25960
25961 #ifdef CONFIG_X86_PAE
25962 void __init set_nx(void)
25963 {
25964 - unsigned int v[4], l, h;
25965 + if (!nx_enabled && cpu_has_nx) {
25966 + unsigned l, h;
25967
25968 - if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
25969 - cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
25970 -
25971 - if ((v[3] & (1 << 20)) && !disable_nx) {
25972 - rdmsr(MSR_EFER, l, h);
25973 - l |= EFER_NX;
25974 - wrmsr(MSR_EFER, l, h);
25975 - nx_enabled = 1;
25976 - __supported_pte_mask |= _PAGE_NX;
25977 - }
25978 + __supported_pte_mask &= ~_PAGE_NX;
25979 + rdmsr(MSR_EFER, l, h);
25980 + l &= ~EFER_NX;
25981 + wrmsr(MSR_EFER, l, h);
25982 }
25983 }
25984 #else
25985 @@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
25986 unsigned long efer;
25987
25988 rdmsrl(MSR_EFER, efer);
25989 - if (!(efer & EFER_NX) || disable_nx)
25990 + if (!(efer & EFER_NX) || !nx_enabled)
25991 __supported_pte_mask &= ~_PAGE_NX;
25992 }
25993 #endif
25994 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
25995 index 36fe08e..b123d3a 100644
25996 --- a/arch/x86/mm/tlb.c
25997 +++ b/arch/x86/mm/tlb.c
25998 @@ -61,7 +61,11 @@ void leave_mm(int cpu)
25999 BUG();
26000 cpumask_clear_cpu(cpu,
26001 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
26002 +
26003 +#ifndef CONFIG_PAX_PER_CPU_PGD
26004 load_cr3(swapper_pg_dir);
26005 +#endif
26006 +
26007 }
26008 EXPORT_SYMBOL_GPL(leave_mm);
26009
26010 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
26011 index 829edf0..672adb3 100644
26012 --- a/arch/x86/oprofile/backtrace.c
26013 +++ b/arch/x86/oprofile/backtrace.c
26014 @@ -115,7 +115,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
26015 {
26016 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
26017
26018 - if (!user_mode_vm(regs)) {
26019 + if (!user_mode(regs)) {
26020 unsigned long stack = kernel_stack_pointer(regs);
26021 if (depth)
26022 dump_trace(NULL, regs, (unsigned long *)stack, 0,
26023 diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
26024 index e6a160a..36deff6 100644
26025 --- a/arch/x86/oprofile/op_model_p4.c
26026 +++ b/arch/x86/oprofile/op_model_p4.c
26027 @@ -50,7 +50,7 @@ static inline void setup_num_counters(void)
26028 #endif
26029 }
26030
26031 -static int inline addr_increment(void)
26032 +static inline int addr_increment(void)
26033 {
26034 #ifdef CONFIG_SMP
26035 return smp_num_siblings == 2 ? 2 : 1;
26036 diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
26037 index 1331fcf..03901b2 100644
26038 --- a/arch/x86/pci/common.c
26039 +++ b/arch/x86/pci/common.c
26040 @@ -31,8 +31,8 @@ int noioapicreroute = 1;
26041 int pcibios_last_bus = -1;
26042 unsigned long pirq_table_addr;
26043 struct pci_bus *pci_root_bus;
26044 -struct pci_raw_ops *raw_pci_ops;
26045 -struct pci_raw_ops *raw_pci_ext_ops;
26046 +const struct pci_raw_ops *raw_pci_ops;
26047 +const struct pci_raw_ops *raw_pci_ext_ops;
26048
26049 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
26050 int reg, int len, u32 *val)
26051 diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c
26052 index 347d882..4baf6b6 100644
26053 --- a/arch/x86/pci/direct.c
26054 +++ b/arch/x86/pci/direct.c
26055 @@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int seg, unsigned int bus,
26056
26057 #undef PCI_CONF1_ADDRESS
26058
26059 -struct pci_raw_ops pci_direct_conf1 = {
26060 +const struct pci_raw_ops pci_direct_conf1 = {
26061 .read = pci_conf1_read,
26062 .write = pci_conf1_write,
26063 };
26064 @@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int seg, unsigned int bus,
26065
26066 #undef PCI_CONF2_ADDRESS
26067
26068 -struct pci_raw_ops pci_direct_conf2 = {
26069 +const struct pci_raw_ops pci_direct_conf2 = {
26070 .read = pci_conf2_read,
26071 .write = pci_conf2_write,
26072 };
26073 @@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
26074 * This should be close to trivial, but it isn't, because there are buggy
26075 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
26076 */
26077 -static int __init pci_sanity_check(struct pci_raw_ops *o)
26078 +static int __init pci_sanity_check(const struct pci_raw_ops *o)
26079 {
26080 u32 x = 0;
26081 int year, devfn;
26082 diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c
26083 index f10a7e9..0425342 100644
26084 --- a/arch/x86/pci/mmconfig_32.c
26085 +++ b/arch/x86/pci/mmconfig_32.c
26086 @@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
26087 return 0;
26088 }
26089
26090 -static struct pci_raw_ops pci_mmcfg = {
26091 +static const struct pci_raw_ops pci_mmcfg = {
26092 .read = pci_mmcfg_read,
26093 .write = pci_mmcfg_write,
26094 };
26095 diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c
26096 index 94349f8..41600a7 100644
26097 --- a/arch/x86/pci/mmconfig_64.c
26098 +++ b/arch/x86/pci/mmconfig_64.c
26099 @@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
26100 return 0;
26101 }
26102
26103 -static struct pci_raw_ops pci_mmcfg = {
26104 +static const struct pci_raw_ops pci_mmcfg = {
26105 .read = pci_mmcfg_read,
26106 .write = pci_mmcfg_write,
26107 };
26108 diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c
26109 index 8eb295e..86bd657 100644
26110 --- a/arch/x86/pci/numaq_32.c
26111 +++ b/arch/x86/pci/numaq_32.c
26112 @@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned int seg, unsigned int bus,
26113
26114 #undef PCI_CONF1_MQ_ADDRESS
26115
26116 -static struct pci_raw_ops pci_direct_conf1_mq = {
26117 +static const struct pci_raw_ops pci_direct_conf1_mq = {
26118 .read = pci_conf1_mq_read,
26119 .write = pci_conf1_mq_write
26120 };
26121 diff --git a/arch/x86/pci/olpc.c b/arch/x86/pci/olpc.c
26122 index b889d82..5a58a0a 100644
26123 --- a/arch/x86/pci/olpc.c
26124 +++ b/arch/x86/pci/olpc.c
26125 @@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int seg, unsigned int bus,
26126 return 0;
26127 }
26128
26129 -static struct pci_raw_ops pci_olpc_conf = {
26130 +static const struct pci_raw_ops pci_olpc_conf = {
26131 .read = pci_olpc_read,
26132 .write = pci_olpc_write,
26133 };
26134 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
26135 index 1c975cc..ffd0536 100644
26136 --- a/arch/x86/pci/pcbios.c
26137 +++ b/arch/x86/pci/pcbios.c
26138 @@ -56,50 +56,93 @@ union bios32 {
26139 static struct {
26140 unsigned long address;
26141 unsigned short segment;
26142 -} bios32_indirect = { 0, __KERNEL_CS };
26143 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
26144
26145 /*
26146 * Returns the entry point for the given service, NULL on error
26147 */
26148
26149 -static unsigned long bios32_service(unsigned long service)
26150 +static unsigned long __devinit bios32_service(unsigned long service)
26151 {
26152 unsigned char return_code; /* %al */
26153 unsigned long address; /* %ebx */
26154 unsigned long length; /* %ecx */
26155 unsigned long entry; /* %edx */
26156 unsigned long flags;
26157 + struct desc_struct d, *gdt;
26158
26159 local_irq_save(flags);
26160 - __asm__("lcall *(%%edi); cld"
26161 +
26162 + gdt = get_cpu_gdt_table(smp_processor_id());
26163 +
26164 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
26165 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26166 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
26167 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26168 +
26169 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
26170 : "=a" (return_code),
26171 "=b" (address),
26172 "=c" (length),
26173 "=d" (entry)
26174 : "0" (service),
26175 "1" (0),
26176 - "D" (&bios32_indirect));
26177 + "D" (&bios32_indirect),
26178 + "r"(__PCIBIOS_DS)
26179 + : "memory");
26180 +
26181 + pax_open_kernel();
26182 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
26183 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
26184 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
26185 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
26186 + pax_close_kernel();
26187 +
26188 local_irq_restore(flags);
26189
26190 switch (return_code) {
26191 - case 0:
26192 - return address + entry;
26193 - case 0x80: /* Not present */
26194 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26195 - return 0;
26196 - default: /* Shouldn't happen */
26197 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26198 - service, return_code);
26199 + case 0: {
26200 + int cpu;
26201 + unsigned char flags;
26202 +
26203 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
26204 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
26205 + printk(KERN_WARNING "bios32_service: not valid\n");
26206 return 0;
26207 + }
26208 + address = address + PAGE_OFFSET;
26209 + length += 16UL; /* some BIOSs underreport this... */
26210 + flags = 4;
26211 + if (length >= 64*1024*1024) {
26212 + length >>= PAGE_SHIFT;
26213 + flags |= 8;
26214 + }
26215 +
26216 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
26217 + gdt = get_cpu_gdt_table(cpu);
26218 + pack_descriptor(&d, address, length, 0x9b, flags);
26219 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26220 + pack_descriptor(&d, address, length, 0x93, flags);
26221 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26222 + }
26223 + return entry;
26224 + }
26225 + case 0x80: /* Not present */
26226 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26227 + return 0;
26228 + default: /* Shouldn't happen */
26229 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26230 + service, return_code);
26231 + return 0;
26232 }
26233 }
26234
26235 static struct {
26236 unsigned long address;
26237 unsigned short segment;
26238 -} pci_indirect = { 0, __KERNEL_CS };
26239 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
26240
26241 -static int pci_bios_present;
26242 +static int pci_bios_present __read_only;
26243
26244 static int __devinit check_pcibios(void)
26245 {
26246 @@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
26247 unsigned long flags, pcibios_entry;
26248
26249 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
26250 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
26251 + pci_indirect.address = pcibios_entry;
26252
26253 local_irq_save(flags);
26254 - __asm__(
26255 - "lcall *(%%edi); cld\n\t"
26256 + __asm__("movw %w6, %%ds\n\t"
26257 + "lcall *%%ss:(%%edi); cld\n\t"
26258 + "push %%ss\n\t"
26259 + "pop %%ds\n\t"
26260 "jc 1f\n\t"
26261 "xor %%ah, %%ah\n"
26262 "1:"
26263 @@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
26264 "=b" (ebx),
26265 "=c" (ecx)
26266 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
26267 - "D" (&pci_indirect)
26268 + "D" (&pci_indirect),
26269 + "r" (__PCIBIOS_DS)
26270 : "memory");
26271 local_irq_restore(flags);
26272
26273 @@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26274
26275 switch (len) {
26276 case 1:
26277 - __asm__("lcall *(%%esi); cld\n\t"
26278 + __asm__("movw %w6, %%ds\n\t"
26279 + "lcall *%%ss:(%%esi); cld\n\t"
26280 + "push %%ss\n\t"
26281 + "pop %%ds\n\t"
26282 "jc 1f\n\t"
26283 "xor %%ah, %%ah\n"
26284 "1:"
26285 @@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26286 : "1" (PCIBIOS_READ_CONFIG_BYTE),
26287 "b" (bx),
26288 "D" ((long)reg),
26289 - "S" (&pci_indirect));
26290 + "S" (&pci_indirect),
26291 + "r" (__PCIBIOS_DS));
26292 /*
26293 * Zero-extend the result beyond 8 bits, do not trust the
26294 * BIOS having done it:
26295 @@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26296 *value &= 0xff;
26297 break;
26298 case 2:
26299 - __asm__("lcall *(%%esi); cld\n\t"
26300 + __asm__("movw %w6, %%ds\n\t"
26301 + "lcall *%%ss:(%%esi); cld\n\t"
26302 + "push %%ss\n\t"
26303 + "pop %%ds\n\t"
26304 "jc 1f\n\t"
26305 "xor %%ah, %%ah\n"
26306 "1:"
26307 @@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26308 : "1" (PCIBIOS_READ_CONFIG_WORD),
26309 "b" (bx),
26310 "D" ((long)reg),
26311 - "S" (&pci_indirect));
26312 + "S" (&pci_indirect),
26313 + "r" (__PCIBIOS_DS));
26314 /*
26315 * Zero-extend the result beyond 16 bits, do not trust the
26316 * BIOS having done it:
26317 @@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26318 *value &= 0xffff;
26319 break;
26320 case 4:
26321 - __asm__("lcall *(%%esi); cld\n\t"
26322 + __asm__("movw %w6, %%ds\n\t"
26323 + "lcall *%%ss:(%%esi); cld\n\t"
26324 + "push %%ss\n\t"
26325 + "pop %%ds\n\t"
26326 "jc 1f\n\t"
26327 "xor %%ah, %%ah\n"
26328 "1:"
26329 @@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26330 : "1" (PCIBIOS_READ_CONFIG_DWORD),
26331 "b" (bx),
26332 "D" ((long)reg),
26333 - "S" (&pci_indirect));
26334 + "S" (&pci_indirect),
26335 + "r" (__PCIBIOS_DS));
26336 break;
26337 }
26338
26339 @@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26340
26341 switch (len) {
26342 case 1:
26343 - __asm__("lcall *(%%esi); cld\n\t"
26344 + __asm__("movw %w6, %%ds\n\t"
26345 + "lcall *%%ss:(%%esi); cld\n\t"
26346 + "push %%ss\n\t"
26347 + "pop %%ds\n\t"
26348 "jc 1f\n\t"
26349 "xor %%ah, %%ah\n"
26350 "1:"
26351 @@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26352 "c" (value),
26353 "b" (bx),
26354 "D" ((long)reg),
26355 - "S" (&pci_indirect));
26356 + "S" (&pci_indirect),
26357 + "r" (__PCIBIOS_DS));
26358 break;
26359 case 2:
26360 - __asm__("lcall *(%%esi); cld\n\t"
26361 + __asm__("movw %w6, %%ds\n\t"
26362 + "lcall *%%ss:(%%esi); cld\n\t"
26363 + "push %%ss\n\t"
26364 + "pop %%ds\n\t"
26365 "jc 1f\n\t"
26366 "xor %%ah, %%ah\n"
26367 "1:"
26368 @@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26369 "c" (value),
26370 "b" (bx),
26371 "D" ((long)reg),
26372 - "S" (&pci_indirect));
26373 + "S" (&pci_indirect),
26374 + "r" (__PCIBIOS_DS));
26375 break;
26376 case 4:
26377 - __asm__("lcall *(%%esi); cld\n\t"
26378 + __asm__("movw %w6, %%ds\n\t"
26379 + "lcall *%%ss:(%%esi); cld\n\t"
26380 + "push %%ss\n\t"
26381 + "pop %%ds\n\t"
26382 "jc 1f\n\t"
26383 "xor %%ah, %%ah\n"
26384 "1:"
26385 @@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26386 "c" (value),
26387 "b" (bx),
26388 "D" ((long)reg),
26389 - "S" (&pci_indirect));
26390 + "S" (&pci_indirect),
26391 + "r" (__PCIBIOS_DS));
26392 break;
26393 }
26394
26395 @@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26396 * Function table for BIOS32 access
26397 */
26398
26399 -static struct pci_raw_ops pci_bios_access = {
26400 +static const struct pci_raw_ops pci_bios_access = {
26401 .read = pci_bios_read,
26402 .write = pci_bios_write
26403 };
26404 @@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_access = {
26405 * Try to find PCI BIOS.
26406 */
26407
26408 -static struct pci_raw_ops * __devinit pci_find_bios(void)
26409 +static const struct pci_raw_ops * __devinit pci_find_bios(void)
26410 {
26411 union bios32 *check;
26412 unsigned char sum;
26413 @@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26414
26415 DBG("PCI: Fetching IRQ routing table... ");
26416 __asm__("push %%es\n\t"
26417 + "movw %w8, %%ds\n\t"
26418 "push %%ds\n\t"
26419 "pop %%es\n\t"
26420 - "lcall *(%%esi); cld\n\t"
26421 + "lcall *%%ss:(%%esi); cld\n\t"
26422 "pop %%es\n\t"
26423 + "push %%ss\n\t"
26424 + "pop %%ds\n"
26425 "jc 1f\n\t"
26426 "xor %%ah, %%ah\n"
26427 "1:"
26428 @@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26429 "1" (0),
26430 "D" ((long) &opt),
26431 "S" (&pci_indirect),
26432 - "m" (opt)
26433 + "m" (opt),
26434 + "r" (__PCIBIOS_DS)
26435 : "memory");
26436 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
26437 if (ret & 0xff00)
26438 @@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26439 {
26440 int ret;
26441
26442 - __asm__("lcall *(%%esi); cld\n\t"
26443 + __asm__("movw %w5, %%ds\n\t"
26444 + "lcall *%%ss:(%%esi); cld\n\t"
26445 + "push %%ss\n\t"
26446 + "pop %%ds\n"
26447 "jc 1f\n\t"
26448 "xor %%ah, %%ah\n"
26449 "1:"
26450 @@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26451 : "0" (PCIBIOS_SET_PCI_HW_INT),
26452 "b" ((dev->bus->number << 8) | dev->devfn),
26453 "c" ((irq << 8) | (pin + 10)),
26454 - "S" (&pci_indirect));
26455 + "S" (&pci_indirect),
26456 + "r" (__PCIBIOS_DS));
26457 return !(ret & 0xff00);
26458 }
26459 EXPORT_SYMBOL(pcibios_set_irq_routing);
26460 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
26461 index fa0f651..9d8f3d9 100644
26462 --- a/arch/x86/power/cpu.c
26463 +++ b/arch/x86/power/cpu.c
26464 @@ -129,7 +129,7 @@ static void do_fpu_end(void)
26465 static void fix_processor_context(void)
26466 {
26467 int cpu = smp_processor_id();
26468 - struct tss_struct *t = &per_cpu(init_tss, cpu);
26469 + struct tss_struct *t = init_tss + cpu;
26470
26471 set_tss_desc(cpu, t); /*
26472 * This just modifies memory; should not be
26473 @@ -139,7 +139,9 @@ static void fix_processor_context(void)
26474 */
26475
26476 #ifdef CONFIG_X86_64
26477 + pax_open_kernel();
26478 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
26479 + pax_close_kernel();
26480
26481 syscall_init(); /* This sets MSR_*STAR and related */
26482 #endif
26483 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
26484 index dd78ef6..f9d928d 100644
26485 --- a/arch/x86/vdso/Makefile
26486 +++ b/arch/x86/vdso/Makefile
26487 @@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
26488 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
26489 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
26490
26491 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26492 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26493 GCOV_PROFILE := n
26494
26495 #
26496 diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
26497 index ee55754..0013b2e 100644
26498 --- a/arch/x86/vdso/vclock_gettime.c
26499 +++ b/arch/x86/vdso/vclock_gettime.c
26500 @@ -22,24 +22,48 @@
26501 #include <asm/hpet.h>
26502 #include <asm/unistd.h>
26503 #include <asm/io.h>
26504 +#include <asm/fixmap.h>
26505 #include "vextern.h"
26506
26507 #define gtod vdso_vsyscall_gtod_data
26508
26509 +notrace noinline long __vdso_fallback_time(long *t)
26510 +{
26511 + long secs;
26512 + asm volatile("syscall"
26513 + : "=a" (secs)
26514 + : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
26515 + return secs;
26516 +}
26517 +
26518 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
26519 {
26520 long ret;
26521 asm("syscall" : "=a" (ret) :
26522 - "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
26523 + "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
26524 return ret;
26525 }
26526
26527 +notrace static inline cycle_t __vdso_vread_hpet(void)
26528 +{
26529 + return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
26530 +}
26531 +
26532 +notrace static inline cycle_t __vdso_vread_tsc(void)
26533 +{
26534 + cycle_t ret = (cycle_t)vget_cycles();
26535 +
26536 + return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
26537 +}
26538 +
26539 notrace static inline long vgetns(void)
26540 {
26541 long v;
26542 - cycles_t (*vread)(void);
26543 - vread = gtod->clock.vread;
26544 - v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
26545 + if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
26546 + v = __vdso_vread_tsc();
26547 + else
26548 + v = __vdso_vread_hpet();
26549 + v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
26550 return (v * gtod->clock.mult) >> gtod->clock.shift;
26551 }
26552
26553 @@ -113,7 +137,9 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts)
26554
26555 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
26556 {
26557 - if (likely(gtod->sysctl_enabled))
26558 + if (likely(gtod->sysctl_enabled &&
26559 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
26560 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
26561 switch (clock) {
26562 case CLOCK_REALTIME:
26563 if (likely(gtod->clock.vread))
26564 @@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
26565 int clock_gettime(clockid_t, struct timespec *)
26566 __attribute__((weak, alias("__vdso_clock_gettime")));
26567
26568 +notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
26569 +{
26570 + long ret;
26571 + asm("syscall" : "=a" (ret) :
26572 + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
26573 + return ret;
26574 +}
26575 +
26576 notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
26577 {
26578 - long ret;
26579 - if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
26580 + if (likely(gtod->sysctl_enabled &&
26581 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
26582 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
26583 + {
26584 if (likely(tv != NULL)) {
26585 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
26586 offsetof(struct timespec, tv_nsec) ||
26587 @@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
26588 }
26589 return 0;
26590 }
26591 - asm("syscall" : "=a" (ret) :
26592 - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
26593 - return ret;
26594 + return __vdso_fallback_gettimeofday(tv, tz);
26595 }
26596 int gettimeofday(struct timeval *, struct timezone *)
26597 __attribute__((weak, alias("__vdso_gettimeofday")));
26598 diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/vdso/vdso.lds.S
26599 index 4e5dd3b..00ba15e 100644
26600 --- a/arch/x86/vdso/vdso.lds.S
26601 +++ b/arch/x86/vdso/vdso.lds.S
26602 @@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
26603 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
26604 #include "vextern.h"
26605 #undef VEXTERN
26606 +
26607 +#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
26608 +VEXTERN(fallback_gettimeofday)
26609 +VEXTERN(fallback_time)
26610 +VEXTERN(getcpu)
26611 +#undef VEXTERN
26612 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
26613 index 58bc00f..d53fb48 100644
26614 --- a/arch/x86/vdso/vdso32-setup.c
26615 +++ b/arch/x86/vdso/vdso32-setup.c
26616 @@ -25,6 +25,7 @@
26617 #include <asm/tlbflush.h>
26618 #include <asm/vdso.h>
26619 #include <asm/proto.h>
26620 +#include <asm/mman.h>
26621
26622 enum {
26623 VDSO_DISABLED = 0,
26624 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
26625 void enable_sep_cpu(void)
26626 {
26627 int cpu = get_cpu();
26628 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
26629 + struct tss_struct *tss = init_tss + cpu;
26630
26631 if (!boot_cpu_has(X86_FEATURE_SEP)) {
26632 put_cpu();
26633 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
26634 gate_vma.vm_start = FIXADDR_USER_START;
26635 gate_vma.vm_end = FIXADDR_USER_END;
26636 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
26637 - gate_vma.vm_page_prot = __P101;
26638 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
26639 /*
26640 * Make sure the vDSO gets into every core dump.
26641 * Dumping its contents makes post-mortem fully interpretable later
26642 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26643 if (compat)
26644 addr = VDSO_HIGH_BASE;
26645 else {
26646 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
26647 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
26648 if (IS_ERR_VALUE(addr)) {
26649 ret = addr;
26650 goto up_fail;
26651 }
26652 }
26653
26654 - current->mm->context.vdso = (void *)addr;
26655 + current->mm->context.vdso = addr;
26656
26657 if (compat_uses_vma || !compat) {
26658 /*
26659 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26660 }
26661
26662 current_thread_info()->sysenter_return =
26663 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26664 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26665
26666 up_fail:
26667 if (ret)
26668 - current->mm->context.vdso = NULL;
26669 + current->mm->context.vdso = 0;
26670
26671 up_write(&mm->mmap_sem);
26672
26673 @@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
26674
26675 const char *arch_vma_name(struct vm_area_struct *vma)
26676 {
26677 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
26678 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
26679 return "[vdso]";
26680 +
26681 +#ifdef CONFIG_PAX_SEGMEXEC
26682 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
26683 + return "[vdso]";
26684 +#endif
26685 +
26686 return NULL;
26687 }
26688
26689 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
26690 struct mm_struct *mm = tsk->mm;
26691
26692 /* Check to see if this task was created in compat vdso mode */
26693 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
26694 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
26695 return &gate_vma;
26696 return NULL;
26697 }
26698 diff --git a/arch/x86/vdso/vextern.h b/arch/x86/vdso/vextern.h
26699 index 1683ba2..48d07f3 100644
26700 --- a/arch/x86/vdso/vextern.h
26701 +++ b/arch/x86/vdso/vextern.h
26702 @@ -11,6 +11,5 @@
26703 put into vextern.h and be referenced as a pointer with vdso prefix.
26704 The main kernel later fills in the values. */
26705
26706 -VEXTERN(jiffies)
26707 VEXTERN(vgetcpu_mode)
26708 VEXTERN(vsyscall_gtod_data)
26709 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
26710 index 21e1aeb..2c0b3c4 100644
26711 --- a/arch/x86/vdso/vma.c
26712 +++ b/arch/x86/vdso/vma.c
26713 @@ -17,8 +17,6 @@
26714 #include "vextern.h" /* Just for VMAGIC. */
26715 #undef VEXTERN
26716
26717 -unsigned int __read_mostly vdso_enabled = 1;
26718 -
26719 extern char vdso_start[], vdso_end[];
26720 extern unsigned short vdso_sync_cpuid;
26721
26722 @@ -27,10 +25,8 @@ static unsigned vdso_size;
26723
26724 static inline void *var_ref(void *p, char *name)
26725 {
26726 - if (*(void **)p != (void *)VMAGIC) {
26727 - printk("VDSO: variable %s broken\n", name);
26728 - vdso_enabled = 0;
26729 - }
26730 + if (*(void **)p != (void *)VMAGIC)
26731 + panic("VDSO: variable %s broken\n", name);
26732 return p;
26733 }
26734
26735 @@ -57,21 +53,18 @@ static int __init init_vdso_vars(void)
26736 if (!vbase)
26737 goto oom;
26738
26739 - if (memcmp(vbase, "\177ELF", 4)) {
26740 - printk("VDSO: I'm broken; not ELF\n");
26741 - vdso_enabled = 0;
26742 - }
26743 + if (memcmp(vbase, ELFMAG, SELFMAG))
26744 + panic("VDSO: I'm broken; not ELF\n");
26745
26746 #define VEXTERN(x) \
26747 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
26748 #include "vextern.h"
26749 #undef VEXTERN
26750 + vunmap(vbase);
26751 return 0;
26752
26753 oom:
26754 - printk("Cannot allocate vdso\n");
26755 - vdso_enabled = 0;
26756 - return -ENOMEM;
26757 + panic("Cannot allocate vdso\n");
26758 }
26759 __initcall(init_vdso_vars);
26760
26761 @@ -102,13 +95,15 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
26762 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26763 {
26764 struct mm_struct *mm = current->mm;
26765 - unsigned long addr;
26766 + unsigned long addr = 0;
26767 int ret;
26768
26769 - if (!vdso_enabled)
26770 - return 0;
26771 -
26772 down_write(&mm->mmap_sem);
26773 +
26774 +#ifdef CONFIG_PAX_RANDMMAP
26775 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26776 +#endif
26777 +
26778 addr = vdso_addr(mm->start_stack, vdso_size);
26779 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
26780 if (IS_ERR_VALUE(addr)) {
26781 @@ -116,7 +111,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26782 goto up_fail;
26783 }
26784
26785 - current->mm->context.vdso = (void *)addr;
26786 + current->mm->context.vdso = addr;
26787
26788 ret = install_special_mapping(mm, addr, vdso_size,
26789 VM_READ|VM_EXEC|
26790 @@ -124,7 +119,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26791 VM_ALWAYSDUMP,
26792 vdso_pages);
26793 if (ret) {
26794 - current->mm->context.vdso = NULL;
26795 + current->mm->context.vdso = 0;
26796 goto up_fail;
26797 }
26798
26799 @@ -132,10 +127,3 @@ up_fail:
26800 up_write(&mm->mmap_sem);
26801 return ret;
26802 }
26803 -
26804 -static __init int vdso_setup(char *s)
26805 -{
26806 - vdso_enabled = simple_strtoul(s, NULL, 0);
26807 - return 0;
26808 -}
26809 -__setup("vdso=", vdso_setup);
26810 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
26811 index 0087b00..eecb34f 100644
26812 --- a/arch/x86/xen/enlighten.c
26813 +++ b/arch/x86/xen/enlighten.c
26814 @@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
26815
26816 struct shared_info xen_dummy_shared_info;
26817
26818 -void *xen_initial_gdt;
26819 -
26820 /*
26821 * Point at some empty memory to start with. We map the real shared_info
26822 * page as soon as fixmap is up and running.
26823 @@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
26824
26825 preempt_disable();
26826
26827 - start = __get_cpu_var(idt_desc).address;
26828 + start = (unsigned long)__get_cpu_var(idt_desc).address;
26829 end = start + __get_cpu_var(idt_desc).size + 1;
26830
26831 xen_mc_flush();
26832 @@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic_ops __initdata = {
26833 #endif
26834 };
26835
26836 -static void xen_reboot(int reason)
26837 +static __noreturn void xen_reboot(int reason)
26838 {
26839 struct sched_shutdown r = { .reason = reason };
26840
26841 @@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
26842 BUG();
26843 }
26844
26845 -static void xen_restart(char *msg)
26846 +static __noreturn void xen_restart(char *msg)
26847 {
26848 xen_reboot(SHUTDOWN_reboot);
26849 }
26850
26851 -static void xen_emergency_restart(void)
26852 +static __noreturn void xen_emergency_restart(void)
26853 {
26854 xen_reboot(SHUTDOWN_reboot);
26855 }
26856
26857 -static void xen_machine_halt(void)
26858 +static __noreturn void xen_machine_halt(void)
26859 {
26860 xen_reboot(SHUTDOWN_poweroff);
26861 }
26862 @@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(void)
26863 */
26864 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
26865
26866 -#ifdef CONFIG_X86_64
26867 /* Work out if we support NX */
26868 - check_efer();
26869 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26870 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
26871 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
26872 + unsigned l, h;
26873 +
26874 +#ifdef CONFIG_X86_PAE
26875 + nx_enabled = 1;
26876 +#endif
26877 + __supported_pte_mask |= _PAGE_NX;
26878 + rdmsr(MSR_EFER, l, h);
26879 + l |= EFER_NX;
26880 + wrmsr(MSR_EFER, l, h);
26881 + }
26882 #endif
26883
26884 xen_setup_features();
26885 @@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(void)
26886
26887 machine_ops = xen_machine_ops;
26888
26889 - /*
26890 - * The only reliable way to retain the initial address of the
26891 - * percpu gdt_page is to remember it here, so we can go and
26892 - * mark it RW later, when the initial percpu area is freed.
26893 - */
26894 - xen_initial_gdt = &per_cpu(gdt_page, 0);
26895 -
26896 xen_smp_init();
26897
26898 pgd = (pgd_t *)xen_start_info->pt_base;
26899 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
26900 index 3f90a2c..2c2ad84 100644
26901 --- a/arch/x86/xen/mmu.c
26902 +++ b/arch/x86/xen/mmu.c
26903 @@ -1719,6 +1719,9 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
26904 convert_pfn_mfn(init_level4_pgt);
26905 convert_pfn_mfn(level3_ident_pgt);
26906 convert_pfn_mfn(level3_kernel_pgt);
26907 + convert_pfn_mfn(level3_vmalloc_start_pgt);
26908 + convert_pfn_mfn(level3_vmalloc_end_pgt);
26909 + convert_pfn_mfn(level3_vmemmap_pgt);
26910
26911 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
26912 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
26913 @@ -1737,7 +1740,11 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
26914 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
26915 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
26916 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
26917 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
26918 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
26919 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
26920 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
26921 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
26922 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
26923 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
26924
26925 @@ -1860,6 +1867,7 @@ static __init void xen_post_allocator_init(void)
26926 pv_mmu_ops.set_pud = xen_set_pud;
26927 #if PAGETABLE_LEVELS == 4
26928 pv_mmu_ops.set_pgd = xen_set_pgd;
26929 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
26930 #endif
26931
26932 /* This will work as long as patching hasn't happened yet
26933 @@ -1946,6 +1954,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
26934 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
26935 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
26936 .set_pgd = xen_set_pgd_hyper,
26937 + .set_pgd_batched = xen_set_pgd_hyper,
26938
26939 .alloc_pud = xen_alloc_pmd_init,
26940 .release_pud = xen_release_pmd_init,
26941 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
26942 index a96204a..fca9b8e 100644
26943 --- a/arch/x86/xen/smp.c
26944 +++ b/arch/x86/xen/smp.c
26945 @@ -168,11 +168,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
26946 {
26947 BUG_ON(smp_processor_id() != 0);
26948 native_smp_prepare_boot_cpu();
26949 -
26950 - /* We've switched to the "real" per-cpu gdt, so make sure the
26951 - old memory can be recycled */
26952 - make_lowmem_page_readwrite(xen_initial_gdt);
26953 -
26954 xen_setup_vcpu_info_placement();
26955 }
26956
26957 @@ -241,12 +236,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
26958 gdt = get_cpu_gdt_table(cpu);
26959
26960 ctxt->flags = VGCF_IN_KERNEL;
26961 - ctxt->user_regs.ds = __USER_DS;
26962 - ctxt->user_regs.es = __USER_DS;
26963 + ctxt->user_regs.ds = __KERNEL_DS;
26964 + ctxt->user_regs.es = __KERNEL_DS;
26965 ctxt->user_regs.ss = __KERNEL_DS;
26966 #ifdef CONFIG_X86_32
26967 ctxt->user_regs.fs = __KERNEL_PERCPU;
26968 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
26969 + savesegment(gs, ctxt->user_regs.gs);
26970 #else
26971 ctxt->gs_base_kernel = per_cpu_offset(cpu);
26972 #endif
26973 @@ -297,13 +292,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
26974 int rc;
26975
26976 per_cpu(current_task, cpu) = idle;
26977 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
26978 #ifdef CONFIG_X86_32
26979 irq_ctx_init(cpu);
26980 #else
26981 clear_tsk_thread_flag(idle, TIF_FORK);
26982 - per_cpu(kernel_stack, cpu) =
26983 - (unsigned long)task_stack_page(idle) -
26984 - KERNEL_STACK_OFFSET + THREAD_SIZE;
26985 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
26986 #endif
26987 xen_setup_runstate_info(cpu);
26988 xen_setup_timer(cpu);
26989 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
26990 index 9a95a9c..4f39e774 100644
26991 --- a/arch/x86/xen/xen-asm_32.S
26992 +++ b/arch/x86/xen/xen-asm_32.S
26993 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
26994 ESP_OFFSET=4 # bytes pushed onto stack
26995
26996 /*
26997 - * Store vcpu_info pointer for easy access. Do it this way to
26998 - * avoid having to reload %fs
26999 + * Store vcpu_info pointer for easy access.
27000 */
27001 #ifdef CONFIG_SMP
27002 - GET_THREAD_INFO(%eax)
27003 - movl TI_cpu(%eax), %eax
27004 - movl __per_cpu_offset(,%eax,4), %eax
27005 - mov per_cpu__xen_vcpu(%eax), %eax
27006 + push %fs
27007 + mov $(__KERNEL_PERCPU), %eax
27008 + mov %eax, %fs
27009 + mov PER_CPU_VAR(xen_vcpu), %eax
27010 + pop %fs
27011 #else
27012 movl per_cpu__xen_vcpu, %eax
27013 #endif
27014 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
27015 index 1a5ff24..a187d40 100644
27016 --- a/arch/x86/xen/xen-head.S
27017 +++ b/arch/x86/xen/xen-head.S
27018 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
27019 #ifdef CONFIG_X86_32
27020 mov %esi,xen_start_info
27021 mov $init_thread_union+THREAD_SIZE,%esp
27022 +#ifdef CONFIG_SMP
27023 + movl $cpu_gdt_table,%edi
27024 + movl $__per_cpu_load,%eax
27025 + movw %ax,__KERNEL_PERCPU + 2(%edi)
27026 + rorl $16,%eax
27027 + movb %al,__KERNEL_PERCPU + 4(%edi)
27028 + movb %ah,__KERNEL_PERCPU + 7(%edi)
27029 + movl $__per_cpu_end - 1,%eax
27030 + subl $__per_cpu_start,%eax
27031 + movw %ax,__KERNEL_PERCPU + 0(%edi)
27032 +#endif
27033 #else
27034 mov %rsi,xen_start_info
27035 mov $init_thread_union+THREAD_SIZE,%rsp
27036 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
27037 index f9153a3..51eab3d 100644
27038 --- a/arch/x86/xen/xen-ops.h
27039 +++ b/arch/x86/xen/xen-ops.h
27040 @@ -10,8 +10,6 @@
27041 extern const char xen_hypervisor_callback[];
27042 extern const char xen_failsafe_callback[];
27043
27044 -extern void *xen_initial_gdt;
27045 -
27046 struct trap_info;
27047 void xen_copy_trap_info(struct trap_info *traps);
27048
27049 diff --git a/block/blk-integrity.c b/block/blk-integrity.c
27050 index 15c6308..96e83c2 100644
27051 --- a/block/blk-integrity.c
27052 +++ b/block/blk-integrity.c
27053 @@ -278,7 +278,7 @@ static struct attribute *integrity_attrs[] = {
27054 NULL,
27055 };
27056
27057 -static struct sysfs_ops integrity_ops = {
27058 +static const struct sysfs_ops integrity_ops = {
27059 .show = &integrity_attr_show,
27060 .store = &integrity_attr_store,
27061 };
27062 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
27063 index ca56420..f2fc409 100644
27064 --- a/block/blk-iopoll.c
27065 +++ b/block/blk-iopoll.c
27066 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
27067 }
27068 EXPORT_SYMBOL(blk_iopoll_complete);
27069
27070 -static void blk_iopoll_softirq(struct softirq_action *h)
27071 +static void blk_iopoll_softirq(void)
27072 {
27073 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
27074 int rearm = 0, budget = blk_iopoll_budget;
27075 diff --git a/block/blk-map.c b/block/blk-map.c
27076 index 30a7e51..0aeec6a 100644
27077 --- a/block/blk-map.c
27078 +++ b/block/blk-map.c
27079 @@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
27080 * direct dma. else, set up kernel bounce buffers
27081 */
27082 uaddr = (unsigned long) ubuf;
27083 - if (blk_rq_aligned(q, ubuf, len) && !map_data)
27084 + if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
27085 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
27086 else
27087 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
27088 @@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
27089 for (i = 0; i < iov_count; i++) {
27090 unsigned long uaddr = (unsigned long)iov[i].iov_base;
27091
27092 + if (!iov[i].iov_len)
27093 + return -EINVAL;
27094 +
27095 if (uaddr & queue_dma_alignment(q)) {
27096 unaligned = 1;
27097 break;
27098 }
27099 - if (!iov[i].iov_len)
27100 - return -EINVAL;
27101 }
27102
27103 if (unaligned || (q->dma_pad_mask & len) || map_data)
27104 @@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
27105 if (!len || !kbuf)
27106 return -EINVAL;
27107
27108 - do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
27109 + do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
27110 if (do_copy)
27111 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
27112 else
27113 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
27114 index ee9c216..58d410a 100644
27115 --- a/block/blk-softirq.c
27116 +++ b/block/blk-softirq.c
27117 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
27118 * Softirq action handler - move entries to local list and loop over them
27119 * while passing them to the queue registered handler.
27120 */
27121 -static void blk_done_softirq(struct softirq_action *h)
27122 +static void blk_done_softirq(void)
27123 {
27124 struct list_head *cpu_list, local_list;
27125
27126 diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
27127 index bb9c5ea..5330d48 100644
27128 --- a/block/blk-sysfs.c
27129 +++ b/block/blk-sysfs.c
27130 @@ -414,7 +414,7 @@ static void blk_release_queue(struct kobject *kobj)
27131 kmem_cache_free(blk_requestq_cachep, q);
27132 }
27133
27134 -static struct sysfs_ops queue_sysfs_ops = {
27135 +static const struct sysfs_ops queue_sysfs_ops = {
27136 .show = queue_attr_show,
27137 .store = queue_attr_store,
27138 };
27139 diff --git a/block/bsg.c b/block/bsg.c
27140 index 7154a7a..08ac2f0 100644
27141 --- a/block/bsg.c
27142 +++ b/block/bsg.c
27143 @@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
27144 struct sg_io_v4 *hdr, struct bsg_device *bd,
27145 fmode_t has_write_perm)
27146 {
27147 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27148 + unsigned char *cmdptr;
27149 +
27150 if (hdr->request_len > BLK_MAX_CDB) {
27151 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
27152 if (!rq->cmd)
27153 return -ENOMEM;
27154 - }
27155 + cmdptr = rq->cmd;
27156 + } else
27157 + cmdptr = tmpcmd;
27158
27159 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
27160 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
27161 hdr->request_len))
27162 return -EFAULT;
27163
27164 + if (cmdptr != rq->cmd)
27165 + memcpy(rq->cmd, cmdptr, hdr->request_len);
27166 +
27167 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
27168 if (blk_verify_command(rq->cmd, has_write_perm))
27169 return -EPERM;
27170 @@ -282,7 +290,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
27171 rq->next_rq = next_rq;
27172 next_rq->cmd_type = rq->cmd_type;
27173
27174 - dxferp = (void*)(unsigned long)hdr->din_xferp;
27175 + dxferp = (void __user *)(unsigned long)hdr->din_xferp;
27176 ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
27177 hdr->din_xfer_len, GFP_KERNEL);
27178 if (ret)
27179 @@ -291,10 +299,10 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
27180
27181 if (hdr->dout_xfer_len) {
27182 dxfer_len = hdr->dout_xfer_len;
27183 - dxferp = (void*)(unsigned long)hdr->dout_xferp;
27184 + dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
27185 } else if (hdr->din_xfer_len) {
27186 dxfer_len = hdr->din_xfer_len;
27187 - dxferp = (void*)(unsigned long)hdr->din_xferp;
27188 + dxferp = (void __user *)(unsigned long)hdr->din_xferp;
27189 } else
27190 dxfer_len = 0;
27191
27192 @@ -436,7 +444,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
27193 int len = min_t(unsigned int, hdr->max_response_len,
27194 rq->sense_len);
27195
27196 - ret = copy_to_user((void*)(unsigned long)hdr->response,
27197 + ret = copy_to_user((void __user *)(unsigned long)hdr->response,
27198 rq->sense, len);
27199 if (!ret)
27200 hdr->response_len = len;
27201 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
27202 index 9bd086c..ca1fc22 100644
27203 --- a/block/compat_ioctl.c
27204 +++ b/block/compat_ioctl.c
27205 @@ -354,7 +354,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
27206 err |= __get_user(f->spec1, &uf->spec1);
27207 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
27208 err |= __get_user(name, &uf->name);
27209 - f->name = compat_ptr(name);
27210 + f->name = (void __force_kernel *)compat_ptr(name);
27211 if (err) {
27212 err = -EFAULT;
27213 goto out;
27214 diff --git a/block/elevator.c b/block/elevator.c
27215 index a847046..75a1746 100644
27216 --- a/block/elevator.c
27217 +++ b/block/elevator.c
27218 @@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, struct attribute *attr,
27219 return error;
27220 }
27221
27222 -static struct sysfs_ops elv_sysfs_ops = {
27223 +static const struct sysfs_ops elv_sysfs_ops = {
27224 .show = elv_attr_show,
27225 .store = elv_attr_store,
27226 };
27227 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
27228 index 1d5a780..0e2fb8c 100644
27229 --- a/block/scsi_ioctl.c
27230 +++ b/block/scsi_ioctl.c
27231 @@ -220,8 +220,20 @@ EXPORT_SYMBOL(blk_verify_command);
27232 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
27233 struct sg_io_hdr *hdr, fmode_t mode)
27234 {
27235 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
27236 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27237 + unsigned char *cmdptr;
27238 +
27239 + if (rq->cmd != rq->__cmd)
27240 + cmdptr = rq->cmd;
27241 + else
27242 + cmdptr = tmpcmd;
27243 +
27244 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
27245 return -EFAULT;
27246 +
27247 + if (cmdptr != rq->cmd)
27248 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
27249 +
27250 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
27251 return -EPERM;
27252
27253 @@ -430,6 +442,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27254 int err;
27255 unsigned int in_len, out_len, bytes, opcode, cmdlen;
27256 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
27257 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27258 + unsigned char *cmdptr;
27259
27260 if (!sic)
27261 return -EINVAL;
27262 @@ -463,9 +477,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27263 */
27264 err = -EFAULT;
27265 rq->cmd_len = cmdlen;
27266 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
27267 +
27268 + if (rq->cmd != rq->__cmd)
27269 + cmdptr = rq->cmd;
27270 + else
27271 + cmdptr = tmpcmd;
27272 +
27273 + if (copy_from_user(cmdptr, sic->data, cmdlen))
27274 goto error;
27275
27276 + if (rq->cmd != cmdptr)
27277 + memcpy(rq->cmd, cmdptr, cmdlen);
27278 +
27279 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
27280 goto error;
27281
27282 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
27283 index 3533582..f143117 100644
27284 --- a/crypto/cryptd.c
27285 +++ b/crypto/cryptd.c
27286 @@ -50,7 +50,7 @@ struct cryptd_blkcipher_ctx {
27287
27288 struct cryptd_blkcipher_request_ctx {
27289 crypto_completion_t complete;
27290 -};
27291 +} __no_const;
27292
27293 struct cryptd_hash_ctx {
27294 struct crypto_shash *child;
27295 diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
27296 index a90d260..7a9765e 100644
27297 --- a/crypto/gf128mul.c
27298 +++ b/crypto/gf128mul.c
27299 @@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128 *b)
27300 for (i = 0; i < 7; ++i)
27301 gf128mul_x_lle(&p[i + 1], &p[i]);
27302
27303 - memset(r, 0, sizeof(r));
27304 + memset(r, 0, sizeof(*r));
27305 for (i = 0;;) {
27306 u8 ch = ((u8 *)b)[15 - i];
27307
27308 @@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128 *b)
27309 for (i = 0; i < 7; ++i)
27310 gf128mul_x_bbe(&p[i + 1], &p[i]);
27311
27312 - memset(r, 0, sizeof(r));
27313 + memset(r, 0, sizeof(*r));
27314 for (i = 0;;) {
27315 u8 ch = ((u8 *)b)[i];
27316
27317 diff --git a/crypto/serpent.c b/crypto/serpent.c
27318 index b651a55..023297d 100644
27319 --- a/crypto/serpent.c
27320 +++ b/crypto/serpent.c
27321 @@ -21,6 +21,7 @@
27322 #include <asm/byteorder.h>
27323 #include <linux/crypto.h>
27324 #include <linux/types.h>
27325 +#include <linux/sched.h>
27326
27327 /* Key is padded to the maximum of 256 bits before round key generation.
27328 * Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
27329 @@ -224,6 +225,8 @@ static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
27330 u32 r0,r1,r2,r3,r4;
27331 int i;
27332
27333 + pax_track_stack();
27334 +
27335 /* Copy key, add padding */
27336
27337 for (i = 0; i < keylen; ++i)
27338 diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
27339 index 0d2cdb8..d8de48d 100644
27340 --- a/drivers/acpi/acpi_pad.c
27341 +++ b/drivers/acpi/acpi_pad.c
27342 @@ -30,7 +30,7 @@
27343 #include <acpi/acpi_bus.h>
27344 #include <acpi/acpi_drivers.h>
27345
27346 -#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
27347 +#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
27348 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
27349 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
27350 static DEFINE_MUTEX(isolated_cpus_lock);
27351 diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
27352 index 3f4602b..2e41d36 100644
27353 --- a/drivers/acpi/battery.c
27354 +++ b/drivers/acpi/battery.c
27355 @@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
27356 }
27357
27358 static struct battery_file {
27359 - struct file_operations ops;
27360 + const struct file_operations ops;
27361 mode_t mode;
27362 const char *name;
27363 } acpi_battery_file[] = {
27364 diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
27365 index 7338b6a..82f0257 100644
27366 --- a/drivers/acpi/dock.c
27367 +++ b/drivers/acpi/dock.c
27368 @@ -77,7 +77,7 @@ struct dock_dependent_device {
27369 struct list_head list;
27370 struct list_head hotplug_list;
27371 acpi_handle handle;
27372 - struct acpi_dock_ops *ops;
27373 + const struct acpi_dock_ops *ops;
27374 void *context;
27375 };
27376
27377 @@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier);
27378 * the dock driver after _DCK is executed.
27379 */
27380 int
27381 -register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
27382 +register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
27383 void *context)
27384 {
27385 struct dock_dependent_device *dd;
27386 diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
27387 index 7c1c59e..2993595 100644
27388 --- a/drivers/acpi/osl.c
27389 +++ b/drivers/acpi/osl.c
27390 @@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
27391 void __iomem *virt_addr;
27392
27393 virt_addr = ioremap(phys_addr, width);
27394 + if (!virt_addr)
27395 + return AE_NO_MEMORY;
27396 if (!value)
27397 value = &dummy;
27398
27399 @@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
27400 void __iomem *virt_addr;
27401
27402 virt_addr = ioremap(phys_addr, width);
27403 + if (!virt_addr)
27404 + return AE_NO_MEMORY;
27405
27406 switch (width) {
27407 case 8:
27408 diff --git a/drivers/acpi/power_meter.c b/drivers/acpi/power_meter.c
27409 index c216062..eec10d2 100644
27410 --- a/drivers/acpi/power_meter.c
27411 +++ b/drivers/acpi/power_meter.c
27412 @@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
27413 return res;
27414
27415 temp /= 1000;
27416 - if (temp < 0)
27417 - return -EINVAL;
27418
27419 mutex_lock(&resource->lock);
27420 resource->trip[attr->index - 7] = temp;
27421 diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
27422 index d0d25e2..961643d 100644
27423 --- a/drivers/acpi/proc.c
27424 +++ b/drivers/acpi/proc.c
27425 @@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct file *file,
27426 size_t count, loff_t * ppos)
27427 {
27428 struct list_head *node, *next;
27429 - char strbuf[5];
27430 - char str[5] = "";
27431 - unsigned int len = count;
27432 + char strbuf[5] = {0};
27433 struct acpi_device *found_dev = NULL;
27434
27435 - if (len > 4)
27436 - len = 4;
27437 - if (len < 0)
27438 - return -EFAULT;
27439 + if (count > 4)
27440 + count = 4;
27441
27442 - if (copy_from_user(strbuf, buffer, len))
27443 + if (copy_from_user(strbuf, buffer, count))
27444 return -EFAULT;
27445 - strbuf[len] = '\0';
27446 - sscanf(strbuf, "%s", str);
27447 + strbuf[count] = '\0';
27448
27449 mutex_lock(&acpi_device_lock);
27450 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
27451 @@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct file *file,
27452 if (!dev->wakeup.flags.valid)
27453 continue;
27454
27455 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
27456 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
27457 dev->wakeup.state.enabled =
27458 dev->wakeup.state.enabled ? 0 : 1;
27459 found_dev = dev;
27460 diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
27461 index 7102474..de8ad22 100644
27462 --- a/drivers/acpi/processor_core.c
27463 +++ b/drivers/acpi/processor_core.c
27464 @@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
27465 return 0;
27466 }
27467
27468 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
27469 + BUG_ON(pr->id >= nr_cpu_ids);
27470
27471 /*
27472 * Buggy BIOS check
27473 diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
27474 index d933980..5761f13 100644
27475 --- a/drivers/acpi/sbshc.c
27476 +++ b/drivers/acpi/sbshc.c
27477 @@ -17,7 +17,7 @@
27478
27479 #define PREFIX "ACPI: "
27480
27481 -#define ACPI_SMB_HC_CLASS "smbus_host_controller"
27482 +#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
27483 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
27484
27485 struct acpi_smb_hc {
27486 diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
27487 index 0458094..6978e7b 100644
27488 --- a/drivers/acpi/sleep.c
27489 +++ b/drivers/acpi/sleep.c
27490 @@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(suspend_state_t pm_state)
27491 }
27492 }
27493
27494 -static struct platform_suspend_ops acpi_suspend_ops = {
27495 +static const struct platform_suspend_ops acpi_suspend_ops = {
27496 .valid = acpi_suspend_state_valid,
27497 .begin = acpi_suspend_begin,
27498 .prepare_late = acpi_pm_prepare,
27499 @@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspend_state_t pm_state)
27500 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
27501 * been requested.
27502 */
27503 -static struct platform_suspend_ops acpi_suspend_ops_old = {
27504 +static const struct platform_suspend_ops acpi_suspend_ops_old = {
27505 .valid = acpi_suspend_state_valid,
27506 .begin = acpi_suspend_begin_old,
27507 .prepare_late = acpi_pm_disable_gpes,
27508 @@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
27509 acpi_enable_all_runtime_gpes();
27510 }
27511
27512 -static struct platform_hibernation_ops acpi_hibernation_ops = {
27513 +static const struct platform_hibernation_ops acpi_hibernation_ops = {
27514 .begin = acpi_hibernation_begin,
27515 .end = acpi_pm_end,
27516 .pre_snapshot = acpi_hibernation_pre_snapshot,
27517 @@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot_old(void)
27518 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
27519 * been requested.
27520 */
27521 -static struct platform_hibernation_ops acpi_hibernation_ops_old = {
27522 +static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
27523 .begin = acpi_hibernation_begin_old,
27524 .end = acpi_pm_end,
27525 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
27526 diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
27527 index 05dff63..b662ab7 100644
27528 --- a/drivers/acpi/video.c
27529 +++ b/drivers/acpi/video.c
27530 @@ -359,7 +359,7 @@ static int acpi_video_set_brightness(struct backlight_device *bd)
27531 vd->brightness->levels[request_level]);
27532 }
27533
27534 -static struct backlight_ops acpi_backlight_ops = {
27535 +static const struct backlight_ops acpi_backlight_ops = {
27536 .get_brightness = acpi_video_get_brightness,
27537 .update_status = acpi_video_set_brightness,
27538 };
27539 diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
27540 index 6787aab..23ffb0e 100644
27541 --- a/drivers/ata/ahci.c
27542 +++ b/drivers/ata/ahci.c
27543 @@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sht = {
27544 .sdev_attrs = ahci_sdev_attrs,
27545 };
27546
27547 -static struct ata_port_operations ahci_ops = {
27548 +static const struct ata_port_operations ahci_ops = {
27549 .inherits = &sata_pmp_port_ops,
27550
27551 .qc_defer = sata_pmp_qc_defer_cmd_switch,
27552 @@ -424,17 +424,17 @@ static struct ata_port_operations ahci_ops = {
27553 .port_stop = ahci_port_stop,
27554 };
27555
27556 -static struct ata_port_operations ahci_vt8251_ops = {
27557 +static const struct ata_port_operations ahci_vt8251_ops = {
27558 .inherits = &ahci_ops,
27559 .hardreset = ahci_vt8251_hardreset,
27560 };
27561
27562 -static struct ata_port_operations ahci_p5wdh_ops = {
27563 +static const struct ata_port_operations ahci_p5wdh_ops = {
27564 .inherits = &ahci_ops,
27565 .hardreset = ahci_p5wdh_hardreset,
27566 };
27567
27568 -static struct ata_port_operations ahci_sb600_ops = {
27569 +static const struct ata_port_operations ahci_sb600_ops = {
27570 .inherits = &ahci_ops,
27571 .softreset = ahci_sb600_softreset,
27572 .pmp_softreset = ahci_sb600_softreset,
27573 diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
27574 index 99e7196..4968c77 100644
27575 --- a/drivers/ata/ata_generic.c
27576 +++ b/drivers/ata/ata_generic.c
27577 @@ -104,7 +104,7 @@ static struct scsi_host_template generic_sht = {
27578 ATA_BMDMA_SHT(DRV_NAME),
27579 };
27580
27581 -static struct ata_port_operations generic_port_ops = {
27582 +static const struct ata_port_operations generic_port_ops = {
27583 .inherits = &ata_bmdma_port_ops,
27584 .cable_detect = ata_cable_unknown,
27585 .set_mode = generic_set_mode,
27586 diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
27587 index c33591d..000c121 100644
27588 --- a/drivers/ata/ata_piix.c
27589 +++ b/drivers/ata/ata_piix.c
27590 @@ -318,7 +318,7 @@ static struct scsi_host_template piix_sht = {
27591 ATA_BMDMA_SHT(DRV_NAME),
27592 };
27593
27594 -static struct ata_port_operations piix_pata_ops = {
27595 +static const struct ata_port_operations piix_pata_ops = {
27596 .inherits = &ata_bmdma32_port_ops,
27597 .cable_detect = ata_cable_40wire,
27598 .set_piomode = piix_set_piomode,
27599 @@ -326,22 +326,22 @@ static struct ata_port_operations piix_pata_ops = {
27600 .prereset = piix_pata_prereset,
27601 };
27602
27603 -static struct ata_port_operations piix_vmw_ops = {
27604 +static const struct ata_port_operations piix_vmw_ops = {
27605 .inherits = &piix_pata_ops,
27606 .bmdma_status = piix_vmw_bmdma_status,
27607 };
27608
27609 -static struct ata_port_operations ich_pata_ops = {
27610 +static const struct ata_port_operations ich_pata_ops = {
27611 .inherits = &piix_pata_ops,
27612 .cable_detect = ich_pata_cable_detect,
27613 .set_dmamode = ich_set_dmamode,
27614 };
27615
27616 -static struct ata_port_operations piix_sata_ops = {
27617 +static const struct ata_port_operations piix_sata_ops = {
27618 .inherits = &ata_bmdma_port_ops,
27619 };
27620
27621 -static struct ata_port_operations piix_sidpr_sata_ops = {
27622 +static const struct ata_port_operations piix_sidpr_sata_ops = {
27623 .inherits = &piix_sata_ops,
27624 .hardreset = sata_std_hardreset,
27625 .scr_read = piix_sidpr_scr_read,
27626 diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
27627 index b0882cd..c295d65 100644
27628 --- a/drivers/ata/libata-acpi.c
27629 +++ b/drivers/ata/libata-acpi.c
27630 @@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data)
27631 ata_acpi_uevent(dev->link->ap, dev, event);
27632 }
27633
27634 -static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
27635 +static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
27636 .handler = ata_acpi_dev_notify_dock,
27637 .uevent = ata_acpi_dev_uevent,
27638 };
27639
27640 -static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
27641 +static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
27642 .handler = ata_acpi_ap_notify_dock,
27643 .uevent = ata_acpi_ap_uevent,
27644 };
27645 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
27646 index d4f7f99..94f603e 100644
27647 --- a/drivers/ata/libata-core.c
27648 +++ b/drivers/ata/libata-core.c
27649 @@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
27650 struct ata_port *ap;
27651 unsigned int tag;
27652
27653 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27654 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27655 ap = qc->ap;
27656
27657 qc->flags = 0;
27658 @@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
27659 struct ata_port *ap;
27660 struct ata_link *link;
27661
27662 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27663 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27664 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
27665 ap = qc->ap;
27666 link = qc->dev->link;
27667 @@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device *gendev, void *res)
27668 * LOCKING:
27669 * None.
27670 */
27671 -static void ata_finalize_port_ops(struct ata_port_operations *ops)
27672 +static void ata_finalize_port_ops(const struct ata_port_operations *ops)
27673 {
27674 static DEFINE_SPINLOCK(lock);
27675 const struct ata_port_operations *cur;
27676 @@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27677 return;
27678
27679 spin_lock(&lock);
27680 + pax_open_kernel();
27681
27682 for (cur = ops->inherits; cur; cur = cur->inherits) {
27683 void **inherit = (void **)cur;
27684 @@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27685 if (IS_ERR(*pp))
27686 *pp = NULL;
27687
27688 - ops->inherits = NULL;
27689 + *(struct ata_port_operations **)&ops->inherits = NULL;
27690
27691 + pax_close_kernel();
27692 spin_unlock(&lock);
27693 }
27694
27695 @@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host)
27696 */
27697 /* KILLME - the only user left is ipr */
27698 void ata_host_init(struct ata_host *host, struct device *dev,
27699 - unsigned long flags, struct ata_port_operations *ops)
27700 + unsigned long flags, const struct ata_port_operations *ops)
27701 {
27702 spin_lock_init(&host->lock);
27703 host->dev = dev;
27704 @@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(struct ata_port *ap)
27705 /* truly dummy */
27706 }
27707
27708 -struct ata_port_operations ata_dummy_port_ops = {
27709 +const struct ata_port_operations ata_dummy_port_ops = {
27710 .qc_prep = ata_noop_qc_prep,
27711 .qc_issue = ata_dummy_qc_issue,
27712 .error_handler = ata_dummy_error_handler,
27713 diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
27714 index e5bdb9b..45a8e72 100644
27715 --- a/drivers/ata/libata-eh.c
27716 +++ b/drivers/ata/libata-eh.c
27717 @@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
27718 {
27719 struct ata_link *link;
27720
27721 + pax_track_stack();
27722 +
27723 ata_for_each_link(link, ap, HOST_FIRST)
27724 ata_eh_link_report(link);
27725 }
27726 @@ -3594,7 +3596,7 @@ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
27727 */
27728 void ata_std_error_handler(struct ata_port *ap)
27729 {
27730 - struct ata_port_operations *ops = ap->ops;
27731 + const struct ata_port_operations *ops = ap->ops;
27732 ata_reset_fn_t hardreset = ops->hardreset;
27733
27734 /* ignore built-in hardreset if SCR access is not available */
27735 diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
27736 index 51f0ffb..19ce3e3 100644
27737 --- a/drivers/ata/libata-pmp.c
27738 +++ b/drivers/ata/libata-pmp.c
27739 @@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(struct ata_link *link, int *link_tries)
27740 */
27741 static int sata_pmp_eh_recover(struct ata_port *ap)
27742 {
27743 - struct ata_port_operations *ops = ap->ops;
27744 + const struct ata_port_operations *ops = ap->ops;
27745 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
27746 struct ata_link *pmp_link = &ap->link;
27747 struct ata_device *pmp_dev = pmp_link->device;
27748 diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
27749 index d8f35fe..288180a 100644
27750 --- a/drivers/ata/pata_acpi.c
27751 +++ b/drivers/ata/pata_acpi.c
27752 @@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_sht = {
27753 ATA_BMDMA_SHT(DRV_NAME),
27754 };
27755
27756 -static struct ata_port_operations pacpi_ops = {
27757 +static const struct ata_port_operations pacpi_ops = {
27758 .inherits = &ata_bmdma_port_ops,
27759 .qc_issue = pacpi_qc_issue,
27760 .cable_detect = pacpi_cable_detect,
27761 diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
27762 index 9434114..1f2f364 100644
27763 --- a/drivers/ata/pata_ali.c
27764 +++ b/drivers/ata/pata_ali.c
27765 @@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht = {
27766 * Port operations for PIO only ALi
27767 */
27768
27769 -static struct ata_port_operations ali_early_port_ops = {
27770 +static const struct ata_port_operations ali_early_port_ops = {
27771 .inherits = &ata_sff_port_ops,
27772 .cable_detect = ata_cable_40wire,
27773 .set_piomode = ali_set_piomode,
27774 @@ -382,7 +382,7 @@ static const struct ata_port_operations ali_dma_base_ops = {
27775 * Port operations for DMA capable ALi without cable
27776 * detect
27777 */
27778 -static struct ata_port_operations ali_20_port_ops = {
27779 +static const struct ata_port_operations ali_20_port_ops = {
27780 .inherits = &ali_dma_base_ops,
27781 .cable_detect = ata_cable_40wire,
27782 .mode_filter = ali_20_filter,
27783 @@ -393,7 +393,7 @@ static struct ata_port_operations ali_20_port_ops = {
27784 /*
27785 * Port operations for DMA capable ALi with cable detect
27786 */
27787 -static struct ata_port_operations ali_c2_port_ops = {
27788 +static const struct ata_port_operations ali_c2_port_ops = {
27789 .inherits = &ali_dma_base_ops,
27790 .check_atapi_dma = ali_check_atapi_dma,
27791 .cable_detect = ali_c2_cable_detect,
27792 @@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2_port_ops = {
27793 /*
27794 * Port operations for DMA capable ALi with cable detect
27795 */
27796 -static struct ata_port_operations ali_c4_port_ops = {
27797 +static const struct ata_port_operations ali_c4_port_ops = {
27798 .inherits = &ali_dma_base_ops,
27799 .check_atapi_dma = ali_check_atapi_dma,
27800 .cable_detect = ali_c2_cable_detect,
27801 @@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4_port_ops = {
27802 /*
27803 * Port operations for DMA capable ALi with cable detect and LBA48
27804 */
27805 -static struct ata_port_operations ali_c5_port_ops = {
27806 +static const struct ata_port_operations ali_c5_port_ops = {
27807 .inherits = &ali_dma_base_ops,
27808 .check_atapi_dma = ali_check_atapi_dma,
27809 .dev_config = ali_warn_atapi_dma,
27810 diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
27811 index 567f3f7..c8ee0da 100644
27812 --- a/drivers/ata/pata_amd.c
27813 +++ b/drivers/ata/pata_amd.c
27814 @@ -397,28 +397,28 @@ static const struct ata_port_operations amd_base_port_ops = {
27815 .prereset = amd_pre_reset,
27816 };
27817
27818 -static struct ata_port_operations amd33_port_ops = {
27819 +static const struct ata_port_operations amd33_port_ops = {
27820 .inherits = &amd_base_port_ops,
27821 .cable_detect = ata_cable_40wire,
27822 .set_piomode = amd33_set_piomode,
27823 .set_dmamode = amd33_set_dmamode,
27824 };
27825
27826 -static struct ata_port_operations amd66_port_ops = {
27827 +static const struct ata_port_operations amd66_port_ops = {
27828 .inherits = &amd_base_port_ops,
27829 .cable_detect = ata_cable_unknown,
27830 .set_piomode = amd66_set_piomode,
27831 .set_dmamode = amd66_set_dmamode,
27832 };
27833
27834 -static struct ata_port_operations amd100_port_ops = {
27835 +static const struct ata_port_operations amd100_port_ops = {
27836 .inherits = &amd_base_port_ops,
27837 .cable_detect = ata_cable_unknown,
27838 .set_piomode = amd100_set_piomode,
27839 .set_dmamode = amd100_set_dmamode,
27840 };
27841
27842 -static struct ata_port_operations amd133_port_ops = {
27843 +static const struct ata_port_operations amd133_port_ops = {
27844 .inherits = &amd_base_port_ops,
27845 .cable_detect = amd_cable_detect,
27846 .set_piomode = amd133_set_piomode,
27847 @@ -433,13 +433,13 @@ static const struct ata_port_operations nv_base_port_ops = {
27848 .host_stop = nv_host_stop,
27849 };
27850
27851 -static struct ata_port_operations nv100_port_ops = {
27852 +static const struct ata_port_operations nv100_port_ops = {
27853 .inherits = &nv_base_port_ops,
27854 .set_piomode = nv100_set_piomode,
27855 .set_dmamode = nv100_set_dmamode,
27856 };
27857
27858 -static struct ata_port_operations nv133_port_ops = {
27859 +static const struct ata_port_operations nv133_port_ops = {
27860 .inherits = &nv_base_port_ops,
27861 .set_piomode = nv133_set_piomode,
27862 .set_dmamode = nv133_set_dmamode,
27863 diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
27864 index d332cfd..4b7eaae 100644
27865 --- a/drivers/ata/pata_artop.c
27866 +++ b/drivers/ata/pata_artop.c
27867 @@ -311,7 +311,7 @@ static struct scsi_host_template artop_sht = {
27868 ATA_BMDMA_SHT(DRV_NAME),
27869 };
27870
27871 -static struct ata_port_operations artop6210_ops = {
27872 +static const struct ata_port_operations artop6210_ops = {
27873 .inherits = &ata_bmdma_port_ops,
27874 .cable_detect = ata_cable_40wire,
27875 .set_piomode = artop6210_set_piomode,
27876 @@ -320,7 +320,7 @@ static struct ata_port_operations artop6210_ops = {
27877 .qc_defer = artop6210_qc_defer,
27878 };
27879
27880 -static struct ata_port_operations artop6260_ops = {
27881 +static const struct ata_port_operations artop6260_ops = {
27882 .inherits = &ata_bmdma_port_ops,
27883 .cable_detect = artop6260_cable_detect,
27884 .set_piomode = artop6260_set_piomode,
27885 diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
27886 index 5c129f9..7bb7ccb 100644
27887 --- a/drivers/ata/pata_at32.c
27888 +++ b/drivers/ata/pata_at32.c
27889 @@ -172,7 +172,7 @@ static struct scsi_host_template at32_sht = {
27890 ATA_PIO_SHT(DRV_NAME),
27891 };
27892
27893 -static struct ata_port_operations at32_port_ops = {
27894 +static const struct ata_port_operations at32_port_ops = {
27895 .inherits = &ata_sff_port_ops,
27896 .cable_detect = ata_cable_40wire,
27897 .set_piomode = pata_at32_set_piomode,
27898 diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
27899 index 41c94b1..829006d 100644
27900 --- a/drivers/ata/pata_at91.c
27901 +++ b/drivers/ata/pata_at91.c
27902 @@ -195,7 +195,7 @@ static struct scsi_host_template pata_at91_sht = {
27903 ATA_PIO_SHT(DRV_NAME),
27904 };
27905
27906 -static struct ata_port_operations pata_at91_port_ops = {
27907 +static const struct ata_port_operations pata_at91_port_ops = {
27908 .inherits = &ata_sff_port_ops,
27909
27910 .sff_data_xfer = pata_at91_data_xfer_noirq,
27911 diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
27912 index ae4454d..d391eb4 100644
27913 --- a/drivers/ata/pata_atiixp.c
27914 +++ b/drivers/ata/pata_atiixp.c
27915 @@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_sht = {
27916 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
27917 };
27918
27919 -static struct ata_port_operations atiixp_port_ops = {
27920 +static const struct ata_port_operations atiixp_port_ops = {
27921 .inherits = &ata_bmdma_port_ops,
27922
27923 .qc_prep = ata_sff_dumb_qc_prep,
27924 diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
27925 index 6fe7ded..2a425dc 100644
27926 --- a/drivers/ata/pata_atp867x.c
27927 +++ b/drivers/ata/pata_atp867x.c
27928 @@ -274,7 +274,7 @@ static struct scsi_host_template atp867x_sht = {
27929 ATA_BMDMA_SHT(DRV_NAME),
27930 };
27931
27932 -static struct ata_port_operations atp867x_ops = {
27933 +static const struct ata_port_operations atp867x_ops = {
27934 .inherits = &ata_bmdma_port_ops,
27935 .cable_detect = atp867x_cable_detect,
27936 .set_piomode = atp867x_set_piomode,
27937 diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
27938 index c4b47a3..b27a367 100644
27939 --- a/drivers/ata/pata_bf54x.c
27940 +++ b/drivers/ata/pata_bf54x.c
27941 @@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sht = {
27942 .dma_boundary = ATA_DMA_BOUNDARY,
27943 };
27944
27945 -static struct ata_port_operations bfin_pata_ops = {
27946 +static const struct ata_port_operations bfin_pata_ops = {
27947 .inherits = &ata_sff_port_ops,
27948
27949 .set_piomode = bfin_set_piomode,
27950 diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
27951 index 5acf9fa..84248be 100644
27952 --- a/drivers/ata/pata_cmd640.c
27953 +++ b/drivers/ata/pata_cmd640.c
27954 @@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_sht = {
27955 ATA_BMDMA_SHT(DRV_NAME),
27956 };
27957
27958 -static struct ata_port_operations cmd640_port_ops = {
27959 +static const struct ata_port_operations cmd640_port_ops = {
27960 .inherits = &ata_bmdma_port_ops,
27961 /* In theory xfer_noirq is not needed once we kill the prefetcher */
27962 .sff_data_xfer = ata_sff_data_xfer_noirq,
27963 diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
27964 index ccd2694..c869c3d 100644
27965 --- a/drivers/ata/pata_cmd64x.c
27966 +++ b/drivers/ata/pata_cmd64x.c
27967 @@ -271,18 +271,18 @@ static const struct ata_port_operations cmd64x_base_ops = {
27968 .set_dmamode = cmd64x_set_dmamode,
27969 };
27970
27971 -static struct ata_port_operations cmd64x_port_ops = {
27972 +static const struct ata_port_operations cmd64x_port_ops = {
27973 .inherits = &cmd64x_base_ops,
27974 .cable_detect = ata_cable_40wire,
27975 };
27976
27977 -static struct ata_port_operations cmd646r1_port_ops = {
27978 +static const struct ata_port_operations cmd646r1_port_ops = {
27979 .inherits = &cmd64x_base_ops,
27980 .bmdma_stop = cmd646r1_bmdma_stop,
27981 .cable_detect = ata_cable_40wire,
27982 };
27983
27984 -static struct ata_port_operations cmd648_port_ops = {
27985 +static const struct ata_port_operations cmd648_port_ops = {
27986 .inherits = &cmd64x_base_ops,
27987 .bmdma_stop = cmd648_bmdma_stop,
27988 .cable_detect = cmd648_cable_detect,
27989 diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
27990 index 0df83cf..d7595b0 100644
27991 --- a/drivers/ata/pata_cs5520.c
27992 +++ b/drivers/ata/pata_cs5520.c
27993 @@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_sht = {
27994 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
27995 };
27996
27997 -static struct ata_port_operations cs5520_port_ops = {
27998 +static const struct ata_port_operations cs5520_port_ops = {
27999 .inherits = &ata_bmdma_port_ops,
28000 .qc_prep = ata_sff_dumb_qc_prep,
28001 .cable_detect = ata_cable_40wire,
28002 diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
28003 index c974b05..6d26b11 100644
28004 --- a/drivers/ata/pata_cs5530.c
28005 +++ b/drivers/ata/pata_cs5530.c
28006 @@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_sht = {
28007 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28008 };
28009
28010 -static struct ata_port_operations cs5530_port_ops = {
28011 +static const struct ata_port_operations cs5530_port_ops = {
28012 .inherits = &ata_bmdma_port_ops,
28013
28014 .qc_prep = ata_sff_dumb_qc_prep,
28015 diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
28016 index 403f561..aacd26b 100644
28017 --- a/drivers/ata/pata_cs5535.c
28018 +++ b/drivers/ata/pata_cs5535.c
28019 @@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_sht = {
28020 ATA_BMDMA_SHT(DRV_NAME),
28021 };
28022
28023 -static struct ata_port_operations cs5535_port_ops = {
28024 +static const struct ata_port_operations cs5535_port_ops = {
28025 .inherits = &ata_bmdma_port_ops,
28026 .cable_detect = cs5535_cable_detect,
28027 .set_piomode = cs5535_set_piomode,
28028 diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
28029 index 6da4cb4..de24a25 100644
28030 --- a/drivers/ata/pata_cs5536.c
28031 +++ b/drivers/ata/pata_cs5536.c
28032 @@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_sht = {
28033 ATA_BMDMA_SHT(DRV_NAME),
28034 };
28035
28036 -static struct ata_port_operations cs5536_port_ops = {
28037 +static const struct ata_port_operations cs5536_port_ops = {
28038 .inherits = &ata_bmdma_port_ops,
28039 .cable_detect = cs5536_cable_detect,
28040 .set_piomode = cs5536_set_piomode,
28041 diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
28042 index 8fb040b..b16a9c9 100644
28043 --- a/drivers/ata/pata_cypress.c
28044 +++ b/drivers/ata/pata_cypress.c
28045 @@ -113,7 +113,7 @@ static struct scsi_host_template cy82c693_sht = {
28046 ATA_BMDMA_SHT(DRV_NAME),
28047 };
28048
28049 -static struct ata_port_operations cy82c693_port_ops = {
28050 +static const struct ata_port_operations cy82c693_port_ops = {
28051 .inherits = &ata_bmdma_port_ops,
28052 .cable_detect = ata_cable_40wire,
28053 .set_piomode = cy82c693_set_piomode,
28054 diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
28055 index 2a6412f..555ee11 100644
28056 --- a/drivers/ata/pata_efar.c
28057 +++ b/drivers/ata/pata_efar.c
28058 @@ -222,7 +222,7 @@ static struct scsi_host_template efar_sht = {
28059 ATA_BMDMA_SHT(DRV_NAME),
28060 };
28061
28062 -static struct ata_port_operations efar_ops = {
28063 +static const struct ata_port_operations efar_ops = {
28064 .inherits = &ata_bmdma_port_ops,
28065 .cable_detect = efar_cable_detect,
28066 .set_piomode = efar_set_piomode,
28067 diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
28068 index b9d8836..0b92030 100644
28069 --- a/drivers/ata/pata_hpt366.c
28070 +++ b/drivers/ata/pata_hpt366.c
28071 @@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_sht = {
28072 * Configuration for HPT366/68
28073 */
28074
28075 -static struct ata_port_operations hpt366_port_ops = {
28076 +static const struct ata_port_operations hpt366_port_ops = {
28077 .inherits = &ata_bmdma_port_ops,
28078 .cable_detect = hpt36x_cable_detect,
28079 .mode_filter = hpt366_filter,
28080 diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
28081 index 5af7f19..00c4980 100644
28082 --- a/drivers/ata/pata_hpt37x.c
28083 +++ b/drivers/ata/pata_hpt37x.c
28084 @@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_sht = {
28085 * Configuration for HPT370
28086 */
28087
28088 -static struct ata_port_operations hpt370_port_ops = {
28089 +static const struct ata_port_operations hpt370_port_ops = {
28090 .inherits = &ata_bmdma_port_ops,
28091
28092 .bmdma_stop = hpt370_bmdma_stop,
28093 @@ -591,7 +591,7 @@ static struct ata_port_operations hpt370_port_ops = {
28094 * Configuration for HPT370A. Close to 370 but less filters
28095 */
28096
28097 -static struct ata_port_operations hpt370a_port_ops = {
28098 +static const struct ata_port_operations hpt370a_port_ops = {
28099 .inherits = &hpt370_port_ops,
28100 .mode_filter = hpt370a_filter,
28101 };
28102 @@ -601,7 +601,7 @@ static struct ata_port_operations hpt370a_port_ops = {
28103 * and DMA mode setting functionality.
28104 */
28105
28106 -static struct ata_port_operations hpt372_port_ops = {
28107 +static const struct ata_port_operations hpt372_port_ops = {
28108 .inherits = &ata_bmdma_port_ops,
28109
28110 .bmdma_stop = hpt37x_bmdma_stop,
28111 @@ -616,7 +616,7 @@ static struct ata_port_operations hpt372_port_ops = {
28112 * but we have a different cable detection procedure for function 1.
28113 */
28114
28115 -static struct ata_port_operations hpt374_fn1_port_ops = {
28116 +static const struct ata_port_operations hpt374_fn1_port_ops = {
28117 .inherits = &hpt372_port_ops,
28118 .prereset = hpt374_fn1_pre_reset,
28119 };
28120 diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
28121 index 100f227..2e39382 100644
28122 --- a/drivers/ata/pata_hpt3x2n.c
28123 +++ b/drivers/ata/pata_hpt3x2n.c
28124 @@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n_sht = {
28125 * Configuration for HPT3x2n.
28126 */
28127
28128 -static struct ata_port_operations hpt3x2n_port_ops = {
28129 +static const struct ata_port_operations hpt3x2n_port_ops = {
28130 .inherits = &ata_bmdma_port_ops,
28131
28132 .bmdma_stop = hpt3x2n_bmdma_stop,
28133 diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
28134 index 7e31025..6fca8f4 100644
28135 --- a/drivers/ata/pata_hpt3x3.c
28136 +++ b/drivers/ata/pata_hpt3x3.c
28137 @@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_sht = {
28138 ATA_BMDMA_SHT(DRV_NAME),
28139 };
28140
28141 -static struct ata_port_operations hpt3x3_port_ops = {
28142 +static const struct ata_port_operations hpt3x3_port_ops = {
28143 .inherits = &ata_bmdma_port_ops,
28144 .cable_detect = ata_cable_40wire,
28145 .set_piomode = hpt3x3_set_piomode,
28146 diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
28147 index b663b7f..9a26c2a 100644
28148 --- a/drivers/ata/pata_icside.c
28149 +++ b/drivers/ata/pata_icside.c
28150 @@ -319,7 +319,7 @@ static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
28151 }
28152 }
28153
28154 -static struct ata_port_operations pata_icside_port_ops = {
28155 +static const struct ata_port_operations pata_icside_port_ops = {
28156 .inherits = &ata_sff_port_ops,
28157 /* no need to build any PRD tables for DMA */
28158 .qc_prep = ata_noop_qc_prep,
28159 diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
28160 index 4bceb88..457dfb6 100644
28161 --- a/drivers/ata/pata_isapnp.c
28162 +++ b/drivers/ata/pata_isapnp.c
28163 @@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_sht = {
28164 ATA_PIO_SHT(DRV_NAME),
28165 };
28166
28167 -static struct ata_port_operations isapnp_port_ops = {
28168 +static const struct ata_port_operations isapnp_port_ops = {
28169 .inherits = &ata_sff_port_ops,
28170 .cable_detect = ata_cable_40wire,
28171 };
28172
28173 -static struct ata_port_operations isapnp_noalt_port_ops = {
28174 +static const struct ata_port_operations isapnp_noalt_port_ops = {
28175 .inherits = &ata_sff_port_ops,
28176 .cable_detect = ata_cable_40wire,
28177 /* No altstatus so we don't want to use the lost interrupt poll */
28178 diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
28179 index f156da8..24976e2 100644
28180 --- a/drivers/ata/pata_it8213.c
28181 +++ b/drivers/ata/pata_it8213.c
28182 @@ -234,7 +234,7 @@ static struct scsi_host_template it8213_sht = {
28183 };
28184
28185
28186 -static struct ata_port_operations it8213_ops = {
28187 +static const struct ata_port_operations it8213_ops = {
28188 .inherits = &ata_bmdma_port_ops,
28189 .cable_detect = it8213_cable_detect,
28190 .set_piomode = it8213_set_piomode,
28191 diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
28192 index 188bc2f..ca9e785 100644
28193 --- a/drivers/ata/pata_it821x.c
28194 +++ b/drivers/ata/pata_it821x.c
28195 @@ -800,7 +800,7 @@ static struct scsi_host_template it821x_sht = {
28196 ATA_BMDMA_SHT(DRV_NAME),
28197 };
28198
28199 -static struct ata_port_operations it821x_smart_port_ops = {
28200 +static const struct ata_port_operations it821x_smart_port_ops = {
28201 .inherits = &ata_bmdma_port_ops,
28202
28203 .check_atapi_dma= it821x_check_atapi_dma,
28204 @@ -814,7 +814,7 @@ static struct ata_port_operations it821x_smart_port_ops = {
28205 .port_start = it821x_port_start,
28206 };
28207
28208 -static struct ata_port_operations it821x_passthru_port_ops = {
28209 +static const struct ata_port_operations it821x_passthru_port_ops = {
28210 .inherits = &ata_bmdma_port_ops,
28211
28212 .check_atapi_dma= it821x_check_atapi_dma,
28213 @@ -830,7 +830,7 @@ static struct ata_port_operations it821x_passthru_port_ops = {
28214 .port_start = it821x_port_start,
28215 };
28216
28217 -static struct ata_port_operations it821x_rdc_port_ops = {
28218 +static const struct ata_port_operations it821x_rdc_port_ops = {
28219 .inherits = &ata_bmdma_port_ops,
28220
28221 .check_atapi_dma= it821x_check_atapi_dma,
28222 diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
28223 index ba54b08..4b952b7 100644
28224 --- a/drivers/ata/pata_ixp4xx_cf.c
28225 +++ b/drivers/ata/pata_ixp4xx_cf.c
28226 @@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_sht = {
28227 ATA_PIO_SHT(DRV_NAME),
28228 };
28229
28230 -static struct ata_port_operations ixp4xx_port_ops = {
28231 +static const struct ata_port_operations ixp4xx_port_ops = {
28232 .inherits = &ata_sff_port_ops,
28233 .sff_data_xfer = ixp4xx_mmio_data_xfer,
28234 .cable_detect = ata_cable_40wire,
28235 diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
28236 index 3a1474a..434b0ff 100644
28237 --- a/drivers/ata/pata_jmicron.c
28238 +++ b/drivers/ata/pata_jmicron.c
28239 @@ -111,7 +111,7 @@ static struct scsi_host_template jmicron_sht = {
28240 ATA_BMDMA_SHT(DRV_NAME),
28241 };
28242
28243 -static struct ata_port_operations jmicron_ops = {
28244 +static const struct ata_port_operations jmicron_ops = {
28245 .inherits = &ata_bmdma_port_ops,
28246 .prereset = jmicron_pre_reset,
28247 };
28248 diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
28249 index 6932e56..220e71d 100644
28250 --- a/drivers/ata/pata_legacy.c
28251 +++ b/drivers/ata/pata_legacy.c
28252 @@ -106,7 +106,7 @@ struct legacy_probe {
28253
28254 struct legacy_controller {
28255 const char *name;
28256 - struct ata_port_operations *ops;
28257 + const struct ata_port_operations *ops;
28258 unsigned int pio_mask;
28259 unsigned int flags;
28260 unsigned int pflags;
28261 @@ -223,12 +223,12 @@ static const struct ata_port_operations legacy_base_port_ops = {
28262 * pio_mask as well.
28263 */
28264
28265 -static struct ata_port_operations simple_port_ops = {
28266 +static const struct ata_port_operations simple_port_ops = {
28267 .inherits = &legacy_base_port_ops,
28268 .sff_data_xfer = ata_sff_data_xfer_noirq,
28269 };
28270
28271 -static struct ata_port_operations legacy_port_ops = {
28272 +static const struct ata_port_operations legacy_port_ops = {
28273 .inherits = &legacy_base_port_ops,
28274 .sff_data_xfer = ata_sff_data_xfer_noirq,
28275 .set_mode = legacy_set_mode,
28276 @@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(struct ata_device *dev,
28277 return buflen;
28278 }
28279
28280 -static struct ata_port_operations pdc20230_port_ops = {
28281 +static const struct ata_port_operations pdc20230_port_ops = {
28282 .inherits = &legacy_base_port_ops,
28283 .set_piomode = pdc20230_set_piomode,
28284 .sff_data_xfer = pdc_data_xfer_vlb,
28285 @@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
28286 ioread8(ap->ioaddr.status_addr);
28287 }
28288
28289 -static struct ata_port_operations ht6560a_port_ops = {
28290 +static const struct ata_port_operations ht6560a_port_ops = {
28291 .inherits = &legacy_base_port_ops,
28292 .set_piomode = ht6560a_set_piomode,
28293 };
28294 @@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
28295 ioread8(ap->ioaddr.status_addr);
28296 }
28297
28298 -static struct ata_port_operations ht6560b_port_ops = {
28299 +static const struct ata_port_operations ht6560b_port_ops = {
28300 .inherits = &legacy_base_port_ops,
28301 .set_piomode = ht6560b_set_piomode,
28302 };
28303 @@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(struct ata_port *ap,
28304 }
28305
28306
28307 -static struct ata_port_operations opti82c611a_port_ops = {
28308 +static const struct ata_port_operations opti82c611a_port_ops = {
28309 .inherits = &legacy_base_port_ops,
28310 .set_piomode = opti82c611a_set_piomode,
28311 };
28312 @@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(struct ata_queued_cmd *qc)
28313 return ata_sff_qc_issue(qc);
28314 }
28315
28316 -static struct ata_port_operations opti82c46x_port_ops = {
28317 +static const struct ata_port_operations opti82c46x_port_ops = {
28318 .inherits = &legacy_base_port_ops,
28319 .set_piomode = opti82c46x_set_piomode,
28320 .qc_issue = opti82c46x_qc_issue,
28321 @@ -771,20 +771,20 @@ static int qdi_port(struct platform_device *dev,
28322 return 0;
28323 }
28324
28325 -static struct ata_port_operations qdi6500_port_ops = {
28326 +static const struct ata_port_operations qdi6500_port_ops = {
28327 .inherits = &legacy_base_port_ops,
28328 .set_piomode = qdi6500_set_piomode,
28329 .qc_issue = qdi_qc_issue,
28330 .sff_data_xfer = vlb32_data_xfer,
28331 };
28332
28333 -static struct ata_port_operations qdi6580_port_ops = {
28334 +static const struct ata_port_operations qdi6580_port_ops = {
28335 .inherits = &legacy_base_port_ops,
28336 .set_piomode = qdi6580_set_piomode,
28337 .sff_data_xfer = vlb32_data_xfer,
28338 };
28339
28340 -static struct ata_port_operations qdi6580dp_port_ops = {
28341 +static const struct ata_port_operations qdi6580dp_port_ops = {
28342 .inherits = &legacy_base_port_ops,
28343 .set_piomode = qdi6580dp_set_piomode,
28344 .sff_data_xfer = vlb32_data_xfer,
28345 @@ -855,7 +855,7 @@ static int winbond_port(struct platform_device *dev,
28346 return 0;
28347 }
28348
28349 -static struct ata_port_operations winbond_port_ops = {
28350 +static const struct ata_port_operations winbond_port_ops = {
28351 .inherits = &legacy_base_port_ops,
28352 .set_piomode = winbond_set_piomode,
28353 .sff_data_xfer = vlb32_data_xfer,
28354 @@ -978,7 +978,7 @@ static __init int legacy_init_one(struct legacy_probe *probe)
28355 int pio_modes = controller->pio_mask;
28356 unsigned long io = probe->port;
28357 u32 mask = (1 << probe->slot);
28358 - struct ata_port_operations *ops = controller->ops;
28359 + const struct ata_port_operations *ops = controller->ops;
28360 struct legacy_data *ld = &legacy_data[probe->slot];
28361 struct ata_host *host = NULL;
28362 struct ata_port *ap;
28363 diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
28364 index 2096fb7..4d090fc 100644
28365 --- a/drivers/ata/pata_marvell.c
28366 +++ b/drivers/ata/pata_marvell.c
28367 @@ -100,7 +100,7 @@ static struct scsi_host_template marvell_sht = {
28368 ATA_BMDMA_SHT(DRV_NAME),
28369 };
28370
28371 -static struct ata_port_operations marvell_ops = {
28372 +static const struct ata_port_operations marvell_ops = {
28373 .inherits = &ata_bmdma_port_ops,
28374 .cable_detect = marvell_cable_detect,
28375 .prereset = marvell_pre_reset,
28376 diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
28377 index 99d41be..7d56aa8 100644
28378 --- a/drivers/ata/pata_mpc52xx.c
28379 +++ b/drivers/ata/pata_mpc52xx.c
28380 @@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx_ata_sht = {
28381 ATA_PIO_SHT(DRV_NAME),
28382 };
28383
28384 -static struct ata_port_operations mpc52xx_ata_port_ops = {
28385 +static const struct ata_port_operations mpc52xx_ata_port_ops = {
28386 .inherits = &ata_bmdma_port_ops,
28387 .sff_dev_select = mpc52xx_ata_dev_select,
28388 .set_piomode = mpc52xx_ata_set_piomode,
28389 diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
28390 index b21f002..0a27e7f 100644
28391 --- a/drivers/ata/pata_mpiix.c
28392 +++ b/drivers/ata/pata_mpiix.c
28393 @@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_sht = {
28394 ATA_PIO_SHT(DRV_NAME),
28395 };
28396
28397 -static struct ata_port_operations mpiix_port_ops = {
28398 +static const struct ata_port_operations mpiix_port_ops = {
28399 .inherits = &ata_sff_port_ops,
28400 .qc_issue = mpiix_qc_issue,
28401 .cable_detect = ata_cable_40wire,
28402 diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
28403 index f0d52f7..89c3be3 100644
28404 --- a/drivers/ata/pata_netcell.c
28405 +++ b/drivers/ata/pata_netcell.c
28406 @@ -34,7 +34,7 @@ static struct scsi_host_template netcell_sht = {
28407 ATA_BMDMA_SHT(DRV_NAME),
28408 };
28409
28410 -static struct ata_port_operations netcell_ops = {
28411 +static const struct ata_port_operations netcell_ops = {
28412 .inherits = &ata_bmdma_port_ops,
28413 .cable_detect = ata_cable_80wire,
28414 .read_id = netcell_read_id,
28415 diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
28416 index dd53a66..a3f4317 100644
28417 --- a/drivers/ata/pata_ninja32.c
28418 +++ b/drivers/ata/pata_ninja32.c
28419 @@ -81,7 +81,7 @@ static struct scsi_host_template ninja32_sht = {
28420 ATA_BMDMA_SHT(DRV_NAME),
28421 };
28422
28423 -static struct ata_port_operations ninja32_port_ops = {
28424 +static const struct ata_port_operations ninja32_port_ops = {
28425 .inherits = &ata_bmdma_port_ops,
28426 .sff_dev_select = ninja32_dev_select,
28427 .cable_detect = ata_cable_40wire,
28428 diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
28429 index ca53fac..9aa93ef 100644
28430 --- a/drivers/ata/pata_ns87410.c
28431 +++ b/drivers/ata/pata_ns87410.c
28432 @@ -132,7 +132,7 @@ static struct scsi_host_template ns87410_sht = {
28433 ATA_PIO_SHT(DRV_NAME),
28434 };
28435
28436 -static struct ata_port_operations ns87410_port_ops = {
28437 +static const struct ata_port_operations ns87410_port_ops = {
28438 .inherits = &ata_sff_port_ops,
28439 .qc_issue = ns87410_qc_issue,
28440 .cable_detect = ata_cable_40wire,
28441 diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
28442 index 773b159..55f454e 100644
28443 --- a/drivers/ata/pata_ns87415.c
28444 +++ b/drivers/ata/pata_ns87415.c
28445 @@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct ata_port *ap)
28446 }
28447 #endif /* 87560 SuperIO Support */
28448
28449 -static struct ata_port_operations ns87415_pata_ops = {
28450 +static const struct ata_port_operations ns87415_pata_ops = {
28451 .inherits = &ata_bmdma_port_ops,
28452
28453 .check_atapi_dma = ns87415_check_atapi_dma,
28454 @@ -313,7 +313,7 @@ static struct ata_port_operations ns87415_pata_ops = {
28455 };
28456
28457 #if defined(CONFIG_SUPERIO)
28458 -static struct ata_port_operations ns87560_pata_ops = {
28459 +static const struct ata_port_operations ns87560_pata_ops = {
28460 .inherits = &ns87415_pata_ops,
28461 .sff_tf_read = ns87560_tf_read,
28462 .sff_check_status = ns87560_check_status,
28463 diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
28464 index d6f6956..639295b 100644
28465 --- a/drivers/ata/pata_octeon_cf.c
28466 +++ b/drivers/ata/pata_octeon_cf.c
28467 @@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
28468 return 0;
28469 }
28470
28471 +/* cannot be const */
28472 static struct ata_port_operations octeon_cf_ops = {
28473 .inherits = &ata_sff_port_ops,
28474 .check_atapi_dma = octeon_cf_check_atapi_dma,
28475 diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
28476 index 84ac503..adee1cd 100644
28477 --- a/drivers/ata/pata_oldpiix.c
28478 +++ b/drivers/ata/pata_oldpiix.c
28479 @@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix_sht = {
28480 ATA_BMDMA_SHT(DRV_NAME),
28481 };
28482
28483 -static struct ata_port_operations oldpiix_pata_ops = {
28484 +static const struct ata_port_operations oldpiix_pata_ops = {
28485 .inherits = &ata_bmdma_port_ops,
28486 .qc_issue = oldpiix_qc_issue,
28487 .cable_detect = ata_cable_40wire,
28488 diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
28489 index 99eddda..3a4c0aa 100644
28490 --- a/drivers/ata/pata_opti.c
28491 +++ b/drivers/ata/pata_opti.c
28492 @@ -152,7 +152,7 @@ static struct scsi_host_template opti_sht = {
28493 ATA_PIO_SHT(DRV_NAME),
28494 };
28495
28496 -static struct ata_port_operations opti_port_ops = {
28497 +static const struct ata_port_operations opti_port_ops = {
28498 .inherits = &ata_sff_port_ops,
28499 .cable_detect = ata_cable_40wire,
28500 .set_piomode = opti_set_piomode,
28501 diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
28502 index 86885a4..8e9968d 100644
28503 --- a/drivers/ata/pata_optidma.c
28504 +++ b/drivers/ata/pata_optidma.c
28505 @@ -337,7 +337,7 @@ static struct scsi_host_template optidma_sht = {
28506 ATA_BMDMA_SHT(DRV_NAME),
28507 };
28508
28509 -static struct ata_port_operations optidma_port_ops = {
28510 +static const struct ata_port_operations optidma_port_ops = {
28511 .inherits = &ata_bmdma_port_ops,
28512 .cable_detect = ata_cable_40wire,
28513 .set_piomode = optidma_set_pio_mode,
28514 @@ -346,7 +346,7 @@ static struct ata_port_operations optidma_port_ops = {
28515 .prereset = optidma_pre_reset,
28516 };
28517
28518 -static struct ata_port_operations optiplus_port_ops = {
28519 +static const struct ata_port_operations optiplus_port_ops = {
28520 .inherits = &optidma_port_ops,
28521 .set_piomode = optiplus_set_pio_mode,
28522 .set_dmamode = optiplus_set_dma_mode,
28523 diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
28524 index 11fb4cc..1a14022 100644
28525 --- a/drivers/ata/pata_palmld.c
28526 +++ b/drivers/ata/pata_palmld.c
28527 @@ -37,7 +37,7 @@ static struct scsi_host_template palmld_sht = {
28528 ATA_PIO_SHT(DRV_NAME),
28529 };
28530
28531 -static struct ata_port_operations palmld_port_ops = {
28532 +static const struct ata_port_operations palmld_port_ops = {
28533 .inherits = &ata_sff_port_ops,
28534 .sff_data_xfer = ata_sff_data_xfer_noirq,
28535 .cable_detect = ata_cable_40wire,
28536 diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
28537 index dc99e26..7f4b1e4 100644
28538 --- a/drivers/ata/pata_pcmcia.c
28539 +++ b/drivers/ata/pata_pcmcia.c
28540 @@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_sht = {
28541 ATA_PIO_SHT(DRV_NAME),
28542 };
28543
28544 -static struct ata_port_operations pcmcia_port_ops = {
28545 +static const struct ata_port_operations pcmcia_port_ops = {
28546 .inherits = &ata_sff_port_ops,
28547 .sff_data_xfer = ata_sff_data_xfer_noirq,
28548 .cable_detect = ata_cable_40wire,
28549 .set_mode = pcmcia_set_mode,
28550 };
28551
28552 -static struct ata_port_operations pcmcia_8bit_port_ops = {
28553 +static const struct ata_port_operations pcmcia_8bit_port_ops = {
28554 .inherits = &ata_sff_port_ops,
28555 .sff_data_xfer = ata_data_xfer_8bit,
28556 .cable_detect = ata_cable_40wire,
28557 @@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
28558 unsigned long io_base, ctl_base;
28559 void __iomem *io_addr, *ctl_addr;
28560 int n_ports = 1;
28561 - struct ata_port_operations *ops = &pcmcia_port_ops;
28562 + const struct ata_port_operations *ops = &pcmcia_port_ops;
28563
28564 info = kzalloc(sizeof(*info), GFP_KERNEL);
28565 if (info == NULL)
28566 diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
28567 index ca5cad0..3a1f125 100644
28568 --- a/drivers/ata/pata_pdc2027x.c
28569 +++ b/drivers/ata/pata_pdc2027x.c
28570 @@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027x_sht = {
28571 ATA_BMDMA_SHT(DRV_NAME),
28572 };
28573
28574 -static struct ata_port_operations pdc2027x_pata100_ops = {
28575 +static const struct ata_port_operations pdc2027x_pata100_ops = {
28576 .inherits = &ata_bmdma_port_ops,
28577 .check_atapi_dma = pdc2027x_check_atapi_dma,
28578 .cable_detect = pdc2027x_cable_detect,
28579 .prereset = pdc2027x_prereset,
28580 };
28581
28582 -static struct ata_port_operations pdc2027x_pata133_ops = {
28583 +static const struct ata_port_operations pdc2027x_pata133_ops = {
28584 .inherits = &pdc2027x_pata100_ops,
28585 .mode_filter = pdc2027x_mode_filter,
28586 .set_piomode = pdc2027x_set_piomode,
28587 diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
28588 index 2911120..4bf62aa 100644
28589 --- a/drivers/ata/pata_pdc202xx_old.c
28590 +++ b/drivers/ata/pata_pdc202xx_old.c
28591 @@ -274,7 +274,7 @@ static struct scsi_host_template pdc202xx_sht = {
28592 ATA_BMDMA_SHT(DRV_NAME),
28593 };
28594
28595 -static struct ata_port_operations pdc2024x_port_ops = {
28596 +static const struct ata_port_operations pdc2024x_port_ops = {
28597 .inherits = &ata_bmdma_port_ops,
28598
28599 .cable_detect = ata_cable_40wire,
28600 @@ -284,7 +284,7 @@ static struct ata_port_operations pdc2024x_port_ops = {
28601 .sff_exec_command = pdc202xx_exec_command,
28602 };
28603
28604 -static struct ata_port_operations pdc2026x_port_ops = {
28605 +static const struct ata_port_operations pdc2026x_port_ops = {
28606 .inherits = &pdc2024x_port_ops,
28607
28608 .check_atapi_dma = pdc2026x_check_atapi_dma,
28609 diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
28610 index 3f6ebc6..a18c358 100644
28611 --- a/drivers/ata/pata_platform.c
28612 +++ b/drivers/ata/pata_platform.c
28613 @@ -48,7 +48,7 @@ static struct scsi_host_template pata_platform_sht = {
28614 ATA_PIO_SHT(DRV_NAME),
28615 };
28616
28617 -static struct ata_port_operations pata_platform_port_ops = {
28618 +static const struct ata_port_operations pata_platform_port_ops = {
28619 .inherits = &ata_sff_port_ops,
28620 .sff_data_xfer = ata_sff_data_xfer_noirq,
28621 .cable_detect = ata_cable_unknown,
28622 diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
28623 index 45879dc..165a9f9 100644
28624 --- a/drivers/ata/pata_qdi.c
28625 +++ b/drivers/ata/pata_qdi.c
28626 @@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht = {
28627 ATA_PIO_SHT(DRV_NAME),
28628 };
28629
28630 -static struct ata_port_operations qdi6500_port_ops = {
28631 +static const struct ata_port_operations qdi6500_port_ops = {
28632 .inherits = &ata_sff_port_ops,
28633 .qc_issue = qdi_qc_issue,
28634 .sff_data_xfer = qdi_data_xfer,
28635 @@ -165,7 +165,7 @@ static struct ata_port_operations qdi6500_port_ops = {
28636 .set_piomode = qdi6500_set_piomode,
28637 };
28638
28639 -static struct ata_port_operations qdi6580_port_ops = {
28640 +static const struct ata_port_operations qdi6580_port_ops = {
28641 .inherits = &qdi6500_port_ops,
28642 .set_piomode = qdi6580_set_piomode,
28643 };
28644 diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
28645 index 4401b33..716c5cc 100644
28646 --- a/drivers/ata/pata_radisys.c
28647 +++ b/drivers/ata/pata_radisys.c
28648 @@ -187,7 +187,7 @@ static struct scsi_host_template radisys_sht = {
28649 ATA_BMDMA_SHT(DRV_NAME),
28650 };
28651
28652 -static struct ata_port_operations radisys_pata_ops = {
28653 +static const struct ata_port_operations radisys_pata_ops = {
28654 .inherits = &ata_bmdma_port_ops,
28655 .qc_issue = radisys_qc_issue,
28656 .cable_detect = ata_cable_unknown,
28657 diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
28658 index 45f1e10..fab6bca 100644
28659 --- a/drivers/ata/pata_rb532_cf.c
28660 +++ b/drivers/ata/pata_rb532_cf.c
28661 @@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handler(int irq, void *dev_instance)
28662 return IRQ_HANDLED;
28663 }
28664
28665 -static struct ata_port_operations rb532_pata_port_ops = {
28666 +static const struct ata_port_operations rb532_pata_port_ops = {
28667 .inherits = &ata_sff_port_ops,
28668 .sff_data_xfer = ata_sff_data_xfer32,
28669 };
28670 diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
28671 index c843a1e..b5853c3 100644
28672 --- a/drivers/ata/pata_rdc.c
28673 +++ b/drivers/ata/pata_rdc.c
28674 @@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
28675 pci_write_config_byte(dev, 0x48, udma_enable);
28676 }
28677
28678 -static struct ata_port_operations rdc_pata_ops = {
28679 +static const struct ata_port_operations rdc_pata_ops = {
28680 .inherits = &ata_bmdma32_port_ops,
28681 .cable_detect = rdc_pata_cable_detect,
28682 .set_piomode = rdc_set_piomode,
28683 diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
28684 index a5e4dfe..080c8c9 100644
28685 --- a/drivers/ata/pata_rz1000.c
28686 +++ b/drivers/ata/pata_rz1000.c
28687 @@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_sht = {
28688 ATA_PIO_SHT(DRV_NAME),
28689 };
28690
28691 -static struct ata_port_operations rz1000_port_ops = {
28692 +static const struct ata_port_operations rz1000_port_ops = {
28693 .inherits = &ata_sff_port_ops,
28694 .cable_detect = ata_cable_40wire,
28695 .set_mode = rz1000_set_mode,
28696 diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
28697 index 3bbed83..e309daf 100644
28698 --- a/drivers/ata/pata_sc1200.c
28699 +++ b/drivers/ata/pata_sc1200.c
28700 @@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_sht = {
28701 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28702 };
28703
28704 -static struct ata_port_operations sc1200_port_ops = {
28705 +static const struct ata_port_operations sc1200_port_ops = {
28706 .inherits = &ata_bmdma_port_ops,
28707 .qc_prep = ata_sff_dumb_qc_prep,
28708 .qc_issue = sc1200_qc_issue,
28709 diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
28710 index 4257d6b..4c1d9d5 100644
28711 --- a/drivers/ata/pata_scc.c
28712 +++ b/drivers/ata/pata_scc.c
28713 @@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht = {
28714 ATA_BMDMA_SHT(DRV_NAME),
28715 };
28716
28717 -static struct ata_port_operations scc_pata_ops = {
28718 +static const struct ata_port_operations scc_pata_ops = {
28719 .inherits = &ata_bmdma_port_ops,
28720
28721 .set_piomode = scc_set_piomode,
28722 diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
28723 index 99cceb4..e2e0a87 100644
28724 --- a/drivers/ata/pata_sch.c
28725 +++ b/drivers/ata/pata_sch.c
28726 @@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht = {
28727 ATA_BMDMA_SHT(DRV_NAME),
28728 };
28729
28730 -static struct ata_port_operations sch_pata_ops = {
28731 +static const struct ata_port_operations sch_pata_ops = {
28732 .inherits = &ata_bmdma_port_ops,
28733 .cable_detect = ata_cable_unknown,
28734 .set_piomode = sch_set_piomode,
28735 diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
28736 index beaed12..39969f1 100644
28737 --- a/drivers/ata/pata_serverworks.c
28738 +++ b/drivers/ata/pata_serverworks.c
28739 @@ -299,7 +299,7 @@ static struct scsi_host_template serverworks_sht = {
28740 ATA_BMDMA_SHT(DRV_NAME),
28741 };
28742
28743 -static struct ata_port_operations serverworks_osb4_port_ops = {
28744 +static const struct ata_port_operations serverworks_osb4_port_ops = {
28745 .inherits = &ata_bmdma_port_ops,
28746 .cable_detect = serverworks_cable_detect,
28747 .mode_filter = serverworks_osb4_filter,
28748 @@ -307,7 +307,7 @@ static struct ata_port_operations serverworks_osb4_port_ops = {
28749 .set_dmamode = serverworks_set_dmamode,
28750 };
28751
28752 -static struct ata_port_operations serverworks_csb_port_ops = {
28753 +static const struct ata_port_operations serverworks_csb_port_ops = {
28754 .inherits = &serverworks_osb4_port_ops,
28755 .mode_filter = serverworks_csb_filter,
28756 };
28757 diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
28758 index a2ace48..0463b44 100644
28759 --- a/drivers/ata/pata_sil680.c
28760 +++ b/drivers/ata/pata_sil680.c
28761 @@ -194,7 +194,7 @@ static struct scsi_host_template sil680_sht = {
28762 ATA_BMDMA_SHT(DRV_NAME),
28763 };
28764
28765 -static struct ata_port_operations sil680_port_ops = {
28766 +static const struct ata_port_operations sil680_port_ops = {
28767 .inherits = &ata_bmdma32_port_ops,
28768 .cable_detect = sil680_cable_detect,
28769 .set_piomode = sil680_set_piomode,
28770 diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
28771 index 488e77b..b3724d5 100644
28772 --- a/drivers/ata/pata_sis.c
28773 +++ b/drivers/ata/pata_sis.c
28774 @@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht = {
28775 ATA_BMDMA_SHT(DRV_NAME),
28776 };
28777
28778 -static struct ata_port_operations sis_133_for_sata_ops = {
28779 +static const struct ata_port_operations sis_133_for_sata_ops = {
28780 .inherits = &ata_bmdma_port_ops,
28781 .set_piomode = sis_133_set_piomode,
28782 .set_dmamode = sis_133_set_dmamode,
28783 .cable_detect = sis_133_cable_detect,
28784 };
28785
28786 -static struct ata_port_operations sis_base_ops = {
28787 +static const struct ata_port_operations sis_base_ops = {
28788 .inherits = &ata_bmdma_port_ops,
28789 .prereset = sis_pre_reset,
28790 };
28791
28792 -static struct ata_port_operations sis_133_ops = {
28793 +static const struct ata_port_operations sis_133_ops = {
28794 .inherits = &sis_base_ops,
28795 .set_piomode = sis_133_set_piomode,
28796 .set_dmamode = sis_133_set_dmamode,
28797 .cable_detect = sis_133_cable_detect,
28798 };
28799
28800 -static struct ata_port_operations sis_133_early_ops = {
28801 +static const struct ata_port_operations sis_133_early_ops = {
28802 .inherits = &sis_base_ops,
28803 .set_piomode = sis_100_set_piomode,
28804 .set_dmamode = sis_133_early_set_dmamode,
28805 .cable_detect = sis_66_cable_detect,
28806 };
28807
28808 -static struct ata_port_operations sis_100_ops = {
28809 +static const struct ata_port_operations sis_100_ops = {
28810 .inherits = &sis_base_ops,
28811 .set_piomode = sis_100_set_piomode,
28812 .set_dmamode = sis_100_set_dmamode,
28813 .cable_detect = sis_66_cable_detect,
28814 };
28815
28816 -static struct ata_port_operations sis_66_ops = {
28817 +static const struct ata_port_operations sis_66_ops = {
28818 .inherits = &sis_base_ops,
28819 .set_piomode = sis_old_set_piomode,
28820 .set_dmamode = sis_66_set_dmamode,
28821 .cable_detect = sis_66_cable_detect,
28822 };
28823
28824 -static struct ata_port_operations sis_old_ops = {
28825 +static const struct ata_port_operations sis_old_ops = {
28826 .inherits = &sis_base_ops,
28827 .set_piomode = sis_old_set_piomode,
28828 .set_dmamode = sis_old_set_dmamode,
28829 diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
28830 index 29f733c..43e9ca0 100644
28831 --- a/drivers/ata/pata_sl82c105.c
28832 +++ b/drivers/ata/pata_sl82c105.c
28833 @@ -231,7 +231,7 @@ static struct scsi_host_template sl82c105_sht = {
28834 ATA_BMDMA_SHT(DRV_NAME),
28835 };
28836
28837 -static struct ata_port_operations sl82c105_port_ops = {
28838 +static const struct ata_port_operations sl82c105_port_ops = {
28839 .inherits = &ata_bmdma_port_ops,
28840 .qc_defer = sl82c105_qc_defer,
28841 .bmdma_start = sl82c105_bmdma_start,
28842 diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
28843 index f1f13ff..df39e99 100644
28844 --- a/drivers/ata/pata_triflex.c
28845 +++ b/drivers/ata/pata_triflex.c
28846 @@ -178,7 +178,7 @@ static struct scsi_host_template triflex_sht = {
28847 ATA_BMDMA_SHT(DRV_NAME),
28848 };
28849
28850 -static struct ata_port_operations triflex_port_ops = {
28851 +static const struct ata_port_operations triflex_port_ops = {
28852 .inherits = &ata_bmdma_port_ops,
28853 .bmdma_start = triflex_bmdma_start,
28854 .bmdma_stop = triflex_bmdma_stop,
28855 diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
28856 index 1d73b8d..98a4b29 100644
28857 --- a/drivers/ata/pata_via.c
28858 +++ b/drivers/ata/pata_via.c
28859 @@ -419,7 +419,7 @@ static struct scsi_host_template via_sht = {
28860 ATA_BMDMA_SHT(DRV_NAME),
28861 };
28862
28863 -static struct ata_port_operations via_port_ops = {
28864 +static const struct ata_port_operations via_port_ops = {
28865 .inherits = &ata_bmdma_port_ops,
28866 .cable_detect = via_cable_detect,
28867 .set_piomode = via_set_piomode,
28868 @@ -429,7 +429,7 @@ static struct ata_port_operations via_port_ops = {
28869 .port_start = via_port_start,
28870 };
28871
28872 -static struct ata_port_operations via_port_ops_noirq = {
28873 +static const struct ata_port_operations via_port_ops_noirq = {
28874 .inherits = &via_port_ops,
28875 .sff_data_xfer = ata_sff_data_xfer_noirq,
28876 };
28877 diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
28878 index 6d8619b..ad511c4 100644
28879 --- a/drivers/ata/pata_winbond.c
28880 +++ b/drivers/ata/pata_winbond.c
28881 @@ -125,7 +125,7 @@ static struct scsi_host_template winbond_sht = {
28882 ATA_PIO_SHT(DRV_NAME),
28883 };
28884
28885 -static struct ata_port_operations winbond_port_ops = {
28886 +static const struct ata_port_operations winbond_port_ops = {
28887 .inherits = &ata_sff_port_ops,
28888 .sff_data_xfer = winbond_data_xfer,
28889 .cable_detect = ata_cable_40wire,
28890 diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
28891 index 6c65b07..f996ec7 100644
28892 --- a/drivers/ata/pdc_adma.c
28893 +++ b/drivers/ata/pdc_adma.c
28894 @@ -145,7 +145,7 @@ static struct scsi_host_template adma_ata_sht = {
28895 .dma_boundary = ADMA_DMA_BOUNDARY,
28896 };
28897
28898 -static struct ata_port_operations adma_ata_ops = {
28899 +static const struct ata_port_operations adma_ata_ops = {
28900 .inherits = &ata_sff_port_ops,
28901
28902 .lost_interrupt = ATA_OP_NULL,
28903 diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
28904 index 172b57e..c49bc1e 100644
28905 --- a/drivers/ata/sata_fsl.c
28906 +++ b/drivers/ata/sata_fsl.c
28907 @@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fsl_sht = {
28908 .dma_boundary = ATA_DMA_BOUNDARY,
28909 };
28910
28911 -static struct ata_port_operations sata_fsl_ops = {
28912 +static const struct ata_port_operations sata_fsl_ops = {
28913 .inherits = &sata_pmp_port_ops,
28914
28915 .qc_defer = ata_std_qc_defer,
28916 diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
28917 index 4406902..60603ef 100644
28918 --- a/drivers/ata/sata_inic162x.c
28919 +++ b/drivers/ata/sata_inic162x.c
28920 @@ -721,7 +721,7 @@ static int inic_port_start(struct ata_port *ap)
28921 return 0;
28922 }
28923
28924 -static struct ata_port_operations inic_port_ops = {
28925 +static const struct ata_port_operations inic_port_ops = {
28926 .inherits = &sata_port_ops,
28927
28928 .check_atapi_dma = inic_check_atapi_dma,
28929 diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
28930 index cf41126..8107be6 100644
28931 --- a/drivers/ata/sata_mv.c
28932 +++ b/drivers/ata/sata_mv.c
28933 @@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht = {
28934 .dma_boundary = MV_DMA_BOUNDARY,
28935 };
28936
28937 -static struct ata_port_operations mv5_ops = {
28938 +static const struct ata_port_operations mv5_ops = {
28939 .inherits = &ata_sff_port_ops,
28940
28941 .lost_interrupt = ATA_OP_NULL,
28942 @@ -678,7 +678,7 @@ static struct ata_port_operations mv5_ops = {
28943 .port_stop = mv_port_stop,
28944 };
28945
28946 -static struct ata_port_operations mv6_ops = {
28947 +static const struct ata_port_operations mv6_ops = {
28948 .inherits = &mv5_ops,
28949 .dev_config = mv6_dev_config,
28950 .scr_read = mv_scr_read,
28951 @@ -698,7 +698,7 @@ static struct ata_port_operations mv6_ops = {
28952 .bmdma_status = mv_bmdma_status,
28953 };
28954
28955 -static struct ata_port_operations mv_iie_ops = {
28956 +static const struct ata_port_operations mv_iie_ops = {
28957 .inherits = &mv6_ops,
28958 .dev_config = ATA_OP_NULL,
28959 .qc_prep = mv_qc_prep_iie,
28960 diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
28961 index ae2297c..d5c9c33 100644
28962 --- a/drivers/ata/sata_nv.c
28963 +++ b/drivers/ata/sata_nv.c
28964 @@ -464,7 +464,7 @@ static struct scsi_host_template nv_swncq_sht = {
28965 * cases. Define nv_hardreset() which only kicks in for post-boot
28966 * probing and use it for all variants.
28967 */
28968 -static struct ata_port_operations nv_generic_ops = {
28969 +static const struct ata_port_operations nv_generic_ops = {
28970 .inherits = &ata_bmdma_port_ops,
28971 .lost_interrupt = ATA_OP_NULL,
28972 .scr_read = nv_scr_read,
28973 @@ -472,20 +472,20 @@ static struct ata_port_operations nv_generic_ops = {
28974 .hardreset = nv_hardreset,
28975 };
28976
28977 -static struct ata_port_operations nv_nf2_ops = {
28978 +static const struct ata_port_operations nv_nf2_ops = {
28979 .inherits = &nv_generic_ops,
28980 .freeze = nv_nf2_freeze,
28981 .thaw = nv_nf2_thaw,
28982 };
28983
28984 -static struct ata_port_operations nv_ck804_ops = {
28985 +static const struct ata_port_operations nv_ck804_ops = {
28986 .inherits = &nv_generic_ops,
28987 .freeze = nv_ck804_freeze,
28988 .thaw = nv_ck804_thaw,
28989 .host_stop = nv_ck804_host_stop,
28990 };
28991
28992 -static struct ata_port_operations nv_adma_ops = {
28993 +static const struct ata_port_operations nv_adma_ops = {
28994 .inherits = &nv_ck804_ops,
28995
28996 .check_atapi_dma = nv_adma_check_atapi_dma,
28997 @@ -509,7 +509,7 @@ static struct ata_port_operations nv_adma_ops = {
28998 .host_stop = nv_adma_host_stop,
28999 };
29000
29001 -static struct ata_port_operations nv_swncq_ops = {
29002 +static const struct ata_port_operations nv_swncq_ops = {
29003 .inherits = &nv_generic_ops,
29004
29005 .qc_defer = ata_std_qc_defer,
29006 diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
29007 index 07d8d00..6cc70bb 100644
29008 --- a/drivers/ata/sata_promise.c
29009 +++ b/drivers/ata/sata_promise.c
29010 @@ -195,7 +195,7 @@ static const struct ata_port_operations pdc_common_ops = {
29011 .error_handler = pdc_error_handler,
29012 };
29013
29014 -static struct ata_port_operations pdc_sata_ops = {
29015 +static const struct ata_port_operations pdc_sata_ops = {
29016 .inherits = &pdc_common_ops,
29017 .cable_detect = pdc_sata_cable_detect,
29018 .freeze = pdc_sata_freeze,
29019 @@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sata_ops = {
29020
29021 /* First-generation chips need a more restrictive ->check_atapi_dma op,
29022 and ->freeze/thaw that ignore the hotplug controls. */
29023 -static struct ata_port_operations pdc_old_sata_ops = {
29024 +static const struct ata_port_operations pdc_old_sata_ops = {
29025 .inherits = &pdc_sata_ops,
29026 .freeze = pdc_freeze,
29027 .thaw = pdc_thaw,
29028 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
29029 };
29030
29031 -static struct ata_port_operations pdc_pata_ops = {
29032 +static const struct ata_port_operations pdc_pata_ops = {
29033 .inherits = &pdc_common_ops,
29034 .cable_detect = pdc_pata_cable_detect,
29035 .freeze = pdc_freeze,
29036 diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
29037 index 326c0cf..36ecebe 100644
29038 --- a/drivers/ata/sata_qstor.c
29039 +++ b/drivers/ata/sata_qstor.c
29040 @@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_sht = {
29041 .dma_boundary = QS_DMA_BOUNDARY,
29042 };
29043
29044 -static struct ata_port_operations qs_ata_ops = {
29045 +static const struct ata_port_operations qs_ata_ops = {
29046 .inherits = &ata_sff_port_ops,
29047
29048 .check_atapi_dma = qs_check_atapi_dma,
29049 diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
29050 index 3cb69d5..0871d3c 100644
29051 --- a/drivers/ata/sata_sil.c
29052 +++ b/drivers/ata/sata_sil.c
29053 @@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht = {
29054 .sg_tablesize = ATA_MAX_PRD
29055 };
29056
29057 -static struct ata_port_operations sil_ops = {
29058 +static const struct ata_port_operations sil_ops = {
29059 .inherits = &ata_bmdma32_port_ops,
29060 .dev_config = sil_dev_config,
29061 .set_mode = sil_set_mode,
29062 diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
29063 index e6946fc..eddb794 100644
29064 --- a/drivers/ata/sata_sil24.c
29065 +++ b/drivers/ata/sata_sil24.c
29066 @@ -388,7 +388,7 @@ static struct scsi_host_template sil24_sht = {
29067 .dma_boundary = ATA_DMA_BOUNDARY,
29068 };
29069
29070 -static struct ata_port_operations sil24_ops = {
29071 +static const struct ata_port_operations sil24_ops = {
29072 .inherits = &sata_pmp_port_ops,
29073
29074 .qc_defer = sil24_qc_defer,
29075 diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
29076 index f8a91bf..9cb06b6 100644
29077 --- a/drivers/ata/sata_sis.c
29078 +++ b/drivers/ata/sata_sis.c
29079 @@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht = {
29080 ATA_BMDMA_SHT(DRV_NAME),
29081 };
29082
29083 -static struct ata_port_operations sis_ops = {
29084 +static const struct ata_port_operations sis_ops = {
29085 .inherits = &ata_bmdma_port_ops,
29086 .scr_read = sis_scr_read,
29087 .scr_write = sis_scr_write,
29088 diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
29089 index 7257f2d..d04c6f5 100644
29090 --- a/drivers/ata/sata_svw.c
29091 +++ b/drivers/ata/sata_svw.c
29092 @@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata_sht = {
29093 };
29094
29095
29096 -static struct ata_port_operations k2_sata_ops = {
29097 +static const struct ata_port_operations k2_sata_ops = {
29098 .inherits = &ata_bmdma_port_ops,
29099 .sff_tf_load = k2_sata_tf_load,
29100 .sff_tf_read = k2_sata_tf_read,
29101 diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
29102 index bbcf970..cd0df0d 100644
29103 --- a/drivers/ata/sata_sx4.c
29104 +++ b/drivers/ata/sata_sx4.c
29105 @@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sata_sht = {
29106 };
29107
29108 /* TODO: inherit from base port_ops after converting to new EH */
29109 -static struct ata_port_operations pdc_20621_ops = {
29110 +static const struct ata_port_operations pdc_20621_ops = {
29111 .inherits = &ata_sff_port_ops,
29112
29113 .check_atapi_dma = pdc_check_atapi_dma,
29114 diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
29115 index e5bff47..089d859 100644
29116 --- a/drivers/ata/sata_uli.c
29117 +++ b/drivers/ata/sata_uli.c
29118 @@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht = {
29119 ATA_BMDMA_SHT(DRV_NAME),
29120 };
29121
29122 -static struct ata_port_operations uli_ops = {
29123 +static const struct ata_port_operations uli_ops = {
29124 .inherits = &ata_bmdma_port_ops,
29125 .scr_read = uli_scr_read,
29126 .scr_write = uli_scr_write,
29127 diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
29128 index f5dcca7..77b94eb 100644
29129 --- a/drivers/ata/sata_via.c
29130 +++ b/drivers/ata/sata_via.c
29131 @@ -115,32 +115,32 @@ static struct scsi_host_template svia_sht = {
29132 ATA_BMDMA_SHT(DRV_NAME),
29133 };
29134
29135 -static struct ata_port_operations svia_base_ops = {
29136 +static const struct ata_port_operations svia_base_ops = {
29137 .inherits = &ata_bmdma_port_ops,
29138 .sff_tf_load = svia_tf_load,
29139 };
29140
29141 -static struct ata_port_operations vt6420_sata_ops = {
29142 +static const struct ata_port_operations vt6420_sata_ops = {
29143 .inherits = &svia_base_ops,
29144 .freeze = svia_noop_freeze,
29145 .prereset = vt6420_prereset,
29146 .bmdma_start = vt6420_bmdma_start,
29147 };
29148
29149 -static struct ata_port_operations vt6421_pata_ops = {
29150 +static const struct ata_port_operations vt6421_pata_ops = {
29151 .inherits = &svia_base_ops,
29152 .cable_detect = vt6421_pata_cable_detect,
29153 .set_piomode = vt6421_set_pio_mode,
29154 .set_dmamode = vt6421_set_dma_mode,
29155 };
29156
29157 -static struct ata_port_operations vt6421_sata_ops = {
29158 +static const struct ata_port_operations vt6421_sata_ops = {
29159 .inherits = &svia_base_ops,
29160 .scr_read = svia_scr_read,
29161 .scr_write = svia_scr_write,
29162 };
29163
29164 -static struct ata_port_operations vt8251_ops = {
29165 +static const struct ata_port_operations vt8251_ops = {
29166 .inherits = &svia_base_ops,
29167 .hardreset = sata_std_hardreset,
29168 .scr_read = vt8251_scr_read,
29169 diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
29170 index 8b2a278..51e65d3 100644
29171 --- a/drivers/ata/sata_vsc.c
29172 +++ b/drivers/ata/sata_vsc.c
29173 @@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sata_sht = {
29174 };
29175
29176
29177 -static struct ata_port_operations vsc_sata_ops = {
29178 +static const struct ata_port_operations vsc_sata_ops = {
29179 .inherits = &ata_bmdma_port_ops,
29180 /* The IRQ handling is not quite standard SFF behaviour so we
29181 cannot use the default lost interrupt handler */
29182 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
29183 index 5effec6..7e4019a 100644
29184 --- a/drivers/atm/adummy.c
29185 +++ b/drivers/atm/adummy.c
29186 @@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
29187 vcc->pop(vcc, skb);
29188 else
29189 dev_kfree_skb_any(skb);
29190 - atomic_inc(&vcc->stats->tx);
29191 + atomic_inc_unchecked(&vcc->stats->tx);
29192
29193 return 0;
29194 }
29195 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
29196 index 66e1813..26a27c6 100644
29197 --- a/drivers/atm/ambassador.c
29198 +++ b/drivers/atm/ambassador.c
29199 @@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
29200 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
29201
29202 // VC layer stats
29203 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29204 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29205
29206 // free the descriptor
29207 kfree (tx_descr);
29208 @@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
29209 dump_skb ("<<<", vc, skb);
29210
29211 // VC layer stats
29212 - atomic_inc(&atm_vcc->stats->rx);
29213 + atomic_inc_unchecked(&atm_vcc->stats->rx);
29214 __net_timestamp(skb);
29215 // end of our responsability
29216 atm_vcc->push (atm_vcc, skb);
29217 @@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
29218 } else {
29219 PRINTK (KERN_INFO, "dropped over-size frame");
29220 // should we count this?
29221 - atomic_inc(&atm_vcc->stats->rx_drop);
29222 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29223 }
29224
29225 } else {
29226 @@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
29227 }
29228
29229 if (check_area (skb->data, skb->len)) {
29230 - atomic_inc(&atm_vcc->stats->tx_err);
29231 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
29232 return -ENOMEM; // ?
29233 }
29234
29235 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
29236 index 02ad83d..6daffeb 100644
29237 --- a/drivers/atm/atmtcp.c
29238 +++ b/drivers/atm/atmtcp.c
29239 @@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29240 if (vcc->pop) vcc->pop(vcc,skb);
29241 else dev_kfree_skb(skb);
29242 if (dev_data) return 0;
29243 - atomic_inc(&vcc->stats->tx_err);
29244 + atomic_inc_unchecked(&vcc->stats->tx_err);
29245 return -ENOLINK;
29246 }
29247 size = skb->len+sizeof(struct atmtcp_hdr);
29248 @@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29249 if (!new_skb) {
29250 if (vcc->pop) vcc->pop(vcc,skb);
29251 else dev_kfree_skb(skb);
29252 - atomic_inc(&vcc->stats->tx_err);
29253 + atomic_inc_unchecked(&vcc->stats->tx_err);
29254 return -ENOBUFS;
29255 }
29256 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
29257 @@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29258 if (vcc->pop) vcc->pop(vcc,skb);
29259 else dev_kfree_skb(skb);
29260 out_vcc->push(out_vcc,new_skb);
29261 - atomic_inc(&vcc->stats->tx);
29262 - atomic_inc(&out_vcc->stats->rx);
29263 + atomic_inc_unchecked(&vcc->stats->tx);
29264 + atomic_inc_unchecked(&out_vcc->stats->rx);
29265 return 0;
29266 }
29267
29268 @@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
29269 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
29270 read_unlock(&vcc_sklist_lock);
29271 if (!out_vcc) {
29272 - atomic_inc(&vcc->stats->tx_err);
29273 + atomic_inc_unchecked(&vcc->stats->tx_err);
29274 goto done;
29275 }
29276 skb_pull(skb,sizeof(struct atmtcp_hdr));
29277 @@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
29278 __net_timestamp(new_skb);
29279 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
29280 out_vcc->push(out_vcc,new_skb);
29281 - atomic_inc(&vcc->stats->tx);
29282 - atomic_inc(&out_vcc->stats->rx);
29283 + atomic_inc_unchecked(&vcc->stats->tx);
29284 + atomic_inc_unchecked(&out_vcc->stats->rx);
29285 done:
29286 if (vcc->pop) vcc->pop(vcc,skb);
29287 else dev_kfree_skb(skb);
29288 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
29289 index 0c30261..3da356e 100644
29290 --- a/drivers/atm/eni.c
29291 +++ b/drivers/atm/eni.c
29292 @@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
29293 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
29294 vcc->dev->number);
29295 length = 0;
29296 - atomic_inc(&vcc->stats->rx_err);
29297 + atomic_inc_unchecked(&vcc->stats->rx_err);
29298 }
29299 else {
29300 length = ATM_CELL_SIZE-1; /* no HEC */
29301 @@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
29302 size);
29303 }
29304 eff = length = 0;
29305 - atomic_inc(&vcc->stats->rx_err);
29306 + atomic_inc_unchecked(&vcc->stats->rx_err);
29307 }
29308 else {
29309 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
29310 @@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
29311 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
29312 vcc->dev->number,vcc->vci,length,size << 2,descr);
29313 length = eff = 0;
29314 - atomic_inc(&vcc->stats->rx_err);
29315 + atomic_inc_unchecked(&vcc->stats->rx_err);
29316 }
29317 }
29318 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
29319 @@ -770,7 +770,7 @@ rx_dequeued++;
29320 vcc->push(vcc,skb);
29321 pushed++;
29322 }
29323 - atomic_inc(&vcc->stats->rx);
29324 + atomic_inc_unchecked(&vcc->stats->rx);
29325 }
29326 wake_up(&eni_dev->rx_wait);
29327 }
29328 @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
29329 PCI_DMA_TODEVICE);
29330 if (vcc->pop) vcc->pop(vcc,skb);
29331 else dev_kfree_skb_irq(skb);
29332 - atomic_inc(&vcc->stats->tx);
29333 + atomic_inc_unchecked(&vcc->stats->tx);
29334 wake_up(&eni_dev->tx_wait);
29335 dma_complete++;
29336 }
29337 @@ -1570,7 +1570,7 @@ tx_complete++;
29338 /*--------------------------------- entries ---------------------------------*/
29339
29340
29341 -static const char *media_name[] __devinitdata = {
29342 +static const char *media_name[] __devinitconst = {
29343 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
29344 "UTP", "05?", "06?", "07?", /* 4- 7 */
29345 "TAXI","09?", "10?", "11?", /* 8-11 */
29346 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
29347 index cd5049a..a51209f 100644
29348 --- a/drivers/atm/firestream.c
29349 +++ b/drivers/atm/firestream.c
29350 @@ -748,7 +748,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
29351 }
29352 }
29353
29354 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29355 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29356
29357 fs_dprintk (FS_DEBUG_TXMEM, "i");
29358 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
29359 @@ -815,7 +815,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
29360 #endif
29361 skb_put (skb, qe->p1 & 0xffff);
29362 ATM_SKB(skb)->vcc = atm_vcc;
29363 - atomic_inc(&atm_vcc->stats->rx);
29364 + atomic_inc_unchecked(&atm_vcc->stats->rx);
29365 __net_timestamp(skb);
29366 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
29367 atm_vcc->push (atm_vcc, skb);
29368 @@ -836,12 +836,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
29369 kfree (pe);
29370 }
29371 if (atm_vcc)
29372 - atomic_inc(&atm_vcc->stats->rx_drop);
29373 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29374 break;
29375 case 0x1f: /* Reassembly abort: no buffers. */
29376 /* Silently increment error counter. */
29377 if (atm_vcc)
29378 - atomic_inc(&atm_vcc->stats->rx_drop);
29379 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29380 break;
29381 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
29382 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
29383 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
29384 index f766cc4..a34002e 100644
29385 --- a/drivers/atm/fore200e.c
29386 +++ b/drivers/atm/fore200e.c
29387 @@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
29388 #endif
29389 /* check error condition */
29390 if (*entry->status & STATUS_ERROR)
29391 - atomic_inc(&vcc->stats->tx_err);
29392 + atomic_inc_unchecked(&vcc->stats->tx_err);
29393 else
29394 - atomic_inc(&vcc->stats->tx);
29395 + atomic_inc_unchecked(&vcc->stats->tx);
29396 }
29397 }
29398
29399 @@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
29400 if (skb == NULL) {
29401 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
29402
29403 - atomic_inc(&vcc->stats->rx_drop);
29404 + atomic_inc_unchecked(&vcc->stats->rx_drop);
29405 return -ENOMEM;
29406 }
29407
29408 @@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
29409
29410 dev_kfree_skb_any(skb);
29411
29412 - atomic_inc(&vcc->stats->rx_drop);
29413 + atomic_inc_unchecked(&vcc->stats->rx_drop);
29414 return -ENOMEM;
29415 }
29416
29417 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
29418
29419 vcc->push(vcc, skb);
29420 - atomic_inc(&vcc->stats->rx);
29421 + atomic_inc_unchecked(&vcc->stats->rx);
29422
29423 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
29424
29425 @@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
29426 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
29427 fore200e->atm_dev->number,
29428 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
29429 - atomic_inc(&vcc->stats->rx_err);
29430 + atomic_inc_unchecked(&vcc->stats->rx_err);
29431 }
29432 }
29433
29434 @@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
29435 goto retry_here;
29436 }
29437
29438 - atomic_inc(&vcc->stats->tx_err);
29439 + atomic_inc_unchecked(&vcc->stats->tx_err);
29440
29441 fore200e->tx_sat++;
29442 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
29443 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
29444 index 7066703..2b130de 100644
29445 --- a/drivers/atm/he.c
29446 +++ b/drivers/atm/he.c
29447 @@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29448
29449 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
29450 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
29451 - atomic_inc(&vcc->stats->rx_drop);
29452 + atomic_inc_unchecked(&vcc->stats->rx_drop);
29453 goto return_host_buffers;
29454 }
29455
29456 @@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29457 RBRQ_LEN_ERR(he_dev->rbrq_head)
29458 ? "LEN_ERR" : "",
29459 vcc->vpi, vcc->vci);
29460 - atomic_inc(&vcc->stats->rx_err);
29461 + atomic_inc_unchecked(&vcc->stats->rx_err);
29462 goto return_host_buffers;
29463 }
29464
29465 @@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29466 vcc->push(vcc, skb);
29467 spin_lock(&he_dev->global_lock);
29468
29469 - atomic_inc(&vcc->stats->rx);
29470 + atomic_inc_unchecked(&vcc->stats->rx);
29471
29472 return_host_buffers:
29473 ++pdus_assembled;
29474 @@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
29475 tpd->vcc->pop(tpd->vcc, tpd->skb);
29476 else
29477 dev_kfree_skb_any(tpd->skb);
29478 - atomic_inc(&tpd->vcc->stats->tx_err);
29479 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
29480 }
29481 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
29482 return;
29483 @@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29484 vcc->pop(vcc, skb);
29485 else
29486 dev_kfree_skb_any(skb);
29487 - atomic_inc(&vcc->stats->tx_err);
29488 + atomic_inc_unchecked(&vcc->stats->tx_err);
29489 return -EINVAL;
29490 }
29491
29492 @@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29493 vcc->pop(vcc, skb);
29494 else
29495 dev_kfree_skb_any(skb);
29496 - atomic_inc(&vcc->stats->tx_err);
29497 + atomic_inc_unchecked(&vcc->stats->tx_err);
29498 return -EINVAL;
29499 }
29500 #endif
29501 @@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29502 vcc->pop(vcc, skb);
29503 else
29504 dev_kfree_skb_any(skb);
29505 - atomic_inc(&vcc->stats->tx_err);
29506 + atomic_inc_unchecked(&vcc->stats->tx_err);
29507 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29508 return -ENOMEM;
29509 }
29510 @@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29511 vcc->pop(vcc, skb);
29512 else
29513 dev_kfree_skb_any(skb);
29514 - atomic_inc(&vcc->stats->tx_err);
29515 + atomic_inc_unchecked(&vcc->stats->tx_err);
29516 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29517 return -ENOMEM;
29518 }
29519 @@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29520 __enqueue_tpd(he_dev, tpd, cid);
29521 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29522
29523 - atomic_inc(&vcc->stats->tx);
29524 + atomic_inc_unchecked(&vcc->stats->tx);
29525
29526 return 0;
29527 }
29528 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
29529 index 4e49021..01b1512 100644
29530 --- a/drivers/atm/horizon.c
29531 +++ b/drivers/atm/horizon.c
29532 @@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
29533 {
29534 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
29535 // VC layer stats
29536 - atomic_inc(&vcc->stats->rx);
29537 + atomic_inc_unchecked(&vcc->stats->rx);
29538 __net_timestamp(skb);
29539 // end of our responsability
29540 vcc->push (vcc, skb);
29541 @@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
29542 dev->tx_iovec = NULL;
29543
29544 // VC layer stats
29545 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29546 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29547
29548 // free the skb
29549 hrz_kfree_skb (skb);
29550 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
29551 index e33ae00..9deb4ab 100644
29552 --- a/drivers/atm/idt77252.c
29553 +++ b/drivers/atm/idt77252.c
29554 @@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
29555 else
29556 dev_kfree_skb(skb);
29557
29558 - atomic_inc(&vcc->stats->tx);
29559 + atomic_inc_unchecked(&vcc->stats->tx);
29560 }
29561
29562 atomic_dec(&scq->used);
29563 @@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29564 if ((sb = dev_alloc_skb(64)) == NULL) {
29565 printk("%s: Can't allocate buffers for aal0.\n",
29566 card->name);
29567 - atomic_add(i, &vcc->stats->rx_drop);
29568 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
29569 break;
29570 }
29571 if (!atm_charge(vcc, sb->truesize)) {
29572 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
29573 card->name);
29574 - atomic_add(i - 1, &vcc->stats->rx_drop);
29575 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
29576 dev_kfree_skb(sb);
29577 break;
29578 }
29579 @@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29580 ATM_SKB(sb)->vcc = vcc;
29581 __net_timestamp(sb);
29582 vcc->push(vcc, sb);
29583 - atomic_inc(&vcc->stats->rx);
29584 + atomic_inc_unchecked(&vcc->stats->rx);
29585
29586 cell += ATM_CELL_PAYLOAD;
29587 }
29588 @@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29589 "(CDC: %08x)\n",
29590 card->name, len, rpp->len, readl(SAR_REG_CDC));
29591 recycle_rx_pool_skb(card, rpp);
29592 - atomic_inc(&vcc->stats->rx_err);
29593 + atomic_inc_unchecked(&vcc->stats->rx_err);
29594 return;
29595 }
29596 if (stat & SAR_RSQE_CRC) {
29597 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
29598 recycle_rx_pool_skb(card, rpp);
29599 - atomic_inc(&vcc->stats->rx_err);
29600 + atomic_inc_unchecked(&vcc->stats->rx_err);
29601 return;
29602 }
29603 if (skb_queue_len(&rpp->queue) > 1) {
29604 @@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29605 RXPRINTK("%s: Can't alloc RX skb.\n",
29606 card->name);
29607 recycle_rx_pool_skb(card, rpp);
29608 - atomic_inc(&vcc->stats->rx_err);
29609 + atomic_inc_unchecked(&vcc->stats->rx_err);
29610 return;
29611 }
29612 if (!atm_charge(vcc, skb->truesize)) {
29613 @@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29614 __net_timestamp(skb);
29615
29616 vcc->push(vcc, skb);
29617 - atomic_inc(&vcc->stats->rx);
29618 + atomic_inc_unchecked(&vcc->stats->rx);
29619
29620 return;
29621 }
29622 @@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29623 __net_timestamp(skb);
29624
29625 vcc->push(vcc, skb);
29626 - atomic_inc(&vcc->stats->rx);
29627 + atomic_inc_unchecked(&vcc->stats->rx);
29628
29629 if (skb->truesize > SAR_FB_SIZE_3)
29630 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
29631 @@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
29632 if (vcc->qos.aal != ATM_AAL0) {
29633 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
29634 card->name, vpi, vci);
29635 - atomic_inc(&vcc->stats->rx_drop);
29636 + atomic_inc_unchecked(&vcc->stats->rx_drop);
29637 goto drop;
29638 }
29639
29640 if ((sb = dev_alloc_skb(64)) == NULL) {
29641 printk("%s: Can't allocate buffers for AAL0.\n",
29642 card->name);
29643 - atomic_inc(&vcc->stats->rx_err);
29644 + atomic_inc_unchecked(&vcc->stats->rx_err);
29645 goto drop;
29646 }
29647
29648 @@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
29649 ATM_SKB(sb)->vcc = vcc;
29650 __net_timestamp(sb);
29651 vcc->push(vcc, sb);
29652 - atomic_inc(&vcc->stats->rx);
29653 + atomic_inc_unchecked(&vcc->stats->rx);
29654
29655 drop:
29656 skb_pull(queue, 64);
29657 @@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
29658
29659 if (vc == NULL) {
29660 printk("%s: NULL connection in send().\n", card->name);
29661 - atomic_inc(&vcc->stats->tx_err);
29662 + atomic_inc_unchecked(&vcc->stats->tx_err);
29663 dev_kfree_skb(skb);
29664 return -EINVAL;
29665 }
29666 if (!test_bit(VCF_TX, &vc->flags)) {
29667 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
29668 - atomic_inc(&vcc->stats->tx_err);
29669 + atomic_inc_unchecked(&vcc->stats->tx_err);
29670 dev_kfree_skb(skb);
29671 return -EINVAL;
29672 }
29673 @@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
29674 break;
29675 default:
29676 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
29677 - atomic_inc(&vcc->stats->tx_err);
29678 + atomic_inc_unchecked(&vcc->stats->tx_err);
29679 dev_kfree_skb(skb);
29680 return -EINVAL;
29681 }
29682
29683 if (skb_shinfo(skb)->nr_frags != 0) {
29684 printk("%s: No scatter-gather yet.\n", card->name);
29685 - atomic_inc(&vcc->stats->tx_err);
29686 + atomic_inc_unchecked(&vcc->stats->tx_err);
29687 dev_kfree_skb(skb);
29688 return -EINVAL;
29689 }
29690 @@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
29691
29692 err = queue_skb(card, vc, skb, oam);
29693 if (err) {
29694 - atomic_inc(&vcc->stats->tx_err);
29695 + atomic_inc_unchecked(&vcc->stats->tx_err);
29696 dev_kfree_skb(skb);
29697 return err;
29698 }
29699 @@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
29700 skb = dev_alloc_skb(64);
29701 if (!skb) {
29702 printk("%s: Out of memory in send_oam().\n", card->name);
29703 - atomic_inc(&vcc->stats->tx_err);
29704 + atomic_inc_unchecked(&vcc->stats->tx_err);
29705 return -ENOMEM;
29706 }
29707 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
29708 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
29709 index b2c1b37..faa672b 100644
29710 --- a/drivers/atm/iphase.c
29711 +++ b/drivers/atm/iphase.c
29712 @@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
29713 status = (u_short) (buf_desc_ptr->desc_mode);
29714 if (status & (RX_CER | RX_PTE | RX_OFL))
29715 {
29716 - atomic_inc(&vcc->stats->rx_err);
29717 + atomic_inc_unchecked(&vcc->stats->rx_err);
29718 IF_ERR(printk("IA: bad packet, dropping it");)
29719 if (status & RX_CER) {
29720 IF_ERR(printk(" cause: packet CRC error\n");)
29721 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
29722 len = dma_addr - buf_addr;
29723 if (len > iadev->rx_buf_sz) {
29724 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
29725 - atomic_inc(&vcc->stats->rx_err);
29726 + atomic_inc_unchecked(&vcc->stats->rx_err);
29727 goto out_free_desc;
29728 }
29729
29730 @@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *dev)
29731 ia_vcc = INPH_IA_VCC(vcc);
29732 if (ia_vcc == NULL)
29733 {
29734 - atomic_inc(&vcc->stats->rx_err);
29735 + atomic_inc_unchecked(&vcc->stats->rx_err);
29736 dev_kfree_skb_any(skb);
29737 atm_return(vcc, atm_guess_pdu2truesize(len));
29738 goto INCR_DLE;
29739 @@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *dev)
29740 if ((length > iadev->rx_buf_sz) || (length >
29741 (skb->len - sizeof(struct cpcs_trailer))))
29742 {
29743 - atomic_inc(&vcc->stats->rx_err);
29744 + atomic_inc_unchecked(&vcc->stats->rx_err);
29745 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
29746 length, skb->len);)
29747 dev_kfree_skb_any(skb);
29748 @@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *dev)
29749
29750 IF_RX(printk("rx_dle_intr: skb push");)
29751 vcc->push(vcc,skb);
29752 - atomic_inc(&vcc->stats->rx);
29753 + atomic_inc_unchecked(&vcc->stats->rx);
29754 iadev->rx_pkt_cnt++;
29755 }
29756 INCR_DLE:
29757 @@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
29758 {
29759 struct k_sonet_stats *stats;
29760 stats = &PRIV(_ia_dev[board])->sonet_stats;
29761 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
29762 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
29763 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
29764 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
29765 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
29766 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
29767 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
29768 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
29769 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
29770 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
29771 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
29772 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
29773 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
29774 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
29775 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
29776 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
29777 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
29778 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
29779 }
29780 ia_cmds.status = 0;
29781 break;
29782 @@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
29783 if ((desc == 0) || (desc > iadev->num_tx_desc))
29784 {
29785 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
29786 - atomic_inc(&vcc->stats->tx);
29787 + atomic_inc_unchecked(&vcc->stats->tx);
29788 if (vcc->pop)
29789 vcc->pop(vcc, skb);
29790 else
29791 @@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
29792 ATM_DESC(skb) = vcc->vci;
29793 skb_queue_tail(&iadev->tx_dma_q, skb);
29794
29795 - atomic_inc(&vcc->stats->tx);
29796 + atomic_inc_unchecked(&vcc->stats->tx);
29797 iadev->tx_pkt_cnt++;
29798 /* Increment transaction counter */
29799 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
29800
29801 #if 0
29802 /* add flow control logic */
29803 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
29804 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
29805 if (iavcc->vc_desc_cnt > 10) {
29806 vcc->tx_quota = vcc->tx_quota * 3 / 4;
29807 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
29808 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
29809 index cf97c34..8d30655 100644
29810 --- a/drivers/atm/lanai.c
29811 +++ b/drivers/atm/lanai.c
29812 @@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
29813 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
29814 lanai_endtx(lanai, lvcc);
29815 lanai_free_skb(lvcc->tx.atmvcc, skb);
29816 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
29817 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
29818 }
29819
29820 /* Try to fill the buffer - don't call unless there is backlog */
29821 @@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
29822 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
29823 __net_timestamp(skb);
29824 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
29825 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
29826 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
29827 out:
29828 lvcc->rx.buf.ptr = end;
29829 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
29830 @@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
29831 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
29832 "vcc %d\n", lanai->number, (unsigned int) s, vci);
29833 lanai->stats.service_rxnotaal5++;
29834 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
29835 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
29836 return 0;
29837 }
29838 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
29839 @@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
29840 int bytes;
29841 read_unlock(&vcc_sklist_lock);
29842 DPRINTK("got trashed rx pdu on vci %d\n", vci);
29843 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
29844 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
29845 lvcc->stats.x.aal5.service_trash++;
29846 bytes = (SERVICE_GET_END(s) * 16) -
29847 (((unsigned long) lvcc->rx.buf.ptr) -
29848 @@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
29849 }
29850 if (s & SERVICE_STREAM) {
29851 read_unlock(&vcc_sklist_lock);
29852 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
29853 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
29854 lvcc->stats.x.aal5.service_stream++;
29855 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
29856 "PDU on VCI %d!\n", lanai->number, vci);
29857 @@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
29858 return 0;
29859 }
29860 DPRINTK("got rx crc error on vci %d\n", vci);
29861 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
29862 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
29863 lvcc->stats.x.aal5.service_rxcrc++;
29864 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
29865 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
29866 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
29867 index 3da804b..d3b0eed 100644
29868 --- a/drivers/atm/nicstar.c
29869 +++ b/drivers/atm/nicstar.c
29870 @@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
29871 if ((vc = (vc_map *) vcc->dev_data) == NULL)
29872 {
29873 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
29874 - atomic_inc(&vcc->stats->tx_err);
29875 + atomic_inc_unchecked(&vcc->stats->tx_err);
29876 dev_kfree_skb_any(skb);
29877 return -EINVAL;
29878 }
29879 @@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
29880 if (!vc->tx)
29881 {
29882 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
29883 - atomic_inc(&vcc->stats->tx_err);
29884 + atomic_inc_unchecked(&vcc->stats->tx_err);
29885 dev_kfree_skb_any(skb);
29886 return -EINVAL;
29887 }
29888 @@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
29889 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
29890 {
29891 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
29892 - atomic_inc(&vcc->stats->tx_err);
29893 + atomic_inc_unchecked(&vcc->stats->tx_err);
29894 dev_kfree_skb_any(skb);
29895 return -EINVAL;
29896 }
29897 @@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
29898 if (skb_shinfo(skb)->nr_frags != 0)
29899 {
29900 printk("nicstar%d: No scatter-gather yet.\n", card->index);
29901 - atomic_inc(&vcc->stats->tx_err);
29902 + atomic_inc_unchecked(&vcc->stats->tx_err);
29903 dev_kfree_skb_any(skb);
29904 return -EINVAL;
29905 }
29906 @@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
29907
29908 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
29909 {
29910 - atomic_inc(&vcc->stats->tx_err);
29911 + atomic_inc_unchecked(&vcc->stats->tx_err);
29912 dev_kfree_skb_any(skb);
29913 return -EIO;
29914 }
29915 - atomic_inc(&vcc->stats->tx);
29916 + atomic_inc_unchecked(&vcc->stats->tx);
29917
29918 return 0;
29919 }
29920 @@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29921 {
29922 printk("nicstar%d: Can't allocate buffers for aal0.\n",
29923 card->index);
29924 - atomic_add(i,&vcc->stats->rx_drop);
29925 + atomic_add_unchecked(i,&vcc->stats->rx_drop);
29926 break;
29927 }
29928 if (!atm_charge(vcc, sb->truesize))
29929 {
29930 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
29931 card->index);
29932 - atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
29933 + atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
29934 dev_kfree_skb_any(sb);
29935 break;
29936 }
29937 @@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29938 ATM_SKB(sb)->vcc = vcc;
29939 __net_timestamp(sb);
29940 vcc->push(vcc, sb);
29941 - atomic_inc(&vcc->stats->rx);
29942 + atomic_inc_unchecked(&vcc->stats->rx);
29943 cell += ATM_CELL_PAYLOAD;
29944 }
29945
29946 @@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29947 if (iovb == NULL)
29948 {
29949 printk("nicstar%d: Out of iovec buffers.\n", card->index);
29950 - atomic_inc(&vcc->stats->rx_drop);
29951 + atomic_inc_unchecked(&vcc->stats->rx_drop);
29952 recycle_rx_buf(card, skb);
29953 return;
29954 }
29955 @@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29956 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
29957 {
29958 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
29959 - atomic_inc(&vcc->stats->rx_err);
29960 + atomic_inc_unchecked(&vcc->stats->rx_err);
29961 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
29962 NS_SKB(iovb)->iovcnt = 0;
29963 iovb->len = 0;
29964 @@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29965 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
29966 card->index);
29967 which_list(card, skb);
29968 - atomic_inc(&vcc->stats->rx_err);
29969 + atomic_inc_unchecked(&vcc->stats->rx_err);
29970 recycle_rx_buf(card, skb);
29971 vc->rx_iov = NULL;
29972 recycle_iov_buf(card, iovb);
29973 @@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29974 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
29975 card->index);
29976 which_list(card, skb);
29977 - atomic_inc(&vcc->stats->rx_err);
29978 + atomic_inc_unchecked(&vcc->stats->rx_err);
29979 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
29980 NS_SKB(iovb)->iovcnt);
29981 vc->rx_iov = NULL;
29982 @@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29983 printk(" - PDU size mismatch.\n");
29984 else
29985 printk(".\n");
29986 - atomic_inc(&vcc->stats->rx_err);
29987 + atomic_inc_unchecked(&vcc->stats->rx_err);
29988 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
29989 NS_SKB(iovb)->iovcnt);
29990 vc->rx_iov = NULL;
29991 @@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
29992 if (!atm_charge(vcc, skb->truesize))
29993 {
29994 push_rxbufs(card, skb);
29995 - atomic_inc(&vcc->stats->rx_drop);
29996 + atomic_inc_unchecked(&vcc->stats->rx_drop);
29997 }
29998 else
29999 {
30000 @@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30001 ATM_SKB(skb)->vcc = vcc;
30002 __net_timestamp(skb);
30003 vcc->push(vcc, skb);
30004 - atomic_inc(&vcc->stats->rx);
30005 + atomic_inc_unchecked(&vcc->stats->rx);
30006 }
30007 }
30008 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
30009 @@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30010 if (!atm_charge(vcc, sb->truesize))
30011 {
30012 push_rxbufs(card, sb);
30013 - atomic_inc(&vcc->stats->rx_drop);
30014 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30015 }
30016 else
30017 {
30018 @@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30019 ATM_SKB(sb)->vcc = vcc;
30020 __net_timestamp(sb);
30021 vcc->push(vcc, sb);
30022 - atomic_inc(&vcc->stats->rx);
30023 + atomic_inc_unchecked(&vcc->stats->rx);
30024 }
30025
30026 push_rxbufs(card, skb);
30027 @@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30028 if (!atm_charge(vcc, skb->truesize))
30029 {
30030 push_rxbufs(card, skb);
30031 - atomic_inc(&vcc->stats->rx_drop);
30032 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30033 }
30034 else
30035 {
30036 @@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30037 ATM_SKB(skb)->vcc = vcc;
30038 __net_timestamp(skb);
30039 vcc->push(vcc, skb);
30040 - atomic_inc(&vcc->stats->rx);
30041 + atomic_inc_unchecked(&vcc->stats->rx);
30042 }
30043
30044 push_rxbufs(card, sb);
30045 @@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30046 if (hb == NULL)
30047 {
30048 printk("nicstar%d: Out of huge buffers.\n", card->index);
30049 - atomic_inc(&vcc->stats->rx_drop);
30050 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30051 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30052 NS_SKB(iovb)->iovcnt);
30053 vc->rx_iov = NULL;
30054 @@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30055 }
30056 else
30057 dev_kfree_skb_any(hb);
30058 - atomic_inc(&vcc->stats->rx_drop);
30059 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30060 }
30061 else
30062 {
30063 @@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30064 #endif /* NS_USE_DESTRUCTORS */
30065 __net_timestamp(hb);
30066 vcc->push(vcc, hb);
30067 - atomic_inc(&vcc->stats->rx);
30068 + atomic_inc_unchecked(&vcc->stats->rx);
30069 }
30070 }
30071
30072 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
30073 index 84c93ff..e6ed269 100644
30074 --- a/drivers/atm/solos-pci.c
30075 +++ b/drivers/atm/solos-pci.c
30076 @@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
30077 }
30078 atm_charge(vcc, skb->truesize);
30079 vcc->push(vcc, skb);
30080 - atomic_inc(&vcc->stats->rx);
30081 + atomic_inc_unchecked(&vcc->stats->rx);
30082 break;
30083
30084 case PKT_STATUS:
30085 @@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *buf)
30086 char msg[500];
30087 char item[10];
30088
30089 + pax_track_stack();
30090 +
30091 len = buf->len;
30092 for (i = 0; i < len; i++){
30093 if(i % 8 == 0)
30094 @@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_card *card)
30095 vcc = SKB_CB(oldskb)->vcc;
30096
30097 if (vcc) {
30098 - atomic_inc(&vcc->stats->tx);
30099 + atomic_inc_unchecked(&vcc->stats->tx);
30100 solos_pop(vcc, oldskb);
30101 } else
30102 dev_kfree_skb_irq(oldskb);
30103 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
30104 index 6dd3f59..ee377f3 100644
30105 --- a/drivers/atm/suni.c
30106 +++ b/drivers/atm/suni.c
30107 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
30108
30109
30110 #define ADD_LIMITED(s,v) \
30111 - atomic_add((v),&stats->s); \
30112 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
30113 + atomic_add_unchecked((v),&stats->s); \
30114 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
30115
30116
30117 static void suni_hz(unsigned long from_timer)
30118 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
30119 index fc8cb07..4a80e53 100644
30120 --- a/drivers/atm/uPD98402.c
30121 +++ b/drivers/atm/uPD98402.c
30122 @@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
30123 struct sonet_stats tmp;
30124 int error = 0;
30125
30126 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30127 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30128 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
30129 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
30130 if (zero && !error) {
30131 @@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
30132
30133
30134 #define ADD_LIMITED(s,v) \
30135 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
30136 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
30137 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30138 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
30139 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
30140 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30141
30142
30143 static void stat_event(struct atm_dev *dev)
30144 @@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev *dev)
30145 if (reason & uPD98402_INT_PFM) stat_event(dev);
30146 if (reason & uPD98402_INT_PCO) {
30147 (void) GET(PCOCR); /* clear interrupt cause */
30148 - atomic_add(GET(HECCT),
30149 + atomic_add_unchecked(GET(HECCT),
30150 &PRIV(dev)->sonet_stats.uncorr_hcs);
30151 }
30152 if ((reason & uPD98402_INT_RFO) &&
30153 @@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev *dev)
30154 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
30155 uPD98402_INT_LOS),PIMR); /* enable them */
30156 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
30157 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30158 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
30159 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
30160 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30161 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
30162 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
30163 return 0;
30164 }
30165
30166 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
30167 index 2e9635b..32927b4 100644
30168 --- a/drivers/atm/zatm.c
30169 +++ b/drivers/atm/zatm.c
30170 @@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30171 }
30172 if (!size) {
30173 dev_kfree_skb_irq(skb);
30174 - if (vcc) atomic_inc(&vcc->stats->rx_err);
30175 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
30176 continue;
30177 }
30178 if (!atm_charge(vcc,skb->truesize)) {
30179 @@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30180 skb->len = size;
30181 ATM_SKB(skb)->vcc = vcc;
30182 vcc->push(vcc,skb);
30183 - atomic_inc(&vcc->stats->rx);
30184 + atomic_inc_unchecked(&vcc->stats->rx);
30185 }
30186 zout(pos & 0xffff,MTA(mbx));
30187 #if 0 /* probably a stupid idea */
30188 @@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
30189 skb_queue_head(&zatm_vcc->backlog,skb);
30190 break;
30191 }
30192 - atomic_inc(&vcc->stats->tx);
30193 + atomic_inc_unchecked(&vcc->stats->tx);
30194 wake_up(&zatm_vcc->tx_wait);
30195 }
30196
30197 diff --git a/drivers/base/bus.c b/drivers/base/bus.c
30198 index 63c143e..fece183 100644
30199 --- a/drivers/base/bus.c
30200 +++ b/drivers/base/bus.c
30201 @@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kobject *kobj, struct attribute *attr,
30202 return ret;
30203 }
30204
30205 -static struct sysfs_ops driver_sysfs_ops = {
30206 +static const struct sysfs_ops driver_sysfs_ops = {
30207 .show = drv_attr_show,
30208 .store = drv_attr_store,
30209 };
30210 @@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
30211 return ret;
30212 }
30213
30214 -static struct sysfs_ops bus_sysfs_ops = {
30215 +static const struct sysfs_ops bus_sysfs_ops = {
30216 .show = bus_attr_show,
30217 .store = bus_attr_store,
30218 };
30219 @@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset *kset, struct kobject *kobj)
30220 return 0;
30221 }
30222
30223 -static struct kset_uevent_ops bus_uevent_ops = {
30224 +static const struct kset_uevent_ops bus_uevent_ops = {
30225 .filter = bus_uevent_filter,
30226 };
30227
30228 diff --git a/drivers/base/class.c b/drivers/base/class.c
30229 index 6e2c3b0..cb61871 100644
30230 --- a/drivers/base/class.c
30231 +++ b/drivers/base/class.c
30232 @@ -63,7 +63,7 @@ static void class_release(struct kobject *kobj)
30233 kfree(cp);
30234 }
30235
30236 -static struct sysfs_ops class_sysfs_ops = {
30237 +static const struct sysfs_ops class_sysfs_ops = {
30238 .show = class_attr_show,
30239 .store = class_attr_store,
30240 };
30241 diff --git a/drivers/base/core.c b/drivers/base/core.c
30242 index f33d768..a9358d0 100644
30243 --- a/drivers/base/core.c
30244 +++ b/drivers/base/core.c
30245 @@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
30246 return ret;
30247 }
30248
30249 -static struct sysfs_ops dev_sysfs_ops = {
30250 +static const struct sysfs_ops dev_sysfs_ops = {
30251 .show = dev_attr_show,
30252 .store = dev_attr_store,
30253 };
30254 @@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
30255 return retval;
30256 }
30257
30258 -static struct kset_uevent_ops device_uevent_ops = {
30259 +static const struct kset_uevent_ops device_uevent_ops = {
30260 .filter = dev_uevent_filter,
30261 .name = dev_uevent_name,
30262 .uevent = dev_uevent,
30263 diff --git a/drivers/base/memory.c b/drivers/base/memory.c
30264 index 989429c..2272b00 100644
30265 --- a/drivers/base/memory.c
30266 +++ b/drivers/base/memory.c
30267 @@ -44,7 +44,7 @@ static int memory_uevent(struct kset *kset, struct kobject *obj, struct kobj_uev
30268 return retval;
30269 }
30270
30271 -static struct kset_uevent_ops memory_uevent_ops = {
30272 +static const struct kset_uevent_ops memory_uevent_ops = {
30273 .name = memory_uevent_name,
30274 .uevent = memory_uevent,
30275 };
30276 diff --git a/drivers/base/sys.c b/drivers/base/sys.c
30277 index 3f202f7..61c4a6f 100644
30278 --- a/drivers/base/sys.c
30279 +++ b/drivers/base/sys.c
30280 @@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struct attribute *attr,
30281 return -EIO;
30282 }
30283
30284 -static struct sysfs_ops sysfs_ops = {
30285 +static const struct sysfs_ops sysfs_ops = {
30286 .show = sysdev_show,
30287 .store = sysdev_store,
30288 };
30289 @@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct kobject *kobj, struct attribute *attr,
30290 return -EIO;
30291 }
30292
30293 -static struct sysfs_ops sysfs_class_ops = {
30294 +static const struct sysfs_ops sysfs_class_ops = {
30295 .show = sysdev_class_show,
30296 .store = sysdev_class_store,
30297 };
30298 diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
30299 index eb4fa19..1954777 100644
30300 --- a/drivers/block/DAC960.c
30301 +++ b/drivers/block/DAC960.c
30302 @@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfiguration(DAC960_Controller_T
30303 unsigned long flags;
30304 int Channel, TargetID;
30305
30306 + pax_track_stack();
30307 +
30308 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
30309 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
30310 sizeof(DAC960_SCSI_Inquiry_T) +
30311 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
30312 index ca9c548..ca6899c 100644
30313 --- a/drivers/block/cciss.c
30314 +++ b/drivers/block/cciss.c
30315 @@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
30316 int err;
30317 u32 cp;
30318
30319 + memset(&arg64, 0, sizeof(arg64));
30320 +
30321 err = 0;
30322 err |=
30323 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
30324 @@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ctlr)
30325 /* Wait (up to 20 seconds) for a command to complete */
30326
30327 for (i = 20 * HZ; i > 0; i--) {
30328 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
30329 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
30330 if (done == FIFO_EMPTY)
30331 schedule_timeout_uninterruptible(1);
30332 else
30333 @@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h, CommandList_struct *c)
30334 resend_cmd1:
30335
30336 /* Disable interrupt on the board. */
30337 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
30338 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
30339
30340 /* Make sure there is room in the command FIFO */
30341 /* Actually it should be completely empty at this time */
30342 @@ -2884,13 +2886,13 @@ resend_cmd1:
30343 /* tape side of the driver. */
30344 for (i = 200000; i > 0; i--) {
30345 /* if fifo isn't full go */
30346 - if (!(h->access.fifo_full(h)))
30347 + if (!(h->access->fifo_full(h)))
30348 break;
30349 udelay(10);
30350 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
30351 " waiting!\n", h->ctlr);
30352 }
30353 - h->access.submit_command(h, c); /* Send the cmd */
30354 + h->access->submit_command(h, c); /* Send the cmd */
30355 do {
30356 complete = pollcomplete(h->ctlr);
30357
30358 @@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
30359 while (!hlist_empty(&h->reqQ)) {
30360 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
30361 /* can't do anything if fifo is full */
30362 - if ((h->access.fifo_full(h))) {
30363 + if ((h->access->fifo_full(h))) {
30364 printk(KERN_WARNING "cciss: fifo full\n");
30365 break;
30366 }
30367 @@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
30368 h->Qdepth--;
30369
30370 /* Tell the controller execute command */
30371 - h->access.submit_command(h, c);
30372 + h->access->submit_command(h, c);
30373
30374 /* Put job onto the completed Q */
30375 addQ(&h->cmpQ, c);
30376 @@ -3393,17 +3395,17 @@ startio:
30377
30378 static inline unsigned long get_next_completion(ctlr_info_t *h)
30379 {
30380 - return h->access.command_completed(h);
30381 + return h->access->command_completed(h);
30382 }
30383
30384 static inline int interrupt_pending(ctlr_info_t *h)
30385 {
30386 - return h->access.intr_pending(h);
30387 + return h->access->intr_pending(h);
30388 }
30389
30390 static inline long interrupt_not_for_us(ctlr_info_t *h)
30391 {
30392 - return (((h->access.intr_pending(h) == 0) ||
30393 + return (((h->access->intr_pending(h) == 0) ||
30394 (h->interrupts_enabled == 0)));
30395 }
30396
30397 @@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
30398 */
30399 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
30400 c->product_name = products[prod_index].product_name;
30401 - c->access = *(products[prod_index].access);
30402 + c->access = products[prod_index].access;
30403 c->nr_cmds = c->max_commands - 4;
30404 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
30405 (readb(&c->cfgtable->Signature[1]) != 'I') ||
30406 @@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
30407 }
30408
30409 /* make sure the board interrupts are off */
30410 - hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
30411 + hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
30412 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
30413 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
30414 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
30415 @@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
30416 cciss_scsi_setup(i);
30417
30418 /* Turn the interrupts on so we can service requests */
30419 - hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
30420 + hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
30421
30422 /* Get the firmware version */
30423 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
30424 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
30425 index 04d6bf8..36e712d 100644
30426 --- a/drivers/block/cciss.h
30427 +++ b/drivers/block/cciss.h
30428 @@ -90,7 +90,7 @@ struct ctlr_info
30429 // information about each logical volume
30430 drive_info_struct *drv[CISS_MAX_LUN];
30431
30432 - struct access_method access;
30433 + struct access_method *access;
30434
30435 /* queue and queue Info */
30436 struct hlist_head reqQ;
30437 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
30438 index 6422651..bb1bdef 100644
30439 --- a/drivers/block/cpqarray.c
30440 +++ b/drivers/block/cpqarray.c
30441 @@ -402,7 +402,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
30442 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
30443 goto Enomem4;
30444 }
30445 - hba[i]->access.set_intr_mask(hba[i], 0);
30446 + hba[i]->access->set_intr_mask(hba[i], 0);
30447 if (request_irq(hba[i]->intr, do_ida_intr,
30448 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
30449 {
30450 @@ -460,7 +460,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
30451 add_timer(&hba[i]->timer);
30452
30453 /* Enable IRQ now that spinlock and rate limit timer are set up */
30454 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
30455 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
30456
30457 for(j=0; j<NWD; j++) {
30458 struct gendisk *disk = ida_gendisk[i][j];
30459 @@ -695,7 +695,7 @@ DBGINFO(
30460 for(i=0; i<NR_PRODUCTS; i++) {
30461 if (board_id == products[i].board_id) {
30462 c->product_name = products[i].product_name;
30463 - c->access = *(products[i].access);
30464 + c->access = products[i].access;
30465 break;
30466 }
30467 }
30468 @@ -793,7 +793,7 @@ static int __init cpqarray_eisa_detect(void)
30469 hba[ctlr]->intr = intr;
30470 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
30471 hba[ctlr]->product_name = products[j].product_name;
30472 - hba[ctlr]->access = *(products[j].access);
30473 + hba[ctlr]->access = products[j].access;
30474 hba[ctlr]->ctlr = ctlr;
30475 hba[ctlr]->board_id = board_id;
30476 hba[ctlr]->pci_dev = NULL; /* not PCI */
30477 @@ -896,6 +896,8 @@ static void do_ida_request(struct request_queue *q)
30478 struct scatterlist tmp_sg[SG_MAX];
30479 int i, dir, seg;
30480
30481 + pax_track_stack();
30482 +
30483 if (blk_queue_plugged(q))
30484 goto startio;
30485
30486 @@ -968,7 +970,7 @@ static void start_io(ctlr_info_t *h)
30487
30488 while((c = h->reqQ) != NULL) {
30489 /* Can't do anything if we're busy */
30490 - if (h->access.fifo_full(h) == 0)
30491 + if (h->access->fifo_full(h) == 0)
30492 return;
30493
30494 /* Get the first entry from the request Q */
30495 @@ -976,7 +978,7 @@ static void start_io(ctlr_info_t *h)
30496 h->Qdepth--;
30497
30498 /* Tell the controller to do our bidding */
30499 - h->access.submit_command(h, c);
30500 + h->access->submit_command(h, c);
30501
30502 /* Get onto the completion Q */
30503 addQ(&h->cmpQ, c);
30504 @@ -1038,7 +1040,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
30505 unsigned long flags;
30506 __u32 a,a1;
30507
30508 - istat = h->access.intr_pending(h);
30509 + istat = h->access->intr_pending(h);
30510 /* Is this interrupt for us? */
30511 if (istat == 0)
30512 return IRQ_NONE;
30513 @@ -1049,7 +1051,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
30514 */
30515 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
30516 if (istat & FIFO_NOT_EMPTY) {
30517 - while((a = h->access.command_completed(h))) {
30518 + while((a = h->access->command_completed(h))) {
30519 a1 = a; a &= ~3;
30520 if ((c = h->cmpQ) == NULL)
30521 {
30522 @@ -1434,11 +1436,11 @@ static int sendcmd(
30523 /*
30524 * Disable interrupt
30525 */
30526 - info_p->access.set_intr_mask(info_p, 0);
30527 + info_p->access->set_intr_mask(info_p, 0);
30528 /* Make sure there is room in the command FIFO */
30529 /* Actually it should be completely empty at this time. */
30530 for (i = 200000; i > 0; i--) {
30531 - temp = info_p->access.fifo_full(info_p);
30532 + temp = info_p->access->fifo_full(info_p);
30533 if (temp != 0) {
30534 break;
30535 }
30536 @@ -1451,7 +1453,7 @@ DBG(
30537 /*
30538 * Send the cmd
30539 */
30540 - info_p->access.submit_command(info_p, c);
30541 + info_p->access->submit_command(info_p, c);
30542 complete = pollcomplete(ctlr);
30543
30544 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
30545 @@ -1534,9 +1536,9 @@ static int revalidate_allvol(ctlr_info_t *host)
30546 * we check the new geometry. Then turn interrupts back on when
30547 * we're done.
30548 */
30549 - host->access.set_intr_mask(host, 0);
30550 + host->access->set_intr_mask(host, 0);
30551 getgeometry(ctlr);
30552 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
30553 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
30554
30555 for(i=0; i<NWD; i++) {
30556 struct gendisk *disk = ida_gendisk[ctlr][i];
30557 @@ -1576,7 +1578,7 @@ static int pollcomplete(int ctlr)
30558 /* Wait (up to 2 seconds) for a command to complete */
30559
30560 for (i = 200000; i > 0; i--) {
30561 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
30562 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
30563 if (done == 0) {
30564 udelay(10); /* a short fixed delay */
30565 } else
30566 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
30567 index be73e9d..7fbf140 100644
30568 --- a/drivers/block/cpqarray.h
30569 +++ b/drivers/block/cpqarray.h
30570 @@ -99,7 +99,7 @@ struct ctlr_info {
30571 drv_info_t drv[NWD];
30572 struct proc_dir_entry *proc;
30573
30574 - struct access_method access;
30575 + struct access_method *access;
30576
30577 cmdlist_t *reqQ;
30578 cmdlist_t *cmpQ;
30579 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
30580 index 8ec2d70..2804b30 100644
30581 --- a/drivers/block/loop.c
30582 +++ b/drivers/block/loop.c
30583 @@ -282,7 +282,7 @@ static int __do_lo_send_write(struct file *file,
30584 mm_segment_t old_fs = get_fs();
30585
30586 set_fs(get_ds());
30587 - bw = file->f_op->write(file, buf, len, &pos);
30588 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
30589 set_fs(old_fs);
30590 if (likely(bw == len))
30591 return 0;
30592 diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
30593 index 26ada47..083c480 100644
30594 --- a/drivers/block/nbd.c
30595 +++ b/drivers/block/nbd.c
30596 @@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
30597 struct kvec iov;
30598 sigset_t blocked, oldset;
30599
30600 + pax_track_stack();
30601 +
30602 if (unlikely(!sock)) {
30603 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
30604 lo->disk->disk_name, (send ? "send" : "recv"));
30605 @@ -569,6 +571,8 @@ static void do_nbd_request(struct request_queue *q)
30606 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
30607 unsigned int cmd, unsigned long arg)
30608 {
30609 + pax_track_stack();
30610 +
30611 switch (cmd) {
30612 case NBD_DISCONNECT: {
30613 struct request sreq;
30614 diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
30615 index a5d585d..d087be3 100644
30616 --- a/drivers/block/pktcdvd.c
30617 +++ b/drivers/block/pktcdvd.c
30618 @@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kobject *kobj,
30619 return len;
30620 }
30621
30622 -static struct sysfs_ops kobj_pkt_ops = {
30623 +static const struct sysfs_ops kobj_pkt_ops = {
30624 .show = kobj_pkt_show,
30625 .store = kobj_pkt_store
30626 };
30627 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
30628 index 6aad99e..89cd142 100644
30629 --- a/drivers/char/Kconfig
30630 +++ b/drivers/char/Kconfig
30631 @@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
30632
30633 config DEVKMEM
30634 bool "/dev/kmem virtual device support"
30635 - default y
30636 + default n
30637 + depends on !GRKERNSEC_KMEM
30638 help
30639 Say Y here if you want to support the /dev/kmem device. The
30640 /dev/kmem device is rarely used, but can be used for certain
30641 @@ -1114,6 +1115,7 @@ config DEVPORT
30642 bool
30643 depends on !M68K
30644 depends on ISA || PCI
30645 + depends on !GRKERNSEC_KMEM
30646 default y
30647
30648 source "drivers/s390/char/Kconfig"
30649 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
30650 index a96f319..a778a5b 100644
30651 --- a/drivers/char/agp/frontend.c
30652 +++ b/drivers/char/agp/frontend.c
30653 @@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
30654 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
30655 return -EFAULT;
30656
30657 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
30658 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
30659 return -EFAULT;
30660
30661 client = agp_find_client_by_pid(reserve.pid);
30662 diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
30663 index d8cff90..9628e70 100644
30664 --- a/drivers/char/briq_panel.c
30665 +++ b/drivers/char/briq_panel.c
30666 @@ -10,6 +10,7 @@
30667 #include <linux/types.h>
30668 #include <linux/errno.h>
30669 #include <linux/tty.h>
30670 +#include <linux/mutex.h>
30671 #include <linux/timer.h>
30672 #include <linux/kernel.h>
30673 #include <linux/wait.h>
30674 @@ -36,6 +37,7 @@ static int vfd_is_open;
30675 static unsigned char vfd[40];
30676 static int vfd_cursor;
30677 static unsigned char ledpb, led;
30678 +static DEFINE_MUTEX(vfd_mutex);
30679
30680 static void update_vfd(void)
30681 {
30682 @@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
30683 if (!vfd_is_open)
30684 return -EBUSY;
30685
30686 + mutex_lock(&vfd_mutex);
30687 for (;;) {
30688 char c;
30689 if (!indx)
30690 break;
30691 - if (get_user(c, buf))
30692 + if (get_user(c, buf)) {
30693 + mutex_unlock(&vfd_mutex);
30694 return -EFAULT;
30695 + }
30696 if (esc) {
30697 set_led(c);
30698 esc = 0;
30699 @@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
30700 buf++;
30701 }
30702 update_vfd();
30703 + mutex_unlock(&vfd_mutex);
30704
30705 return len;
30706 }
30707 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
30708 index 31e7c91..161afc0 100644
30709 --- a/drivers/char/genrtc.c
30710 +++ b/drivers/char/genrtc.c
30711 @@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *inode, struct file *file,
30712 switch (cmd) {
30713
30714 case RTC_PLL_GET:
30715 + memset(&pll, 0, sizeof(pll));
30716 if (get_rtc_pll(&pll))
30717 return -EINVAL;
30718 else
30719 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
30720 index 006466d..a2bb21c 100644
30721 --- a/drivers/char/hpet.c
30722 +++ b/drivers/char/hpet.c
30723 @@ -430,7 +430,7 @@ static int hpet_release(struct inode *inode, struct file *file)
30724 return 0;
30725 }
30726
30727 -static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
30728 +static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
30729
30730 static int
30731 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
30732 @@ -565,7 +565,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
30733 }
30734
30735 static int
30736 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
30737 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
30738 {
30739 struct hpet_timer __iomem *timer;
30740 struct hpet __iomem *hpet;
30741 @@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
30742 {
30743 struct hpet_info info;
30744
30745 + memset(&info, 0, sizeof(info));
30746 +
30747 if (devp->hd_ireqfreq)
30748 info.hi_ireqfreq =
30749 hpet_time_div(hpetp, devp->hd_ireqfreq);
30750 - else
30751 - info.hi_ireqfreq = 0;
30752 info.hi_flags =
30753 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
30754 info.hi_hpet = hpetp->hp_which;
30755 diff --git a/drivers/char/hvc_beat.c b/drivers/char/hvc_beat.c
30756 index 0afc8b8..6913fc3 100644
30757 --- a/drivers/char/hvc_beat.c
30758 +++ b/drivers/char/hvc_beat.c
30759 @@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t vtermno, const char *buf, int cnt)
30760 return cnt;
30761 }
30762
30763 -static struct hv_ops hvc_beat_get_put_ops = {
30764 +static const struct hv_ops hvc_beat_get_put_ops = {
30765 .get_chars = hvc_beat_get_chars,
30766 .put_chars = hvc_beat_put_chars,
30767 };
30768 diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
30769 index 98097f2..407dddc 100644
30770 --- a/drivers/char/hvc_console.c
30771 +++ b/drivers/char/hvc_console.c
30772 @@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_index(int index)
30773 * console interfaces but can still be used as a tty device. This has to be
30774 * static because kmalloc will not work during early console init.
30775 */
30776 -static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
30777 +static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
30778 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
30779 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
30780
30781 @@ -249,7 +249,7 @@ static void destroy_hvc_struct(struct kref *kref)
30782 * vty adapters do NOT get an hvc_instantiate() callback since they
30783 * appear after early console init.
30784 */
30785 -int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
30786 +int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
30787 {
30788 struct hvc_struct *hp;
30789
30790 @@ -758,7 +758,7 @@ static const struct tty_operations hvc_ops = {
30791 };
30792
30793 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
30794 - struct hv_ops *ops, int outbuf_size)
30795 + const struct hv_ops *ops, int outbuf_size)
30796 {
30797 struct hvc_struct *hp;
30798 int i;
30799 diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
30800 index 10950ca..ed176c3 100644
30801 --- a/drivers/char/hvc_console.h
30802 +++ b/drivers/char/hvc_console.h
30803 @@ -55,7 +55,7 @@ struct hvc_struct {
30804 int outbuf_size;
30805 int n_outbuf;
30806 uint32_t vtermno;
30807 - struct hv_ops *ops;
30808 + const struct hv_ops *ops;
30809 int irq_requested;
30810 int data;
30811 struct winsize ws;
30812 @@ -76,11 +76,11 @@ struct hv_ops {
30813 };
30814
30815 /* Register a vterm and a slot index for use as a console (console_init) */
30816 -extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
30817 +extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
30818
30819 /* register a vterm for hvc tty operation (module_init or hotplug add) */
30820 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
30821 - struct hv_ops *ops, int outbuf_size);
30822 + const struct hv_ops *ops, int outbuf_size);
30823 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
30824 extern int hvc_remove(struct hvc_struct *hp);
30825
30826 diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c
30827 index 936d05b..fd02426 100644
30828 --- a/drivers/char/hvc_iseries.c
30829 +++ b/drivers/char/hvc_iseries.c
30830 @@ -197,7 +197,7 @@ done:
30831 return sent;
30832 }
30833
30834 -static struct hv_ops hvc_get_put_ops = {
30835 +static const struct hv_ops hvc_get_put_ops = {
30836 .get_chars = get_chars,
30837 .put_chars = put_chars,
30838 .notifier_add = notifier_add_irq,
30839 diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
30840 index b0e168f..69cda2a 100644
30841 --- a/drivers/char/hvc_iucv.c
30842 +++ b/drivers/char/hvc_iucv.c
30843 @@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(struct device *dev)
30844
30845
30846 /* HVC operations */
30847 -static struct hv_ops hvc_iucv_ops = {
30848 +static const struct hv_ops hvc_iucv_ops = {
30849 .get_chars = hvc_iucv_get_chars,
30850 .put_chars = hvc_iucv_put_chars,
30851 .notifier_add = hvc_iucv_notifier_add,
30852 diff --git a/drivers/char/hvc_rtas.c b/drivers/char/hvc_rtas.c
30853 index 88590d0..61c4a61 100644
30854 --- a/drivers/char/hvc_rtas.c
30855 +++ b/drivers/char/hvc_rtas.c
30856 @@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_t vtermno, char *buf, int count)
30857 return i;
30858 }
30859
30860 -static struct hv_ops hvc_rtas_get_put_ops = {
30861 +static const struct hv_ops hvc_rtas_get_put_ops = {
30862 .get_chars = hvc_rtas_read_console,
30863 .put_chars = hvc_rtas_write_console,
30864 };
30865 diff --git a/drivers/char/hvc_udbg.c b/drivers/char/hvc_udbg.c
30866 index bd63ba8..b0957e6 100644
30867 --- a/drivers/char/hvc_udbg.c
30868 +++ b/drivers/char/hvc_udbg.c
30869 @@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno, char *buf, int count)
30870 return i;
30871 }
30872
30873 -static struct hv_ops hvc_udbg_ops = {
30874 +static const struct hv_ops hvc_udbg_ops = {
30875 .get_chars = hvc_udbg_get,
30876 .put_chars = hvc_udbg_put,
30877 };
30878 diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
30879 index 10be343..27370e9 100644
30880 --- a/drivers/char/hvc_vio.c
30881 +++ b/drivers/char/hvc_vio.c
30882 @@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t vtermno, char *buf, int count)
30883 return got;
30884 }
30885
30886 -static struct hv_ops hvc_get_put_ops = {
30887 +static const struct hv_ops hvc_get_put_ops = {
30888 .get_chars = filtered_get_chars,
30889 .put_chars = hvc_put_chars,
30890 .notifier_add = notifier_add_irq,
30891 diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
30892 index a6ee32b..94f8c26 100644
30893 --- a/drivers/char/hvc_xen.c
30894 +++ b/drivers/char/hvc_xen.c
30895 @@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno, char *buf, int len)
30896 return recv;
30897 }
30898
30899 -static struct hv_ops hvc_ops = {
30900 +static const struct hv_ops hvc_ops = {
30901 .get_chars = read_console,
30902 .put_chars = write_console,
30903 .notifier_add = notifier_add_irq,
30904 diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
30905 index 266b858..f3ee0bb 100644
30906 --- a/drivers/char/hvcs.c
30907 +++ b/drivers/char/hvcs.c
30908 @@ -82,6 +82,7 @@
30909 #include <asm/hvcserver.h>
30910 #include <asm/uaccess.h>
30911 #include <asm/vio.h>
30912 +#include <asm/local.h>
30913
30914 /*
30915 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
30916 @@ -269,7 +270,7 @@ struct hvcs_struct {
30917 unsigned int index;
30918
30919 struct tty_struct *tty;
30920 - int open_count;
30921 + local_t open_count;
30922
30923 /*
30924 * Used to tell the driver kernel_thread what operations need to take
30925 @@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
30926
30927 spin_lock_irqsave(&hvcsd->lock, flags);
30928
30929 - if (hvcsd->open_count > 0) {
30930 + if (local_read(&hvcsd->open_count) > 0) {
30931 spin_unlock_irqrestore(&hvcsd->lock, flags);
30932 printk(KERN_INFO "HVCS: vterm state unchanged. "
30933 "The hvcs device node is still in use.\n");
30934 @@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
30935 if ((retval = hvcs_partner_connect(hvcsd)))
30936 goto error_release;
30937
30938 - hvcsd->open_count = 1;
30939 + local_set(&hvcsd->open_count, 1);
30940 hvcsd->tty = tty;
30941 tty->driver_data = hvcsd;
30942
30943 @@ -1169,7 +1170,7 @@ fast_open:
30944
30945 spin_lock_irqsave(&hvcsd->lock, flags);
30946 kref_get(&hvcsd->kref);
30947 - hvcsd->open_count++;
30948 + local_inc(&hvcsd->open_count);
30949 hvcsd->todo_mask |= HVCS_SCHED_READ;
30950 spin_unlock_irqrestore(&hvcsd->lock, flags);
30951
30952 @@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
30953 hvcsd = tty->driver_data;
30954
30955 spin_lock_irqsave(&hvcsd->lock, flags);
30956 - if (--hvcsd->open_count == 0) {
30957 + if (local_dec_and_test(&hvcsd->open_count)) {
30958
30959 vio_disable_interrupts(hvcsd->vdev);
30960
30961 @@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
30962 free_irq(irq, hvcsd);
30963 kref_put(&hvcsd->kref, destroy_hvcs_struct);
30964 return;
30965 - } else if (hvcsd->open_count < 0) {
30966 + } else if (local_read(&hvcsd->open_count) < 0) {
30967 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
30968 " is missmanaged.\n",
30969 - hvcsd->vdev->unit_address, hvcsd->open_count);
30970 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
30971 }
30972
30973 spin_unlock_irqrestore(&hvcsd->lock, flags);
30974 @@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struct * tty)
30975
30976 spin_lock_irqsave(&hvcsd->lock, flags);
30977 /* Preserve this so that we know how many kref refs to put */
30978 - temp_open_count = hvcsd->open_count;
30979 + temp_open_count = local_read(&hvcsd->open_count);
30980
30981 /*
30982 * Don't kref put inside the spinlock because the destruction
30983 @@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struct * tty)
30984 hvcsd->tty->driver_data = NULL;
30985 hvcsd->tty = NULL;
30986
30987 - hvcsd->open_count = 0;
30988 + local_set(&hvcsd->open_count, 0);
30989
30990 /* This will drop any buffered data on the floor which is OK in a hangup
30991 * scenario. */
30992 @@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct *tty,
30993 * the middle of a write operation? This is a crummy place to do this
30994 * but we want to keep it all in the spinlock.
30995 */
30996 - if (hvcsd->open_count <= 0) {
30997 + if (local_read(&hvcsd->open_count) <= 0) {
30998 spin_unlock_irqrestore(&hvcsd->lock, flags);
30999 return -ENODEV;
31000 }
31001 @@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_struct *tty)
31002 {
31003 struct hvcs_struct *hvcsd = tty->driver_data;
31004
31005 - if (!hvcsd || hvcsd->open_count <= 0)
31006 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
31007 return 0;
31008
31009 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
31010 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
31011 index ec5e3f8..02455ba 100644
31012 --- a/drivers/char/ipmi/ipmi_msghandler.c
31013 +++ b/drivers/char/ipmi/ipmi_msghandler.c
31014 @@ -414,7 +414,7 @@ struct ipmi_smi {
31015 struct proc_dir_entry *proc_dir;
31016 char proc_dir_name[10];
31017
31018 - atomic_t stats[IPMI_NUM_STATS];
31019 + atomic_unchecked_t stats[IPMI_NUM_STATS];
31020
31021 /*
31022 * run_to_completion duplicate of smb_info, smi_info
31023 @@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
31024
31025
31026 #define ipmi_inc_stat(intf, stat) \
31027 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
31028 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
31029 #define ipmi_get_stat(intf, stat) \
31030 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
31031 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
31032
31033 static int is_lan_addr(struct ipmi_addr *addr)
31034 {
31035 @@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
31036 INIT_LIST_HEAD(&intf->cmd_rcvrs);
31037 init_waitqueue_head(&intf->waitq);
31038 for (i = 0; i < IPMI_NUM_STATS; i++)
31039 - atomic_set(&intf->stats[i], 0);
31040 + atomic_set_unchecked(&intf->stats[i], 0);
31041
31042 intf->proc_dir = NULL;
31043
31044 @@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
31045 struct ipmi_smi_msg smi_msg;
31046 struct ipmi_recv_msg recv_msg;
31047
31048 + pax_track_stack();
31049 +
31050 si = (struct ipmi_system_interface_addr *) &addr;
31051 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
31052 si->channel = IPMI_BMC_CHANNEL;
31053 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
31054 index abae8c9..8021979 100644
31055 --- a/drivers/char/ipmi/ipmi_si_intf.c
31056 +++ b/drivers/char/ipmi/ipmi_si_intf.c
31057 @@ -277,7 +277,7 @@ struct smi_info {
31058 unsigned char slave_addr;
31059
31060 /* Counters and things for the proc filesystem. */
31061 - atomic_t stats[SI_NUM_STATS];
31062 + atomic_unchecked_t stats[SI_NUM_STATS];
31063
31064 struct task_struct *thread;
31065
31066 @@ -285,9 +285,9 @@ struct smi_info {
31067 };
31068
31069 #define smi_inc_stat(smi, stat) \
31070 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
31071 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
31072 #define smi_get_stat(smi, stat) \
31073 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
31074 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
31075
31076 #define SI_MAX_PARMS 4
31077
31078 @@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info *new_smi)
31079 atomic_set(&new_smi->req_events, 0);
31080 new_smi->run_to_completion = 0;
31081 for (i = 0; i < SI_NUM_STATS; i++)
31082 - atomic_set(&new_smi->stats[i], 0);
31083 + atomic_set_unchecked(&new_smi->stats[i], 0);
31084
31085 new_smi->interrupt_disabled = 0;
31086 atomic_set(&new_smi->stop_operation, 0);
31087 diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
31088 index 402838f..55e2200 100644
31089 --- a/drivers/char/istallion.c
31090 +++ b/drivers/char/istallion.c
31091 @@ -187,7 +187,6 @@ static struct ktermios stli_deftermios = {
31092 * re-used for each stats call.
31093 */
31094 static comstats_t stli_comstats;
31095 -static combrd_t stli_brdstats;
31096 static struct asystats stli_cdkstats;
31097
31098 /*****************************************************************************/
31099 @@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __user *bp)
31100 {
31101 struct stlibrd *brdp;
31102 unsigned int i;
31103 + combrd_t stli_brdstats;
31104
31105 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
31106 return -EFAULT;
31107 @@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stliport __user *arg)
31108 struct stliport stli_dummyport;
31109 struct stliport *portp;
31110
31111 + pax_track_stack();
31112 +
31113 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
31114 return -EFAULT;
31115 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
31116 @@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stlibrd __user *arg)
31117 struct stlibrd stli_dummybrd;
31118 struct stlibrd *brdp;
31119
31120 + pax_track_stack();
31121 +
31122 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
31123 return -EFAULT;
31124 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
31125 diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
31126 index 950837c..e55a288 100644
31127 --- a/drivers/char/keyboard.c
31128 +++ b/drivers/char/keyboard.c
31129 @@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
31130 kbd->kbdmode == VC_MEDIUMRAW) &&
31131 value != KVAL(K_SAK))
31132 return; /* SAK is allowed even in raw mode */
31133 +
31134 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
31135 + {
31136 + void *func = fn_handler[value];
31137 + if (func == fn_show_state || func == fn_show_ptregs ||
31138 + func == fn_show_mem)
31139 + return;
31140 + }
31141 +#endif
31142 +
31143 fn_handler[value](vc);
31144 }
31145
31146 @@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_ids[] = {
31147 .evbit = { BIT_MASK(EV_SND) },
31148 },
31149
31150 - { }, /* Terminating entry */
31151 + { 0 }, /* Terminating entry */
31152 };
31153
31154 MODULE_DEVICE_TABLE(input, kbd_ids);
31155 diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
31156 index 87c67b4..230527a 100644
31157 --- a/drivers/char/mbcs.c
31158 +++ b/drivers/char/mbcs.c
31159 @@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
31160 return 0;
31161 }
31162
31163 -static const struct cx_device_id __devinitdata mbcs_id_table[] = {
31164 +static const struct cx_device_id __devinitconst mbcs_id_table[] = {
31165 {
31166 .part_num = MBCS_PART_NUM,
31167 .mfg_num = MBCS_MFG_NUM,
31168 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
31169 index 1270f64..8495f49 100644
31170 --- a/drivers/char/mem.c
31171 +++ b/drivers/char/mem.c
31172 @@ -18,6 +18,7 @@
31173 #include <linux/raw.h>
31174 #include <linux/tty.h>
31175 #include <linux/capability.h>
31176 +#include <linux/security.h>
31177 #include <linux/ptrace.h>
31178 #include <linux/device.h>
31179 #include <linux/highmem.h>
31180 @@ -35,6 +36,10 @@
31181 # include <linux/efi.h>
31182 #endif
31183
31184 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31185 +extern struct file_operations grsec_fops;
31186 +#endif
31187 +
31188 static inline unsigned long size_inside_page(unsigned long start,
31189 unsigned long size)
31190 {
31191 @@ -102,9 +107,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31192
31193 while (cursor < to) {
31194 if (!devmem_is_allowed(pfn)) {
31195 +#ifdef CONFIG_GRKERNSEC_KMEM
31196 + gr_handle_mem_readwrite(from, to);
31197 +#else
31198 printk(KERN_INFO
31199 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
31200 current->comm, from, to);
31201 +#endif
31202 return 0;
31203 }
31204 cursor += PAGE_SIZE;
31205 @@ -112,6 +121,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31206 }
31207 return 1;
31208 }
31209 +#elif defined(CONFIG_GRKERNSEC_KMEM)
31210 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31211 +{
31212 + return 0;
31213 +}
31214 #else
31215 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31216 {
31217 @@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * file, char __user * buf,
31218 #endif
31219
31220 while (count > 0) {
31221 + char *temp;
31222 +
31223 /*
31224 * Handle first page in case it's not aligned
31225 */
31226 @@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * file, char __user * buf,
31227 if (!ptr)
31228 return -EFAULT;
31229
31230 - if (copy_to_user(buf, ptr, sz)) {
31231 +#ifdef CONFIG_PAX_USERCOPY
31232 + temp = kmalloc(sz, GFP_KERNEL);
31233 + if (!temp) {
31234 + unxlate_dev_mem_ptr(p, ptr);
31235 + return -ENOMEM;
31236 + }
31237 + memcpy(temp, ptr, sz);
31238 +#else
31239 + temp = ptr;
31240 +#endif
31241 +
31242 + if (copy_to_user(buf, temp, sz)) {
31243 +
31244 +#ifdef CONFIG_PAX_USERCOPY
31245 + kfree(temp);
31246 +#endif
31247 +
31248 unxlate_dev_mem_ptr(p, ptr);
31249 return -EFAULT;
31250 }
31251
31252 +#ifdef CONFIG_PAX_USERCOPY
31253 + kfree(temp);
31254 +#endif
31255 +
31256 unxlate_dev_mem_ptr(p, ptr);
31257
31258 buf += sz;
31259 @@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31260 size_t count, loff_t *ppos)
31261 {
31262 unsigned long p = *ppos;
31263 - ssize_t low_count, read, sz;
31264 + ssize_t low_count, read, sz, err = 0;
31265 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
31266 - int err = 0;
31267
31268 read = 0;
31269 if (p < (unsigned long) high_memory) {
31270 @@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31271 }
31272 #endif
31273 while (low_count > 0) {
31274 + char *temp;
31275 +
31276 sz = size_inside_page(p, low_count);
31277
31278 /*
31279 @@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31280 */
31281 kbuf = xlate_dev_kmem_ptr((char *)p);
31282
31283 - if (copy_to_user(buf, kbuf, sz))
31284 +#ifdef CONFIG_PAX_USERCOPY
31285 + temp = kmalloc(sz, GFP_KERNEL);
31286 + if (!temp)
31287 + return -ENOMEM;
31288 + memcpy(temp, kbuf, sz);
31289 +#else
31290 + temp = kbuf;
31291 +#endif
31292 +
31293 + err = copy_to_user(buf, temp, sz);
31294 +
31295 +#ifdef CONFIG_PAX_USERCOPY
31296 + kfree(temp);
31297 +#endif
31298 +
31299 + if (err)
31300 return -EFAULT;
31301 buf += sz;
31302 p += sz;
31303 @@ -889,6 +941,9 @@ static const struct memdev {
31304 #ifdef CONFIG_CRASH_DUMP
31305 [12] = { "oldmem", 0, &oldmem_fops, NULL },
31306 #endif
31307 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31308 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
31309 +#endif
31310 };
31311
31312 static int memory_open(struct inode *inode, struct file *filp)
31313 diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c
31314 index 674b3ab..a8d1970 100644
31315 --- a/drivers/char/pcmcia/ipwireless/tty.c
31316 +++ b/drivers/char/pcmcia/ipwireless/tty.c
31317 @@ -29,6 +29,7 @@
31318 #include <linux/tty_driver.h>
31319 #include <linux/tty_flip.h>
31320 #include <linux/uaccess.h>
31321 +#include <asm/local.h>
31322
31323 #include "tty.h"
31324 #include "network.h"
31325 @@ -51,7 +52,7 @@ struct ipw_tty {
31326 int tty_type;
31327 struct ipw_network *network;
31328 struct tty_struct *linux_tty;
31329 - int open_count;
31330 + local_t open_count;
31331 unsigned int control_lines;
31332 struct mutex ipw_tty_mutex;
31333 int tx_bytes_queued;
31334 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
31335 mutex_unlock(&tty->ipw_tty_mutex);
31336 return -ENODEV;
31337 }
31338 - if (tty->open_count == 0)
31339 + if (local_read(&tty->open_count) == 0)
31340 tty->tx_bytes_queued = 0;
31341
31342 - tty->open_count++;
31343 + local_inc(&tty->open_count);
31344
31345 tty->linux_tty = linux_tty;
31346 linux_tty->driver_data = tty;
31347 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
31348
31349 static void do_ipw_close(struct ipw_tty *tty)
31350 {
31351 - tty->open_count--;
31352 -
31353 - if (tty->open_count == 0) {
31354 + if (local_dec_return(&tty->open_count) == 0) {
31355 struct tty_struct *linux_tty = tty->linux_tty;
31356
31357 if (linux_tty != NULL) {
31358 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
31359 return;
31360
31361 mutex_lock(&tty->ipw_tty_mutex);
31362 - if (tty->open_count == 0) {
31363 + if (local_read(&tty->open_count) == 0) {
31364 mutex_unlock(&tty->ipw_tty_mutex);
31365 return;
31366 }
31367 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
31368 return;
31369 }
31370
31371 - if (!tty->open_count) {
31372 + if (!local_read(&tty->open_count)) {
31373 mutex_unlock(&tty->ipw_tty_mutex);
31374 return;
31375 }
31376 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
31377 return -ENODEV;
31378
31379 mutex_lock(&tty->ipw_tty_mutex);
31380 - if (!tty->open_count) {
31381 + if (!local_read(&tty->open_count)) {
31382 mutex_unlock(&tty->ipw_tty_mutex);
31383 return -EINVAL;
31384 }
31385 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
31386 if (!tty)
31387 return -ENODEV;
31388
31389 - if (!tty->open_count)
31390 + if (!local_read(&tty->open_count))
31391 return -EINVAL;
31392
31393 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
31394 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
31395 if (!tty)
31396 return 0;
31397
31398 - if (!tty->open_count)
31399 + if (!local_read(&tty->open_count))
31400 return 0;
31401
31402 return tty->tx_bytes_queued;
31403 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty, struct file *file)
31404 if (!tty)
31405 return -ENODEV;
31406
31407 - if (!tty->open_count)
31408 + if (!local_read(&tty->open_count))
31409 return -EINVAL;
31410
31411 return get_control_lines(tty);
31412 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty, struct file *file,
31413 if (!tty)
31414 return -ENODEV;
31415
31416 - if (!tty->open_count)
31417 + if (!local_read(&tty->open_count))
31418 return -EINVAL;
31419
31420 return set_control_lines(tty, set, clear);
31421 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty, struct file *file,
31422 if (!tty)
31423 return -ENODEV;
31424
31425 - if (!tty->open_count)
31426 + if (!local_read(&tty->open_count))
31427 return -EINVAL;
31428
31429 /* FIXME: Exactly how is the tty object locked here .. */
31430 @@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
31431 against a parallel ioctl etc */
31432 mutex_lock(&ttyj->ipw_tty_mutex);
31433 }
31434 - while (ttyj->open_count)
31435 + while (local_read(&ttyj->open_count))
31436 do_ipw_close(ttyj);
31437 ipwireless_disassociate_network_ttys(network,
31438 ttyj->channel_idx);
31439 diff --git a/drivers/char/pty.c b/drivers/char/pty.c
31440 index 62f282e..e45c45c 100644
31441 --- a/drivers/char/pty.c
31442 +++ b/drivers/char/pty.c
31443 @@ -736,8 +736,10 @@ static void __init unix98_pty_init(void)
31444 register_sysctl_table(pty_root_table);
31445
31446 /* Now create the /dev/ptmx special device */
31447 + pax_open_kernel();
31448 tty_default_fops(&ptmx_fops);
31449 - ptmx_fops.open = ptmx_open;
31450 + *(void **)&ptmx_fops.open = ptmx_open;
31451 + pax_close_kernel();
31452
31453 cdev_init(&ptmx_cdev, &ptmx_fops);
31454 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
31455 diff --git a/drivers/char/random.c b/drivers/char/random.c
31456 index 3a19e2d..6ed09d3 100644
31457 --- a/drivers/char/random.c
31458 +++ b/drivers/char/random.c
31459 @@ -254,8 +254,13 @@
31460 /*
31461 * Configuration information
31462 */
31463 +#ifdef CONFIG_GRKERNSEC_RANDNET
31464 +#define INPUT_POOL_WORDS 512
31465 +#define OUTPUT_POOL_WORDS 128
31466 +#else
31467 #define INPUT_POOL_WORDS 128
31468 #define OUTPUT_POOL_WORDS 32
31469 +#endif
31470 #define SEC_XFER_SIZE 512
31471
31472 /*
31473 @@ -292,10 +297,17 @@ static struct poolinfo {
31474 int poolwords;
31475 int tap1, tap2, tap3, tap4, tap5;
31476 } poolinfo_table[] = {
31477 +#ifdef CONFIG_GRKERNSEC_RANDNET
31478 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
31479 + { 512, 411, 308, 208, 104, 1 },
31480 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
31481 + { 128, 103, 76, 51, 25, 1 },
31482 +#else
31483 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
31484 { 128, 103, 76, 51, 25, 1 },
31485 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
31486 { 32, 26, 20, 14, 7, 1 },
31487 +#endif
31488 #if 0
31489 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
31490 { 2048, 1638, 1231, 819, 411, 1 },
31491 @@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
31492 #include <linux/sysctl.h>
31493
31494 static int min_read_thresh = 8, min_write_thresh;
31495 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
31496 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
31497 static int max_write_thresh = INPUT_POOL_WORDS * 32;
31498 static char sysctl_bootid[16];
31499
31500 diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
31501 index 0e29a23..0efc2c2 100644
31502 --- a/drivers/char/rocket.c
31503 +++ b/drivers/char/rocket.c
31504 @@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info, struct rocket_ports __user *retports)
31505 struct rocket_ports tmp;
31506 int board;
31507
31508 + pax_track_stack();
31509 +
31510 if (!retports)
31511 return -EFAULT;
31512 memset(&tmp, 0, sizeof (tmp));
31513 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
31514 index 8c262aa..4d3b058 100644
31515 --- a/drivers/char/sonypi.c
31516 +++ b/drivers/char/sonypi.c
31517 @@ -55,6 +55,7 @@
31518 #include <asm/uaccess.h>
31519 #include <asm/io.h>
31520 #include <asm/system.h>
31521 +#include <asm/local.h>
31522
31523 #include <linux/sonypi.h>
31524
31525 @@ -491,7 +492,7 @@ static struct sonypi_device {
31526 spinlock_t fifo_lock;
31527 wait_queue_head_t fifo_proc_list;
31528 struct fasync_struct *fifo_async;
31529 - int open_count;
31530 + local_t open_count;
31531 int model;
31532 struct input_dev *input_jog_dev;
31533 struct input_dev *input_key_dev;
31534 @@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
31535 static int sonypi_misc_release(struct inode *inode, struct file *file)
31536 {
31537 mutex_lock(&sonypi_device.lock);
31538 - sonypi_device.open_count--;
31539 + local_dec(&sonypi_device.open_count);
31540 mutex_unlock(&sonypi_device.lock);
31541 return 0;
31542 }
31543 @@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
31544 lock_kernel();
31545 mutex_lock(&sonypi_device.lock);
31546 /* Flush input queue on first open */
31547 - if (!sonypi_device.open_count)
31548 + if (!local_read(&sonypi_device.open_count))
31549 kfifo_reset(sonypi_device.fifo);
31550 - sonypi_device.open_count++;
31551 + local_inc(&sonypi_device.open_count);
31552 mutex_unlock(&sonypi_device.lock);
31553 unlock_kernel();
31554 return 0;
31555 diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
31556 index db6dcfa..13834cb 100644
31557 --- a/drivers/char/stallion.c
31558 +++ b/drivers/char/stallion.c
31559 @@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlport __user *arg)
31560 struct stlport stl_dummyport;
31561 struct stlport *portp;
31562
31563 + pax_track_stack();
31564 +
31565 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
31566 return -EFAULT;
31567 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
31568 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
31569 index a0789f6..cea3902 100644
31570 --- a/drivers/char/tpm/tpm.c
31571 +++ b/drivers/char/tpm/tpm.c
31572 @@ -405,7 +405,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
31573 chip->vendor.req_complete_val)
31574 goto out_recv;
31575
31576 - if ((status == chip->vendor.req_canceled)) {
31577 + if (status == chip->vendor.req_canceled) {
31578 dev_err(chip->dev, "Operation Canceled\n");
31579 rc = -ECANCELED;
31580 goto out;
31581 @@ -824,6 +824,8 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
31582
31583 struct tpm_chip *chip = dev_get_drvdata(dev);
31584
31585 + pax_track_stack();
31586 +
31587 tpm_cmd.header.in = tpm_readpubek_header;
31588 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
31589 "attempting to read the PUBEK");
31590 diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
31591 index bf2170f..ce8cab9 100644
31592 --- a/drivers/char/tpm/tpm_bios.c
31593 +++ b/drivers/char/tpm/tpm_bios.c
31594 @@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
31595 event = addr;
31596
31597 if ((event->event_type == 0 && event->event_size == 0) ||
31598 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
31599 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
31600 return NULL;
31601
31602 return addr;
31603 @@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
31604 return NULL;
31605
31606 if ((event->event_type == 0 && event->event_size == 0) ||
31607 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
31608 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
31609 return NULL;
31610
31611 (*pos)++;
31612 @@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
31613 int i;
31614
31615 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
31616 - seq_putc(m, data[i]);
31617 + if (!seq_putc(m, data[i]))
31618 + return -EFAULT;
31619
31620 return 0;
31621 }
31622 @@ -409,8 +410,13 @@ static int read_log(struct tpm_bios_log *log)
31623 log->bios_event_log_end = log->bios_event_log + len;
31624
31625 virt = acpi_os_map_memory(start, len);
31626 + if (!virt) {
31627 + kfree(log->bios_event_log);
31628 + log->bios_event_log = NULL;
31629 + return -EFAULT;
31630 + }
31631
31632 - memcpy(log->bios_event_log, virt, len);
31633 + memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
31634
31635 acpi_os_unmap_memory(virt, len);
31636 return 0;
31637 diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
31638 index 123cedf..137edef 100644
31639 --- a/drivers/char/tty_io.c
31640 +++ b/drivers/char/tty_io.c
31641 @@ -1774,6 +1774,7 @@ got_driver:
31642
31643 if (IS_ERR(tty)) {
31644 mutex_unlock(&tty_mutex);
31645 + tty_driver_kref_put(driver);
31646 return PTR_ERR(tty);
31647 }
31648 }
31649 @@ -2603,8 +2604,10 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
31650 return retval;
31651 }
31652
31653 +EXPORT_SYMBOL(tty_ioctl);
31654 +
31655 #ifdef CONFIG_COMPAT
31656 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
31657 +long tty_compat_ioctl(struct file *file, unsigned int cmd,
31658 unsigned long arg)
31659 {
31660 struct inode *inode = file->f_dentry->d_inode;
31661 @@ -2628,6 +2631,8 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
31662
31663 return retval;
31664 }
31665 +
31666 +EXPORT_SYMBOL(tty_compat_ioctl);
31667 #endif
31668
31669 /*
31670 @@ -3073,7 +3078,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
31671
31672 void tty_default_fops(struct file_operations *fops)
31673 {
31674 - *fops = tty_fops;
31675 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
31676 }
31677
31678 /*
31679 diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
31680 index d814a3d..b55b9c9 100644
31681 --- a/drivers/char/tty_ldisc.c
31682 +++ b/drivers/char/tty_ldisc.c
31683 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *ld)
31684 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
31685 struct tty_ldisc_ops *ldo = ld->ops;
31686
31687 - ldo->refcount--;
31688 + atomic_dec(&ldo->refcount);
31689 module_put(ldo->owner);
31690 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
31691
31692 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
31693 spin_lock_irqsave(&tty_ldisc_lock, flags);
31694 tty_ldiscs[disc] = new_ldisc;
31695 new_ldisc->num = disc;
31696 - new_ldisc->refcount = 0;
31697 + atomic_set(&new_ldisc->refcount, 0);
31698 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
31699
31700 return ret;
31701 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
31702 return -EINVAL;
31703
31704 spin_lock_irqsave(&tty_ldisc_lock, flags);
31705 - if (tty_ldiscs[disc]->refcount)
31706 + if (atomic_read(&tty_ldiscs[disc]->refcount))
31707 ret = -EBUSY;
31708 else
31709 tty_ldiscs[disc] = NULL;
31710 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
31711 if (ldops) {
31712 ret = ERR_PTR(-EAGAIN);
31713 if (try_module_get(ldops->owner)) {
31714 - ldops->refcount++;
31715 + atomic_inc(&ldops->refcount);
31716 ret = ldops;
31717 }
31718 }
31719 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
31720 unsigned long flags;
31721
31722 spin_lock_irqsave(&tty_ldisc_lock, flags);
31723 - ldops->refcount--;
31724 + atomic_dec(&ldops->refcount);
31725 module_put(ldops->owner);
31726 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
31727 }
31728 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
31729 index a035ae3..c27fe2c 100644
31730 --- a/drivers/char/virtio_console.c
31731 +++ b/drivers/char/virtio_console.c
31732 @@ -133,7 +133,9 @@ static int get_chars(u32 vtermno, char *buf, int count)
31733 * virtqueue, so we let the drivers do some boutique early-output thing. */
31734 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
31735 {
31736 - virtio_cons.put_chars = put_chars;
31737 + pax_open_kernel();
31738 + *(void **)&virtio_cons.put_chars = put_chars;
31739 + pax_close_kernel();
31740 return hvc_instantiate(0, 0, &virtio_cons);
31741 }
31742
31743 @@ -213,11 +215,13 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
31744 out_vq = vqs[1];
31745
31746 /* Start using the new console output. */
31747 - virtio_cons.get_chars = get_chars;
31748 - virtio_cons.put_chars = put_chars;
31749 - virtio_cons.notifier_add = notifier_add_vio;
31750 - virtio_cons.notifier_del = notifier_del_vio;
31751 - virtio_cons.notifier_hangup = notifier_del_vio;
31752 + pax_open_kernel();
31753 + *(void **)&virtio_cons.get_chars = get_chars;
31754 + *(void **)&virtio_cons.put_chars = put_chars;
31755 + *(void **)&virtio_cons.notifier_add = notifier_add_vio;
31756 + *(void **)&virtio_cons.notifier_del = notifier_del_vio;
31757 + *(void **)&virtio_cons.notifier_hangup = notifier_del_vio;
31758 + pax_close_kernel();
31759
31760 /* The first argument of hvc_alloc() is the virtual console number, so
31761 * we use zero. The second argument is the parameter for the
31762 diff --git a/drivers/char/vt.c b/drivers/char/vt.c
31763 index 0c80c68..53d59c1 100644
31764 --- a/drivers/char/vt.c
31765 +++ b/drivers/char/vt.c
31766 @@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier);
31767
31768 static void notify_write(struct vc_data *vc, unsigned int unicode)
31769 {
31770 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
31771 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
31772 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
31773 }
31774
31775 diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
31776 index 6351a26..999af95 100644
31777 --- a/drivers/char/vt_ioctl.c
31778 +++ b/drivers/char/vt_ioctl.c
31779 @@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
31780 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
31781 return -EFAULT;
31782
31783 - if (!capable(CAP_SYS_TTY_CONFIG))
31784 - perm = 0;
31785 -
31786 switch (cmd) {
31787 case KDGKBENT:
31788 key_map = key_maps[s];
31789 @@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
31790 val = (i ? K_HOLE : K_NOSUCHMAP);
31791 return put_user(val, &user_kbe->kb_value);
31792 case KDSKBENT:
31793 + if (!capable(CAP_SYS_TTY_CONFIG))
31794 + perm = 0;
31795 +
31796 if (!perm)
31797 return -EPERM;
31798 +
31799 if (!i && v == K_NOSUCHMAP) {
31800 /* deallocate map */
31801 key_map = key_maps[s];
31802 @@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
31803 int i, j, k;
31804 int ret;
31805
31806 - if (!capable(CAP_SYS_TTY_CONFIG))
31807 - perm = 0;
31808 -
31809 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
31810 if (!kbs) {
31811 ret = -ENOMEM;
31812 @@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
31813 kfree(kbs);
31814 return ((p && *p) ? -EOVERFLOW : 0);
31815 case KDSKBSENT:
31816 + if (!capable(CAP_SYS_TTY_CONFIG))
31817 + perm = 0;
31818 +
31819 if (!perm) {
31820 ret = -EPERM;
31821 goto reterr;
31822 diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
31823 index c7ae026..1769c1d 100644
31824 --- a/drivers/cpufreq/cpufreq.c
31825 +++ b/drivers/cpufreq/cpufreq.c
31826 @@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct kobject *kobj)
31827 complete(&policy->kobj_unregister);
31828 }
31829
31830 -static struct sysfs_ops sysfs_ops = {
31831 +static const struct sysfs_ops sysfs_ops = {
31832 .show = show,
31833 .store = store,
31834 };
31835 diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
31836 index 97b0038..2056670 100644
31837 --- a/drivers/cpuidle/sysfs.c
31838 +++ b/drivers/cpuidle/sysfs.c
31839 @@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobject * kobj, struct attribute * attr,
31840 return ret;
31841 }
31842
31843 -static struct sysfs_ops cpuidle_sysfs_ops = {
31844 +static const struct sysfs_ops cpuidle_sysfs_ops = {
31845 .show = cpuidle_show,
31846 .store = cpuidle_store,
31847 };
31848 @@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct kobject * kobj,
31849 return ret;
31850 }
31851
31852 -static struct sysfs_ops cpuidle_state_sysfs_ops = {
31853 +static const struct sysfs_ops cpuidle_state_sysfs_ops = {
31854 .show = cpuidle_state_show,
31855 };
31856
31857 @@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpuidle = {
31858 .release = cpuidle_state_sysfs_release,
31859 };
31860
31861 -static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
31862 +static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
31863 {
31864 kobject_put(&device->kobjs[i]->kobj);
31865 wait_for_completion(&device->kobjs[i]->kobj_unregister);
31866 diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
31867 index 5f753fc..0377ae9 100644
31868 --- a/drivers/crypto/hifn_795x.c
31869 +++ b/drivers/crypto/hifn_795x.c
31870 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
31871 0xCA, 0x34, 0x2B, 0x2E};
31872 struct scatterlist sg;
31873
31874 + pax_track_stack();
31875 +
31876 memset(src, 0, sizeof(src));
31877 memset(ctx.key, 0, sizeof(ctx.key));
31878
31879 diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
31880 index 71e6482..de8d96c 100644
31881 --- a/drivers/crypto/padlock-aes.c
31882 +++ b/drivers/crypto/padlock-aes.c
31883 @@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
31884 struct crypto_aes_ctx gen_aes;
31885 int cpu;
31886
31887 + pax_track_stack();
31888 +
31889 if (key_len % 8) {
31890 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
31891 return -EINVAL;
31892 diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
31893 index dcc4ab7..cc834bb 100644
31894 --- a/drivers/dma/ioat/dma.c
31895 +++ b/drivers/dma/ioat/dma.c
31896 @@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
31897 return entry->show(&chan->common, page);
31898 }
31899
31900 -struct sysfs_ops ioat_sysfs_ops = {
31901 +const struct sysfs_ops ioat_sysfs_ops = {
31902 .show = ioat_attr_show,
31903 };
31904
31905 diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
31906 index bbc3e78..f2db62c 100644
31907 --- a/drivers/dma/ioat/dma.h
31908 +++ b/drivers/dma/ioat/dma.h
31909 @@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
31910 unsigned long *phys_complete);
31911 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
31912 void ioat_kobject_del(struct ioatdma_device *device);
31913 -extern struct sysfs_ops ioat_sysfs_ops;
31914 +extern const struct sysfs_ops ioat_sysfs_ops;
31915 extern struct ioat_sysfs_entry ioat_version_attr;
31916 extern struct ioat_sysfs_entry ioat_cap_attr;
31917 #endif /* IOATDMA_H */
31918 diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
31919 index 9908c9e..3ceb0e5 100644
31920 --- a/drivers/dma/ioat/dma_v3.c
31921 +++ b/drivers/dma/ioat/dma_v3.c
31922 @@ -71,10 +71,10 @@
31923 /* provide a lookup table for setting the source address in the base or
31924 * extended descriptor of an xor or pq descriptor
31925 */
31926 -static const u8 xor_idx_to_desc __read_mostly = 0xd0;
31927 -static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 };
31928 -static const u8 pq_idx_to_desc __read_mostly = 0xf8;
31929 -static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 };
31930 +static const u8 xor_idx_to_desc = 0xd0;
31931 +static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
31932 +static const u8 pq_idx_to_desc = 0xf8;
31933 +static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
31934
31935 static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
31936 {
31937 diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
31938 index 85c464a..afd1e73 100644
31939 --- a/drivers/edac/amd64_edac.c
31940 +++ b/drivers/edac/amd64_edac.c
31941 @@ -3099,7 +3099,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
31942 * PCI core identifies what devices are on a system during boot, and then
31943 * inquiry this table to see if this driver is for a given device found.
31944 */
31945 -static const struct pci_device_id amd64_pci_table[] __devinitdata = {
31946 +static const struct pci_device_id amd64_pci_table[] __devinitconst = {
31947 {
31948 .vendor = PCI_VENDOR_ID_AMD,
31949 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
31950 diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
31951 index 2b95f1a..4f52793 100644
31952 --- a/drivers/edac/amd76x_edac.c
31953 +++ b/drivers/edac/amd76x_edac.c
31954 @@ -322,7 +322,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
31955 edac_mc_free(mci);
31956 }
31957
31958 -static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
31959 +static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
31960 {
31961 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
31962 AMD762},
31963 diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
31964 index d205d49..74c9672 100644
31965 --- a/drivers/edac/e752x_edac.c
31966 +++ b/drivers/edac/e752x_edac.c
31967 @@ -1282,7 +1282,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
31968 edac_mc_free(mci);
31969 }
31970
31971 -static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
31972 +static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
31973 {
31974 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
31975 E7520},
31976 diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
31977 index c7d11cc..c59c1ca 100644
31978 --- a/drivers/edac/e7xxx_edac.c
31979 +++ b/drivers/edac/e7xxx_edac.c
31980 @@ -526,7 +526,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
31981 edac_mc_free(mci);
31982 }
31983
31984 -static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
31985 +static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
31986 {
31987 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
31988 E7205},
31989 diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
31990 index 5376457..5fdedbc 100644
31991 --- a/drivers/edac/edac_device_sysfs.c
31992 +++ b/drivers/edac/edac_device_sysfs.c
31993 @@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(struct kobject *kobj,
31994 }
31995
31996 /* edac_dev file operations for an 'ctl_info' */
31997 -static struct sysfs_ops device_ctl_info_ops = {
31998 +static const struct sysfs_ops device_ctl_info_ops = {
31999 .show = edac_dev_ctl_info_show,
32000 .store = edac_dev_ctl_info_store
32001 };
32002 @@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(struct kobject *kobj,
32003 }
32004
32005 /* edac_dev file operations for an 'instance' */
32006 -static struct sysfs_ops device_instance_ops = {
32007 +static const struct sysfs_ops device_instance_ops = {
32008 .show = edac_dev_instance_show,
32009 .store = edac_dev_instance_store
32010 };
32011 @@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(struct kobject *kobj,
32012 }
32013
32014 /* edac_dev file operations for a 'block' */
32015 -static struct sysfs_ops device_block_ops = {
32016 +static const struct sysfs_ops device_block_ops = {
32017 .show = edac_dev_block_show,
32018 .store = edac_dev_block_store
32019 };
32020 diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
32021 index e1d4ce0..88840e9 100644
32022 --- a/drivers/edac/edac_mc_sysfs.c
32023 +++ b/drivers/edac/edac_mc_sysfs.c
32024 @@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
32025 return -EIO;
32026 }
32027
32028 -static struct sysfs_ops csrowfs_ops = {
32029 +static const struct sysfs_ops csrowfs_ops = {
32030 .show = csrowdev_show,
32031 .store = csrowdev_store
32032 };
32033 @@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
32034 }
32035
32036 /* Intermediate show/store table */
32037 -static struct sysfs_ops mci_ops = {
32038 +static const struct sysfs_ops mci_ops = {
32039 .show = mcidev_show,
32040 .store = mcidev_store
32041 };
32042 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
32043 index 422728c..d8d9c88 100644
32044 --- a/drivers/edac/edac_pci_sysfs.c
32045 +++ b/drivers/edac/edac_pci_sysfs.c
32046 @@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
32047 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
32048 static int edac_pci_poll_msec = 1000; /* one second workq period */
32049
32050 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
32051 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
32052 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
32053 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
32054
32055 static struct kobject *edac_pci_top_main_kobj;
32056 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
32057 @@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(struct kobject *kobj,
32058 }
32059
32060 /* fs_ops table */
32061 -static struct sysfs_ops pci_instance_ops = {
32062 +static const struct sysfs_ops pci_instance_ops = {
32063 .show = edac_pci_instance_show,
32064 .store = edac_pci_instance_store
32065 };
32066 @@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj,
32067 return -EIO;
32068 }
32069
32070 -static struct sysfs_ops edac_pci_sysfs_ops = {
32071 +static const struct sysfs_ops edac_pci_sysfs_ops = {
32072 .show = edac_pci_dev_show,
32073 .store = edac_pci_dev_store
32074 };
32075 @@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32076 edac_printk(KERN_CRIT, EDAC_PCI,
32077 "Signaled System Error on %s\n",
32078 pci_name(dev));
32079 - atomic_inc(&pci_nonparity_count);
32080 + atomic_inc_unchecked(&pci_nonparity_count);
32081 }
32082
32083 if (status & (PCI_STATUS_PARITY)) {
32084 @@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32085 "Master Data Parity Error on %s\n",
32086 pci_name(dev));
32087
32088 - atomic_inc(&pci_parity_count);
32089 + atomic_inc_unchecked(&pci_parity_count);
32090 }
32091
32092 if (status & (PCI_STATUS_DETECTED_PARITY)) {
32093 @@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32094 "Detected Parity Error on %s\n",
32095 pci_name(dev));
32096
32097 - atomic_inc(&pci_parity_count);
32098 + atomic_inc_unchecked(&pci_parity_count);
32099 }
32100 }
32101
32102 @@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32103 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
32104 "Signaled System Error on %s\n",
32105 pci_name(dev));
32106 - atomic_inc(&pci_nonparity_count);
32107 + atomic_inc_unchecked(&pci_nonparity_count);
32108 }
32109
32110 if (status & (PCI_STATUS_PARITY)) {
32111 @@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32112 "Master Data Parity Error on "
32113 "%s\n", pci_name(dev));
32114
32115 - atomic_inc(&pci_parity_count);
32116 + atomic_inc_unchecked(&pci_parity_count);
32117 }
32118
32119 if (status & (PCI_STATUS_DETECTED_PARITY)) {
32120 @@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32121 "Detected Parity Error on %s\n",
32122 pci_name(dev));
32123
32124 - atomic_inc(&pci_parity_count);
32125 + atomic_inc_unchecked(&pci_parity_count);
32126 }
32127 }
32128 }
32129 @@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
32130 if (!check_pci_errors)
32131 return;
32132
32133 - before_count = atomic_read(&pci_parity_count);
32134 + before_count = atomic_read_unchecked(&pci_parity_count);
32135
32136 /* scan all PCI devices looking for a Parity Error on devices and
32137 * bridges.
32138 @@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
32139 /* Only if operator has selected panic on PCI Error */
32140 if (edac_pci_get_panic_on_pe()) {
32141 /* If the count is different 'after' from 'before' */
32142 - if (before_count != atomic_read(&pci_parity_count))
32143 + if (before_count != atomic_read_unchecked(&pci_parity_count))
32144 panic("EDAC: PCI Parity Error");
32145 }
32146 }
32147 diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
32148 index 6c9a0f2..9c1cf7e 100644
32149 --- a/drivers/edac/i3000_edac.c
32150 +++ b/drivers/edac/i3000_edac.c
32151 @@ -471,7 +471,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
32152 edac_mc_free(mci);
32153 }
32154
32155 -static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
32156 +static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
32157 {
32158 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32159 I3000},
32160 diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
32161 index fde4db9..fe108f9 100644
32162 --- a/drivers/edac/i3200_edac.c
32163 +++ b/drivers/edac/i3200_edac.c
32164 @@ -444,7 +444,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
32165 edac_mc_free(mci);
32166 }
32167
32168 -static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
32169 +static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
32170 {
32171 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32172 I3200},
32173 diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
32174 index adc10a2..57d4ccf 100644
32175 --- a/drivers/edac/i5000_edac.c
32176 +++ b/drivers/edac/i5000_edac.c
32177 @@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
32178 *
32179 * The "E500P" device is the first device supported.
32180 */
32181 -static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
32182 +static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
32183 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
32184 .driver_data = I5000P},
32185
32186 diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
32187 index 22db05a..b2b5503 100644
32188 --- a/drivers/edac/i5100_edac.c
32189 +++ b/drivers/edac/i5100_edac.c
32190 @@ -944,7 +944,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
32191 edac_mc_free(mci);
32192 }
32193
32194 -static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
32195 +static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
32196 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
32197 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
32198 { 0, }
32199 diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
32200 index f99d106..f050710 100644
32201 --- a/drivers/edac/i5400_edac.c
32202 +++ b/drivers/edac/i5400_edac.c
32203 @@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
32204 *
32205 * The "E500P" device is the first device supported.
32206 */
32207 -static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
32208 +static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
32209 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
32210 {0,} /* 0 terminated list. */
32211 };
32212 diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
32213 index 577760a..9ce16ce 100644
32214 --- a/drivers/edac/i82443bxgx_edac.c
32215 +++ b/drivers/edac/i82443bxgx_edac.c
32216 @@ -381,7 +381,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
32217
32218 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
32219
32220 -static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
32221 +static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
32222 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
32223 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
32224 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
32225 diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
32226 index c0088ba..64a7b98 100644
32227 --- a/drivers/edac/i82860_edac.c
32228 +++ b/drivers/edac/i82860_edac.c
32229 @@ -271,7 +271,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
32230 edac_mc_free(mci);
32231 }
32232
32233 -static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
32234 +static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
32235 {
32236 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32237 I82860},
32238 diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
32239 index b2d83b9..a34357b 100644
32240 --- a/drivers/edac/i82875p_edac.c
32241 +++ b/drivers/edac/i82875p_edac.c
32242 @@ -512,7 +512,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
32243 edac_mc_free(mci);
32244 }
32245
32246 -static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
32247 +static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
32248 {
32249 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32250 I82875P},
32251 diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
32252 index 2eed3ea..87bbbd1 100644
32253 --- a/drivers/edac/i82975x_edac.c
32254 +++ b/drivers/edac/i82975x_edac.c
32255 @@ -586,7 +586,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
32256 edac_mc_free(mci);
32257 }
32258
32259 -static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
32260 +static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
32261 {
32262 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32263 I82975X
32264 diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
32265 index 9900675..78ac2b6 100644
32266 --- a/drivers/edac/r82600_edac.c
32267 +++ b/drivers/edac/r82600_edac.c
32268 @@ -374,7 +374,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
32269 edac_mc_free(mci);
32270 }
32271
32272 -static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
32273 +static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
32274 {
32275 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
32276 },
32277 diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
32278 index d4ec605..4cfec4e 100644
32279 --- a/drivers/edac/x38_edac.c
32280 +++ b/drivers/edac/x38_edac.c
32281 @@ -441,7 +441,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
32282 edac_mc_free(mci);
32283 }
32284
32285 -static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
32286 +static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
32287 {
32288 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32289 X38},
32290 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
32291 index 3fc2ceb..daf098f 100644
32292 --- a/drivers/firewire/core-card.c
32293 +++ b/drivers/firewire/core-card.c
32294 @@ -558,7 +558,7 @@ void fw_card_release(struct kref *kref)
32295
32296 void fw_core_remove_card(struct fw_card *card)
32297 {
32298 - struct fw_card_driver dummy_driver = dummy_driver_template;
32299 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
32300
32301 card->driver->update_phy_reg(card, 4,
32302 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
32303 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
32304 index 4560d8f..36db24a 100644
32305 --- a/drivers/firewire/core-cdev.c
32306 +++ b/drivers/firewire/core-cdev.c
32307 @@ -1141,8 +1141,7 @@ static int init_iso_resource(struct client *client,
32308 int ret;
32309
32310 if ((request->channels == 0 && request->bandwidth == 0) ||
32311 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
32312 - request->bandwidth < 0)
32313 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
32314 return -EINVAL;
32315
32316 r = kmalloc(sizeof(*r), GFP_KERNEL);
32317 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
32318 index da628c7..cf54a2c 100644
32319 --- a/drivers/firewire/core-transaction.c
32320 +++ b/drivers/firewire/core-transaction.c
32321 @@ -36,6 +36,7 @@
32322 #include <linux/string.h>
32323 #include <linux/timer.h>
32324 #include <linux/types.h>
32325 +#include <linux/sched.h>
32326
32327 #include <asm/byteorder.h>
32328
32329 @@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
32330 struct transaction_callback_data d;
32331 struct fw_transaction t;
32332
32333 + pax_track_stack();
32334 +
32335 init_completion(&d.done);
32336 d.payload = payload;
32337 fw_send_request(card, &t, tcode, destination_id, generation, speed,
32338 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
32339 index 7ff6e75..a2965d9 100644
32340 --- a/drivers/firewire/core.h
32341 +++ b/drivers/firewire/core.h
32342 @@ -86,6 +86,7 @@ struct fw_card_driver {
32343
32344 int (*stop_iso)(struct fw_iso_context *ctx);
32345 };
32346 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
32347
32348 void fw_card_initialize(struct fw_card *card,
32349 const struct fw_card_driver *driver, struct device *device);
32350 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
32351 index 3a2ccb0..82fd7c4 100644
32352 --- a/drivers/firmware/dmi_scan.c
32353 +++ b/drivers/firmware/dmi_scan.c
32354 @@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
32355 }
32356 }
32357 else {
32358 - /*
32359 - * no iounmap() for that ioremap(); it would be a no-op, but
32360 - * it's so early in setup that sucker gets confused into doing
32361 - * what it shouldn't if we actually call it.
32362 - */
32363 p = dmi_ioremap(0xF0000, 0x10000);
32364 if (p == NULL)
32365 goto error;
32366 @@ -667,7 +662,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
32367 if (buf == NULL)
32368 return -1;
32369
32370 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
32371 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
32372
32373 iounmap(buf);
32374 return 0;
32375 diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
32376 index 9e4f59d..110e24e 100644
32377 --- a/drivers/firmware/edd.c
32378 +++ b/drivers/firmware/edd.c
32379 @@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, struct attribute *attr, char *buf)
32380 return ret;
32381 }
32382
32383 -static struct sysfs_ops edd_attr_ops = {
32384 +static const struct sysfs_ops edd_attr_ops = {
32385 .show = edd_attr_show,
32386 };
32387
32388 diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
32389 index f4f709d..082f06e 100644
32390 --- a/drivers/firmware/efivars.c
32391 +++ b/drivers/firmware/efivars.c
32392 @@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct kobject *kobj, struct attribute *attr,
32393 return ret;
32394 }
32395
32396 -static struct sysfs_ops efivar_attr_ops = {
32397 +static const struct sysfs_ops efivar_attr_ops = {
32398 .show = efivar_attr_show,
32399 .store = efivar_attr_store,
32400 };
32401 diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
32402 index 051d1eb..0a5d4e7 100644
32403 --- a/drivers/firmware/iscsi_ibft.c
32404 +++ b/drivers/firmware/iscsi_ibft.c
32405 @@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struct kobject *kobj,
32406 return ret;
32407 }
32408
32409 -static struct sysfs_ops ibft_attr_ops = {
32410 +static const struct sysfs_ops ibft_attr_ops = {
32411 .show = ibft_show_attribute,
32412 };
32413
32414 diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
32415 index 56f9234..8c58c7b 100644
32416 --- a/drivers/firmware/memmap.c
32417 +++ b/drivers/firmware/memmap.c
32418 @@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
32419 NULL
32420 };
32421
32422 -static struct sysfs_ops memmap_attr_ops = {
32423 +static const struct sysfs_ops memmap_attr_ops = {
32424 .show = memmap_attr_show,
32425 };
32426
32427 diff --git a/drivers/gpio/vr41xx_giu.c b/drivers/gpio/vr41xx_giu.c
32428 index b16c9a8..2af7d3f 100644
32429 --- a/drivers/gpio/vr41xx_giu.c
32430 +++ b/drivers/gpio/vr41xx_giu.c
32431 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
32432 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
32433 maskl, pendl, maskh, pendh);
32434
32435 - atomic_inc(&irq_err_count);
32436 + atomic_inc_unchecked(&irq_err_count);
32437
32438 return -EINVAL;
32439 }
32440 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
32441 index bea6efc..3dc0f42 100644
32442 --- a/drivers/gpu/drm/drm_crtc.c
32443 +++ b/drivers/gpu/drm/drm_crtc.c
32444 @@ -1323,7 +1323,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32445 */
32446 if ((out_resp->count_modes >= mode_count) && mode_count) {
32447 copied = 0;
32448 - mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
32449 + mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
32450 list_for_each_entry(mode, &connector->modes, head) {
32451 drm_crtc_convert_to_umode(&u_mode, mode);
32452 if (copy_to_user(mode_ptr + copied,
32453 @@ -1338,8 +1338,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32454
32455 if ((out_resp->count_props >= props_count) && props_count) {
32456 copied = 0;
32457 - prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
32458 - prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
32459 + prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
32460 + prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
32461 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
32462 if (connector->property_ids[i] != 0) {
32463 if (put_user(connector->property_ids[i],
32464 @@ -1361,7 +1361,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32465
32466 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
32467 copied = 0;
32468 - encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
32469 + encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
32470 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
32471 if (connector->encoder_ids[i] != 0) {
32472 if (put_user(connector->encoder_ids[i],
32473 @@ -1513,7 +1513,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
32474 }
32475
32476 for (i = 0; i < crtc_req->count_connectors; i++) {
32477 - set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
32478 + set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
32479 if (get_user(out_id, &set_connectors_ptr[i])) {
32480 ret = -EFAULT;
32481 goto out;
32482 @@ -2118,7 +2118,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32483 out_resp->flags = property->flags;
32484
32485 if ((out_resp->count_values >= value_count) && value_count) {
32486 - values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
32487 + values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
32488 for (i = 0; i < value_count; i++) {
32489 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
32490 ret = -EFAULT;
32491 @@ -2131,7 +2131,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32492 if (property->flags & DRM_MODE_PROP_ENUM) {
32493 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
32494 copied = 0;
32495 - enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
32496 + enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
32497 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
32498
32499 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
32500 @@ -2154,7 +2154,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32501 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
32502 copied = 0;
32503 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
32504 - blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
32505 + blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
32506
32507 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
32508 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
32509 @@ -2226,7 +2226,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
32510 blob = obj_to_blob(obj);
32511
32512 if (out_resp->length == blob->length) {
32513 - blob_ptr = (void *)(unsigned long)out_resp->data;
32514 + blob_ptr = (void __user *)(unsigned long)out_resp->data;
32515 if (copy_to_user(blob_ptr, blob->data, blob->length)){
32516 ret = -EFAULT;
32517 goto done;
32518 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
32519 index 1b8745d..92fdbf6 100644
32520 --- a/drivers/gpu/drm/drm_crtc_helper.c
32521 +++ b/drivers/gpu/drm/drm_crtc_helper.c
32522 @@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
32523 struct drm_crtc *tmp;
32524 int crtc_mask = 1;
32525
32526 - WARN(!crtc, "checking null crtc?");
32527 + BUG_ON(!crtc);
32528
32529 dev = crtc->dev;
32530
32531 @@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
32532
32533 adjusted_mode = drm_mode_duplicate(dev, mode);
32534
32535 + pax_track_stack();
32536 +
32537 crtc->enabled = drm_helper_crtc_in_use(crtc);
32538
32539 if (!crtc->enabled)
32540 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
32541 index 0e27d98..dec8768 100644
32542 --- a/drivers/gpu/drm/drm_drv.c
32543 +++ b/drivers/gpu/drm/drm_drv.c
32544 @@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
32545 char *kdata = NULL;
32546
32547 atomic_inc(&dev->ioctl_count);
32548 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
32549 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
32550 ++file_priv->ioctl_count;
32551
32552 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
32553 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
32554 index ba14553..182d0bb 100644
32555 --- a/drivers/gpu/drm/drm_fops.c
32556 +++ b/drivers/gpu/drm/drm_fops.c
32557 @@ -66,7 +66,7 @@ static int drm_setup(struct drm_device * dev)
32558 }
32559
32560 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
32561 - atomic_set(&dev->counts[i], 0);
32562 + atomic_set_unchecked(&dev->counts[i], 0);
32563
32564 dev->sigdata.lock = NULL;
32565
32566 @@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct file *filp)
32567
32568 retcode = drm_open_helper(inode, filp, dev);
32569 if (!retcode) {
32570 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
32571 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
32572 spin_lock(&dev->count_lock);
32573 - if (!dev->open_count++) {
32574 + if (local_inc_return(&dev->open_count) == 1) {
32575 spin_unlock(&dev->count_lock);
32576 retcode = drm_setup(dev);
32577 goto out;
32578 @@ -435,7 +435,7 @@ int drm_release(struct inode *inode, struct file *filp)
32579
32580 lock_kernel();
32581
32582 - DRM_DEBUG("open_count = %d\n", dev->open_count);
32583 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
32584
32585 if (dev->driver->preclose)
32586 dev->driver->preclose(dev, file_priv);
32587 @@ -447,7 +447,7 @@ int drm_release(struct inode *inode, struct file *filp)
32588 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
32589 task_pid_nr(current),
32590 (long)old_encode_dev(file_priv->minor->device),
32591 - dev->open_count);
32592 + local_read(&dev->open_count));
32593
32594 /* if the master has gone away we can't do anything with the lock */
32595 if (file_priv->minor->master)
32596 @@ -524,9 +524,9 @@ int drm_release(struct inode *inode, struct file *filp)
32597 * End inline drm_release
32598 */
32599
32600 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
32601 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
32602 spin_lock(&dev->count_lock);
32603 - if (!--dev->open_count) {
32604 + if (local_dec_and_test(&dev->open_count)) {
32605 if (atomic_read(&dev->ioctl_count)) {
32606 DRM_ERROR("Device busy: %d\n",
32607 atomic_read(&dev->ioctl_count));
32608 diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
32609 index 8bf3770..7942280 100644
32610 --- a/drivers/gpu/drm/drm_gem.c
32611 +++ b/drivers/gpu/drm/drm_gem.c
32612 @@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
32613 spin_lock_init(&dev->object_name_lock);
32614 idr_init(&dev->object_name_idr);
32615 atomic_set(&dev->object_count, 0);
32616 - atomic_set(&dev->object_memory, 0);
32617 + atomic_set_unchecked(&dev->object_memory, 0);
32618 atomic_set(&dev->pin_count, 0);
32619 - atomic_set(&dev->pin_memory, 0);
32620 + atomic_set_unchecked(&dev->pin_memory, 0);
32621 atomic_set(&dev->gtt_count, 0);
32622 - atomic_set(&dev->gtt_memory, 0);
32623 + atomic_set_unchecked(&dev->gtt_memory, 0);
32624
32625 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
32626 if (!mm) {
32627 @@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
32628 goto fput;
32629 }
32630 atomic_inc(&dev->object_count);
32631 - atomic_add(obj->size, &dev->object_memory);
32632 + atomic_add_unchecked(obj->size, &dev->object_memory);
32633 return obj;
32634 fput:
32635 fput(obj->filp);
32636 @@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
32637
32638 fput(obj->filp);
32639 atomic_dec(&dev->object_count);
32640 - atomic_sub(obj->size, &dev->object_memory);
32641 + atomic_sub_unchecked(obj->size, &dev->object_memory);
32642 kfree(obj);
32643 }
32644 EXPORT_SYMBOL(drm_gem_object_free);
32645 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
32646 index f0f6c6b..34af322 100644
32647 --- a/drivers/gpu/drm/drm_info.c
32648 +++ b/drivers/gpu/drm/drm_info.c
32649 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
32650 struct drm_local_map *map;
32651 struct drm_map_list *r_list;
32652
32653 - /* Hardcoded from _DRM_FRAME_BUFFER,
32654 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
32655 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
32656 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
32657 + static const char * const types[] = {
32658 + [_DRM_FRAME_BUFFER] = "FB",
32659 + [_DRM_REGISTERS] = "REG",
32660 + [_DRM_SHM] = "SHM",
32661 + [_DRM_AGP] = "AGP",
32662 + [_DRM_SCATTER_GATHER] = "SG",
32663 + [_DRM_CONSISTENT] = "PCI",
32664 + [_DRM_GEM] = "GEM" };
32665 const char *type;
32666 int i;
32667
32668 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
32669 map = r_list->map;
32670 if (!map)
32671 continue;
32672 - if (map->type < 0 || map->type > 5)
32673 + if (map->type >= ARRAY_SIZE(types))
32674 type = "??";
32675 else
32676 type = types[map->type];
32677 @@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file *m, void* data)
32678 struct drm_device *dev = node->minor->dev;
32679
32680 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
32681 - seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
32682 + seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
32683 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
32684 - seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
32685 - seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
32686 + seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
32687 + seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
32688 seq_printf(m, "%d gtt total\n", dev->gtt_total);
32689 return 0;
32690 }
32691 @@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, void *data)
32692 mutex_lock(&dev->struct_mutex);
32693 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
32694 atomic_read(&dev->vma_count),
32695 +#ifdef CONFIG_GRKERNSEC_HIDESYM
32696 + NULL, 0);
32697 +#else
32698 high_memory, (u64)virt_to_phys(high_memory));
32699 +#endif
32700
32701 list_for_each_entry(pt, &dev->vmalist, head) {
32702 vma = pt->vma;
32703 @@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, void *data)
32704 continue;
32705 seq_printf(m,
32706 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
32707 - pt->pid, vma->vm_start, vma->vm_end,
32708 + pt->pid,
32709 +#ifdef CONFIG_GRKERNSEC_HIDESYM
32710 + 0, 0,
32711 +#else
32712 + vma->vm_start, vma->vm_end,
32713 +#endif
32714 vma->vm_flags & VM_READ ? 'r' : '-',
32715 vma->vm_flags & VM_WRITE ? 'w' : '-',
32716 vma->vm_flags & VM_EXEC ? 'x' : '-',
32717 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
32718 vma->vm_flags & VM_LOCKED ? 'l' : '-',
32719 vma->vm_flags & VM_IO ? 'i' : '-',
32720 +#ifdef CONFIG_GRKERNSEC_HIDESYM
32721 + 0);
32722 +#else
32723 vma->vm_pgoff);
32724 +#endif
32725
32726 #if defined(__i386__)
32727 pgprot = pgprot_val(vma->vm_page_prot);
32728 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
32729 index 282d9fd..71e5f11 100644
32730 --- a/drivers/gpu/drm/drm_ioc32.c
32731 +++ b/drivers/gpu/drm/drm_ioc32.c
32732 @@ -463,7 +463,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
32733 request = compat_alloc_user_space(nbytes);
32734 if (!access_ok(VERIFY_WRITE, request, nbytes))
32735 return -EFAULT;
32736 - list = (struct drm_buf_desc *) (request + 1);
32737 + list = (struct drm_buf_desc __user *) (request + 1);
32738
32739 if (__put_user(count, &request->count)
32740 || __put_user(list, &request->list))
32741 @@ -525,7 +525,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
32742 request = compat_alloc_user_space(nbytes);
32743 if (!access_ok(VERIFY_WRITE, request, nbytes))
32744 return -EFAULT;
32745 - list = (struct drm_buf_pub *) (request + 1);
32746 + list = (struct drm_buf_pub __user *) (request + 1);
32747
32748 if (__put_user(count, &request->count)
32749 || __put_user(list, &request->list))
32750 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
32751 index 9b9ff46..4ea724c 100644
32752 --- a/drivers/gpu/drm/drm_ioctl.c
32753 +++ b/drivers/gpu/drm/drm_ioctl.c
32754 @@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev, void *data,
32755 stats->data[i].value =
32756 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
32757 else
32758 - stats->data[i].value = atomic_read(&dev->counts[i]);
32759 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
32760 stats->data[i].type = dev->types[i];
32761 }
32762
32763 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
32764 index e2f70a5..c703e86 100644
32765 --- a/drivers/gpu/drm/drm_lock.c
32766 +++ b/drivers/gpu/drm/drm_lock.c
32767 @@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
32768 if (drm_lock_take(&master->lock, lock->context)) {
32769 master->lock.file_priv = file_priv;
32770 master->lock.lock_time = jiffies;
32771 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
32772 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
32773 break; /* Got lock */
32774 }
32775
32776 @@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
32777 return -EINVAL;
32778 }
32779
32780 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
32781 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
32782
32783 /* kernel_context_switch isn't used by any of the x86 drm
32784 * modules but is required by the Sparc driver.
32785 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
32786 index 7d1d88c..b9131b2 100644
32787 --- a/drivers/gpu/drm/i810/i810_dma.c
32788 +++ b/drivers/gpu/drm/i810/i810_dma.c
32789 @@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
32790 dma->buflist[vertex->idx],
32791 vertex->discard, vertex->used);
32792
32793 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
32794 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
32795 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
32796 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
32797 sarea_priv->last_enqueue = dev_priv->counter - 1;
32798 sarea_priv->last_dispatch = (int)hw_status[5];
32799
32800 @@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
32801 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
32802 mc->last_render);
32803
32804 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
32805 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
32806 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
32807 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
32808 sarea_priv->last_enqueue = dev_priv->counter - 1;
32809 sarea_priv->last_dispatch = (int)hw_status[5];
32810
32811 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
32812 index 21e2691..7321edd 100644
32813 --- a/drivers/gpu/drm/i810/i810_drv.h
32814 +++ b/drivers/gpu/drm/i810/i810_drv.h
32815 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
32816 int page_flipping;
32817
32818 wait_queue_head_t irq_queue;
32819 - atomic_t irq_received;
32820 - atomic_t irq_emitted;
32821 + atomic_unchecked_t irq_received;
32822 + atomic_unchecked_t irq_emitted;
32823
32824 int front_offset;
32825 } drm_i810_private_t;
32826 diff --git a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h
32827 index da82afe..48a45de 100644
32828 --- a/drivers/gpu/drm/i830/i830_drv.h
32829 +++ b/drivers/gpu/drm/i830/i830_drv.h
32830 @@ -115,8 +115,8 @@ typedef struct drm_i830_private {
32831 int page_flipping;
32832
32833 wait_queue_head_t irq_queue;
32834 - atomic_t irq_received;
32835 - atomic_t irq_emitted;
32836 + atomic_unchecked_t irq_received;
32837 + atomic_unchecked_t irq_emitted;
32838
32839 int use_mi_batchbuffer_start;
32840
32841 diff --git a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c
32842 index 91ec2bb..6f21fab 100644
32843 --- a/drivers/gpu/drm/i830/i830_irq.c
32844 +++ b/drivers/gpu/drm/i830/i830_irq.c
32845 @@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
32846
32847 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
32848
32849 - atomic_inc(&dev_priv->irq_received);
32850 + atomic_inc_unchecked(&dev_priv->irq_received);
32851 wake_up_interruptible(&dev_priv->irq_queue);
32852
32853 return IRQ_HANDLED;
32854 @@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_device * dev)
32855
32856 DRM_DEBUG("%s\n", __func__);
32857
32858 - atomic_inc(&dev_priv->irq_emitted);
32859 + atomic_inc_unchecked(&dev_priv->irq_emitted);
32860
32861 BEGIN_LP_RING(2);
32862 OUT_RING(0);
32863 OUT_RING(GFX_OP_USER_INTERRUPT);
32864 ADVANCE_LP_RING();
32865
32866 - return atomic_read(&dev_priv->irq_emitted);
32867 + return atomic_read_unchecked(&dev_priv->irq_emitted);
32868 }
32869
32870 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
32871 @@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
32872
32873 DRM_DEBUG("%s\n", __func__);
32874
32875 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
32876 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
32877 return 0;
32878
32879 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
32880 @@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
32881
32882 for (;;) {
32883 __set_current_state(TASK_INTERRUPTIBLE);
32884 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
32885 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
32886 break;
32887 if ((signed)(end - jiffies) <= 0) {
32888 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
32889 @@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct drm_device * dev)
32890 I830_WRITE16(I830REG_HWSTAM, 0xffff);
32891 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
32892 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
32893 - atomic_set(&dev_priv->irq_received, 0);
32894 - atomic_set(&dev_priv->irq_emitted, 0);
32895 + atomic_set_unchecked(&dev_priv->irq_received, 0);
32896 + atomic_set_unchecked(&dev_priv->irq_emitted, 0);
32897 init_waitqueue_head(&dev_priv->irq_queue);
32898 }
32899
32900 diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
32901 index 288fc50..c6092055 100644
32902 --- a/drivers/gpu/drm/i915/dvo.h
32903 +++ b/drivers/gpu/drm/i915/dvo.h
32904 @@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
32905 *
32906 * \return singly-linked list of modes or NULL if no modes found.
32907 */
32908 - struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
32909 + struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
32910
32911 /**
32912 * Clean up driver-specific bits of the output
32913 */
32914 - void (*destroy) (struct intel_dvo_device *dvo);
32915 + void (* const destroy) (struct intel_dvo_device *dvo);
32916
32917 /**
32918 * Debugging hook to dump device registers to log file
32919 */
32920 - void (*dump_regs)(struct intel_dvo_device *dvo);
32921 + void (* const dump_regs)(struct intel_dvo_device *dvo);
32922 };
32923
32924 -extern struct intel_dvo_dev_ops sil164_ops;
32925 -extern struct intel_dvo_dev_ops ch7xxx_ops;
32926 -extern struct intel_dvo_dev_ops ivch_ops;
32927 -extern struct intel_dvo_dev_ops tfp410_ops;
32928 -extern struct intel_dvo_dev_ops ch7017_ops;
32929 +extern const struct intel_dvo_dev_ops sil164_ops;
32930 +extern const struct intel_dvo_dev_ops ch7xxx_ops;
32931 +extern const struct intel_dvo_dev_ops ivch_ops;
32932 +extern const struct intel_dvo_dev_ops tfp410_ops;
32933 +extern const struct intel_dvo_dev_ops ch7017_ops;
32934
32935 #endif /* _INTEL_DVO_H */
32936 diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
32937 index 621815b..499d82e 100644
32938 --- a/drivers/gpu/drm/i915/dvo_ch7017.c
32939 +++ b/drivers/gpu/drm/i915/dvo_ch7017.c
32940 @@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_dvo_device *dvo)
32941 }
32942 }
32943
32944 -struct intel_dvo_dev_ops ch7017_ops = {
32945 +const struct intel_dvo_dev_ops ch7017_ops = {
32946 .init = ch7017_init,
32947 .detect = ch7017_detect,
32948 .mode_valid = ch7017_mode_valid,
32949 diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
32950 index a9b8962..ac769ba 100644
32951 --- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
32952 +++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
32953 @@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_dvo_device *dvo)
32954 }
32955 }
32956
32957 -struct intel_dvo_dev_ops ch7xxx_ops = {
32958 +const struct intel_dvo_dev_ops ch7xxx_ops = {
32959 .init = ch7xxx_init,
32960 .detect = ch7xxx_detect,
32961 .mode_valid = ch7xxx_mode_valid,
32962 diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
32963 index aa176f9..ed2930c 100644
32964 --- a/drivers/gpu/drm/i915/dvo_ivch.c
32965 +++ b/drivers/gpu/drm/i915/dvo_ivch.c
32966 @@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
32967 }
32968 }
32969
32970 -struct intel_dvo_dev_ops ivch_ops= {
32971 +const struct intel_dvo_dev_ops ivch_ops= {
32972 .init = ivch_init,
32973 .dpms = ivch_dpms,
32974 .save = ivch_save,
32975 diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
32976 index e1c1f73..7dbebcf 100644
32977 --- a/drivers/gpu/drm/i915/dvo_sil164.c
32978 +++ b/drivers/gpu/drm/i915/dvo_sil164.c
32979 @@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_dvo_device *dvo)
32980 }
32981 }
32982
32983 -struct intel_dvo_dev_ops sil164_ops = {
32984 +const struct intel_dvo_dev_ops sil164_ops = {
32985 .init = sil164_init,
32986 .detect = sil164_detect,
32987 .mode_valid = sil164_mode_valid,
32988 diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
32989 index 16dce84..7e1b6f8 100644
32990 --- a/drivers/gpu/drm/i915/dvo_tfp410.c
32991 +++ b/drivers/gpu/drm/i915/dvo_tfp410.c
32992 @@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_dvo_device *dvo)
32993 }
32994 }
32995
32996 -struct intel_dvo_dev_ops tfp410_ops = {
32997 +const struct intel_dvo_dev_ops tfp410_ops = {
32998 .init = tfp410_init,
32999 .detect = tfp410_detect,
33000 .mode_valid = tfp410_mode_valid,
33001 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
33002 index 7e859d6..7d1cf2b 100644
33003 --- a/drivers/gpu/drm/i915/i915_debugfs.c
33004 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
33005 @@ -192,7 +192,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
33006 I915_READ(GTIMR));
33007 }
33008 seq_printf(m, "Interrupts received: %d\n",
33009 - atomic_read(&dev_priv->irq_received));
33010 + atomic_read_unchecked(&dev_priv->irq_received));
33011 if (dev_priv->hw_status_page != NULL) {
33012 seq_printf(m, "Current sequence: %d\n",
33013 i915_get_gem_seqno(dev));
33014 diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
33015 index 5449239..7e4f68d 100644
33016 --- a/drivers/gpu/drm/i915/i915_drv.c
33017 +++ b/drivers/gpu/drm/i915/i915_drv.c
33018 @@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
33019 return i915_resume(dev);
33020 }
33021
33022 -static struct vm_operations_struct i915_gem_vm_ops = {
33023 +static const struct vm_operations_struct i915_gem_vm_ops = {
33024 .fault = i915_gem_fault,
33025 .open = drm_gem_vm_open,
33026 .close = drm_gem_vm_close,
33027 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
33028 index 97163f7..c24c7c7 100644
33029 --- a/drivers/gpu/drm/i915/i915_drv.h
33030 +++ b/drivers/gpu/drm/i915/i915_drv.h
33031 @@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
33032 /* display clock increase/decrease */
33033 /* pll clock increase/decrease */
33034 /* clock gating init */
33035 -};
33036 +} __no_const;
33037
33038 typedef struct drm_i915_private {
33039 struct drm_device *dev;
33040 @@ -197,7 +197,7 @@ typedef struct drm_i915_private {
33041 int page_flipping;
33042
33043 wait_queue_head_t irq_queue;
33044 - atomic_t irq_received;
33045 + atomic_unchecked_t irq_received;
33046 /** Protects user_irq_refcount and irq_mask_reg */
33047 spinlock_t user_irq_lock;
33048 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
33049 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
33050 index 27a3074..eb3f959 100644
33051 --- a/drivers/gpu/drm/i915/i915_gem.c
33052 +++ b/drivers/gpu/drm/i915/i915_gem.c
33053 @@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
33054
33055 args->aper_size = dev->gtt_total;
33056 args->aper_available_size = (args->aper_size -
33057 - atomic_read(&dev->pin_memory));
33058 + atomic_read_unchecked(&dev->pin_memory));
33059
33060 return 0;
33061 }
33062 @@ -2058,7 +2058,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
33063
33064 if (obj_priv->gtt_space) {
33065 atomic_dec(&dev->gtt_count);
33066 - atomic_sub(obj->size, &dev->gtt_memory);
33067 + atomic_sub_unchecked(obj->size, &dev->gtt_memory);
33068
33069 drm_mm_put_block(obj_priv->gtt_space);
33070 obj_priv->gtt_space = NULL;
33071 @@ -2701,7 +2701,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
33072 goto search_free;
33073 }
33074 atomic_inc(&dev->gtt_count);
33075 - atomic_add(obj->size, &dev->gtt_memory);
33076 + atomic_add_unchecked(obj->size, &dev->gtt_memory);
33077
33078 /* Assert that the object is not currently in any GPU domain. As it
33079 * wasn't in the GTT, there shouldn't be any way it could have been in
33080 @@ -3755,9 +3755,9 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
33081 "%d/%d gtt bytes\n",
33082 atomic_read(&dev->object_count),
33083 atomic_read(&dev->pin_count),
33084 - atomic_read(&dev->object_memory),
33085 - atomic_read(&dev->pin_memory),
33086 - atomic_read(&dev->gtt_memory),
33087 + atomic_read_unchecked(&dev->object_memory),
33088 + atomic_read_unchecked(&dev->pin_memory),
33089 + atomic_read_unchecked(&dev->gtt_memory),
33090 dev->gtt_total);
33091 }
33092 goto err;
33093 @@ -3989,7 +3989,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
33094 */
33095 if (obj_priv->pin_count == 1) {
33096 atomic_inc(&dev->pin_count);
33097 - atomic_add(obj->size, &dev->pin_memory);
33098 + atomic_add_unchecked(obj->size, &dev->pin_memory);
33099 if (!obj_priv->active &&
33100 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
33101 !list_empty(&obj_priv->list))
33102 @@ -4022,7 +4022,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
33103 list_move_tail(&obj_priv->list,
33104 &dev_priv->mm.inactive_list);
33105 atomic_dec(&dev->pin_count);
33106 - atomic_sub(obj->size, &dev->pin_memory);
33107 + atomic_sub_unchecked(obj->size, &dev->pin_memory);
33108 }
33109 i915_verify_inactive(dev, __FILE__, __LINE__);
33110 }
33111 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
33112 index 63f28ad..f5469da 100644
33113 --- a/drivers/gpu/drm/i915/i915_irq.c
33114 +++ b/drivers/gpu/drm/i915/i915_irq.c
33115 @@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
33116 int irq_received;
33117 int ret = IRQ_NONE;
33118
33119 - atomic_inc(&dev_priv->irq_received);
33120 + atomic_inc_unchecked(&dev_priv->irq_received);
33121
33122 if (IS_IGDNG(dev))
33123 return igdng_irq_handler(dev);
33124 @@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
33125 {
33126 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
33127
33128 - atomic_set(&dev_priv->irq_received, 0);
33129 + atomic_set_unchecked(&dev_priv->irq_received, 0);
33130
33131 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
33132 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
33133 diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
33134 index 5d9c6a7..d1b0e29 100644
33135 --- a/drivers/gpu/drm/i915/intel_sdvo.c
33136 +++ b/drivers/gpu/drm/i915/intel_sdvo.c
33137 @@ -2795,7 +2795,9 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
33138 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
33139
33140 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
33141 - intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
33142 + pax_open_kernel();
33143 + *(void **)&intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
33144 + pax_close_kernel();
33145
33146 /* Read the regs to test if we can talk to the device */
33147 for (i = 0; i < 0x40; i++) {
33148 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
33149 index be6c6b9..8615d9c 100644
33150 --- a/drivers/gpu/drm/mga/mga_drv.h
33151 +++ b/drivers/gpu/drm/mga/mga_drv.h
33152 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
33153 u32 clear_cmd;
33154 u32 maccess;
33155
33156 - atomic_t vbl_received; /**< Number of vblanks received. */
33157 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
33158 wait_queue_head_t fence_queue;
33159 - atomic_t last_fence_retired;
33160 + atomic_unchecked_t last_fence_retired;
33161 u32 next_fence_to_post;
33162
33163 unsigned int fb_cpp;
33164 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
33165 index daa6041..a28a5da 100644
33166 --- a/drivers/gpu/drm/mga/mga_irq.c
33167 +++ b/drivers/gpu/drm/mga/mga_irq.c
33168 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
33169 if (crtc != 0)
33170 return 0;
33171
33172 - return atomic_read(&dev_priv->vbl_received);
33173 + return atomic_read_unchecked(&dev_priv->vbl_received);
33174 }
33175
33176
33177 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
33178 /* VBLANK interrupt */
33179 if (status & MGA_VLINEPEN) {
33180 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
33181 - atomic_inc(&dev_priv->vbl_received);
33182 + atomic_inc_unchecked(&dev_priv->vbl_received);
33183 drm_handle_vblank(dev, 0);
33184 handled = 1;
33185 }
33186 @@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
33187 MGA_WRITE(MGA_PRIMEND, prim_end);
33188 }
33189
33190 - atomic_inc(&dev_priv->last_fence_retired);
33191 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
33192 DRM_WAKEUP(&dev_priv->fence_queue);
33193 handled = 1;
33194 }
33195 @@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
33196 * using fences.
33197 */
33198 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
33199 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
33200 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
33201 - *sequence) <= (1 << 23)));
33202
33203 *sequence = cur_fence;
33204 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
33205 index 4c39a40..b22a9ea 100644
33206 --- a/drivers/gpu/drm/r128/r128_cce.c
33207 +++ b/drivers/gpu/drm/r128/r128_cce.c
33208 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
33209
33210 /* GH: Simple idle check.
33211 */
33212 - atomic_set(&dev_priv->idle_count, 0);
33213 + atomic_set_unchecked(&dev_priv->idle_count, 0);
33214
33215 /* We don't support anything other than bus-mastering ring mode,
33216 * but the ring can be in either AGP or PCI space for the ring
33217 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
33218 index 3c60829..4faf484 100644
33219 --- a/drivers/gpu/drm/r128/r128_drv.h
33220 +++ b/drivers/gpu/drm/r128/r128_drv.h
33221 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
33222 int is_pci;
33223 unsigned long cce_buffers_offset;
33224
33225 - atomic_t idle_count;
33226 + atomic_unchecked_t idle_count;
33227
33228 int page_flipping;
33229 int current_page;
33230 u32 crtc_offset;
33231 u32 crtc_offset_cntl;
33232
33233 - atomic_t vbl_received;
33234 + atomic_unchecked_t vbl_received;
33235
33236 u32 color_fmt;
33237 unsigned int front_offset;
33238 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
33239 index 69810fb..97bf17a 100644
33240 --- a/drivers/gpu/drm/r128/r128_irq.c
33241 +++ b/drivers/gpu/drm/r128/r128_irq.c
33242 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
33243 if (crtc != 0)
33244 return 0;
33245
33246 - return atomic_read(&dev_priv->vbl_received);
33247 + return atomic_read_unchecked(&dev_priv->vbl_received);
33248 }
33249
33250 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33251 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33252 /* VBLANK interrupt */
33253 if (status & R128_CRTC_VBLANK_INT) {
33254 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
33255 - atomic_inc(&dev_priv->vbl_received);
33256 + atomic_inc_unchecked(&dev_priv->vbl_received);
33257 drm_handle_vblank(dev, 0);
33258 return IRQ_HANDLED;
33259 }
33260 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
33261 index af2665c..51922d2 100644
33262 --- a/drivers/gpu/drm/r128/r128_state.c
33263 +++ b/drivers/gpu/drm/r128/r128_state.c
33264 @@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_private_t * dev_priv,
33265
33266 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
33267 {
33268 - if (atomic_read(&dev_priv->idle_count) == 0) {
33269 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
33270 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
33271 } else {
33272 - atomic_set(&dev_priv->idle_count, 0);
33273 + atomic_set_unchecked(&dev_priv->idle_count, 0);
33274 }
33275 }
33276
33277 diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
33278 index dd72b91..8644b3c 100644
33279 --- a/drivers/gpu/drm/radeon/atom.c
33280 +++ b/drivers/gpu/drm/radeon/atom.c
33281 @@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
33282 char name[512];
33283 int i;
33284
33285 + pax_track_stack();
33286 +
33287 ctx->card = card;
33288 ctx->bios = bios;
33289
33290 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
33291 index 0d79577..efaa7a5 100644
33292 --- a/drivers/gpu/drm/radeon/mkregtable.c
33293 +++ b/drivers/gpu/drm/radeon/mkregtable.c
33294 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
33295 regex_t mask_rex;
33296 regmatch_t match[4];
33297 char buf[1024];
33298 - size_t end;
33299 + long end;
33300 int len;
33301 int done = 0;
33302 int r;
33303 unsigned o;
33304 struct offset *offset;
33305 char last_reg_s[10];
33306 - int last_reg;
33307 + unsigned long last_reg;
33308
33309 if (regcomp
33310 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
33311 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
33312 index 6735213..38c2c67 100644
33313 --- a/drivers/gpu/drm/radeon/radeon.h
33314 +++ b/drivers/gpu/drm/radeon/radeon.h
33315 @@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device *rdev);
33316 */
33317 struct radeon_fence_driver {
33318 uint32_t scratch_reg;
33319 - atomic_t seq;
33320 + atomic_unchecked_t seq;
33321 uint32_t last_seq;
33322 unsigned long count_timeout;
33323 wait_queue_head_t queue;
33324 @@ -640,7 +640,7 @@ struct radeon_asic {
33325 uint32_t offset, uint32_t obj_size);
33326 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
33327 void (*bandwidth_update)(struct radeon_device *rdev);
33328 -};
33329 +} __no_const;
33330
33331 /*
33332 * Asic structures
33333 diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
33334 index 4e928b9..d8b6008 100644
33335 --- a/drivers/gpu/drm/radeon/radeon_atombios.c
33336 +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
33337 @@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
33338 bool linkb;
33339 struct radeon_i2c_bus_rec ddc_bus;
33340
33341 + pax_track_stack();
33342 +
33343 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
33344
33345 if (data_offset == 0)
33346 @@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_object_id(struct drm_device *dev,
33347 }
33348 }
33349
33350 -struct bios_connector {
33351 +static struct bios_connector {
33352 bool valid;
33353 uint16_t line_mux;
33354 uint16_t devices;
33355 int connector_type;
33356 struct radeon_i2c_bus_rec ddc_bus;
33357 -};
33358 +} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
33359
33360 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
33361 drm_device
33362 @@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
33363 uint8_t dac;
33364 union atom_supported_devices *supported_devices;
33365 int i, j;
33366 - struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
33367
33368 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
33369
33370 diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
33371 index 083a181..ccccae0 100644
33372 --- a/drivers/gpu/drm/radeon/radeon_display.c
33373 +++ b/drivers/gpu/drm/radeon/radeon_display.c
33374 @@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
33375
33376 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
33377 error = freq - current_freq;
33378 - error = error < 0 ? 0xffffffff : error;
33379 + error = (int32_t)error < 0 ? 0xffffffff : error;
33380 } else
33381 error = abs(current_freq - freq);
33382 vco_diff = abs(vco - best_vco);
33383 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
33384 index 76e4070..193fa7f 100644
33385 --- a/drivers/gpu/drm/radeon/radeon_drv.h
33386 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
33387 @@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
33388
33389 /* SW interrupt */
33390 wait_queue_head_t swi_queue;
33391 - atomic_t swi_emitted;
33392 + atomic_unchecked_t swi_emitted;
33393 int vblank_crtc;
33394 uint32_t irq_enable_reg;
33395 uint32_t r500_disp_irq_reg;
33396 diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
33397 index 3beb26d..6ce9c4a 100644
33398 --- a/drivers/gpu/drm/radeon/radeon_fence.c
33399 +++ b/drivers/gpu/drm/radeon/radeon_fence.c
33400 @@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
33401 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
33402 return 0;
33403 }
33404 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
33405 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
33406 if (!rdev->cp.ready) {
33407 /* FIXME: cp is not running assume everythings is done right
33408 * away
33409 @@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
33410 return r;
33411 }
33412 WREG32(rdev->fence_drv.scratch_reg, 0);
33413 - atomic_set(&rdev->fence_drv.seq, 0);
33414 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
33415 INIT_LIST_HEAD(&rdev->fence_drv.created);
33416 INIT_LIST_HEAD(&rdev->fence_drv.emited);
33417 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
33418 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
33419 index a1bf11d..4a123c0 100644
33420 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
33421 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
33422 @@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
33423 request = compat_alloc_user_space(sizeof(*request));
33424 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
33425 || __put_user(req32.param, &request->param)
33426 - || __put_user((void __user *)(unsigned long)req32.value,
33427 + || __put_user((unsigned long)req32.value,
33428 &request->value))
33429 return -EFAULT;
33430
33431 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
33432 index b79ecc4..8dab92d 100644
33433 --- a/drivers/gpu/drm/radeon/radeon_irq.c
33434 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
33435 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
33436 unsigned int ret;
33437 RING_LOCALS;
33438
33439 - atomic_inc(&dev_priv->swi_emitted);
33440 - ret = atomic_read(&dev_priv->swi_emitted);
33441 + atomic_inc_unchecked(&dev_priv->swi_emitted);
33442 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
33443
33444 BEGIN_RING(4);
33445 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
33446 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
33447 drm_radeon_private_t *dev_priv =
33448 (drm_radeon_private_t *) dev->dev_private;
33449
33450 - atomic_set(&dev_priv->swi_emitted, 0);
33451 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
33452 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
33453
33454 dev->max_vblank_count = 0x001fffff;
33455 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
33456 index 4747910..48ca4b3 100644
33457 --- a/drivers/gpu/drm/radeon/radeon_state.c
33458 +++ b/drivers/gpu/drm/radeon/radeon_state.c
33459 @@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
33460 {
33461 drm_radeon_private_t *dev_priv = dev->dev_private;
33462 drm_radeon_getparam_t *param = data;
33463 - int value;
33464 + int value = 0;
33465
33466 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
33467
33468 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
33469 index 1381e06..0e53b17 100644
33470 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
33471 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
33472 @@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_device *rdev)
33473 DRM_INFO("radeon: ttm finalized\n");
33474 }
33475
33476 -static struct vm_operations_struct radeon_ttm_vm_ops;
33477 -static const struct vm_operations_struct *ttm_vm_ops = NULL;
33478 -
33479 -static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33480 -{
33481 - struct ttm_buffer_object *bo;
33482 - int r;
33483 -
33484 - bo = (struct ttm_buffer_object *)vma->vm_private_data;
33485 - if (bo == NULL) {
33486 - return VM_FAULT_NOPAGE;
33487 - }
33488 - r = ttm_vm_ops->fault(vma, vmf);
33489 - return r;
33490 -}
33491 -
33492 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
33493 {
33494 struct drm_file *file_priv;
33495 struct radeon_device *rdev;
33496 - int r;
33497
33498 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
33499 return drm_mmap(filp, vma);
33500 @@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
33501
33502 file_priv = (struct drm_file *)filp->private_data;
33503 rdev = file_priv->minor->dev->dev_private;
33504 - if (rdev == NULL) {
33505 + if (!rdev)
33506 return -EINVAL;
33507 - }
33508 - r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
33509 - if (unlikely(r != 0)) {
33510 - return r;
33511 - }
33512 - if (unlikely(ttm_vm_ops == NULL)) {
33513 - ttm_vm_ops = vma->vm_ops;
33514 - radeon_ttm_vm_ops = *ttm_vm_ops;
33515 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
33516 - }
33517 - vma->vm_ops = &radeon_ttm_vm_ops;
33518 - return 0;
33519 + return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
33520 }
33521
33522
33523 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
33524 index b12ff76..0bd0c6e 100644
33525 --- a/drivers/gpu/drm/radeon/rs690.c
33526 +++ b/drivers/gpu/drm/radeon/rs690.c
33527 @@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
33528 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
33529 rdev->pm.sideport_bandwidth.full)
33530 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
33531 - read_delay_latency.full = rfixed_const(370 * 800 * 1000);
33532 + read_delay_latency.full = rfixed_const(800 * 1000);
33533 read_delay_latency.full = rfixed_div(read_delay_latency,
33534 rdev->pm.igp_sideport_mclk);
33535 + a.full = rfixed_const(370);
33536 + read_delay_latency.full = rfixed_mul(read_delay_latency, a);
33537 } else {
33538 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
33539 rdev->pm.k8_bandwidth.full)
33540 diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
33541 index 0ed436e..e6e7ce3 100644
33542 --- a/drivers/gpu/drm/ttm/ttm_bo.c
33543 +++ b/drivers/gpu/drm/ttm/ttm_bo.c
33544 @@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_attrs[] = {
33545 NULL
33546 };
33547
33548 -static struct sysfs_ops ttm_bo_global_ops = {
33549 +static const struct sysfs_ops ttm_bo_global_ops = {
33550 .show = &ttm_bo_global_show
33551 };
33552
33553 diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
33554 index 1c040d0..f9e4af8 100644
33555 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
33556 +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
33557 @@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33558 {
33559 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
33560 vma->vm_private_data;
33561 - struct ttm_bo_device *bdev = bo->bdev;
33562 + struct ttm_bo_device *bdev;
33563 unsigned long bus_base;
33564 unsigned long bus_offset;
33565 unsigned long bus_size;
33566 @@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33567 unsigned long address = (unsigned long)vmf->virtual_address;
33568 int retval = VM_FAULT_NOPAGE;
33569
33570 + if (!bo)
33571 + return VM_FAULT_NOPAGE;
33572 + bdev = bo->bdev;
33573 +
33574 /*
33575 * Work around locking order reversal in fault / nopfn
33576 * between mmap_sem and bo_reserve: Perform a trylock operation
33577 diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c
33578 index b170071..28ae90e 100644
33579 --- a/drivers/gpu/drm/ttm/ttm_global.c
33580 +++ b/drivers/gpu/drm/ttm/ttm_global.c
33581 @@ -36,7 +36,7 @@
33582 struct ttm_global_item {
33583 struct mutex mutex;
33584 void *object;
33585 - int refcount;
33586 + atomic_t refcount;
33587 };
33588
33589 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
33590 @@ -49,7 +49,7 @@ void ttm_global_init(void)
33591 struct ttm_global_item *item = &glob[i];
33592 mutex_init(&item->mutex);
33593 item->object = NULL;
33594 - item->refcount = 0;
33595 + atomic_set(&item->refcount, 0);
33596 }
33597 }
33598
33599 @@ -59,7 +59,7 @@ void ttm_global_release(void)
33600 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
33601 struct ttm_global_item *item = &glob[i];
33602 BUG_ON(item->object != NULL);
33603 - BUG_ON(item->refcount != 0);
33604 + BUG_ON(atomic_read(&item->refcount) != 0);
33605 }
33606 }
33607
33608 @@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
33609 void *object;
33610
33611 mutex_lock(&item->mutex);
33612 - if (item->refcount == 0) {
33613 + if (atomic_read(&item->refcount) == 0) {
33614 item->object = kzalloc(ref->size, GFP_KERNEL);
33615 if (unlikely(item->object == NULL)) {
33616 ret = -ENOMEM;
33617 @@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
33618 goto out_err;
33619
33620 }
33621 - ++item->refcount;
33622 + atomic_inc(&item->refcount);
33623 ref->object = item->object;
33624 object = item->object;
33625 mutex_unlock(&item->mutex);
33626 @@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_global_reference *ref)
33627 struct ttm_global_item *item = &glob[ref->global_type];
33628
33629 mutex_lock(&item->mutex);
33630 - BUG_ON(item->refcount == 0);
33631 + BUG_ON(atomic_read(&item->refcount) == 0);
33632 BUG_ON(ref->object != item->object);
33633 - if (--item->refcount == 0) {
33634 + if (atomic_dec_and_test(&item->refcount)) {
33635 ref->release(ref);
33636 item->object = NULL;
33637 }
33638 diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
33639 index 072c281..d8ef483 100644
33640 --- a/drivers/gpu/drm/ttm/ttm_memory.c
33641 +++ b/drivers/gpu/drm/ttm/ttm_memory.c
33642 @@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_attrs[] = {
33643 NULL
33644 };
33645
33646 -static struct sysfs_ops ttm_mem_zone_ops = {
33647 +static const struct sysfs_ops ttm_mem_zone_ops = {
33648 .show = &ttm_mem_zone_show,
33649 .store = &ttm_mem_zone_store
33650 };
33651 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
33652 index cafcb84..b8e66cc 100644
33653 --- a/drivers/gpu/drm/via/via_drv.h
33654 +++ b/drivers/gpu/drm/via/via_drv.h
33655 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
33656 typedef uint32_t maskarray_t[5];
33657
33658 typedef struct drm_via_irq {
33659 - atomic_t irq_received;
33660 + atomic_unchecked_t irq_received;
33661 uint32_t pending_mask;
33662 uint32_t enable_mask;
33663 wait_queue_head_t irq_queue;
33664 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
33665 struct timeval last_vblank;
33666 int last_vblank_valid;
33667 unsigned usec_per_vblank;
33668 - atomic_t vbl_received;
33669 + atomic_unchecked_t vbl_received;
33670 drm_via_state_t hc_state;
33671 char pci_buf[VIA_PCI_BUF_SIZE];
33672 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
33673 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
33674 index 5935b88..127a8a6 100644
33675 --- a/drivers/gpu/drm/via/via_irq.c
33676 +++ b/drivers/gpu/drm/via/via_irq.c
33677 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
33678 if (crtc != 0)
33679 return 0;
33680
33681 - return atomic_read(&dev_priv->vbl_received);
33682 + return atomic_read_unchecked(&dev_priv->vbl_received);
33683 }
33684
33685 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33686 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33687
33688 status = VIA_READ(VIA_REG_INTERRUPT);
33689 if (status & VIA_IRQ_VBLANK_PENDING) {
33690 - atomic_inc(&dev_priv->vbl_received);
33691 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
33692 + atomic_inc_unchecked(&dev_priv->vbl_received);
33693 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
33694 do_gettimeofday(&cur_vblank);
33695 if (dev_priv->last_vblank_valid) {
33696 dev_priv->usec_per_vblank =
33697 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33698 dev_priv->last_vblank = cur_vblank;
33699 dev_priv->last_vblank_valid = 1;
33700 }
33701 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
33702 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
33703 DRM_DEBUG("US per vblank is: %u\n",
33704 dev_priv->usec_per_vblank);
33705 }
33706 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33707
33708 for (i = 0; i < dev_priv->num_irqs; ++i) {
33709 if (status & cur_irq->pending_mask) {
33710 - atomic_inc(&cur_irq->irq_received);
33711 + atomic_inc_unchecked(&cur_irq->irq_received);
33712 DRM_WAKEUP(&cur_irq->irq_queue);
33713 handled = 1;
33714 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
33715 @@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
33716 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
33717 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
33718 masks[irq][4]));
33719 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
33720 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
33721 } else {
33722 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
33723 (((cur_irq_sequence =
33724 - atomic_read(&cur_irq->irq_received)) -
33725 + atomic_read_unchecked(&cur_irq->irq_received)) -
33726 *sequence) <= (1 << 23)));
33727 }
33728 *sequence = cur_irq_sequence;
33729 @@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct drm_device * dev)
33730 }
33731
33732 for (i = 0; i < dev_priv->num_irqs; ++i) {
33733 - atomic_set(&cur_irq->irq_received, 0);
33734 + atomic_set_unchecked(&cur_irq->irq_received, 0);
33735 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
33736 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
33737 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
33738 @@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
33739 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
33740 case VIA_IRQ_RELATIVE:
33741 irqwait->request.sequence +=
33742 - atomic_read(&cur_irq->irq_received);
33743 + atomic_read_unchecked(&cur_irq->irq_received);
33744 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
33745 case VIA_IRQ_ABSOLUTE:
33746 break;
33747 diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
33748 index aa8688d..6a0140c 100644
33749 --- a/drivers/gpu/vga/vgaarb.c
33750 +++ b/drivers/gpu/vga/vgaarb.c
33751 @@ -894,14 +894,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
33752 uc = &priv->cards[i];
33753 }
33754
33755 - if (!uc)
33756 - return -EINVAL;
33757 + if (!uc) {
33758 + ret_val = -EINVAL;
33759 + goto done;
33760 + }
33761
33762 - if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0)
33763 - return -EINVAL;
33764 + if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
33765 + ret_val = -EINVAL;
33766 + goto done;
33767 + }
33768
33769 - if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0)
33770 - return -EINVAL;
33771 + if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
33772 + ret_val = -EINVAL;
33773 + goto done;
33774 + }
33775
33776 vga_put(pdev, io_state);
33777
33778 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
33779 index f3f1415..e561d90 100644
33780 --- a/drivers/hid/hid-core.c
33781 +++ b/drivers/hid/hid-core.c
33782 @@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device *hdev)
33783
33784 int hid_add_device(struct hid_device *hdev)
33785 {
33786 - static atomic_t id = ATOMIC_INIT(0);
33787 + static atomic_unchecked_t id = ATOMIC_INIT(0);
33788 int ret;
33789
33790 if (WARN_ON(hdev->status & HID_STAT_ADDED))
33791 @@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hdev)
33792 /* XXX hack, any other cleaner solution after the driver core
33793 * is converted to allow more than 20 bytes as the device name? */
33794 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
33795 - hdev->vendor, hdev->product, atomic_inc_return(&id));
33796 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
33797
33798 ret = device_add(&hdev->dev);
33799 if (!ret)
33800 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
33801 index 8b6ee24..70f657d 100644
33802 --- a/drivers/hid/usbhid/hiddev.c
33803 +++ b/drivers/hid/usbhid/hiddev.c
33804 @@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
33805 return put_user(HID_VERSION, (int __user *)arg);
33806
33807 case HIDIOCAPPLICATION:
33808 - if (arg < 0 || arg >= hid->maxapplication)
33809 + if (arg >= hid->maxapplication)
33810 return -EINVAL;
33811
33812 for (i = 0; i < hid->maxcollection; i++)
33813 diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
33814 index 5d5ed69..f40533e 100644
33815 --- a/drivers/hwmon/lis3lv02d.c
33816 +++ b/drivers/hwmon/lis3lv02d.c
33817 @@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
33818 * the lid is closed. This leads to interrupts as soon as a little move
33819 * is done.
33820 */
33821 - atomic_inc(&lis3_dev.count);
33822 + atomic_inc_unchecked(&lis3_dev.count);
33823
33824 wake_up_interruptible(&lis3_dev.misc_wait);
33825 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
33826 @@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
33827 if (test_and_set_bit(0, &lis3_dev.misc_opened))
33828 return -EBUSY; /* already open */
33829
33830 - atomic_set(&lis3_dev.count, 0);
33831 + atomic_set_unchecked(&lis3_dev.count, 0);
33832
33833 /*
33834 * The sensor can generate interrupts for free-fall and direction
33835 @@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
33836 add_wait_queue(&lis3_dev.misc_wait, &wait);
33837 while (true) {
33838 set_current_state(TASK_INTERRUPTIBLE);
33839 - data = atomic_xchg(&lis3_dev.count, 0);
33840 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
33841 if (data)
33842 break;
33843
33844 @@ -244,7 +244,7 @@ out:
33845 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
33846 {
33847 poll_wait(file, &lis3_dev.misc_wait, wait);
33848 - if (atomic_read(&lis3_dev.count))
33849 + if (atomic_read_unchecked(&lis3_dev.count))
33850 return POLLIN | POLLRDNORM;
33851 return 0;
33852 }
33853 diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
33854 index 7cdd76f..fe0efdf 100644
33855 --- a/drivers/hwmon/lis3lv02d.h
33856 +++ b/drivers/hwmon/lis3lv02d.h
33857 @@ -201,7 +201,7 @@ struct lis3lv02d {
33858
33859 struct input_polled_dev *idev; /* input device */
33860 struct platform_device *pdev; /* platform device */
33861 - atomic_t count; /* interrupt count after last read */
33862 + atomic_unchecked_t count; /* interrupt count after last read */
33863 int xcalib; /* calibrated null value for x */
33864 int ycalib; /* calibrated null value for y */
33865 int zcalib; /* calibrated null value for z */
33866 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
33867 index 2040507..706ec1e 100644
33868 --- a/drivers/hwmon/sht15.c
33869 +++ b/drivers/hwmon/sht15.c
33870 @@ -112,7 +112,7 @@ struct sht15_data {
33871 int supply_uV;
33872 int supply_uV_valid;
33873 struct work_struct update_supply_work;
33874 - atomic_t interrupt_handled;
33875 + atomic_unchecked_t interrupt_handled;
33876 };
33877
33878 /**
33879 @@ -245,13 +245,13 @@ static inline int sht15_update_single_val(struct sht15_data *data,
33880 return ret;
33881
33882 gpio_direction_input(data->pdata->gpio_data);
33883 - atomic_set(&data->interrupt_handled, 0);
33884 + atomic_set_unchecked(&data->interrupt_handled, 0);
33885
33886 enable_irq(gpio_to_irq(data->pdata->gpio_data));
33887 if (gpio_get_value(data->pdata->gpio_data) == 0) {
33888 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
33889 /* Only relevant if the interrupt hasn't occured. */
33890 - if (!atomic_read(&data->interrupt_handled))
33891 + if (!atomic_read_unchecked(&data->interrupt_handled))
33892 schedule_work(&data->read_work);
33893 }
33894 ret = wait_event_timeout(data->wait_queue,
33895 @@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
33896 struct sht15_data *data = d;
33897 /* First disable the interrupt */
33898 disable_irq_nosync(irq);
33899 - atomic_inc(&data->interrupt_handled);
33900 + atomic_inc_unchecked(&data->interrupt_handled);
33901 /* Then schedule a reading work struct */
33902 if (data->flag != SHT15_READING_NOTHING)
33903 schedule_work(&data->read_work);
33904 @@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
33905 here as could have gone low in meantime so verify
33906 it hasn't!
33907 */
33908 - atomic_set(&data->interrupt_handled, 0);
33909 + atomic_set_unchecked(&data->interrupt_handled, 0);
33910 enable_irq(gpio_to_irq(data->pdata->gpio_data));
33911 /* If still not occured or another handler has been scheduled */
33912 if (gpio_get_value(data->pdata->gpio_data)
33913 - || atomic_read(&data->interrupt_handled))
33914 + || atomic_read_unchecked(&data->interrupt_handled))
33915 return;
33916 }
33917 /* Read the data back from the device */
33918 diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
33919 index 97851c5..cb40626 100644
33920 --- a/drivers/hwmon/w83791d.c
33921 +++ b/drivers/hwmon/w83791d.c
33922 @@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_client *client, int kind,
33923 struct i2c_board_info *info);
33924 static int w83791d_remove(struct i2c_client *client);
33925
33926 -static int w83791d_read(struct i2c_client *client, u8 register);
33927 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
33928 +static int w83791d_read(struct i2c_client *client, u8 reg);
33929 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
33930 static struct w83791d_data *w83791d_update_device(struct device *dev);
33931
33932 #ifdef DEBUG
33933 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
33934 index 378fcb5..5e91fa8 100644
33935 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
33936 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
33937 @@ -43,7 +43,7 @@
33938 extern struct i2c_adapter amd756_smbus;
33939
33940 static struct i2c_adapter *s4882_adapter;
33941 -static struct i2c_algorithm *s4882_algo;
33942 +static i2c_algorithm_no_const *s4882_algo;
33943
33944 /* Wrapper access functions for multiplexed SMBus */
33945 static DEFINE_MUTEX(amd756_lock);
33946 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
33947 index 29015eb..af2d8e9 100644
33948 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
33949 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
33950 @@ -41,7 +41,7 @@
33951 extern struct i2c_adapter *nforce2_smbus;
33952
33953 static struct i2c_adapter *s4985_adapter;
33954 -static struct i2c_algorithm *s4985_algo;
33955 +static i2c_algorithm_no_const *s4985_algo;
33956
33957 /* Wrapper access functions for multiplexed SMBus */
33958 static DEFINE_MUTEX(nforce2_lock);
33959 diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
33960 index 878f8ec..12376fc 100644
33961 --- a/drivers/ide/aec62xx.c
33962 +++ b/drivers/ide/aec62xx.c
33963 @@ -180,7 +180,7 @@ static const struct ide_port_ops atp86x_port_ops = {
33964 .cable_detect = atp86x_cable_detect,
33965 };
33966
33967 -static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
33968 +static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
33969 { /* 0: AEC6210 */
33970 .name = DRV_NAME,
33971 .init_chipset = init_chipset_aec62xx,
33972 diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
33973 index e59b6de..4b4fc65 100644
33974 --- a/drivers/ide/alim15x3.c
33975 +++ b/drivers/ide/alim15x3.c
33976 @@ -509,7 +509,7 @@ static const struct ide_dma_ops ali_dma_ops = {
33977 .dma_sff_read_status = ide_dma_sff_read_status,
33978 };
33979
33980 -static const struct ide_port_info ali15x3_chipset __devinitdata = {
33981 +static const struct ide_port_info ali15x3_chipset __devinitconst = {
33982 .name = DRV_NAME,
33983 .init_chipset = init_chipset_ali15x3,
33984 .init_hwif = init_hwif_ali15x3,
33985 diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
33986 index 628cd2e..087a414 100644
33987 --- a/drivers/ide/amd74xx.c
33988 +++ b/drivers/ide/amd74xx.c
33989 @@ -221,7 +221,7 @@ static const struct ide_port_ops amd_port_ops = {
33990 .udma_mask = udma, \
33991 }
33992
33993 -static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
33994 +static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
33995 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
33996 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
33997 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
33998 diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
33999 index 837322b..837fd71 100644
34000 --- a/drivers/ide/atiixp.c
34001 +++ b/drivers/ide/atiixp.c
34002 @@ -137,7 +137,7 @@ static const struct ide_port_ops atiixp_port_ops = {
34003 .cable_detect = atiixp_cable_detect,
34004 };
34005
34006 -static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
34007 +static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
34008 { /* 0: IXP200/300/400/700 */
34009 .name = DRV_NAME,
34010 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
34011 diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
34012 index ca0c46f..d55318a 100644
34013 --- a/drivers/ide/cmd64x.c
34014 +++ b/drivers/ide/cmd64x.c
34015 @@ -372,7 +372,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
34016 .dma_sff_read_status = ide_dma_sff_read_status,
34017 };
34018
34019 -static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
34020 +static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
34021 { /* 0: CMD643 */
34022 .name = DRV_NAME,
34023 .init_chipset = init_chipset_cmd64x,
34024 diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
34025 index 09f98ed..cebc5bc 100644
34026 --- a/drivers/ide/cs5520.c
34027 +++ b/drivers/ide/cs5520.c
34028 @@ -93,7 +93,7 @@ static const struct ide_port_ops cs5520_port_ops = {
34029 .set_dma_mode = cs5520_set_dma_mode,
34030 };
34031
34032 -static const struct ide_port_info cyrix_chipset __devinitdata = {
34033 +static const struct ide_port_info cyrix_chipset __devinitconst = {
34034 .name = DRV_NAME,
34035 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
34036 .port_ops = &cs5520_port_ops,
34037 diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
34038 index 40bf05e..7d58ca0 100644
34039 --- a/drivers/ide/cs5530.c
34040 +++ b/drivers/ide/cs5530.c
34041 @@ -244,7 +244,7 @@ static const struct ide_port_ops cs5530_port_ops = {
34042 .udma_filter = cs5530_udma_filter,
34043 };
34044
34045 -static const struct ide_port_info cs5530_chipset __devinitdata = {
34046 +static const struct ide_port_info cs5530_chipset __devinitconst = {
34047 .name = DRV_NAME,
34048 .init_chipset = init_chipset_cs5530,
34049 .init_hwif = init_hwif_cs5530,
34050 diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
34051 index 983d957..53e6172 100644
34052 --- a/drivers/ide/cs5535.c
34053 +++ b/drivers/ide/cs5535.c
34054 @@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
34055 .cable_detect = cs5535_cable_detect,
34056 };
34057
34058 -static const struct ide_port_info cs5535_chipset __devinitdata = {
34059 +static const struct ide_port_info cs5535_chipset __devinitconst = {
34060 .name = DRV_NAME,
34061 .port_ops = &cs5535_port_ops,
34062 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
34063 diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
34064 index 74fc540..8e933d8 100644
34065 --- a/drivers/ide/cy82c693.c
34066 +++ b/drivers/ide/cy82c693.c
34067 @@ -288,7 +288,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
34068 .set_dma_mode = cy82c693_set_dma_mode,
34069 };
34070
34071 -static const struct ide_port_info cy82c693_chipset __devinitdata = {
34072 +static const struct ide_port_info cy82c693_chipset __devinitconst = {
34073 .name = DRV_NAME,
34074 .init_iops = init_iops_cy82c693,
34075 .port_ops = &cy82c693_port_ops,
34076 diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
34077 index 7ce68ef..e78197d 100644
34078 --- a/drivers/ide/hpt366.c
34079 +++ b/drivers/ide/hpt366.c
34080 @@ -507,7 +507,7 @@ static struct hpt_timings hpt37x_timings = {
34081 }
34082 };
34083
34084 -static const struct hpt_info hpt36x __devinitdata = {
34085 +static const struct hpt_info hpt36x __devinitconst = {
34086 .chip_name = "HPT36x",
34087 .chip_type = HPT36x,
34088 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
34089 @@ -515,7 +515,7 @@ static const struct hpt_info hpt36x __devinitdata = {
34090 .timings = &hpt36x_timings
34091 };
34092
34093 -static const struct hpt_info hpt370 __devinitdata = {
34094 +static const struct hpt_info hpt370 __devinitconst = {
34095 .chip_name = "HPT370",
34096 .chip_type = HPT370,
34097 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
34098 @@ -523,7 +523,7 @@ static const struct hpt_info hpt370 __devinitdata = {
34099 .timings = &hpt37x_timings
34100 };
34101
34102 -static const struct hpt_info hpt370a __devinitdata = {
34103 +static const struct hpt_info hpt370a __devinitconst = {
34104 .chip_name = "HPT370A",
34105 .chip_type = HPT370A,
34106 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
34107 @@ -531,7 +531,7 @@ static const struct hpt_info hpt370a __devinitdata = {
34108 .timings = &hpt37x_timings
34109 };
34110
34111 -static const struct hpt_info hpt374 __devinitdata = {
34112 +static const struct hpt_info hpt374 __devinitconst = {
34113 .chip_name = "HPT374",
34114 .chip_type = HPT374,
34115 .udma_mask = ATA_UDMA5,
34116 @@ -539,7 +539,7 @@ static const struct hpt_info hpt374 __devinitdata = {
34117 .timings = &hpt37x_timings
34118 };
34119
34120 -static const struct hpt_info hpt372 __devinitdata = {
34121 +static const struct hpt_info hpt372 __devinitconst = {
34122 .chip_name = "HPT372",
34123 .chip_type = HPT372,
34124 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34125 @@ -547,7 +547,7 @@ static const struct hpt_info hpt372 __devinitdata = {
34126 .timings = &hpt37x_timings
34127 };
34128
34129 -static const struct hpt_info hpt372a __devinitdata = {
34130 +static const struct hpt_info hpt372a __devinitconst = {
34131 .chip_name = "HPT372A",
34132 .chip_type = HPT372A,
34133 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34134 @@ -555,7 +555,7 @@ static const struct hpt_info hpt372a __devinitdata = {
34135 .timings = &hpt37x_timings
34136 };
34137
34138 -static const struct hpt_info hpt302 __devinitdata = {
34139 +static const struct hpt_info hpt302 __devinitconst = {
34140 .chip_name = "HPT302",
34141 .chip_type = HPT302,
34142 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34143 @@ -563,7 +563,7 @@ static const struct hpt_info hpt302 __devinitdata = {
34144 .timings = &hpt37x_timings
34145 };
34146
34147 -static const struct hpt_info hpt371 __devinitdata = {
34148 +static const struct hpt_info hpt371 __devinitconst = {
34149 .chip_name = "HPT371",
34150 .chip_type = HPT371,
34151 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34152 @@ -571,7 +571,7 @@ static const struct hpt_info hpt371 __devinitdata = {
34153 .timings = &hpt37x_timings
34154 };
34155
34156 -static const struct hpt_info hpt372n __devinitdata = {
34157 +static const struct hpt_info hpt372n __devinitconst = {
34158 .chip_name = "HPT372N",
34159 .chip_type = HPT372N,
34160 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34161 @@ -579,7 +579,7 @@ static const struct hpt_info hpt372n __devinitdata = {
34162 .timings = &hpt37x_timings
34163 };
34164
34165 -static const struct hpt_info hpt302n __devinitdata = {
34166 +static const struct hpt_info hpt302n __devinitconst = {
34167 .chip_name = "HPT302N",
34168 .chip_type = HPT302N,
34169 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34170 @@ -587,7 +587,7 @@ static const struct hpt_info hpt302n __devinitdata = {
34171 .timings = &hpt37x_timings
34172 };
34173
34174 -static const struct hpt_info hpt371n __devinitdata = {
34175 +static const struct hpt_info hpt371n __devinitconst = {
34176 .chip_name = "HPT371N",
34177 .chip_type = HPT371N,
34178 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34179 @@ -1422,7 +1422,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
34180 .dma_sff_read_status = ide_dma_sff_read_status,
34181 };
34182
34183 -static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
34184 +static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
34185 { /* 0: HPT36x */
34186 .name = DRV_NAME,
34187 .init_chipset = init_chipset_hpt366,
34188 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
34189 index 2de76cc..74186a1 100644
34190 --- a/drivers/ide/ide-cd.c
34191 +++ b/drivers/ide/ide-cd.c
34192 @@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
34193 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
34194 if ((unsigned long)buf & alignment
34195 || blk_rq_bytes(rq) & q->dma_pad_mask
34196 - || object_is_on_stack(buf))
34197 + || object_starts_on_stack(buf))
34198 drive->dma = 0;
34199 }
34200 }
34201 diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
34202 index fefbdfc..62ff465 100644
34203 --- a/drivers/ide/ide-floppy.c
34204 +++ b/drivers/ide/ide-floppy.c
34205 @@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
34206 u8 pc_buf[256], header_len, desc_cnt;
34207 int i, rc = 1, blocks, length;
34208
34209 + pax_track_stack();
34210 +
34211 ide_debug_log(IDE_DBG_FUNC, "enter");
34212
34213 drive->bios_cyl = 0;
34214 diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
34215 index 39d4e01..11538ce 100644
34216 --- a/drivers/ide/ide-pci-generic.c
34217 +++ b/drivers/ide/ide-pci-generic.c
34218 @@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
34219 .udma_mask = ATA_UDMA6, \
34220 }
34221
34222 -static const struct ide_port_info generic_chipsets[] __devinitdata = {
34223 +static const struct ide_port_info generic_chipsets[] __devinitconst = {
34224 /* 0: Unknown */
34225 DECLARE_GENERIC_PCI_DEV(0),
34226
34227 diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
34228 index 0d266a5..aaca790 100644
34229 --- a/drivers/ide/it8172.c
34230 +++ b/drivers/ide/it8172.c
34231 @@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
34232 .set_dma_mode = it8172_set_dma_mode,
34233 };
34234
34235 -static const struct ide_port_info it8172_port_info __devinitdata = {
34236 +static const struct ide_port_info it8172_port_info __devinitconst = {
34237 .name = DRV_NAME,
34238 .port_ops = &it8172_port_ops,
34239 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
34240 diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
34241 index 4797616..4be488a 100644
34242 --- a/drivers/ide/it8213.c
34243 +++ b/drivers/ide/it8213.c
34244 @@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
34245 .cable_detect = it8213_cable_detect,
34246 };
34247
34248 -static const struct ide_port_info it8213_chipset __devinitdata = {
34249 +static const struct ide_port_info it8213_chipset __devinitconst = {
34250 .name = DRV_NAME,
34251 .enablebits = { {0x41, 0x80, 0x80} },
34252 .port_ops = &it8213_port_ops,
34253 diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
34254 index 51aa745..146ee60 100644
34255 --- a/drivers/ide/it821x.c
34256 +++ b/drivers/ide/it821x.c
34257 @@ -627,7 +627,7 @@ static const struct ide_port_ops it821x_port_ops = {
34258 .cable_detect = it821x_cable_detect,
34259 };
34260
34261 -static const struct ide_port_info it821x_chipset __devinitdata = {
34262 +static const struct ide_port_info it821x_chipset __devinitconst = {
34263 .name = DRV_NAME,
34264 .init_chipset = init_chipset_it821x,
34265 .init_hwif = init_hwif_it821x,
34266 diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
34267 index bf2be64..9270098 100644
34268 --- a/drivers/ide/jmicron.c
34269 +++ b/drivers/ide/jmicron.c
34270 @@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
34271 .cable_detect = jmicron_cable_detect,
34272 };
34273
34274 -static const struct ide_port_info jmicron_chipset __devinitdata = {
34275 +static const struct ide_port_info jmicron_chipset __devinitconst = {
34276 .name = DRV_NAME,
34277 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
34278 .port_ops = &jmicron_port_ops,
34279 diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
34280 index 95327a2..73f78d8 100644
34281 --- a/drivers/ide/ns87415.c
34282 +++ b/drivers/ide/ns87415.c
34283 @@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
34284 .dma_sff_read_status = superio_dma_sff_read_status,
34285 };
34286
34287 -static const struct ide_port_info ns87415_chipset __devinitdata = {
34288 +static const struct ide_port_info ns87415_chipset __devinitconst = {
34289 .name = DRV_NAME,
34290 .init_hwif = init_hwif_ns87415,
34291 .tp_ops = &ns87415_tp_ops,
34292 diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
34293 index f1d70d6..e1de05b 100644
34294 --- a/drivers/ide/opti621.c
34295 +++ b/drivers/ide/opti621.c
34296 @@ -202,7 +202,7 @@ static const struct ide_port_ops opti621_port_ops = {
34297 .set_pio_mode = opti621_set_pio_mode,
34298 };
34299
34300 -static const struct ide_port_info opti621_chipset __devinitdata = {
34301 +static const struct ide_port_info opti621_chipset __devinitconst = {
34302 .name = DRV_NAME,
34303 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
34304 .port_ops = &opti621_port_ops,
34305 diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
34306 index 65ba823..7311f4d 100644
34307 --- a/drivers/ide/pdc202xx_new.c
34308 +++ b/drivers/ide/pdc202xx_new.c
34309 @@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
34310 .udma_mask = udma, \
34311 }
34312
34313 -static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
34314 +static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
34315 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
34316 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
34317 };
34318 diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
34319 index cb812f3..af816ef 100644
34320 --- a/drivers/ide/pdc202xx_old.c
34321 +++ b/drivers/ide/pdc202xx_old.c
34322 @@ -285,7 +285,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
34323 .max_sectors = sectors, \
34324 }
34325
34326 -static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
34327 +static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
34328 { /* 0: PDC20246 */
34329 .name = DRV_NAME,
34330 .init_chipset = init_chipset_pdc202xx,
34331 diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
34332 index bf14f39..15c4b98 100644
34333 --- a/drivers/ide/piix.c
34334 +++ b/drivers/ide/piix.c
34335 @@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
34336 .udma_mask = udma, \
34337 }
34338
34339 -static const struct ide_port_info piix_pci_info[] __devinitdata = {
34340 +static const struct ide_port_info piix_pci_info[] __devinitconst = {
34341 /* 0: MPIIX */
34342 { /*
34343 * MPIIX actually has only a single IDE channel mapped to
34344 diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
34345 index a6414a8..c04173e 100644
34346 --- a/drivers/ide/rz1000.c
34347 +++ b/drivers/ide/rz1000.c
34348 @@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
34349 }
34350 }
34351
34352 -static const struct ide_port_info rz1000_chipset __devinitdata = {
34353 +static const struct ide_port_info rz1000_chipset __devinitconst = {
34354 .name = DRV_NAME,
34355 .host_flags = IDE_HFLAG_NO_DMA,
34356 };
34357 diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
34358 index d467478..9203942 100644
34359 --- a/drivers/ide/sc1200.c
34360 +++ b/drivers/ide/sc1200.c
34361 @@ -290,7 +290,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
34362 .dma_sff_read_status = ide_dma_sff_read_status,
34363 };
34364
34365 -static const struct ide_port_info sc1200_chipset __devinitdata = {
34366 +static const struct ide_port_info sc1200_chipset __devinitconst = {
34367 .name = DRV_NAME,
34368 .port_ops = &sc1200_port_ops,
34369 .dma_ops = &sc1200_dma_ops,
34370 diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
34371 index 1104bb3..59c5194 100644
34372 --- a/drivers/ide/scc_pata.c
34373 +++ b/drivers/ide/scc_pata.c
34374 @@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
34375 .dma_sff_read_status = scc_dma_sff_read_status,
34376 };
34377
34378 -static const struct ide_port_info scc_chipset __devinitdata = {
34379 +static const struct ide_port_info scc_chipset __devinitconst = {
34380 .name = "sccIDE",
34381 .init_iops = init_iops_scc,
34382 .init_dma = scc_init_dma,
34383 diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
34384 index b6554ef..6cc2cc3 100644
34385 --- a/drivers/ide/serverworks.c
34386 +++ b/drivers/ide/serverworks.c
34387 @@ -353,7 +353,7 @@ static const struct ide_port_ops svwks_port_ops = {
34388 .cable_detect = svwks_cable_detect,
34389 };
34390
34391 -static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
34392 +static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
34393 { /* 0: OSB4 */
34394 .name = DRV_NAME,
34395 .init_chipset = init_chipset_svwks,
34396 diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
34397 index ab3db61..afed580 100644
34398 --- a/drivers/ide/setup-pci.c
34399 +++ b/drivers/ide/setup-pci.c
34400 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
34401 int ret, i, n_ports = dev2 ? 4 : 2;
34402 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
34403
34404 + pax_track_stack();
34405 +
34406 for (i = 0; i < n_ports / 2; i++) {
34407 ret = ide_setup_pci_controller(pdev[i], d, !i);
34408 if (ret < 0)
34409 diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
34410 index d95df52..0b03a39 100644
34411 --- a/drivers/ide/siimage.c
34412 +++ b/drivers/ide/siimage.c
34413 @@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
34414 .udma_mask = ATA_UDMA6, \
34415 }
34416
34417 -static const struct ide_port_info siimage_chipsets[] __devinitdata = {
34418 +static const struct ide_port_info siimage_chipsets[] __devinitconst = {
34419 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
34420 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
34421 };
34422 diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
34423 index 3b88eba..ca8699d 100644
34424 --- a/drivers/ide/sis5513.c
34425 +++ b/drivers/ide/sis5513.c
34426 @@ -561,7 +561,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
34427 .cable_detect = sis_cable_detect,
34428 };
34429
34430 -static const struct ide_port_info sis5513_chipset __devinitdata = {
34431 +static const struct ide_port_info sis5513_chipset __devinitconst = {
34432 .name = DRV_NAME,
34433 .init_chipset = init_chipset_sis5513,
34434 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
34435 diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
34436 index d698da4..fca42a4 100644
34437 --- a/drivers/ide/sl82c105.c
34438 +++ b/drivers/ide/sl82c105.c
34439 @@ -319,7 +319,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
34440 .dma_sff_read_status = ide_dma_sff_read_status,
34441 };
34442
34443 -static const struct ide_port_info sl82c105_chipset __devinitdata = {
34444 +static const struct ide_port_info sl82c105_chipset __devinitconst = {
34445 .name = DRV_NAME,
34446 .init_chipset = init_chipset_sl82c105,
34447 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
34448 diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
34449 index 1ccfb40..83d5779 100644
34450 --- a/drivers/ide/slc90e66.c
34451 +++ b/drivers/ide/slc90e66.c
34452 @@ -131,7 +131,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
34453 .cable_detect = slc90e66_cable_detect,
34454 };
34455
34456 -static const struct ide_port_info slc90e66_chipset __devinitdata = {
34457 +static const struct ide_port_info slc90e66_chipset __devinitconst = {
34458 .name = DRV_NAME,
34459 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
34460 .port_ops = &slc90e66_port_ops,
34461 diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
34462 index 05a93d6..5f9e325 100644
34463 --- a/drivers/ide/tc86c001.c
34464 +++ b/drivers/ide/tc86c001.c
34465 @@ -190,7 +190,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
34466 .dma_sff_read_status = ide_dma_sff_read_status,
34467 };
34468
34469 -static const struct ide_port_info tc86c001_chipset __devinitdata = {
34470 +static const struct ide_port_info tc86c001_chipset __devinitconst = {
34471 .name = DRV_NAME,
34472 .init_hwif = init_hwif_tc86c001,
34473 .port_ops = &tc86c001_port_ops,
34474 diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
34475 index 8773c3b..7907d6c 100644
34476 --- a/drivers/ide/triflex.c
34477 +++ b/drivers/ide/triflex.c
34478 @@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
34479 .set_dma_mode = triflex_set_mode,
34480 };
34481
34482 -static const struct ide_port_info triflex_device __devinitdata = {
34483 +static const struct ide_port_info triflex_device __devinitconst = {
34484 .name = DRV_NAME,
34485 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
34486 .port_ops = &triflex_port_ops,
34487 diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
34488 index 4b42ca0..e494a98 100644
34489 --- a/drivers/ide/trm290.c
34490 +++ b/drivers/ide/trm290.c
34491 @@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
34492 .dma_check = trm290_dma_check,
34493 };
34494
34495 -static const struct ide_port_info trm290_chipset __devinitdata = {
34496 +static const struct ide_port_info trm290_chipset __devinitconst = {
34497 .name = DRV_NAME,
34498 .init_hwif = init_hwif_trm290,
34499 .tp_ops = &trm290_tp_ops,
34500 diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
34501 index 028de26..520d5d5 100644
34502 --- a/drivers/ide/via82cxxx.c
34503 +++ b/drivers/ide/via82cxxx.c
34504 @@ -374,7 +374,7 @@ static const struct ide_port_ops via_port_ops = {
34505 .cable_detect = via82cxxx_cable_detect,
34506 };
34507
34508 -static const struct ide_port_info via82cxxx_chipset __devinitdata = {
34509 +static const struct ide_port_info via82cxxx_chipset __devinitconst = {
34510 .name = DRV_NAME,
34511 .init_chipset = init_chipset_via82cxxx,
34512 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
34513 diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
34514 index 2cd00b5..14de699 100644
34515 --- a/drivers/ieee1394/dv1394.c
34516 +++ b/drivers/ieee1394/dv1394.c
34517 @@ -739,7 +739,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
34518 based upon DIF section and sequence
34519 */
34520
34521 -static void inline
34522 +static inline void
34523 frame_put_packet (struct frame *f, struct packet *p)
34524 {
34525 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
34526 diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
34527 index e947d8f..6a966b9 100644
34528 --- a/drivers/ieee1394/hosts.c
34529 +++ b/drivers/ieee1394/hosts.c
34530 @@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command,
34531 }
34532
34533 static struct hpsb_host_driver dummy_driver = {
34534 + .name = "dummy",
34535 .transmit_packet = dummy_transmit_packet,
34536 .devctl = dummy_devctl,
34537 .isoctl = dummy_isoctl
34538 diff --git a/drivers/ieee1394/init_ohci1394_dma.c b/drivers/ieee1394/init_ohci1394_dma.c
34539 index ddaab6e..8d37435 100644
34540 --- a/drivers/ieee1394/init_ohci1394_dma.c
34541 +++ b/drivers/ieee1394/init_ohci1394_dma.c
34542 @@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_controllers(void)
34543 for (func = 0; func < 8; func++) {
34544 u32 class = read_pci_config(num,slot,func,
34545 PCI_CLASS_REVISION);
34546 - if ((class == 0xffffffff))
34547 + if (class == 0xffffffff)
34548 continue; /* No device at this func */
34549
34550 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
34551 diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
34552 index 65c1429..5d8c11f 100644
34553 --- a/drivers/ieee1394/ohci1394.c
34554 +++ b/drivers/ieee1394/ohci1394.c
34555 @@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
34556 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
34557
34558 /* Module Parameters */
34559 -static int phys_dma = 1;
34560 +static int phys_dma;
34561 module_param(phys_dma, int, 0444);
34562 -MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
34563 +MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
34564
34565 static void dma_trm_tasklet(unsigned long data);
34566 static void dma_trm_reset(struct dma_trm_ctx *d);
34567 diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
34568 index f199896..78c9fc8 100644
34569 --- a/drivers/ieee1394/sbp2.c
34570 +++ b/drivers/ieee1394/sbp2.c
34571 @@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 protocol driver");
34572 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
34573 MODULE_LICENSE("GPL");
34574
34575 -static int sbp2_module_init(void)
34576 +static int __init sbp2_module_init(void)
34577 {
34578 int ret;
34579
34580 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
34581 index a5dea6b..0cefe8f 100644
34582 --- a/drivers/infiniband/core/cm.c
34583 +++ b/drivers/infiniband/core/cm.c
34584 @@ -112,7 +112,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
34585
34586 struct cm_counter_group {
34587 struct kobject obj;
34588 - atomic_long_t counter[CM_ATTR_COUNT];
34589 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
34590 };
34591
34592 struct cm_counter_attribute {
34593 @@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm_work *work,
34594 struct ib_mad_send_buf *msg = NULL;
34595 int ret;
34596
34597 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34598 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34599 counter[CM_REQ_COUNTER]);
34600
34601 /* Quick state check to discard duplicate REQs. */
34602 @@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
34603 if (!cm_id_priv)
34604 return;
34605
34606 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34607 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34608 counter[CM_REP_COUNTER]);
34609 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
34610 if (ret)
34611 @@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work *work)
34612 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
34613 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
34614 spin_unlock_irq(&cm_id_priv->lock);
34615 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34616 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34617 counter[CM_RTU_COUNTER]);
34618 goto out;
34619 }
34620 @@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_work *work)
34621 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
34622 dreq_msg->local_comm_id);
34623 if (!cm_id_priv) {
34624 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34625 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34626 counter[CM_DREQ_COUNTER]);
34627 cm_issue_drep(work->port, work->mad_recv_wc);
34628 return -EINVAL;
34629 @@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_work *work)
34630 case IB_CM_MRA_REP_RCVD:
34631 break;
34632 case IB_CM_TIMEWAIT:
34633 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34634 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34635 counter[CM_DREQ_COUNTER]);
34636 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
34637 goto unlock;
34638 @@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_work *work)
34639 cm_free_msg(msg);
34640 goto deref;
34641 case IB_CM_DREQ_RCVD:
34642 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34643 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34644 counter[CM_DREQ_COUNTER]);
34645 goto unlock;
34646 default:
34647 @@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work *work)
34648 ib_modify_mad(cm_id_priv->av.port->mad_agent,
34649 cm_id_priv->msg, timeout)) {
34650 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
34651 - atomic_long_inc(&work->port->
34652 + atomic_long_inc_unchecked(&work->port->
34653 counter_group[CM_RECV_DUPLICATES].
34654 counter[CM_MRA_COUNTER]);
34655 goto out;
34656 @@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work *work)
34657 break;
34658 case IB_CM_MRA_REQ_RCVD:
34659 case IB_CM_MRA_REP_RCVD:
34660 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34661 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34662 counter[CM_MRA_COUNTER]);
34663 /* fall through */
34664 default:
34665 @@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work *work)
34666 case IB_CM_LAP_IDLE:
34667 break;
34668 case IB_CM_MRA_LAP_SENT:
34669 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34670 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34671 counter[CM_LAP_COUNTER]);
34672 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
34673 goto unlock;
34674 @@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work *work)
34675 cm_free_msg(msg);
34676 goto deref;
34677 case IB_CM_LAP_RCVD:
34678 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34679 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34680 counter[CM_LAP_COUNTER]);
34681 goto unlock;
34682 default:
34683 @@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
34684 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
34685 if (cur_cm_id_priv) {
34686 spin_unlock_irq(&cm.lock);
34687 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34688 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34689 counter[CM_SIDR_REQ_COUNTER]);
34690 goto out; /* Duplicate message. */
34691 }
34692 @@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
34693 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
34694 msg->retries = 1;
34695
34696 - atomic_long_add(1 + msg->retries,
34697 + atomic_long_add_unchecked(1 + msg->retries,
34698 &port->counter_group[CM_XMIT].counter[attr_index]);
34699 if (msg->retries)
34700 - atomic_long_add(msg->retries,
34701 + atomic_long_add_unchecked(msg->retries,
34702 &port->counter_group[CM_XMIT_RETRIES].
34703 counter[attr_index]);
34704
34705 @@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
34706 }
34707
34708 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
34709 - atomic_long_inc(&port->counter_group[CM_RECV].
34710 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
34711 counter[attr_id - CM_ATTR_ID_OFFSET]);
34712
34713 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
34714 @@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
34715 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
34716
34717 return sprintf(buf, "%ld\n",
34718 - atomic_long_read(&group->counter[cm_attr->index]));
34719 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
34720 }
34721
34722 -static struct sysfs_ops cm_counter_ops = {
34723 +static const struct sysfs_ops cm_counter_ops = {
34724 .show = cm_show_counter
34725 };
34726
34727 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
34728 index 4507043..14ad522 100644
34729 --- a/drivers/infiniband/core/fmr_pool.c
34730 +++ b/drivers/infiniband/core/fmr_pool.c
34731 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
34732
34733 struct task_struct *thread;
34734
34735 - atomic_t req_ser;
34736 - atomic_t flush_ser;
34737 + atomic_unchecked_t req_ser;
34738 + atomic_unchecked_t flush_ser;
34739
34740 wait_queue_head_t force_wait;
34741 };
34742 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
34743 struct ib_fmr_pool *pool = pool_ptr;
34744
34745 do {
34746 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
34747 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
34748 ib_fmr_batch_release(pool);
34749
34750 - atomic_inc(&pool->flush_ser);
34751 + atomic_inc_unchecked(&pool->flush_ser);
34752 wake_up_interruptible(&pool->force_wait);
34753
34754 if (pool->flush_function)
34755 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
34756 }
34757
34758 set_current_state(TASK_INTERRUPTIBLE);
34759 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
34760 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
34761 !kthread_should_stop())
34762 schedule();
34763 __set_current_state(TASK_RUNNING);
34764 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
34765 pool->dirty_watermark = params->dirty_watermark;
34766 pool->dirty_len = 0;
34767 spin_lock_init(&pool->pool_lock);
34768 - atomic_set(&pool->req_ser, 0);
34769 - atomic_set(&pool->flush_ser, 0);
34770 + atomic_set_unchecked(&pool->req_ser, 0);
34771 + atomic_set_unchecked(&pool->flush_ser, 0);
34772 init_waitqueue_head(&pool->force_wait);
34773
34774 pool->thread = kthread_run(ib_fmr_cleanup_thread,
34775 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
34776 }
34777 spin_unlock_irq(&pool->pool_lock);
34778
34779 - serial = atomic_inc_return(&pool->req_ser);
34780 + serial = atomic_inc_return_unchecked(&pool->req_ser);
34781 wake_up_process(pool->thread);
34782
34783 if (wait_event_interruptible(pool->force_wait,
34784 - atomic_read(&pool->flush_ser) - serial >= 0))
34785 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
34786 return -EINTR;
34787
34788 return 0;
34789 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
34790 } else {
34791 list_add_tail(&fmr->list, &pool->dirty_list);
34792 if (++pool->dirty_len >= pool->dirty_watermark) {
34793 - atomic_inc(&pool->req_ser);
34794 + atomic_inc_unchecked(&pool->req_ser);
34795 wake_up_process(pool->thread);
34796 }
34797 }
34798 diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
34799 index 158a214..1558bb7 100644
34800 --- a/drivers/infiniband/core/sysfs.c
34801 +++ b/drivers/infiniband/core/sysfs.c
34802 @@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kobject *kobj,
34803 return port_attr->show(p, port_attr, buf);
34804 }
34805
34806 -static struct sysfs_ops port_sysfs_ops = {
34807 +static const struct sysfs_ops port_sysfs_ops = {
34808 .show = port_attr_show
34809 };
34810
34811 diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
34812 index 5440da0..1194ecb 100644
34813 --- a/drivers/infiniband/core/uverbs_marshall.c
34814 +++ b/drivers/infiniband/core/uverbs_marshall.c
34815 @@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
34816 dst->grh.sgid_index = src->grh.sgid_index;
34817 dst->grh.hop_limit = src->grh.hop_limit;
34818 dst->grh.traffic_class = src->grh.traffic_class;
34819 + memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
34820 dst->dlid = src->dlid;
34821 dst->sl = src->sl;
34822 dst->src_path_bits = src->src_path_bits;
34823 dst->static_rate = src->static_rate;
34824 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
34825 dst->port_num = src->port_num;
34826 + dst->reserved = 0;
34827 }
34828 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
34829
34830 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
34831 struct ib_qp_attr *src)
34832 {
34833 + dst->qp_state = src->qp_state;
34834 dst->cur_qp_state = src->cur_qp_state;
34835 dst->path_mtu = src->path_mtu;
34836 dst->path_mig_state = src->path_mig_state;
34837 @@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
34838 dst->rnr_retry = src->rnr_retry;
34839 dst->alt_port_num = src->alt_port_num;
34840 dst->alt_timeout = src->alt_timeout;
34841 + memset(dst->reserved, 0, sizeof(dst->reserved));
34842 }
34843 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
34844
34845 diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
34846 index 100da85..62e6b88 100644
34847 --- a/drivers/infiniband/hw/ipath/ipath_fs.c
34848 +++ b/drivers/infiniband/hw/ipath/ipath_fs.c
34849 @@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf,
34850 struct infinipath_counters counters;
34851 struct ipath_devdata *dd;
34852
34853 + pax_track_stack();
34854 +
34855 dd = file->f_path.dentry->d_inode->i_private;
34856 dd->ipath_f_read_counters(dd, &counters);
34857
34858 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
34859 index cbde0cf..afaf55c 100644
34860 --- a/drivers/infiniband/hw/nes/nes.c
34861 +++ b/drivers/infiniband/hw/nes/nes.c
34862 @@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
34863 LIST_HEAD(nes_adapter_list);
34864 static LIST_HEAD(nes_dev_list);
34865
34866 -atomic_t qps_destroyed;
34867 +atomic_unchecked_t qps_destroyed;
34868
34869 static unsigned int ee_flsh_adapter;
34870 static unsigned int sysfs_nonidx_addr;
34871 @@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
34872 struct nes_adapter *nesadapter = nesdev->nesadapter;
34873 u32 qp_id;
34874
34875 - atomic_inc(&qps_destroyed);
34876 + atomic_inc_unchecked(&qps_destroyed);
34877
34878 /* Free the control structures */
34879
34880 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
34881 index bcc6abc..9c76b2f 100644
34882 --- a/drivers/infiniband/hw/nes/nes.h
34883 +++ b/drivers/infiniband/hw/nes/nes.h
34884 @@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
34885 extern unsigned int wqm_quanta;
34886 extern struct list_head nes_adapter_list;
34887
34888 -extern atomic_t cm_connects;
34889 -extern atomic_t cm_accepts;
34890 -extern atomic_t cm_disconnects;
34891 -extern atomic_t cm_closes;
34892 -extern atomic_t cm_connecteds;
34893 -extern atomic_t cm_connect_reqs;
34894 -extern atomic_t cm_rejects;
34895 -extern atomic_t mod_qp_timouts;
34896 -extern atomic_t qps_created;
34897 -extern atomic_t qps_destroyed;
34898 -extern atomic_t sw_qps_destroyed;
34899 +extern atomic_unchecked_t cm_connects;
34900 +extern atomic_unchecked_t cm_accepts;
34901 +extern atomic_unchecked_t cm_disconnects;
34902 +extern atomic_unchecked_t cm_closes;
34903 +extern atomic_unchecked_t cm_connecteds;
34904 +extern atomic_unchecked_t cm_connect_reqs;
34905 +extern atomic_unchecked_t cm_rejects;
34906 +extern atomic_unchecked_t mod_qp_timouts;
34907 +extern atomic_unchecked_t qps_created;
34908 +extern atomic_unchecked_t qps_destroyed;
34909 +extern atomic_unchecked_t sw_qps_destroyed;
34910 extern u32 mh_detected;
34911 extern u32 mh_pauses_sent;
34912 extern u32 cm_packets_sent;
34913 @@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
34914 extern u32 cm_listens_created;
34915 extern u32 cm_listens_destroyed;
34916 extern u32 cm_backlog_drops;
34917 -extern atomic_t cm_loopbacks;
34918 -extern atomic_t cm_nodes_created;
34919 -extern atomic_t cm_nodes_destroyed;
34920 -extern atomic_t cm_accel_dropped_pkts;
34921 -extern atomic_t cm_resets_recvd;
34922 +extern atomic_unchecked_t cm_loopbacks;
34923 +extern atomic_unchecked_t cm_nodes_created;
34924 +extern atomic_unchecked_t cm_nodes_destroyed;
34925 +extern atomic_unchecked_t cm_accel_dropped_pkts;
34926 +extern atomic_unchecked_t cm_resets_recvd;
34927
34928 extern u32 int_mod_timer_init;
34929 extern u32 int_mod_cq_depth_256;
34930 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
34931 index 73473db..5ed06e8 100644
34932 --- a/drivers/infiniband/hw/nes/nes_cm.c
34933 +++ b/drivers/infiniband/hw/nes/nes_cm.c
34934 @@ -69,11 +69,11 @@ u32 cm_packets_received;
34935 u32 cm_listens_created;
34936 u32 cm_listens_destroyed;
34937 u32 cm_backlog_drops;
34938 -atomic_t cm_loopbacks;
34939 -atomic_t cm_nodes_created;
34940 -atomic_t cm_nodes_destroyed;
34941 -atomic_t cm_accel_dropped_pkts;
34942 -atomic_t cm_resets_recvd;
34943 +atomic_unchecked_t cm_loopbacks;
34944 +atomic_unchecked_t cm_nodes_created;
34945 +atomic_unchecked_t cm_nodes_destroyed;
34946 +atomic_unchecked_t cm_accel_dropped_pkts;
34947 +atomic_unchecked_t cm_resets_recvd;
34948
34949 static inline int mini_cm_accelerated(struct nes_cm_core *,
34950 struct nes_cm_node *);
34951 @@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
34952
34953 static struct nes_cm_core *g_cm_core;
34954
34955 -atomic_t cm_connects;
34956 -atomic_t cm_accepts;
34957 -atomic_t cm_disconnects;
34958 -atomic_t cm_closes;
34959 -atomic_t cm_connecteds;
34960 -atomic_t cm_connect_reqs;
34961 -atomic_t cm_rejects;
34962 +atomic_unchecked_t cm_connects;
34963 +atomic_unchecked_t cm_accepts;
34964 +atomic_unchecked_t cm_disconnects;
34965 +atomic_unchecked_t cm_closes;
34966 +atomic_unchecked_t cm_connecteds;
34967 +atomic_unchecked_t cm_connect_reqs;
34968 +atomic_unchecked_t cm_rejects;
34969
34970
34971 /**
34972 @@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
34973 cm_node->rem_mac);
34974
34975 add_hte_node(cm_core, cm_node);
34976 - atomic_inc(&cm_nodes_created);
34977 + atomic_inc_unchecked(&cm_nodes_created);
34978
34979 return cm_node;
34980 }
34981 @@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
34982 }
34983
34984 atomic_dec(&cm_core->node_cnt);
34985 - atomic_inc(&cm_nodes_destroyed);
34986 + atomic_inc_unchecked(&cm_nodes_destroyed);
34987 nesqp = cm_node->nesqp;
34988 if (nesqp) {
34989 nesqp->cm_node = NULL;
34990 @@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
34991
34992 static void drop_packet(struct sk_buff *skb)
34993 {
34994 - atomic_inc(&cm_accel_dropped_pkts);
34995 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
34996 dev_kfree_skb_any(skb);
34997 }
34998
34999 @@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
35000
35001 int reset = 0; /* whether to send reset in case of err.. */
35002 int passive_state;
35003 - atomic_inc(&cm_resets_recvd);
35004 + atomic_inc_unchecked(&cm_resets_recvd);
35005 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
35006 " refcnt=%d\n", cm_node, cm_node->state,
35007 atomic_read(&cm_node->ref_count));
35008 @@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
35009 rem_ref_cm_node(cm_node->cm_core, cm_node);
35010 return NULL;
35011 }
35012 - atomic_inc(&cm_loopbacks);
35013 + atomic_inc_unchecked(&cm_loopbacks);
35014 loopbackremotenode->loopbackpartner = cm_node;
35015 loopbackremotenode->tcp_cntxt.rcv_wscale =
35016 NES_CM_DEFAULT_RCV_WND_SCALE;
35017 @@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
35018 add_ref_cm_node(cm_node);
35019 } else if (cm_node->state == NES_CM_STATE_TSA) {
35020 rem_ref_cm_node(cm_core, cm_node);
35021 - atomic_inc(&cm_accel_dropped_pkts);
35022 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
35023 dev_kfree_skb_any(skb);
35024 break;
35025 }
35026 @@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
35027
35028 if ((cm_id) && (cm_id->event_handler)) {
35029 if (issue_disconn) {
35030 - atomic_inc(&cm_disconnects);
35031 + atomic_inc_unchecked(&cm_disconnects);
35032 cm_event.event = IW_CM_EVENT_DISCONNECT;
35033 cm_event.status = disconn_status;
35034 cm_event.local_addr = cm_id->local_addr;
35035 @@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
35036 }
35037
35038 if (issue_close) {
35039 - atomic_inc(&cm_closes);
35040 + atomic_inc_unchecked(&cm_closes);
35041 nes_disconnect(nesqp, 1);
35042
35043 cm_id->provider_data = nesqp;
35044 @@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
35045
35046 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
35047 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
35048 - atomic_inc(&cm_accepts);
35049 + atomic_inc_unchecked(&cm_accepts);
35050
35051 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
35052 atomic_read(&nesvnic->netdev->refcnt));
35053 @@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
35054
35055 struct nes_cm_core *cm_core;
35056
35057 - atomic_inc(&cm_rejects);
35058 + atomic_inc_unchecked(&cm_rejects);
35059 cm_node = (struct nes_cm_node *) cm_id->provider_data;
35060 loopback = cm_node->loopbackpartner;
35061 cm_core = cm_node->cm_core;
35062 @@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
35063 ntohl(cm_id->local_addr.sin_addr.s_addr),
35064 ntohs(cm_id->local_addr.sin_port));
35065
35066 - atomic_inc(&cm_connects);
35067 + atomic_inc_unchecked(&cm_connects);
35068 nesqp->active_conn = 1;
35069
35070 /* cache the cm_id in the qp */
35071 @@ -3195,7 +3195,7 @@ static void cm_event_connected(struct nes_cm_event *event)
35072 if (nesqp->destroyed) {
35073 return;
35074 }
35075 - atomic_inc(&cm_connecteds);
35076 + atomic_inc_unchecked(&cm_connecteds);
35077 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
35078 " local port 0x%04X. jiffies = %lu.\n",
35079 nesqp->hwqp.qp_id,
35080 @@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm_event *event)
35081
35082 ret = cm_id->event_handler(cm_id, &cm_event);
35083 cm_id->add_ref(cm_id);
35084 - atomic_inc(&cm_closes);
35085 + atomic_inc_unchecked(&cm_closes);
35086 cm_event.event = IW_CM_EVENT_CLOSE;
35087 cm_event.status = IW_CM_EVENT_STATUS_OK;
35088 cm_event.provider_data = cm_id->provider_data;
35089 @@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
35090 return;
35091 cm_id = cm_node->cm_id;
35092
35093 - atomic_inc(&cm_connect_reqs);
35094 + atomic_inc_unchecked(&cm_connect_reqs);
35095 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
35096 cm_node, cm_id, jiffies);
35097
35098 @@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
35099 return;
35100 cm_id = cm_node->cm_id;
35101
35102 - atomic_inc(&cm_connect_reqs);
35103 + atomic_inc_unchecked(&cm_connect_reqs);
35104 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
35105 cm_node, cm_id, jiffies);
35106
35107 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
35108 index e593af3..870694a 100644
35109 --- a/drivers/infiniband/hw/nes/nes_nic.c
35110 +++ b/drivers/infiniband/hw/nes/nes_nic.c
35111 @@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
35112 target_stat_values[++index] = mh_detected;
35113 target_stat_values[++index] = mh_pauses_sent;
35114 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
35115 - target_stat_values[++index] = atomic_read(&cm_connects);
35116 - target_stat_values[++index] = atomic_read(&cm_accepts);
35117 - target_stat_values[++index] = atomic_read(&cm_disconnects);
35118 - target_stat_values[++index] = atomic_read(&cm_connecteds);
35119 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
35120 - target_stat_values[++index] = atomic_read(&cm_rejects);
35121 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
35122 - target_stat_values[++index] = atomic_read(&qps_created);
35123 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
35124 - target_stat_values[++index] = atomic_read(&qps_destroyed);
35125 - target_stat_values[++index] = atomic_read(&cm_closes);
35126 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
35127 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
35128 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
35129 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
35130 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
35131 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
35132 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
35133 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
35134 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
35135 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
35136 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
35137 target_stat_values[++index] = cm_packets_sent;
35138 target_stat_values[++index] = cm_packets_bounced;
35139 target_stat_values[++index] = cm_packets_created;
35140 @@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
35141 target_stat_values[++index] = cm_listens_created;
35142 target_stat_values[++index] = cm_listens_destroyed;
35143 target_stat_values[++index] = cm_backlog_drops;
35144 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
35145 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
35146 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
35147 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
35148 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
35149 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
35150 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
35151 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
35152 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
35153 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
35154 target_stat_values[++index] = int_mod_timer_init;
35155 target_stat_values[++index] = int_mod_cq_depth_1;
35156 target_stat_values[++index] = int_mod_cq_depth_4;
35157 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
35158 index a680c42..f914deb 100644
35159 --- a/drivers/infiniband/hw/nes/nes_verbs.c
35160 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
35161 @@ -45,9 +45,9 @@
35162
35163 #include <rdma/ib_umem.h>
35164
35165 -atomic_t mod_qp_timouts;
35166 -atomic_t qps_created;
35167 -atomic_t sw_qps_destroyed;
35168 +atomic_unchecked_t mod_qp_timouts;
35169 +atomic_unchecked_t qps_created;
35170 +atomic_unchecked_t sw_qps_destroyed;
35171
35172 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
35173
35174 @@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
35175 if (init_attr->create_flags)
35176 return ERR_PTR(-EINVAL);
35177
35178 - atomic_inc(&qps_created);
35179 + atomic_inc_unchecked(&qps_created);
35180 switch (init_attr->qp_type) {
35181 case IB_QPT_RC:
35182 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
35183 @@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
35184 struct iw_cm_event cm_event;
35185 int ret;
35186
35187 - atomic_inc(&sw_qps_destroyed);
35188 + atomic_inc_unchecked(&sw_qps_destroyed);
35189 nesqp->destroyed = 1;
35190
35191 /* Blow away the connection if it exists. */
35192 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
35193 index ac11be0..3883c04 100644
35194 --- a/drivers/input/gameport/gameport.c
35195 +++ b/drivers/input/gameport/gameport.c
35196 @@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
35197 */
35198 static void gameport_init_port(struct gameport *gameport)
35199 {
35200 - static atomic_t gameport_no = ATOMIC_INIT(0);
35201 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
35202
35203 __module_get(THIS_MODULE);
35204
35205 mutex_init(&gameport->drv_mutex);
35206 device_initialize(&gameport->dev);
35207 - dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
35208 + dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
35209 gameport->dev.bus = &gameport_bus;
35210 gameport->dev.release = gameport_release_port;
35211 if (gameport->parent)
35212 diff --git a/drivers/input/input.c b/drivers/input/input.c
35213 index c82ae82..8cfb9cb 100644
35214 --- a/drivers/input/input.c
35215 +++ b/drivers/input/input.c
35216 @@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
35217 */
35218 int input_register_device(struct input_dev *dev)
35219 {
35220 - static atomic_t input_no = ATOMIC_INIT(0);
35221 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
35222 struct input_handler *handler;
35223 const char *path;
35224 int error;
35225 @@ -1585,7 +1585,7 @@ int input_register_device(struct input_dev *dev)
35226 dev->setkeycode = input_default_setkeycode;
35227
35228 dev_set_name(&dev->dev, "input%ld",
35229 - (unsigned long) atomic_inc_return(&input_no) - 1);
35230 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
35231
35232 error = device_add(&dev->dev);
35233 if (error)
35234 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
35235 index ca13a6b..b032b0c 100644
35236 --- a/drivers/input/joystick/sidewinder.c
35237 +++ b/drivers/input/joystick/sidewinder.c
35238 @@ -30,6 +30,7 @@
35239 #include <linux/kernel.h>
35240 #include <linux/module.h>
35241 #include <linux/slab.h>
35242 +#include <linux/sched.h>
35243 #include <linux/init.h>
35244 #include <linux/input.h>
35245 #include <linux/gameport.h>
35246 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
35247 unsigned char buf[SW_LENGTH];
35248 int i;
35249
35250 + pax_track_stack();
35251 +
35252 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
35253
35254 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
35255 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
35256 index 79e3edc..01412b9 100644
35257 --- a/drivers/input/joystick/xpad.c
35258 +++ b/drivers/input/joystick/xpad.c
35259 @@ -621,7 +621,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
35260
35261 static int xpad_led_probe(struct usb_xpad *xpad)
35262 {
35263 - static atomic_t led_seq = ATOMIC_INIT(0);
35264 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
35265 long led_no;
35266 struct xpad_led *led;
35267 struct led_classdev *led_cdev;
35268 @@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
35269 if (!led)
35270 return -ENOMEM;
35271
35272 - led_no = (long)atomic_inc_return(&led_seq) - 1;
35273 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
35274
35275 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
35276 led->xpad = xpad;
35277 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
35278 index 0236f0d..c7327f1 100644
35279 --- a/drivers/input/serio/serio.c
35280 +++ b/drivers/input/serio/serio.c
35281 @@ -527,7 +527,7 @@ static void serio_release_port(struct device *dev)
35282 */
35283 static void serio_init_port(struct serio *serio)
35284 {
35285 - static atomic_t serio_no = ATOMIC_INIT(0);
35286 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
35287
35288 __module_get(THIS_MODULE);
35289
35290 @@ -536,7 +536,7 @@ static void serio_init_port(struct serio *serio)
35291 mutex_init(&serio->drv_mutex);
35292 device_initialize(&serio->dev);
35293 dev_set_name(&serio->dev, "serio%ld",
35294 - (long)atomic_inc_return(&serio_no) - 1);
35295 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
35296 serio->dev.bus = &serio_bus;
35297 serio->dev.release = serio_release_port;
35298 if (serio->parent) {
35299 diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
35300 index 33dcd8d..2783d25 100644
35301 --- a/drivers/isdn/gigaset/common.c
35302 +++ b/drivers/isdn/gigaset/common.c
35303 @@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
35304 cs->commands_pending = 0;
35305 cs->cur_at_seq = 0;
35306 cs->gotfwver = -1;
35307 - cs->open_count = 0;
35308 + local_set(&cs->open_count, 0);
35309 cs->dev = NULL;
35310 cs->tty = NULL;
35311 cs->tty_dev = NULL;
35312 diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
35313 index a2f6125..6a70677 100644
35314 --- a/drivers/isdn/gigaset/gigaset.h
35315 +++ b/drivers/isdn/gigaset/gigaset.h
35316 @@ -34,6 +34,7 @@
35317 #include <linux/tty_driver.h>
35318 #include <linux/list.h>
35319 #include <asm/atomic.h>
35320 +#include <asm/local.h>
35321
35322 #define GIG_VERSION {0,5,0,0}
35323 #define GIG_COMPAT {0,4,0,0}
35324 @@ -446,7 +447,7 @@ struct cardstate {
35325 spinlock_t cmdlock;
35326 unsigned curlen, cmdbytes;
35327
35328 - unsigned open_count;
35329 + local_t open_count;
35330 struct tty_struct *tty;
35331 struct tasklet_struct if_wake_tasklet;
35332 unsigned control_state;
35333 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
35334 index b3065b8..c7e8cc9 100644
35335 --- a/drivers/isdn/gigaset/interface.c
35336 +++ b/drivers/isdn/gigaset/interface.c
35337 @@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
35338 return -ERESTARTSYS; // FIXME -EINTR?
35339 tty->driver_data = cs;
35340
35341 - ++cs->open_count;
35342 -
35343 - if (cs->open_count == 1) {
35344 + if (local_inc_return(&cs->open_count) == 1) {
35345 spin_lock_irqsave(&cs->lock, flags);
35346 cs->tty = tty;
35347 spin_unlock_irqrestore(&cs->lock, flags);
35348 @@ -195,10 +193,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
35349
35350 if (!cs->connected)
35351 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35352 - else if (!cs->open_count)
35353 + else if (!local_read(&cs->open_count))
35354 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35355 else {
35356 - if (!--cs->open_count) {
35357 + if (!local_dec_return(&cs->open_count)) {
35358 spin_lock_irqsave(&cs->lock, flags);
35359 cs->tty = NULL;
35360 spin_unlock_irqrestore(&cs->lock, flags);
35361 @@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *tty, struct file *file,
35362 if (!cs->connected) {
35363 gig_dbg(DEBUG_IF, "not connected");
35364 retval = -ENODEV;
35365 - } else if (!cs->open_count)
35366 + } else if (!local_read(&cs->open_count))
35367 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35368 else {
35369 retval = 0;
35370 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
35371 if (!cs->connected) {
35372 gig_dbg(DEBUG_IF, "not connected");
35373 retval = -ENODEV;
35374 - } else if (!cs->open_count)
35375 + } else if (!local_read(&cs->open_count))
35376 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35377 else if (cs->mstate != MS_LOCKED) {
35378 dev_warn(cs->dev, "can't write to unlocked device\n");
35379 @@ -395,7 +393,7 @@ static int if_write_room(struct tty_struct *tty)
35380 if (!cs->connected) {
35381 gig_dbg(DEBUG_IF, "not connected");
35382 retval = -ENODEV;
35383 - } else if (!cs->open_count)
35384 + } else if (!local_read(&cs->open_count))
35385 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35386 else if (cs->mstate != MS_LOCKED) {
35387 dev_warn(cs->dev, "can't write to unlocked device\n");
35388 @@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
35389
35390 if (!cs->connected)
35391 gig_dbg(DEBUG_IF, "not connected");
35392 - else if (!cs->open_count)
35393 + else if (!local_read(&cs->open_count))
35394 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35395 else if (cs->mstate != MS_LOCKED)
35396 dev_warn(cs->dev, "can't write to unlocked device\n");
35397 @@ -453,7 +451,7 @@ static void if_throttle(struct tty_struct *tty)
35398
35399 if (!cs->connected)
35400 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35401 - else if (!cs->open_count)
35402 + else if (!local_read(&cs->open_count))
35403 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35404 else {
35405 //FIXME
35406 @@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_struct *tty)
35407
35408 if (!cs->connected)
35409 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35410 - else if (!cs->open_count)
35411 + else if (!local_read(&cs->open_count))
35412 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35413 else {
35414 //FIXME
35415 @@ -510,7 +508,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
35416 goto out;
35417 }
35418
35419 - if (!cs->open_count) {
35420 + if (!local_read(&cs->open_count)) {
35421 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35422 goto out;
35423 }
35424 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
35425 index a7c0083..62a7cb6 100644
35426 --- a/drivers/isdn/hardware/avm/b1.c
35427 +++ b/drivers/isdn/hardware/avm/b1.c
35428 @@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
35429 }
35430 if (left) {
35431 if (t4file->user) {
35432 - if (copy_from_user(buf, dp, left))
35433 + if (left > sizeof buf || copy_from_user(buf, dp, left))
35434 return -EFAULT;
35435 } else {
35436 memcpy(buf, dp, left);
35437 @@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
35438 }
35439 if (left) {
35440 if (config->user) {
35441 - if (copy_from_user(buf, dp, left))
35442 + if (left > sizeof buf || copy_from_user(buf, dp, left))
35443 return -EFAULT;
35444 } else {
35445 memcpy(buf, dp, left);
35446 diff --git a/drivers/isdn/hardware/eicon/capidtmf.c b/drivers/isdn/hardware/eicon/capidtmf.c
35447 index f130724..c373c68 100644
35448 --- a/drivers/isdn/hardware/eicon/capidtmf.c
35449 +++ b/drivers/isdn/hardware/eicon/capidtmf.c
35450 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_state *p_state, byte *buffer, word leng
35451 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
35452 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
35453
35454 + pax_track_stack();
35455
35456 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
35457 {
35458 diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c
35459 index 4d425c6..a9be6c4 100644
35460 --- a/drivers/isdn/hardware/eicon/capifunc.c
35461 +++ b/drivers/isdn/hardware/eicon/capifunc.c
35462 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
35463 IDI_SYNC_REQ req;
35464 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35465
35466 + pax_track_stack();
35467 +
35468 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35469
35470 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35471 diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c
35472 index 3029234..ef0d9e2 100644
35473 --- a/drivers/isdn/hardware/eicon/diddfunc.c
35474 +++ b/drivers/isdn/hardware/eicon/diddfunc.c
35475 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35476 IDI_SYNC_REQ req;
35477 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35478
35479 + pax_track_stack();
35480 +
35481 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35482
35483 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35484 diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c
35485 index d36a4c0..11e7d1a 100644
35486 --- a/drivers/isdn/hardware/eicon/divasfunc.c
35487 +++ b/drivers/isdn/hardware/eicon/divasfunc.c
35488 @@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35489 IDI_SYNC_REQ req;
35490 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35491
35492 + pax_track_stack();
35493 +
35494 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35495
35496 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35497 diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
35498 index 85784a7..a19ca98 100644
35499 --- a/drivers/isdn/hardware/eicon/divasync.h
35500 +++ b/drivers/isdn/hardware/eicon/divasync.h
35501 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
35502 } diva_didd_add_adapter_t;
35503 typedef struct _diva_didd_remove_adapter {
35504 IDI_CALL p_request;
35505 -} diva_didd_remove_adapter_t;
35506 +} __no_const diva_didd_remove_adapter_t;
35507 typedef struct _diva_didd_read_adapter_array {
35508 void * buffer;
35509 dword length;
35510 diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
35511 index db87d51..7d09acf 100644
35512 --- a/drivers/isdn/hardware/eicon/idifunc.c
35513 +++ b/drivers/isdn/hardware/eicon/idifunc.c
35514 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35515 IDI_SYNC_REQ req;
35516 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35517
35518 + pax_track_stack();
35519 +
35520 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35521
35522 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35523 diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
35524 index ae89fb8..0fab299 100644
35525 --- a/drivers/isdn/hardware/eicon/message.c
35526 +++ b/drivers/isdn/hardware/eicon/message.c
35527 @@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
35528 dword d;
35529 word w;
35530
35531 + pax_track_stack();
35532 +
35533 a = plci->adapter;
35534 Id = ((word)plci->Id<<8)|a->Id;
35535 PUT_WORD(&SS_Ind[4],0x0000);
35536 @@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE *bp, word b_channel_info,
35537 word j, n, w;
35538 dword d;
35539
35540 + pax_track_stack();
35541 +
35542
35543 for(i=0;i<8;i++) bp_parms[i].length = 0;
35544 for(i=0;i<2;i++) global_config[i].length = 0;
35545 @@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARSE *bp)
35546 const byte llc3[] = {4,3,2,2,6,6,0};
35547 const byte header[] = {0,2,3,3,0,0,0};
35548
35549 + pax_track_stack();
35550 +
35551 for(i=0;i<8;i++) bp_parms[i].length = 0;
35552 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
35553 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
35554 @@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER * a, PLCI * plci)
35555 word appl_number_group_type[MAX_APPL];
35556 PLCI *auxplci;
35557
35558 + pax_track_stack();
35559 +
35560 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
35561
35562 if(!a->group_optimization_enabled)
35563 diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c
35564 index a564b75..f3cf8b5 100644
35565 --- a/drivers/isdn/hardware/eicon/mntfunc.c
35566 +++ b/drivers/isdn/hardware/eicon/mntfunc.c
35567 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35568 IDI_SYNC_REQ req;
35569 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35570
35571 + pax_track_stack();
35572 +
35573 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35574
35575 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35576 diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
35577 index a3bd163..8956575 100644
35578 --- a/drivers/isdn/hardware/eicon/xdi_adapter.h
35579 +++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
35580 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
35581 typedef struct _diva_os_idi_adapter_interface {
35582 diva_init_card_proc_t cleanup_adapter_proc;
35583 diva_cmd_card_proc_t cmd_proc;
35584 -} diva_os_idi_adapter_interface_t;
35585 +} __no_const diva_os_idi_adapter_interface_t;
35586
35587 typedef struct _diva_os_xdi_adapter {
35588 struct list_head link;
35589 diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
35590 index adb1e8c..21b590b 100644
35591 --- a/drivers/isdn/i4l/isdn_common.c
35592 +++ b/drivers/isdn/i4l/isdn_common.c
35593 @@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
35594 } iocpar;
35595 void __user *argp = (void __user *)arg;
35596
35597 + pax_track_stack();
35598 +
35599 #define name iocpar.name
35600 #define bname iocpar.bname
35601 #define iocts iocpar.iocts
35602 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
35603 index bf7997a..cf091db 100644
35604 --- a/drivers/isdn/icn/icn.c
35605 +++ b/drivers/isdn/icn/icn.c
35606 @@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
35607 if (count > len)
35608 count = len;
35609 if (user) {
35610 - if (copy_from_user(msg, buf, count))
35611 + if (count > sizeof msg || copy_from_user(msg, buf, count))
35612 return -EFAULT;
35613 } else
35614 memcpy(msg, buf, count);
35615 diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
35616 index feb0fa4..f76f830 100644
35617 --- a/drivers/isdn/mISDN/socket.c
35618 +++ b/drivers/isdn/mISDN/socket.c
35619 @@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
35620 if (dev) {
35621 struct mISDN_devinfo di;
35622
35623 + memset(&di, 0, sizeof(di));
35624 di.id = dev->id;
35625 di.Dprotocols = dev->Dprotocols;
35626 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
35627 @@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
35628 if (dev) {
35629 struct mISDN_devinfo di;
35630
35631 + memset(&di, 0, sizeof(di));
35632 di.id = dev->id;
35633 di.Dprotocols = dev->Dprotocols;
35634 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
35635 diff --git a/drivers/isdn/sc/interrupt.c b/drivers/isdn/sc/interrupt.c
35636 index 485be8b..f0225bc 100644
35637 --- a/drivers/isdn/sc/interrupt.c
35638 +++ b/drivers/isdn/sc/interrupt.c
35639 @@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
35640 }
35641 else if(callid>=0x0000 && callid<=0x7FFF)
35642 {
35643 + int len;
35644 +
35645 pr_debug("%s: Got Incoming Call\n",
35646 sc_adapter[card]->devicename);
35647 - strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
35648 - strcpy(setup.eazmsn,
35649 - sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
35650 + len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
35651 + sizeof(setup.phone));
35652 + if (len >= sizeof(setup.phone))
35653 + continue;
35654 + len = strlcpy(setup.eazmsn,
35655 + sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
35656 + sizeof(setup.eazmsn));
35657 + if (len >= sizeof(setup.eazmsn))
35658 + continue;
35659 setup.si1 = 7;
35660 setup.si2 = 0;
35661 setup.plan = 0;
35662 @@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
35663 * Handle a GetMyNumber Rsp
35664 */
35665 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
35666 - strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
35667 + strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
35668 + rcvmsg.msg_data.byte_array,
35669 + sizeof(rcvmsg.msg_data.byte_array));
35670 continue;
35671 }
35672
35673 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
35674 index 8744d24..d1f9a9a 100644
35675 --- a/drivers/lguest/core.c
35676 +++ b/drivers/lguest/core.c
35677 @@ -91,9 +91,17 @@ static __init int map_switcher(void)
35678 * it's worked so far. The end address needs +1 because __get_vm_area
35679 * allocates an extra guard page, so we need space for that.
35680 */
35681 +
35682 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
35683 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
35684 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
35685 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
35686 +#else
35687 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
35688 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
35689 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
35690 +#endif
35691 +
35692 if (!switcher_vma) {
35693 err = -ENOMEM;
35694 printk("lguest: could not map switcher pages high\n");
35695 @@ -118,7 +126,7 @@ static __init int map_switcher(void)
35696 * Now the Switcher is mapped at the right address, we can't fail!
35697 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
35698 */
35699 - memcpy(switcher_vma->addr, start_switcher_text,
35700 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
35701 end_switcher_text - start_switcher_text);
35702
35703 printk(KERN_INFO "lguest: mapped switcher at %p\n",
35704 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
35705 index 6ae3888..8b38145 100644
35706 --- a/drivers/lguest/x86/core.c
35707 +++ b/drivers/lguest/x86/core.c
35708 @@ -59,7 +59,7 @@ static struct {
35709 /* Offset from where switcher.S was compiled to where we've copied it */
35710 static unsigned long switcher_offset(void)
35711 {
35712 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
35713 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
35714 }
35715
35716 /* This cpu's struct lguest_pages. */
35717 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
35718 * These copies are pretty cheap, so we do them unconditionally: */
35719 /* Save the current Host top-level page directory.
35720 */
35721 +
35722 +#ifdef CONFIG_PAX_PER_CPU_PGD
35723 + pages->state.host_cr3 = read_cr3();
35724 +#else
35725 pages->state.host_cr3 = __pa(current->mm->pgd);
35726 +#endif
35727 +
35728 /*
35729 * Set up the Guest's page tables to see this CPU's pages (and no
35730 * other CPU's pages).
35731 @@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
35732 * compiled-in switcher code and the high-mapped copy we just made.
35733 */
35734 for (i = 0; i < IDT_ENTRIES; i++)
35735 - default_idt_entries[i] += switcher_offset();
35736 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
35737
35738 /*
35739 * Set up the Switcher's per-cpu areas.
35740 @@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
35741 * it will be undisturbed when we switch. To change %cs and jump we
35742 * need this structure to feed to Intel's "lcall" instruction.
35743 */
35744 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
35745 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
35746 lguest_entry.segment = LGUEST_CS;
35747
35748 /*
35749 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
35750 index 40634b0..4f5855e 100644
35751 --- a/drivers/lguest/x86/switcher_32.S
35752 +++ b/drivers/lguest/x86/switcher_32.S
35753 @@ -87,6 +87,7 @@
35754 #include <asm/page.h>
35755 #include <asm/segment.h>
35756 #include <asm/lguest.h>
35757 +#include <asm/processor-flags.h>
35758
35759 // We mark the start of the code to copy
35760 // It's placed in .text tho it's never run here
35761 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
35762 // Changes type when we load it: damn Intel!
35763 // For after we switch over our page tables
35764 // That entry will be read-only: we'd crash.
35765 +
35766 +#ifdef CONFIG_PAX_KERNEXEC
35767 + mov %cr0, %edx
35768 + xor $X86_CR0_WP, %edx
35769 + mov %edx, %cr0
35770 +#endif
35771 +
35772 movl $(GDT_ENTRY_TSS*8), %edx
35773 ltr %dx
35774
35775 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
35776 // Let's clear it again for our return.
35777 // The GDT descriptor of the Host
35778 // Points to the table after two "size" bytes
35779 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
35780 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
35781 // Clear "used" from type field (byte 5, bit 2)
35782 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
35783 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
35784 +
35785 +#ifdef CONFIG_PAX_KERNEXEC
35786 + mov %cr0, %eax
35787 + xor $X86_CR0_WP, %eax
35788 + mov %eax, %cr0
35789 +#endif
35790
35791 // Once our page table's switched, the Guest is live!
35792 // The Host fades as we run this final step.
35793 @@ -295,13 +309,12 @@ deliver_to_host:
35794 // I consulted gcc, and it gave
35795 // These instructions, which I gladly credit:
35796 leal (%edx,%ebx,8), %eax
35797 - movzwl (%eax),%edx
35798 - movl 4(%eax), %eax
35799 - xorw %ax, %ax
35800 - orl %eax, %edx
35801 + movl 4(%eax), %edx
35802 + movw (%eax), %dx
35803 // Now the address of the handler's in %edx
35804 // We call it now: its "iret" drops us home.
35805 - jmp *%edx
35806 + ljmp $__KERNEL_CS, $1f
35807 +1: jmp *%edx
35808
35809 // Every interrupt can come to us here
35810 // But we must truly tell each apart.
35811 diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
35812 index 588a5b0..b71db89 100644
35813 --- a/drivers/macintosh/macio_asic.c
35814 +++ b/drivers/macintosh/macio_asic.c
35815 @@ -701,7 +701,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
35816 * MacIO is matched against any Apple ID, it's probe() function
35817 * will then decide wether it applies or not
35818 */
35819 -static const struct pci_device_id __devinitdata pci_ids [] = { {
35820 +static const struct pci_device_id __devinitconst pci_ids [] = { {
35821 .vendor = PCI_VENDOR_ID_APPLE,
35822 .device = PCI_ANY_ID,
35823 .subvendor = PCI_ANY_ID,
35824 diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
35825 index a348bb0..ecd9b3f 100644
35826 --- a/drivers/macintosh/via-pmu-backlight.c
35827 +++ b/drivers/macintosh/via-pmu-backlight.c
35828 @@ -15,7 +15,7 @@
35829
35830 #define MAX_PMU_LEVEL 0xFF
35831
35832 -static struct backlight_ops pmu_backlight_data;
35833 +static const struct backlight_ops pmu_backlight_data;
35834 static DEFINE_SPINLOCK(pmu_backlight_lock);
35835 static int sleeping, uses_pmu_bl;
35836 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
35837 @@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(struct backlight_device *bd)
35838 return bd->props.brightness;
35839 }
35840
35841 -static struct backlight_ops pmu_backlight_data = {
35842 +static const struct backlight_ops pmu_backlight_data = {
35843 .get_brightness = pmu_backlight_get_brightness,
35844 .update_status = pmu_backlight_update_status,
35845
35846 diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
35847 index 6f308a4..b5f7ff7 100644
35848 --- a/drivers/macintosh/via-pmu.c
35849 +++ b/drivers/macintosh/via-pmu.c
35850 @@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state_t state)
35851 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
35852 }
35853
35854 -static struct platform_suspend_ops pmu_pm_ops = {
35855 +static const struct platform_suspend_ops pmu_pm_ops = {
35856 .enter = powerbook_sleep,
35857 .valid = pmu_sleep_valid,
35858 };
35859 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
35860 index 818b617..4656e38 100644
35861 --- a/drivers/md/dm-ioctl.c
35862 +++ b/drivers/md/dm-ioctl.c
35863 @@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
35864 cmd == DM_LIST_VERSIONS_CMD)
35865 return 0;
35866
35867 - if ((cmd == DM_DEV_CREATE_CMD)) {
35868 + if (cmd == DM_DEV_CREATE_CMD) {
35869 if (!*param->name) {
35870 DMWARN("name not supplied when creating device");
35871 return -EINVAL;
35872 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
35873 index 6021d0a..a878643 100644
35874 --- a/drivers/md/dm-raid1.c
35875 +++ b/drivers/md/dm-raid1.c
35876 @@ -41,7 +41,7 @@ enum dm_raid1_error {
35877
35878 struct mirror {
35879 struct mirror_set *ms;
35880 - atomic_t error_count;
35881 + atomic_unchecked_t error_count;
35882 unsigned long error_type;
35883 struct dm_dev *dev;
35884 sector_t offset;
35885 @@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
35886 * simple way to tell if a device has encountered
35887 * errors.
35888 */
35889 - atomic_inc(&m->error_count);
35890 + atomic_inc_unchecked(&m->error_count);
35891
35892 if (test_and_set_bit(error_type, &m->error_type))
35893 return;
35894 @@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
35895 }
35896
35897 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
35898 - if (!atomic_read(&new->error_count)) {
35899 + if (!atomic_read_unchecked(&new->error_count)) {
35900 set_default_mirror(new);
35901 break;
35902 }
35903 @@ -363,7 +363,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
35904 struct mirror *m = get_default_mirror(ms);
35905
35906 do {
35907 - if (likely(!atomic_read(&m->error_count)))
35908 + if (likely(!atomic_read_unchecked(&m->error_count)))
35909 return m;
35910
35911 if (m-- == ms->mirror)
35912 @@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
35913 {
35914 struct mirror *default_mirror = get_default_mirror(m->ms);
35915
35916 - return !atomic_read(&default_mirror->error_count);
35917 + return !atomic_read_unchecked(&default_mirror->error_count);
35918 }
35919
35920 static int mirror_available(struct mirror_set *ms, struct bio *bio)
35921 @@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
35922 */
35923 if (likely(region_in_sync(ms, region, 1)))
35924 m = choose_mirror(ms, bio->bi_sector);
35925 - else if (m && atomic_read(&m->error_count))
35926 + else if (m && atomic_read_unchecked(&m->error_count))
35927 m = NULL;
35928
35929 if (likely(m))
35930 @@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
35931 }
35932
35933 ms->mirror[mirror].ms = ms;
35934 - atomic_set(&(ms->mirror[mirror].error_count), 0);
35935 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
35936 ms->mirror[mirror].error_type = 0;
35937 ms->mirror[mirror].offset = offset;
35938
35939 @@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_target *ti)
35940 */
35941 static char device_status_char(struct mirror *m)
35942 {
35943 - if (!atomic_read(&(m->error_count)))
35944 + if (!atomic_read_unchecked(&(m->error_count)))
35945 return 'A';
35946
35947 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
35948 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
35949 index bd58703..9f26571 100644
35950 --- a/drivers/md/dm-stripe.c
35951 +++ b/drivers/md/dm-stripe.c
35952 @@ -20,7 +20,7 @@ struct stripe {
35953 struct dm_dev *dev;
35954 sector_t physical_start;
35955
35956 - atomic_t error_count;
35957 + atomic_unchecked_t error_count;
35958 };
35959
35960 struct stripe_c {
35961 @@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
35962 kfree(sc);
35963 return r;
35964 }
35965 - atomic_set(&(sc->stripe[i].error_count), 0);
35966 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
35967 }
35968
35969 ti->private = sc;
35970 @@ -257,7 +257,7 @@ static int stripe_status(struct dm_target *ti,
35971 DMEMIT("%d ", sc->stripes);
35972 for (i = 0; i < sc->stripes; i++) {
35973 DMEMIT("%s ", sc->stripe[i].dev->name);
35974 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
35975 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
35976 'D' : 'A';
35977 }
35978 buffer[i] = '\0';
35979 @@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
35980 */
35981 for (i = 0; i < sc->stripes; i++)
35982 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
35983 - atomic_inc(&(sc->stripe[i].error_count));
35984 - if (atomic_read(&(sc->stripe[i].error_count)) <
35985 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
35986 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
35987 DM_IO_ERROR_THRESHOLD)
35988 queue_work(kstriped, &sc->kstriped_ws);
35989 }
35990 diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
35991 index 4b04590..13a77b2 100644
35992 --- a/drivers/md/dm-sysfs.c
35993 +++ b/drivers/md/dm-sysfs.c
35994 @@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
35995 NULL,
35996 };
35997
35998 -static struct sysfs_ops dm_sysfs_ops = {
35999 +static const struct sysfs_ops dm_sysfs_ops = {
36000 .show = dm_attr_show,
36001 };
36002
36003 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
36004 index 03345bb..332250d 100644
36005 --- a/drivers/md/dm-table.c
36006 +++ b/drivers/md/dm-table.c
36007 @@ -376,7 +376,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
36008 if (!dev_size)
36009 return 0;
36010
36011 - if ((start >= dev_size) || (start + len > dev_size)) {
36012 + if ((start >= dev_size) || (len > dev_size - start)) {
36013 DMWARN("%s: %s too small for target: "
36014 "start=%llu, len=%llu, dev_size=%llu",
36015 dm_device_name(ti->table->md), bdevname(bdev, b),
36016 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
36017 index c988ac2..c418141 100644
36018 --- a/drivers/md/dm.c
36019 +++ b/drivers/md/dm.c
36020 @@ -165,9 +165,9 @@ struct mapped_device {
36021 /*
36022 * Event handling.
36023 */
36024 - atomic_t event_nr;
36025 + atomic_unchecked_t event_nr;
36026 wait_queue_head_t eventq;
36027 - atomic_t uevent_seq;
36028 + atomic_unchecked_t uevent_seq;
36029 struct list_head uevent_list;
36030 spinlock_t uevent_lock; /* Protect access to uevent_list */
36031
36032 @@ -1776,8 +1776,8 @@ static struct mapped_device *alloc_dev(int minor)
36033 rwlock_init(&md->map_lock);
36034 atomic_set(&md->holders, 1);
36035 atomic_set(&md->open_count, 0);
36036 - atomic_set(&md->event_nr, 0);
36037 - atomic_set(&md->uevent_seq, 0);
36038 + atomic_set_unchecked(&md->event_nr, 0);
36039 + atomic_set_unchecked(&md->uevent_seq, 0);
36040 INIT_LIST_HEAD(&md->uevent_list);
36041 spin_lock_init(&md->uevent_lock);
36042
36043 @@ -1927,7 +1927,7 @@ static void event_callback(void *context)
36044
36045 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
36046
36047 - atomic_inc(&md->event_nr);
36048 + atomic_inc_unchecked(&md->event_nr);
36049 wake_up(&md->eventq);
36050 }
36051
36052 @@ -2562,18 +2562,18 @@ void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
36053
36054 uint32_t dm_next_uevent_seq(struct mapped_device *md)
36055 {
36056 - return atomic_add_return(1, &md->uevent_seq);
36057 + return atomic_add_return_unchecked(1, &md->uevent_seq);
36058 }
36059
36060 uint32_t dm_get_event_nr(struct mapped_device *md)
36061 {
36062 - return atomic_read(&md->event_nr);
36063 + return atomic_read_unchecked(&md->event_nr);
36064 }
36065
36066 int dm_wait_event(struct mapped_device *md, int event_nr)
36067 {
36068 return wait_event_interruptible(md->eventq,
36069 - (event_nr != atomic_read(&md->event_nr)));
36070 + (event_nr != atomic_read_unchecked(&md->event_nr)));
36071 }
36072
36073 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
36074 diff --git a/drivers/md/md.c b/drivers/md/md.c
36075 index 4ce6e2f..7a9530a 100644
36076 --- a/drivers/md/md.c
36077 +++ b/drivers/md/md.c
36078 @@ -153,10 +153,10 @@ static int start_readonly;
36079 * start build, activate spare
36080 */
36081 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
36082 -static atomic_t md_event_count;
36083 +static atomic_unchecked_t md_event_count;
36084 void md_new_event(mddev_t *mddev)
36085 {
36086 - atomic_inc(&md_event_count);
36087 + atomic_inc_unchecked(&md_event_count);
36088 wake_up(&md_event_waiters);
36089 }
36090 EXPORT_SYMBOL_GPL(md_new_event);
36091 @@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
36092 */
36093 static void md_new_event_inintr(mddev_t *mddev)
36094 {
36095 - atomic_inc(&md_event_count);
36096 + atomic_inc_unchecked(&md_event_count);
36097 wake_up(&md_event_waiters);
36098 }
36099
36100 @@ -1226,7 +1226,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
36101
36102 rdev->preferred_minor = 0xffff;
36103 rdev->data_offset = le64_to_cpu(sb->data_offset);
36104 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
36105 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
36106
36107 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
36108 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
36109 @@ -1400,7 +1400,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
36110 else
36111 sb->resync_offset = cpu_to_le64(0);
36112
36113 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
36114 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
36115
36116 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
36117 sb->size = cpu_to_le64(mddev->dev_sectors);
36118 @@ -2222,7 +2222,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
36119 static ssize_t
36120 errors_show(mdk_rdev_t *rdev, char *page)
36121 {
36122 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
36123 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
36124 }
36125
36126 static ssize_t
36127 @@ -2231,7 +2231,7 @@ errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
36128 char *e;
36129 unsigned long n = simple_strtoul(buf, &e, 10);
36130 if (*buf && (*e == 0 || *e == '\n')) {
36131 - atomic_set(&rdev->corrected_errors, n);
36132 + atomic_set_unchecked(&rdev->corrected_errors, n);
36133 return len;
36134 }
36135 return -EINVAL;
36136 @@ -2525,7 +2525,7 @@ static void rdev_free(struct kobject *ko)
36137 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
36138 kfree(rdev);
36139 }
36140 -static struct sysfs_ops rdev_sysfs_ops = {
36141 +static const struct sysfs_ops rdev_sysfs_ops = {
36142 .show = rdev_attr_show,
36143 .store = rdev_attr_store,
36144 };
36145 @@ -2574,8 +2574,8 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
36146 rdev->data_offset = 0;
36147 rdev->sb_events = 0;
36148 atomic_set(&rdev->nr_pending, 0);
36149 - atomic_set(&rdev->read_errors, 0);
36150 - atomic_set(&rdev->corrected_errors, 0);
36151 + atomic_set_unchecked(&rdev->read_errors, 0);
36152 + atomic_set_unchecked(&rdev->corrected_errors, 0);
36153
36154 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
36155 if (!size) {
36156 @@ -3895,7 +3895,7 @@ static void md_free(struct kobject *ko)
36157 kfree(mddev);
36158 }
36159
36160 -static struct sysfs_ops md_sysfs_ops = {
36161 +static const struct sysfs_ops md_sysfs_ops = {
36162 .show = md_attr_show,
36163 .store = md_attr_store,
36164 };
36165 @@ -4482,7 +4482,8 @@ out:
36166 err = 0;
36167 blk_integrity_unregister(disk);
36168 md_new_event(mddev);
36169 - sysfs_notify_dirent(mddev->sysfs_state);
36170 + if (mddev->sysfs_state)
36171 + sysfs_notify_dirent(mddev->sysfs_state);
36172 return err;
36173 }
36174
36175 @@ -5962,7 +5963,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
36176
36177 spin_unlock(&pers_lock);
36178 seq_printf(seq, "\n");
36179 - mi->event = atomic_read(&md_event_count);
36180 + mi->event = atomic_read_unchecked(&md_event_count);
36181 return 0;
36182 }
36183 if (v == (void*)2) {
36184 @@ -6051,7 +6052,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
36185 chunk_kb ? "KB" : "B");
36186 if (bitmap->file) {
36187 seq_printf(seq, ", file: ");
36188 - seq_path(seq, &bitmap->file->f_path, " \t\n");
36189 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
36190 }
36191
36192 seq_printf(seq, "\n");
36193 @@ -6085,7 +6086,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
36194 else {
36195 struct seq_file *p = file->private_data;
36196 p->private = mi;
36197 - mi->event = atomic_read(&md_event_count);
36198 + mi->event = atomic_read_unchecked(&md_event_count);
36199 }
36200 return error;
36201 }
36202 @@ -6101,7 +6102,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
36203 /* always allow read */
36204 mask = POLLIN | POLLRDNORM;
36205
36206 - if (mi->event != atomic_read(&md_event_count))
36207 + if (mi->event != atomic_read_unchecked(&md_event_count))
36208 mask |= POLLERR | POLLPRI;
36209 return mask;
36210 }
36211 @@ -6145,7 +6146,7 @@ static int is_mddev_idle(mddev_t *mddev, int init)
36212 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
36213 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
36214 (int)part_stat_read(&disk->part0, sectors[1]) -
36215 - atomic_read(&disk->sync_io);
36216 + atomic_read_unchecked(&disk->sync_io);
36217 /* sync IO will cause sync_io to increase before the disk_stats
36218 * as sync_io is counted when a request starts, and
36219 * disk_stats is counted when it completes.
36220 diff --git a/drivers/md/md.h b/drivers/md/md.h
36221 index 87430fe..0024a4c 100644
36222 --- a/drivers/md/md.h
36223 +++ b/drivers/md/md.h
36224 @@ -94,10 +94,10 @@ struct mdk_rdev_s
36225 * only maintained for arrays that
36226 * support hot removal
36227 */
36228 - atomic_t read_errors; /* number of consecutive read errors that
36229 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
36230 * we have tried to ignore.
36231 */
36232 - atomic_t corrected_errors; /* number of corrected read errors,
36233 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
36234 * for reporting to userspace and storing
36235 * in superblock.
36236 */
36237 @@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
36238
36239 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
36240 {
36241 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
36242 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
36243 }
36244
36245 struct mdk_personality
36246 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
36247 index 968cb14..f0ad2e4 100644
36248 --- a/drivers/md/raid1.c
36249 +++ b/drivers/md/raid1.c
36250 @@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
36251 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
36252 continue;
36253 rdev = conf->mirrors[d].rdev;
36254 - atomic_add(s, &rdev->corrected_errors);
36255 + atomic_add_unchecked(s, &rdev->corrected_errors);
36256 if (sync_page_io(rdev->bdev,
36257 sect + rdev->data_offset,
36258 s<<9,
36259 @@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
36260 /* Well, this device is dead */
36261 md_error(mddev, rdev);
36262 else {
36263 - atomic_add(s, &rdev->corrected_errors);
36264 + atomic_add_unchecked(s, &rdev->corrected_errors);
36265 printk(KERN_INFO
36266 "raid1:%s: read error corrected "
36267 "(%d sectors at %llu on %s)\n",
36268 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
36269 index 1b4e232..cf0f534 100644
36270 --- a/drivers/md/raid10.c
36271 +++ b/drivers/md/raid10.c
36272 @@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bio, int error)
36273 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
36274 set_bit(R10BIO_Uptodate, &r10_bio->state);
36275 else {
36276 - atomic_add(r10_bio->sectors,
36277 + atomic_add_unchecked(r10_bio->sectors,
36278 &conf->mirrors[d].rdev->corrected_errors);
36279 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
36280 md_error(r10_bio->mddev,
36281 @@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
36282 test_bit(In_sync, &rdev->flags)) {
36283 atomic_inc(&rdev->nr_pending);
36284 rcu_read_unlock();
36285 - atomic_add(s, &rdev->corrected_errors);
36286 + atomic_add_unchecked(s, &rdev->corrected_errors);
36287 if (sync_page_io(rdev->bdev,
36288 r10_bio->devs[sl].addr +
36289 sect + rdev->data_offset,
36290 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
36291 index 883215d..675bf47 100644
36292 --- a/drivers/md/raid5.c
36293 +++ b/drivers/md/raid5.c
36294 @@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
36295 bi->bi_next = NULL;
36296 if ((rw & WRITE) &&
36297 test_bit(R5_ReWrite, &sh->dev[i].flags))
36298 - atomic_add(STRIPE_SECTORS,
36299 + atomic_add_unchecked(STRIPE_SECTORS,
36300 &rdev->corrected_errors);
36301 generic_make_request(bi);
36302 } else {
36303 @@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struct bio * bi, int error)
36304 clear_bit(R5_ReadError, &sh->dev[i].flags);
36305 clear_bit(R5_ReWrite, &sh->dev[i].flags);
36306 }
36307 - if (atomic_read(&conf->disks[i].rdev->read_errors))
36308 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
36309 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
36310 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
36311 } else {
36312 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
36313 int retry = 0;
36314 rdev = conf->disks[i].rdev;
36315
36316 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
36317 - atomic_inc(&rdev->read_errors);
36318 + atomic_inc_unchecked(&rdev->read_errors);
36319 if (conf->mddev->degraded >= conf->max_degraded)
36320 printk_rl(KERN_WARNING
36321 "raid5:%s: read error not correctable "
36322 @@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
36323 (unsigned long long)(sh->sector
36324 + rdev->data_offset),
36325 bdn);
36326 - else if (atomic_read(&rdev->read_errors)
36327 + else if (atomic_read_unchecked(&rdev->read_errors)
36328 > conf->max_nr_stripes)
36329 printk(KERN_WARNING
36330 "raid5:%s: Too many read errors, failing device %s.\n",
36331 @@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
36332 sector_t r_sector;
36333 struct stripe_head sh2;
36334
36335 + pax_track_stack();
36336
36337 chunk_offset = sector_div(new_sector, sectors_per_chunk);
36338 stripe = new_sector;
36339 diff --git a/drivers/media/common/saa7146_hlp.c b/drivers/media/common/saa7146_hlp.c
36340 index 05bde9c..2f31d40 100644
36341 --- a/drivers/media/common/saa7146_hlp.c
36342 +++ b/drivers/media/common/saa7146_hlp.c
36343 @@ -353,6 +353,8 @@ static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct sa
36344
36345 int x[32], y[32], w[32], h[32];
36346
36347 + pax_track_stack();
36348 +
36349 /* clear out memory */
36350 memset(&line_list[0], 0x00, sizeof(u32)*32);
36351 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
36352 diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36353 index cb22da5..82b686e 100644
36354 --- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36355 +++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36356 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * eb
36357 u8 buf[HOST_LINK_BUF_SIZE];
36358 int i;
36359
36360 + pax_track_stack();
36361 +
36362 dprintk("%s\n", __func__);
36363
36364 /* check if we have space for a link buf in the rx_buffer */
36365 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
36366 unsigned long timeout;
36367 int written;
36368
36369 + pax_track_stack();
36370 +
36371 dprintk("%s\n", __func__);
36372
36373 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
36374 diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
36375 index 2fe05d0..a3289c4 100644
36376 --- a/drivers/media/dvb/dvb-core/dvb_demux.h
36377 +++ b/drivers/media/dvb/dvb-core/dvb_demux.h
36378 @@ -71,7 +71,7 @@ struct dvb_demux_feed {
36379 union {
36380 dmx_ts_cb ts;
36381 dmx_section_cb sec;
36382 - } cb;
36383 + } __no_const cb;
36384
36385 struct dvb_demux *demux;
36386 void *priv;
36387 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
36388 index 94159b9..376bd8e 100644
36389 --- a/drivers/media/dvb/dvb-core/dvbdev.c
36390 +++ b/drivers/media/dvb/dvb-core/dvbdev.c
36391 @@ -191,7 +191,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
36392 const struct dvb_device *template, void *priv, int type)
36393 {
36394 struct dvb_device *dvbdev;
36395 - struct file_operations *dvbdevfops;
36396 + file_operations_no_const *dvbdevfops;
36397 struct device *clsdev;
36398 int minor;
36399 int id;
36400 diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
36401 index 2a53dd0..db8c07a 100644
36402 --- a/drivers/media/dvb/dvb-usb/cxusb.c
36403 +++ b/drivers/media/dvb/dvb-usb/cxusb.c
36404 @@ -1040,7 +1040,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
36405 struct dib0700_adapter_state {
36406 int (*set_param_save) (struct dvb_frontend *,
36407 struct dvb_frontend_parameters *);
36408 -};
36409 +} __no_const;
36410
36411 static int dib7070_set_param_override(struct dvb_frontend *fe,
36412 struct dvb_frontend_parameters *fep)
36413 diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
36414 index db7f7f7..f55e96f 100644
36415 --- a/drivers/media/dvb/dvb-usb/dib0700_core.c
36416 +++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
36417 @@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw
36418
36419 u8 buf[260];
36420
36421 + pax_track_stack();
36422 +
36423 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
36424 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
36425
36426 diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
36427 index 524acf5..5ffc403 100644
36428 --- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
36429 +++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
36430 @@ -28,7 +28,7 @@ MODULE_PARM_DESC(force_lna_activation, "force the activation of Low-Noise-Amplif
36431
36432 struct dib0700_adapter_state {
36433 int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
36434 -};
36435 +} __no_const;
36436
36437 /* Hauppauge Nova-T 500 (aka Bristol)
36438 * has a LNA on GPIO0 which is enabled by setting 1 */
36439 diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
36440 index ba91735..4261d84 100644
36441 --- a/drivers/media/dvb/frontends/dib3000.h
36442 +++ b/drivers/media/dvb/frontends/dib3000.h
36443 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
36444 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
36445 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
36446 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
36447 -};
36448 +} __no_const;
36449
36450 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
36451 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
36452 diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c
36453 index c709ce6..b3fe620 100644
36454 --- a/drivers/media/dvb/frontends/or51211.c
36455 +++ b/drivers/media/dvb/frontends/or51211.c
36456 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct dvb_frontend* fe,
36457 u8 tudata[585];
36458 int i;
36459
36460 + pax_track_stack();
36461 +
36462 dprintk("Firmware is %zd bytes\n",fw->size);
36463
36464 /* Get eprom data */
36465 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
36466 index 482d0f3..ee1e202 100644
36467 --- a/drivers/media/radio/radio-cadet.c
36468 +++ b/drivers/media/radio/radio-cadet.c
36469 @@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
36470 while (i < count && dev->rdsin != dev->rdsout)
36471 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
36472
36473 - if (copy_to_user(data, readbuf, i))
36474 + if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
36475 return -EFAULT;
36476 return i;
36477 }
36478 diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
36479 index 6dd51e2..0359b92 100644
36480 --- a/drivers/media/video/cx18/cx18-driver.c
36481 +++ b/drivers/media/video/cx18/cx18-driver.c
36482 @@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl[] __devinitdata = {
36483
36484 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
36485
36486 -static atomic_t cx18_instance = ATOMIC_INIT(0);
36487 +static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
36488
36489 /* Parameter declarations */
36490 static int cardtype[CX18_MAX_CARDS];
36491 @@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
36492 struct i2c_client c;
36493 u8 eedata[256];
36494
36495 + pax_track_stack();
36496 +
36497 memset(&c, 0, sizeof(c));
36498 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
36499 c.adapter = &cx->i2c_adap[0];
36500 @@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
36501 struct cx18 *cx;
36502
36503 /* FIXME - module parameter arrays constrain max instances */
36504 - i = atomic_inc_return(&cx18_instance) - 1;
36505 + i = atomic_inc_return_unchecked(&cx18_instance) - 1;
36506 if (i >= CX18_MAX_CARDS) {
36507 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
36508 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
36509 diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
36510 index 463ec34..2f4625a 100644
36511 --- a/drivers/media/video/ivtv/ivtv-driver.c
36512 +++ b/drivers/media/video/ivtv/ivtv-driver.c
36513 @@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl[] __devinitdata = {
36514 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
36515
36516 /* ivtv instance counter */
36517 -static atomic_t ivtv_instance = ATOMIC_INIT(0);
36518 +static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
36519
36520 /* Parameter declarations */
36521 static int cardtype[IVTV_MAX_CARDS];
36522 diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
36523 index 5fc4ac0..652a54a 100644
36524 --- a/drivers/media/video/omap24xxcam.c
36525 +++ b/drivers/media/video/omap24xxcam.c
36526 @@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(struct omap24xxcam_sgdma *sgdma,
36527 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
36528
36529 do_gettimeofday(&vb->ts);
36530 - vb->field_count = atomic_add_return(2, &fh->field_count);
36531 + vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
36532 if (csr & csr_error) {
36533 vb->state = VIDEOBUF_ERROR;
36534 if (!atomic_read(&fh->cam->in_reset)) {
36535 diff --git a/drivers/media/video/omap24xxcam.h b/drivers/media/video/omap24xxcam.h
36536 index 2ce67f5..cf26a5b 100644
36537 --- a/drivers/media/video/omap24xxcam.h
36538 +++ b/drivers/media/video/omap24xxcam.h
36539 @@ -533,7 +533,7 @@ struct omap24xxcam_fh {
36540 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
36541 struct videobuf_queue vbq;
36542 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
36543 - atomic_t field_count; /* field counter for videobuf_buffer */
36544 + atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
36545 /* accessing cam here doesn't need serialisation: it's constant */
36546 struct omap24xxcam_device *cam;
36547 };
36548 diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36549 index 299afa4..eb47459 100644
36550 --- a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36551 +++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36552 @@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
36553 u8 *eeprom;
36554 struct tveeprom tvdata;
36555
36556 + pax_track_stack();
36557 +
36558 memset(&tvdata,0,sizeof(tvdata));
36559
36560 eeprom = pvr2_eeprom_fetch(hdw);
36561 diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36562 index 5b152ff..3320638 100644
36563 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36564 +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36565 @@ -195,7 +195,7 @@ struct pvr2_hdw {
36566
36567 /* I2C stuff */
36568 struct i2c_adapter i2c_adap;
36569 - struct i2c_algorithm i2c_algo;
36570 + i2c_algorithm_no_const i2c_algo;
36571 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
36572 int i2c_cx25840_hack_state;
36573 int i2c_linked;
36574 diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
36575 index 1eabff6..8e2313a 100644
36576 --- a/drivers/media/video/saa7134/saa6752hs.c
36577 +++ b/drivers/media/video/saa7134/saa6752hs.c
36578 @@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_subdev *sd, u32 leading_null_bytes)
36579 unsigned char localPAT[256];
36580 unsigned char localPMT[256];
36581
36582 + pax_track_stack();
36583 +
36584 /* Set video format - must be done first as it resets other settings */
36585 set_reg8(client, 0x41, h->video_format);
36586
36587 diff --git a/drivers/media/video/saa7164/saa7164-cmd.c b/drivers/media/video/saa7164/saa7164-cmd.c
36588 index 9c1d3ac..b1b49e9 100644
36589 --- a/drivers/media/video/saa7164/saa7164-cmd.c
36590 +++ b/drivers/media/video/saa7164/saa7164-cmd.c
36591 @@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_dev *dev)
36592 wait_queue_head_t *q = 0;
36593 dprintk(DBGLVL_CMD, "%s()\n", __func__);
36594
36595 + pax_track_stack();
36596 +
36597 /* While any outstand message on the bus exists... */
36598 do {
36599
36600 @@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_dev *dev)
36601 u8 tmp[512];
36602 dprintk(DBGLVL_CMD, "%s()\n", __func__);
36603
36604 + pax_track_stack();
36605 +
36606 while (loop) {
36607
36608 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
36609 diff --git a/drivers/media/video/usbvideo/ibmcam.c b/drivers/media/video/usbvideo/ibmcam.c
36610 index b085496..cde0270 100644
36611 --- a/drivers/media/video/usbvideo/ibmcam.c
36612 +++ b/drivers/media/video/usbvideo/ibmcam.c
36613 @@ -3947,15 +3947,15 @@ static struct usb_device_id id_table[] = {
36614 static int __init ibmcam_init(void)
36615 {
36616 struct usbvideo_cb cbTbl;
36617 - memset(&cbTbl, 0, sizeof(cbTbl));
36618 - cbTbl.probe = ibmcam_probe;
36619 - cbTbl.setupOnOpen = ibmcam_setup_on_open;
36620 - cbTbl.videoStart = ibmcam_video_start;
36621 - cbTbl.videoStop = ibmcam_video_stop;
36622 - cbTbl.processData = ibmcam_ProcessIsocData;
36623 - cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36624 - cbTbl.adjustPicture = ibmcam_adjust_picture;
36625 - cbTbl.getFPS = ibmcam_calculate_fps;
36626 + memset((void *)&cbTbl, 0, sizeof(cbTbl));
36627 + *(void **)&cbTbl.probe = ibmcam_probe;
36628 + *(void **)&cbTbl.setupOnOpen = ibmcam_setup_on_open;
36629 + *(void **)&cbTbl.videoStart = ibmcam_video_start;
36630 + *(void **)&cbTbl.videoStop = ibmcam_video_stop;
36631 + *(void **)&cbTbl.processData = ibmcam_ProcessIsocData;
36632 + *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36633 + *(void **)&cbTbl.adjustPicture = ibmcam_adjust_picture;
36634 + *(void **)&cbTbl.getFPS = ibmcam_calculate_fps;
36635 return usbvideo_register(
36636 &cams,
36637 MAX_IBMCAM,
36638 diff --git a/drivers/media/video/usbvideo/konicawc.c b/drivers/media/video/usbvideo/konicawc.c
36639 index 31d57f2..600b735 100644
36640 --- a/drivers/media/video/usbvideo/konicawc.c
36641 +++ b/drivers/media/video/usbvideo/konicawc.c
36642 @@ -225,7 +225,7 @@ static void konicawc_register_input(struct konicawc *cam, struct usb_device *dev
36643 int error;
36644
36645 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
36646 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36647 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36648
36649 cam->input = input_dev = input_allocate_device();
36650 if (!input_dev) {
36651 @@ -935,16 +935,16 @@ static int __init konicawc_init(void)
36652 struct usbvideo_cb cbTbl;
36653 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
36654 DRIVER_DESC "\n");
36655 - memset(&cbTbl, 0, sizeof(cbTbl));
36656 - cbTbl.probe = konicawc_probe;
36657 - cbTbl.setupOnOpen = konicawc_setup_on_open;
36658 - cbTbl.processData = konicawc_process_isoc;
36659 - cbTbl.getFPS = konicawc_calculate_fps;
36660 - cbTbl.setVideoMode = konicawc_set_video_mode;
36661 - cbTbl.startDataPump = konicawc_start_data;
36662 - cbTbl.stopDataPump = konicawc_stop_data;
36663 - cbTbl.adjustPicture = konicawc_adjust_picture;
36664 - cbTbl.userFree = konicawc_free_uvd;
36665 + memset((void * )&cbTbl, 0, sizeof(cbTbl));
36666 + *(void **)&cbTbl.probe = konicawc_probe;
36667 + *(void **)&cbTbl.setupOnOpen = konicawc_setup_on_open;
36668 + *(void **)&cbTbl.processData = konicawc_process_isoc;
36669 + *(void **)&cbTbl.getFPS = konicawc_calculate_fps;
36670 + *(void **)&cbTbl.setVideoMode = konicawc_set_video_mode;
36671 + *(void **)&cbTbl.startDataPump = konicawc_start_data;
36672 + *(void **)&cbTbl.stopDataPump = konicawc_stop_data;
36673 + *(void **)&cbTbl.adjustPicture = konicawc_adjust_picture;
36674 + *(void **)&cbTbl.userFree = konicawc_free_uvd;
36675 return usbvideo_register(
36676 &cams,
36677 MAX_CAMERAS,
36678 diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
36679 index 803d3e4..c4d1b96 100644
36680 --- a/drivers/media/video/usbvideo/quickcam_messenger.c
36681 +++ b/drivers/media/video/usbvideo/quickcam_messenger.c
36682 @@ -89,7 +89,7 @@ static void qcm_register_input(struct qcm *cam, struct usb_device *dev)
36683 int error;
36684
36685 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
36686 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36687 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36688
36689 cam->input = input_dev = input_allocate_device();
36690 if (!input_dev) {
36691 diff --git a/drivers/media/video/usbvideo/ultracam.c b/drivers/media/video/usbvideo/ultracam.c
36692 index fbd1b63..292f9f0 100644
36693 --- a/drivers/media/video/usbvideo/ultracam.c
36694 +++ b/drivers/media/video/usbvideo/ultracam.c
36695 @@ -655,14 +655,14 @@ static int __init ultracam_init(void)
36696 {
36697 struct usbvideo_cb cbTbl;
36698 memset(&cbTbl, 0, sizeof(cbTbl));
36699 - cbTbl.probe = ultracam_probe;
36700 - cbTbl.setupOnOpen = ultracam_setup_on_open;
36701 - cbTbl.videoStart = ultracam_video_start;
36702 - cbTbl.videoStop = ultracam_video_stop;
36703 - cbTbl.processData = ultracam_ProcessIsocData;
36704 - cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36705 - cbTbl.adjustPicture = ultracam_adjust_picture;
36706 - cbTbl.getFPS = ultracam_calculate_fps;
36707 + *(void **)&cbTbl.probe = ultracam_probe;
36708 + *(void **)&cbTbl.setupOnOpen = ultracam_setup_on_open;
36709 + *(void **)&cbTbl.videoStart = ultracam_video_start;
36710 + *(void **)&cbTbl.videoStop = ultracam_video_stop;
36711 + *(void **)&cbTbl.processData = ultracam_ProcessIsocData;
36712 + *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36713 + *(void **)&cbTbl.adjustPicture = ultracam_adjust_picture;
36714 + *(void **)&cbTbl.getFPS = ultracam_calculate_fps;
36715 return usbvideo_register(
36716 &cams,
36717 MAX_CAMERAS,
36718 diff --git a/drivers/media/video/usbvideo/usbvideo.c b/drivers/media/video/usbvideo/usbvideo.c
36719 index dea8b32..34f6878 100644
36720 --- a/drivers/media/video/usbvideo/usbvideo.c
36721 +++ b/drivers/media/video/usbvideo/usbvideo.c
36722 @@ -697,15 +697,15 @@ int usbvideo_register(
36723 __func__, cams, base_size, num_cams);
36724
36725 /* Copy callbacks, apply defaults for those that are not set */
36726 - memmove(&cams->cb, cbTbl, sizeof(cams->cb));
36727 + memmove((void *)&cams->cb, cbTbl, sizeof(cams->cb));
36728 if (cams->cb.getFrame == NULL)
36729 - cams->cb.getFrame = usbvideo_GetFrame;
36730 + *(void **)&cams->cb.getFrame = usbvideo_GetFrame;
36731 if (cams->cb.disconnect == NULL)
36732 - cams->cb.disconnect = usbvideo_Disconnect;
36733 + *(void **)&cams->cb.disconnect = usbvideo_Disconnect;
36734 if (cams->cb.startDataPump == NULL)
36735 - cams->cb.startDataPump = usbvideo_StartDataPump;
36736 + *(void **)&cams->cb.startDataPump = usbvideo_StartDataPump;
36737 if (cams->cb.stopDataPump == NULL)
36738 - cams->cb.stopDataPump = usbvideo_StopDataPump;
36739 + *(void **)&cams->cb.stopDataPump = usbvideo_StopDataPump;
36740
36741 cams->num_cameras = num_cams;
36742 cams->cam = (struct uvd *) &cams[1];
36743 diff --git a/drivers/media/video/usbvideo/usbvideo.h b/drivers/media/video/usbvideo/usbvideo.h
36744 index c66985b..7fa143a 100644
36745 --- a/drivers/media/video/usbvideo/usbvideo.h
36746 +++ b/drivers/media/video/usbvideo/usbvideo.h
36747 @@ -268,7 +268,7 @@ struct usbvideo_cb {
36748 int (*startDataPump)(struct uvd *uvd);
36749 void (*stopDataPump)(struct uvd *uvd);
36750 int (*setVideoMode)(struct uvd *uvd, struct video_window *vw);
36751 -};
36752 +} __no_const;
36753
36754 struct usbvideo {
36755 int num_cameras; /* As allocated */
36756 diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
36757 index e0f91e4..37554ea 100644
36758 --- a/drivers/media/video/usbvision/usbvision-core.c
36759 +++ b/drivers/media/video/usbvision/usbvision-core.c
36760 @@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_compress(struct usb_usbvision *usbvision,
36761 unsigned char rv, gv, bv;
36762 static unsigned char *Y, *U, *V;
36763
36764 + pax_track_stack();
36765 +
36766 frame = usbvision->curFrame;
36767 imageSize = frame->frmwidth * frame->frmheight;
36768 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
36769 diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
36770 index 0d06e7c..3d17d24 100644
36771 --- a/drivers/media/video/v4l2-device.c
36772 +++ b/drivers/media/video/v4l2-device.c
36773 @@ -50,9 +50,9 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
36774 EXPORT_SYMBOL_GPL(v4l2_device_register);
36775
36776 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
36777 - atomic_t *instance)
36778 + atomic_unchecked_t *instance)
36779 {
36780 - int num = atomic_inc_return(instance) - 1;
36781 + int num = atomic_inc_return_unchecked(instance) - 1;
36782 int len = strlen(basename);
36783
36784 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
36785 diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
36786 index 032ebae..6a3532c 100644
36787 --- a/drivers/media/video/videobuf-dma-sg.c
36788 +++ b/drivers/media/video/videobuf-dma-sg.c
36789 @@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
36790 {
36791 struct videobuf_queue q;
36792
36793 + pax_track_stack();
36794 +
36795 /* Required to make generic handler to call __videobuf_alloc */
36796 q.int_ops = &sg_ops;
36797
36798 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
36799 index b6992b7..9fa7547 100644
36800 --- a/drivers/message/fusion/mptbase.c
36801 +++ b/drivers/message/fusion/mptbase.c
36802 @@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **start, off_t offset, int request, int *eo
36803 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
36804 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
36805
36806 +#ifdef CONFIG_GRKERNSEC_HIDESYM
36807 + len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
36808 + NULL, NULL);
36809 +#else
36810 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
36811 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
36812 +#endif
36813 +
36814 /*
36815 * Rounding UP to nearest 4-kB boundary here...
36816 */
36817 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
36818 index 83873e3..e360e9a 100644
36819 --- a/drivers/message/fusion/mptsas.c
36820 +++ b/drivers/message/fusion/mptsas.c
36821 @@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
36822 return 0;
36823 }
36824
36825 +static inline void
36826 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
36827 +{
36828 + if (phy_info->port_details) {
36829 + phy_info->port_details->rphy = rphy;
36830 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
36831 + ioc->name, rphy));
36832 + }
36833 +
36834 + if (rphy) {
36835 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
36836 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
36837 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
36838 + ioc->name, rphy, rphy->dev.release));
36839 + }
36840 +}
36841 +
36842 /* no mutex */
36843 static void
36844 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
36845 @@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
36846 return NULL;
36847 }
36848
36849 -static inline void
36850 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
36851 -{
36852 - if (phy_info->port_details) {
36853 - phy_info->port_details->rphy = rphy;
36854 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
36855 - ioc->name, rphy));
36856 - }
36857 -
36858 - if (rphy) {
36859 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
36860 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
36861 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
36862 - ioc->name, rphy, rphy->dev.release));
36863 - }
36864 -}
36865 -
36866 static inline struct sas_port *
36867 mptsas_get_port(struct mptsas_phyinfo *phy_info)
36868 {
36869 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
36870 index bd096ca..332cf76 100644
36871 --- a/drivers/message/fusion/mptscsih.c
36872 +++ b/drivers/message/fusion/mptscsih.c
36873 @@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
36874
36875 h = shost_priv(SChost);
36876
36877 - if (h) {
36878 - if (h->info_kbuf == NULL)
36879 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
36880 - return h->info_kbuf;
36881 - h->info_kbuf[0] = '\0';
36882 + if (!h)
36883 + return NULL;
36884
36885 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
36886 - h->info_kbuf[size-1] = '\0';
36887 - }
36888 + if (h->info_kbuf == NULL)
36889 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
36890 + return h->info_kbuf;
36891 + h->info_kbuf[0] = '\0';
36892 +
36893 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
36894 + h->info_kbuf[size-1] = '\0';
36895
36896 return h->info_kbuf;
36897 }
36898 diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
36899 index efba702..59b2c0f 100644
36900 --- a/drivers/message/i2o/i2o_config.c
36901 +++ b/drivers/message/i2o/i2o_config.c
36902 @@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned long arg)
36903 struct i2o_message *msg;
36904 unsigned int iop;
36905
36906 + pax_track_stack();
36907 +
36908 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
36909 return -EFAULT;
36910
36911 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
36912 index 7045c45..c07b170 100644
36913 --- a/drivers/message/i2o/i2o_proc.c
36914 +++ b/drivers/message/i2o/i2o_proc.c
36915 @@ -259,13 +259,6 @@ static char *scsi_devices[] = {
36916 "Array Controller Device"
36917 };
36918
36919 -static char *chtostr(u8 * chars, int n)
36920 -{
36921 - char tmp[256];
36922 - tmp[0] = 0;
36923 - return strncat(tmp, (char *)chars, n);
36924 -}
36925 -
36926 static int i2o_report_query_status(struct seq_file *seq, int block_status,
36927 char *group)
36928 {
36929 @@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
36930
36931 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
36932 seq_printf(seq, "%-#8x", ddm_table.module_id);
36933 - seq_printf(seq, "%-29s",
36934 - chtostr(ddm_table.module_name_version, 28));
36935 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
36936 seq_printf(seq, "%9d ", ddm_table.data_size);
36937 seq_printf(seq, "%8d", ddm_table.code_size);
36938
36939 @@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
36940
36941 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
36942 seq_printf(seq, "%-#8x", dst->module_id);
36943 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
36944 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
36945 + seq_printf(seq, "%-.28s", dst->module_name_version);
36946 + seq_printf(seq, "%-.8s", dst->date);
36947 seq_printf(seq, "%8d ", dst->module_size);
36948 seq_printf(seq, "%8d ", dst->mpb_size);
36949 seq_printf(seq, "0x%04x", dst->module_flags);
36950 @@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
36951 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
36952 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
36953 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
36954 - seq_printf(seq, "Vendor info : %s\n",
36955 - chtostr((u8 *) (work32 + 2), 16));
36956 - seq_printf(seq, "Product info : %s\n",
36957 - chtostr((u8 *) (work32 + 6), 16));
36958 - seq_printf(seq, "Description : %s\n",
36959 - chtostr((u8 *) (work32 + 10), 16));
36960 - seq_printf(seq, "Product rev. : %s\n",
36961 - chtostr((u8 *) (work32 + 14), 8));
36962 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
36963 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
36964 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
36965 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
36966
36967 seq_printf(seq, "Serial number : ");
36968 print_serial_number(seq, (u8 *) (work32 + 16),
36969 @@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
36970 }
36971
36972 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
36973 - seq_printf(seq, "Module name : %s\n",
36974 - chtostr(result.module_name, 24));
36975 - seq_printf(seq, "Module revision : %s\n",
36976 - chtostr(result.module_rev, 8));
36977 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
36978 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
36979
36980 seq_printf(seq, "Serial number : ");
36981 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
36982 @@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
36983 return 0;
36984 }
36985
36986 - seq_printf(seq, "Device name : %s\n",
36987 - chtostr(result.device_name, 64));
36988 - seq_printf(seq, "Service name : %s\n",
36989 - chtostr(result.service_name, 64));
36990 - seq_printf(seq, "Physical name : %s\n",
36991 - chtostr(result.physical_location, 64));
36992 - seq_printf(seq, "Instance number : %s\n",
36993 - chtostr(result.instance_number, 4));
36994 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
36995 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
36996 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
36997 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
36998
36999 return 0;
37000 }
37001 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
37002 index 27cf4af..b1205b8 100644
37003 --- a/drivers/message/i2o/iop.c
37004 +++ b/drivers/message/i2o/iop.c
37005 @@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
37006
37007 spin_lock_irqsave(&c->context_list_lock, flags);
37008
37009 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
37010 - atomic_inc(&c->context_list_counter);
37011 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
37012 + atomic_inc_unchecked(&c->context_list_counter);
37013
37014 - entry->context = atomic_read(&c->context_list_counter);
37015 + entry->context = atomic_read_unchecked(&c->context_list_counter);
37016
37017 list_add(&entry->list, &c->context_list);
37018
37019 @@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
37020
37021 #if BITS_PER_LONG == 64
37022 spin_lock_init(&c->context_list_lock);
37023 - atomic_set(&c->context_list_counter, 0);
37024 + atomic_set_unchecked(&c->context_list_counter, 0);
37025 INIT_LIST_HEAD(&c->context_list);
37026 #endif
37027
37028 diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
37029 index 78e3e85..66c9a0d 100644
37030 --- a/drivers/mfd/ab3100-core.c
37031 +++ b/drivers/mfd/ab3100-core.c
37032 @@ -777,7 +777,7 @@ struct ab_family_id {
37033 char *name;
37034 };
37035
37036 -static const struct ab_family_id ids[] __initdata = {
37037 +static const struct ab_family_id ids[] __initconst = {
37038 /* AB3100 */
37039 {
37040 .id = 0xc0,
37041 diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
37042 index 8d8c932..8104515 100644
37043 --- a/drivers/mfd/wm8350-i2c.c
37044 +++ b/drivers/mfd/wm8350-i2c.c
37045 @@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg,
37046 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
37047 int ret;
37048
37049 + pax_track_stack();
37050 +
37051 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
37052 return -EINVAL;
37053
37054 diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
37055 index e4ff50b..4cc3f04 100644
37056 --- a/drivers/misc/kgdbts.c
37057 +++ b/drivers/misc/kgdbts.c
37058 @@ -118,7 +118,7 @@
37059 } while (0)
37060 #define MAX_CONFIG_LEN 40
37061
37062 -static struct kgdb_io kgdbts_io_ops;
37063 +static const struct kgdb_io kgdbts_io_ops;
37064 static char get_buf[BUFMAX];
37065 static int get_buf_cnt;
37066 static char put_buf[BUFMAX];
37067 @@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void)
37068 module_put(THIS_MODULE);
37069 }
37070
37071 -static struct kgdb_io kgdbts_io_ops = {
37072 +static const struct kgdb_io kgdbts_io_ops = {
37073 .name = "kgdbts",
37074 .read_char = kgdbts_get_char,
37075 .write_char = kgdbts_put_char,
37076 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
37077 index 37e7cfc..67cfb76 100644
37078 --- a/drivers/misc/sgi-gru/gruhandles.c
37079 +++ b/drivers/misc/sgi-gru/gruhandles.c
37080 @@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistics[mcsop_last];
37081
37082 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
37083 {
37084 - atomic_long_inc(&mcs_op_statistics[op].count);
37085 - atomic_long_add(clks, &mcs_op_statistics[op].total);
37086 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
37087 + atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
37088 if (mcs_op_statistics[op].max < clks)
37089 mcs_op_statistics[op].max = clks;
37090 }
37091 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
37092 index 3f2375c..467c6e6 100644
37093 --- a/drivers/misc/sgi-gru/gruprocfs.c
37094 +++ b/drivers/misc/sgi-gru/gruprocfs.c
37095 @@ -32,9 +32,9 @@
37096
37097 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
37098
37099 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
37100 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
37101 {
37102 - unsigned long val = atomic_long_read(v);
37103 + unsigned long val = atomic_long_read_unchecked(v);
37104
37105 if (val)
37106 seq_printf(s, "%16lu %s\n", val, id);
37107 @@ -136,8 +136,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
37108 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
37109
37110 for (op = 0; op < mcsop_last; op++) {
37111 - count = atomic_long_read(&mcs_op_statistics[op].count);
37112 - total = atomic_long_read(&mcs_op_statistics[op].total);
37113 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
37114 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
37115 max = mcs_op_statistics[op].max;
37116 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
37117 count ? total / count : 0, max);
37118 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
37119 index 46990bc..4a251b5 100644
37120 --- a/drivers/misc/sgi-gru/grutables.h
37121 +++ b/drivers/misc/sgi-gru/grutables.h
37122 @@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
37123 * GRU statistics.
37124 */
37125 struct gru_stats_s {
37126 - atomic_long_t vdata_alloc;
37127 - atomic_long_t vdata_free;
37128 - atomic_long_t gts_alloc;
37129 - atomic_long_t gts_free;
37130 - atomic_long_t vdata_double_alloc;
37131 - atomic_long_t gts_double_allocate;
37132 - atomic_long_t assign_context;
37133 - atomic_long_t assign_context_failed;
37134 - atomic_long_t free_context;
37135 - atomic_long_t load_user_context;
37136 - atomic_long_t load_kernel_context;
37137 - atomic_long_t lock_kernel_context;
37138 - atomic_long_t unlock_kernel_context;
37139 - atomic_long_t steal_user_context;
37140 - atomic_long_t steal_kernel_context;
37141 - atomic_long_t steal_context_failed;
37142 - atomic_long_t nopfn;
37143 - atomic_long_t break_cow;
37144 - atomic_long_t asid_new;
37145 - atomic_long_t asid_next;
37146 - atomic_long_t asid_wrap;
37147 - atomic_long_t asid_reuse;
37148 - atomic_long_t intr;
37149 - atomic_long_t intr_mm_lock_failed;
37150 - atomic_long_t call_os;
37151 - atomic_long_t call_os_offnode_reference;
37152 - atomic_long_t call_os_check_for_bug;
37153 - atomic_long_t call_os_wait_queue;
37154 - atomic_long_t user_flush_tlb;
37155 - atomic_long_t user_unload_context;
37156 - atomic_long_t user_exception;
37157 - atomic_long_t set_context_option;
37158 - atomic_long_t migrate_check;
37159 - atomic_long_t migrated_retarget;
37160 - atomic_long_t migrated_unload;
37161 - atomic_long_t migrated_unload_delay;
37162 - atomic_long_t migrated_nopfn_retarget;
37163 - atomic_long_t migrated_nopfn_unload;
37164 - atomic_long_t tlb_dropin;
37165 - atomic_long_t tlb_dropin_fail_no_asid;
37166 - atomic_long_t tlb_dropin_fail_upm;
37167 - atomic_long_t tlb_dropin_fail_invalid;
37168 - atomic_long_t tlb_dropin_fail_range_active;
37169 - atomic_long_t tlb_dropin_fail_idle;
37170 - atomic_long_t tlb_dropin_fail_fmm;
37171 - atomic_long_t tlb_dropin_fail_no_exception;
37172 - atomic_long_t tlb_dropin_fail_no_exception_war;
37173 - atomic_long_t tfh_stale_on_fault;
37174 - atomic_long_t mmu_invalidate_range;
37175 - atomic_long_t mmu_invalidate_page;
37176 - atomic_long_t mmu_clear_flush_young;
37177 - atomic_long_t flush_tlb;
37178 - atomic_long_t flush_tlb_gru;
37179 - atomic_long_t flush_tlb_gru_tgh;
37180 - atomic_long_t flush_tlb_gru_zero_asid;
37181 + atomic_long_unchecked_t vdata_alloc;
37182 + atomic_long_unchecked_t vdata_free;
37183 + atomic_long_unchecked_t gts_alloc;
37184 + atomic_long_unchecked_t gts_free;
37185 + atomic_long_unchecked_t vdata_double_alloc;
37186 + atomic_long_unchecked_t gts_double_allocate;
37187 + atomic_long_unchecked_t assign_context;
37188 + atomic_long_unchecked_t assign_context_failed;
37189 + atomic_long_unchecked_t free_context;
37190 + atomic_long_unchecked_t load_user_context;
37191 + atomic_long_unchecked_t load_kernel_context;
37192 + atomic_long_unchecked_t lock_kernel_context;
37193 + atomic_long_unchecked_t unlock_kernel_context;
37194 + atomic_long_unchecked_t steal_user_context;
37195 + atomic_long_unchecked_t steal_kernel_context;
37196 + atomic_long_unchecked_t steal_context_failed;
37197 + atomic_long_unchecked_t nopfn;
37198 + atomic_long_unchecked_t break_cow;
37199 + atomic_long_unchecked_t asid_new;
37200 + atomic_long_unchecked_t asid_next;
37201 + atomic_long_unchecked_t asid_wrap;
37202 + atomic_long_unchecked_t asid_reuse;
37203 + atomic_long_unchecked_t intr;
37204 + atomic_long_unchecked_t intr_mm_lock_failed;
37205 + atomic_long_unchecked_t call_os;
37206 + atomic_long_unchecked_t call_os_offnode_reference;
37207 + atomic_long_unchecked_t call_os_check_for_bug;
37208 + atomic_long_unchecked_t call_os_wait_queue;
37209 + atomic_long_unchecked_t user_flush_tlb;
37210 + atomic_long_unchecked_t user_unload_context;
37211 + atomic_long_unchecked_t user_exception;
37212 + atomic_long_unchecked_t set_context_option;
37213 + atomic_long_unchecked_t migrate_check;
37214 + atomic_long_unchecked_t migrated_retarget;
37215 + atomic_long_unchecked_t migrated_unload;
37216 + atomic_long_unchecked_t migrated_unload_delay;
37217 + atomic_long_unchecked_t migrated_nopfn_retarget;
37218 + atomic_long_unchecked_t migrated_nopfn_unload;
37219 + atomic_long_unchecked_t tlb_dropin;
37220 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
37221 + atomic_long_unchecked_t tlb_dropin_fail_upm;
37222 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
37223 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
37224 + atomic_long_unchecked_t tlb_dropin_fail_idle;
37225 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
37226 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
37227 + atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
37228 + atomic_long_unchecked_t tfh_stale_on_fault;
37229 + atomic_long_unchecked_t mmu_invalidate_range;
37230 + atomic_long_unchecked_t mmu_invalidate_page;
37231 + atomic_long_unchecked_t mmu_clear_flush_young;
37232 + atomic_long_unchecked_t flush_tlb;
37233 + atomic_long_unchecked_t flush_tlb_gru;
37234 + atomic_long_unchecked_t flush_tlb_gru_tgh;
37235 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
37236
37237 - atomic_long_t copy_gpa;
37238 + atomic_long_unchecked_t copy_gpa;
37239
37240 - atomic_long_t mesq_receive;
37241 - atomic_long_t mesq_receive_none;
37242 - atomic_long_t mesq_send;
37243 - atomic_long_t mesq_send_failed;
37244 - atomic_long_t mesq_noop;
37245 - atomic_long_t mesq_send_unexpected_error;
37246 - atomic_long_t mesq_send_lb_overflow;
37247 - atomic_long_t mesq_send_qlimit_reached;
37248 - atomic_long_t mesq_send_amo_nacked;
37249 - atomic_long_t mesq_send_put_nacked;
37250 - atomic_long_t mesq_qf_not_full;
37251 - atomic_long_t mesq_qf_locked;
37252 - atomic_long_t mesq_qf_noop_not_full;
37253 - atomic_long_t mesq_qf_switch_head_failed;
37254 - atomic_long_t mesq_qf_unexpected_error;
37255 - atomic_long_t mesq_noop_unexpected_error;
37256 - atomic_long_t mesq_noop_lb_overflow;
37257 - atomic_long_t mesq_noop_qlimit_reached;
37258 - atomic_long_t mesq_noop_amo_nacked;
37259 - atomic_long_t mesq_noop_put_nacked;
37260 + atomic_long_unchecked_t mesq_receive;
37261 + atomic_long_unchecked_t mesq_receive_none;
37262 + atomic_long_unchecked_t mesq_send;
37263 + atomic_long_unchecked_t mesq_send_failed;
37264 + atomic_long_unchecked_t mesq_noop;
37265 + atomic_long_unchecked_t mesq_send_unexpected_error;
37266 + atomic_long_unchecked_t mesq_send_lb_overflow;
37267 + atomic_long_unchecked_t mesq_send_qlimit_reached;
37268 + atomic_long_unchecked_t mesq_send_amo_nacked;
37269 + atomic_long_unchecked_t mesq_send_put_nacked;
37270 + atomic_long_unchecked_t mesq_qf_not_full;
37271 + atomic_long_unchecked_t mesq_qf_locked;
37272 + atomic_long_unchecked_t mesq_qf_noop_not_full;
37273 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
37274 + atomic_long_unchecked_t mesq_qf_unexpected_error;
37275 + atomic_long_unchecked_t mesq_noop_unexpected_error;
37276 + atomic_long_unchecked_t mesq_noop_lb_overflow;
37277 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
37278 + atomic_long_unchecked_t mesq_noop_amo_nacked;
37279 + atomic_long_unchecked_t mesq_noop_put_nacked;
37280
37281 };
37282
37283 @@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
37284 cchop_deallocate, tghop_invalidate, mcsop_last};
37285
37286 struct mcs_op_statistic {
37287 - atomic_long_t count;
37288 - atomic_long_t total;
37289 + atomic_long_unchecked_t count;
37290 + atomic_long_unchecked_t total;
37291 unsigned long max;
37292 };
37293
37294 @@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
37295
37296 #define STAT(id) do { \
37297 if (gru_options & OPT_STATS) \
37298 - atomic_long_inc(&gru_stats.id); \
37299 + atomic_long_inc_unchecked(&gru_stats.id); \
37300 } while (0)
37301
37302 #ifdef CONFIG_SGI_GRU_DEBUG
37303 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
37304 index 2275126..12a9dbfb 100644
37305 --- a/drivers/misc/sgi-xp/xp.h
37306 +++ b/drivers/misc/sgi-xp/xp.h
37307 @@ -289,7 +289,7 @@ struct xpc_interface {
37308 xpc_notify_func, void *);
37309 void (*received) (short, int, void *);
37310 enum xp_retval (*partid_to_nasids) (short, void *);
37311 -};
37312 +} __no_const;
37313
37314 extern struct xpc_interface xpc_interface;
37315
37316 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
37317 index b94d5f7..7f494c5 100644
37318 --- a/drivers/misc/sgi-xp/xpc.h
37319 +++ b/drivers/misc/sgi-xp/xpc.h
37320 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
37321 void (*received_payload) (struct xpc_channel *, void *);
37322 void (*notify_senders_of_disconnect) (struct xpc_channel *);
37323 };
37324 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
37325
37326 /* struct xpc_partition act_state values (for XPC HB) */
37327
37328 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
37329 /* found in xpc_main.c */
37330 extern struct device *xpc_part;
37331 extern struct device *xpc_chan;
37332 -extern struct xpc_arch_operations xpc_arch_ops;
37333 +extern xpc_arch_operations_no_const xpc_arch_ops;
37334 extern int xpc_disengage_timelimit;
37335 extern int xpc_disengage_timedout;
37336 extern int xpc_activate_IRQ_rcvd;
37337 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
37338 index fd3688a..7e211a4 100644
37339 --- a/drivers/misc/sgi-xp/xpc_main.c
37340 +++ b/drivers/misc/sgi-xp/xpc_main.c
37341 @@ -169,7 +169,7 @@ static struct notifier_block xpc_die_notifier = {
37342 .notifier_call = xpc_system_die,
37343 };
37344
37345 -struct xpc_arch_operations xpc_arch_ops;
37346 +xpc_arch_operations_no_const xpc_arch_ops;
37347
37348 /*
37349 * Timer function to enforce the timelimit on the partition disengage.
37350 diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
37351 index 8b70e03..700bda6 100644
37352 --- a/drivers/misc/sgi-xp/xpc_sn2.c
37353 +++ b/drivers/misc/sgi-xp/xpc_sn2.c
37354 @@ -2350,7 +2350,7 @@ xpc_received_payload_sn2(struct xpc_channel *ch, void *payload)
37355 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
37356 }
37357
37358 -static struct xpc_arch_operations xpc_arch_ops_sn2 = {
37359 +static const struct xpc_arch_operations xpc_arch_ops_sn2 = {
37360 .setup_partitions = xpc_setup_partitions_sn2,
37361 .teardown_partitions = xpc_teardown_partitions_sn2,
37362 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
37363 @@ -2413,7 +2413,9 @@ xpc_init_sn2(void)
37364 int ret;
37365 size_t buf_size;
37366
37367 - xpc_arch_ops = xpc_arch_ops_sn2;
37368 + pax_open_kernel();
37369 + memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_sn2, sizeof(xpc_arch_ops_sn2));
37370 + pax_close_kernel();
37371
37372 if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
37373 dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
37374 diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
37375 index 8e08d71..7cb8c9b 100644
37376 --- a/drivers/misc/sgi-xp/xpc_uv.c
37377 +++ b/drivers/misc/sgi-xp/xpc_uv.c
37378 @@ -1669,7 +1669,7 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
37379 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
37380 }
37381
37382 -static struct xpc_arch_operations xpc_arch_ops_uv = {
37383 +static const struct xpc_arch_operations xpc_arch_ops_uv = {
37384 .setup_partitions = xpc_setup_partitions_uv,
37385 .teardown_partitions = xpc_teardown_partitions_uv,
37386 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
37387 @@ -1729,7 +1729,9 @@ static struct xpc_arch_operations xpc_arch_ops_uv = {
37388 int
37389 xpc_init_uv(void)
37390 {
37391 - xpc_arch_ops = xpc_arch_ops_uv;
37392 + pax_open_kernel();
37393 + memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_uv, sizeof(xpc_arch_ops_uv));
37394 + pax_close_kernel();
37395
37396 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
37397 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
37398 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
37399 index 6fd20b42..650efe3 100644
37400 --- a/drivers/mmc/host/sdhci-pci.c
37401 +++ b/drivers/mmc/host/sdhci-pci.c
37402 @@ -297,7 +297,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
37403 .probe = via_probe,
37404 };
37405
37406 -static const struct pci_device_id pci_ids[] __devinitdata = {
37407 +static const struct pci_device_id pci_ids[] __devinitconst = {
37408 {
37409 .vendor = PCI_VENDOR_ID_RICOH,
37410 .device = PCI_DEVICE_ID_RICOH_R5C822,
37411 diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
37412 index e7563a9..5f90ce5 100644
37413 --- a/drivers/mtd/chips/cfi_cmdset_0001.c
37414 +++ b/drivers/mtd/chips/cfi_cmdset_0001.c
37415 @@ -743,6 +743,8 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
37416 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
37417 unsigned long timeo = jiffies + HZ;
37418
37419 + pax_track_stack();
37420 +
37421 /* Prevent setting state FL_SYNCING for chip in suspended state. */
37422 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
37423 goto sleep;
37424 @@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
37425 unsigned long initial_adr;
37426 int initial_len = len;
37427
37428 + pax_track_stack();
37429 +
37430 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
37431 adr += chip->start;
37432 initial_adr = adr;
37433 @@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
37434 int retries = 3;
37435 int ret;
37436
37437 + pax_track_stack();
37438 +
37439 adr += chip->start;
37440
37441 retry:
37442 diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
37443 index 0667a67..3ab97ed 100644
37444 --- a/drivers/mtd/chips/cfi_cmdset_0020.c
37445 +++ b/drivers/mtd/chips/cfi_cmdset_0020.c
37446 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
37447 unsigned long cmd_addr;
37448 struct cfi_private *cfi = map->fldrv_priv;
37449
37450 + pax_track_stack();
37451 +
37452 adr += chip->start;
37453
37454 /* Ensure cmd read/writes are aligned. */
37455 @@ -428,6 +430,8 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
37456 DECLARE_WAITQUEUE(wait, current);
37457 int wbufsize, z;
37458
37459 + pax_track_stack();
37460 +
37461 /* M58LW064A requires bus alignment for buffer wriets -- saw */
37462 if (adr & (map_bankwidth(map)-1))
37463 return -EINVAL;
37464 @@ -742,6 +746,8 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
37465 DECLARE_WAITQUEUE(wait, current);
37466 int ret = 0;
37467
37468 + pax_track_stack();
37469 +
37470 adr += chip->start;
37471
37472 /* Let's determine this according to the interleave only once */
37473 @@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un
37474 unsigned long timeo = jiffies + HZ;
37475 DECLARE_WAITQUEUE(wait, current);
37476
37477 + pax_track_stack();
37478 +
37479 adr += chip->start;
37480
37481 /* Let's determine this according to the interleave only once */
37482 @@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip,
37483 unsigned long timeo = jiffies + HZ;
37484 DECLARE_WAITQUEUE(wait, current);
37485
37486 + pax_track_stack();
37487 +
37488 adr += chip->start;
37489
37490 /* Let's determine this according to the interleave only once */
37491 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
37492 index 5bf5f46..c5de373 100644
37493 --- a/drivers/mtd/devices/doc2000.c
37494 +++ b/drivers/mtd/devices/doc2000.c
37495 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
37496
37497 /* The ECC will not be calculated correctly if less than 512 is written */
37498 /* DBB-
37499 - if (len != 0x200 && eccbuf)
37500 + if (len != 0x200)
37501 printk(KERN_WARNING
37502 "ECC needs a full sector write (adr: %lx size %lx)\n",
37503 (long) to, (long) len);
37504 diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
37505 index 0990f78..bb4e8a4 100644
37506 --- a/drivers/mtd/devices/doc2001.c
37507 +++ b/drivers/mtd/devices/doc2001.c
37508 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
37509 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
37510
37511 /* Don't allow read past end of device */
37512 - if (from >= this->totlen)
37513 + if (from >= this->totlen || !len)
37514 return -EINVAL;
37515
37516 /* Don't allow a single read to cross a 512-byte block boundary */
37517 diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
37518 index e56d6b4..f07e6cf 100644
37519 --- a/drivers/mtd/ftl.c
37520 +++ b/drivers/mtd/ftl.c
37521 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
37522 loff_t offset;
37523 uint16_t srcunitswap = cpu_to_le16(srcunit);
37524
37525 + pax_track_stack();
37526 +
37527 eun = &part->EUNInfo[srcunit];
37528 xfer = &part->XferInfo[xferunit];
37529 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
37530 diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
37531 index 8aca552..146446e 100755
37532 --- a/drivers/mtd/inftlcore.c
37533 +++ b/drivers/mtd/inftlcore.c
37534 @@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
37535 struct inftl_oob oob;
37536 size_t retlen;
37537
37538 + pax_track_stack();
37539 +
37540 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
37541 "pending=%d)\n", inftl, thisVUC, pendingblock);
37542
37543 diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
37544 index 32e82ae..ed50953 100644
37545 --- a/drivers/mtd/inftlmount.c
37546 +++ b/drivers/mtd/inftlmount.c
37547 @@ -54,6 +54,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
37548 struct INFTLPartition *ip;
37549 size_t retlen;
37550
37551 + pax_track_stack();
37552 +
37553 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
37554
37555 /*
37556 diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
37557 index 79bf40f..fe5f8fd 100644
37558 --- a/drivers/mtd/lpddr/qinfo_probe.c
37559 +++ b/drivers/mtd/lpddr/qinfo_probe.c
37560 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
37561 {
37562 map_word pfow_val[4];
37563
37564 + pax_track_stack();
37565 +
37566 /* Check identification string */
37567 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
37568 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
37569 diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
37570 index 726a1b8..f46b460 100644
37571 --- a/drivers/mtd/mtdchar.c
37572 +++ b/drivers/mtd/mtdchar.c
37573 @@ -461,6 +461,8 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
37574 u_long size;
37575 struct mtd_info_user info;
37576
37577 + pax_track_stack();
37578 +
37579 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
37580
37581 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
37582 diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
37583 index 1002e18..26d82d5 100644
37584 --- a/drivers/mtd/nftlcore.c
37585 +++ b/drivers/mtd/nftlcore.c
37586 @@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
37587 int inplace = 1;
37588 size_t retlen;
37589
37590 + pax_track_stack();
37591 +
37592 memset(BlockMap, 0xff, sizeof(BlockMap));
37593 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
37594
37595 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
37596 index 8b22b18..6fada85 100644
37597 --- a/drivers/mtd/nftlmount.c
37598 +++ b/drivers/mtd/nftlmount.c
37599 @@ -23,6 +23,7 @@
37600 #include <asm/errno.h>
37601 #include <linux/delay.h>
37602 #include <linux/slab.h>
37603 +#include <linux/sched.h>
37604 #include <linux/mtd/mtd.h>
37605 #include <linux/mtd/nand.h>
37606 #include <linux/mtd/nftl.h>
37607 @@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLrecord *nftl)
37608 struct mtd_info *mtd = nftl->mbd.mtd;
37609 unsigned int i;
37610
37611 + pax_track_stack();
37612 +
37613 /* Assume logical EraseSize == physical erasesize for starting the scan.
37614 We'll sort it out later if we find a MediaHeader which says otherwise */
37615 /* Actually, we won't. The new DiskOnChip driver has already scanned
37616 diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
37617 index 14cec04..d775b87 100644
37618 --- a/drivers/mtd/ubi/build.c
37619 +++ b/drivers/mtd/ubi/build.c
37620 @@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
37621 static int __init bytes_str_to_int(const char *str)
37622 {
37623 char *endp;
37624 - unsigned long result;
37625 + unsigned long result, scale = 1;
37626
37627 result = simple_strtoul(str, &endp, 0);
37628 if (str == endp || result >= INT_MAX) {
37629 @@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const char *str)
37630
37631 switch (*endp) {
37632 case 'G':
37633 - result *= 1024;
37634 + scale *= 1024;
37635 case 'M':
37636 - result *= 1024;
37637 + scale *= 1024;
37638 case 'K':
37639 - result *= 1024;
37640 + scale *= 1024;
37641 if (endp[1] == 'i' && endp[2] == 'B')
37642 endp += 2;
37643 case '\0':
37644 @@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const char *str)
37645 return -EINVAL;
37646 }
37647
37648 - return result;
37649 + if ((intoverflow_t)result*scale >= INT_MAX) {
37650 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
37651 + str);
37652 + return -EINVAL;
37653 + }
37654 +
37655 + return result*scale;
37656 }
37657
37658 /**
37659 diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
37660 index ab68886..ca405e8 100644
37661 --- a/drivers/net/atlx/atl2.c
37662 +++ b/drivers/net/atlx/atl2.c
37663 @@ -2845,7 +2845,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
37664 */
37665
37666 #define ATL2_PARAM(X, desc) \
37667 - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
37668 + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
37669 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
37670 MODULE_PARM_DESC(X, desc);
37671 #else
37672 diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
37673 index 4874b2b..67f8526 100644
37674 --- a/drivers/net/bnx2.c
37675 +++ b/drivers/net/bnx2.c
37676 @@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
37677 int rc = 0;
37678 u32 magic, csum;
37679
37680 + pax_track_stack();
37681 +
37682 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
37683 goto test_nvram_done;
37684
37685 diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
37686 index fd3eb07..8a6978d 100644
37687 --- a/drivers/net/cxgb3/l2t.h
37688 +++ b/drivers/net/cxgb3/l2t.h
37689 @@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
37690 */
37691 struct l2t_skb_cb {
37692 arp_failure_handler_func arp_failure_handler;
37693 -};
37694 +} __no_const;
37695
37696 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
37697
37698 diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
37699 index 032cfe0..411af379 100644
37700 --- a/drivers/net/cxgb3/t3_hw.c
37701 +++ b/drivers/net/cxgb3/t3_hw.c
37702 @@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
37703 int i, addr, ret;
37704 struct t3_vpd vpd;
37705
37706 + pax_track_stack();
37707 +
37708 /*
37709 * Card information is normally at VPD_BASE but some early cards had
37710 * it at 0.
37711 diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
37712 index d1e0563..b9e129c 100644
37713 --- a/drivers/net/e1000e/82571.c
37714 +++ b/drivers/net/e1000e/82571.c
37715 @@ -212,7 +212,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
37716 {
37717 struct e1000_hw *hw = &adapter->hw;
37718 struct e1000_mac_info *mac = &hw->mac;
37719 - struct e1000_mac_operations *func = &mac->ops;
37720 + e1000_mac_operations_no_const *func = &mac->ops;
37721 u32 swsm = 0;
37722 u32 swsm2 = 0;
37723 bool force_clear_smbi = false;
37724 @@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
37725 temp = er32(ICRXDMTC);
37726 }
37727
37728 -static struct e1000_mac_operations e82571_mac_ops = {
37729 +static const struct e1000_mac_operations e82571_mac_ops = {
37730 /* .check_mng_mode: mac type dependent */
37731 /* .check_for_link: media type dependent */
37732 .id_led_init = e1000e_id_led_init,
37733 @@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
37734 .setup_led = e1000e_setup_led_generic,
37735 };
37736
37737 -static struct e1000_phy_operations e82_phy_ops_igp = {
37738 +static const struct e1000_phy_operations e82_phy_ops_igp = {
37739 .acquire_phy = e1000_get_hw_semaphore_82571,
37740 .check_reset_block = e1000e_check_reset_block_generic,
37741 .commit_phy = NULL,
37742 @@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_phy_ops_igp = {
37743 .cfg_on_link_up = NULL,
37744 };
37745
37746 -static struct e1000_phy_operations e82_phy_ops_m88 = {
37747 +static const struct e1000_phy_operations e82_phy_ops_m88 = {
37748 .acquire_phy = e1000_get_hw_semaphore_82571,
37749 .check_reset_block = e1000e_check_reset_block_generic,
37750 .commit_phy = e1000e_phy_sw_reset,
37751 @@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_phy_ops_m88 = {
37752 .cfg_on_link_up = NULL,
37753 };
37754
37755 -static struct e1000_phy_operations e82_phy_ops_bm = {
37756 +static const struct e1000_phy_operations e82_phy_ops_bm = {
37757 .acquire_phy = e1000_get_hw_semaphore_82571,
37758 .check_reset_block = e1000e_check_reset_block_generic,
37759 .commit_phy = e1000e_phy_sw_reset,
37760 @@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_phy_ops_bm = {
37761 .cfg_on_link_up = NULL,
37762 };
37763
37764 -static struct e1000_nvm_operations e82571_nvm_ops = {
37765 +static const struct e1000_nvm_operations e82571_nvm_ops = {
37766 .acquire_nvm = e1000_acquire_nvm_82571,
37767 .read_nvm = e1000e_read_nvm_eerd,
37768 .release_nvm = e1000_release_nvm_82571,
37769 diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
37770 index 47db9bd..fa58ccd 100644
37771 --- a/drivers/net/e1000e/e1000.h
37772 +++ b/drivers/net/e1000e/e1000.h
37773 @@ -375,9 +375,9 @@ struct e1000_info {
37774 u32 pba;
37775 u32 max_hw_frame_size;
37776 s32 (*get_variants)(struct e1000_adapter *);
37777 - struct e1000_mac_operations *mac_ops;
37778 - struct e1000_phy_operations *phy_ops;
37779 - struct e1000_nvm_operations *nvm_ops;
37780 + const struct e1000_mac_operations *mac_ops;
37781 + const struct e1000_phy_operations *phy_ops;
37782 + const struct e1000_nvm_operations *nvm_ops;
37783 };
37784
37785 /* hardware capability, feature, and workaround flags */
37786 diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
37787 index ae5d736..e9a93a1 100644
37788 --- a/drivers/net/e1000e/es2lan.c
37789 +++ b/drivers/net/e1000e/es2lan.c
37790 @@ -207,7 +207,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
37791 {
37792 struct e1000_hw *hw = &adapter->hw;
37793 struct e1000_mac_info *mac = &hw->mac;
37794 - struct e1000_mac_operations *func = &mac->ops;
37795 + e1000_mac_operations_no_const *func = &mac->ops;
37796
37797 /* Set media type */
37798 switch (adapter->pdev->device) {
37799 @@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
37800 temp = er32(ICRXDMTC);
37801 }
37802
37803 -static struct e1000_mac_operations es2_mac_ops = {
37804 +static const struct e1000_mac_operations es2_mac_ops = {
37805 .id_led_init = e1000e_id_led_init,
37806 .check_mng_mode = e1000e_check_mng_mode_generic,
37807 /* check_for_link dependent on media type */
37808 @@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_mac_ops = {
37809 .setup_led = e1000e_setup_led_generic,
37810 };
37811
37812 -static struct e1000_phy_operations es2_phy_ops = {
37813 +static const struct e1000_phy_operations es2_phy_ops = {
37814 .acquire_phy = e1000_acquire_phy_80003es2lan,
37815 .check_reset_block = e1000e_check_reset_block_generic,
37816 .commit_phy = e1000e_phy_sw_reset,
37817 @@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_phy_ops = {
37818 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
37819 };
37820
37821 -static struct e1000_nvm_operations es2_nvm_ops = {
37822 +static const struct e1000_nvm_operations es2_nvm_ops = {
37823 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
37824 .read_nvm = e1000e_read_nvm_eerd,
37825 .release_nvm = e1000_release_nvm_80003es2lan,
37826 diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
37827 index 11f3b7c..6381887 100644
37828 --- a/drivers/net/e1000e/hw.h
37829 +++ b/drivers/net/e1000e/hw.h
37830 @@ -753,6 +753,7 @@ struct e1000_mac_operations {
37831 s32 (*setup_physical_interface)(struct e1000_hw *);
37832 s32 (*setup_led)(struct e1000_hw *);
37833 };
37834 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
37835
37836 /* Function pointers for the PHY. */
37837 struct e1000_phy_operations {
37838 @@ -774,6 +775,7 @@ struct e1000_phy_operations {
37839 s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
37840 s32 (*cfg_on_link_up)(struct e1000_hw *);
37841 };
37842 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
37843
37844 /* Function pointers for the NVM. */
37845 struct e1000_nvm_operations {
37846 @@ -785,9 +787,10 @@ struct e1000_nvm_operations {
37847 s32 (*validate_nvm)(struct e1000_hw *);
37848 s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
37849 };
37850 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
37851
37852 struct e1000_mac_info {
37853 - struct e1000_mac_operations ops;
37854 + e1000_mac_operations_no_const ops;
37855
37856 u8 addr[6];
37857 u8 perm_addr[6];
37858 @@ -823,7 +826,7 @@ struct e1000_mac_info {
37859 };
37860
37861 struct e1000_phy_info {
37862 - struct e1000_phy_operations ops;
37863 + e1000_phy_operations_no_const ops;
37864
37865 enum e1000_phy_type type;
37866
37867 @@ -857,7 +860,7 @@ struct e1000_phy_info {
37868 };
37869
37870 struct e1000_nvm_info {
37871 - struct e1000_nvm_operations ops;
37872 + e1000_nvm_operations_no_const ops;
37873
37874 enum e1000_nvm_type type;
37875 enum e1000_nvm_override override;
37876 diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
37877 index de39f9a..e28d3e0 100644
37878 --- a/drivers/net/e1000e/ich8lan.c
37879 +++ b/drivers/net/e1000e/ich8lan.c
37880 @@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
37881 }
37882 }
37883
37884 -static struct e1000_mac_operations ich8_mac_ops = {
37885 +static const struct e1000_mac_operations ich8_mac_ops = {
37886 .id_led_init = e1000e_id_led_init,
37887 .check_mng_mode = e1000_check_mng_mode_ich8lan,
37888 .check_for_link = e1000_check_for_copper_link_ich8lan,
37889 @@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_mac_ops = {
37890 /* id_led_init dependent on mac type */
37891 };
37892
37893 -static struct e1000_phy_operations ich8_phy_ops = {
37894 +static const struct e1000_phy_operations ich8_phy_ops = {
37895 .acquire_phy = e1000_acquire_swflag_ich8lan,
37896 .check_reset_block = e1000_check_reset_block_ich8lan,
37897 .commit_phy = NULL,
37898 @@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_phy_ops = {
37899 .write_phy_reg = e1000e_write_phy_reg_igp,
37900 };
37901
37902 -static struct e1000_nvm_operations ich8_nvm_ops = {
37903 +static const struct e1000_nvm_operations ich8_nvm_ops = {
37904 .acquire_nvm = e1000_acquire_nvm_ich8lan,
37905 .read_nvm = e1000_read_nvm_ich8lan,
37906 .release_nvm = e1000_release_nvm_ich8lan,
37907 diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
37908 index 18d5fbb..542d96d 100644
37909 --- a/drivers/net/fealnx.c
37910 +++ b/drivers/net/fealnx.c
37911 @@ -151,7 +151,7 @@ struct chip_info {
37912 int flags;
37913 };
37914
37915 -static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
37916 +static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
37917 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
37918 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
37919 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
37920 diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
37921 index 0e5b54b..b503f82 100644
37922 --- a/drivers/net/hamradio/6pack.c
37923 +++ b/drivers/net/hamradio/6pack.c
37924 @@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct tty_struct *tty,
37925 unsigned char buf[512];
37926 int count1;
37927
37928 + pax_track_stack();
37929 +
37930 if (!count)
37931 return;
37932
37933 diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
37934 index 5862282..7cce8cb 100644
37935 --- a/drivers/net/ibmveth.c
37936 +++ b/drivers/net/ibmveth.c
37937 @@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attrs[] = {
37938 NULL,
37939 };
37940
37941 -static struct sysfs_ops veth_pool_ops = {
37942 +static const struct sysfs_ops veth_pool_ops = {
37943 .show = veth_pool_show,
37944 .store = veth_pool_store,
37945 };
37946 diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
37947 index d617f2d..57b5309 100644
37948 --- a/drivers/net/igb/e1000_82575.c
37949 +++ b/drivers/net/igb/e1000_82575.c
37950 @@ -1411,7 +1411,7 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
37951 wr32(E1000_VT_CTL, vt_ctl);
37952 }
37953
37954 -static struct e1000_mac_operations e1000_mac_ops_82575 = {
37955 +static const struct e1000_mac_operations e1000_mac_ops_82575 = {
37956 .reset_hw = igb_reset_hw_82575,
37957 .init_hw = igb_init_hw_82575,
37958 .check_for_link = igb_check_for_link_82575,
37959 @@ -1420,13 +1420,13 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = {
37960 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
37961 };
37962
37963 -static struct e1000_phy_operations e1000_phy_ops_82575 = {
37964 +static const struct e1000_phy_operations e1000_phy_ops_82575 = {
37965 .acquire = igb_acquire_phy_82575,
37966 .get_cfg_done = igb_get_cfg_done_82575,
37967 .release = igb_release_phy_82575,
37968 };
37969
37970 -static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
37971 +static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
37972 .acquire = igb_acquire_nvm_82575,
37973 .read = igb_read_nvm_eerd,
37974 .release = igb_release_nvm_82575,
37975 diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
37976 index 72081df..d855cf5 100644
37977 --- a/drivers/net/igb/e1000_hw.h
37978 +++ b/drivers/net/igb/e1000_hw.h
37979 @@ -288,6 +288,7 @@ struct e1000_mac_operations {
37980 s32 (*read_mac_addr)(struct e1000_hw *);
37981 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
37982 };
37983 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
37984
37985 struct e1000_phy_operations {
37986 s32 (*acquire)(struct e1000_hw *);
37987 @@ -303,6 +304,7 @@ struct e1000_phy_operations {
37988 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
37989 s32 (*write_reg)(struct e1000_hw *, u32, u16);
37990 };
37991 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
37992
37993 struct e1000_nvm_operations {
37994 s32 (*acquire)(struct e1000_hw *);
37995 @@ -310,6 +312,7 @@ struct e1000_nvm_operations {
37996 void (*release)(struct e1000_hw *);
37997 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
37998 };
37999 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
38000
38001 struct e1000_info {
38002 s32 (*get_invariants)(struct e1000_hw *);
38003 @@ -321,7 +324,7 @@ struct e1000_info {
38004 extern const struct e1000_info e1000_82575_info;
38005
38006 struct e1000_mac_info {
38007 - struct e1000_mac_operations ops;
38008 + e1000_mac_operations_no_const ops;
38009
38010 u8 addr[6];
38011 u8 perm_addr[6];
38012 @@ -365,7 +368,7 @@ struct e1000_mac_info {
38013 };
38014
38015 struct e1000_phy_info {
38016 - struct e1000_phy_operations ops;
38017 + e1000_phy_operations_no_const ops;
38018
38019 enum e1000_phy_type type;
38020
38021 @@ -400,7 +403,7 @@ struct e1000_phy_info {
38022 };
38023
38024 struct e1000_nvm_info {
38025 - struct e1000_nvm_operations ops;
38026 + e1000_nvm_operations_no_const ops;
38027
38028 enum e1000_nvm_type type;
38029 enum e1000_nvm_override override;
38030 @@ -446,6 +449,7 @@ struct e1000_mbx_operations {
38031 s32 (*check_for_ack)(struct e1000_hw *, u16);
38032 s32 (*check_for_rst)(struct e1000_hw *, u16);
38033 };
38034 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
38035
38036 struct e1000_mbx_stats {
38037 u32 msgs_tx;
38038 @@ -457,7 +461,7 @@ struct e1000_mbx_stats {
38039 };
38040
38041 struct e1000_mbx_info {
38042 - struct e1000_mbx_operations ops;
38043 + e1000_mbx_operations_no_const ops;
38044 struct e1000_mbx_stats stats;
38045 u32 timeout;
38046 u32 usec_delay;
38047 diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
38048 index 1e8ce37..549c453 100644
38049 --- a/drivers/net/igbvf/vf.h
38050 +++ b/drivers/net/igbvf/vf.h
38051 @@ -187,9 +187,10 @@ struct e1000_mac_operations {
38052 s32 (*read_mac_addr)(struct e1000_hw *);
38053 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
38054 };
38055 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
38056
38057 struct e1000_mac_info {
38058 - struct e1000_mac_operations ops;
38059 + e1000_mac_operations_no_const ops;
38060 u8 addr[6];
38061 u8 perm_addr[6];
38062
38063 @@ -211,6 +212,7 @@ struct e1000_mbx_operations {
38064 s32 (*check_for_ack)(struct e1000_hw *);
38065 s32 (*check_for_rst)(struct e1000_hw *);
38066 };
38067 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
38068
38069 struct e1000_mbx_stats {
38070 u32 msgs_tx;
38071 @@ -222,7 +224,7 @@ struct e1000_mbx_stats {
38072 };
38073
38074 struct e1000_mbx_info {
38075 - struct e1000_mbx_operations ops;
38076 + e1000_mbx_operations_no_const ops;
38077 struct e1000_mbx_stats stats;
38078 u32 timeout;
38079 u32 usec_delay;
38080 diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
38081 index aa7286b..a61394f 100644
38082 --- a/drivers/net/iseries_veth.c
38083 +++ b/drivers/net/iseries_veth.c
38084 @@ -384,7 +384,7 @@ static struct attribute *veth_cnx_default_attrs[] = {
38085 NULL
38086 };
38087
38088 -static struct sysfs_ops veth_cnx_sysfs_ops = {
38089 +static const struct sysfs_ops veth_cnx_sysfs_ops = {
38090 .show = veth_cnx_attribute_show
38091 };
38092
38093 @@ -441,7 +441,7 @@ static struct attribute *veth_port_default_attrs[] = {
38094 NULL
38095 };
38096
38097 -static struct sysfs_ops veth_port_sysfs_ops = {
38098 +static const struct sysfs_ops veth_port_sysfs_ops = {
38099 .show = veth_port_attribute_show
38100 };
38101
38102 diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
38103 index 8aa44dc..fa1e797 100644
38104 --- a/drivers/net/ixgb/ixgb_main.c
38105 +++ b/drivers/net/ixgb/ixgb_main.c
38106 @@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev)
38107 u32 rctl;
38108 int i;
38109
38110 + pax_track_stack();
38111 +
38112 /* Check for Promiscuous and All Multicast modes */
38113
38114 rctl = IXGB_READ_REG(hw, RCTL);
38115 diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
38116 index af35e1d..8781785 100644
38117 --- a/drivers/net/ixgb/ixgb_param.c
38118 +++ b/drivers/net/ixgb/ixgb_param.c
38119 @@ -260,6 +260,9 @@ void __devinit
38120 ixgb_check_options(struct ixgb_adapter *adapter)
38121 {
38122 int bd = adapter->bd_number;
38123 +
38124 + pax_track_stack();
38125 +
38126 if (bd >= IXGB_MAX_NIC) {
38127 printk(KERN_NOTICE
38128 "Warning: no configuration for board #%i\n", bd);
38129 diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
38130 index b17aa73..ed74540 100644
38131 --- a/drivers/net/ixgbe/ixgbe_type.h
38132 +++ b/drivers/net/ixgbe/ixgbe_type.h
38133 @@ -2327,6 +2327,7 @@ struct ixgbe_eeprom_operations {
38134 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
38135 s32 (*update_checksum)(struct ixgbe_hw *);
38136 };
38137 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
38138
38139 struct ixgbe_mac_operations {
38140 s32 (*init_hw)(struct ixgbe_hw *);
38141 @@ -2376,6 +2377,7 @@ struct ixgbe_mac_operations {
38142 /* Flow Control */
38143 s32 (*fc_enable)(struct ixgbe_hw *, s32);
38144 };
38145 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
38146
38147 struct ixgbe_phy_operations {
38148 s32 (*identify)(struct ixgbe_hw *);
38149 @@ -2394,9 +2396,10 @@ struct ixgbe_phy_operations {
38150 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
38151 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
38152 };
38153 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
38154
38155 struct ixgbe_eeprom_info {
38156 - struct ixgbe_eeprom_operations ops;
38157 + ixgbe_eeprom_operations_no_const ops;
38158 enum ixgbe_eeprom_type type;
38159 u32 semaphore_delay;
38160 u16 word_size;
38161 @@ -2404,7 +2407,7 @@ struct ixgbe_eeprom_info {
38162 };
38163
38164 struct ixgbe_mac_info {
38165 - struct ixgbe_mac_operations ops;
38166 + ixgbe_mac_operations_no_const ops;
38167 enum ixgbe_mac_type type;
38168 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
38169 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
38170 @@ -2423,7 +2426,7 @@ struct ixgbe_mac_info {
38171 };
38172
38173 struct ixgbe_phy_info {
38174 - struct ixgbe_phy_operations ops;
38175 + ixgbe_phy_operations_no_const ops;
38176 struct mdio_if_info mdio;
38177 enum ixgbe_phy_type type;
38178 u32 id;
38179 diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
38180 index 291a505..2543756 100644
38181 --- a/drivers/net/mlx4/main.c
38182 +++ b/drivers/net/mlx4/main.c
38183 @@ -38,6 +38,7 @@
38184 #include <linux/errno.h>
38185 #include <linux/pci.h>
38186 #include <linux/dma-mapping.h>
38187 +#include <linux/sched.h>
38188
38189 #include <linux/mlx4/device.h>
38190 #include <linux/mlx4/doorbell.h>
38191 @@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
38192 u64 icm_size;
38193 int err;
38194
38195 + pax_track_stack();
38196 +
38197 err = mlx4_QUERY_FW(dev);
38198 if (err) {
38199 if (err == -EACCES)
38200 diff --git a/drivers/net/niu.c b/drivers/net/niu.c
38201 index 2dce134..fa5ce75 100644
38202 --- a/drivers/net/niu.c
38203 +++ b/drivers/net/niu.c
38204 @@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
38205 int i, num_irqs, err;
38206 u8 first_ldg;
38207
38208 + pax_track_stack();
38209 +
38210 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
38211 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
38212 ldg_num_map[i] = first_ldg + i;
38213 diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
38214 index c1b3f09..97cd8c4 100644
38215 --- a/drivers/net/pcnet32.c
38216 +++ b/drivers/net/pcnet32.c
38217 @@ -79,7 +79,7 @@ static int cards_found;
38218 /*
38219 * VLB I/O addresses
38220 */
38221 -static unsigned int pcnet32_portlist[] __initdata =
38222 +static unsigned int pcnet32_portlist[] __devinitdata =
38223 { 0x300, 0x320, 0x340, 0x360, 0 };
38224
38225 static int pcnet32_debug = 0;
38226 @@ -267,7 +267,7 @@ struct pcnet32_private {
38227 struct sk_buff **rx_skbuff;
38228 dma_addr_t *tx_dma_addr;
38229 dma_addr_t *rx_dma_addr;
38230 - struct pcnet32_access a;
38231 + struct pcnet32_access *a;
38232 spinlock_t lock; /* Guard lock */
38233 unsigned int cur_rx, cur_tx; /* The next free ring entry */
38234 unsigned int rx_ring_size; /* current rx ring size */
38235 @@ -457,9 +457,9 @@ static void pcnet32_netif_start(struct net_device *dev)
38236 u16 val;
38237
38238 netif_wake_queue(dev);
38239 - val = lp->a.read_csr(ioaddr, CSR3);
38240 + val = lp->a->read_csr(ioaddr, CSR3);
38241 val &= 0x00ff;
38242 - lp->a.write_csr(ioaddr, CSR3, val);
38243 + lp->a->write_csr(ioaddr, CSR3, val);
38244 napi_enable(&lp->napi);
38245 }
38246
38247 @@ -744,7 +744,7 @@ static u32 pcnet32_get_link(struct net_device *dev)
38248 r = mii_link_ok(&lp->mii_if);
38249 } else if (lp->chip_version >= PCNET32_79C970A) {
38250 ulong ioaddr = dev->base_addr; /* card base I/O address */
38251 - r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
38252 + r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
38253 } else { /* can not detect link on really old chips */
38254 r = 1;
38255 }
38256 @@ -806,7 +806,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
38257 pcnet32_netif_stop(dev);
38258
38259 spin_lock_irqsave(&lp->lock, flags);
38260 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38261 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38262
38263 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
38264
38265 @@ -886,7 +886,7 @@ static void pcnet32_ethtool_test(struct net_device *dev,
38266 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38267 {
38268 struct pcnet32_private *lp = netdev_priv(dev);
38269 - struct pcnet32_access *a = &lp->a; /* access to registers */
38270 + struct pcnet32_access *a = lp->a; /* access to registers */
38271 ulong ioaddr = dev->base_addr; /* card base I/O address */
38272 struct sk_buff *skb; /* sk buff */
38273 int x, i; /* counters */
38274 @@ -906,21 +906,21 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38275 pcnet32_netif_stop(dev);
38276
38277 spin_lock_irqsave(&lp->lock, flags);
38278 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38279 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38280
38281 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
38282
38283 /* Reset the PCNET32 */
38284 - lp->a.reset(ioaddr);
38285 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38286 + lp->a->reset(ioaddr);
38287 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38288
38289 /* switch pcnet32 to 32bit mode */
38290 - lp->a.write_bcr(ioaddr, 20, 2);
38291 + lp->a->write_bcr(ioaddr, 20, 2);
38292
38293 /* purge & init rings but don't actually restart */
38294 pcnet32_restart(dev, 0x0000);
38295
38296 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38297 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38298
38299 /* Initialize Transmit buffers. */
38300 size = data_len + 15;
38301 @@ -966,10 +966,10 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38302
38303 /* set int loopback in CSR15 */
38304 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
38305 - lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
38306 + lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
38307
38308 teststatus = cpu_to_le16(0x8000);
38309 - lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
38310 + lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
38311
38312 /* Check status of descriptors */
38313 for (x = 0; x < numbuffs; x++) {
38314 @@ -990,7 +990,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38315 }
38316 }
38317
38318 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38319 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38320 wmb();
38321 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
38322 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
38323 @@ -1039,7 +1039,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38324 pcnet32_restart(dev, CSR0_NORMAL);
38325 } else {
38326 pcnet32_purge_rx_ring(dev);
38327 - lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
38328 + lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
38329 }
38330 spin_unlock_irqrestore(&lp->lock, flags);
38331
38332 @@ -1049,7 +1049,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38333 static void pcnet32_led_blink_callback(struct net_device *dev)
38334 {
38335 struct pcnet32_private *lp = netdev_priv(dev);
38336 - struct pcnet32_access *a = &lp->a;
38337 + struct pcnet32_access *a = lp->a;
38338 ulong ioaddr = dev->base_addr;
38339 unsigned long flags;
38340 int i;
38341 @@ -1066,7 +1066,7 @@ static void pcnet32_led_blink_callback(struct net_device *dev)
38342 static int pcnet32_phys_id(struct net_device *dev, u32 data)
38343 {
38344 struct pcnet32_private *lp = netdev_priv(dev);
38345 - struct pcnet32_access *a = &lp->a;
38346 + struct pcnet32_access *a = lp->a;
38347 ulong ioaddr = dev->base_addr;
38348 unsigned long flags;
38349 int i, regs[4];
38350 @@ -1112,7 +1112,7 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
38351 {
38352 int csr5;
38353 struct pcnet32_private *lp = netdev_priv(dev);
38354 - struct pcnet32_access *a = &lp->a;
38355 + struct pcnet32_access *a = lp->a;
38356 ulong ioaddr = dev->base_addr;
38357 int ticks;
38358
38359 @@ -1388,8 +1388,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
38360 spin_lock_irqsave(&lp->lock, flags);
38361 if (pcnet32_tx(dev)) {
38362 /* reset the chip to clear the error condition, then restart */
38363 - lp->a.reset(ioaddr);
38364 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38365 + lp->a->reset(ioaddr);
38366 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38367 pcnet32_restart(dev, CSR0_START);
38368 netif_wake_queue(dev);
38369 }
38370 @@ -1401,12 +1401,12 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
38371 __napi_complete(napi);
38372
38373 /* clear interrupt masks */
38374 - val = lp->a.read_csr(ioaddr, CSR3);
38375 + val = lp->a->read_csr(ioaddr, CSR3);
38376 val &= 0x00ff;
38377 - lp->a.write_csr(ioaddr, CSR3, val);
38378 + lp->a->write_csr(ioaddr, CSR3, val);
38379
38380 /* Set interrupt enable. */
38381 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
38382 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
38383
38384 spin_unlock_irqrestore(&lp->lock, flags);
38385 }
38386 @@ -1429,7 +1429,7 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
38387 int i, csr0;
38388 u16 *buff = ptr;
38389 struct pcnet32_private *lp = netdev_priv(dev);
38390 - struct pcnet32_access *a = &lp->a;
38391 + struct pcnet32_access *a = lp->a;
38392 ulong ioaddr = dev->base_addr;
38393 unsigned long flags;
38394
38395 @@ -1466,9 +1466,9 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
38396 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
38397 if (lp->phymask & (1 << j)) {
38398 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
38399 - lp->a.write_bcr(ioaddr, 33,
38400 + lp->a->write_bcr(ioaddr, 33,
38401 (j << 5) | i);
38402 - *buff++ = lp->a.read_bcr(ioaddr, 34);
38403 + *buff++ = lp->a->read_bcr(ioaddr, 34);
38404 }
38405 }
38406 }
38407 @@ -1858,7 +1858,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38408 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
38409 lp->options |= PCNET32_PORT_FD;
38410
38411 - lp->a = *a;
38412 + lp->a = a;
38413
38414 /* prior to register_netdev, dev->name is not yet correct */
38415 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
38416 @@ -1917,7 +1917,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38417 if (lp->mii) {
38418 /* lp->phycount and lp->phymask are set to 0 by memset above */
38419
38420 - lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
38421 + lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
38422 /* scan for PHYs */
38423 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
38424 unsigned short id1, id2;
38425 @@ -1938,7 +1938,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38426 "Found PHY %04x:%04x at address %d.\n",
38427 id1, id2, i);
38428 }
38429 - lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
38430 + lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
38431 if (lp->phycount > 1) {
38432 lp->options |= PCNET32_PORT_MII;
38433 }
38434 @@ -2109,10 +2109,10 @@ static int pcnet32_open(struct net_device *dev)
38435 }
38436
38437 /* Reset the PCNET32 */
38438 - lp->a.reset(ioaddr);
38439 + lp->a->reset(ioaddr);
38440
38441 /* switch pcnet32 to 32bit mode */
38442 - lp->a.write_bcr(ioaddr, 20, 2);
38443 + lp->a->write_bcr(ioaddr, 20, 2);
38444
38445 if (netif_msg_ifup(lp))
38446 printk(KERN_DEBUG
38447 @@ -2122,14 +2122,14 @@ static int pcnet32_open(struct net_device *dev)
38448 (u32) (lp->init_dma_addr));
38449
38450 /* set/reset autoselect bit */
38451 - val = lp->a.read_bcr(ioaddr, 2) & ~2;
38452 + val = lp->a->read_bcr(ioaddr, 2) & ~2;
38453 if (lp->options & PCNET32_PORT_ASEL)
38454 val |= 2;
38455 - lp->a.write_bcr(ioaddr, 2, val);
38456 + lp->a->write_bcr(ioaddr, 2, val);
38457
38458 /* handle full duplex setting */
38459 if (lp->mii_if.full_duplex) {
38460 - val = lp->a.read_bcr(ioaddr, 9) & ~3;
38461 + val = lp->a->read_bcr(ioaddr, 9) & ~3;
38462 if (lp->options & PCNET32_PORT_FD) {
38463 val |= 1;
38464 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
38465 @@ -2139,14 +2139,14 @@ static int pcnet32_open(struct net_device *dev)
38466 if (lp->chip_version == 0x2627)
38467 val |= 3;
38468 }
38469 - lp->a.write_bcr(ioaddr, 9, val);
38470 + lp->a->write_bcr(ioaddr, 9, val);
38471 }
38472
38473 /* set/reset GPSI bit in test register */
38474 - val = lp->a.read_csr(ioaddr, 124) & ~0x10;
38475 + val = lp->a->read_csr(ioaddr, 124) & ~0x10;
38476 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
38477 val |= 0x10;
38478 - lp->a.write_csr(ioaddr, 124, val);
38479 + lp->a->write_csr(ioaddr, 124, val);
38480
38481 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
38482 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
38483 @@ -2167,24 +2167,24 @@ static int pcnet32_open(struct net_device *dev)
38484 * duplex, and/or enable auto negotiation, and clear DANAS
38485 */
38486 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
38487 - lp->a.write_bcr(ioaddr, 32,
38488 - lp->a.read_bcr(ioaddr, 32) | 0x0080);
38489 + lp->a->write_bcr(ioaddr, 32,
38490 + lp->a->read_bcr(ioaddr, 32) | 0x0080);
38491 /* disable Auto Negotiation, set 10Mpbs, HD */
38492 - val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
38493 + val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
38494 if (lp->options & PCNET32_PORT_FD)
38495 val |= 0x10;
38496 if (lp->options & PCNET32_PORT_100)
38497 val |= 0x08;
38498 - lp->a.write_bcr(ioaddr, 32, val);
38499 + lp->a->write_bcr(ioaddr, 32, val);
38500 } else {
38501 if (lp->options & PCNET32_PORT_ASEL) {
38502 - lp->a.write_bcr(ioaddr, 32,
38503 - lp->a.read_bcr(ioaddr,
38504 + lp->a->write_bcr(ioaddr, 32,
38505 + lp->a->read_bcr(ioaddr,
38506 32) | 0x0080);
38507 /* enable auto negotiate, setup, disable fd */
38508 - val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
38509 + val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
38510 val |= 0x20;
38511 - lp->a.write_bcr(ioaddr, 32, val);
38512 + lp->a->write_bcr(ioaddr, 32, val);
38513 }
38514 }
38515 } else {
38516 @@ -2197,10 +2197,10 @@ static int pcnet32_open(struct net_device *dev)
38517 * There is really no good other way to handle multiple PHYs
38518 * other than turning off all automatics
38519 */
38520 - val = lp->a.read_bcr(ioaddr, 2);
38521 - lp->a.write_bcr(ioaddr, 2, val & ~2);
38522 - val = lp->a.read_bcr(ioaddr, 32);
38523 - lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
38524 + val = lp->a->read_bcr(ioaddr, 2);
38525 + lp->a->write_bcr(ioaddr, 2, val & ~2);
38526 + val = lp->a->read_bcr(ioaddr, 32);
38527 + lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
38528
38529 if (!(lp->options & PCNET32_PORT_ASEL)) {
38530 /* setup ecmd */
38531 @@ -2210,7 +2210,7 @@ static int pcnet32_open(struct net_device *dev)
38532 ecmd.speed =
38533 lp->
38534 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
38535 - bcr9 = lp->a.read_bcr(ioaddr, 9);
38536 + bcr9 = lp->a->read_bcr(ioaddr, 9);
38537
38538 if (lp->options & PCNET32_PORT_FD) {
38539 ecmd.duplex = DUPLEX_FULL;
38540 @@ -2219,7 +2219,7 @@ static int pcnet32_open(struct net_device *dev)
38541 ecmd.duplex = DUPLEX_HALF;
38542 bcr9 |= ~(1 << 0);
38543 }
38544 - lp->a.write_bcr(ioaddr, 9, bcr9);
38545 + lp->a->write_bcr(ioaddr, 9, bcr9);
38546 }
38547
38548 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
38549 @@ -2252,9 +2252,9 @@ static int pcnet32_open(struct net_device *dev)
38550
38551 #ifdef DO_DXSUFLO
38552 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
38553 - val = lp->a.read_csr(ioaddr, CSR3);
38554 + val = lp->a->read_csr(ioaddr, CSR3);
38555 val |= 0x40;
38556 - lp->a.write_csr(ioaddr, CSR3, val);
38557 + lp->a->write_csr(ioaddr, CSR3, val);
38558 }
38559 #endif
38560
38561 @@ -2270,11 +2270,11 @@ static int pcnet32_open(struct net_device *dev)
38562 napi_enable(&lp->napi);
38563
38564 /* Re-initialize the PCNET32, and start it when done. */
38565 - lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
38566 - lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
38567 + lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
38568 + lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
38569
38570 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38571 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
38572 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38573 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
38574
38575 netif_start_queue(dev);
38576
38577 @@ -2286,20 +2286,20 @@ static int pcnet32_open(struct net_device *dev)
38578
38579 i = 0;
38580 while (i++ < 100)
38581 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
38582 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
38583 break;
38584 /*
38585 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
38586 * reports that doing so triggers a bug in the '974.
38587 */
38588 - lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
38589 + lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
38590
38591 if (netif_msg_ifup(lp))
38592 printk(KERN_DEBUG
38593 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
38594 dev->name, i,
38595 (u32) (lp->init_dma_addr),
38596 - lp->a.read_csr(ioaddr, CSR0));
38597 + lp->a->read_csr(ioaddr, CSR0));
38598
38599 spin_unlock_irqrestore(&lp->lock, flags);
38600
38601 @@ -2313,7 +2313,7 @@ static int pcnet32_open(struct net_device *dev)
38602 * Switch back to 16bit mode to avoid problems with dumb
38603 * DOS packet driver after a warm reboot
38604 */
38605 - lp->a.write_bcr(ioaddr, 20, 4);
38606 + lp->a->write_bcr(ioaddr, 20, 4);
38607
38608 err_free_irq:
38609 spin_unlock_irqrestore(&lp->lock, flags);
38610 @@ -2420,7 +2420,7 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
38611
38612 /* wait for stop */
38613 for (i = 0; i < 100; i++)
38614 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
38615 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
38616 break;
38617
38618 if (i >= 100 && netif_msg_drv(lp))
38619 @@ -2433,13 +2433,13 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
38620 return;
38621
38622 /* ReInit Ring */
38623 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
38624 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
38625 i = 0;
38626 while (i++ < 1000)
38627 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
38628 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
38629 break;
38630
38631 - lp->a.write_csr(ioaddr, CSR0, csr0_bits);
38632 + lp->a->write_csr(ioaddr, CSR0, csr0_bits);
38633 }
38634
38635 static void pcnet32_tx_timeout(struct net_device *dev)
38636 @@ -2452,8 +2452,8 @@ static void pcnet32_tx_timeout(struct net_device *dev)
38637 if (pcnet32_debug & NETIF_MSG_DRV)
38638 printk(KERN_ERR
38639 "%s: transmit timed out, status %4.4x, resetting.\n",
38640 - dev->name, lp->a.read_csr(ioaddr, CSR0));
38641 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
38642 + dev->name, lp->a->read_csr(ioaddr, CSR0));
38643 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
38644 dev->stats.tx_errors++;
38645 if (netif_msg_tx_err(lp)) {
38646 int i;
38647 @@ -2497,7 +2497,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
38648 if (netif_msg_tx_queued(lp)) {
38649 printk(KERN_DEBUG
38650 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
38651 - dev->name, lp->a.read_csr(ioaddr, CSR0));
38652 + dev->name, lp->a->read_csr(ioaddr, CSR0));
38653 }
38654
38655 /* Default status -- will not enable Successful-TxDone
38656 @@ -2528,7 +2528,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
38657 dev->stats.tx_bytes += skb->len;
38658
38659 /* Trigger an immediate send poll. */
38660 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
38661 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
38662
38663 dev->trans_start = jiffies;
38664
38665 @@ -2555,18 +2555,18 @@ pcnet32_interrupt(int irq, void *dev_id)
38666
38667 spin_lock(&lp->lock);
38668
38669 - csr0 = lp->a.read_csr(ioaddr, CSR0);
38670 + csr0 = lp->a->read_csr(ioaddr, CSR0);
38671 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
38672 if (csr0 == 0xffff) {
38673 break; /* PCMCIA remove happened */
38674 }
38675 /* Acknowledge all of the current interrupt sources ASAP. */
38676 - lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
38677 + lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
38678
38679 if (netif_msg_intr(lp))
38680 printk(KERN_DEBUG
38681 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
38682 - dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
38683 + dev->name, csr0, lp->a->read_csr(ioaddr, CSR0));
38684
38685 /* Log misc errors. */
38686 if (csr0 & 0x4000)
38687 @@ -2595,19 +2595,19 @@ pcnet32_interrupt(int irq, void *dev_id)
38688 if (napi_schedule_prep(&lp->napi)) {
38689 u16 val;
38690 /* set interrupt masks */
38691 - val = lp->a.read_csr(ioaddr, CSR3);
38692 + val = lp->a->read_csr(ioaddr, CSR3);
38693 val |= 0x5f00;
38694 - lp->a.write_csr(ioaddr, CSR3, val);
38695 + lp->a->write_csr(ioaddr, CSR3, val);
38696
38697 __napi_schedule(&lp->napi);
38698 break;
38699 }
38700 - csr0 = lp->a.read_csr(ioaddr, CSR0);
38701 + csr0 = lp->a->read_csr(ioaddr, CSR0);
38702 }
38703
38704 if (netif_msg_intr(lp))
38705 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
38706 - dev->name, lp->a.read_csr(ioaddr, CSR0));
38707 + dev->name, lp->a->read_csr(ioaddr, CSR0));
38708
38709 spin_unlock(&lp->lock);
38710
38711 @@ -2627,21 +2627,21 @@ static int pcnet32_close(struct net_device *dev)
38712
38713 spin_lock_irqsave(&lp->lock, flags);
38714
38715 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
38716 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
38717
38718 if (netif_msg_ifdown(lp))
38719 printk(KERN_DEBUG
38720 "%s: Shutting down ethercard, status was %2.2x.\n",
38721 - dev->name, lp->a.read_csr(ioaddr, CSR0));
38722 + dev->name, lp->a->read_csr(ioaddr, CSR0));
38723
38724 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
38725 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
38726 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
38727
38728 /*
38729 * Switch back to 16bit mode to avoid problems with dumb
38730 * DOS packet driver after a warm reboot
38731 */
38732 - lp->a.write_bcr(ioaddr, 20, 4);
38733 + lp->a->write_bcr(ioaddr, 20, 4);
38734
38735 spin_unlock_irqrestore(&lp->lock, flags);
38736
38737 @@ -2664,7 +2664,7 @@ static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
38738 unsigned long flags;
38739
38740 spin_lock_irqsave(&lp->lock, flags);
38741 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
38742 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
38743 spin_unlock_irqrestore(&lp->lock, flags);
38744
38745 return &dev->stats;
38746 @@ -2686,10 +2686,10 @@ static void pcnet32_load_multicast(struct net_device *dev)
38747 if (dev->flags & IFF_ALLMULTI) {
38748 ib->filter[0] = cpu_to_le32(~0U);
38749 ib->filter[1] = cpu_to_le32(~0U);
38750 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
38751 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
38752 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
38753 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
38754 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
38755 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
38756 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
38757 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
38758 return;
38759 }
38760 /* clear the multicast filter */
38761 @@ -2710,7 +2710,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
38762 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
38763 }
38764 for (i = 0; i < 4; i++)
38765 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
38766 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
38767 le16_to_cpu(mcast_table[i]));
38768 return;
38769 }
38770 @@ -2726,7 +2726,7 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
38771
38772 spin_lock_irqsave(&lp->lock, flags);
38773 suspended = pcnet32_suspend(dev, &flags, 0);
38774 - csr15 = lp->a.read_csr(ioaddr, CSR15);
38775 + csr15 = lp->a->read_csr(ioaddr, CSR15);
38776 if (dev->flags & IFF_PROMISC) {
38777 /* Log any net taps. */
38778 if (netif_msg_hw(lp))
38779 @@ -2735,21 +2735,21 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
38780 lp->init_block->mode =
38781 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
38782 7);
38783 - lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
38784 + lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
38785 } else {
38786 lp->init_block->mode =
38787 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
38788 - lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
38789 + lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
38790 pcnet32_load_multicast(dev);
38791 }
38792
38793 if (suspended) {
38794 int csr5;
38795 /* clear SUSPEND (SPND) - CSR5 bit 0 */
38796 - csr5 = lp->a.read_csr(ioaddr, CSR5);
38797 - lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
38798 + csr5 = lp->a->read_csr(ioaddr, CSR5);
38799 + lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
38800 } else {
38801 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
38802 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
38803 pcnet32_restart(dev, CSR0_NORMAL);
38804 netif_wake_queue(dev);
38805 }
38806 @@ -2767,8 +2767,8 @@ static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
38807 if (!lp->mii)
38808 return 0;
38809
38810 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
38811 - val_out = lp->a.read_bcr(ioaddr, 34);
38812 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
38813 + val_out = lp->a->read_bcr(ioaddr, 34);
38814
38815 return val_out;
38816 }
38817 @@ -2782,8 +2782,8 @@ static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
38818 if (!lp->mii)
38819 return;
38820
38821 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
38822 - lp->a.write_bcr(ioaddr, 34, val);
38823 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
38824 + lp->a->write_bcr(ioaddr, 34, val);
38825 }
38826
38827 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
38828 @@ -2862,7 +2862,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
38829 curr_link = mii_link_ok(&lp->mii_if);
38830 } else {
38831 ulong ioaddr = dev->base_addr; /* card base I/O address */
38832 - curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
38833 + curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
38834 }
38835 if (!curr_link) {
38836 if (prev_link || verbose) {
38837 @@ -2887,13 +2887,13 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
38838 (ecmd.duplex ==
38839 DUPLEX_FULL) ? "full" : "half");
38840 }
38841 - bcr9 = lp->a.read_bcr(dev->base_addr, 9);
38842 + bcr9 = lp->a->read_bcr(dev->base_addr, 9);
38843 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
38844 if (lp->mii_if.full_duplex)
38845 bcr9 |= (1 << 0);
38846 else
38847 bcr9 &= ~(1 << 0);
38848 - lp->a.write_bcr(dev->base_addr, 9, bcr9);
38849 + lp->a->write_bcr(dev->base_addr, 9, bcr9);
38850 }
38851 } else {
38852 if (netif_msg_link(lp))
38853 diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
38854 index 7cc9898..6eb50d3 100644
38855 --- a/drivers/net/sis190.c
38856 +++ b/drivers/net/sis190.c
38857 @@ -1598,7 +1598,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
38858 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
38859 struct net_device *dev)
38860 {
38861 - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
38862 + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
38863 struct sis190_private *tp = netdev_priv(dev);
38864 struct pci_dev *isa_bridge;
38865 u8 reg, tmp8;
38866 diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
38867 index e13685a..60c948c 100644
38868 --- a/drivers/net/sundance.c
38869 +++ b/drivers/net/sundance.c
38870 @@ -225,7 +225,7 @@ enum {
38871 struct pci_id_info {
38872 const char *name;
38873 };
38874 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
38875 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
38876 {"D-Link DFE-550TX FAST Ethernet Adapter"},
38877 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
38878 {"D-Link DFE-580TX 4 port Server Adapter"},
38879 diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
38880 index 529f55a..cccaa18 100644
38881 --- a/drivers/net/tg3.h
38882 +++ b/drivers/net/tg3.h
38883 @@ -95,6 +95,7 @@
38884 #define CHIPREV_ID_5750_A0 0x4000
38885 #define CHIPREV_ID_5750_A1 0x4001
38886 #define CHIPREV_ID_5750_A3 0x4003
38887 +#define CHIPREV_ID_5750_C1 0x4201
38888 #define CHIPREV_ID_5750_C2 0x4202
38889 #define CHIPREV_ID_5752_A0_HW 0x5000
38890 #define CHIPREV_ID_5752_A0 0x6000
38891 diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
38892 index b9db1b5..720f9ce 100644
38893 --- a/drivers/net/tokenring/abyss.c
38894 +++ b/drivers/net/tokenring/abyss.c
38895 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
38896
38897 static int __init abyss_init (void)
38898 {
38899 - abyss_netdev_ops = tms380tr_netdev_ops;
38900 + pax_open_kernel();
38901 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
38902
38903 - abyss_netdev_ops.ndo_open = abyss_open;
38904 - abyss_netdev_ops.ndo_stop = abyss_close;
38905 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
38906 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
38907 + pax_close_kernel();
38908
38909 return pci_register_driver(&abyss_driver);
38910 }
38911 diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
38912 index 456f8bf..373e56d 100644
38913 --- a/drivers/net/tokenring/madgemc.c
38914 +++ b/drivers/net/tokenring/madgemc.c
38915 @@ -755,9 +755,11 @@ static struct mca_driver madgemc_driver = {
38916
38917 static int __init madgemc_init (void)
38918 {
38919 - madgemc_netdev_ops = tms380tr_netdev_ops;
38920 - madgemc_netdev_ops.ndo_open = madgemc_open;
38921 - madgemc_netdev_ops.ndo_stop = madgemc_close;
38922 + pax_open_kernel();
38923 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
38924 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
38925 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
38926 + pax_close_kernel();
38927
38928 return mca_register_driver (&madgemc_driver);
38929 }
38930 diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
38931 index 16e8783..925bd49 100644
38932 --- a/drivers/net/tokenring/proteon.c
38933 +++ b/drivers/net/tokenring/proteon.c
38934 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
38935 struct platform_device *pdev;
38936 int i, num = 0, err = 0;
38937
38938 - proteon_netdev_ops = tms380tr_netdev_ops;
38939 - proteon_netdev_ops.ndo_open = proteon_open;
38940 - proteon_netdev_ops.ndo_stop = tms380tr_close;
38941 + pax_open_kernel();
38942 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
38943 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
38944 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
38945 + pax_close_kernel();
38946
38947 err = platform_driver_register(&proteon_driver);
38948 if (err)
38949 diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
38950 index 46db5c5..37c1536 100644
38951 --- a/drivers/net/tokenring/skisa.c
38952 +++ b/drivers/net/tokenring/skisa.c
38953 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
38954 struct platform_device *pdev;
38955 int i, num = 0, err = 0;
38956
38957 - sk_isa_netdev_ops = tms380tr_netdev_ops;
38958 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
38959 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
38960 + pax_open_kernel();
38961 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
38962 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
38963 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
38964 + pax_close_kernel();
38965
38966 err = platform_driver_register(&sk_isa_driver);
38967 if (err)
38968 diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
38969 index 74e5ba4..5cf6bc9 100644
38970 --- a/drivers/net/tulip/de2104x.c
38971 +++ b/drivers/net/tulip/de2104x.c
38972 @@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
38973 struct de_srom_info_leaf *il;
38974 void *bufp;
38975
38976 + pax_track_stack();
38977 +
38978 /* download entire eeprom */
38979 for (i = 0; i < DE_EEPROM_WORDS; i++)
38980 ((__le16 *)ee_data)[i] =
38981 diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
38982 index a8349b7..90f9dfe 100644
38983 --- a/drivers/net/tulip/de4x5.c
38984 +++ b/drivers/net/tulip/de4x5.c
38985 @@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
38986 for (i=0; i<ETH_ALEN; i++) {
38987 tmp.addr[i] = dev->dev_addr[i];
38988 }
38989 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
38990 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
38991 break;
38992
38993 case DE4X5_SET_HWADDR: /* Set the hardware address */
38994 @@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
38995 spin_lock_irqsave(&lp->lock, flags);
38996 memcpy(&statbuf, &lp->pktStats, ioc->len);
38997 spin_unlock_irqrestore(&lp->lock, flags);
38998 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
38999 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
39000 return -EFAULT;
39001 break;
39002 }
39003 diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
39004 index 391acd3..56d11cd 100644
39005 --- a/drivers/net/tulip/eeprom.c
39006 +++ b/drivers/net/tulip/eeprom.c
39007 @@ -80,7 +80,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
39008 {NULL}};
39009
39010
39011 -static const char *block_name[] __devinitdata = {
39012 +static const char *block_name[] __devinitconst = {
39013 "21140 non-MII",
39014 "21140 MII PHY",
39015 "21142 Serial PHY",
39016 diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
39017 index b38d3b7..b1cff23 100644
39018 --- a/drivers/net/tulip/winbond-840.c
39019 +++ b/drivers/net/tulip/winbond-840.c
39020 @@ -235,7 +235,7 @@ struct pci_id_info {
39021 int drv_flags; /* Driver use, intended as capability flags. */
39022 };
39023
39024 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
39025 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
39026 { /* Sometime a Level-One switch card. */
39027 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
39028 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
39029 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
39030 index f450bc9..2b747c8 100644
39031 --- a/drivers/net/usb/hso.c
39032 +++ b/drivers/net/usb/hso.c
39033 @@ -71,7 +71,7 @@
39034 #include <asm/byteorder.h>
39035 #include <linux/serial_core.h>
39036 #include <linux/serial.h>
39037 -
39038 +#include <asm/local.h>
39039
39040 #define DRIVER_VERSION "1.2"
39041 #define MOD_AUTHOR "Option Wireless"
39042 @@ -258,7 +258,7 @@ struct hso_serial {
39043
39044 /* from usb_serial_port */
39045 struct tty_struct *tty;
39046 - int open_count;
39047 + local_t open_count;
39048 spinlock_t serial_lock;
39049
39050 int (*write_data) (struct hso_serial *serial);
39051 @@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
39052 struct urb *urb;
39053
39054 urb = serial->rx_urb[0];
39055 - if (serial->open_count > 0) {
39056 + if (local_read(&serial->open_count) > 0) {
39057 count = put_rxbuf_data(urb, serial);
39058 if (count == -1)
39059 return;
39060 @@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
39061 DUMP1(urb->transfer_buffer, urb->actual_length);
39062
39063 /* Anyone listening? */
39064 - if (serial->open_count == 0)
39065 + if (local_read(&serial->open_count) == 0)
39066 return;
39067
39068 if (status == 0) {
39069 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
39070 spin_unlock_irq(&serial->serial_lock);
39071
39072 /* check for port already opened, if not set the termios */
39073 - serial->open_count++;
39074 - if (serial->open_count == 1) {
39075 + if (local_inc_return(&serial->open_count) == 1) {
39076 tty->low_latency = 1;
39077 serial->rx_state = RX_IDLE;
39078 /* Force default termio settings */
39079 @@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
39080 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
39081 if (result) {
39082 hso_stop_serial_device(serial->parent);
39083 - serial->open_count--;
39084 + local_dec(&serial->open_count);
39085 kref_put(&serial->parent->ref, hso_serial_ref_free);
39086 }
39087 } else {
39088 @@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
39089
39090 /* reset the rts and dtr */
39091 /* do the actual close */
39092 - serial->open_count--;
39093 + local_dec(&serial->open_count);
39094
39095 - if (serial->open_count <= 0) {
39096 - serial->open_count = 0;
39097 + if (local_read(&serial->open_count) <= 0) {
39098 + local_set(&serial->open_count, 0);
39099 spin_lock_irq(&serial->serial_lock);
39100 if (serial->tty == tty) {
39101 serial->tty->driver_data = NULL;
39102 @@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
39103
39104 /* the actual setup */
39105 spin_lock_irqsave(&serial->serial_lock, flags);
39106 - if (serial->open_count)
39107 + if (local_read(&serial->open_count))
39108 _hso_serial_set_termios(tty, old);
39109 else
39110 tty->termios = old;
39111 @@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interface *iface)
39112 /* Start all serial ports */
39113 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
39114 if (serial_table[i] && (serial_table[i]->interface == iface)) {
39115 - if (dev2ser(serial_table[i])->open_count) {
39116 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
39117 result =
39118 hso_start_serial_device(serial_table[i], GFP_NOIO);
39119 hso_kick_transmit(dev2ser(serial_table[i]));
39120 diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
39121 index 3e94f0c..ffdd926 100644
39122 --- a/drivers/net/vxge/vxge-config.h
39123 +++ b/drivers/net/vxge/vxge-config.h
39124 @@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs {
39125 void (*link_down)(struct __vxge_hw_device *devh);
39126 void (*crit_err)(struct __vxge_hw_device *devh,
39127 enum vxge_hw_event type, u64 ext_data);
39128 -};
39129 +} __no_const;
39130
39131 /*
39132 * struct __vxge_hw_blockpool_entry - Block private data structure
39133 diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
39134 index 068d7a9..35293de 100644
39135 --- a/drivers/net/vxge/vxge-main.c
39136 +++ b/drivers/net/vxge/vxge-main.c
39137 @@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
39138 struct sk_buff *completed[NR_SKB_COMPLETED];
39139 int more;
39140
39141 + pax_track_stack();
39142 +
39143 do {
39144 more = 0;
39145 skb_ptr = completed;
39146 @@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
39147 u8 mtable[256] = {0}; /* CPU to vpath mapping */
39148 int index;
39149
39150 + pax_track_stack();
39151 +
39152 /*
39153 * Filling
39154 * - itable with bucket numbers
39155 diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
39156 index 461742b..81be42e 100644
39157 --- a/drivers/net/vxge/vxge-traffic.h
39158 +++ b/drivers/net/vxge/vxge-traffic.h
39159 @@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs {
39160 struct vxge_hw_mempool_dma *dma_object,
39161 u32 index,
39162 u32 is_last);
39163 -};
39164 +} __no_const;
39165
39166 void
39167 __vxge_hw_mempool_destroy(
39168 diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
39169 index cd8cb95..4153b79 100644
39170 --- a/drivers/net/wan/cycx_x25.c
39171 +++ b/drivers/net/wan/cycx_x25.c
39172 @@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned char *p, int len)
39173 unsigned char hex[1024],
39174 * phex = hex;
39175
39176 + pax_track_stack();
39177 +
39178 if (len >= (sizeof(hex) / 2))
39179 len = (sizeof(hex) / 2) - 1;
39180
39181 diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
39182 index aa9248f..a4e3c3b 100644
39183 --- a/drivers/net/wan/hdlc_x25.c
39184 +++ b/drivers/net/wan/hdlc_x25.c
39185 @@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
39186
39187 static int x25_open(struct net_device *dev)
39188 {
39189 - struct lapb_register_struct cb;
39190 + static struct lapb_register_struct cb = {
39191 + .connect_confirmation = x25_connected,
39192 + .connect_indication = x25_connected,
39193 + .disconnect_confirmation = x25_disconnected,
39194 + .disconnect_indication = x25_disconnected,
39195 + .data_indication = x25_data_indication,
39196 + .data_transmit = x25_data_transmit
39197 + };
39198 int result;
39199
39200 - cb.connect_confirmation = x25_connected;
39201 - cb.connect_indication = x25_connected;
39202 - cb.disconnect_confirmation = x25_disconnected;
39203 - cb.disconnect_indication = x25_disconnected;
39204 - cb.data_indication = x25_data_indication;
39205 - cb.data_transmit = x25_data_transmit;
39206 -
39207 result = lapb_register(dev, &cb);
39208 if (result != LAPB_OK)
39209 return result;
39210 diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
39211 index 5ad287c..783b020 100644
39212 --- a/drivers/net/wimax/i2400m/usb-fw.c
39213 +++ b/drivers/net/wimax/i2400m/usb-fw.c
39214 @@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *i2400m,
39215 int do_autopm = 1;
39216 DECLARE_COMPLETION_ONSTACK(notif_completion);
39217
39218 + pax_track_stack();
39219 +
39220 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
39221 i2400m, ack, ack_size);
39222 BUG_ON(_ack == i2400m->bm_ack_buf);
39223 diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
39224 index 6c26840..62c97c3 100644
39225 --- a/drivers/net/wireless/airo.c
39226 +++ b/drivers/net/wireless/airo.c
39227 @@ -3003,6 +3003,8 @@ static void airo_process_scan_results (struct airo_info *ai) {
39228 BSSListElement * loop_net;
39229 BSSListElement * tmp_net;
39230
39231 + pax_track_stack();
39232 +
39233 /* Blow away current list of scan results */
39234 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
39235 list_move_tail (&loop_net->list, &ai->network_free_list);
39236 @@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
39237 WepKeyRid wkr;
39238 int rc;
39239
39240 + pax_track_stack();
39241 +
39242 memset( &mySsid, 0, sizeof( mySsid ) );
39243 kfree (ai->flash);
39244 ai->flash = NULL;
39245 @@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct inode *inode,
39246 __le32 *vals = stats.vals;
39247 int len;
39248
39249 + pax_track_stack();
39250 +
39251 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
39252 return -ENOMEM;
39253 data = (struct proc_data *)file->private_data;
39254 @@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
39255 /* If doLoseSync is not 1, we won't do a Lose Sync */
39256 int doLoseSync = -1;
39257
39258 + pax_track_stack();
39259 +
39260 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
39261 return -ENOMEM;
39262 data = (struct proc_data *)file->private_data;
39263 @@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_device *dev,
39264 int i;
39265 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
39266
39267 + pax_track_stack();
39268 +
39269 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
39270 if (!qual)
39271 return -ENOMEM;
39272 @@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(struct airo_info *local)
39273 CapabilityRid cap_rid;
39274 __le32 *vals = stats_rid.vals;
39275
39276 + pax_track_stack();
39277 +
39278 /* Get stats out of the card */
39279 clear_bit(JOB_WSTATS, &local->jobs);
39280 if (local->power.event) {
39281 diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
39282 index 747508c..82e965d 100644
39283 --- a/drivers/net/wireless/ath/ath5k/debug.c
39284 +++ b/drivers/net/wireless/ath/ath5k/debug.c
39285 @@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
39286 unsigned int v;
39287 u64 tsf;
39288
39289 + pax_track_stack();
39290 +
39291 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
39292 len += snprintf(buf+len, sizeof(buf)-len,
39293 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
39294 @@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
39295 unsigned int len = 0;
39296 unsigned int i;
39297
39298 + pax_track_stack();
39299 +
39300 len += snprintf(buf+len, sizeof(buf)-len,
39301 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
39302
39303 diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
39304 index 2be4c22..593b1eb 100644
39305 --- a/drivers/net/wireless/ath/ath9k/debug.c
39306 +++ b/drivers/net/wireless/ath/ath9k/debug.c
39307 @@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
39308 char buf[512];
39309 unsigned int len = 0;
39310
39311 + pax_track_stack();
39312 +
39313 len += snprintf(buf + len, sizeof(buf) - len,
39314 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
39315 len += snprintf(buf + len, sizeof(buf) - len,
39316 @@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
39317 int i;
39318 u8 addr[ETH_ALEN];
39319
39320 + pax_track_stack();
39321 +
39322 len += snprintf(buf + len, sizeof(buf) - len,
39323 "primary: %s (%s chan=%d ht=%d)\n",
39324 wiphy_name(sc->pri_wiphy->hw->wiphy),
39325 diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c
39326 index 80b19a4..dab3a45 100644
39327 --- a/drivers/net/wireless/b43/debugfs.c
39328 +++ b/drivers/net/wireless/b43/debugfs.c
39329 @@ -43,7 +43,7 @@ static struct dentry *rootdir;
39330 struct b43_debugfs_fops {
39331 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
39332 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
39333 - struct file_operations fops;
39334 + const struct file_operations fops;
39335 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
39336 size_t file_struct_offset;
39337 };
39338 diff --git a/drivers/net/wireless/b43legacy/debugfs.c b/drivers/net/wireless/b43legacy/debugfs.c
39339 index 1f85ac5..c99b4b4 100644
39340 --- a/drivers/net/wireless/b43legacy/debugfs.c
39341 +++ b/drivers/net/wireless/b43legacy/debugfs.c
39342 @@ -44,7 +44,7 @@ static struct dentry *rootdir;
39343 struct b43legacy_debugfs_fops {
39344 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
39345 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
39346 - struct file_operations fops;
39347 + const struct file_operations fops;
39348 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
39349 size_t file_struct_offset;
39350 /* Take wl->irq_lock before calling read/write? */
39351 diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
39352 index 43102bf..3b569c3 100644
39353 --- a/drivers/net/wireless/ipw2x00/ipw2100.c
39354 +++ b/drivers/net/wireless/ipw2x00/ipw2100.c
39355 @@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
39356 int err;
39357 DECLARE_SSID_BUF(ssid);
39358
39359 + pax_track_stack();
39360 +
39361 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
39362
39363 if (ssid_len)
39364 @@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw2100_priv *priv,
39365 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
39366 int err;
39367
39368 + pax_track_stack();
39369 +
39370 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
39371 idx, keylen, len);
39372
39373 diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
39374 index 282b1f7..169f0cf 100644
39375 --- a/drivers/net/wireless/ipw2x00/libipw_rx.c
39376 +++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
39377 @@ -1566,6 +1566,8 @@ static void libipw_process_probe_response(struct libipw_device
39378 unsigned long flags;
39379 DECLARE_SSID_BUF(ssid);
39380
39381 + pax_track_stack();
39382 +
39383 LIBIPW_DEBUG_SCAN("'%s' (%pM"
39384 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
39385 print_ssid(ssid, info_element->data, info_element->len),
39386 diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
39387 index 950267a..80d5fd2 100644
39388 --- a/drivers/net/wireless/iwlwifi/iwl-1000.c
39389 +++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
39390 @@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib = {
39391 },
39392 };
39393
39394 -static struct iwl_ops iwl1000_ops = {
39395 +static const struct iwl_ops iwl1000_ops = {
39396 .ucode = &iwl5000_ucode,
39397 .lib = &iwl1000_lib,
39398 .hcmd = &iwl5000_hcmd,
39399 diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
39400 index 56bfcc3..b348020 100644
39401 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c
39402 +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
39403 @@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
39404 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
39405 };
39406
39407 -static struct iwl_ops iwl3945_ops = {
39408 +static const struct iwl_ops iwl3945_ops = {
39409 .ucode = &iwl3945_ucode,
39410 .lib = &iwl3945_lib,
39411 .hcmd = &iwl3945_hcmd,
39412 diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
39413 index 585b8d4..e142963 100644
39414 --- a/drivers/net/wireless/iwlwifi/iwl-4965.c
39415 +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
39416 @@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib = {
39417 },
39418 };
39419
39420 -static struct iwl_ops iwl4965_ops = {
39421 +static const struct iwl_ops iwl4965_ops = {
39422 .ucode = &iwl4965_ucode,
39423 .lib = &iwl4965_lib,
39424 .hcmd = &iwl4965_hcmd,
39425 diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
39426 index 1f423f2..e37c192 100644
39427 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c
39428 +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
39429 @@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib = {
39430 },
39431 };
39432
39433 -struct iwl_ops iwl5000_ops = {
39434 +const struct iwl_ops iwl5000_ops = {
39435 .ucode = &iwl5000_ucode,
39436 .lib = &iwl5000_lib,
39437 .hcmd = &iwl5000_hcmd,
39438 .utils = &iwl5000_hcmd_utils,
39439 };
39440
39441 -static struct iwl_ops iwl5150_ops = {
39442 +static const struct iwl_ops iwl5150_ops = {
39443 .ucode = &iwl5000_ucode,
39444 .lib = &iwl5150_lib,
39445 .hcmd = &iwl5000_hcmd,
39446 diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
39447 index 1473452..f07d5e1 100644
39448 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c
39449 +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
39450 @@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000_hcmd_utils = {
39451 .calc_rssi = iwl5000_calc_rssi,
39452 };
39453
39454 -static struct iwl_ops iwl6000_ops = {
39455 +static const struct iwl_ops iwl6000_ops = {
39456 .ucode = &iwl5000_ucode,
39457 .lib = &iwl6000_lib,
39458 .hcmd = &iwl5000_hcmd,
39459 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39460 index 1a3dfa2..b3e0a61 100644
39461 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39462 +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39463 @@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
39464 u8 active_index = 0;
39465 s32 tpt = 0;
39466
39467 + pax_track_stack();
39468 +
39469 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
39470
39471 if (!ieee80211_is_data(hdr->frame_control) ||
39472 @@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
39473 u8 valid_tx_ant = 0;
39474 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
39475
39476 + pax_track_stack();
39477 +
39478 /* Override starting rate (index 0) if needed for debug purposes */
39479 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
39480
39481 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
39482 index 0e56d78..6a3c107 100644
39483 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c
39484 +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
39485 @@ -2911,7 +2911,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
39486 if (iwl_debug_level & IWL_DL_INFO)
39487 dev_printk(KERN_DEBUG, &(pdev->dev),
39488 "Disabling hw_scan\n");
39489 - iwl_hw_ops.hw_scan = NULL;
39490 + pax_open_kernel();
39491 + *(void **)&iwl_hw_ops.hw_scan = NULL;
39492 + pax_close_kernel();
39493 }
39494
39495 hw = iwl_alloc_all(cfg, &iwl_hw_ops);
39496 diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
39497 index cbc6290..eb323d7 100644
39498 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h
39499 +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
39500 @@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv);
39501 #endif
39502
39503 #else
39504 -#define IWL_DEBUG(__priv, level, fmt, args...)
39505 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
39506 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
39507 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
39508 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
39509 void *p, u32 len)
39510 {}
39511 diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39512 index a198bcf..8e68233 100644
39513 --- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39514 +++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39515 @@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
39516 int pos = 0;
39517 const size_t bufsz = sizeof(buf);
39518
39519 + pax_track_stack();
39520 +
39521 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
39522 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
39523 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
39524 @@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
39525 const size_t bufsz = sizeof(buf);
39526 ssize_t ret;
39527
39528 + pax_track_stack();
39529 +
39530 for (i = 0; i < AC_NUM; i++) {
39531 pos += scnprintf(buf + pos, bufsz - pos,
39532 "\tcw_min\tcw_max\taifsn\ttxop\n");
39533 diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
39534 index 3539ea4..b174bfa 100644
39535 --- a/drivers/net/wireless/iwlwifi/iwl-dev.h
39536 +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
39537 @@ -68,7 +68,7 @@ struct iwl_tx_queue;
39538
39539 /* shared structures from iwl-5000.c */
39540 extern struct iwl_mod_params iwl50_mod_params;
39541 -extern struct iwl_ops iwl5000_ops;
39542 +extern const struct iwl_ops iwl5000_ops;
39543 extern struct iwl_ucode_ops iwl5000_ucode;
39544 extern struct iwl_lib_ops iwl5000_lib;
39545 extern struct iwl_hcmd_ops iwl5000_hcmd;
39546 diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
39547 index 619590d..69235ee 100644
39548 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
39549 +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
39550 @@ -3927,7 +3927,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
39551 */
39552 if (iwl3945_mod_params.disable_hw_scan) {
39553 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
39554 - iwl3945_hw_ops.hw_scan = NULL;
39555 + pax_open_kernel();
39556 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
39557 + pax_close_kernel();
39558 }
39559
39560
39561 diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
39562 index 1465379..fe4d78b 100644
39563 --- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
39564 +++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
39565 @@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(struct file *filp,
39566 int buf_len = 512;
39567 size_t len = 0;
39568
39569 + pax_track_stack();
39570 +
39571 if (*ppos != 0)
39572 return 0;
39573 if (count < sizeof(buf))
39574 diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
39575 index 893a55c..7f66a50 100644
39576 --- a/drivers/net/wireless/libertas/debugfs.c
39577 +++ b/drivers/net/wireless/libertas/debugfs.c
39578 @@ -708,7 +708,7 @@ out_unlock:
39579 struct lbs_debugfs_files {
39580 const char *name;
39581 int perm;
39582 - struct file_operations fops;
39583 + const struct file_operations fops;
39584 };
39585
39586 static const struct lbs_debugfs_files debugfs_files[] = {
39587 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
39588 index 2ecbedb..42704f0 100644
39589 --- a/drivers/net/wireless/rndis_wlan.c
39590 +++ b/drivers/net/wireless/rndis_wlan.c
39591 @@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
39592
39593 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
39594
39595 - if (rts_threshold < 0 || rts_threshold > 2347)
39596 + if (rts_threshold > 2347)
39597 rts_threshold = 2347;
39598
39599 tmp = cpu_to_le32(rts_threshold);
39600 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
39601 index 334ccd6..47f8944 100644
39602 --- a/drivers/oprofile/buffer_sync.c
39603 +++ b/drivers/oprofile/buffer_sync.c
39604 @@ -342,7 +342,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
39605 if (cookie == NO_COOKIE)
39606 offset = pc;
39607 if (cookie == INVALID_COOKIE) {
39608 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
39609 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
39610 offset = pc;
39611 }
39612 if (cookie != last_cookie) {
39613 @@ -386,14 +386,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
39614 /* add userspace sample */
39615
39616 if (!mm) {
39617 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
39618 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
39619 return 0;
39620 }
39621
39622 cookie = lookup_dcookie(mm, s->eip, &offset);
39623
39624 if (cookie == INVALID_COOKIE) {
39625 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
39626 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
39627 return 0;
39628 }
39629
39630 @@ -562,7 +562,7 @@ void sync_buffer(int cpu)
39631 /* ignore backtraces if failed to add a sample */
39632 if (state == sb_bt_start) {
39633 state = sb_bt_ignore;
39634 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
39635 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
39636 }
39637 }
39638 release_mm(mm);
39639 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
39640 index 5df60a6..72f5c1c 100644
39641 --- a/drivers/oprofile/event_buffer.c
39642 +++ b/drivers/oprofile/event_buffer.c
39643 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
39644 }
39645
39646 if (buffer_pos == buffer_size) {
39647 - atomic_inc(&oprofile_stats.event_lost_overflow);
39648 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
39649 return;
39650 }
39651
39652 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
39653 index dc8a042..fe5f315 100644
39654 --- a/drivers/oprofile/oprof.c
39655 +++ b/drivers/oprofile/oprof.c
39656 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
39657 if (oprofile_ops.switch_events())
39658 return;
39659
39660 - atomic_inc(&oprofile_stats.multiplex_counter);
39661 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
39662 start_switch_worker();
39663 }
39664
39665 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
39666 index 61689e8..387f7f8 100644
39667 --- a/drivers/oprofile/oprofile_stats.c
39668 +++ b/drivers/oprofile/oprofile_stats.c
39669 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
39670 cpu_buf->sample_invalid_eip = 0;
39671 }
39672
39673 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
39674 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
39675 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
39676 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
39677 - atomic_set(&oprofile_stats.multiplex_counter, 0);
39678 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
39679 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
39680 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
39681 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
39682 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
39683 }
39684
39685
39686 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
39687 index 0b54e46..a37c527 100644
39688 --- a/drivers/oprofile/oprofile_stats.h
39689 +++ b/drivers/oprofile/oprofile_stats.h
39690 @@ -13,11 +13,11 @@
39691 #include <asm/atomic.h>
39692
39693 struct oprofile_stat_struct {
39694 - atomic_t sample_lost_no_mm;
39695 - atomic_t sample_lost_no_mapping;
39696 - atomic_t bt_lost_no_mapping;
39697 - atomic_t event_lost_overflow;
39698 - atomic_t multiplex_counter;
39699 + atomic_unchecked_t sample_lost_no_mm;
39700 + atomic_unchecked_t sample_lost_no_mapping;
39701 + atomic_unchecked_t bt_lost_no_mapping;
39702 + atomic_unchecked_t event_lost_overflow;
39703 + atomic_unchecked_t multiplex_counter;
39704 };
39705
39706 extern struct oprofile_stat_struct oprofile_stats;
39707 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
39708 index 2766a6d..80c77e2 100644
39709 --- a/drivers/oprofile/oprofilefs.c
39710 +++ b/drivers/oprofile/oprofilefs.c
39711 @@ -187,7 +187,7 @@ static const struct file_operations atomic_ro_fops = {
39712
39713
39714 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
39715 - char const *name, atomic_t *val)
39716 + char const *name, atomic_unchecked_t *val)
39717 {
39718 struct dentry *d = __oprofilefs_create_file(sb, root, name,
39719 &atomic_ro_fops, 0444);
39720 diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
39721 index 13a64bc..ad62835 100644
39722 --- a/drivers/parisc/pdc_stable.c
39723 +++ b/drivers/parisc/pdc_stable.c
39724 @@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj, struct attribute *attr,
39725 return ret;
39726 }
39727
39728 -static struct sysfs_ops pdcspath_attr_ops = {
39729 +static const struct sysfs_ops pdcspath_attr_ops = {
39730 .show = pdcspath_attr_show,
39731 .store = pdcspath_attr_store,
39732 };
39733 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
39734 index 8eefe56..40751a7 100644
39735 --- a/drivers/parport/procfs.c
39736 +++ b/drivers/parport/procfs.c
39737 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
39738
39739 *ppos += len;
39740
39741 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
39742 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
39743 }
39744
39745 #ifdef CONFIG_PARPORT_1284
39746 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
39747
39748 *ppos += len;
39749
39750 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
39751 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
39752 }
39753 #endif /* IEEE1284.3 support. */
39754
39755 diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
39756 index 73e7d8e..c80f3d2 100644
39757 --- a/drivers/pci/hotplug/acpiphp_glue.c
39758 +++ b/drivers/pci/hotplug/acpiphp_glue.c
39759 @@ -111,7 +111,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
39760 }
39761
39762
39763 -static struct acpi_dock_ops acpiphp_dock_ops = {
39764 +static const struct acpi_dock_ops acpiphp_dock_ops = {
39765 .handler = handle_hotplug_event_func,
39766 };
39767
39768 diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
39769 index 9fff878..ad0ad53 100644
39770 --- a/drivers/pci/hotplug/cpci_hotplug.h
39771 +++ b/drivers/pci/hotplug/cpci_hotplug.h
39772 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
39773 int (*hardware_test) (struct slot* slot, u32 value);
39774 u8 (*get_power) (struct slot* slot);
39775 int (*set_power) (struct slot* slot, int value);
39776 -};
39777 +} __no_const;
39778
39779 struct cpci_hp_controller {
39780 unsigned int irq;
39781 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
39782 index 76ba8a1..20ca857 100644
39783 --- a/drivers/pci/hotplug/cpqphp_nvram.c
39784 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
39785 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
39786
39787 void compaq_nvram_init (void __iomem *rom_start)
39788 {
39789 +
39790 +#ifndef CONFIG_PAX_KERNEXEC
39791 if (rom_start) {
39792 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
39793 }
39794 +#endif
39795 +
39796 dbg("int15 entry = %p\n", compaq_int15_entry_point);
39797
39798 /* initialize our int15 lock */
39799 diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
39800 index 6151389..0a894ef 100644
39801 --- a/drivers/pci/hotplug/fakephp.c
39802 +++ b/drivers/pci/hotplug/fakephp.c
39803 @@ -73,7 +73,7 @@ static void legacy_release(struct kobject *kobj)
39804 }
39805
39806 static struct kobj_type legacy_ktype = {
39807 - .sysfs_ops = &(struct sysfs_ops){
39808 + .sysfs_ops = &(const struct sysfs_ops){
39809 .store = legacy_store, .show = legacy_show
39810 },
39811 .release = &legacy_release,
39812 diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
39813 index 5b680df..fe05b7e 100644
39814 --- a/drivers/pci/intel-iommu.c
39815 +++ b/drivers/pci/intel-iommu.c
39816 @@ -2643,7 +2643,7 @@ error:
39817 return 0;
39818 }
39819
39820 -static dma_addr_t intel_map_page(struct device *dev, struct page *page,
39821 +dma_addr_t intel_map_page(struct device *dev, struct page *page,
39822 unsigned long offset, size_t size,
39823 enum dma_data_direction dir,
39824 struct dma_attrs *attrs)
39825 @@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
39826 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
39827 }
39828
39829 -static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
39830 +void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
39831 size_t size, enum dma_data_direction dir,
39832 struct dma_attrs *attrs)
39833 {
39834 @@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
39835 }
39836 }
39837
39838 -static void *intel_alloc_coherent(struct device *hwdev, size_t size,
39839 +void *intel_alloc_coherent(struct device *hwdev, size_t size,
39840 dma_addr_t *dma_handle, gfp_t flags)
39841 {
39842 void *vaddr;
39843 @@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size,
39844 return NULL;
39845 }
39846
39847 -static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
39848 +void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
39849 dma_addr_t dma_handle)
39850 {
39851 int order;
39852 @@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
39853 free_pages((unsigned long)vaddr, order);
39854 }
39855
39856 -static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
39857 +void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
39858 int nelems, enum dma_data_direction dir,
39859 struct dma_attrs *attrs)
39860 {
39861 @@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
39862 return nelems;
39863 }
39864
39865 -static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
39866 +int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
39867 enum dma_data_direction dir, struct dma_attrs *attrs)
39868 {
39869 int i;
39870 @@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
39871 return nelems;
39872 }
39873
39874 -static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
39875 +int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
39876 {
39877 return !dma_addr;
39878 }
39879
39880 -struct dma_map_ops intel_dma_ops = {
39881 +const struct dma_map_ops intel_dma_ops = {
39882 .alloc_coherent = intel_alloc_coherent,
39883 .free_coherent = intel_free_coherent,
39884 .map_sg = intel_map_sg,
39885 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
39886 index 5b7056c..607bc94 100644
39887 --- a/drivers/pci/pcie/aspm.c
39888 +++ b/drivers/pci/pcie/aspm.c
39889 @@ -27,9 +27,9 @@
39890 #define MODULE_PARAM_PREFIX "pcie_aspm."
39891
39892 /* Note: those are not register definitions */
39893 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
39894 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
39895 -#define ASPM_STATE_L1 (4) /* L1 state */
39896 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
39897 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
39898 +#define ASPM_STATE_L1 (4U) /* L1 state */
39899 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
39900 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
39901
39902 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
39903 index 8105e32..ca10419 100644
39904 --- a/drivers/pci/probe.c
39905 +++ b/drivers/pci/probe.c
39906 @@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
39907 return ret;
39908 }
39909
39910 -static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
39911 +static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
39912 struct device_attribute *attr,
39913 char *buf)
39914 {
39915 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
39916 }
39917
39918 -static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
39919 +static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
39920 struct device_attribute *attr,
39921 char *buf)
39922 {
39923 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
39924 index a03ad8c..024b0da 100644
39925 --- a/drivers/pci/proc.c
39926 +++ b/drivers/pci/proc.c
39927 @@ -480,7 +480,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
39928 static int __init pci_proc_init(void)
39929 {
39930 struct pci_dev *dev = NULL;
39931 +
39932 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
39933 +#ifdef CONFIG_GRKERNSEC_PROC_USER
39934 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
39935 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39936 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
39937 +#endif
39938 +#else
39939 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
39940 +#endif
39941 proc_create("devices", 0, proc_bus_pci_dir,
39942 &proc_bus_pci_dev_operations);
39943 proc_initialized = 1;
39944 diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
39945 index 8c02b6c..5584d8e 100644
39946 --- a/drivers/pci/slot.c
39947 +++ b/drivers/pci/slot.c
39948 @@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struct kobject *kobj,
39949 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
39950 }
39951
39952 -static struct sysfs_ops pci_slot_sysfs_ops = {
39953 +static const struct sysfs_ops pci_slot_sysfs_ops = {
39954 .show = pci_slot_attr_show,
39955 .store = pci_slot_attr_store,
39956 };
39957 diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
39958 index 30cf71d2..50938f1 100644
39959 --- a/drivers/pcmcia/pcmcia_ioctl.c
39960 +++ b/drivers/pcmcia/pcmcia_ioctl.c
39961 @@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode, struct file * file,
39962 return -EFAULT;
39963 }
39964 }
39965 - buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
39966 + buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
39967 if (!buf)
39968 return -ENOMEM;
39969
39970 diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
39971 index 52183c4..b224c69 100644
39972 --- a/drivers/platform/x86/acer-wmi.c
39973 +++ b/drivers/platform/x86/acer-wmi.c
39974 @@ -918,7 +918,7 @@ static int update_bl_status(struct backlight_device *bd)
39975 return 0;
39976 }
39977
39978 -static struct backlight_ops acer_bl_ops = {
39979 +static const struct backlight_ops acer_bl_ops = {
39980 .get_brightness = read_brightness,
39981 .update_status = update_bl_status,
39982 };
39983 diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
39984 index 767cb61..a87380b 100644
39985 --- a/drivers/platform/x86/asus-laptop.c
39986 +++ b/drivers/platform/x86/asus-laptop.c
39987 @@ -250,7 +250,7 @@ static struct backlight_device *asus_backlight_device;
39988 */
39989 static int read_brightness(struct backlight_device *bd);
39990 static int update_bl_status(struct backlight_device *bd);
39991 -static struct backlight_ops asusbl_ops = {
39992 +static const struct backlight_ops asusbl_ops = {
39993 .get_brightness = read_brightness,
39994 .update_status = update_bl_status,
39995 };
39996 diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
39997 index d66c07a..a4abaac 100644
39998 --- a/drivers/platform/x86/asus_acpi.c
39999 +++ b/drivers/platform/x86/asus_acpi.c
40000 @@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_device *device, int type)
40001 return 0;
40002 }
40003
40004 -static struct backlight_ops asus_backlight_data = {
40005 +static const struct backlight_ops asus_backlight_data = {
40006 .get_brightness = read_brightness,
40007 .update_status = set_brightness_status,
40008 };
40009 diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
40010 index 11003bb..550ff1b 100644
40011 --- a/drivers/platform/x86/compal-laptop.c
40012 +++ b/drivers/platform/x86/compal-laptop.c
40013 @@ -163,7 +163,7 @@ static int bl_update_status(struct backlight_device *b)
40014 return set_lcd_level(b->props.brightness);
40015 }
40016
40017 -static struct backlight_ops compalbl_ops = {
40018 +static const struct backlight_ops compalbl_ops = {
40019 .get_brightness = bl_get_brightness,
40020 .update_status = bl_update_status,
40021 };
40022 diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
40023 index 07a74da..9dc99fa 100644
40024 --- a/drivers/platform/x86/dell-laptop.c
40025 +++ b/drivers/platform/x86/dell-laptop.c
40026 @@ -318,7 +318,7 @@ static int dell_get_intensity(struct backlight_device *bd)
40027 return buffer.output[1];
40028 }
40029
40030 -static struct backlight_ops dell_ops = {
40031 +static const struct backlight_ops dell_ops = {
40032 .get_brightness = dell_get_intensity,
40033 .update_status = dell_send_intensity,
40034 };
40035 diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
40036 index c533b1c..5c81f22 100644
40037 --- a/drivers/platform/x86/eeepc-laptop.c
40038 +++ b/drivers/platform/x86/eeepc-laptop.c
40039 @@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device;
40040 */
40041 static int read_brightness(struct backlight_device *bd);
40042 static int update_bl_status(struct backlight_device *bd);
40043 -static struct backlight_ops eeepcbl_ops = {
40044 +static const struct backlight_ops eeepcbl_ops = {
40045 .get_brightness = read_brightness,
40046 .update_status = update_bl_status,
40047 };
40048 diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
40049 index bcd4ba8..a249b35 100644
40050 --- a/drivers/platform/x86/fujitsu-laptop.c
40051 +++ b/drivers/platform/x86/fujitsu-laptop.c
40052 @@ -436,7 +436,7 @@ static int bl_update_status(struct backlight_device *b)
40053 return ret;
40054 }
40055
40056 -static struct backlight_ops fujitsubl_ops = {
40057 +static const struct backlight_ops fujitsubl_ops = {
40058 .get_brightness = bl_get_brightness,
40059 .update_status = bl_update_status,
40060 };
40061 diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
40062 index 759763d..1093ba2 100644
40063 --- a/drivers/platform/x86/msi-laptop.c
40064 +++ b/drivers/platform/x86/msi-laptop.c
40065 @@ -161,7 +161,7 @@ static int bl_update_status(struct backlight_device *b)
40066 return set_lcd_level(b->props.brightness);
40067 }
40068
40069 -static struct backlight_ops msibl_ops = {
40070 +static const struct backlight_ops msibl_ops = {
40071 .get_brightness = bl_get_brightness,
40072 .update_status = bl_update_status,
40073 };
40074 diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
40075 index fe7cf01..9012d8d 100644
40076 --- a/drivers/platform/x86/panasonic-laptop.c
40077 +++ b/drivers/platform/x86/panasonic-laptop.c
40078 @@ -352,7 +352,7 @@ static int bl_set_status(struct backlight_device *bd)
40079 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
40080 }
40081
40082 -static struct backlight_ops pcc_backlight_ops = {
40083 +static const struct backlight_ops pcc_backlight_ops = {
40084 .get_brightness = bl_get,
40085 .update_status = bl_set_status,
40086 };
40087 diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
40088 index a2a742c..b37e25e 100644
40089 --- a/drivers/platform/x86/sony-laptop.c
40090 +++ b/drivers/platform/x86/sony-laptop.c
40091 @@ -850,7 +850,7 @@ static int sony_backlight_get_brightness(struct backlight_device *bd)
40092 }
40093
40094 static struct backlight_device *sony_backlight_device;
40095 -static struct backlight_ops sony_backlight_ops = {
40096 +static const struct backlight_ops sony_backlight_ops = {
40097 .update_status = sony_backlight_update_status,
40098 .get_brightness = sony_backlight_get_brightness,
40099 };
40100 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
40101 index 68271ae..5e8fb10 100644
40102 --- a/drivers/platform/x86/thinkpad_acpi.c
40103 +++ b/drivers/platform/x86/thinkpad_acpi.c
40104 @@ -2139,7 +2139,7 @@ static int hotkey_mask_get(void)
40105 return 0;
40106 }
40107
40108 -void static hotkey_mask_warn_incomplete_mask(void)
40109 +static void hotkey_mask_warn_incomplete_mask(void)
40110 {
40111 /* log only what the user can fix... */
40112 const u32 wantedmask = hotkey_driver_mask &
40113 @@ -6125,7 +6125,7 @@ static void tpacpi_brightness_notify_change(void)
40114 BACKLIGHT_UPDATE_HOTKEY);
40115 }
40116
40117 -static struct backlight_ops ibm_backlight_data = {
40118 +static const struct backlight_ops ibm_backlight_data = {
40119 .get_brightness = brightness_get,
40120 .update_status = brightness_update_status,
40121 };
40122 diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
40123 index 51c0a8b..0786629 100644
40124 --- a/drivers/platform/x86/toshiba_acpi.c
40125 +++ b/drivers/platform/x86/toshiba_acpi.c
40126 @@ -671,7 +671,7 @@ static acpi_status remove_device(void)
40127 return AE_OK;
40128 }
40129
40130 -static struct backlight_ops toshiba_backlight_data = {
40131 +static const struct backlight_ops toshiba_backlight_data = {
40132 .get_brightness = get_lcd,
40133 .update_status = set_lcd_status,
40134 };
40135 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
40136 index fc83783c..cf370d7 100644
40137 --- a/drivers/pnp/pnpbios/bioscalls.c
40138 +++ b/drivers/pnp/pnpbios/bioscalls.c
40139 @@ -60,7 +60,7 @@ do { \
40140 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
40141 } while(0)
40142
40143 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
40144 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
40145 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
40146
40147 /*
40148 @@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40149
40150 cpu = get_cpu();
40151 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
40152 +
40153 + pax_open_kernel();
40154 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
40155 + pax_close_kernel();
40156
40157 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
40158 spin_lock_irqsave(&pnp_bios_lock, flags);
40159 @@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40160 :"memory");
40161 spin_unlock_irqrestore(&pnp_bios_lock, flags);
40162
40163 + pax_open_kernel();
40164 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
40165 + pax_close_kernel();
40166 +
40167 put_cpu();
40168
40169 /* If we get here and this is set then the PnP BIOS faulted on us. */
40170 @@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
40171 return status;
40172 }
40173
40174 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
40175 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
40176 {
40177 int i;
40178
40179 @@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40180 pnp_bios_callpoint.offset = header->fields.pm16offset;
40181 pnp_bios_callpoint.segment = PNP_CS16;
40182
40183 + pax_open_kernel();
40184 +
40185 for_each_possible_cpu(i) {
40186 struct desc_struct *gdt = get_cpu_gdt_table(i);
40187 if (!gdt)
40188 @@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40189 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
40190 (unsigned long)__va(header->fields.pm16dseg));
40191 }
40192 +
40193 + pax_close_kernel();
40194 }
40195 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
40196 index ba97654..66b99d4 100644
40197 --- a/drivers/pnp/resource.c
40198 +++ b/drivers/pnp/resource.c
40199 @@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
40200 return 1;
40201
40202 /* check if the resource is valid */
40203 - if (*irq < 0 || *irq > 15)
40204 + if (*irq > 15)
40205 return 0;
40206
40207 /* check if the resource is reserved */
40208 @@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
40209 return 1;
40210
40211 /* check if the resource is valid */
40212 - if (*dma < 0 || *dma == 4 || *dma > 7)
40213 + if (*dma == 4 || *dma > 7)
40214 return 0;
40215
40216 /* check if the resource is reserved */
40217 diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
40218 index 62bb981..24a2dc9 100644
40219 --- a/drivers/power/bq27x00_battery.c
40220 +++ b/drivers/power/bq27x00_battery.c
40221 @@ -44,7 +44,7 @@ struct bq27x00_device_info;
40222 struct bq27x00_access_methods {
40223 int (*read)(u8 reg, int *rt_value, int b_single,
40224 struct bq27x00_device_info *di);
40225 -};
40226 +} __no_const;
40227
40228 struct bq27x00_device_info {
40229 struct device *dev;
40230 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
40231 index 62227cd..b5b538b 100644
40232 --- a/drivers/rtc/rtc-dev.c
40233 +++ b/drivers/rtc/rtc-dev.c
40234 @@ -14,6 +14,7 @@
40235 #include <linux/module.h>
40236 #include <linux/rtc.h>
40237 #include <linux/sched.h>
40238 +#include <linux/grsecurity.h>
40239 #include "rtc-core.h"
40240
40241 static dev_t rtc_devt;
40242 @@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *file,
40243 if (copy_from_user(&tm, uarg, sizeof(tm)))
40244 return -EFAULT;
40245
40246 + gr_log_timechange();
40247 +
40248 return rtc_set_time(rtc, &tm);
40249
40250 case RTC_PIE_ON:
40251 diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
40252 index 968e3c7..fbc637a 100644
40253 --- a/drivers/s390/cio/qdio_perf.c
40254 +++ b/drivers/s390/cio/qdio_perf.c
40255 @@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_pde;
40256 static int qdio_perf_proc_show(struct seq_file *m, void *v)
40257 {
40258 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
40259 - (long)atomic_long_read(&perf_stats.qdio_int));
40260 + (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
40261 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
40262 - (long)atomic_long_read(&perf_stats.pci_int));
40263 + (long)atomic_long_read_unchecked(&perf_stats.pci_int));
40264 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
40265 - (long)atomic_long_read(&perf_stats.thin_int));
40266 + (long)atomic_long_read_unchecked(&perf_stats.thin_int));
40267 seq_printf(m, "\n");
40268 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
40269 - (long)atomic_long_read(&perf_stats.tasklet_inbound));
40270 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
40271 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
40272 - (long)atomic_long_read(&perf_stats.tasklet_outbound));
40273 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
40274 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
40275 - (long)atomic_long_read(&perf_stats.tasklet_thinint),
40276 - (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
40277 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
40278 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
40279 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
40280 - (long)atomic_long_read(&perf_stats.thinint_inbound),
40281 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
40282 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
40283 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
40284 seq_printf(m, "\n");
40285 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
40286 - (long)atomic_long_read(&perf_stats.siga_in));
40287 + (long)atomic_long_read_unchecked(&perf_stats.siga_in));
40288 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
40289 - (long)atomic_long_read(&perf_stats.siga_out));
40290 + (long)atomic_long_read_unchecked(&perf_stats.siga_out));
40291 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
40292 - (long)atomic_long_read(&perf_stats.siga_sync));
40293 + (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
40294 seq_printf(m, "\n");
40295 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
40296 - (long)atomic_long_read(&perf_stats.inbound_handler));
40297 + (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
40298 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
40299 - (long)atomic_long_read(&perf_stats.outbound_handler));
40300 + (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
40301 seq_printf(m, "\n");
40302 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
40303 - (long)atomic_long_read(&perf_stats.fast_requeue));
40304 + (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
40305 seq_printf(m, "Number of outbound target full condition\t: %li\n",
40306 - (long)atomic_long_read(&perf_stats.outbound_target_full));
40307 + (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
40308 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
40309 - (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
40310 + (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
40311 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
40312 - (long)atomic_long_read(&perf_stats.debug_stop_polling));
40313 + (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
40314 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
40315 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
40316 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
40317 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
40318 - (long)atomic_long_read(&perf_stats.debug_eqbs_all),
40319 - (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
40320 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
40321 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
40322 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
40323 - (long)atomic_long_read(&perf_stats.debug_sqbs_all),
40324 - (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
40325 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
40326 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
40327 seq_printf(m, "\n");
40328 return 0;
40329 }
40330 diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
40331 index ff4504c..b3604c3 100644
40332 --- a/drivers/s390/cio/qdio_perf.h
40333 +++ b/drivers/s390/cio/qdio_perf.h
40334 @@ -13,46 +13,46 @@
40335
40336 struct qdio_perf_stats {
40337 /* interrupt handler calls */
40338 - atomic_long_t qdio_int;
40339 - atomic_long_t pci_int;
40340 - atomic_long_t thin_int;
40341 + atomic_long_unchecked_t qdio_int;
40342 + atomic_long_unchecked_t pci_int;
40343 + atomic_long_unchecked_t thin_int;
40344
40345 /* tasklet runs */
40346 - atomic_long_t tasklet_inbound;
40347 - atomic_long_t tasklet_outbound;
40348 - atomic_long_t tasklet_thinint;
40349 - atomic_long_t tasklet_thinint_loop;
40350 - atomic_long_t thinint_inbound;
40351 - atomic_long_t thinint_inbound_loop;
40352 - atomic_long_t thinint_inbound_loop2;
40353 + atomic_long_unchecked_t tasklet_inbound;
40354 + atomic_long_unchecked_t tasklet_outbound;
40355 + atomic_long_unchecked_t tasklet_thinint;
40356 + atomic_long_unchecked_t tasklet_thinint_loop;
40357 + atomic_long_unchecked_t thinint_inbound;
40358 + atomic_long_unchecked_t thinint_inbound_loop;
40359 + atomic_long_unchecked_t thinint_inbound_loop2;
40360
40361 /* signal adapter calls */
40362 - atomic_long_t siga_out;
40363 - atomic_long_t siga_in;
40364 - atomic_long_t siga_sync;
40365 + atomic_long_unchecked_t siga_out;
40366 + atomic_long_unchecked_t siga_in;
40367 + atomic_long_unchecked_t siga_sync;
40368
40369 /* misc */
40370 - atomic_long_t inbound_handler;
40371 - atomic_long_t outbound_handler;
40372 - atomic_long_t fast_requeue;
40373 - atomic_long_t outbound_target_full;
40374 + atomic_long_unchecked_t inbound_handler;
40375 + atomic_long_unchecked_t outbound_handler;
40376 + atomic_long_unchecked_t fast_requeue;
40377 + atomic_long_unchecked_t outbound_target_full;
40378
40379 /* for debugging */
40380 - atomic_long_t debug_tl_out_timer;
40381 - atomic_long_t debug_stop_polling;
40382 - atomic_long_t debug_eqbs_all;
40383 - atomic_long_t debug_eqbs_incomplete;
40384 - atomic_long_t debug_sqbs_all;
40385 - atomic_long_t debug_sqbs_incomplete;
40386 + atomic_long_unchecked_t debug_tl_out_timer;
40387 + atomic_long_unchecked_t debug_stop_polling;
40388 + atomic_long_unchecked_t debug_eqbs_all;
40389 + atomic_long_unchecked_t debug_eqbs_incomplete;
40390 + atomic_long_unchecked_t debug_sqbs_all;
40391 + atomic_long_unchecked_t debug_sqbs_incomplete;
40392 };
40393
40394 extern struct qdio_perf_stats perf_stats;
40395 extern int qdio_performance_stats;
40396
40397 -static inline void qdio_perf_stat_inc(atomic_long_t *count)
40398 +static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
40399 {
40400 if (qdio_performance_stats)
40401 - atomic_long_inc(count);
40402 + atomic_long_inc_unchecked(count);
40403 }
40404
40405 int qdio_setup_perf_stats(void);
40406 diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
40407 index 1ddcf40..a85f062 100644
40408 --- a/drivers/scsi/BusLogic.c
40409 +++ b/drivers/scsi/BusLogic.c
40410 @@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda
40411 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
40412 *PrototypeHostAdapter)
40413 {
40414 + pax_track_stack();
40415 +
40416 /*
40417 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
40418 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
40419 diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
40420 index cdbdec9..b7d560b 100644
40421 --- a/drivers/scsi/aacraid/aacraid.h
40422 +++ b/drivers/scsi/aacraid/aacraid.h
40423 @@ -471,7 +471,7 @@ struct adapter_ops
40424 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
40425 /* Administrative operations */
40426 int (*adapter_comm)(struct aac_dev * dev, int comm);
40427 -};
40428 +} __no_const;
40429
40430 /*
40431 * Define which interrupt handler needs to be installed
40432 diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
40433 index a5b8e7b..a6a0e43 100644
40434 --- a/drivers/scsi/aacraid/commctrl.c
40435 +++ b/drivers/scsi/aacraid/commctrl.c
40436 @@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
40437 u32 actual_fibsize64, actual_fibsize = 0;
40438 int i;
40439
40440 + pax_track_stack();
40441
40442 if (dev->in_reset) {
40443 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
40444 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
40445 index 9b97c3e..f099725 100644
40446 --- a/drivers/scsi/aacraid/linit.c
40447 +++ b/drivers/scsi/aacraid/linit.c
40448 @@ -91,7 +91,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
40449 #elif defined(__devinitconst)
40450 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
40451 #else
40452 -static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
40453 +static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
40454 #endif
40455 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
40456 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
40457 diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
40458 index 996f722..9127845 100644
40459 --- a/drivers/scsi/aic94xx/aic94xx_init.c
40460 +++ b/drivers/scsi/aic94xx/aic94xx_init.c
40461 @@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(struct device *dev,
40462 flash_error_table[i].reason);
40463 }
40464
40465 -static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
40466 +static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
40467 asd_show_update_bios, asd_store_update_bios);
40468
40469 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
40470 @@ -1011,7 +1011,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
40471 .lldd_control_phy = asd_control_phy,
40472 };
40473
40474 -static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
40475 +static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
40476 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
40477 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
40478 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
40479 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
40480 index 58efd4b..cb48dc7 100644
40481 --- a/drivers/scsi/bfa/bfa_ioc.h
40482 +++ b/drivers/scsi/bfa/bfa_ioc.h
40483 @@ -127,7 +127,7 @@ struct bfa_ioc_cbfn_s {
40484 bfa_ioc_disable_cbfn_t disable_cbfn;
40485 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
40486 bfa_ioc_reset_cbfn_t reset_cbfn;
40487 -};
40488 +} __no_const;
40489
40490 /**
40491 * Heartbeat failure notification queue element.
40492 diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h
40493 index 7ad177e..5503586 100644
40494 --- a/drivers/scsi/bfa/bfa_iocfc.h
40495 +++ b/drivers/scsi/bfa/bfa_iocfc.h
40496 @@ -61,7 +61,7 @@ struct bfa_hwif_s {
40497 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
40498 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
40499 u32 *nvecs, u32 *maxvec);
40500 -};
40501 +} __no_const;
40502 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
40503
40504 struct bfa_iocfc_s {
40505 diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
40506 index 4967643..cbec06b 100644
40507 --- a/drivers/scsi/dpt_i2o.c
40508 +++ b/drivers/scsi/dpt_i2o.c
40509 @@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
40510 dma_addr_t addr;
40511 ulong flags = 0;
40512
40513 + pax_track_stack();
40514 +
40515 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
40516 // get user msg size in u32s
40517 if(get_user(size, &user_msg[0])){
40518 @@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
40519 s32 rcode;
40520 dma_addr_t addr;
40521
40522 + pax_track_stack();
40523 +
40524 memset(msg, 0 , sizeof(msg));
40525 len = scsi_bufflen(cmd);
40526 direction = 0x00000000;
40527 diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
40528 index c7076ce..e20c67c 100644
40529 --- a/drivers/scsi/eata.c
40530 +++ b/drivers/scsi/eata.c
40531 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long port_base, unsigned int j,
40532 struct hostdata *ha;
40533 char name[16];
40534
40535 + pax_track_stack();
40536 +
40537 sprintf(name, "%s%d", driver_name, j);
40538
40539 if (!request_region(port_base, REGION_SIZE, driver_name)) {
40540 diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
40541 index 11ae5c9..891daec 100644
40542 --- a/drivers/scsi/fcoe/libfcoe.c
40543 +++ b/drivers/scsi/fcoe/libfcoe.c
40544 @@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
40545 size_t rlen;
40546 size_t dlen;
40547
40548 + pax_track_stack();
40549 +
40550 fiph = (struct fip_header *)skb->data;
40551 sub = fiph->fip_subcode;
40552 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
40553 diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
40554 index 71c7bbe..e93088a 100644
40555 --- a/drivers/scsi/fnic/fnic_main.c
40556 +++ b/drivers/scsi/fnic/fnic_main.c
40557 @@ -669,7 +669,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
40558 /* Start local port initiatialization */
40559
40560 lp->link_up = 0;
40561 - lp->tt = fnic_transport_template;
40562 + memcpy((void *)&lp->tt, &fnic_transport_template, sizeof(fnic_transport_template));
40563
40564 lp->max_retry_count = fnic->config.flogi_retries;
40565 lp->max_rport_retry_count = fnic->config.plogi_retries;
40566 diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
40567 index bb96d74..9ec3ce4 100644
40568 --- a/drivers/scsi/gdth.c
40569 +++ b/drivers/scsi/gdth.c
40570 @@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
40571 ulong flags;
40572 gdth_ha_str *ha;
40573
40574 + pax_track_stack();
40575 +
40576 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
40577 return -EFAULT;
40578 ha = gdth_find_ha(ldrv.ionode);
40579 @@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg, char *cmnd)
40580 gdth_ha_str *ha;
40581 int rval;
40582
40583 + pax_track_stack();
40584 +
40585 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
40586 res.number >= MAX_HDRIVES)
40587 return -EFAULT;
40588 @@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg, char *cmnd)
40589 gdth_ha_str *ha;
40590 int rval;
40591
40592 + pax_track_stack();
40593 +
40594 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
40595 return -EFAULT;
40596 ha = gdth_find_ha(gen.ionode);
40597 @@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
40598 int i;
40599 gdth_cmd_str gdtcmd;
40600 char cmnd[MAX_COMMAND_SIZE];
40601 +
40602 + pax_track_stack();
40603 +
40604 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
40605
40606 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
40607 diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
40608 index 1258da3..20d8ae6 100644
40609 --- a/drivers/scsi/gdth_proc.c
40610 +++ b/drivers/scsi/gdth_proc.c
40611 @@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
40612 ulong64 paddr;
40613
40614 char cmnd[MAX_COMMAND_SIZE];
40615 +
40616 + pax_track_stack();
40617 +
40618 memset(cmnd, 0xff, 12);
40619 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
40620
40621 @@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
40622 gdth_hget_str *phg;
40623 char cmnd[MAX_COMMAND_SIZE];
40624
40625 + pax_track_stack();
40626 +
40627 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
40628 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
40629 if (!gdtcmd || !estr)
40630 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
40631 index d03a926..f324286 100644
40632 --- a/drivers/scsi/hosts.c
40633 +++ b/drivers/scsi/hosts.c
40634 @@ -40,7 +40,7 @@
40635 #include "scsi_logging.h"
40636
40637
40638 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
40639 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
40640
40641
40642 static void scsi_host_cls_release(struct device *dev)
40643 @@ -347,7 +347,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
40644 * subtract one because we increment first then return, but we need to
40645 * know what the next host number was before increment
40646 */
40647 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
40648 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
40649 shost->dma_channel = 0xff;
40650
40651 /* These three are default values which can be overridden */
40652 diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
40653 index a601159..55e19d2 100644
40654 --- a/drivers/scsi/ipr.c
40655 +++ b/drivers/scsi/ipr.c
40656 @@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
40657 return true;
40658 }
40659
40660 -static struct ata_port_operations ipr_sata_ops = {
40661 +static const struct ata_port_operations ipr_sata_ops = {
40662 .phy_reset = ipr_ata_phy_reset,
40663 .hardreset = ipr_sata_reset,
40664 .post_internal_cmd = ipr_ata_post_internal,
40665 diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
40666 index 4e49fbc..97907ff 100644
40667 --- a/drivers/scsi/ips.h
40668 +++ b/drivers/scsi/ips.h
40669 @@ -1027,7 +1027,7 @@ typedef struct {
40670 int (*intr)(struct ips_ha *);
40671 void (*enableint)(struct ips_ha *);
40672 uint32_t (*statupd)(struct ips_ha *);
40673 -} ips_hw_func_t;
40674 +} __no_const ips_hw_func_t;
40675
40676 typedef struct ips_ha {
40677 uint8_t ha_id[IPS_MAX_CHANNELS+1];
40678 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
40679 index c1c1574..a9c9348 100644
40680 --- a/drivers/scsi/libfc/fc_exch.c
40681 +++ b/drivers/scsi/libfc/fc_exch.c
40682 @@ -86,12 +86,12 @@ struct fc_exch_mgr {
40683 * all together if not used XXX
40684 */
40685 struct {
40686 - atomic_t no_free_exch;
40687 - atomic_t no_free_exch_xid;
40688 - atomic_t xid_not_found;
40689 - atomic_t xid_busy;
40690 - atomic_t seq_not_found;
40691 - atomic_t non_bls_resp;
40692 + atomic_unchecked_t no_free_exch;
40693 + atomic_unchecked_t no_free_exch_xid;
40694 + atomic_unchecked_t xid_not_found;
40695 + atomic_unchecked_t xid_busy;
40696 + atomic_unchecked_t seq_not_found;
40697 + atomic_unchecked_t non_bls_resp;
40698 } stats;
40699 };
40700 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
40701 @@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
40702 /* allocate memory for exchange */
40703 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
40704 if (!ep) {
40705 - atomic_inc(&mp->stats.no_free_exch);
40706 + atomic_inc_unchecked(&mp->stats.no_free_exch);
40707 goto out;
40708 }
40709 memset(ep, 0, sizeof(*ep));
40710 @@ -557,7 +557,7 @@ out:
40711 return ep;
40712 err:
40713 spin_unlock_bh(&pool->lock);
40714 - atomic_inc(&mp->stats.no_free_exch_xid);
40715 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
40716 mempool_free(ep, mp->ep_pool);
40717 return NULL;
40718 }
40719 @@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40720 xid = ntohs(fh->fh_ox_id); /* we originated exch */
40721 ep = fc_exch_find(mp, xid);
40722 if (!ep) {
40723 - atomic_inc(&mp->stats.xid_not_found);
40724 + atomic_inc_unchecked(&mp->stats.xid_not_found);
40725 reject = FC_RJT_OX_ID;
40726 goto out;
40727 }
40728 @@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40729 ep = fc_exch_find(mp, xid);
40730 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
40731 if (ep) {
40732 - atomic_inc(&mp->stats.xid_busy);
40733 + atomic_inc_unchecked(&mp->stats.xid_busy);
40734 reject = FC_RJT_RX_ID;
40735 goto rel;
40736 }
40737 @@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40738 }
40739 xid = ep->xid; /* get our XID */
40740 } else if (!ep) {
40741 - atomic_inc(&mp->stats.xid_not_found);
40742 + atomic_inc_unchecked(&mp->stats.xid_not_found);
40743 reject = FC_RJT_RX_ID; /* XID not found */
40744 goto out;
40745 }
40746 @@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40747 } else {
40748 sp = &ep->seq;
40749 if (sp->id != fh->fh_seq_id) {
40750 - atomic_inc(&mp->stats.seq_not_found);
40751 + atomic_inc_unchecked(&mp->stats.seq_not_found);
40752 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
40753 goto rel;
40754 }
40755 @@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
40756
40757 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
40758 if (!ep) {
40759 - atomic_inc(&mp->stats.xid_not_found);
40760 + atomic_inc_unchecked(&mp->stats.xid_not_found);
40761 goto out;
40762 }
40763 if (ep->esb_stat & ESB_ST_COMPLETE) {
40764 - atomic_inc(&mp->stats.xid_not_found);
40765 + atomic_inc_unchecked(&mp->stats.xid_not_found);
40766 goto out;
40767 }
40768 if (ep->rxid == FC_XID_UNKNOWN)
40769 ep->rxid = ntohs(fh->fh_rx_id);
40770 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
40771 - atomic_inc(&mp->stats.xid_not_found);
40772 + atomic_inc_unchecked(&mp->stats.xid_not_found);
40773 goto rel;
40774 }
40775 if (ep->did != ntoh24(fh->fh_s_id) &&
40776 ep->did != FC_FID_FLOGI) {
40777 - atomic_inc(&mp->stats.xid_not_found);
40778 + atomic_inc_unchecked(&mp->stats.xid_not_found);
40779 goto rel;
40780 }
40781 sof = fr_sof(fp);
40782 @@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
40783 } else {
40784 sp = &ep->seq;
40785 if (sp->id != fh->fh_seq_id) {
40786 - atomic_inc(&mp->stats.seq_not_found);
40787 + atomic_inc_unchecked(&mp->stats.seq_not_found);
40788 goto rel;
40789 }
40790 }
40791 @@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
40792 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
40793
40794 if (!sp)
40795 - atomic_inc(&mp->stats.xid_not_found);
40796 + atomic_inc_unchecked(&mp->stats.xid_not_found);
40797 else
40798 - atomic_inc(&mp->stats.non_bls_resp);
40799 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
40800
40801 fc_frame_free(fp);
40802 }
40803 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
40804 index 0ee989f..a582241 100644
40805 --- a/drivers/scsi/libsas/sas_ata.c
40806 +++ b/drivers/scsi/libsas/sas_ata.c
40807 @@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
40808 }
40809 }
40810
40811 -static struct ata_port_operations sas_sata_ops = {
40812 +static const struct ata_port_operations sas_sata_ops = {
40813 .phy_reset = sas_ata_phy_reset,
40814 .post_internal_cmd = sas_ata_post_internal,
40815 .qc_defer = ata_std_qc_defer,
40816 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
40817 index aa10f79..5cc79e4 100644
40818 --- a/drivers/scsi/lpfc/lpfc.h
40819 +++ b/drivers/scsi/lpfc/lpfc.h
40820 @@ -400,7 +400,7 @@ struct lpfc_vport {
40821 struct dentry *debug_nodelist;
40822 struct dentry *vport_debugfs_root;
40823 struct lpfc_debugfs_trc *disc_trc;
40824 - atomic_t disc_trc_cnt;
40825 + atomic_unchecked_t disc_trc_cnt;
40826 #endif
40827 uint8_t stat_data_enabled;
40828 uint8_t stat_data_blocked;
40829 @@ -725,8 +725,8 @@ struct lpfc_hba {
40830 struct timer_list fabric_block_timer;
40831 unsigned long bit_flags;
40832 #define FABRIC_COMANDS_BLOCKED 0
40833 - atomic_t num_rsrc_err;
40834 - atomic_t num_cmd_success;
40835 + atomic_unchecked_t num_rsrc_err;
40836 + atomic_unchecked_t num_cmd_success;
40837 unsigned long last_rsrc_error_time;
40838 unsigned long last_ramp_down_time;
40839 unsigned long last_ramp_up_time;
40840 @@ -740,7 +740,7 @@ struct lpfc_hba {
40841 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
40842 struct dentry *debug_slow_ring_trc;
40843 struct lpfc_debugfs_trc *slow_ring_trc;
40844 - atomic_t slow_ring_trc_cnt;
40845 + atomic_unchecked_t slow_ring_trc_cnt;
40846 #endif
40847
40848 /* Used for deferred freeing of ELS data buffers */
40849 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
40850 index 8d0f0de..7c77a62 100644
40851 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
40852 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
40853 @@ -124,7 +124,7 @@ struct lpfc_debug {
40854 int len;
40855 };
40856
40857 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
40858 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
40859 static unsigned long lpfc_debugfs_start_time = 0L;
40860
40861 /**
40862 @@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
40863 lpfc_debugfs_enable = 0;
40864
40865 len = 0;
40866 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
40867 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
40868 (lpfc_debugfs_max_disc_trc - 1);
40869 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
40870 dtp = vport->disc_trc + i;
40871 @@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
40872 lpfc_debugfs_enable = 0;
40873
40874 len = 0;
40875 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
40876 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
40877 (lpfc_debugfs_max_slow_ring_trc - 1);
40878 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
40879 dtp = phba->slow_ring_trc + i;
40880 @@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
40881 uint32_t *ptr;
40882 char buffer[1024];
40883
40884 + pax_track_stack();
40885 +
40886 off = 0;
40887 spin_lock_irq(&phba->hbalock);
40888
40889 @@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
40890 !vport || !vport->disc_trc)
40891 return;
40892
40893 - index = atomic_inc_return(&vport->disc_trc_cnt) &
40894 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
40895 (lpfc_debugfs_max_disc_trc - 1);
40896 dtp = vport->disc_trc + index;
40897 dtp->fmt = fmt;
40898 dtp->data1 = data1;
40899 dtp->data2 = data2;
40900 dtp->data3 = data3;
40901 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
40902 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
40903 dtp->jif = jiffies;
40904 #endif
40905 return;
40906 @@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
40907 !phba || !phba->slow_ring_trc)
40908 return;
40909
40910 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
40911 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
40912 (lpfc_debugfs_max_slow_ring_trc - 1);
40913 dtp = phba->slow_ring_trc + index;
40914 dtp->fmt = fmt;
40915 dtp->data1 = data1;
40916 dtp->data2 = data2;
40917 dtp->data3 = data3;
40918 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
40919 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
40920 dtp->jif = jiffies;
40921 #endif
40922 return;
40923 @@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
40924 "slow_ring buffer\n");
40925 goto debug_failed;
40926 }
40927 - atomic_set(&phba->slow_ring_trc_cnt, 0);
40928 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
40929 memset(phba->slow_ring_trc, 0,
40930 (sizeof(struct lpfc_debugfs_trc) *
40931 lpfc_debugfs_max_slow_ring_trc));
40932 @@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
40933 "buffer\n");
40934 goto debug_failed;
40935 }
40936 - atomic_set(&vport->disc_trc_cnt, 0);
40937 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
40938
40939 snprintf(name, sizeof(name), "discovery_trace");
40940 vport->debug_disc_trc =
40941 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
40942 index 549bc7d..8189dbb 100644
40943 --- a/drivers/scsi/lpfc/lpfc_init.c
40944 +++ b/drivers/scsi/lpfc/lpfc_init.c
40945 @@ -8021,8 +8021,10 @@ lpfc_init(void)
40946 printk(LPFC_COPYRIGHT "\n");
40947
40948 if (lpfc_enable_npiv) {
40949 - lpfc_transport_functions.vport_create = lpfc_vport_create;
40950 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
40951 + pax_open_kernel();
40952 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
40953 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
40954 + pax_close_kernel();
40955 }
40956 lpfc_transport_template =
40957 fc_attach_transport(&lpfc_transport_functions);
40958 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
40959 index c88f59f..ff2a42f 100644
40960 --- a/drivers/scsi/lpfc/lpfc_scsi.c
40961 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
40962 @@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
40963 uint32_t evt_posted;
40964
40965 spin_lock_irqsave(&phba->hbalock, flags);
40966 - atomic_inc(&phba->num_rsrc_err);
40967 + atomic_inc_unchecked(&phba->num_rsrc_err);
40968 phba->last_rsrc_error_time = jiffies;
40969
40970 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
40971 @@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
40972 unsigned long flags;
40973 struct lpfc_hba *phba = vport->phba;
40974 uint32_t evt_posted;
40975 - atomic_inc(&phba->num_cmd_success);
40976 + atomic_inc_unchecked(&phba->num_cmd_success);
40977
40978 if (vport->cfg_lun_queue_depth <= queue_depth)
40979 return;
40980 @@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
40981 int i;
40982 struct lpfc_rport_data *rdata;
40983
40984 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
40985 - num_cmd_success = atomic_read(&phba->num_cmd_success);
40986 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
40987 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
40988
40989 vports = lpfc_create_vport_work_array(phba);
40990 if (vports != NULL)
40991 @@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
40992 }
40993 }
40994 lpfc_destroy_vport_work_array(phba, vports);
40995 - atomic_set(&phba->num_rsrc_err, 0);
40996 - atomic_set(&phba->num_cmd_success, 0);
40997 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
40998 + atomic_set_unchecked(&phba->num_cmd_success, 0);
40999 }
41000
41001 /**
41002 @@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
41003 }
41004 }
41005 lpfc_destroy_vport_work_array(phba, vports);
41006 - atomic_set(&phba->num_rsrc_err, 0);
41007 - atomic_set(&phba->num_cmd_success, 0);
41008 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
41009 + atomic_set_unchecked(&phba->num_cmd_success, 0);
41010 }
41011
41012 /**
41013 diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
41014 index 234f0b7..3020aea 100644
41015 --- a/drivers/scsi/megaraid/megaraid_mbox.c
41016 +++ b/drivers/scsi/megaraid/megaraid_mbox.c
41017 @@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter)
41018 int rval;
41019 int i;
41020
41021 + pax_track_stack();
41022 +
41023 // Allocate memory for the base list of scb for management module.
41024 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
41025
41026 diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
41027 index 7a117c1..ee01e9e 100644
41028 --- a/drivers/scsi/osd/osd_initiator.c
41029 +++ b/drivers/scsi/osd/osd_initiator.c
41030 @@ -94,6 +94,8 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
41031 int nelem = ARRAY_SIZE(get_attrs), a = 0;
41032 int ret;
41033
41034 + pax_track_stack();
41035 +
41036 or = osd_start_request(od, GFP_KERNEL);
41037 if (!or)
41038 return -ENOMEM;
41039 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
41040 index 9ab8c86..9425ad3 100644
41041 --- a/drivers/scsi/pmcraid.c
41042 +++ b/drivers/scsi/pmcraid.c
41043 @@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
41044 res->scsi_dev = scsi_dev;
41045 scsi_dev->hostdata = res;
41046 res->change_detected = 0;
41047 - atomic_set(&res->read_failures, 0);
41048 - atomic_set(&res->write_failures, 0);
41049 + atomic_set_unchecked(&res->read_failures, 0);
41050 + atomic_set_unchecked(&res->write_failures, 0);
41051 rc = 0;
41052 }
41053 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
41054 @@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
41055
41056 /* If this was a SCSI read/write command keep count of errors */
41057 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
41058 - atomic_inc(&res->read_failures);
41059 + atomic_inc_unchecked(&res->read_failures);
41060 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
41061 - atomic_inc(&res->write_failures);
41062 + atomic_inc_unchecked(&res->write_failures);
41063
41064 if (!RES_IS_GSCSI(res->cfg_entry) &&
41065 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
41066 @@ -4116,7 +4116,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
41067
41068 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
41069 /* add resources only after host is added into system */
41070 - if (!atomic_read(&pinstance->expose_resources))
41071 + if (!atomic_read_unchecked(&pinstance->expose_resources))
41072 return;
41073
41074 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
41075 @@ -4850,7 +4850,7 @@ static int __devinit pmcraid_init_instance(
41076 init_waitqueue_head(&pinstance->reset_wait_q);
41077
41078 atomic_set(&pinstance->outstanding_cmds, 0);
41079 - atomic_set(&pinstance->expose_resources, 0);
41080 + atomic_set_unchecked(&pinstance->expose_resources, 0);
41081
41082 INIT_LIST_HEAD(&pinstance->free_res_q);
41083 INIT_LIST_HEAD(&pinstance->used_res_q);
41084 @@ -5502,7 +5502,7 @@ static int __devinit pmcraid_probe(
41085 /* Schedule worker thread to handle CCN and take care of adding and
41086 * removing devices to OS
41087 */
41088 - atomic_set(&pinstance->expose_resources, 1);
41089 + atomic_set_unchecked(&pinstance->expose_resources, 1);
41090 schedule_work(&pinstance->worker_q);
41091 return rc;
41092
41093 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
41094 index 3441b3f..6cbe8f7 100644
41095 --- a/drivers/scsi/pmcraid.h
41096 +++ b/drivers/scsi/pmcraid.h
41097 @@ -690,7 +690,7 @@ struct pmcraid_instance {
41098 atomic_t outstanding_cmds;
41099
41100 /* should add/delete resources to mid-layer now ?*/
41101 - atomic_t expose_resources;
41102 + atomic_unchecked_t expose_resources;
41103
41104 /* Tasklet to handle deferred processing */
41105 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
41106 @@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
41107 struct list_head queue; /* link to "to be exposed" resources */
41108 struct pmcraid_config_table_entry cfg_entry;
41109 struct scsi_device *scsi_dev; /* Link scsi_device structure */
41110 - atomic_t read_failures; /* count of failed READ commands */
41111 - atomic_t write_failures; /* count of failed WRITE commands */
41112 + atomic_unchecked_t read_failures; /* count of failed READ commands */
41113 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
41114
41115 /* To indicate add/delete/modify during CCN */
41116 u8 change_detected;
41117 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
41118 index 2150618..7034215 100644
41119 --- a/drivers/scsi/qla2xxx/qla_def.h
41120 +++ b/drivers/scsi/qla2xxx/qla_def.h
41121 @@ -2089,7 +2089,7 @@ struct isp_operations {
41122
41123 int (*get_flash_version) (struct scsi_qla_host *, void *);
41124 int (*start_scsi) (srb_t *);
41125 -};
41126 +} __no_const;
41127
41128 /* MSI-X Support *************************************************************/
41129
41130 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
41131 index 81b5f29..2ae1fad 100644
41132 --- a/drivers/scsi/qla4xxx/ql4_def.h
41133 +++ b/drivers/scsi/qla4xxx/ql4_def.h
41134 @@ -240,7 +240,7 @@ struct ddb_entry {
41135 atomic_t retry_relogin_timer; /* Min Time between relogins
41136 * (4000 only) */
41137 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
41138 - atomic_t relogin_retry_count; /* Num of times relogin has been
41139 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
41140 * retried */
41141
41142 uint16_t port;
41143 diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
41144 index af8c323..515dd51 100644
41145 --- a/drivers/scsi/qla4xxx/ql4_init.c
41146 +++ b/drivers/scsi/qla4xxx/ql4_init.c
41147 @@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
41148 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
41149 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
41150 atomic_set(&ddb_entry->relogin_timer, 0);
41151 - atomic_set(&ddb_entry->relogin_retry_count, 0);
41152 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
41153 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
41154 list_add_tail(&ddb_entry->list, &ha->ddb_list);
41155 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
41156 @@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
41157 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
41158 atomic_set(&ddb_entry->port_down_timer,
41159 ha->port_down_retry_count);
41160 - atomic_set(&ddb_entry->relogin_retry_count, 0);
41161 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
41162 atomic_set(&ddb_entry->relogin_timer, 0);
41163 clear_bit(DF_RELOGIN, &ddb_entry->flags);
41164 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
41165 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
41166 index 83c8b5e..a82b348 100644
41167 --- a/drivers/scsi/qla4xxx/ql4_os.c
41168 +++ b/drivers/scsi/qla4xxx/ql4_os.c
41169 @@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
41170 ddb_entry->fw_ddb_device_state ==
41171 DDB_DS_SESSION_FAILED) {
41172 /* Reset retry relogin timer */
41173 - atomic_inc(&ddb_entry->relogin_retry_count);
41174 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
41175 DEBUG2(printk("scsi%ld: index[%d] relogin"
41176 " timed out-retrying"
41177 " relogin (%d)\n",
41178 ha->host_no,
41179 ddb_entry->fw_ddb_index,
41180 - atomic_read(&ddb_entry->
41181 + atomic_read_unchecked(&ddb_entry->
41182 relogin_retry_count))
41183 );
41184 start_dpc++;
41185 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
41186 index dd098ca..686ce01 100644
41187 --- a/drivers/scsi/scsi.c
41188 +++ b/drivers/scsi/scsi.c
41189 @@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
41190 unsigned long timeout;
41191 int rtn = 0;
41192
41193 - atomic_inc(&cmd->device->iorequest_cnt);
41194 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41195
41196 /* check if the device is still usable */
41197 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
41198 diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
41199 index bc3e363..e1a8e50 100644
41200 --- a/drivers/scsi/scsi_debug.c
41201 +++ b/drivers/scsi/scsi_debug.c
41202 @@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
41203 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
41204 unsigned char *cmd = (unsigned char *)scp->cmnd;
41205
41206 + pax_track_stack();
41207 +
41208 if ((errsts = check_readiness(scp, 1, devip)))
41209 return errsts;
41210 memset(arr, 0, sizeof(arr));
41211 @@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cmnd * scp,
41212 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
41213 unsigned char *cmd = (unsigned char *)scp->cmnd;
41214
41215 + pax_track_stack();
41216 +
41217 if ((errsts = check_readiness(scp, 1, devip)))
41218 return errsts;
41219 memset(arr, 0, sizeof(arr));
41220 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
41221 index 8df12522..c4c1472 100644
41222 --- a/drivers/scsi/scsi_lib.c
41223 +++ b/drivers/scsi/scsi_lib.c
41224 @@ -1389,7 +1389,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
41225 shost = sdev->host;
41226 scsi_init_cmd_errh(cmd);
41227 cmd->result = DID_NO_CONNECT << 16;
41228 - atomic_inc(&cmd->device->iorequest_cnt);
41229 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41230
41231 /*
41232 * SCSI request completion path will do scsi_device_unbusy(),
41233 @@ -1420,9 +1420,9 @@ static void scsi_softirq_done(struct request *rq)
41234 */
41235 cmd->serial_number = 0;
41236
41237 - atomic_inc(&cmd->device->iodone_cnt);
41238 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
41239 if (cmd->result)
41240 - atomic_inc(&cmd->device->ioerr_cnt);
41241 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
41242
41243 disposition = scsi_decide_disposition(cmd);
41244 if (disposition != SUCCESS &&
41245 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
41246 index 91a93e0..eae0fe3 100644
41247 --- a/drivers/scsi/scsi_sysfs.c
41248 +++ b/drivers/scsi/scsi_sysfs.c
41249 @@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
41250 char *buf) \
41251 { \
41252 struct scsi_device *sdev = to_scsi_device(dev); \
41253 - unsigned long long count = atomic_read(&sdev->field); \
41254 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
41255 return snprintf(buf, 20, "0x%llx\n", count); \
41256 } \
41257 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
41258 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
41259 index 1030327..f91fd30 100644
41260 --- a/drivers/scsi/scsi_tgt_lib.c
41261 +++ b/drivers/scsi/scsi_tgt_lib.c
41262 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
41263 int err;
41264
41265 dprintk("%lx %u\n", uaddr, len);
41266 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
41267 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
41268 if (err) {
41269 /*
41270 * TODO: need to fixup sg_tablesize, max_segment_size,
41271 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
41272 index db02e31..1b42ea9 100644
41273 --- a/drivers/scsi/scsi_transport_fc.c
41274 +++ b/drivers/scsi/scsi_transport_fc.c
41275 @@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
41276 * Netlink Infrastructure
41277 */
41278
41279 -static atomic_t fc_event_seq;
41280 +static atomic_unchecked_t fc_event_seq;
41281
41282 /**
41283 * fc_get_event_number - Obtain the next sequential FC event number
41284 @@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
41285 u32
41286 fc_get_event_number(void)
41287 {
41288 - return atomic_add_return(1, &fc_event_seq);
41289 + return atomic_add_return_unchecked(1, &fc_event_seq);
41290 }
41291 EXPORT_SYMBOL(fc_get_event_number);
41292
41293 @@ -641,7 +641,7 @@ static __init int fc_transport_init(void)
41294 {
41295 int error;
41296
41297 - atomic_set(&fc_event_seq, 0);
41298 + atomic_set_unchecked(&fc_event_seq, 0);
41299
41300 error = transport_class_register(&fc_host_class);
41301 if (error)
41302 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
41303 index de2f8c4..63c5278 100644
41304 --- a/drivers/scsi/scsi_transport_iscsi.c
41305 +++ b/drivers/scsi/scsi_transport_iscsi.c
41306 @@ -81,7 +81,7 @@ struct iscsi_internal {
41307 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
41308 };
41309
41310 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
41311 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
41312 static struct workqueue_struct *iscsi_eh_timer_workq;
41313
41314 /*
41315 @@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
41316 int err;
41317
41318 ihost = shost->shost_data;
41319 - session->sid = atomic_add_return(1, &iscsi_session_nr);
41320 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
41321
41322 if (id == ISCSI_MAX_TARGET) {
41323 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
41324 @@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(void)
41325 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
41326 ISCSI_TRANSPORT_VERSION);
41327
41328 - atomic_set(&iscsi_session_nr, 0);
41329 + atomic_set_unchecked(&iscsi_session_nr, 0);
41330
41331 err = class_register(&iscsi_transport_class);
41332 if (err)
41333 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
41334 index 21a045e..ec89e03 100644
41335 --- a/drivers/scsi/scsi_transport_srp.c
41336 +++ b/drivers/scsi/scsi_transport_srp.c
41337 @@ -33,7 +33,7 @@
41338 #include "scsi_transport_srp_internal.h"
41339
41340 struct srp_host_attrs {
41341 - atomic_t next_port_id;
41342 + atomic_unchecked_t next_port_id;
41343 };
41344 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
41345
41346 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
41347 struct Scsi_Host *shost = dev_to_shost(dev);
41348 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
41349
41350 - atomic_set(&srp_host->next_port_id, 0);
41351 + atomic_set_unchecked(&srp_host->next_port_id, 0);
41352 return 0;
41353 }
41354
41355 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
41356 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
41357 rport->roles = ids->roles;
41358
41359 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
41360 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
41361 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
41362
41363 transport_setup_device(&rport->dev);
41364 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
41365 index 040f751..98a5ed2 100644
41366 --- a/drivers/scsi/sg.c
41367 +++ b/drivers/scsi/sg.c
41368 @@ -1064,7 +1064,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
41369 sdp->disk->disk_name,
41370 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
41371 NULL,
41372 - (char *)arg);
41373 + (char __user *)arg);
41374 case BLKTRACESTART:
41375 return blk_trace_startstop(sdp->device->request_queue, 1);
41376 case BLKTRACESTOP:
41377 @@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
41378 const struct file_operations * fops;
41379 };
41380
41381 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
41382 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
41383 {"allow_dio", &adio_fops},
41384 {"debug", &debug_fops},
41385 {"def_reserved_size", &dressz_fops},
41386 @@ -2307,7 +2307,7 @@ sg_proc_init(void)
41387 {
41388 int k, mask;
41389 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
41390 - struct sg_proc_leaf * leaf;
41391 + const struct sg_proc_leaf * leaf;
41392
41393 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
41394 if (!sg_proc_sgp)
41395 diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
41396 index 45374d6..61ee484 100644
41397 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c
41398 +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
41399 @@ -1754,6 +1754,8 @@ static int __devinit sym2_probe(struct pci_dev *pdev,
41400 int do_iounmap = 0;
41401 int do_disable_device = 1;
41402
41403 + pax_track_stack();
41404 +
41405 memset(&sym_dev, 0, sizeof(sym_dev));
41406 memset(&nvram, 0, sizeof(nvram));
41407 sym_dev.pdev = pdev;
41408 diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c
41409 index eadc1ab..2d81457 100644
41410 --- a/drivers/serial/kgdboc.c
41411 +++ b/drivers/serial/kgdboc.c
41412 @@ -18,7 +18,7 @@
41413
41414 #define MAX_CONFIG_LEN 40
41415
41416 -static struct kgdb_io kgdboc_io_ops;
41417 +static const struct kgdb_io kgdboc_io_ops;
41418
41419 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
41420 static int configured = -1;
41421 @@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void)
41422 module_put(THIS_MODULE);
41423 }
41424
41425 -static struct kgdb_io kgdboc_io_ops = {
41426 +static const struct kgdb_io kgdboc_io_ops = {
41427 .name = "kgdboc",
41428 .read_char = kgdboc_get_char,
41429 .write_char = kgdboc_put_char,
41430 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
41431 index b76f246..7f41af7 100644
41432 --- a/drivers/spi/spi.c
41433 +++ b/drivers/spi/spi.c
41434 @@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, struct spi_message *message)
41435 EXPORT_SYMBOL_GPL(spi_sync);
41436
41437 /* portable code must never pass more than 32 bytes */
41438 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
41439 +#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
41440
41441 static u8 *buf;
41442
41443 diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
41444 index 99010d4..6bad87b 100644
41445 --- a/drivers/staging/android/binder.c
41446 +++ b/drivers/staging/android/binder.c
41447 @@ -2756,7 +2756,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
41448 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
41449 }
41450
41451 -static struct vm_operations_struct binder_vm_ops = {
41452 +static const struct vm_operations_struct binder_vm_ops = {
41453 .open = binder_vma_open,
41454 .close = binder_vma_close,
41455 };
41456 diff --git a/drivers/staging/b3dfg/b3dfg.c b/drivers/staging/b3dfg/b3dfg.c
41457 index cda26bb..39fed3f 100644
41458 --- a/drivers/staging/b3dfg/b3dfg.c
41459 +++ b/drivers/staging/b3dfg/b3dfg.c
41460 @@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_area_struct *vma,
41461 return VM_FAULT_NOPAGE;
41462 }
41463
41464 -static struct vm_operations_struct b3dfg_vm_ops = {
41465 +static const struct vm_operations_struct b3dfg_vm_ops = {
41466 .fault = b3dfg_vma_fault,
41467 };
41468
41469 @@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp, struct vm_area_struct *vma)
41470 return r;
41471 }
41472
41473 -static struct file_operations b3dfg_fops = {
41474 +static const struct file_operations b3dfg_fops = {
41475 .owner = THIS_MODULE,
41476 .open = b3dfg_open,
41477 .release = b3dfg_release,
41478 diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
41479 index 908f25a..c9a579b 100644
41480 --- a/drivers/staging/comedi/comedi_fops.c
41481 +++ b/drivers/staging/comedi/comedi_fops.c
41482 @@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct *area)
41483 mutex_unlock(&dev->mutex);
41484 }
41485
41486 -static struct vm_operations_struct comedi_vm_ops = {
41487 +static const struct vm_operations_struct comedi_vm_ops = {
41488 .close = comedi_unmap,
41489 };
41490
41491 diff --git a/drivers/staging/dream/qdsp5/adsp_driver.c b/drivers/staging/dream/qdsp5/adsp_driver.c
41492 index e55a0db..577b776 100644
41493 --- a/drivers/staging/dream/qdsp5/adsp_driver.c
41494 +++ b/drivers/staging/dream/qdsp5/adsp_driver.c
41495 @@ -576,7 +576,7 @@ static struct adsp_device *inode_to_device(struct inode *inode)
41496 static dev_t adsp_devno;
41497 static struct class *adsp_class;
41498
41499 -static struct file_operations adsp_fops = {
41500 +static const struct file_operations adsp_fops = {
41501 .owner = THIS_MODULE,
41502 .open = adsp_open,
41503 .unlocked_ioctl = adsp_ioctl,
41504 diff --git a/drivers/staging/dream/qdsp5/audio_aac.c b/drivers/staging/dream/qdsp5/audio_aac.c
41505 index ad2390f..4116ee8 100644
41506 --- a/drivers/staging/dream/qdsp5/audio_aac.c
41507 +++ b/drivers/staging/dream/qdsp5/audio_aac.c
41508 @@ -1022,7 +1022,7 @@ done:
41509 return rc;
41510 }
41511
41512 -static struct file_operations audio_aac_fops = {
41513 +static const struct file_operations audio_aac_fops = {
41514 .owner = THIS_MODULE,
41515 .open = audio_open,
41516 .release = audio_release,
41517 diff --git a/drivers/staging/dream/qdsp5/audio_amrnb.c b/drivers/staging/dream/qdsp5/audio_amrnb.c
41518 index cd818a5..870b37b 100644
41519 --- a/drivers/staging/dream/qdsp5/audio_amrnb.c
41520 +++ b/drivers/staging/dream/qdsp5/audio_amrnb.c
41521 @@ -833,7 +833,7 @@ done:
41522 return rc;
41523 }
41524
41525 -static struct file_operations audio_amrnb_fops = {
41526 +static const struct file_operations audio_amrnb_fops = {
41527 .owner = THIS_MODULE,
41528 .open = audamrnb_open,
41529 .release = audamrnb_release,
41530 diff --git a/drivers/staging/dream/qdsp5/audio_evrc.c b/drivers/staging/dream/qdsp5/audio_evrc.c
41531 index 4b43e18..cedafda 100644
41532 --- a/drivers/staging/dream/qdsp5/audio_evrc.c
41533 +++ b/drivers/staging/dream/qdsp5/audio_evrc.c
41534 @@ -805,7 +805,7 @@ dma_fail:
41535 return rc;
41536 }
41537
41538 -static struct file_operations audio_evrc_fops = {
41539 +static const struct file_operations audio_evrc_fops = {
41540 .owner = THIS_MODULE,
41541 .open = audevrc_open,
41542 .release = audevrc_release,
41543 diff --git a/drivers/staging/dream/qdsp5/audio_in.c b/drivers/staging/dream/qdsp5/audio_in.c
41544 index 3d950a2..9431118 100644
41545 --- a/drivers/staging/dream/qdsp5/audio_in.c
41546 +++ b/drivers/staging/dream/qdsp5/audio_in.c
41547 @@ -913,7 +913,7 @@ static int audpre_open(struct inode *inode, struct file *file)
41548 return 0;
41549 }
41550
41551 -static struct file_operations audio_fops = {
41552 +static const struct file_operations audio_fops = {
41553 .owner = THIS_MODULE,
41554 .open = audio_in_open,
41555 .release = audio_in_release,
41556 @@ -922,7 +922,7 @@ static struct file_operations audio_fops = {
41557 .unlocked_ioctl = audio_in_ioctl,
41558 };
41559
41560 -static struct file_operations audpre_fops = {
41561 +static const struct file_operations audpre_fops = {
41562 .owner = THIS_MODULE,
41563 .open = audpre_open,
41564 .unlocked_ioctl = audpre_ioctl,
41565 diff --git a/drivers/staging/dream/qdsp5/audio_mp3.c b/drivers/staging/dream/qdsp5/audio_mp3.c
41566 index b95574f..286c2f4 100644
41567 --- a/drivers/staging/dream/qdsp5/audio_mp3.c
41568 +++ b/drivers/staging/dream/qdsp5/audio_mp3.c
41569 @@ -941,7 +941,7 @@ done:
41570 return rc;
41571 }
41572
41573 -static struct file_operations audio_mp3_fops = {
41574 +static const struct file_operations audio_mp3_fops = {
41575 .owner = THIS_MODULE,
41576 .open = audio_open,
41577 .release = audio_release,
41578 diff --git a/drivers/staging/dream/qdsp5/audio_out.c b/drivers/staging/dream/qdsp5/audio_out.c
41579 index d1adcf6..f8f9833 100644
41580 --- a/drivers/staging/dream/qdsp5/audio_out.c
41581 +++ b/drivers/staging/dream/qdsp5/audio_out.c
41582 @@ -810,7 +810,7 @@ static int audpp_open(struct inode *inode, struct file *file)
41583 return 0;
41584 }
41585
41586 -static struct file_operations audio_fops = {
41587 +static const struct file_operations audio_fops = {
41588 .owner = THIS_MODULE,
41589 .open = audio_open,
41590 .release = audio_release,
41591 @@ -819,7 +819,7 @@ static struct file_operations audio_fops = {
41592 .unlocked_ioctl = audio_ioctl,
41593 };
41594
41595 -static struct file_operations audpp_fops = {
41596 +static const struct file_operations audpp_fops = {
41597 .owner = THIS_MODULE,
41598 .open = audpp_open,
41599 .unlocked_ioctl = audpp_ioctl,
41600 diff --git a/drivers/staging/dream/qdsp5/audio_qcelp.c b/drivers/staging/dream/qdsp5/audio_qcelp.c
41601 index f0f50e3..f6b9dbc 100644
41602 --- a/drivers/staging/dream/qdsp5/audio_qcelp.c
41603 +++ b/drivers/staging/dream/qdsp5/audio_qcelp.c
41604 @@ -816,7 +816,7 @@ err:
41605 return rc;
41606 }
41607
41608 -static struct file_operations audio_qcelp_fops = {
41609 +static const struct file_operations audio_qcelp_fops = {
41610 .owner = THIS_MODULE,
41611 .open = audqcelp_open,
41612 .release = audqcelp_release,
41613 diff --git a/drivers/staging/dream/qdsp5/snd.c b/drivers/staging/dream/qdsp5/snd.c
41614 index 037d7ff..5469ec3 100644
41615 --- a/drivers/staging/dream/qdsp5/snd.c
41616 +++ b/drivers/staging/dream/qdsp5/snd.c
41617 @@ -242,7 +242,7 @@ err:
41618 return rc;
41619 }
41620
41621 -static struct file_operations snd_fops = {
41622 +static const struct file_operations snd_fops = {
41623 .owner = THIS_MODULE,
41624 .open = snd_open,
41625 .release = snd_release,
41626 diff --git a/drivers/staging/dream/smd/smd_qmi.c b/drivers/staging/dream/smd/smd_qmi.c
41627 index d4e7d88..0ea632a 100644
41628 --- a/drivers/staging/dream/smd/smd_qmi.c
41629 +++ b/drivers/staging/dream/smd/smd_qmi.c
41630 @@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip, struct file *fp)
41631 return 0;
41632 }
41633
41634 -static struct file_operations qmi_fops = {
41635 +static const struct file_operations qmi_fops = {
41636 .owner = THIS_MODULE,
41637 .read = qmi_read,
41638 .write = qmi_write,
41639 diff --git a/drivers/staging/dream/smd/smd_rpcrouter_device.c b/drivers/staging/dream/smd/smd_rpcrouter_device.c
41640 index cd3910b..ff053d3 100644
41641 --- a/drivers/staging/dream/smd/smd_rpcrouter_device.c
41642 +++ b/drivers/staging/dream/smd/smd_rpcrouter_device.c
41643 @@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file *filp, unsigned int cmd,
41644 return rc;
41645 }
41646
41647 -static struct file_operations rpcrouter_server_fops = {
41648 +static const struct file_operations rpcrouter_server_fops = {
41649 .owner = THIS_MODULE,
41650 .open = rpcrouter_open,
41651 .release = rpcrouter_release,
41652 @@ -224,7 +224,7 @@ static struct file_operations rpcrouter_server_fops = {
41653 .unlocked_ioctl = rpcrouter_ioctl,
41654 };
41655
41656 -static struct file_operations rpcrouter_router_fops = {
41657 +static const struct file_operations rpcrouter_router_fops = {
41658 .owner = THIS_MODULE,
41659 .open = rpcrouter_open,
41660 .release = rpcrouter_release,
41661 diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c
41662 index c24e4e0..07665be 100644
41663 --- a/drivers/staging/dst/dcore.c
41664 +++ b/drivers/staging/dst/dcore.c
41665 @@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendisk *disk, fmode_t mode)
41666 return 0;
41667 }
41668
41669 -static struct block_device_operations dst_blk_ops = {
41670 +static const struct block_device_operations dst_blk_ops = {
41671 .open = dst_bdev_open,
41672 .release = dst_bdev_release,
41673 .owner = THIS_MODULE,
41674 @@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(struct dst_ctl *ctl,
41675 n->size = ctl->size;
41676
41677 atomic_set(&n->refcnt, 1);
41678 - atomic_long_set(&n->gen, 0);
41679 + atomic_long_set_unchecked(&n->gen, 0);
41680 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
41681
41682 err = dst_node_sysfs_init(n);
41683 diff --git a/drivers/staging/dst/trans.c b/drivers/staging/dst/trans.c
41684 index 557d372..8d84422 100644
41685 --- a/drivers/staging/dst/trans.c
41686 +++ b/drivers/staging/dst/trans.c
41687 @@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n, struct bio *bio)
41688 t->error = 0;
41689 t->retries = 0;
41690 atomic_set(&t->refcnt, 1);
41691 - t->gen = atomic_long_inc_return(&n->gen);
41692 + t->gen = atomic_long_inc_return_unchecked(&n->gen);
41693
41694 t->enc = bio_data_dir(bio);
41695 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
41696 diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c
41697 index 94f7752..d051514 100644
41698 --- a/drivers/staging/et131x/et1310_tx.c
41699 +++ b/drivers/staging/et131x/et1310_tx.c
41700 @@ -710,11 +710,11 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
41701 struct net_device_stats *stats = &etdev->net_stats;
41702
41703 if (pMpTcb->Flags & fMP_DEST_BROAD)
41704 - atomic_inc(&etdev->Stats.brdcstxmt);
41705 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
41706 else if (pMpTcb->Flags & fMP_DEST_MULTI)
41707 - atomic_inc(&etdev->Stats.multixmt);
41708 + atomic_inc_unchecked(&etdev->Stats.multixmt);
41709 else
41710 - atomic_inc(&etdev->Stats.unixmt);
41711 + atomic_inc_unchecked(&etdev->Stats.unixmt);
41712
41713 if (pMpTcb->Packet) {
41714 stats->tx_bytes += pMpTcb->Packet->len;
41715 diff --git a/drivers/staging/et131x/et131x_adapter.h b/drivers/staging/et131x/et131x_adapter.h
41716 index 1dfe06f..f469b4d 100644
41717 --- a/drivers/staging/et131x/et131x_adapter.h
41718 +++ b/drivers/staging/et131x/et131x_adapter.h
41719 @@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
41720 * operations
41721 */
41722 u32 unircv; /* # multicast packets received */
41723 - atomic_t unixmt; /* # multicast packets for Tx */
41724 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
41725 u32 multircv; /* # multicast packets received */
41726 - atomic_t multixmt; /* # multicast packets for Tx */
41727 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
41728 u32 brdcstrcv; /* # broadcast packets received */
41729 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
41730 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
41731 u32 norcvbuf; /* # Rx packets discarded */
41732 u32 noxmtbuf; /* # Tx packets discarded */
41733
41734 diff --git a/drivers/staging/go7007/go7007-v4l2.c b/drivers/staging/go7007/go7007-v4l2.c
41735 index 4bd353a..e28f455 100644
41736 --- a/drivers/staging/go7007/go7007-v4l2.c
41737 +++ b/drivers/staging/go7007/go7007-v4l2.c
41738 @@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41739 return 0;
41740 }
41741
41742 -static struct vm_operations_struct go7007_vm_ops = {
41743 +static const struct vm_operations_struct go7007_vm_ops = {
41744 .open = go7007_vm_open,
41745 .close = go7007_vm_close,
41746 .fault = go7007_vm_fault,
41747 diff --git a/drivers/staging/hv/Channel.c b/drivers/staging/hv/Channel.c
41748 index 366dc95..b974d87 100644
41749 --- a/drivers/staging/hv/Channel.c
41750 +++ b/drivers/staging/hv/Channel.c
41751 @@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vmbus_channel *Channel, void *Kbuffer,
41752
41753 DPRINT_ENTER(VMBUS);
41754
41755 - nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
41756 - atomic_inc(&gVmbusConnection.NextGpadlHandle);
41757 + nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
41758 + atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
41759
41760 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
41761 ASSERT(msgInfo != NULL);
41762 diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c
41763 index b12237f..01ae28a 100644
41764 --- a/drivers/staging/hv/Hv.c
41765 +++ b/drivers/staging/hv/Hv.c
41766 @@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, void *Input, void *Output)
41767 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
41768 u32 outputAddressHi = outputAddress >> 32;
41769 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
41770 - volatile void *hypercallPage = gHvContext.HypercallPage;
41771 + volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
41772
41773 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
41774 Control, Input, Output);
41775 diff --git a/drivers/staging/hv/VmbusApi.h b/drivers/staging/hv/VmbusApi.h
41776 index d089bb1..2ebc158 100644
41777 --- a/drivers/staging/hv/VmbusApi.h
41778 +++ b/drivers/staging/hv/VmbusApi.h
41779 @@ -109,7 +109,7 @@ struct vmbus_channel_interface {
41780 u32 *GpadlHandle);
41781 int (*TeardownGpadl)(struct hv_device *device, u32 GpadlHandle);
41782 void (*GetInfo)(struct hv_device *dev, struct hv_device_info *devinfo);
41783 -};
41784 +} __no_const;
41785
41786 /* Base driver object */
41787 struct hv_driver {
41788 diff --git a/drivers/staging/hv/VmbusPrivate.h b/drivers/staging/hv/VmbusPrivate.h
41789 index 5a37cce..6ecc88c 100644
41790 --- a/drivers/staging/hv/VmbusPrivate.h
41791 +++ b/drivers/staging/hv/VmbusPrivate.h
41792 @@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
41793 struct VMBUS_CONNECTION {
41794 enum VMBUS_CONNECT_STATE ConnectState;
41795
41796 - atomic_t NextGpadlHandle;
41797 + atomic_unchecked_t NextGpadlHandle;
41798
41799 /*
41800 * Represents channel interrupts. Each bit position represents a
41801 diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
41802 index 871a202..ca50ddf 100644
41803 --- a/drivers/staging/hv/blkvsc_drv.c
41804 +++ b/drivers/staging/hv/blkvsc_drv.c
41805 @@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;
41806 /* The one and only one */
41807 static struct blkvsc_driver_context g_blkvsc_drv;
41808
41809 -static struct block_device_operations block_ops = {
41810 +static const struct block_device_operations block_ops = {
41811 .owner = THIS_MODULE,
41812 .open = blkvsc_open,
41813 .release = blkvsc_release,
41814 diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c
41815 index 6acc49a..fbc8d46 100644
41816 --- a/drivers/staging/hv/vmbus_drv.c
41817 +++ b/drivers/staging/hv/vmbus_drv.c
41818 @@ -532,7 +532,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
41819 to_device_context(root_device_obj);
41820 struct device_context *child_device_ctx =
41821 to_device_context(child_device_obj);
41822 - static atomic_t device_num = ATOMIC_INIT(0);
41823 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
41824
41825 DPRINT_ENTER(VMBUS_DRV);
41826
41827 @@ -541,7 +541,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
41828
41829 /* Set the device name. Otherwise, device_register() will fail. */
41830 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
41831 - atomic_inc_return(&device_num));
41832 + atomic_inc_return_unchecked(&device_num));
41833
41834 /* The new device belongs to this bus */
41835 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
41836 diff --git a/drivers/staging/iio/ring_generic.h b/drivers/staging/iio/ring_generic.h
41837 index d926189..17b19fd 100644
41838 --- a/drivers/staging/iio/ring_generic.h
41839 +++ b/drivers/staging/iio/ring_generic.h
41840 @@ -87,7 +87,7 @@ struct iio_ring_access_funcs {
41841
41842 int (*is_enabled)(struct iio_ring_buffer *ring);
41843 int (*enable)(struct iio_ring_buffer *ring);
41844 -};
41845 +} __no_const;
41846
41847 /**
41848 * struct iio_ring_buffer - general ring buffer structure
41849 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
41850 index 1b237b7..88c624e 100644
41851 --- a/drivers/staging/octeon/ethernet-rx.c
41852 +++ b/drivers/staging/octeon/ethernet-rx.c
41853 @@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long unused)
41854 /* Increment RX stats for virtual ports */
41855 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
41856 #ifdef CONFIG_64BIT
41857 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
41858 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
41859 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
41860 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
41861 #else
41862 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
41863 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
41864 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
41865 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
41866 #endif
41867 }
41868 netif_receive_skb(skb);
41869 @@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long unused)
41870 dev->name);
41871 */
41872 #ifdef CONFIG_64BIT
41873 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
41874 + atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
41875 #else
41876 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
41877 + atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
41878 #endif
41879 dev_kfree_skb_irq(skb);
41880 }
41881 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
41882 index 492c502..d9909f1 100644
41883 --- a/drivers/staging/octeon/ethernet.c
41884 +++ b/drivers/staging/octeon/ethernet.c
41885 @@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
41886 * since the RX tasklet also increments it.
41887 */
41888 #ifdef CONFIG_64BIT
41889 - atomic64_add(rx_status.dropped_packets,
41890 - (atomic64_t *)&priv->stats.rx_dropped);
41891 + atomic64_add_unchecked(rx_status.dropped_packets,
41892 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
41893 #else
41894 - atomic_add(rx_status.dropped_packets,
41895 - (atomic_t *)&priv->stats.rx_dropped);
41896 + atomic_add_unchecked(rx_status.dropped_packets,
41897 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
41898 #endif
41899 }
41900
41901 diff --git a/drivers/staging/otus/80211core/pub_zfi.h b/drivers/staging/otus/80211core/pub_zfi.h
41902 index a35bd5d..28fff45 100644
41903 --- a/drivers/staging/otus/80211core/pub_zfi.h
41904 +++ b/drivers/staging/otus/80211core/pub_zfi.h
41905 @@ -531,7 +531,7 @@ struct zsCbFuncTbl
41906 u8_t (*zfcbClassifyTxPacket)(zdev_t* dev, zbuf_t* buf);
41907
41908 void (*zfcbHwWatchDogNotify)(zdev_t* dev);
41909 -};
41910 +} __no_const;
41911
41912 extern void zfZeroMemory(u8_t* va, u16_t length);
41913 #define ZM_INIT_CB_FUNC_TABLE(p) zfZeroMemory((u8_t *)p, sizeof(struct zsCbFuncTbl));
41914 diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
41915 index c39a25f..696f5aa 100644
41916 --- a/drivers/staging/panel/panel.c
41917 +++ b/drivers/staging/panel/panel.c
41918 @@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *inode, struct file *file)
41919 return 0;
41920 }
41921
41922 -static struct file_operations lcd_fops = {
41923 +static const struct file_operations lcd_fops = {
41924 .write = lcd_write,
41925 .open = lcd_open,
41926 .release = lcd_release,
41927 @@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *inode, struct file *file)
41928 return 0;
41929 }
41930
41931 -static struct file_operations keypad_fops = {
41932 +static const struct file_operations keypad_fops = {
41933 .read = keypad_read, /* read */
41934 .open = keypad_open, /* open */
41935 .release = keypad_release, /* close */
41936 diff --git a/drivers/staging/phison/phison.c b/drivers/staging/phison/phison.c
41937 index 270ebcb..37e46af 100644
41938 --- a/drivers/staging/phison/phison.c
41939 +++ b/drivers/staging/phison/phison.c
41940 @@ -43,7 +43,7 @@ static struct scsi_host_template phison_sht = {
41941 ATA_BMDMA_SHT(DRV_NAME),
41942 };
41943
41944 -static struct ata_port_operations phison_ops = {
41945 +static const struct ata_port_operations phison_ops = {
41946 .inherits = &ata_bmdma_port_ops,
41947 .prereset = phison_pre_reset,
41948 };
41949 diff --git a/drivers/staging/poch/poch.c b/drivers/staging/poch/poch.c
41950 index 2eb8e3d..57616a7 100644
41951 --- a/drivers/staging/poch/poch.c
41952 +++ b/drivers/staging/poch/poch.c
41953 @@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inode, struct file *filp,
41954 return 0;
41955 }
41956
41957 -static struct file_operations poch_fops = {
41958 +static const struct file_operations poch_fops = {
41959 .owner = THIS_MODULE,
41960 .open = poch_open,
41961 .release = poch_release,
41962 diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
41963 index c94de31..19402bc 100644
41964 --- a/drivers/staging/pohmelfs/inode.c
41965 +++ b/drivers/staging/pohmelfs/inode.c
41966 @@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
41967 mutex_init(&psb->mcache_lock);
41968 psb->mcache_root = RB_ROOT;
41969 psb->mcache_timeout = msecs_to_jiffies(5000);
41970 - atomic_long_set(&psb->mcache_gen, 0);
41971 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
41972
41973 psb->trans_max_pages = 100;
41974
41975 @@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
41976 INIT_LIST_HEAD(&psb->crypto_ready_list);
41977 INIT_LIST_HEAD(&psb->crypto_active_list);
41978
41979 - atomic_set(&psb->trans_gen, 1);
41980 + atomic_set_unchecked(&psb->trans_gen, 1);
41981 atomic_long_set(&psb->total_inodes, 0);
41982
41983 mutex_init(&psb->state_lock);
41984 diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
41985 index e22665c..a2a9390 100644
41986 --- a/drivers/staging/pohmelfs/mcache.c
41987 +++ b/drivers/staging/pohmelfs/mcache.c
41988 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
41989 m->data = data;
41990 m->start = start;
41991 m->size = size;
41992 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
41993 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
41994
41995 mutex_lock(&psb->mcache_lock);
41996 err = pohmelfs_mcache_insert(psb, m);
41997 diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
41998 index 623a07d..4035c19 100644
41999 --- a/drivers/staging/pohmelfs/netfs.h
42000 +++ b/drivers/staging/pohmelfs/netfs.h
42001 @@ -570,14 +570,14 @@ struct pohmelfs_config;
42002 struct pohmelfs_sb {
42003 struct rb_root mcache_root;
42004 struct mutex mcache_lock;
42005 - atomic_long_t mcache_gen;
42006 + atomic_long_unchecked_t mcache_gen;
42007 unsigned long mcache_timeout;
42008
42009 unsigned int idx;
42010
42011 unsigned int trans_retries;
42012
42013 - atomic_t trans_gen;
42014 + atomic_unchecked_t trans_gen;
42015
42016 unsigned int crypto_attached_size;
42017 unsigned int crypto_align_size;
42018 diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
42019 index 36a2535..0591bf4 100644
42020 --- a/drivers/staging/pohmelfs/trans.c
42021 +++ b/drivers/staging/pohmelfs/trans.c
42022 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
42023 int err;
42024 struct netfs_cmd *cmd = t->iovec.iov_base;
42025
42026 - t->gen = atomic_inc_return(&psb->trans_gen);
42027 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
42028
42029 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
42030 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
42031 diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
42032 index f890a16..509ece8 100644
42033 --- a/drivers/staging/sep/sep_driver.c
42034 +++ b/drivers/staging/sep/sep_driver.c
42035 @@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver = {
42036 static dev_t sep_devno;
42037
42038 /* the files operations structure of the driver */
42039 -static struct file_operations sep_file_operations = {
42040 +static const struct file_operations sep_file_operations = {
42041 .owner = THIS_MODULE,
42042 .ioctl = sep_ioctl,
42043 .poll = sep_poll,
42044 diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
42045 index 5e16bc3..7655b10 100644
42046 --- a/drivers/staging/usbip/usbip_common.h
42047 +++ b/drivers/staging/usbip/usbip_common.h
42048 @@ -374,7 +374,7 @@ struct usbip_device {
42049 void (*shutdown)(struct usbip_device *);
42050 void (*reset)(struct usbip_device *);
42051 void (*unusable)(struct usbip_device *);
42052 - } eh_ops;
42053 + } __no_const eh_ops;
42054 };
42055
42056
42057 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
42058 index 57f7946..d9df23d 100644
42059 --- a/drivers/staging/usbip/vhci.h
42060 +++ b/drivers/staging/usbip/vhci.h
42061 @@ -92,7 +92,7 @@ struct vhci_hcd {
42062 unsigned resuming:1;
42063 unsigned long re_timeout;
42064
42065 - atomic_t seqnum;
42066 + atomic_unchecked_t seqnum;
42067
42068 /*
42069 * NOTE:
42070 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
42071 index 20cd7db..c2693ff 100644
42072 --- a/drivers/staging/usbip/vhci_hcd.c
42073 +++ b/drivers/staging/usbip/vhci_hcd.c
42074 @@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
42075 return;
42076 }
42077
42078 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
42079 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42080 if (priv->seqnum == 0xffff)
42081 usbip_uinfo("seqnum max\n");
42082
42083 @@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
42084 return -ENOMEM;
42085 }
42086
42087 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
42088 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42089 if (unlink->seqnum == 0xffff)
42090 usbip_uinfo("seqnum max\n");
42091
42092 @@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hcd)
42093 vdev->rhport = rhport;
42094 }
42095
42096 - atomic_set(&vhci->seqnum, 0);
42097 + atomic_set_unchecked(&vhci->seqnum, 0);
42098 spin_lock_init(&vhci->lock);
42099
42100
42101 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
42102 index 7fd76fe..673695a 100644
42103 --- a/drivers/staging/usbip/vhci_rx.c
42104 +++ b/drivers/staging/usbip/vhci_rx.c
42105 @@ -79,7 +79,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
42106 usbip_uerr("cannot find a urb of seqnum %u\n",
42107 pdu->base.seqnum);
42108 usbip_uinfo("max seqnum %d\n",
42109 - atomic_read(&the_controller->seqnum));
42110 + atomic_read_unchecked(&the_controller->seqnum));
42111 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
42112 return;
42113 }
42114 diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
42115 index 7891288..8e31300 100644
42116 --- a/drivers/staging/vme/devices/vme_user.c
42117 +++ b/drivers/staging/vme/devices/vme_user.c
42118 @@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *, struct file *, unsigned int,
42119 static int __init vme_user_probe(struct device *, int, int);
42120 static int __exit vme_user_remove(struct device *, int, int);
42121
42122 -static struct file_operations vme_user_fops = {
42123 +static const struct file_operations vme_user_fops = {
42124 .open = vme_user_open,
42125 .release = vme_user_release,
42126 .read = vme_user_read,
42127 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
42128 index 58abf44..00c1fc8 100644
42129 --- a/drivers/staging/vt6655/hostap.c
42130 +++ b/drivers/staging/vt6655/hostap.c
42131 @@ -84,7 +84,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42132 PSDevice apdev_priv;
42133 struct net_device *dev = pDevice->dev;
42134 int ret;
42135 - const struct net_device_ops apdev_netdev_ops = {
42136 + net_device_ops_no_const apdev_netdev_ops = {
42137 .ndo_start_xmit = pDevice->tx_80211,
42138 };
42139
42140 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
42141 index 0c8267a..db1f363 100644
42142 --- a/drivers/staging/vt6656/hostap.c
42143 +++ b/drivers/staging/vt6656/hostap.c
42144 @@ -86,7 +86,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42145 PSDevice apdev_priv;
42146 struct net_device *dev = pDevice->dev;
42147 int ret;
42148 - const struct net_device_ops apdev_netdev_ops = {
42149 + net_device_ops_no_const apdev_netdev_ops = {
42150 .ndo_start_xmit = pDevice->tx_80211,
42151 };
42152
42153 diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
42154 index 925678b..da7f5ed 100644
42155 --- a/drivers/staging/wlan-ng/hfa384x_usb.c
42156 +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
42157 @@ -205,7 +205,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
42158
42159 struct usbctlx_completor {
42160 int (*complete) (struct usbctlx_completor *);
42161 -};
42162 +} __no_const;
42163 typedef struct usbctlx_completor usbctlx_completor_t;
42164
42165 static int
42166 diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
42167 index 40de151..924f268 100644
42168 --- a/drivers/telephony/ixj.c
42169 +++ b/drivers/telephony/ixj.c
42170 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
42171 bool mContinue;
42172 char *pIn, *pOut;
42173
42174 + pax_track_stack();
42175 +
42176 if (!SCI_Prepare(j))
42177 return 0;
42178
42179 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
42180 index e941367..b631f5a 100644
42181 --- a/drivers/uio/uio.c
42182 +++ b/drivers/uio/uio.c
42183 @@ -23,6 +23,7 @@
42184 #include <linux/string.h>
42185 #include <linux/kobject.h>
42186 #include <linux/uio_driver.h>
42187 +#include <asm/local.h>
42188
42189 #define UIO_MAX_DEVICES 255
42190
42191 @@ -30,10 +31,10 @@ struct uio_device {
42192 struct module *owner;
42193 struct device *dev;
42194 int minor;
42195 - atomic_t event;
42196 + atomic_unchecked_t event;
42197 struct fasync_struct *async_queue;
42198 wait_queue_head_t wait;
42199 - int vma_count;
42200 + local_t vma_count;
42201 struct uio_info *info;
42202 struct kobject *map_dir;
42203 struct kobject *portio_dir;
42204 @@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr,
42205 return entry->show(mem, buf);
42206 }
42207
42208 -static struct sysfs_ops map_sysfs_ops = {
42209 +static const struct sysfs_ops map_sysfs_ops = {
42210 .show = map_type_show,
42211 };
42212
42213 @@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct kobject *kobj, struct attribute *attr,
42214 return entry->show(port, buf);
42215 }
42216
42217 -static struct sysfs_ops portio_sysfs_ops = {
42218 +static const struct sysfs_ops portio_sysfs_ops = {
42219 .show = portio_type_show,
42220 };
42221
42222 @@ -255,7 +256,7 @@ static ssize_t show_event(struct device *dev,
42223 struct uio_device *idev = dev_get_drvdata(dev);
42224 if (idev)
42225 return sprintf(buf, "%u\n",
42226 - (unsigned int)atomic_read(&idev->event));
42227 + (unsigned int)atomic_read_unchecked(&idev->event));
42228 else
42229 return -ENODEV;
42230 }
42231 @@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *info)
42232 {
42233 struct uio_device *idev = info->uio_dev;
42234
42235 - atomic_inc(&idev->event);
42236 + atomic_inc_unchecked(&idev->event);
42237 wake_up_interruptible(&idev->wait);
42238 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
42239 }
42240 @@ -477,7 +478,7 @@ static int uio_open(struct inode *inode, struct file *filep)
42241 }
42242
42243 listener->dev = idev;
42244 - listener->event_count = atomic_read(&idev->event);
42245 + listener->event_count = atomic_read_unchecked(&idev->event);
42246 filep->private_data = listener;
42247
42248 if (idev->info->open) {
42249 @@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
42250 return -EIO;
42251
42252 poll_wait(filep, &idev->wait, wait);
42253 - if (listener->event_count != atomic_read(&idev->event))
42254 + if (listener->event_count != atomic_read_unchecked(&idev->event))
42255 return POLLIN | POLLRDNORM;
42256 return 0;
42257 }
42258 @@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
42259 do {
42260 set_current_state(TASK_INTERRUPTIBLE);
42261
42262 - event_count = atomic_read(&idev->event);
42263 + event_count = atomic_read_unchecked(&idev->event);
42264 if (event_count != listener->event_count) {
42265 if (copy_to_user(buf, &event_count, count))
42266 retval = -EFAULT;
42267 @@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
42268 static void uio_vma_open(struct vm_area_struct *vma)
42269 {
42270 struct uio_device *idev = vma->vm_private_data;
42271 - idev->vma_count++;
42272 + local_inc(&idev->vma_count);
42273 }
42274
42275 static void uio_vma_close(struct vm_area_struct *vma)
42276 {
42277 struct uio_device *idev = vma->vm_private_data;
42278 - idev->vma_count--;
42279 + local_dec(&idev->vma_count);
42280 }
42281
42282 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42283 @@ -840,7 +841,7 @@ int __uio_register_device(struct module *owner,
42284 idev->owner = owner;
42285 idev->info = info;
42286 init_waitqueue_head(&idev->wait);
42287 - atomic_set(&idev->event, 0);
42288 + atomic_set_unchecked(&idev->event, 0);
42289
42290 ret = uio_get_minor(idev);
42291 if (ret)
42292 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
42293 index fbea856..06efea6 100644
42294 --- a/drivers/usb/atm/usbatm.c
42295 +++ b/drivers/usb/atm/usbatm.c
42296 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42297 if (printk_ratelimit())
42298 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
42299 __func__, vpi, vci);
42300 - atomic_inc(&vcc->stats->rx_err);
42301 + atomic_inc_unchecked(&vcc->stats->rx_err);
42302 return;
42303 }
42304
42305 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42306 if (length > ATM_MAX_AAL5_PDU) {
42307 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
42308 __func__, length, vcc);
42309 - atomic_inc(&vcc->stats->rx_err);
42310 + atomic_inc_unchecked(&vcc->stats->rx_err);
42311 goto out;
42312 }
42313
42314 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42315 if (sarb->len < pdu_length) {
42316 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
42317 __func__, pdu_length, sarb->len, vcc);
42318 - atomic_inc(&vcc->stats->rx_err);
42319 + atomic_inc_unchecked(&vcc->stats->rx_err);
42320 goto out;
42321 }
42322
42323 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
42324 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
42325 __func__, vcc);
42326 - atomic_inc(&vcc->stats->rx_err);
42327 + atomic_inc_unchecked(&vcc->stats->rx_err);
42328 goto out;
42329 }
42330
42331 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42332 if (printk_ratelimit())
42333 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
42334 __func__, length);
42335 - atomic_inc(&vcc->stats->rx_drop);
42336 + atomic_inc_unchecked(&vcc->stats->rx_drop);
42337 goto out;
42338 }
42339
42340 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42341
42342 vcc->push(vcc, skb);
42343
42344 - atomic_inc(&vcc->stats->rx);
42345 + atomic_inc_unchecked(&vcc->stats->rx);
42346 out:
42347 skb_trim(sarb, 0);
42348 }
42349 @@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned long data)
42350 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
42351
42352 usbatm_pop(vcc, skb);
42353 - atomic_inc(&vcc->stats->tx);
42354 + atomic_inc_unchecked(&vcc->stats->tx);
42355
42356 skb = skb_dequeue(&instance->sndqueue);
42357 }
42358 @@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
42359 if (!left--)
42360 return sprintf(page,
42361 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
42362 - atomic_read(&atm_dev->stats.aal5.tx),
42363 - atomic_read(&atm_dev->stats.aal5.tx_err),
42364 - atomic_read(&atm_dev->stats.aal5.rx),
42365 - atomic_read(&atm_dev->stats.aal5.rx_err),
42366 - atomic_read(&atm_dev->stats.aal5.rx_drop));
42367 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
42368 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
42369 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
42370 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
42371 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
42372
42373 if (!left--) {
42374 if (instance->disconnected)
42375 diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
42376 index 3e564bf..949b448 100644
42377 --- a/drivers/usb/class/cdc-wdm.c
42378 +++ b/drivers/usb/class/cdc-wdm.c
42379 @@ -314,7 +314,7 @@ static ssize_t wdm_write
42380 if (r < 0)
42381 goto outnp;
42382
42383 - if (!file->f_flags && O_NONBLOCK)
42384 + if (!(file->f_flags & O_NONBLOCK))
42385 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
42386 &desc->flags));
42387 else
42388 diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
42389 index 24e6205..fe5a5d4 100644
42390 --- a/drivers/usb/core/hcd.c
42391 +++ b/drivers/usb/core/hcd.c
42392 @@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutdown);
42393
42394 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
42395
42396 -struct usb_mon_operations *mon_ops;
42397 +const struct usb_mon_operations *mon_ops;
42398
42399 /*
42400 * The registration is unlocked.
42401 @@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
42402 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
42403 */
42404
42405 -int usb_mon_register (struct usb_mon_operations *ops)
42406 +int usb_mon_register (const struct usb_mon_operations *ops)
42407 {
42408
42409 if (mon_ops)
42410 diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
42411 index bcbe104..9cfd1c6 100644
42412 --- a/drivers/usb/core/hcd.h
42413 +++ b/drivers/usb/core/hcd.h
42414 @@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) { }
42415 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
42416
42417 struct usb_mon_operations {
42418 - void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
42419 - void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
42420 - void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
42421 + void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
42422 + void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
42423 + void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
42424 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
42425 };
42426
42427 -extern struct usb_mon_operations *mon_ops;
42428 +extern const struct usb_mon_operations *mon_ops;
42429
42430 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
42431 {
42432 @@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb,
42433 (*mon_ops->urb_complete)(bus, urb, status);
42434 }
42435
42436 -int usb_mon_register(struct usb_mon_operations *ops);
42437 +int usb_mon_register(const struct usb_mon_operations *ops);
42438 void usb_mon_deregister(void);
42439
42440 #else
42441 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
42442 index 409cc94..a673bad 100644
42443 --- a/drivers/usb/core/message.c
42444 +++ b/drivers/usb/core/message.c
42445 @@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
42446 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
42447 if (buf) {
42448 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
42449 - if (len > 0) {
42450 - smallbuf = kmalloc(++len, GFP_NOIO);
42451 + if (len++ > 0) {
42452 + smallbuf = kmalloc(len, GFP_NOIO);
42453 if (!smallbuf)
42454 return buf;
42455 memcpy(smallbuf, buf, len);
42456 diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
42457 index 62ff5e7..530b74e 100644
42458 --- a/drivers/usb/misc/appledisplay.c
42459 +++ b/drivers/usb/misc/appledisplay.c
42460 @@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
42461 return pdata->msgdata[1];
42462 }
42463
42464 -static struct backlight_ops appledisplay_bl_data = {
42465 +static const struct backlight_ops appledisplay_bl_data = {
42466 .get_brightness = appledisplay_bl_get_brightness,
42467 .update_status = appledisplay_bl_update_status,
42468 };
42469 diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
42470 index e0c2db3..bd8cb66 100644
42471 --- a/drivers/usb/mon/mon_main.c
42472 +++ b/drivers/usb/mon/mon_main.c
42473 @@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
42474 /*
42475 * Ops
42476 */
42477 -static struct usb_mon_operations mon_ops_0 = {
42478 +static const struct usb_mon_operations mon_ops_0 = {
42479 .urb_submit = mon_submit,
42480 .urb_submit_error = mon_submit_error,
42481 .urb_complete = mon_complete,
42482 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
42483 index d6bea3e..60b250e 100644
42484 --- a/drivers/usb/wusbcore/wa-hc.h
42485 +++ b/drivers/usb/wusbcore/wa-hc.h
42486 @@ -192,7 +192,7 @@ struct wahc {
42487 struct list_head xfer_delayed_list;
42488 spinlock_t xfer_list_lock;
42489 struct work_struct xfer_work;
42490 - atomic_t xfer_id_count;
42491 + atomic_unchecked_t xfer_id_count;
42492 };
42493
42494
42495 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
42496 INIT_LIST_HEAD(&wa->xfer_delayed_list);
42497 spin_lock_init(&wa->xfer_list_lock);
42498 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
42499 - atomic_set(&wa->xfer_id_count, 1);
42500 + atomic_set_unchecked(&wa->xfer_id_count, 1);
42501 }
42502
42503 /**
42504 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
42505 index 613a5fc..3174865 100644
42506 --- a/drivers/usb/wusbcore/wa-xfer.c
42507 +++ b/drivers/usb/wusbcore/wa-xfer.c
42508 @@ -293,7 +293,7 @@ out:
42509 */
42510 static void wa_xfer_id_init(struct wa_xfer *xfer)
42511 {
42512 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
42513 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
42514 }
42515
42516 /*
42517 diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c
42518 index aa42fce..f8a828c 100644
42519 --- a/drivers/uwb/wlp/messages.c
42520 +++ b/drivers/uwb/wlp/messages.c
42521 @@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct sk_buff *skb)
42522 size_t len = skb->len;
42523 size_t used;
42524 ssize_t result;
42525 - struct wlp_nonce enonce, rnonce;
42526 + struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
42527 enum wlp_assc_error assc_err;
42528 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
42529 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
42530 diff --git a/drivers/uwb/wlp/sysfs.c b/drivers/uwb/wlp/sysfs.c
42531 index 0370399..6627c94 100644
42532 --- a/drivers/uwb/wlp/sysfs.c
42533 +++ b/drivers/uwb/wlp/sysfs.c
42534 @@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobject *kobj, struct attribute *attr,
42535 return ret;
42536 }
42537
42538 -static
42539 -struct sysfs_ops wss_sysfs_ops = {
42540 +static const struct sysfs_ops wss_sysfs_ops = {
42541 .show = wlp_wss_attr_show,
42542 .store = wlp_wss_attr_store,
42543 };
42544 diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
42545 index d5e8010..5687b56 100644
42546 --- a/drivers/video/atmel_lcdfb.c
42547 +++ b/drivers/video/atmel_lcdfb.c
42548 @@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struct backlight_device *bl)
42549 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
42550 }
42551
42552 -static struct backlight_ops atmel_lcdc_bl_ops = {
42553 +static const struct backlight_ops atmel_lcdc_bl_ops = {
42554 .update_status = atmel_bl_update_status,
42555 .get_brightness = atmel_bl_get_brightness,
42556 };
42557 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
42558 index e4e4d43..66bcbcc 100644
42559 --- a/drivers/video/aty/aty128fb.c
42560 +++ b/drivers/video/aty/aty128fb.c
42561 @@ -149,7 +149,7 @@ enum {
42562 };
42563
42564 /* Must match above enum */
42565 -static const char *r128_family[] __devinitdata = {
42566 +static const char *r128_family[] __devinitconst = {
42567 "AGP",
42568 "PCI",
42569 "PRO AGP",
42570 @@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(struct backlight_device *bd)
42571 return bd->props.brightness;
42572 }
42573
42574 -static struct backlight_ops aty128_bl_data = {
42575 +static const struct backlight_ops aty128_bl_data = {
42576 .get_brightness = aty128_bl_get_brightness,
42577 .update_status = aty128_bl_update_status,
42578 };
42579 diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
42580 index 913b4a4..9295a38 100644
42581 --- a/drivers/video/aty/atyfb_base.c
42582 +++ b/drivers/video/aty/atyfb_base.c
42583 @@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct backlight_device *bd)
42584 return bd->props.brightness;
42585 }
42586
42587 -static struct backlight_ops aty_bl_data = {
42588 +static const struct backlight_ops aty_bl_data = {
42589 .get_brightness = aty_bl_get_brightness,
42590 .update_status = aty_bl_update_status,
42591 };
42592 diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
42593 index 1a056ad..221bd6a 100644
42594 --- a/drivers/video/aty/radeon_backlight.c
42595 +++ b/drivers/video/aty/radeon_backlight.c
42596 @@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(struct backlight_device *bd)
42597 return bd->props.brightness;
42598 }
42599
42600 -static struct backlight_ops radeon_bl_data = {
42601 +static const struct backlight_ops radeon_bl_data = {
42602 .get_brightness = radeon_bl_get_brightness,
42603 .update_status = radeon_bl_update_status,
42604 };
42605 diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
42606 index ad05da5..3cb2cb9 100644
42607 --- a/drivers/video/backlight/adp5520_bl.c
42608 +++ b/drivers/video/backlight/adp5520_bl.c
42609 @@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(struct backlight_device *bl)
42610 return error ? data->current_brightness : reg_val;
42611 }
42612
42613 -static struct backlight_ops adp5520_bl_ops = {
42614 +static const struct backlight_ops adp5520_bl_ops = {
42615 .update_status = adp5520_bl_update_status,
42616 .get_brightness = adp5520_bl_get_brightness,
42617 };
42618 diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
42619 index 2c3bdfc..d769b0b 100644
42620 --- a/drivers/video/backlight/adx_bl.c
42621 +++ b/drivers/video/backlight/adx_bl.c
42622 @@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct fb_info *fb)
42623 return 1;
42624 }
42625
42626 -static struct backlight_ops adx_backlight_ops = {
42627 +static const struct backlight_ops adx_backlight_ops = {
42628 .options = 0,
42629 .update_status = adx_backlight_update_status,
42630 .get_brightness = adx_backlight_get_brightness,
42631 diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
42632 index 505c082..6b6b3cc 100644
42633 --- a/drivers/video/backlight/atmel-pwm-bl.c
42634 +++ b/drivers/video/backlight/atmel-pwm-bl.c
42635 @@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
42636 return pwm_channel_enable(&pwmbl->pwmc);
42637 }
42638
42639 -static struct backlight_ops atmel_pwm_bl_ops = {
42640 +static const struct backlight_ops atmel_pwm_bl_ops = {
42641 .get_brightness = atmel_pwm_bl_get_intensity,
42642 .update_status = atmel_pwm_bl_set_intensity,
42643 };
42644 diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
42645 index 5e20e6e..89025e6 100644
42646 --- a/drivers/video/backlight/backlight.c
42647 +++ b/drivers/video/backlight/backlight.c
42648 @@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
42649 * ERR_PTR() or a pointer to the newly allocated device.
42650 */
42651 struct backlight_device *backlight_device_register(const char *name,
42652 - struct device *parent, void *devdata, struct backlight_ops *ops)
42653 + struct device *parent, void *devdata, const struct backlight_ops *ops)
42654 {
42655 struct backlight_device *new_bd;
42656 int rc;
42657 diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
42658 index 9677494..b4bcf80 100644
42659 --- a/drivers/video/backlight/corgi_lcd.c
42660 +++ b/drivers/video/backlight/corgi_lcd.c
42661 @@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit)
42662 }
42663 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
42664
42665 -static struct backlight_ops corgi_bl_ops = {
42666 +static const struct backlight_ops corgi_bl_ops = {
42667 .get_brightness = corgi_bl_get_intensity,
42668 .update_status = corgi_bl_update_status,
42669 };
42670 diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
42671 index b9fe62b..2914bf1 100644
42672 --- a/drivers/video/backlight/cr_bllcd.c
42673 +++ b/drivers/video/backlight/cr_bllcd.c
42674 @@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(struct backlight_device *bd)
42675 return intensity;
42676 }
42677
42678 -static struct backlight_ops cr_backlight_ops = {
42679 +static const struct backlight_ops cr_backlight_ops = {
42680 .get_brightness = cr_backlight_get_intensity,
42681 .update_status = cr_backlight_set_intensity,
42682 };
42683 diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
42684 index 701a108..feacfd5 100644
42685 --- a/drivers/video/backlight/da903x_bl.c
42686 +++ b/drivers/video/backlight/da903x_bl.c
42687 @@ -94,7 +94,7 @@ static int da903x_backlight_get_brightness(struct backlight_device *bl)
42688 return data->current_brightness;
42689 }
42690
42691 -static struct backlight_ops da903x_backlight_ops = {
42692 +static const struct backlight_ops da903x_backlight_ops = {
42693 .update_status = da903x_backlight_update_status,
42694 .get_brightness = da903x_backlight_get_brightness,
42695 };
42696 diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
42697 index 6d27f62..e6d348e 100644
42698 --- a/drivers/video/backlight/generic_bl.c
42699 +++ b/drivers/video/backlight/generic_bl.c
42700 @@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
42701 }
42702 EXPORT_SYMBOL(corgibl_limit_intensity);
42703
42704 -static struct backlight_ops genericbl_ops = {
42705 +static const struct backlight_ops genericbl_ops = {
42706 .options = BL_CORE_SUSPENDRESUME,
42707 .get_brightness = genericbl_get_intensity,
42708 .update_status = genericbl_send_intensity,
42709 diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
42710 index 7fb4eef..f7cc528 100644
42711 --- a/drivers/video/backlight/hp680_bl.c
42712 +++ b/drivers/video/backlight/hp680_bl.c
42713 @@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct backlight_device *bd)
42714 return current_intensity;
42715 }
42716
42717 -static struct backlight_ops hp680bl_ops = {
42718 +static const struct backlight_ops hp680bl_ops = {
42719 .get_brightness = hp680bl_get_intensity,
42720 .update_status = hp680bl_set_intensity,
42721 };
42722 diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
42723 index 7aed256..db9071f 100644
42724 --- a/drivers/video/backlight/jornada720_bl.c
42725 +++ b/drivers/video/backlight/jornada720_bl.c
42726 @@ -93,7 +93,7 @@ out:
42727 return ret;
42728 }
42729
42730 -static struct backlight_ops jornada_bl_ops = {
42731 +static const struct backlight_ops jornada_bl_ops = {
42732 .get_brightness = jornada_bl_get_brightness,
42733 .update_status = jornada_bl_update_status,
42734 .options = BL_CORE_SUSPENDRESUME,
42735 diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
42736 index a38fda1..939e7b8 100644
42737 --- a/drivers/video/backlight/kb3886_bl.c
42738 +++ b/drivers/video/backlight/kb3886_bl.c
42739 @@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct backlight_device *bd)
42740 return kb3886bl_intensity;
42741 }
42742
42743 -static struct backlight_ops kb3886bl_ops = {
42744 +static const struct backlight_ops kb3886bl_ops = {
42745 .get_brightness = kb3886bl_get_intensity,
42746 .update_status = kb3886bl_send_intensity,
42747 };
42748 diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
42749 index 6b488b8..00a9591 100644
42750 --- a/drivers/video/backlight/locomolcd.c
42751 +++ b/drivers/video/backlight/locomolcd.c
42752 @@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struct backlight_device *bd)
42753 return current_intensity;
42754 }
42755
42756 -static struct backlight_ops locomobl_data = {
42757 +static const struct backlight_ops locomobl_data = {
42758 .get_brightness = locomolcd_get_intensity,
42759 .update_status = locomolcd_set_intensity,
42760 };
42761 diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
42762 index 99bdfa8..3dac448 100644
42763 --- a/drivers/video/backlight/mbp_nvidia_bl.c
42764 +++ b/drivers/video/backlight/mbp_nvidia_bl.c
42765 @@ -33,7 +33,7 @@ struct dmi_match_data {
42766 unsigned long iostart;
42767 unsigned long iolen;
42768 /* Backlight operations structure. */
42769 - struct backlight_ops backlight_ops;
42770 + const struct backlight_ops backlight_ops;
42771 };
42772
42773 /* Module parameters. */
42774 diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
42775 index cbad67e..3cf900e 100644
42776 --- a/drivers/video/backlight/omap1_bl.c
42777 +++ b/drivers/video/backlight/omap1_bl.c
42778 @@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct backlight_device *dev)
42779 return bl->current_intensity;
42780 }
42781
42782 -static struct backlight_ops omapbl_ops = {
42783 +static const struct backlight_ops omapbl_ops = {
42784 .get_brightness = omapbl_get_intensity,
42785 .update_status = omapbl_update_status,
42786 };
42787 diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
42788 index 9edaf24..075786e 100644
42789 --- a/drivers/video/backlight/progear_bl.c
42790 +++ b/drivers/video/backlight/progear_bl.c
42791 @@ -54,7 +54,7 @@ static int progearbl_get_intensity(struct backlight_device *bd)
42792 return intensity - HW_LEVEL_MIN;
42793 }
42794
42795 -static struct backlight_ops progearbl_ops = {
42796 +static const struct backlight_ops progearbl_ops = {
42797 .get_brightness = progearbl_get_intensity,
42798 .update_status = progearbl_set_intensity,
42799 };
42800 diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
42801 index 8871662..df9e0b3 100644
42802 --- a/drivers/video/backlight/pwm_bl.c
42803 +++ b/drivers/video/backlight/pwm_bl.c
42804 @@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(struct backlight_device *bl)
42805 return bl->props.brightness;
42806 }
42807
42808 -static struct backlight_ops pwm_backlight_ops = {
42809 +static const struct backlight_ops pwm_backlight_ops = {
42810 .update_status = pwm_backlight_update_status,
42811 .get_brightness = pwm_backlight_get_brightness,
42812 };
42813 diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
42814 index 43edbad..e14ce4d 100644
42815 --- a/drivers/video/backlight/tosa_bl.c
42816 +++ b/drivers/video/backlight/tosa_bl.c
42817 @@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct backlight_device *dev)
42818 return props->brightness;
42819 }
42820
42821 -static struct backlight_ops bl_ops = {
42822 +static const struct backlight_ops bl_ops = {
42823 .get_brightness = tosa_bl_get_brightness,
42824 .update_status = tosa_bl_update_status,
42825 };
42826 diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
42827 index 467bdb7..e32add3 100644
42828 --- a/drivers/video/backlight/wm831x_bl.c
42829 +++ b/drivers/video/backlight/wm831x_bl.c
42830 @@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightness(struct backlight_device *bl)
42831 return data->current_brightness;
42832 }
42833
42834 -static struct backlight_ops wm831x_backlight_ops = {
42835 +static const struct backlight_ops wm831x_backlight_ops = {
42836 .options = BL_CORE_SUSPENDRESUME,
42837 .update_status = wm831x_backlight_update_status,
42838 .get_brightness = wm831x_backlight_get_brightness,
42839 diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
42840 index e49ae5e..db4e6f7 100644
42841 --- a/drivers/video/bf54x-lq043fb.c
42842 +++ b/drivers/video/bf54x-lq043fb.c
42843 @@ -463,7 +463,7 @@ static int bl_get_brightness(struct backlight_device *bd)
42844 return 0;
42845 }
42846
42847 -static struct backlight_ops bfin_lq043fb_bl_ops = {
42848 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
42849 .get_brightness = bl_get_brightness,
42850 };
42851
42852 diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
42853 index 2c72a7c..d523e52 100644
42854 --- a/drivers/video/bfin-t350mcqb-fb.c
42855 +++ b/drivers/video/bfin-t350mcqb-fb.c
42856 @@ -381,7 +381,7 @@ static int bl_get_brightness(struct backlight_device *bd)
42857 return 0;
42858 }
42859
42860 -static struct backlight_ops bfin_lq043fb_bl_ops = {
42861 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
42862 .get_brightness = bl_get_brightness,
42863 };
42864
42865 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
42866 index f53b9f1..958bf4e 100644
42867 --- a/drivers/video/fbcmap.c
42868 +++ b/drivers/video/fbcmap.c
42869 @@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
42870 rc = -ENODEV;
42871 goto out;
42872 }
42873 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
42874 - !info->fbops->fb_setcmap)) {
42875 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
42876 rc = -EINVAL;
42877 goto out1;
42878 }
42879 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
42880 index 99bbd28..ad3829e 100644
42881 --- a/drivers/video/fbmem.c
42882 +++ b/drivers/video/fbmem.c
42883 @@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
42884 image->dx += image->width + 8;
42885 }
42886 } else if (rotate == FB_ROTATE_UD) {
42887 - for (x = 0; x < num && image->dx >= 0; x++) {
42888 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
42889 info->fbops->fb_imageblit(info, image);
42890 image->dx -= image->width + 8;
42891 }
42892 @@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
42893 image->dy += image->height + 8;
42894 }
42895 } else if (rotate == FB_ROTATE_CCW) {
42896 - for (x = 0; x < num && image->dy >= 0; x++) {
42897 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
42898 info->fbops->fb_imageblit(info, image);
42899 image->dy -= image->height + 8;
42900 }
42901 @@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
42902 int flags = info->flags;
42903 int ret = 0;
42904
42905 + pax_track_stack();
42906 +
42907 if (var->activate & FB_ACTIVATE_INV_MODE) {
42908 struct fb_videomode mode1, mode2;
42909
42910 @@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
42911 void __user *argp = (void __user *)arg;
42912 long ret = 0;
42913
42914 + pax_track_stack();
42915 +
42916 switch (cmd) {
42917 case FBIOGET_VSCREENINFO:
42918 if (!lock_fb_info(info))
42919 @@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
42920 return -EFAULT;
42921 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
42922 return -EINVAL;
42923 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
42924 + if (con2fb.framebuffer >= FB_MAX)
42925 return -EINVAL;
42926 if (!registered_fb[con2fb.framebuffer])
42927 request_module("fb%d", con2fb.framebuffer);
42928 diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
42929 index f20eff8..3e4f622 100644
42930 --- a/drivers/video/geode/gx1fb_core.c
42931 +++ b/drivers/video/geode/gx1fb_core.c
42932 @@ -30,7 +30,7 @@ static int crt_option = 1;
42933 static char panel_option[32] = "";
42934
42935 /* Modes relevant to the GX1 (taken from modedb.c) */
42936 -static const struct fb_videomode __initdata gx1_modedb[] = {
42937 +static const struct fb_videomode __initconst gx1_modedb[] = {
42938 /* 640x480-60 VESA */
42939 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
42940 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
42941 diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
42942 index 896e53d..4d87d0b 100644
42943 --- a/drivers/video/gxt4500.c
42944 +++ b/drivers/video/gxt4500.c
42945 @@ -156,7 +156,7 @@ struct gxt4500_par {
42946 static char *mode_option;
42947
42948 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
42949 -static const struct fb_videomode defaultmode __devinitdata = {
42950 +static const struct fb_videomode defaultmode __devinitconst = {
42951 .refresh = 60,
42952 .xres = 1280,
42953 .yres = 1024,
42954 @@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
42955 return 0;
42956 }
42957
42958 -static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
42959 +static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
42960 .id = "IBM GXT4500P",
42961 .type = FB_TYPE_PACKED_PIXELS,
42962 .visual = FB_VISUAL_PSEUDOCOLOR,
42963 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
42964 index f5bedee..28c6028 100644
42965 --- a/drivers/video/i810/i810_accel.c
42966 +++ b/drivers/video/i810/i810_accel.c
42967 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
42968 }
42969 }
42970 printk("ringbuffer lockup!!!\n");
42971 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
42972 i810_report_error(mmio);
42973 par->dev_flags |= LOCKUP;
42974 info->pixmap.scan_align = 1;
42975 diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
42976 index 5743ea2..457f82c 100644
42977 --- a/drivers/video/i810/i810_main.c
42978 +++ b/drivers/video/i810/i810_main.c
42979 @@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
42980 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
42981
42982 /* PCI */
42983 -static const char *i810_pci_list[] __devinitdata = {
42984 +static const char *i810_pci_list[] __devinitconst = {
42985 "Intel(R) 810 Framebuffer Device" ,
42986 "Intel(R) 810-DC100 Framebuffer Device" ,
42987 "Intel(R) 810E Framebuffer Device" ,
42988 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
42989 index 3c14e43..eafa544 100644
42990 --- a/drivers/video/logo/logo_linux_clut224.ppm
42991 +++ b/drivers/video/logo/logo_linux_clut224.ppm
42992 @@ -1,1604 +1,1123 @@
42993 P3
42994 -# Standard 224-color Linux logo
42995 80 80
42996 255
42997 - 0 0 0 0 0 0 0 0 0 0 0 0
42998 - 0 0 0 0 0 0 0 0 0 0 0 0
42999 - 0 0 0 0 0 0 0 0 0 0 0 0
43000 - 0 0 0 0 0 0 0 0 0 0 0 0
43001 - 0 0 0 0 0 0 0 0 0 0 0 0
43002 - 0 0 0 0 0 0 0 0 0 0 0 0
43003 - 0 0 0 0 0 0 0 0 0 0 0 0
43004 - 0 0 0 0 0 0 0 0 0 0 0 0
43005 - 0 0 0 0 0 0 0 0 0 0 0 0
43006 - 6 6 6 6 6 6 10 10 10 10 10 10
43007 - 10 10 10 6 6 6 6 6 6 6 6 6
43008 - 0 0 0 0 0 0 0 0 0 0 0 0
43009 - 0 0 0 0 0 0 0 0 0 0 0 0
43010 - 0 0 0 0 0 0 0 0 0 0 0 0
43011 - 0 0 0 0 0 0 0 0 0 0 0 0
43012 - 0 0 0 0 0 0 0 0 0 0 0 0
43013 - 0 0 0 0 0 0 0 0 0 0 0 0
43014 - 0 0 0 0 0 0 0 0 0 0 0 0
43015 - 0 0 0 0 0 0 0 0 0 0 0 0
43016 - 0 0 0 0 0 0 0 0 0 0 0 0
43017 - 0 0 0 0 0 0 0 0 0 0 0 0
43018 - 0 0 0 0 0 0 0 0 0 0 0 0
43019 - 0 0 0 0 0 0 0 0 0 0 0 0
43020 - 0 0 0 0 0 0 0 0 0 0 0 0
43021 - 0 0 0 0 0 0 0 0 0 0 0 0
43022 - 0 0 0 0 0 0 0 0 0 0 0 0
43023 - 0 0 0 0 0 0 0 0 0 0 0 0
43024 - 0 0 0 0 0 0 0 0 0 0 0 0
43025 - 0 0 0 6 6 6 10 10 10 14 14 14
43026 - 22 22 22 26 26 26 30 30 30 34 34 34
43027 - 30 30 30 30 30 30 26 26 26 18 18 18
43028 - 14 14 14 10 10 10 6 6 6 0 0 0
43029 - 0 0 0 0 0 0 0 0 0 0 0 0
43030 - 0 0 0 0 0 0 0 0 0 0 0 0
43031 - 0 0 0 0 0 0 0 0 0 0 0 0
43032 - 0 0 0 0 0 0 0 0 0 0 0 0
43033 - 0 0 0 0 0 0 0 0 0 0 0 0
43034 - 0 0 0 0 0 0 0 0 0 0 0 0
43035 - 0 0 0 0 0 0 0 0 0 0 0 0
43036 - 0 0 0 0 0 0 0 0 0 0 0 0
43037 - 0 0 0 0 0 0 0 0 0 0 0 0
43038 - 0 0 0 0 0 1 0 0 1 0 0 0
43039 - 0 0 0 0 0 0 0 0 0 0 0 0
43040 - 0 0 0 0 0 0 0 0 0 0 0 0
43041 - 0 0 0 0 0 0 0 0 0 0 0 0
43042 - 0 0 0 0 0 0 0 0 0 0 0 0
43043 - 0 0 0 0 0 0 0 0 0 0 0 0
43044 - 0 0 0 0 0 0 0 0 0 0 0 0
43045 - 6 6 6 14 14 14 26 26 26 42 42 42
43046 - 54 54 54 66 66 66 78 78 78 78 78 78
43047 - 78 78 78 74 74 74 66 66 66 54 54 54
43048 - 42 42 42 26 26 26 18 18 18 10 10 10
43049 - 6 6 6 0 0 0 0 0 0 0 0 0
43050 - 0 0 0 0 0 0 0 0 0 0 0 0
43051 - 0 0 0 0 0 0 0 0 0 0 0 0
43052 - 0 0 0 0 0 0 0 0 0 0 0 0
43053 - 0 0 0 0 0 0 0 0 0 0 0 0
43054 - 0 0 0 0 0 0 0 0 0 0 0 0
43055 - 0 0 0 0 0 0 0 0 0 0 0 0
43056 - 0 0 0 0 0 0 0 0 0 0 0 0
43057 - 0 0 0 0 0 0 0 0 0 0 0 0
43058 - 0 0 1 0 0 0 0 0 0 0 0 0
43059 - 0 0 0 0 0 0 0 0 0 0 0 0
43060 - 0 0 0 0 0 0 0 0 0 0 0 0
43061 - 0 0 0 0 0 0 0 0 0 0 0 0
43062 - 0 0 0 0 0 0 0 0 0 0 0 0
43063 - 0 0 0 0 0 0 0 0 0 0 0 0
43064 - 0 0 0 0 0 0 0 0 0 10 10 10
43065 - 22 22 22 42 42 42 66 66 66 86 86 86
43066 - 66 66 66 38 38 38 38 38 38 22 22 22
43067 - 26 26 26 34 34 34 54 54 54 66 66 66
43068 - 86 86 86 70 70 70 46 46 46 26 26 26
43069 - 14 14 14 6 6 6 0 0 0 0 0 0
43070 - 0 0 0 0 0 0 0 0 0 0 0 0
43071 - 0 0 0 0 0 0 0 0 0 0 0 0
43072 - 0 0 0 0 0 0 0 0 0 0 0 0
43073 - 0 0 0 0 0 0 0 0 0 0 0 0
43074 - 0 0 0 0 0 0 0 0 0 0 0 0
43075 - 0 0 0 0 0 0 0 0 0 0 0 0
43076 - 0 0 0 0 0 0 0 0 0 0 0 0
43077 - 0 0 0 0 0 0 0 0 0 0 0 0
43078 - 0 0 1 0 0 1 0 0 1 0 0 0
43079 - 0 0 0 0 0 0 0 0 0 0 0 0
43080 - 0 0 0 0 0 0 0 0 0 0 0 0
43081 - 0 0 0 0 0 0 0 0 0 0 0 0
43082 - 0 0 0 0 0 0 0 0 0 0 0 0
43083 - 0 0 0 0 0 0 0 0 0 0 0 0
43084 - 0 0 0 0 0 0 10 10 10 26 26 26
43085 - 50 50 50 82 82 82 58 58 58 6 6 6
43086 - 2 2 6 2 2 6 2 2 6 2 2 6
43087 - 2 2 6 2 2 6 2 2 6 2 2 6
43088 - 6 6 6 54 54 54 86 86 86 66 66 66
43089 - 38 38 38 18 18 18 6 6 6 0 0 0
43090 - 0 0 0 0 0 0 0 0 0 0 0 0
43091 - 0 0 0 0 0 0 0 0 0 0 0 0
43092 - 0 0 0 0 0 0 0 0 0 0 0 0
43093 - 0 0 0 0 0 0 0 0 0 0 0 0
43094 - 0 0 0 0 0 0 0 0 0 0 0 0
43095 - 0 0 0 0 0 0 0 0 0 0 0 0
43096 - 0 0 0 0 0 0 0 0 0 0 0 0
43097 - 0 0 0 0 0 0 0 0 0 0 0 0
43098 - 0 0 0 0 0 0 0 0 0 0 0 0
43099 - 0 0 0 0 0 0 0 0 0 0 0 0
43100 - 0 0 0 0 0 0 0 0 0 0 0 0
43101 - 0 0 0 0 0 0 0 0 0 0 0 0
43102 - 0 0 0 0 0 0 0 0 0 0 0 0
43103 - 0 0 0 0 0 0 0 0 0 0 0 0
43104 - 0 0 0 6 6 6 22 22 22 50 50 50
43105 - 78 78 78 34 34 34 2 2 6 2 2 6
43106 - 2 2 6 2 2 6 2 2 6 2 2 6
43107 - 2 2 6 2 2 6 2 2 6 2 2 6
43108 - 2 2 6 2 2 6 6 6 6 70 70 70
43109 - 78 78 78 46 46 46 22 22 22 6 6 6
43110 - 0 0 0 0 0 0 0 0 0 0 0 0
43111 - 0 0 0 0 0 0 0 0 0 0 0 0
43112 - 0 0 0 0 0 0 0 0 0 0 0 0
43113 - 0 0 0 0 0 0 0 0 0 0 0 0
43114 - 0 0 0 0 0 0 0 0 0 0 0 0
43115 - 0 0 0 0 0 0 0 0 0 0 0 0
43116 - 0 0 0 0 0 0 0 0 0 0 0 0
43117 - 0 0 0 0 0 0 0 0 0 0 0 0
43118 - 0 0 1 0 0 1 0 0 1 0 0 0
43119 - 0 0 0 0 0 0 0 0 0 0 0 0
43120 - 0 0 0 0 0 0 0 0 0 0 0 0
43121 - 0 0 0 0 0 0 0 0 0 0 0 0
43122 - 0 0 0 0 0 0 0 0 0 0 0 0
43123 - 0 0 0 0 0 0 0 0 0 0 0 0
43124 - 6 6 6 18 18 18 42 42 42 82 82 82
43125 - 26 26 26 2 2 6 2 2 6 2 2 6
43126 - 2 2 6 2 2 6 2 2 6 2 2 6
43127 - 2 2 6 2 2 6 2 2 6 14 14 14
43128 - 46 46 46 34 34 34 6 6 6 2 2 6
43129 - 42 42 42 78 78 78 42 42 42 18 18 18
43130 - 6 6 6 0 0 0 0 0 0 0 0 0
43131 - 0 0 0 0 0 0 0 0 0 0 0 0
43132 - 0 0 0 0 0 0 0 0 0 0 0 0
43133 - 0 0 0 0 0 0 0 0 0 0 0 0
43134 - 0 0 0 0 0 0 0 0 0 0 0 0
43135 - 0 0 0 0 0 0 0 0 0 0 0 0
43136 - 0 0 0 0 0 0 0 0 0 0 0 0
43137 - 0 0 0 0 0 0 0 0 0 0 0 0
43138 - 0 0 1 0 0 0 0 0 1 0 0 0
43139 - 0 0 0 0 0 0 0 0 0 0 0 0
43140 - 0 0 0 0 0 0 0 0 0 0 0 0
43141 - 0 0 0 0 0 0 0 0 0 0 0 0
43142 - 0 0 0 0 0 0 0 0 0 0 0 0
43143 - 0 0 0 0 0 0 0 0 0 0 0 0
43144 - 10 10 10 30 30 30 66 66 66 58 58 58
43145 - 2 2 6 2 2 6 2 2 6 2 2 6
43146 - 2 2 6 2 2 6 2 2 6 2 2 6
43147 - 2 2 6 2 2 6 2 2 6 26 26 26
43148 - 86 86 86 101 101 101 46 46 46 10 10 10
43149 - 2 2 6 58 58 58 70 70 70 34 34 34
43150 - 10 10 10 0 0 0 0 0 0 0 0 0
43151 - 0 0 0 0 0 0 0 0 0 0 0 0
43152 - 0 0 0 0 0 0 0 0 0 0 0 0
43153 - 0 0 0 0 0 0 0 0 0 0 0 0
43154 - 0 0 0 0 0 0 0 0 0 0 0 0
43155 - 0 0 0 0 0 0 0 0 0 0 0 0
43156 - 0 0 0 0 0 0 0 0 0 0 0 0
43157 - 0 0 0 0 0 0 0 0 0 0 0 0
43158 - 0 0 1 0 0 1 0 0 1 0 0 0
43159 - 0 0 0 0 0 0 0 0 0 0 0 0
43160 - 0 0 0 0 0 0 0 0 0 0 0 0
43161 - 0 0 0 0 0 0 0 0 0 0 0 0
43162 - 0 0 0 0 0 0 0 0 0 0 0 0
43163 - 0 0 0 0 0 0 0 0 0 0 0 0
43164 - 14 14 14 42 42 42 86 86 86 10 10 10
43165 - 2 2 6 2 2 6 2 2 6 2 2 6
43166 - 2 2 6 2 2 6 2 2 6 2 2 6
43167 - 2 2 6 2 2 6 2 2 6 30 30 30
43168 - 94 94 94 94 94 94 58 58 58 26 26 26
43169 - 2 2 6 6 6 6 78 78 78 54 54 54
43170 - 22 22 22 6 6 6 0 0 0 0 0 0
43171 - 0 0 0 0 0 0 0 0 0 0 0 0
43172 - 0 0 0 0 0 0 0 0 0 0 0 0
43173 - 0 0 0 0 0 0 0 0 0 0 0 0
43174 - 0 0 0 0 0 0 0 0 0 0 0 0
43175 - 0 0 0 0 0 0 0 0 0 0 0 0
43176 - 0 0 0 0 0 0 0 0 0 0 0 0
43177 - 0 0 0 0 0 0 0 0 0 0 0 0
43178 - 0 0 0 0 0 0 0 0 0 0 0 0
43179 - 0 0 0 0 0 0 0 0 0 0 0 0
43180 - 0 0 0 0 0 0 0 0 0 0 0 0
43181 - 0 0 0 0 0 0 0 0 0 0 0 0
43182 - 0 0 0 0 0 0 0 0 0 0 0 0
43183 - 0 0 0 0 0 0 0 0 0 6 6 6
43184 - 22 22 22 62 62 62 62 62 62 2 2 6
43185 - 2 2 6 2 2 6 2 2 6 2 2 6
43186 - 2 2 6 2 2 6 2 2 6 2 2 6
43187 - 2 2 6 2 2 6 2 2 6 26 26 26
43188 - 54 54 54 38 38 38 18 18 18 10 10 10
43189 - 2 2 6 2 2 6 34 34 34 82 82 82
43190 - 38 38 38 14 14 14 0 0 0 0 0 0
43191 - 0 0 0 0 0 0 0 0 0 0 0 0
43192 - 0 0 0 0 0 0 0 0 0 0 0 0
43193 - 0 0 0 0 0 0 0 0 0 0 0 0
43194 - 0 0 0 0 0 0 0 0 0 0 0 0
43195 - 0 0 0 0 0 0 0 0 0 0 0 0
43196 - 0 0 0 0 0 0 0 0 0 0 0 0
43197 - 0 0 0 0 0 0 0 0 0 0 0 0
43198 - 0 0 0 0 0 1 0 0 1 0 0 0
43199 - 0 0 0 0 0 0 0 0 0 0 0 0
43200 - 0 0 0 0 0 0 0 0 0 0 0 0
43201 - 0 0 0 0 0 0 0 0 0 0 0 0
43202 - 0 0 0 0 0 0 0 0 0 0 0 0
43203 - 0 0 0 0 0 0 0 0 0 6 6 6
43204 - 30 30 30 78 78 78 30 30 30 2 2 6
43205 - 2 2 6 2 2 6 2 2 6 2 2 6
43206 - 2 2 6 2 2 6 2 2 6 2 2 6
43207 - 2 2 6 2 2 6 2 2 6 10 10 10
43208 - 10 10 10 2 2 6 2 2 6 2 2 6
43209 - 2 2 6 2 2 6 2 2 6 78 78 78
43210 - 50 50 50 18 18 18 6 6 6 0 0 0
43211 - 0 0 0 0 0 0 0 0 0 0 0 0
43212 - 0 0 0 0 0 0 0 0 0 0 0 0
43213 - 0 0 0 0 0 0 0 0 0 0 0 0
43214 - 0 0 0 0 0 0 0 0 0 0 0 0
43215 - 0 0 0 0 0 0 0 0 0 0 0 0
43216 - 0 0 0 0 0 0 0 0 0 0 0 0
43217 - 0 0 0 0 0 0 0 0 0 0 0 0
43218 - 0 0 1 0 0 0 0 0 0 0 0 0
43219 - 0 0 0 0 0 0 0 0 0 0 0 0
43220 - 0 0 0 0 0 0 0 0 0 0 0 0
43221 - 0 0 0 0 0 0 0 0 0 0 0 0
43222 - 0 0 0 0 0 0 0 0 0 0 0 0
43223 - 0 0 0 0 0 0 0 0 0 10 10 10
43224 - 38 38 38 86 86 86 14 14 14 2 2 6
43225 - 2 2 6 2 2 6 2 2 6 2 2 6
43226 - 2 2 6 2 2 6 2 2 6 2 2 6
43227 - 2 2 6 2 2 6 2 2 6 2 2 6
43228 - 2 2 6 2 2 6 2 2 6 2 2 6
43229 - 2 2 6 2 2 6 2 2 6 54 54 54
43230 - 66 66 66 26 26 26 6 6 6 0 0 0
43231 - 0 0 0 0 0 0 0 0 0 0 0 0
43232 - 0 0 0 0 0 0 0 0 0 0 0 0
43233 - 0 0 0 0 0 0 0 0 0 0 0 0
43234 - 0 0 0 0 0 0 0 0 0 0 0 0
43235 - 0 0 0 0 0 0 0 0 0 0 0 0
43236 - 0 0 0 0 0 0 0 0 0 0 0 0
43237 - 0 0 0 0 0 0 0 0 0 0 0 0
43238 - 0 0 0 0 0 1 0 0 1 0 0 0
43239 - 0 0 0 0 0 0 0 0 0 0 0 0
43240 - 0 0 0 0 0 0 0 0 0 0 0 0
43241 - 0 0 0 0 0 0 0 0 0 0 0 0
43242 - 0 0 0 0 0 0 0 0 0 0 0 0
43243 - 0 0 0 0 0 0 0 0 0 14 14 14
43244 - 42 42 42 82 82 82 2 2 6 2 2 6
43245 - 2 2 6 6 6 6 10 10 10 2 2 6
43246 - 2 2 6 2 2 6 2 2 6 2 2 6
43247 - 2 2 6 2 2 6 2 2 6 6 6 6
43248 - 14 14 14 10 10 10 2 2 6 2 2 6
43249 - 2 2 6 2 2 6 2 2 6 18 18 18
43250 - 82 82 82 34 34 34 10 10 10 0 0 0
43251 - 0 0 0 0 0 0 0 0 0 0 0 0
43252 - 0 0 0 0 0 0 0 0 0 0 0 0
43253 - 0 0 0 0 0 0 0 0 0 0 0 0
43254 - 0 0 0 0 0 0 0 0 0 0 0 0
43255 - 0 0 0 0 0 0 0 0 0 0 0 0
43256 - 0 0 0 0 0 0 0 0 0 0 0 0
43257 - 0 0 0 0 0 0 0 0 0 0 0 0
43258 - 0 0 1 0 0 0 0 0 0 0 0 0
43259 - 0 0 0 0 0 0 0 0 0 0 0 0
43260 - 0 0 0 0 0 0 0 0 0 0 0 0
43261 - 0 0 0 0 0 0 0 0 0 0 0 0
43262 - 0 0 0 0 0 0 0 0 0 0 0 0
43263 - 0 0 0 0 0 0 0 0 0 14 14 14
43264 - 46 46 46 86 86 86 2 2 6 2 2 6
43265 - 6 6 6 6 6 6 22 22 22 34 34 34
43266 - 6 6 6 2 2 6 2 2 6 2 2 6
43267 - 2 2 6 2 2 6 18 18 18 34 34 34
43268 - 10 10 10 50 50 50 22 22 22 2 2 6
43269 - 2 2 6 2 2 6 2 2 6 10 10 10
43270 - 86 86 86 42 42 42 14 14 14 0 0 0
43271 - 0 0 0 0 0 0 0 0 0 0 0 0
43272 - 0 0 0 0 0 0 0 0 0 0 0 0
43273 - 0 0 0 0 0 0 0 0 0 0 0 0
43274 - 0 0 0 0 0 0 0 0 0 0 0 0
43275 - 0 0 0 0 0 0 0 0 0 0 0 0
43276 - 0 0 0 0 0 0 0 0 0 0 0 0
43277 - 0 0 0 0 0 0 0 0 0 0 0 0
43278 - 0 0 1 0 0 1 0 0 1 0 0 0
43279 - 0 0 0 0 0 0 0 0 0 0 0 0
43280 - 0 0 0 0 0 0 0 0 0 0 0 0
43281 - 0 0 0 0 0 0 0 0 0 0 0 0
43282 - 0 0 0 0 0 0 0 0 0 0 0 0
43283 - 0 0 0 0 0 0 0 0 0 14 14 14
43284 - 46 46 46 86 86 86 2 2 6 2 2 6
43285 - 38 38 38 116 116 116 94 94 94 22 22 22
43286 - 22 22 22 2 2 6 2 2 6 2 2 6
43287 - 14 14 14 86 86 86 138 138 138 162 162 162
43288 -154 154 154 38 38 38 26 26 26 6 6 6
43289 - 2 2 6 2 2 6 2 2 6 2 2 6
43290 - 86 86 86 46 46 46 14 14 14 0 0 0
43291 - 0 0 0 0 0 0 0 0 0 0 0 0
43292 - 0 0 0 0 0 0 0 0 0 0 0 0
43293 - 0 0 0 0 0 0 0 0 0 0 0 0
43294 - 0 0 0 0 0 0 0 0 0 0 0 0
43295 - 0 0 0 0 0 0 0 0 0 0 0 0
43296 - 0 0 0 0 0 0 0 0 0 0 0 0
43297 - 0 0 0 0 0 0 0 0 0 0 0 0
43298 - 0 0 0 0 0 0 0 0 0 0 0 0
43299 - 0 0 0 0 0 0 0 0 0 0 0 0
43300 - 0 0 0 0 0 0 0 0 0 0 0 0
43301 - 0 0 0 0 0 0 0 0 0 0 0 0
43302 - 0 0 0 0 0 0 0 0 0 0 0 0
43303 - 0 0 0 0 0 0 0 0 0 14 14 14
43304 - 46 46 46 86 86 86 2 2 6 14 14 14
43305 -134 134 134 198 198 198 195 195 195 116 116 116
43306 - 10 10 10 2 2 6 2 2 6 6 6 6
43307 -101 98 89 187 187 187 210 210 210 218 218 218
43308 -214 214 214 134 134 134 14 14 14 6 6 6
43309 - 2 2 6 2 2 6 2 2 6 2 2 6
43310 - 86 86 86 50 50 50 18 18 18 6 6 6
43311 - 0 0 0 0 0 0 0 0 0 0 0 0
43312 - 0 0 0 0 0 0 0 0 0 0 0 0
43313 - 0 0 0 0 0 0 0 0 0 0 0 0
43314 - 0 0 0 0 0 0 0 0 0 0 0 0
43315 - 0 0 0 0 0 0 0 0 0 0 0 0
43316 - 0 0 0 0 0 0 0 0 0 0 0 0
43317 - 0 0 0 0 0 0 0 0 1 0 0 0
43318 - 0 0 1 0 0 1 0 0 1 0 0 0
43319 - 0 0 0 0 0 0 0 0 0 0 0 0
43320 - 0 0 0 0 0 0 0 0 0 0 0 0
43321 - 0 0 0 0 0 0 0 0 0 0 0 0
43322 - 0 0 0 0 0 0 0 0 0 0 0 0
43323 - 0 0 0 0 0 0 0 0 0 14 14 14
43324 - 46 46 46 86 86 86 2 2 6 54 54 54
43325 -218 218 218 195 195 195 226 226 226 246 246 246
43326 - 58 58 58 2 2 6 2 2 6 30 30 30
43327 -210 210 210 253 253 253 174 174 174 123 123 123
43328 -221 221 221 234 234 234 74 74 74 2 2 6
43329 - 2 2 6 2 2 6 2 2 6 2 2 6
43330 - 70 70 70 58 58 58 22 22 22 6 6 6
43331 - 0 0 0 0 0 0 0 0 0 0 0 0
43332 - 0 0 0 0 0 0 0 0 0 0 0 0
43333 - 0 0 0 0 0 0 0 0 0 0 0 0
43334 - 0 0 0 0 0 0 0 0 0 0 0 0
43335 - 0 0 0 0 0 0 0 0 0 0 0 0
43336 - 0 0 0 0 0 0 0 0 0 0 0 0
43337 - 0 0 0 0 0 0 0 0 0 0 0 0
43338 - 0 0 0 0 0 0 0 0 0 0 0 0
43339 - 0 0 0 0 0 0 0 0 0 0 0 0
43340 - 0 0 0 0 0 0 0 0 0 0 0 0
43341 - 0 0 0 0 0 0 0 0 0 0 0 0
43342 - 0 0 0 0 0 0 0 0 0 0 0 0
43343 - 0 0 0 0 0 0 0 0 0 14 14 14
43344 - 46 46 46 82 82 82 2 2 6 106 106 106
43345 -170 170 170 26 26 26 86 86 86 226 226 226
43346 -123 123 123 10 10 10 14 14 14 46 46 46
43347 -231 231 231 190 190 190 6 6 6 70 70 70
43348 - 90 90 90 238 238 238 158 158 158 2 2 6
43349 - 2 2 6 2 2 6 2 2 6 2 2 6
43350 - 70 70 70 58 58 58 22 22 22 6 6 6
43351 - 0 0 0 0 0 0 0 0 0 0 0 0
43352 - 0 0 0 0 0 0 0 0 0 0 0 0
43353 - 0 0 0 0 0 0 0 0 0 0 0 0
43354 - 0 0 0 0 0 0 0 0 0 0 0 0
43355 - 0 0 0 0 0 0 0 0 0 0 0 0
43356 - 0 0 0 0 0 0 0 0 0 0 0 0
43357 - 0 0 0 0 0 0 0 0 1 0 0 0
43358 - 0 0 1 0 0 1 0 0 1 0 0 0
43359 - 0 0 0 0 0 0 0 0 0 0 0 0
43360 - 0 0 0 0 0 0 0 0 0 0 0 0
43361 - 0 0 0 0 0 0 0 0 0 0 0 0
43362 - 0 0 0 0 0 0 0 0 0 0 0 0
43363 - 0 0 0 0 0 0 0 0 0 14 14 14
43364 - 42 42 42 86 86 86 6 6 6 116 116 116
43365 -106 106 106 6 6 6 70 70 70 149 149 149
43366 -128 128 128 18 18 18 38 38 38 54 54 54
43367 -221 221 221 106 106 106 2 2 6 14 14 14
43368 - 46 46 46 190 190 190 198 198 198 2 2 6
43369 - 2 2 6 2 2 6 2 2 6 2 2 6
43370 - 74 74 74 62 62 62 22 22 22 6 6 6
43371 - 0 0 0 0 0 0 0 0 0 0 0 0
43372 - 0 0 0 0 0 0 0 0 0 0 0 0
43373 - 0 0 0 0 0 0 0 0 0 0 0 0
43374 - 0 0 0 0 0 0 0 0 0 0 0 0
43375 - 0 0 0 0 0 0 0 0 0 0 0 0
43376 - 0 0 0 0 0 0 0 0 0 0 0 0
43377 - 0 0 0 0 0 0 0 0 1 0 0 0
43378 - 0 0 1 0 0 0 0 0 1 0 0 0
43379 - 0 0 0 0 0 0 0 0 0 0 0 0
43380 - 0 0 0 0 0 0 0 0 0 0 0 0
43381 - 0 0 0 0 0 0 0 0 0 0 0 0
43382 - 0 0 0 0 0 0 0 0 0 0 0 0
43383 - 0 0 0 0 0 0 0 0 0 14 14 14
43384 - 42 42 42 94 94 94 14 14 14 101 101 101
43385 -128 128 128 2 2 6 18 18 18 116 116 116
43386 -118 98 46 121 92 8 121 92 8 98 78 10
43387 -162 162 162 106 106 106 2 2 6 2 2 6
43388 - 2 2 6 195 195 195 195 195 195 6 6 6
43389 - 2 2 6 2 2 6 2 2 6 2 2 6
43390 - 74 74 74 62 62 62 22 22 22 6 6 6
43391 - 0 0 0 0 0 0 0 0 0 0 0 0
43392 - 0 0 0 0 0 0 0 0 0 0 0 0
43393 - 0 0 0 0 0 0 0 0 0 0 0 0
43394 - 0 0 0 0 0 0 0 0 0 0 0 0
43395 - 0 0 0 0 0 0 0 0 0 0 0 0
43396 - 0 0 0 0 0 0 0 0 0 0 0 0
43397 - 0 0 0 0 0 0 0 0 1 0 0 1
43398 - 0 0 1 0 0 0 0 0 1 0 0 0
43399 - 0 0 0 0 0 0 0 0 0 0 0 0
43400 - 0 0 0 0 0 0 0 0 0 0 0 0
43401 - 0 0 0 0 0 0 0 0 0 0 0 0
43402 - 0 0 0 0 0 0 0 0 0 0 0 0
43403 - 0 0 0 0 0 0 0 0 0 10 10 10
43404 - 38 38 38 90 90 90 14 14 14 58 58 58
43405 -210 210 210 26 26 26 54 38 6 154 114 10
43406 -226 170 11 236 186 11 225 175 15 184 144 12
43407 -215 174 15 175 146 61 37 26 9 2 2 6
43408 - 70 70 70 246 246 246 138 138 138 2 2 6
43409 - 2 2 6 2 2 6 2 2 6 2 2 6
43410 - 70 70 70 66 66 66 26 26 26 6 6 6
43411 - 0 0 0 0 0 0 0 0 0 0 0 0
43412 - 0 0 0 0 0 0 0 0 0 0 0 0
43413 - 0 0 0 0 0 0 0 0 0 0 0 0
43414 - 0 0 0 0 0 0 0 0 0 0 0 0
43415 - 0 0 0 0 0 0 0 0 0 0 0 0
43416 - 0 0 0 0 0 0 0 0 0 0 0 0
43417 - 0 0 0 0 0 0 0 0 0 0 0 0
43418 - 0 0 0 0 0 0 0 0 0 0 0 0
43419 - 0 0 0 0 0 0 0 0 0 0 0 0
43420 - 0 0 0 0 0 0 0 0 0 0 0 0
43421 - 0 0 0 0 0 0 0 0 0 0 0 0
43422 - 0 0 0 0 0 0 0 0 0 0 0 0
43423 - 0 0 0 0 0 0 0 0 0 10 10 10
43424 - 38 38 38 86 86 86 14 14 14 10 10 10
43425 -195 195 195 188 164 115 192 133 9 225 175 15
43426 -239 182 13 234 190 10 232 195 16 232 200 30
43427 -245 207 45 241 208 19 232 195 16 184 144 12
43428 -218 194 134 211 206 186 42 42 42 2 2 6
43429 - 2 2 6 2 2 6 2 2 6 2 2 6
43430 - 50 50 50 74 74 74 30 30 30 6 6 6
43431 - 0 0 0 0 0 0 0 0 0 0 0 0
43432 - 0 0 0 0 0 0 0 0 0 0 0 0
43433 - 0 0 0 0 0 0 0 0 0 0 0 0
43434 - 0 0 0 0 0 0 0 0 0 0 0 0
43435 - 0 0 0 0 0 0 0 0 0 0 0 0
43436 - 0 0 0 0 0 0 0 0 0 0 0 0
43437 - 0 0 0 0 0 0 0 0 0 0 0 0
43438 - 0 0 0 0 0 0 0 0 0 0 0 0
43439 - 0 0 0 0 0 0 0 0 0 0 0 0
43440 - 0 0 0 0 0 0 0 0 0 0 0 0
43441 - 0 0 0 0 0 0 0 0 0 0 0 0
43442 - 0 0 0 0 0 0 0 0 0 0 0 0
43443 - 0 0 0 0 0 0 0 0 0 10 10 10
43444 - 34 34 34 86 86 86 14 14 14 2 2 6
43445 -121 87 25 192 133 9 219 162 10 239 182 13
43446 -236 186 11 232 195 16 241 208 19 244 214 54
43447 -246 218 60 246 218 38 246 215 20 241 208 19
43448 -241 208 19 226 184 13 121 87 25 2 2 6
43449 - 2 2 6 2 2 6 2 2 6 2 2 6
43450 - 50 50 50 82 82 82 34 34 34 10 10 10
43451 - 0 0 0 0 0 0 0 0 0 0 0 0
43452 - 0 0 0 0 0 0 0 0 0 0 0 0
43453 - 0 0 0 0 0 0 0 0 0 0 0 0
43454 - 0 0 0 0 0 0 0 0 0 0 0 0
43455 - 0 0 0 0 0 0 0 0 0 0 0 0
43456 - 0 0 0 0 0 0 0 0 0 0 0 0
43457 - 0 0 0 0 0 0 0 0 0 0 0 0
43458 - 0 0 0 0 0 0 0 0 0 0 0 0
43459 - 0 0 0 0 0 0 0 0 0 0 0 0
43460 - 0 0 0 0 0 0 0 0 0 0 0 0
43461 - 0 0 0 0 0 0 0 0 0 0 0 0
43462 - 0 0 0 0 0 0 0 0 0 0 0 0
43463 - 0 0 0 0 0 0 0 0 0 10 10 10
43464 - 34 34 34 82 82 82 30 30 30 61 42 6
43465 -180 123 7 206 145 10 230 174 11 239 182 13
43466 -234 190 10 238 202 15 241 208 19 246 218 74
43467 -246 218 38 246 215 20 246 215 20 246 215 20
43468 -226 184 13 215 174 15 184 144 12 6 6 6
43469 - 2 2 6 2 2 6 2 2 6 2 2 6
43470 - 26 26 26 94 94 94 42 42 42 14 14 14
43471 - 0 0 0 0 0 0 0 0 0 0 0 0
43472 - 0 0 0 0 0 0 0 0 0 0 0 0
43473 - 0 0 0 0 0 0 0 0 0 0 0 0
43474 - 0 0 0 0 0 0 0 0 0 0 0 0
43475 - 0 0 0 0 0 0 0 0 0 0 0 0
43476 - 0 0 0 0 0 0 0 0 0 0 0 0
43477 - 0 0 0 0 0 0 0 0 0 0 0 0
43478 - 0 0 0 0 0 0 0 0 0 0 0 0
43479 - 0 0 0 0 0 0 0 0 0 0 0 0
43480 - 0 0 0 0 0 0 0 0 0 0 0 0
43481 - 0 0 0 0 0 0 0 0 0 0 0 0
43482 - 0 0 0 0 0 0 0 0 0 0 0 0
43483 - 0 0 0 0 0 0 0 0 0 10 10 10
43484 - 30 30 30 78 78 78 50 50 50 104 69 6
43485 -192 133 9 216 158 10 236 178 12 236 186 11
43486 -232 195 16 241 208 19 244 214 54 245 215 43
43487 -246 215 20 246 215 20 241 208 19 198 155 10
43488 -200 144 11 216 158 10 156 118 10 2 2 6
43489 - 2 2 6 2 2 6 2 2 6 2 2 6
43490 - 6 6 6 90 90 90 54 54 54 18 18 18
43491 - 6 6 6 0 0 0 0 0 0 0 0 0
43492 - 0 0 0 0 0 0 0 0 0 0 0 0
43493 - 0 0 0 0 0 0 0 0 0 0 0 0
43494 - 0 0 0 0 0 0 0 0 0 0 0 0
43495 - 0 0 0 0 0 0 0 0 0 0 0 0
43496 - 0 0 0 0 0 0 0 0 0 0 0 0
43497 - 0 0 0 0 0 0 0 0 0 0 0 0
43498 - 0 0 0 0 0 0 0 0 0 0 0 0
43499 - 0 0 0 0 0 0 0 0 0 0 0 0
43500 - 0 0 0 0 0 0 0 0 0 0 0 0
43501 - 0 0 0 0 0 0 0 0 0 0 0 0
43502 - 0 0 0 0 0 0 0 0 0 0 0 0
43503 - 0 0 0 0 0 0 0 0 0 10 10 10
43504 - 30 30 30 78 78 78 46 46 46 22 22 22
43505 -137 92 6 210 162 10 239 182 13 238 190 10
43506 -238 202 15 241 208 19 246 215 20 246 215 20
43507 -241 208 19 203 166 17 185 133 11 210 150 10
43508 -216 158 10 210 150 10 102 78 10 2 2 6
43509 - 6 6 6 54 54 54 14 14 14 2 2 6
43510 - 2 2 6 62 62 62 74 74 74 30 30 30
43511 - 10 10 10 0 0 0 0 0 0 0 0 0
43512 - 0 0 0 0 0 0 0 0 0 0 0 0
43513 - 0 0 0 0 0 0 0 0 0 0 0 0
43514 - 0 0 0 0 0 0 0 0 0 0 0 0
43515 - 0 0 0 0 0 0 0 0 0 0 0 0
43516 - 0 0 0 0 0 0 0 0 0 0 0 0
43517 - 0 0 0 0 0 0 0 0 0 0 0 0
43518 - 0 0 0 0 0 0 0 0 0 0 0 0
43519 - 0 0 0 0 0 0 0 0 0 0 0 0
43520 - 0 0 0 0 0 0 0 0 0 0 0 0
43521 - 0 0 0 0 0 0 0 0 0 0 0 0
43522 - 0 0 0 0 0 0 0 0 0 0 0 0
43523 - 0 0 0 0 0 0 0 0 0 10 10 10
43524 - 34 34 34 78 78 78 50 50 50 6 6 6
43525 - 94 70 30 139 102 15 190 146 13 226 184 13
43526 -232 200 30 232 195 16 215 174 15 190 146 13
43527 -168 122 10 192 133 9 210 150 10 213 154 11
43528 -202 150 34 182 157 106 101 98 89 2 2 6
43529 - 2 2 6 78 78 78 116 116 116 58 58 58
43530 - 2 2 6 22 22 22 90 90 90 46 46 46
43531 - 18 18 18 6 6 6 0 0 0 0 0 0
43532 - 0 0 0 0 0 0 0 0 0 0 0 0
43533 - 0 0 0 0 0 0 0 0 0 0 0 0
43534 - 0 0 0 0 0 0 0 0 0 0 0 0
43535 - 0 0 0 0 0 0 0 0 0 0 0 0
43536 - 0 0 0 0 0 0 0 0 0 0 0 0
43537 - 0 0 0 0 0 0 0 0 0 0 0 0
43538 - 0 0 0 0 0 0 0 0 0 0 0 0
43539 - 0 0 0 0 0 0 0 0 0 0 0 0
43540 - 0 0 0 0 0 0 0 0 0 0 0 0
43541 - 0 0 0 0 0 0 0 0 0 0 0 0
43542 - 0 0 0 0 0 0 0 0 0 0 0 0
43543 - 0 0 0 0 0 0 0 0 0 10 10 10
43544 - 38 38 38 86 86 86 50 50 50 6 6 6
43545 -128 128 128 174 154 114 156 107 11 168 122 10
43546 -198 155 10 184 144 12 197 138 11 200 144 11
43547 -206 145 10 206 145 10 197 138 11 188 164 115
43548 -195 195 195 198 198 198 174 174 174 14 14 14
43549 - 2 2 6 22 22 22 116 116 116 116 116 116
43550 - 22 22 22 2 2 6 74 74 74 70 70 70
43551 - 30 30 30 10 10 10 0 0 0 0 0 0
43552 - 0 0 0 0 0 0 0 0 0 0 0 0
43553 - 0 0 0 0 0 0 0 0 0 0 0 0
43554 - 0 0 0 0 0 0 0 0 0 0 0 0
43555 - 0 0 0 0 0 0 0 0 0 0 0 0
43556 - 0 0 0 0 0 0 0 0 0 0 0 0
43557 - 0 0 0 0 0 0 0 0 0 0 0 0
43558 - 0 0 0 0 0 0 0 0 0 0 0 0
43559 - 0 0 0 0 0 0 0 0 0 0 0 0
43560 - 0 0 0 0 0 0 0 0 0 0 0 0
43561 - 0 0 0 0 0 0 0 0 0 0 0 0
43562 - 0 0 0 0 0 0 0 0 0 0 0 0
43563 - 0 0 0 0 0 0 6 6 6 18 18 18
43564 - 50 50 50 101 101 101 26 26 26 10 10 10
43565 -138 138 138 190 190 190 174 154 114 156 107 11
43566 -197 138 11 200 144 11 197 138 11 192 133 9
43567 -180 123 7 190 142 34 190 178 144 187 187 187
43568 -202 202 202 221 221 221 214 214 214 66 66 66
43569 - 2 2 6 2 2 6 50 50 50 62 62 62
43570 - 6 6 6 2 2 6 10 10 10 90 90 90
43571 - 50 50 50 18 18 18 6 6 6 0 0 0
43572 - 0 0 0 0 0 0 0 0 0 0 0 0
43573 - 0 0 0 0 0 0 0 0 0 0 0 0
43574 - 0 0 0 0 0 0 0 0 0 0 0 0
43575 - 0 0 0 0 0 0 0 0 0 0 0 0
43576 - 0 0 0 0 0 0 0 0 0 0 0 0
43577 - 0 0 0 0 0 0 0 0 0 0 0 0
43578 - 0 0 0 0 0 0 0 0 0 0 0 0
43579 - 0 0 0 0 0 0 0 0 0 0 0 0
43580 - 0 0 0 0 0 0 0 0 0 0 0 0
43581 - 0 0 0 0 0 0 0 0 0 0 0 0
43582 - 0 0 0 0 0 0 0 0 0 0 0 0
43583 - 0 0 0 0 0 0 10 10 10 34 34 34
43584 - 74 74 74 74 74 74 2 2 6 6 6 6
43585 -144 144 144 198 198 198 190 190 190 178 166 146
43586 -154 121 60 156 107 11 156 107 11 168 124 44
43587 -174 154 114 187 187 187 190 190 190 210 210 210
43588 -246 246 246 253 253 253 253 253 253 182 182 182
43589 - 6 6 6 2 2 6 2 2 6 2 2 6
43590 - 2 2 6 2 2 6 2 2 6 62 62 62
43591 - 74 74 74 34 34 34 14 14 14 0 0 0
43592 - 0 0 0 0 0 0 0 0 0 0 0 0
43593 - 0 0 0 0 0 0 0 0 0 0 0 0
43594 - 0 0 0 0 0 0 0 0 0 0 0 0
43595 - 0 0 0 0 0 0 0 0 0 0 0 0
43596 - 0 0 0 0 0 0 0 0 0 0 0 0
43597 - 0 0 0 0 0 0 0 0 0 0 0 0
43598 - 0 0 0 0 0 0 0 0 0 0 0 0
43599 - 0 0 0 0 0 0 0 0 0 0 0 0
43600 - 0 0 0 0 0 0 0 0 0 0 0 0
43601 - 0 0 0 0 0 0 0 0 0 0 0 0
43602 - 0 0 0 0 0 0 0 0 0 0 0 0
43603 - 0 0 0 10 10 10 22 22 22 54 54 54
43604 - 94 94 94 18 18 18 2 2 6 46 46 46
43605 -234 234 234 221 221 221 190 190 190 190 190 190
43606 -190 190 190 187 187 187 187 187 187 190 190 190
43607 -190 190 190 195 195 195 214 214 214 242 242 242
43608 -253 253 253 253 253 253 253 253 253 253 253 253
43609 - 82 82 82 2 2 6 2 2 6 2 2 6
43610 - 2 2 6 2 2 6 2 2 6 14 14 14
43611 - 86 86 86 54 54 54 22 22 22 6 6 6
43612 - 0 0 0 0 0 0 0 0 0 0 0 0
43613 - 0 0 0 0 0 0 0 0 0 0 0 0
43614 - 0 0 0 0 0 0 0 0 0 0 0 0
43615 - 0 0 0 0 0 0 0 0 0 0 0 0
43616 - 0 0 0 0 0 0 0 0 0 0 0 0
43617 - 0 0 0 0 0 0 0 0 0 0 0 0
43618 - 0 0 0 0 0 0 0 0 0 0 0 0
43619 - 0 0 0 0 0 0 0 0 0 0 0 0
43620 - 0 0 0 0 0 0 0 0 0 0 0 0
43621 - 0 0 0 0 0 0 0 0 0 0 0 0
43622 - 0 0 0 0 0 0 0 0 0 0 0 0
43623 - 6 6 6 18 18 18 46 46 46 90 90 90
43624 - 46 46 46 18 18 18 6 6 6 182 182 182
43625 -253 253 253 246 246 246 206 206 206 190 190 190
43626 -190 190 190 190 190 190 190 190 190 190 190 190
43627 -206 206 206 231 231 231 250 250 250 253 253 253
43628 -253 253 253 253 253 253 253 253 253 253 253 253
43629 -202 202 202 14 14 14 2 2 6 2 2 6
43630 - 2 2 6 2 2 6 2 2 6 2 2 6
43631 - 42 42 42 86 86 86 42 42 42 18 18 18
43632 - 6 6 6 0 0 0 0 0 0 0 0 0
43633 - 0 0 0 0 0 0 0 0 0 0 0 0
43634 - 0 0 0 0 0 0 0 0 0 0 0 0
43635 - 0 0 0 0 0 0 0 0 0 0 0 0
43636 - 0 0 0 0 0 0 0 0 0 0 0 0
43637 - 0 0 0 0 0 0 0 0 0 0 0 0
43638 - 0 0 0 0 0 0 0 0 0 0 0 0
43639 - 0 0 0 0 0 0 0 0 0 0 0 0
43640 - 0 0 0 0 0 0 0 0 0 0 0 0
43641 - 0 0 0 0 0 0 0 0 0 0 0 0
43642 - 0 0 0 0 0 0 0 0 0 6 6 6
43643 - 14 14 14 38 38 38 74 74 74 66 66 66
43644 - 2 2 6 6 6 6 90 90 90 250 250 250
43645 -253 253 253 253 253 253 238 238 238 198 198 198
43646 -190 190 190 190 190 190 195 195 195 221 221 221
43647 -246 246 246 253 253 253 253 253 253 253 253 253
43648 -253 253 253 253 253 253 253 253 253 253 253 253
43649 -253 253 253 82 82 82 2 2 6 2 2 6
43650 - 2 2 6 2 2 6 2 2 6 2 2 6
43651 - 2 2 6 78 78 78 70 70 70 34 34 34
43652 - 14 14 14 6 6 6 0 0 0 0 0 0
43653 - 0 0 0 0 0 0 0 0 0 0 0 0
43654 - 0 0 0 0 0 0 0 0 0 0 0 0
43655 - 0 0 0 0 0 0 0 0 0 0 0 0
43656 - 0 0 0 0 0 0 0 0 0 0 0 0
43657 - 0 0 0 0 0 0 0 0 0 0 0 0
43658 - 0 0 0 0 0 0 0 0 0 0 0 0
43659 - 0 0 0 0 0 0 0 0 0 0 0 0
43660 - 0 0 0 0 0 0 0 0 0 0 0 0
43661 - 0 0 0 0 0 0 0 0 0 0 0 0
43662 - 0 0 0 0 0 0 0 0 0 14 14 14
43663 - 34 34 34 66 66 66 78 78 78 6 6 6
43664 - 2 2 6 18 18 18 218 218 218 253 253 253
43665 -253 253 253 253 253 253 253 253 253 246 246 246
43666 -226 226 226 231 231 231 246 246 246 253 253 253
43667 -253 253 253 253 253 253 253 253 253 253 253 253
43668 -253 253 253 253 253 253 253 253 253 253 253 253
43669 -253 253 253 178 178 178 2 2 6 2 2 6
43670 - 2 2 6 2 2 6 2 2 6 2 2 6
43671 - 2 2 6 18 18 18 90 90 90 62 62 62
43672 - 30 30 30 10 10 10 0 0 0 0 0 0
43673 - 0 0 0 0 0 0 0 0 0 0 0 0
43674 - 0 0 0 0 0 0 0 0 0 0 0 0
43675 - 0 0 0 0 0 0 0 0 0 0 0 0
43676 - 0 0 0 0 0 0 0 0 0 0 0 0
43677 - 0 0 0 0 0 0 0 0 0 0 0 0
43678 - 0 0 0 0 0 0 0 0 0 0 0 0
43679 - 0 0 0 0 0 0 0 0 0 0 0 0
43680 - 0 0 0 0 0 0 0 0 0 0 0 0
43681 - 0 0 0 0 0 0 0 0 0 0 0 0
43682 - 0 0 0 0 0 0 10 10 10 26 26 26
43683 - 58 58 58 90 90 90 18 18 18 2 2 6
43684 - 2 2 6 110 110 110 253 253 253 253 253 253
43685 -253 253 253 253 253 253 253 253 253 253 253 253
43686 -250 250 250 253 253 253 253 253 253 253 253 253
43687 -253 253 253 253 253 253 253 253 253 253 253 253
43688 -253 253 253 253 253 253 253 253 253 253 253 253
43689 -253 253 253 231 231 231 18 18 18 2 2 6
43690 - 2 2 6 2 2 6 2 2 6 2 2 6
43691 - 2 2 6 2 2 6 18 18 18 94 94 94
43692 - 54 54 54 26 26 26 10 10 10 0 0 0
43693 - 0 0 0 0 0 0 0 0 0 0 0 0
43694 - 0 0 0 0 0 0 0 0 0 0 0 0
43695 - 0 0 0 0 0 0 0 0 0 0 0 0
43696 - 0 0 0 0 0 0 0 0 0 0 0 0
43697 - 0 0 0 0 0 0 0 0 0 0 0 0
43698 - 0 0 0 0 0 0 0 0 0 0 0 0
43699 - 0 0 0 0 0 0 0 0 0 0 0 0
43700 - 0 0 0 0 0 0 0 0 0 0 0 0
43701 - 0 0 0 0 0 0 0 0 0 0 0 0
43702 - 0 0 0 6 6 6 22 22 22 50 50 50
43703 - 90 90 90 26 26 26 2 2 6 2 2 6
43704 - 14 14 14 195 195 195 250 250 250 253 253 253
43705 -253 253 253 253 253 253 253 253 253 253 253 253
43706 -253 253 253 253 253 253 253 253 253 253 253 253
43707 -253 253 253 253 253 253 253 253 253 253 253 253
43708 -253 253 253 253 253 253 253 253 253 253 253 253
43709 -250 250 250 242 242 242 54 54 54 2 2 6
43710 - 2 2 6 2 2 6 2 2 6 2 2 6
43711 - 2 2 6 2 2 6 2 2 6 38 38 38
43712 - 86 86 86 50 50 50 22 22 22 6 6 6
43713 - 0 0 0 0 0 0 0 0 0 0 0 0
43714 - 0 0 0 0 0 0 0 0 0 0 0 0
43715 - 0 0 0 0 0 0 0 0 0 0 0 0
43716 - 0 0 0 0 0 0 0 0 0 0 0 0
43717 - 0 0 0 0 0 0 0 0 0 0 0 0
43718 - 0 0 0 0 0 0 0 0 0 0 0 0
43719 - 0 0 0 0 0 0 0 0 0 0 0 0
43720 - 0 0 0 0 0 0 0 0 0 0 0 0
43721 - 0 0 0 0 0 0 0 0 0 0 0 0
43722 - 6 6 6 14 14 14 38 38 38 82 82 82
43723 - 34 34 34 2 2 6 2 2 6 2 2 6
43724 - 42 42 42 195 195 195 246 246 246 253 253 253
43725 -253 253 253 253 253 253 253 253 253 250 250 250
43726 -242 242 242 242 242 242 250 250 250 253 253 253
43727 -253 253 253 253 253 253 253 253 253 253 253 253
43728 -253 253 253 250 250 250 246 246 246 238 238 238
43729 -226 226 226 231 231 231 101 101 101 6 6 6
43730 - 2 2 6 2 2 6 2 2 6 2 2 6
43731 - 2 2 6 2 2 6 2 2 6 2 2 6
43732 - 38 38 38 82 82 82 42 42 42 14 14 14
43733 - 6 6 6 0 0 0 0 0 0 0 0 0
43734 - 0 0 0 0 0 0 0 0 0 0 0 0
43735 - 0 0 0 0 0 0 0 0 0 0 0 0
43736 - 0 0 0 0 0 0 0 0 0 0 0 0
43737 - 0 0 0 0 0 0 0 0 0 0 0 0
43738 - 0 0 0 0 0 0 0 0 0 0 0 0
43739 - 0 0 0 0 0 0 0 0 0 0 0 0
43740 - 0 0 0 0 0 0 0 0 0 0 0 0
43741 - 0 0 0 0 0 0 0 0 0 0 0 0
43742 - 10 10 10 26 26 26 62 62 62 66 66 66
43743 - 2 2 6 2 2 6 2 2 6 6 6 6
43744 - 70 70 70 170 170 170 206 206 206 234 234 234
43745 -246 246 246 250 250 250 250 250 250 238 238 238
43746 -226 226 226 231 231 231 238 238 238 250 250 250
43747 -250 250 250 250 250 250 246 246 246 231 231 231
43748 -214 214 214 206 206 206 202 202 202 202 202 202
43749 -198 198 198 202 202 202 182 182 182 18 18 18
43750 - 2 2 6 2 2 6 2 2 6 2 2 6
43751 - 2 2 6 2 2 6 2 2 6 2 2 6
43752 - 2 2 6 62 62 62 66 66 66 30 30 30
43753 - 10 10 10 0 0 0 0 0 0 0 0 0
43754 - 0 0 0 0 0 0 0 0 0 0 0 0
43755 - 0 0 0 0 0 0 0 0 0 0 0 0
43756 - 0 0 0 0 0 0 0 0 0 0 0 0
43757 - 0 0 0 0 0 0 0 0 0 0 0 0
43758 - 0 0 0 0 0 0 0 0 0 0 0 0
43759 - 0 0 0 0 0 0 0 0 0 0 0 0
43760 - 0 0 0 0 0 0 0 0 0 0 0 0
43761 - 0 0 0 0 0 0 0 0 0 0 0 0
43762 - 14 14 14 42 42 42 82 82 82 18 18 18
43763 - 2 2 6 2 2 6 2 2 6 10 10 10
43764 - 94 94 94 182 182 182 218 218 218 242 242 242
43765 -250 250 250 253 253 253 253 253 253 250 250 250
43766 -234 234 234 253 253 253 253 253 253 253 253 253
43767 -253 253 253 253 253 253 253 253 253 246 246 246
43768 -238 238 238 226 226 226 210 210 210 202 202 202
43769 -195 195 195 195 195 195 210 210 210 158 158 158
43770 - 6 6 6 14 14 14 50 50 50 14 14 14
43771 - 2 2 6 2 2 6 2 2 6 2 2 6
43772 - 2 2 6 6 6 6 86 86 86 46 46 46
43773 - 18 18 18 6 6 6 0 0 0 0 0 0
43774 - 0 0 0 0 0 0 0 0 0 0 0 0
43775 - 0 0 0 0 0 0 0 0 0 0 0 0
43776 - 0 0 0 0 0 0 0 0 0 0 0 0
43777 - 0 0 0 0 0 0 0 0 0 0 0 0
43778 - 0 0 0 0 0 0 0 0 0 0 0 0
43779 - 0 0 0 0 0 0 0 0 0 0 0 0
43780 - 0 0 0 0 0 0 0 0 0 0 0 0
43781 - 0 0 0 0 0 0 0 0 0 6 6 6
43782 - 22 22 22 54 54 54 70 70 70 2 2 6
43783 - 2 2 6 10 10 10 2 2 6 22 22 22
43784 -166 166 166 231 231 231 250 250 250 253 253 253
43785 -253 253 253 253 253 253 253 253 253 250 250 250
43786 -242 242 242 253 253 253 253 253 253 253 253 253
43787 -253 253 253 253 253 253 253 253 253 253 253 253
43788 -253 253 253 253 253 253 253 253 253 246 246 246
43789 -231 231 231 206 206 206 198 198 198 226 226 226
43790 - 94 94 94 2 2 6 6 6 6 38 38 38
43791 - 30 30 30 2 2 6 2 2 6 2 2 6
43792 - 2 2 6 2 2 6 62 62 62 66 66 66
43793 - 26 26 26 10 10 10 0 0 0 0 0 0
43794 - 0 0 0 0 0 0 0 0 0 0 0 0
43795 - 0 0 0 0 0 0 0 0 0 0 0 0
43796 - 0 0 0 0 0 0 0 0 0 0 0 0
43797 - 0 0 0 0 0 0 0 0 0 0 0 0
43798 - 0 0 0 0 0 0 0 0 0 0 0 0
43799 - 0 0 0 0 0 0 0 0 0 0 0 0
43800 - 0 0 0 0 0 0 0 0 0 0 0 0
43801 - 0 0 0 0 0 0 0 0 0 10 10 10
43802 - 30 30 30 74 74 74 50 50 50 2 2 6
43803 - 26 26 26 26 26 26 2 2 6 106 106 106
43804 -238 238 238 253 253 253 253 253 253 253 253 253
43805 -253 253 253 253 253 253 253 253 253 253 253 253
43806 -253 253 253 253 253 253 253 253 253 253 253 253
43807 -253 253 253 253 253 253 253 253 253 253 253 253
43808 -253 253 253 253 253 253 253 253 253 253 253 253
43809 -253 253 253 246 246 246 218 218 218 202 202 202
43810 -210 210 210 14 14 14 2 2 6 2 2 6
43811 - 30 30 30 22 22 22 2 2 6 2 2 6
43812 - 2 2 6 2 2 6 18 18 18 86 86 86
43813 - 42 42 42 14 14 14 0 0 0 0 0 0
43814 - 0 0 0 0 0 0 0 0 0 0 0 0
43815 - 0 0 0 0 0 0 0 0 0 0 0 0
43816 - 0 0 0 0 0 0 0 0 0 0 0 0
43817 - 0 0 0 0 0 0 0 0 0 0 0 0
43818 - 0 0 0 0 0 0 0 0 0 0 0 0
43819 - 0 0 0 0 0 0 0 0 0 0 0 0
43820 - 0 0 0 0 0 0 0 0 0 0 0 0
43821 - 0 0 0 0 0 0 0 0 0 14 14 14
43822 - 42 42 42 90 90 90 22 22 22 2 2 6
43823 - 42 42 42 2 2 6 18 18 18 218 218 218
43824 -253 253 253 253 253 253 253 253 253 253 253 253
43825 -253 253 253 253 253 253 253 253 253 253 253 253
43826 -253 253 253 253 253 253 253 253 253 253 253 253
43827 -253 253 253 253 253 253 253 253 253 253 253 253
43828 -253 253 253 253 253 253 253 253 253 253 253 253
43829 -253 253 253 253 253 253 250 250 250 221 221 221
43830 -218 218 218 101 101 101 2 2 6 14 14 14
43831 - 18 18 18 38 38 38 10 10 10 2 2 6
43832 - 2 2 6 2 2 6 2 2 6 78 78 78
43833 - 58 58 58 22 22 22 6 6 6 0 0 0
43834 - 0 0 0 0 0 0 0 0 0 0 0 0
43835 - 0 0 0 0 0 0 0 0 0 0 0 0
43836 - 0 0 0 0 0 0 0 0 0 0 0 0
43837 - 0 0 0 0 0 0 0 0 0 0 0 0
43838 - 0 0 0 0 0 0 0 0 0 0 0 0
43839 - 0 0 0 0 0 0 0 0 0 0 0 0
43840 - 0 0 0 0 0 0 0 0 0 0 0 0
43841 - 0 0 0 0 0 0 6 6 6 18 18 18
43842 - 54 54 54 82 82 82 2 2 6 26 26 26
43843 - 22 22 22 2 2 6 123 123 123 253 253 253
43844 -253 253 253 253 253 253 253 253 253 253 253 253
43845 -253 253 253 253 253 253 253 253 253 253 253 253
43846 -253 253 253 253 253 253 253 253 253 253 253 253
43847 -253 253 253 253 253 253 253 253 253 253 253 253
43848 -253 253 253 253 253 253 253 253 253 253 253 253
43849 -253 253 253 253 253 253 253 253 253 250 250 250
43850 -238 238 238 198 198 198 6 6 6 38 38 38
43851 - 58 58 58 26 26 26 38 38 38 2 2 6
43852 - 2 2 6 2 2 6 2 2 6 46 46 46
43853 - 78 78 78 30 30 30 10 10 10 0 0 0
43854 - 0 0 0 0 0 0 0 0 0 0 0 0
43855 - 0 0 0 0 0 0 0 0 0 0 0 0
43856 - 0 0 0 0 0 0 0 0 0 0 0 0
43857 - 0 0 0 0 0 0 0 0 0 0 0 0
43858 - 0 0 0 0 0 0 0 0 0 0 0 0
43859 - 0 0 0 0 0 0 0 0 0 0 0 0
43860 - 0 0 0 0 0 0 0 0 0 0 0 0
43861 - 0 0 0 0 0 0 10 10 10 30 30 30
43862 - 74 74 74 58 58 58 2 2 6 42 42 42
43863 - 2 2 6 22 22 22 231 231 231 253 253 253
43864 -253 253 253 253 253 253 253 253 253 253 253 253
43865 -253 253 253 253 253 253 253 253 253 250 250 250
43866 -253 253 253 253 253 253 253 253 253 253 253 253
43867 -253 253 253 253 253 253 253 253 253 253 253 253
43868 -253 253 253 253 253 253 253 253 253 253 253 253
43869 -253 253 253 253 253 253 253 253 253 253 253 253
43870 -253 253 253 246 246 246 46 46 46 38 38 38
43871 - 42 42 42 14 14 14 38 38 38 14 14 14
43872 - 2 2 6 2 2 6 2 2 6 6 6 6
43873 - 86 86 86 46 46 46 14 14 14 0 0 0
43874 - 0 0 0 0 0 0 0 0 0 0 0 0
43875 - 0 0 0 0 0 0 0 0 0 0 0 0
43876 - 0 0 0 0 0 0 0 0 0 0 0 0
43877 - 0 0 0 0 0 0 0 0 0 0 0 0
43878 - 0 0 0 0 0 0 0 0 0 0 0 0
43879 - 0 0 0 0 0 0 0 0 0 0 0 0
43880 - 0 0 0 0 0 0 0 0 0 0 0 0
43881 - 0 0 0 6 6 6 14 14 14 42 42 42
43882 - 90 90 90 18 18 18 18 18 18 26 26 26
43883 - 2 2 6 116 116 116 253 253 253 253 253 253
43884 -253 253 253 253 253 253 253 253 253 253 253 253
43885 -253 253 253 253 253 253 250 250 250 238 238 238
43886 -253 253 253 253 253 253 253 253 253 253 253 253
43887 -253 253 253 253 253 253 253 253 253 253 253 253
43888 -253 253 253 253 253 253 253 253 253 253 253 253
43889 -253 253 253 253 253 253 253 253 253 253 253 253
43890 -253 253 253 253 253 253 94 94 94 6 6 6
43891 - 2 2 6 2 2 6 10 10 10 34 34 34
43892 - 2 2 6 2 2 6 2 2 6 2 2 6
43893 - 74 74 74 58 58 58 22 22 22 6 6 6
43894 - 0 0 0 0 0 0 0 0 0 0 0 0
43895 - 0 0 0 0 0 0 0 0 0 0 0 0
43896 - 0 0 0 0 0 0 0 0 0 0 0 0
43897 - 0 0 0 0 0 0 0 0 0 0 0 0
43898 - 0 0 0 0 0 0 0 0 0 0 0 0
43899 - 0 0 0 0 0 0 0 0 0 0 0 0
43900 - 0 0 0 0 0 0 0 0 0 0 0 0
43901 - 0 0 0 10 10 10 26 26 26 66 66 66
43902 - 82 82 82 2 2 6 38 38 38 6 6 6
43903 - 14 14 14 210 210 210 253 253 253 253 253 253
43904 -253 253 253 253 253 253 253 253 253 253 253 253
43905 -253 253 253 253 253 253 246 246 246 242 242 242
43906 -253 253 253 253 253 253 253 253 253 253 253 253
43907 -253 253 253 253 253 253 253 253 253 253 253 253
43908 -253 253 253 253 253 253 253 253 253 253 253 253
43909 -253 253 253 253 253 253 253 253 253 253 253 253
43910 -253 253 253 253 253 253 144 144 144 2 2 6
43911 - 2 2 6 2 2 6 2 2 6 46 46 46
43912 - 2 2 6 2 2 6 2 2 6 2 2 6
43913 - 42 42 42 74 74 74 30 30 30 10 10 10
43914 - 0 0 0 0 0 0 0 0 0 0 0 0
43915 - 0 0 0 0 0 0 0 0 0 0 0 0
43916 - 0 0 0 0 0 0 0 0 0 0 0 0
43917 - 0 0 0 0 0 0 0 0 0 0 0 0
43918 - 0 0 0 0 0 0 0 0 0 0 0 0
43919 - 0 0 0 0 0 0 0 0 0 0 0 0
43920 - 0 0 0 0 0 0 0 0 0 0 0 0
43921 - 6 6 6 14 14 14 42 42 42 90 90 90
43922 - 26 26 26 6 6 6 42 42 42 2 2 6
43923 - 74 74 74 250 250 250 253 253 253 253 253 253
43924 -253 253 253 253 253 253 253 253 253 253 253 253
43925 -253 253 253 253 253 253 242 242 242 242 242 242
43926 -253 253 253 253 253 253 253 253 253 253 253 253
43927 -253 253 253 253 253 253 253 253 253 253 253 253
43928 -253 253 253 253 253 253 253 253 253 253 253 253
43929 -253 253 253 253 253 253 253 253 253 253 253 253
43930 -253 253 253 253 253 253 182 182 182 2 2 6
43931 - 2 2 6 2 2 6 2 2 6 46 46 46
43932 - 2 2 6 2 2 6 2 2 6 2 2 6
43933 - 10 10 10 86 86 86 38 38 38 10 10 10
43934 - 0 0 0 0 0 0 0 0 0 0 0 0
43935 - 0 0 0 0 0 0 0 0 0 0 0 0
43936 - 0 0 0 0 0 0 0 0 0 0 0 0
43937 - 0 0 0 0 0 0 0 0 0 0 0 0
43938 - 0 0 0 0 0 0 0 0 0 0 0 0
43939 - 0 0 0 0 0 0 0 0 0 0 0 0
43940 - 0 0 0 0 0 0 0 0 0 0 0 0
43941 - 10 10 10 26 26 26 66 66 66 82 82 82
43942 - 2 2 6 22 22 22 18 18 18 2 2 6
43943 -149 149 149 253 253 253 253 253 253 253 253 253
43944 -253 253 253 253 253 253 253 253 253 253 253 253
43945 -253 253 253 253 253 253 234 234 234 242 242 242
43946 -253 253 253 253 253 253 253 253 253 253 253 253
43947 -253 253 253 253 253 253 253 253 253 253 253 253
43948 -253 253 253 253 253 253 253 253 253 253 253 253
43949 -253 253 253 253 253 253 253 253 253 253 253 253
43950 -253 253 253 253 253 253 206 206 206 2 2 6
43951 - 2 2 6 2 2 6 2 2 6 38 38 38
43952 - 2 2 6 2 2 6 2 2 6 2 2 6
43953 - 6 6 6 86 86 86 46 46 46 14 14 14
43954 - 0 0 0 0 0 0 0 0 0 0 0 0
43955 - 0 0 0 0 0 0 0 0 0 0 0 0
43956 - 0 0 0 0 0 0 0 0 0 0 0 0
43957 - 0 0 0 0 0 0 0 0 0 0 0 0
43958 - 0 0 0 0 0 0 0 0 0 0 0 0
43959 - 0 0 0 0 0 0 0 0 0 0 0 0
43960 - 0 0 0 0 0 0 0 0 0 6 6 6
43961 - 18 18 18 46 46 46 86 86 86 18 18 18
43962 - 2 2 6 34 34 34 10 10 10 6 6 6
43963 -210 210 210 253 253 253 253 253 253 253 253 253
43964 -253 253 253 253 253 253 253 253 253 253 253 253
43965 -253 253 253 253 253 253 234 234 234 242 242 242
43966 -253 253 253 253 253 253 253 253 253 253 253 253
43967 -253 253 253 253 253 253 253 253 253 253 253 253
43968 -253 253 253 253 253 253 253 253 253 253 253 253
43969 -253 253 253 253 253 253 253 253 253 253 253 253
43970 -253 253 253 253 253 253 221 221 221 6 6 6
43971 - 2 2 6 2 2 6 6 6 6 30 30 30
43972 - 2 2 6 2 2 6 2 2 6 2 2 6
43973 - 2 2 6 82 82 82 54 54 54 18 18 18
43974 - 6 6 6 0 0 0 0 0 0 0 0 0
43975 - 0 0 0 0 0 0 0 0 0 0 0 0
43976 - 0 0 0 0 0 0 0 0 0 0 0 0
43977 - 0 0 0 0 0 0 0 0 0 0 0 0
43978 - 0 0 0 0 0 0 0 0 0 0 0 0
43979 - 0 0 0 0 0 0 0 0 0 0 0 0
43980 - 0 0 0 0 0 0 0 0 0 10 10 10
43981 - 26 26 26 66 66 66 62 62 62 2 2 6
43982 - 2 2 6 38 38 38 10 10 10 26 26 26
43983 -238 238 238 253 253 253 253 253 253 253 253 253
43984 -253 253 253 253 253 253 253 253 253 253 253 253
43985 -253 253 253 253 253 253 231 231 231 238 238 238
43986 -253 253 253 253 253 253 253 253 253 253 253 253
43987 -253 253 253 253 253 253 253 253 253 253 253 253
43988 -253 253 253 253 253 253 253 253 253 253 253 253
43989 -253 253 253 253 253 253 253 253 253 253 253 253
43990 -253 253 253 253 253 253 231 231 231 6 6 6
43991 - 2 2 6 2 2 6 10 10 10 30 30 30
43992 - 2 2 6 2 2 6 2 2 6 2 2 6
43993 - 2 2 6 66 66 66 58 58 58 22 22 22
43994 - 6 6 6 0 0 0 0 0 0 0 0 0
43995 - 0 0 0 0 0 0 0 0 0 0 0 0
43996 - 0 0 0 0 0 0 0 0 0 0 0 0
43997 - 0 0 0 0 0 0 0 0 0 0 0 0
43998 - 0 0 0 0 0 0 0 0 0 0 0 0
43999 - 0 0 0 0 0 0 0 0 0 0 0 0
44000 - 0 0 0 0 0 0 0 0 0 10 10 10
44001 - 38 38 38 78 78 78 6 6 6 2 2 6
44002 - 2 2 6 46 46 46 14 14 14 42 42 42
44003 -246 246 246 253 253 253 253 253 253 253 253 253
44004 -253 253 253 253 253 253 253 253 253 253 253 253
44005 -253 253 253 253 253 253 231 231 231 242 242 242
44006 -253 253 253 253 253 253 253 253 253 253 253 253
44007 -253 253 253 253 253 253 253 253 253 253 253 253
44008 -253 253 253 253 253 253 253 253 253 253 253 253
44009 -253 253 253 253 253 253 253 253 253 253 253 253
44010 -253 253 253 253 253 253 234 234 234 10 10 10
44011 - 2 2 6 2 2 6 22 22 22 14 14 14
44012 - 2 2 6 2 2 6 2 2 6 2 2 6
44013 - 2 2 6 66 66 66 62 62 62 22 22 22
44014 - 6 6 6 0 0 0 0 0 0 0 0 0
44015 - 0 0 0 0 0 0 0 0 0 0 0 0
44016 - 0 0 0 0 0 0 0 0 0 0 0 0
44017 - 0 0 0 0 0 0 0 0 0 0 0 0
44018 - 0 0 0 0 0 0 0 0 0 0 0 0
44019 - 0 0 0 0 0 0 0 0 0 0 0 0
44020 - 0 0 0 0 0 0 6 6 6 18 18 18
44021 - 50 50 50 74 74 74 2 2 6 2 2 6
44022 - 14 14 14 70 70 70 34 34 34 62 62 62
44023 -250 250 250 253 253 253 253 253 253 253 253 253
44024 -253 253 253 253 253 253 253 253 253 253 253 253
44025 -253 253 253 253 253 253 231 231 231 246 246 246
44026 -253 253 253 253 253 253 253 253 253 253 253 253
44027 -253 253 253 253 253 253 253 253 253 253 253 253
44028 -253 253 253 253 253 253 253 253 253 253 253 253
44029 -253 253 253 253 253 253 253 253 253 253 253 253
44030 -253 253 253 253 253 253 234 234 234 14 14 14
44031 - 2 2 6 2 2 6 30 30 30 2 2 6
44032 - 2 2 6 2 2 6 2 2 6 2 2 6
44033 - 2 2 6 66 66 66 62 62 62 22 22 22
44034 - 6 6 6 0 0 0 0 0 0 0 0 0
44035 - 0 0 0 0 0 0 0 0 0 0 0 0
44036 - 0 0 0 0 0 0 0 0 0 0 0 0
44037 - 0 0 0 0 0 0 0 0 0 0 0 0
44038 - 0 0 0 0 0 0 0 0 0 0 0 0
44039 - 0 0 0 0 0 0 0 0 0 0 0 0
44040 - 0 0 0 0 0 0 6 6 6 18 18 18
44041 - 54 54 54 62 62 62 2 2 6 2 2 6
44042 - 2 2 6 30 30 30 46 46 46 70 70 70
44043 -250 250 250 253 253 253 253 253 253 253 253 253
44044 -253 253 253 253 253 253 253 253 253 253 253 253
44045 -253 253 253 253 253 253 231 231 231 246 246 246
44046 -253 253 253 253 253 253 253 253 253 253 253 253
44047 -253 253 253 253 253 253 253 253 253 253 253 253
44048 -253 253 253 253 253 253 253 253 253 253 253 253
44049 -253 253 253 253 253 253 253 253 253 253 253 253
44050 -253 253 253 253 253 253 226 226 226 10 10 10
44051 - 2 2 6 6 6 6 30 30 30 2 2 6
44052 - 2 2 6 2 2 6 2 2 6 2 2 6
44053 - 2 2 6 66 66 66 58 58 58 22 22 22
44054 - 6 6 6 0 0 0 0 0 0 0 0 0
44055 - 0 0 0 0 0 0 0 0 0 0 0 0
44056 - 0 0 0 0 0 0 0 0 0 0 0 0
44057 - 0 0 0 0 0 0 0 0 0 0 0 0
44058 - 0 0 0 0 0 0 0 0 0 0 0 0
44059 - 0 0 0 0 0 0 0 0 0 0 0 0
44060 - 0 0 0 0 0 0 6 6 6 22 22 22
44061 - 58 58 58 62 62 62 2 2 6 2 2 6
44062 - 2 2 6 2 2 6 30 30 30 78 78 78
44063 -250 250 250 253 253 253 253 253 253 253 253 253
44064 -253 253 253 253 253 253 253 253 253 253 253 253
44065 -253 253 253 253 253 253 231 231 231 246 246 246
44066 -253 253 253 253 253 253 253 253 253 253 253 253
44067 -253 253 253 253 253 253 253 253 253 253 253 253
44068 -253 253 253 253 253 253 253 253 253 253 253 253
44069 -253 253 253 253 253 253 253 253 253 253 253 253
44070 -253 253 253 253 253 253 206 206 206 2 2 6
44071 - 22 22 22 34 34 34 18 14 6 22 22 22
44072 - 26 26 26 18 18 18 6 6 6 2 2 6
44073 - 2 2 6 82 82 82 54 54 54 18 18 18
44074 - 6 6 6 0 0 0 0 0 0 0 0 0
44075 - 0 0 0 0 0 0 0 0 0 0 0 0
44076 - 0 0 0 0 0 0 0 0 0 0 0 0
44077 - 0 0 0 0 0 0 0 0 0 0 0 0
44078 - 0 0 0 0 0 0 0 0 0 0 0 0
44079 - 0 0 0 0 0 0 0 0 0 0 0 0
44080 - 0 0 0 0 0 0 6 6 6 26 26 26
44081 - 62 62 62 106 106 106 74 54 14 185 133 11
44082 -210 162 10 121 92 8 6 6 6 62 62 62
44083 -238 238 238 253 253 253 253 253 253 253 253 253
44084 -253 253 253 253 253 253 253 253 253 253 253 253
44085 -253 253 253 253 253 253 231 231 231 246 246 246
44086 -253 253 253 253 253 253 253 253 253 253 253 253
44087 -253 253 253 253 253 253 253 253 253 253 253 253
44088 -253 253 253 253 253 253 253 253 253 253 253 253
44089 -253 253 253 253 253 253 253 253 253 253 253 253
44090 -253 253 253 253 253 253 158 158 158 18 18 18
44091 - 14 14 14 2 2 6 2 2 6 2 2 6
44092 - 6 6 6 18 18 18 66 66 66 38 38 38
44093 - 6 6 6 94 94 94 50 50 50 18 18 18
44094 - 6 6 6 0 0 0 0 0 0 0 0 0
44095 - 0 0 0 0 0 0 0 0 0 0 0 0
44096 - 0 0 0 0 0 0 0 0 0 0 0 0
44097 - 0 0 0 0 0 0 0 0 0 0 0 0
44098 - 0 0 0 0 0 0 0 0 0 0 0 0
44099 - 0 0 0 0 0 0 0 0 0 6 6 6
44100 - 10 10 10 10 10 10 18 18 18 38 38 38
44101 - 78 78 78 142 134 106 216 158 10 242 186 14
44102 -246 190 14 246 190 14 156 118 10 10 10 10
44103 - 90 90 90 238 238 238 253 253 253 253 253 253
44104 -253 253 253 253 253 253 253 253 253 253 253 253
44105 -253 253 253 253 253 253 231 231 231 250 250 250
44106 -253 253 253 253 253 253 253 253 253 253 253 253
44107 -253 253 253 253 253 253 253 253 253 253 253 253
44108 -253 253 253 253 253 253 253 253 253 253 253 253
44109 -253 253 253 253 253 253 253 253 253 246 230 190
44110 -238 204 91 238 204 91 181 142 44 37 26 9
44111 - 2 2 6 2 2 6 2 2 6 2 2 6
44112 - 2 2 6 2 2 6 38 38 38 46 46 46
44113 - 26 26 26 106 106 106 54 54 54 18 18 18
44114 - 6 6 6 0 0 0 0 0 0 0 0 0
44115 - 0 0 0 0 0 0 0 0 0 0 0 0
44116 - 0 0 0 0 0 0 0 0 0 0 0 0
44117 - 0 0 0 0 0 0 0 0 0 0 0 0
44118 - 0 0 0 0 0 0 0 0 0 0 0 0
44119 - 0 0 0 6 6 6 14 14 14 22 22 22
44120 - 30 30 30 38 38 38 50 50 50 70 70 70
44121 -106 106 106 190 142 34 226 170 11 242 186 14
44122 -246 190 14 246 190 14 246 190 14 154 114 10
44123 - 6 6 6 74 74 74 226 226 226 253 253 253
44124 -253 253 253 253 253 253 253 253 253 253 253 253
44125 -253 253 253 253 253 253 231 231 231 250 250 250
44126 -253 253 253 253 253 253 253 253 253 253 253 253
44127 -253 253 253 253 253 253 253 253 253 253 253 253
44128 -253 253 253 253 253 253 253 253 253 253 253 253
44129 -253 253 253 253 253 253 253 253 253 228 184 62
44130 -241 196 14 241 208 19 232 195 16 38 30 10
44131 - 2 2 6 2 2 6 2 2 6 2 2 6
44132 - 2 2 6 6 6 6 30 30 30 26 26 26
44133 -203 166 17 154 142 90 66 66 66 26 26 26
44134 - 6 6 6 0 0 0 0 0 0 0 0 0
44135 - 0 0 0 0 0 0 0 0 0 0 0 0
44136 - 0 0 0 0 0 0 0 0 0 0 0 0
44137 - 0 0 0 0 0 0 0 0 0 0 0 0
44138 - 0 0 0 0 0 0 0 0 0 0 0 0
44139 - 6 6 6 18 18 18 38 38 38 58 58 58
44140 - 78 78 78 86 86 86 101 101 101 123 123 123
44141 -175 146 61 210 150 10 234 174 13 246 186 14
44142 -246 190 14 246 190 14 246 190 14 238 190 10
44143 -102 78 10 2 2 6 46 46 46 198 198 198
44144 -253 253 253 253 253 253 253 253 253 253 253 253
44145 -253 253 253 253 253 253 234 234 234 242 242 242
44146 -253 253 253 253 253 253 253 253 253 253 253 253
44147 -253 253 253 253 253 253 253 253 253 253 253 253
44148 -253 253 253 253 253 253 253 253 253 253 253 253
44149 -253 253 253 253 253 253 253 253 253 224 178 62
44150 -242 186 14 241 196 14 210 166 10 22 18 6
44151 - 2 2 6 2 2 6 2 2 6 2 2 6
44152 - 2 2 6 2 2 6 6 6 6 121 92 8
44153 -238 202 15 232 195 16 82 82 82 34 34 34
44154 - 10 10 10 0 0 0 0 0 0 0 0 0
44155 - 0 0 0 0 0 0 0 0 0 0 0 0
44156 - 0 0 0 0 0 0 0 0 0 0 0 0
44157 - 0 0 0 0 0 0 0 0 0 0 0 0
44158 - 0 0 0 0 0 0 0 0 0 0 0 0
44159 - 14 14 14 38 38 38 70 70 70 154 122 46
44160 -190 142 34 200 144 11 197 138 11 197 138 11
44161 -213 154 11 226 170 11 242 186 14 246 190 14
44162 -246 190 14 246 190 14 246 190 14 246 190 14
44163 -225 175 15 46 32 6 2 2 6 22 22 22
44164 -158 158 158 250 250 250 253 253 253 253 253 253
44165 -253 253 253 253 253 253 253 253 253 253 253 253
44166 -253 253 253 253 253 253 253 253 253 253 253 253
44167 -253 253 253 253 253 253 253 253 253 253 253 253
44168 -253 253 253 253 253 253 253 253 253 253 253 253
44169 -253 253 253 250 250 250 242 242 242 224 178 62
44170 -239 182 13 236 186 11 213 154 11 46 32 6
44171 - 2 2 6 2 2 6 2 2 6 2 2 6
44172 - 2 2 6 2 2 6 61 42 6 225 175 15
44173 -238 190 10 236 186 11 112 100 78 42 42 42
44174 - 14 14 14 0 0 0 0 0 0 0 0 0
44175 - 0 0 0 0 0 0 0 0 0 0 0 0
44176 - 0 0 0 0 0 0 0 0 0 0 0 0
44177 - 0 0 0 0 0 0 0 0 0 0 0 0
44178 - 0 0 0 0 0 0 0 0 0 6 6 6
44179 - 22 22 22 54 54 54 154 122 46 213 154 11
44180 -226 170 11 230 174 11 226 170 11 226 170 11
44181 -236 178 12 242 186 14 246 190 14 246 190 14
44182 -246 190 14 246 190 14 246 190 14 246 190 14
44183 -241 196 14 184 144 12 10 10 10 2 2 6
44184 - 6 6 6 116 116 116 242 242 242 253 253 253
44185 -253 253 253 253 253 253 253 253 253 253 253 253
44186 -253 253 253 253 253 253 253 253 253 253 253 253
44187 -253 253 253 253 253 253 253 253 253 253 253 253
44188 -253 253 253 253 253 253 253 253 253 253 253 253
44189 -253 253 253 231 231 231 198 198 198 214 170 54
44190 -236 178 12 236 178 12 210 150 10 137 92 6
44191 - 18 14 6 2 2 6 2 2 6 2 2 6
44192 - 6 6 6 70 47 6 200 144 11 236 178 12
44193 -239 182 13 239 182 13 124 112 88 58 58 58
44194 - 22 22 22 6 6 6 0 0 0 0 0 0
44195 - 0 0 0 0 0 0 0 0 0 0 0 0
44196 - 0 0 0 0 0 0 0 0 0 0 0 0
44197 - 0 0 0 0 0 0 0 0 0 0 0 0
44198 - 0 0 0 0 0 0 0 0 0 10 10 10
44199 - 30 30 30 70 70 70 180 133 36 226 170 11
44200 -239 182 13 242 186 14 242 186 14 246 186 14
44201 -246 190 14 246 190 14 246 190 14 246 190 14
44202 -246 190 14 246 190 14 246 190 14 246 190 14
44203 -246 190 14 232 195 16 98 70 6 2 2 6
44204 - 2 2 6 2 2 6 66 66 66 221 221 221
44205 -253 253 253 253 253 253 253 253 253 253 253 253
44206 -253 253 253 253 253 253 253 253 253 253 253 253
44207 -253 253 253 253 253 253 253 253 253 253 253 253
44208 -253 253 253 253 253 253 253 253 253 253 253 253
44209 -253 253 253 206 206 206 198 198 198 214 166 58
44210 -230 174 11 230 174 11 216 158 10 192 133 9
44211 -163 110 8 116 81 8 102 78 10 116 81 8
44212 -167 114 7 197 138 11 226 170 11 239 182 13
44213 -242 186 14 242 186 14 162 146 94 78 78 78
44214 - 34 34 34 14 14 14 6 6 6 0 0 0
44215 - 0 0 0 0 0 0 0 0 0 0 0 0
44216 - 0 0 0 0 0 0 0 0 0 0 0 0
44217 - 0 0 0 0 0 0 0 0 0 0 0 0
44218 - 0 0 0 0 0 0 0 0 0 6 6 6
44219 - 30 30 30 78 78 78 190 142 34 226 170 11
44220 -239 182 13 246 190 14 246 190 14 246 190 14
44221 -246 190 14 246 190 14 246 190 14 246 190 14
44222 -246 190 14 246 190 14 246 190 14 246 190 14
44223 -246 190 14 241 196 14 203 166 17 22 18 6
44224 - 2 2 6 2 2 6 2 2 6 38 38 38
44225 -218 218 218 253 253 253 253 253 253 253 253 253
44226 -253 253 253 253 253 253 253 253 253 253 253 253
44227 -253 253 253 253 253 253 253 253 253 253 253 253
44228 -253 253 253 253 253 253 253 253 253 253 253 253
44229 -250 250 250 206 206 206 198 198 198 202 162 69
44230 -226 170 11 236 178 12 224 166 10 210 150 10
44231 -200 144 11 197 138 11 192 133 9 197 138 11
44232 -210 150 10 226 170 11 242 186 14 246 190 14
44233 -246 190 14 246 186 14 225 175 15 124 112 88
44234 - 62 62 62 30 30 30 14 14 14 6 6 6
44235 - 0 0 0 0 0 0 0 0 0 0 0 0
44236 - 0 0 0 0 0 0 0 0 0 0 0 0
44237 - 0 0 0 0 0 0 0 0 0 0 0 0
44238 - 0 0 0 0 0 0 0 0 0 10 10 10
44239 - 30 30 30 78 78 78 174 135 50 224 166 10
44240 -239 182 13 246 190 14 246 190 14 246 190 14
44241 -246 190 14 246 190 14 246 190 14 246 190 14
44242 -246 190 14 246 190 14 246 190 14 246 190 14
44243 -246 190 14 246 190 14 241 196 14 139 102 15
44244 - 2 2 6 2 2 6 2 2 6 2 2 6
44245 - 78 78 78 250 250 250 253 253 253 253 253 253
44246 -253 253 253 253 253 253 253 253 253 253 253 253
44247 -253 253 253 253 253 253 253 253 253 253 253 253
44248 -253 253 253 253 253 253 253 253 253 253 253 253
44249 -250 250 250 214 214 214 198 198 198 190 150 46
44250 -219 162 10 236 178 12 234 174 13 224 166 10
44251 -216 158 10 213 154 11 213 154 11 216 158 10
44252 -226 170 11 239 182 13 246 190 14 246 190 14
44253 -246 190 14 246 190 14 242 186 14 206 162 42
44254 -101 101 101 58 58 58 30 30 30 14 14 14
44255 - 6 6 6 0 0 0 0 0 0 0 0 0
44256 - 0 0 0 0 0 0 0 0 0 0 0 0
44257 - 0 0 0 0 0 0 0 0 0 0 0 0
44258 - 0 0 0 0 0 0 0 0 0 10 10 10
44259 - 30 30 30 74 74 74 174 135 50 216 158 10
44260 -236 178 12 246 190 14 246 190 14 246 190 14
44261 -246 190 14 246 190 14 246 190 14 246 190 14
44262 -246 190 14 246 190 14 246 190 14 246 190 14
44263 -246 190 14 246 190 14 241 196 14 226 184 13
44264 - 61 42 6 2 2 6 2 2 6 2 2 6
44265 - 22 22 22 238 238 238 253 253 253 253 253 253
44266 -253 253 253 253 253 253 253 253 253 253 253 253
44267 -253 253 253 253 253 253 253 253 253 253 253 253
44268 -253 253 253 253 253 253 253 253 253 253 253 253
44269 -253 253 253 226 226 226 187 187 187 180 133 36
44270 -216 158 10 236 178 12 239 182 13 236 178 12
44271 -230 174 11 226 170 11 226 170 11 230 174 11
44272 -236 178 12 242 186 14 246 190 14 246 190 14
44273 -246 190 14 246 190 14 246 186 14 239 182 13
44274 -206 162 42 106 106 106 66 66 66 34 34 34
44275 - 14 14 14 6 6 6 0 0 0 0 0 0
44276 - 0 0 0 0 0 0 0 0 0 0 0 0
44277 - 0 0 0 0 0 0 0 0 0 0 0 0
44278 - 0 0 0 0 0 0 0 0 0 6 6 6
44279 - 26 26 26 70 70 70 163 133 67 213 154 11
44280 -236 178 12 246 190 14 246 190 14 246 190 14
44281 -246 190 14 246 190 14 246 190 14 246 190 14
44282 -246 190 14 246 190 14 246 190 14 246 190 14
44283 -246 190 14 246 190 14 246 190 14 241 196 14
44284 -190 146 13 18 14 6 2 2 6 2 2 6
44285 - 46 46 46 246 246 246 253 253 253 253 253 253
44286 -253 253 253 253 253 253 253 253 253 253 253 253
44287 -253 253 253 253 253 253 253 253 253 253 253 253
44288 -253 253 253 253 253 253 253 253 253 253 253 253
44289 -253 253 253 221 221 221 86 86 86 156 107 11
44290 -216 158 10 236 178 12 242 186 14 246 186 14
44291 -242 186 14 239 182 13 239 182 13 242 186 14
44292 -242 186 14 246 186 14 246 190 14 246 190 14
44293 -246 190 14 246 190 14 246 190 14 246 190 14
44294 -242 186 14 225 175 15 142 122 72 66 66 66
44295 - 30 30 30 10 10 10 0 0 0 0 0 0
44296 - 0 0 0 0 0 0 0 0 0 0 0 0
44297 - 0 0 0 0 0 0 0 0 0 0 0 0
44298 - 0 0 0 0 0 0 0 0 0 6 6 6
44299 - 26 26 26 70 70 70 163 133 67 210 150 10
44300 -236 178 12 246 190 14 246 190 14 246 190 14
44301 -246 190 14 246 190 14 246 190 14 246 190 14
44302 -246 190 14 246 190 14 246 190 14 246 190 14
44303 -246 190 14 246 190 14 246 190 14 246 190 14
44304 -232 195 16 121 92 8 34 34 34 106 106 106
44305 -221 221 221 253 253 253 253 253 253 253 253 253
44306 -253 253 253 253 253 253 253 253 253 253 253 253
44307 -253 253 253 253 253 253 253 253 253 253 253 253
44308 -253 253 253 253 253 253 253 253 253 253 253 253
44309 -242 242 242 82 82 82 18 14 6 163 110 8
44310 -216 158 10 236 178 12 242 186 14 246 190 14
44311 -246 190 14 246 190 14 246 190 14 246 190 14
44312 -246 190 14 246 190 14 246 190 14 246 190 14
44313 -246 190 14 246 190 14 246 190 14 246 190 14
44314 -246 190 14 246 190 14 242 186 14 163 133 67
44315 - 46 46 46 18 18 18 6 6 6 0 0 0
44316 - 0 0 0 0 0 0 0 0 0 0 0 0
44317 - 0 0 0 0 0 0 0 0 0 0 0 0
44318 - 0 0 0 0 0 0 0 0 0 10 10 10
44319 - 30 30 30 78 78 78 163 133 67 210 150 10
44320 -236 178 12 246 186 14 246 190 14 246 190 14
44321 -246 190 14 246 190 14 246 190 14 246 190 14
44322 -246 190 14 246 190 14 246 190 14 246 190 14
44323 -246 190 14 246 190 14 246 190 14 246 190 14
44324 -241 196 14 215 174 15 190 178 144 253 253 253
44325 -253 253 253 253 253 253 253 253 253 253 253 253
44326 -253 253 253 253 253 253 253 253 253 253 253 253
44327 -253 253 253 253 253 253 253 253 253 253 253 253
44328 -253 253 253 253 253 253 253 253 253 218 218 218
44329 - 58 58 58 2 2 6 22 18 6 167 114 7
44330 -216 158 10 236 178 12 246 186 14 246 190 14
44331 -246 190 14 246 190 14 246 190 14 246 190 14
44332 -246 190 14 246 190 14 246 190 14 246 190 14
44333 -246 190 14 246 190 14 246 190 14 246 190 14
44334 -246 190 14 246 186 14 242 186 14 190 150 46
44335 - 54 54 54 22 22 22 6 6 6 0 0 0
44336 - 0 0 0 0 0 0 0 0 0 0 0 0
44337 - 0 0 0 0 0 0 0 0 0 0 0 0
44338 - 0 0 0 0 0 0 0 0 0 14 14 14
44339 - 38 38 38 86 86 86 180 133 36 213 154 11
44340 -236 178 12 246 186 14 246 190 14 246 190 14
44341 -246 190 14 246 190 14 246 190 14 246 190 14
44342 -246 190 14 246 190 14 246 190 14 246 190 14
44343 -246 190 14 246 190 14 246 190 14 246 190 14
44344 -246 190 14 232 195 16 190 146 13 214 214 214
44345 -253 253 253 253 253 253 253 253 253 253 253 253
44346 -253 253 253 253 253 253 253 253 253 253 253 253
44347 -253 253 253 253 253 253 253 253 253 253 253 253
44348 -253 253 253 250 250 250 170 170 170 26 26 26
44349 - 2 2 6 2 2 6 37 26 9 163 110 8
44350 -219 162 10 239 182 13 246 186 14 246 190 14
44351 -246 190 14 246 190 14 246 190 14 246 190 14
44352 -246 190 14 246 190 14 246 190 14 246 190 14
44353 -246 190 14 246 190 14 246 190 14 246 190 14
44354 -246 186 14 236 178 12 224 166 10 142 122 72
44355 - 46 46 46 18 18 18 6 6 6 0 0 0
44356 - 0 0 0 0 0 0 0 0 0 0 0 0
44357 - 0 0 0 0 0 0 0 0 0 0 0 0
44358 - 0 0 0 0 0 0 6 6 6 18 18 18
44359 - 50 50 50 109 106 95 192 133 9 224 166 10
44360 -242 186 14 246 190 14 246 190 14 246 190 14
44361 -246 190 14 246 190 14 246 190 14 246 190 14
44362 -246 190 14 246 190 14 246 190 14 246 190 14
44363 -246 190 14 246 190 14 246 190 14 246 190 14
44364 -242 186 14 226 184 13 210 162 10 142 110 46
44365 -226 226 226 253 253 253 253 253 253 253 253 253
44366 -253 253 253 253 253 253 253 253 253 253 253 253
44367 -253 253 253 253 253 253 253 253 253 253 253 253
44368 -198 198 198 66 66 66 2 2 6 2 2 6
44369 - 2 2 6 2 2 6 50 34 6 156 107 11
44370 -219 162 10 239 182 13 246 186 14 246 190 14
44371 -246 190 14 246 190 14 246 190 14 246 190 14
44372 -246 190 14 246 190 14 246 190 14 246 190 14
44373 -246 190 14 246 190 14 246 190 14 242 186 14
44374 -234 174 13 213 154 11 154 122 46 66 66 66
44375 - 30 30 30 10 10 10 0 0 0 0 0 0
44376 - 0 0 0 0 0 0 0 0 0 0 0 0
44377 - 0 0 0 0 0 0 0 0 0 0 0 0
44378 - 0 0 0 0 0 0 6 6 6 22 22 22
44379 - 58 58 58 154 121 60 206 145 10 234 174 13
44380 -242 186 14 246 186 14 246 190 14 246 190 14
44381 -246 190 14 246 190 14 246 190 14 246 190 14
44382 -246 190 14 246 190 14 246 190 14 246 190 14
44383 -246 190 14 246 190 14 246 190 14 246 190 14
44384 -246 186 14 236 178 12 210 162 10 163 110 8
44385 - 61 42 6 138 138 138 218 218 218 250 250 250
44386 -253 253 253 253 253 253 253 253 253 250 250 250
44387 -242 242 242 210 210 210 144 144 144 66 66 66
44388 - 6 6 6 2 2 6 2 2 6 2 2 6
44389 - 2 2 6 2 2 6 61 42 6 163 110 8
44390 -216 158 10 236 178 12 246 190 14 246 190 14
44391 -246 190 14 246 190 14 246 190 14 246 190 14
44392 -246 190 14 246 190 14 246 190 14 246 190 14
44393 -246 190 14 239 182 13 230 174 11 216 158 10
44394 -190 142 34 124 112 88 70 70 70 38 38 38
44395 - 18 18 18 6 6 6 0 0 0 0 0 0
44396 - 0 0 0 0 0 0 0 0 0 0 0 0
44397 - 0 0 0 0 0 0 0 0 0 0 0 0
44398 - 0 0 0 0 0 0 6 6 6 22 22 22
44399 - 62 62 62 168 124 44 206 145 10 224 166 10
44400 -236 178 12 239 182 13 242 186 14 242 186 14
44401 -246 186 14 246 190 14 246 190 14 246 190 14
44402 -246 190 14 246 190 14 246 190 14 246 190 14
44403 -246 190 14 246 190 14 246 190 14 246 190 14
44404 -246 190 14 236 178 12 216 158 10 175 118 6
44405 - 80 54 7 2 2 6 6 6 6 30 30 30
44406 - 54 54 54 62 62 62 50 50 50 38 38 38
44407 - 14 14 14 2 2 6 2 2 6 2 2 6
44408 - 2 2 6 2 2 6 2 2 6 2 2 6
44409 - 2 2 6 6 6 6 80 54 7 167 114 7
44410 -213 154 11 236 178 12 246 190 14 246 190 14
44411 -246 190 14 246 190 14 246 190 14 246 190 14
44412 -246 190 14 242 186 14 239 182 13 239 182 13
44413 -230 174 11 210 150 10 174 135 50 124 112 88
44414 - 82 82 82 54 54 54 34 34 34 18 18 18
44415 - 6 6 6 0 0 0 0 0 0 0 0 0
44416 - 0 0 0 0 0 0 0 0 0 0 0 0
44417 - 0 0 0 0 0 0 0 0 0 0 0 0
44418 - 0 0 0 0 0 0 6 6 6 18 18 18
44419 - 50 50 50 158 118 36 192 133 9 200 144 11
44420 -216 158 10 219 162 10 224 166 10 226 170 11
44421 -230 174 11 236 178 12 239 182 13 239 182 13
44422 -242 186 14 246 186 14 246 190 14 246 190 14
44423 -246 190 14 246 190 14 246 190 14 246 190 14
44424 -246 186 14 230 174 11 210 150 10 163 110 8
44425 -104 69 6 10 10 10 2 2 6 2 2 6
44426 - 2 2 6 2 2 6 2 2 6 2 2 6
44427 - 2 2 6 2 2 6 2 2 6 2 2 6
44428 - 2 2 6 2 2 6 2 2 6 2 2 6
44429 - 2 2 6 6 6 6 91 60 6 167 114 7
44430 -206 145 10 230 174 11 242 186 14 246 190 14
44431 -246 190 14 246 190 14 246 186 14 242 186 14
44432 -239 182 13 230 174 11 224 166 10 213 154 11
44433 -180 133 36 124 112 88 86 86 86 58 58 58
44434 - 38 38 38 22 22 22 10 10 10 6 6 6
44435 - 0 0 0 0 0 0 0 0 0 0 0 0
44436 - 0 0 0 0 0 0 0 0 0 0 0 0
44437 - 0 0 0 0 0 0 0 0 0 0 0 0
44438 - 0 0 0 0 0 0 0 0 0 14 14 14
44439 - 34 34 34 70 70 70 138 110 50 158 118 36
44440 -167 114 7 180 123 7 192 133 9 197 138 11
44441 -200 144 11 206 145 10 213 154 11 219 162 10
44442 -224 166 10 230 174 11 239 182 13 242 186 14
44443 -246 186 14 246 186 14 246 186 14 246 186 14
44444 -239 182 13 216 158 10 185 133 11 152 99 6
44445 -104 69 6 18 14 6 2 2 6 2 2 6
44446 - 2 2 6 2 2 6 2 2 6 2 2 6
44447 - 2 2 6 2 2 6 2 2 6 2 2 6
44448 - 2 2 6 2 2 6 2 2 6 2 2 6
44449 - 2 2 6 6 6 6 80 54 7 152 99 6
44450 -192 133 9 219 162 10 236 178 12 239 182 13
44451 -246 186 14 242 186 14 239 182 13 236 178 12
44452 -224 166 10 206 145 10 192 133 9 154 121 60
44453 - 94 94 94 62 62 62 42 42 42 22 22 22
44454 - 14 14 14 6 6 6 0 0 0 0 0 0
44455 - 0 0 0 0 0 0 0 0 0 0 0 0
44456 - 0 0 0 0 0 0 0 0 0 0 0 0
44457 - 0 0 0 0 0 0 0 0 0 0 0 0
44458 - 0 0 0 0 0 0 0 0 0 6 6 6
44459 - 18 18 18 34 34 34 58 58 58 78 78 78
44460 -101 98 89 124 112 88 142 110 46 156 107 11
44461 -163 110 8 167 114 7 175 118 6 180 123 7
44462 -185 133 11 197 138 11 210 150 10 219 162 10
44463 -226 170 11 236 178 12 236 178 12 234 174 13
44464 -219 162 10 197 138 11 163 110 8 130 83 6
44465 - 91 60 6 10 10 10 2 2 6 2 2 6
44466 - 18 18 18 38 38 38 38 38 38 38 38 38
44467 - 38 38 38 38 38 38 38 38 38 38 38 38
44468 - 38 38 38 38 38 38 26 26 26 2 2 6
44469 - 2 2 6 6 6 6 70 47 6 137 92 6
44470 -175 118 6 200 144 11 219 162 10 230 174 11
44471 -234 174 13 230 174 11 219 162 10 210 150 10
44472 -192 133 9 163 110 8 124 112 88 82 82 82
44473 - 50 50 50 30 30 30 14 14 14 6 6 6
44474 - 0 0 0 0 0 0 0 0 0 0 0 0
44475 - 0 0 0 0 0 0 0 0 0 0 0 0
44476 - 0 0 0 0 0 0 0 0 0 0 0 0
44477 - 0 0 0 0 0 0 0 0 0 0 0 0
44478 - 0 0 0 0 0 0 0 0 0 0 0 0
44479 - 6 6 6 14 14 14 22 22 22 34 34 34
44480 - 42 42 42 58 58 58 74 74 74 86 86 86
44481 -101 98 89 122 102 70 130 98 46 121 87 25
44482 -137 92 6 152 99 6 163 110 8 180 123 7
44483 -185 133 11 197 138 11 206 145 10 200 144 11
44484 -180 123 7 156 107 11 130 83 6 104 69 6
44485 - 50 34 6 54 54 54 110 110 110 101 98 89
44486 - 86 86 86 82 82 82 78 78 78 78 78 78
44487 - 78 78 78 78 78 78 78 78 78 78 78 78
44488 - 78 78 78 82 82 82 86 86 86 94 94 94
44489 -106 106 106 101 101 101 86 66 34 124 80 6
44490 -156 107 11 180 123 7 192 133 9 200 144 11
44491 -206 145 10 200 144 11 192 133 9 175 118 6
44492 -139 102 15 109 106 95 70 70 70 42 42 42
44493 - 22 22 22 10 10 10 0 0 0 0 0 0
44494 - 0 0 0 0 0 0 0 0 0 0 0 0
44495 - 0 0 0 0 0 0 0 0 0 0 0 0
44496 - 0 0 0 0 0 0 0 0 0 0 0 0
44497 - 0 0 0 0 0 0 0 0 0 0 0 0
44498 - 0 0 0 0 0 0 0 0 0 0 0 0
44499 - 0 0 0 0 0 0 6 6 6 10 10 10
44500 - 14 14 14 22 22 22 30 30 30 38 38 38
44501 - 50 50 50 62 62 62 74 74 74 90 90 90
44502 -101 98 89 112 100 78 121 87 25 124 80 6
44503 -137 92 6 152 99 6 152 99 6 152 99 6
44504 -138 86 6 124 80 6 98 70 6 86 66 30
44505 -101 98 89 82 82 82 58 58 58 46 46 46
44506 - 38 38 38 34 34 34 34 34 34 34 34 34
44507 - 34 34 34 34 34 34 34 34 34 34 34 34
44508 - 34 34 34 34 34 34 38 38 38 42 42 42
44509 - 54 54 54 82 82 82 94 86 76 91 60 6
44510 -134 86 6 156 107 11 167 114 7 175 118 6
44511 -175 118 6 167 114 7 152 99 6 121 87 25
44512 -101 98 89 62 62 62 34 34 34 18 18 18
44513 - 6 6 6 0 0 0 0 0 0 0 0 0
44514 - 0 0 0 0 0 0 0 0 0 0 0 0
44515 - 0 0 0 0 0 0 0 0 0 0 0 0
44516 - 0 0 0 0 0 0 0 0 0 0 0 0
44517 - 0 0 0 0 0 0 0 0 0 0 0 0
44518 - 0 0 0 0 0 0 0 0 0 0 0 0
44519 - 0 0 0 0 0 0 0 0 0 0 0 0
44520 - 0 0 0 6 6 6 6 6 6 10 10 10
44521 - 18 18 18 22 22 22 30 30 30 42 42 42
44522 - 50 50 50 66 66 66 86 86 86 101 98 89
44523 -106 86 58 98 70 6 104 69 6 104 69 6
44524 -104 69 6 91 60 6 82 62 34 90 90 90
44525 - 62 62 62 38 38 38 22 22 22 14 14 14
44526 - 10 10 10 10 10 10 10 10 10 10 10 10
44527 - 10 10 10 10 10 10 6 6 6 10 10 10
44528 - 10 10 10 10 10 10 10 10 10 14 14 14
44529 - 22 22 22 42 42 42 70 70 70 89 81 66
44530 - 80 54 7 104 69 6 124 80 6 137 92 6
44531 -134 86 6 116 81 8 100 82 52 86 86 86
44532 - 58 58 58 30 30 30 14 14 14 6 6 6
44533 - 0 0 0 0 0 0 0 0 0 0 0 0
44534 - 0 0 0 0 0 0 0 0 0 0 0 0
44535 - 0 0 0 0 0 0 0 0 0 0 0 0
44536 - 0 0 0 0 0 0 0 0 0 0 0 0
44537 - 0 0 0 0 0 0 0 0 0 0 0 0
44538 - 0 0 0 0 0 0 0 0 0 0 0 0
44539 - 0 0 0 0 0 0 0 0 0 0 0 0
44540 - 0 0 0 0 0 0 0 0 0 0 0 0
44541 - 0 0 0 6 6 6 10 10 10 14 14 14
44542 - 18 18 18 26 26 26 38 38 38 54 54 54
44543 - 70 70 70 86 86 86 94 86 76 89 81 66
44544 - 89 81 66 86 86 86 74 74 74 50 50 50
44545 - 30 30 30 14 14 14 6 6 6 0 0 0
44546 - 0 0 0 0 0 0 0 0 0 0 0 0
44547 - 0 0 0 0 0 0 0 0 0 0 0 0
44548 - 0 0 0 0 0 0 0 0 0 0 0 0
44549 - 6 6 6 18 18 18 34 34 34 58 58 58
44550 - 82 82 82 89 81 66 89 81 66 89 81 66
44551 - 94 86 66 94 86 76 74 74 74 50 50 50
44552 - 26 26 26 14 14 14 6 6 6 0 0 0
44553 - 0 0 0 0 0 0 0 0 0 0 0 0
44554 - 0 0 0 0 0 0 0 0 0 0 0 0
44555 - 0 0 0 0 0 0 0 0 0 0 0 0
44556 - 0 0 0 0 0 0 0 0 0 0 0 0
44557 - 0 0 0 0 0 0 0 0 0 0 0 0
44558 - 0 0 0 0 0 0 0 0 0 0 0 0
44559 - 0 0 0 0 0 0 0 0 0 0 0 0
44560 - 0 0 0 0 0 0 0 0 0 0 0 0
44561 - 0 0 0 0 0 0 0 0 0 0 0 0
44562 - 6 6 6 6 6 6 14 14 14 18 18 18
44563 - 30 30 30 38 38 38 46 46 46 54 54 54
44564 - 50 50 50 42 42 42 30 30 30 18 18 18
44565 - 10 10 10 0 0 0 0 0 0 0 0 0
44566 - 0 0 0 0 0 0 0 0 0 0 0 0
44567 - 0 0 0 0 0 0 0 0 0 0 0 0
44568 - 0 0 0 0 0 0 0 0 0 0 0 0
44569 - 0 0 0 6 6 6 14 14 14 26 26 26
44570 - 38 38 38 50 50 50 58 58 58 58 58 58
44571 - 54 54 54 42 42 42 30 30 30 18 18 18
44572 - 10 10 10 0 0 0 0 0 0 0 0 0
44573 - 0 0 0 0 0 0 0 0 0 0 0 0
44574 - 0 0 0 0 0 0 0 0 0 0 0 0
44575 - 0 0 0 0 0 0 0 0 0 0 0 0
44576 - 0 0 0 0 0 0 0 0 0 0 0 0
44577 - 0 0 0 0 0 0 0 0 0 0 0 0
44578 - 0 0 0 0 0 0 0 0 0 0 0 0
44579 - 0 0 0 0 0 0 0 0 0 0 0 0
44580 - 0 0 0 0 0 0 0 0 0 0 0 0
44581 - 0 0 0 0 0 0 0 0 0 0 0 0
44582 - 0 0 0 0 0 0 0 0 0 6 6 6
44583 - 6 6 6 10 10 10 14 14 14 18 18 18
44584 - 18 18 18 14 14 14 10 10 10 6 6 6
44585 - 0 0 0 0 0 0 0 0 0 0 0 0
44586 - 0 0 0 0 0 0 0 0 0 0 0 0
44587 - 0 0 0 0 0 0 0 0 0 0 0 0
44588 - 0 0 0 0 0 0 0 0 0 0 0 0
44589 - 0 0 0 0 0 0 0 0 0 6 6 6
44590 - 14 14 14 18 18 18 22 22 22 22 22 22
44591 - 18 18 18 14 14 14 10 10 10 6 6 6
44592 - 0 0 0 0 0 0 0 0 0 0 0 0
44593 - 0 0 0 0 0 0 0 0 0 0 0 0
44594 - 0 0 0 0 0 0 0 0 0 0 0 0
44595 - 0 0 0 0 0 0 0 0 0 0 0 0
44596 - 0 0 0 0 0 0 0 0 0 0 0 0
44597 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44598 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44599 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44600 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44601 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44602 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44603 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44604 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44605 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44606 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44607 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44608 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44609 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44610 +4 4 4 4 4 4
44611 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44612 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44613 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44614 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44615 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44616 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44617 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44618 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44619 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44620 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44621 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44622 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44623 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44624 +4 4 4 4 4 4
44625 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44626 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44627 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44628 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44629 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44630 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44631 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44632 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44633 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44634 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44635 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44636 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44637 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44638 +4 4 4 4 4 4
44639 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44640 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44641 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44642 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44643 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44644 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44645 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44646 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44647 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44648 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44649 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44650 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44651 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44652 +4 4 4 4 4 4
44653 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44654 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44655 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44656 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44657 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44658 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44659 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44660 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44661 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44662 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44663 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44664 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44665 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44666 +4 4 4 4 4 4
44667 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44668 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44669 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44670 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44671 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44672 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44673 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44674 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44675 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44676 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44677 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44678 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44679 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44680 +4 4 4 4 4 4
44681 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44682 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44683 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44684 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44685 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
44686 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
44687 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44688 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44689 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44690 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
44691 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
44692 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
44693 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44694 +4 4 4 4 4 4
44695 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44696 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44697 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44698 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44699 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
44700 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
44701 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44702 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44703 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44704 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
44705 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
44706 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
44707 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44708 +4 4 4 4 4 4
44709 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44710 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44711 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44712 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44713 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
44714 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
44715 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
44716 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44717 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44718 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
44719 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
44720 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
44721 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
44722 +4 4 4 4 4 4
44723 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44724 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44725 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44726 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
44727 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
44728 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
44729 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
44730 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44731 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
44732 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
44733 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
44734 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
44735 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
44736 +4 4 4 4 4 4
44737 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44738 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44739 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44740 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
44741 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
44742 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
44743 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
44744 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
44745 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
44746 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
44747 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
44748 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
44749 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
44750 +4 4 4 4 4 4
44751 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44752 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44753 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
44754 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
44755 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
44756 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
44757 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
44758 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
44759 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
44760 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
44761 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
44762 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
44763 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
44764 +4 4 4 4 4 4
44765 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44766 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44767 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
44768 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
44769 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
44770 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
44771 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
44772 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
44773 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
44774 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
44775 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
44776 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
44777 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
44778 +4 4 4 4 4 4
44779 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44780 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44781 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
44782 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
44783 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
44784 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
44785 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
44786 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
44787 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
44788 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
44789 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
44790 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
44791 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
44792 +4 4 4 4 4 4
44793 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44794 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44795 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
44796 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
44797 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
44798 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
44799 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
44800 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
44801 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
44802 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
44803 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
44804 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
44805 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
44806 +4 4 4 4 4 4
44807 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44808 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44809 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
44810 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
44811 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
44812 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
44813 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
44814 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
44815 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
44816 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
44817 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
44818 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
44819 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
44820 +4 4 4 4 4 4
44821 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44822 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
44823 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
44824 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
44825 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
44826 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
44827 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
44828 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
44829 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
44830 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
44831 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
44832 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
44833 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
44834 +4 4 4 4 4 4
44835 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44836 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
44837 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
44838 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
44839 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
44840 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
44841 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
44842 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
44843 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
44844 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
44845 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
44846 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
44847 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
44848 +0 0 0 4 4 4
44849 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
44850 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
44851 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
44852 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
44853 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
44854 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
44855 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
44856 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
44857 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
44858 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
44859 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
44860 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
44861 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
44862 +2 0 0 0 0 0
44863 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
44864 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
44865 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
44866 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
44867 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
44868 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
44869 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
44870 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
44871 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
44872 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
44873 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
44874 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
44875 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
44876 +37 38 37 0 0 0
44877 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
44878 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
44879 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
44880 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
44881 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
44882 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
44883 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
44884 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
44885 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
44886 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
44887 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
44888 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
44889 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
44890 +85 115 134 4 0 0
44891 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
44892 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
44893 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
44894 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
44895 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
44896 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
44897 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
44898 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
44899 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
44900 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
44901 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
44902 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
44903 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
44904 +60 73 81 4 0 0
44905 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
44906 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
44907 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
44908 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
44909 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
44910 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
44911 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
44912 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
44913 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
44914 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
44915 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
44916 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
44917 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
44918 +16 19 21 4 0 0
44919 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
44920 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
44921 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
44922 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
44923 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
44924 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
44925 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
44926 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
44927 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
44928 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
44929 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
44930 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
44931 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
44932 +4 0 0 4 3 3
44933 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
44934 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
44935 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
44936 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
44937 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
44938 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
44939 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
44940 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
44941 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
44942 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
44943 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
44944 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
44945 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
44946 +3 2 2 4 4 4
44947 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
44948 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
44949 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
44950 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
44951 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
44952 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
44953 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
44954 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
44955 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
44956 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
44957 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
44958 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
44959 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
44960 +4 4 4 4 4 4
44961 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
44962 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
44963 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
44964 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
44965 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
44966 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
44967 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
44968 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
44969 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
44970 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
44971 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
44972 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
44973 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
44974 +4 4 4 4 4 4
44975 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
44976 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
44977 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
44978 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
44979 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
44980 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
44981 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
44982 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
44983 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
44984 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
44985 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
44986 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
44987 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
44988 +5 5 5 5 5 5
44989 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
44990 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
44991 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
44992 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
44993 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
44994 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
44995 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
44996 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
44997 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
44998 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
44999 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
45000 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
45001 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
45002 +5 5 5 4 4 4
45003 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
45004 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
45005 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
45006 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
45007 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45008 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
45009 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
45010 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
45011 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
45012 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
45013 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
45014 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
45015 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45016 +4 4 4 4 4 4
45017 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
45018 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
45019 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
45020 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
45021 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
45022 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45023 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45024 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
45025 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
45026 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
45027 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
45028 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
45029 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45030 +4 4 4 4 4 4
45031 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
45032 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
45033 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
45034 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
45035 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45036 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
45037 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
45038 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
45039 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
45040 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
45041 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
45042 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45043 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45044 +4 4 4 4 4 4
45045 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
45046 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
45047 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
45048 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
45049 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45050 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45051 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45052 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
45053 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
45054 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
45055 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
45056 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45057 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45058 +4 4 4 4 4 4
45059 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
45060 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
45061 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
45062 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
45063 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45064 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
45065 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
45066 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
45067 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
45068 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
45069 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45070 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45071 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45072 +4 4 4 4 4 4
45073 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
45074 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
45075 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
45076 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
45077 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45078 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
45079 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
45080 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
45081 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
45082 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
45083 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
45084 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45085 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45086 +4 4 4 4 4 4
45087 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
45088 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
45089 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
45090 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
45091 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45092 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
45093 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
45094 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
45095 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
45096 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
45097 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
45098 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45099 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45100 +4 4 4 4 4 4
45101 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
45102 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
45103 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
45104 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
45105 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
45106 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
45107 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
45108 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
45109 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
45110 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
45111 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45112 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45113 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45114 +4 4 4 4 4 4
45115 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
45116 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
45117 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
45118 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
45119 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45120 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
45121 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
45122 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
45123 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
45124 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
45125 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45126 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45127 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45128 +4 4 4 4 4 4
45129 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
45130 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
45131 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
45132 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
45133 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45134 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
45135 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
45136 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
45137 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
45138 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
45139 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45140 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45141 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45142 +4 4 4 4 4 4
45143 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
45144 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
45145 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
45146 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
45147 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45148 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
45149 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
45150 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
45151 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
45152 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45153 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45154 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45155 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45156 +4 4 4 4 4 4
45157 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
45158 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
45159 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
45160 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
45161 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
45162 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
45163 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
45164 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
45165 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45166 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45167 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45168 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45169 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45170 +4 4 4 4 4 4
45171 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
45172 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
45173 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
45174 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
45175 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45176 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
45177 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
45178 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
45179 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45180 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45181 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45182 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45183 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45184 +4 4 4 4 4 4
45185 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
45186 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
45187 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
45188 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
45189 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
45190 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
45191 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
45192 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
45193 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45194 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45195 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45196 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45197 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45198 +4 4 4 4 4 4
45199 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
45200 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
45201 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45202 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
45203 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
45204 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
45205 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
45206 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
45207 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
45208 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45209 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45210 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45211 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45212 +4 4 4 4 4 4
45213 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
45214 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
45215 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
45216 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
45217 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
45218 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
45219 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
45220 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
45221 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45222 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45223 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45224 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45225 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45226 +4 4 4 4 4 4
45227 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
45228 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
45229 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45230 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
45231 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
45232 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
45233 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
45234 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
45235 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
45236 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45237 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45238 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45239 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45240 +4 4 4 4 4 4
45241 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
45242 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
45243 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
45244 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
45245 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
45246 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
45247 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
45248 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
45249 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45250 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45251 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45252 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45253 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45254 +4 4 4 4 4 4
45255 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45256 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
45257 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45258 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
45259 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
45260 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
45261 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
45262 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
45263 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45264 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45265 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45266 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45267 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45268 +4 4 4 4 4 4
45269 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
45270 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
45271 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
45272 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
45273 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
45274 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
45275 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45276 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
45277 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45278 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45279 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45280 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45281 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45282 +4 4 4 4 4 4
45283 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45284 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
45285 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
45286 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
45287 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
45288 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
45289 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45290 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
45291 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45292 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45293 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45294 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45295 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45296 +4 4 4 4 4 4
45297 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
45298 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
45299 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
45300 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
45301 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
45302 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
45303 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
45304 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
45305 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
45306 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45307 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45308 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45309 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45310 +4 4 4 4 4 4
45311 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45312 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
45313 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
45314 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
45315 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
45316 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
45317 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
45318 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
45319 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
45320 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45321 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45322 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45323 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45324 +4 4 4 4 4 4
45325 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
45326 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
45327 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
45328 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
45329 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
45330 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
45331 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
45332 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
45333 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
45334 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45335 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45336 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45337 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45338 +4 4 4 4 4 4
45339 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45340 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
45341 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
45342 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
45343 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
45344 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
45345 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
45346 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
45347 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
45348 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45349 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45350 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45351 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45352 +4 4 4 4 4 4
45353 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
45354 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
45355 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
45356 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
45357 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
45358 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
45359 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
45360 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
45361 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
45362 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
45363 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45364 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45365 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45366 +4 4 4 4 4 4
45367 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
45368 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
45369 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
45370 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
45371 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
45372 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
45373 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
45374 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
45375 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
45376 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
45377 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45378 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45379 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45380 +4 4 4 4 4 4
45381 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
45382 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
45383 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
45384 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
45385 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
45386 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
45387 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45388 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
45389 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
45390 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
45391 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45392 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45393 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45394 +4 4 4 4 4 4
45395 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
45396 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
45397 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
45398 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
45399 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
45400 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
45401 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
45402 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
45403 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
45404 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
45405 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45406 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45407 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45408 +4 4 4 4 4 4
45409 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
45410 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
45411 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
45412 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
45413 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
45414 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
45415 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
45416 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
45417 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
45418 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
45419 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45420 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45421 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45422 +4 4 4 4 4 4
45423 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45424 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
45425 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
45426 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
45427 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
45428 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
45429 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
45430 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
45431 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
45432 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
45433 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45434 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45435 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45436 +4 4 4 4 4 4
45437 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
45438 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
45439 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
45440 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
45441 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
45442 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
45443 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
45444 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
45445 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
45446 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
45447 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45448 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45449 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45450 +4 4 4 4 4 4
45451 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
45452 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
45453 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
45454 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
45455 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
45456 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
45457 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
45458 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
45459 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
45460 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45461 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45462 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45463 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45464 +4 4 4 4 4 4
45465 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
45466 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45467 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
45468 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
45469 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
45470 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
45471 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
45472 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
45473 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
45474 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45475 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45476 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45477 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45478 +4 4 4 4 4 4
45479 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
45480 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
45481 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
45482 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
45483 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
45484 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
45485 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
45486 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
45487 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
45488 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45489 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45490 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45491 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45492 +4 4 4 4 4 4
45493 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
45494 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
45495 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
45496 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
45497 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
45498 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
45499 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
45500 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
45501 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45502 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45503 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45504 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45505 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45506 +4 4 4 4 4 4
45507 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
45508 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
45509 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
45510 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
45511 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
45512 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
45513 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
45514 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
45515 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45516 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45517 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45518 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45519 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45520 +4 4 4 4 4 4
45521 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
45522 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
45523 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
45524 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
45525 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
45526 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
45527 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
45528 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
45529 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45530 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45531 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45532 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45533 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45534 +4 4 4 4 4 4
45535 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45536 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
45537 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
45538 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
45539 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
45540 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
45541 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
45542 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
45543 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45544 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45545 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45546 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45547 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45548 +4 4 4 4 4 4
45549 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45550 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
45551 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
45552 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
45553 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
45554 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
45555 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
45556 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
45557 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45558 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45559 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45560 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45561 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45562 +4 4 4 4 4 4
45563 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45564 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
45565 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
45566 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
45567 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
45568 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
45569 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
45570 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45571 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45572 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45573 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45574 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45575 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45576 +4 4 4 4 4 4
45577 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45578 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45579 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
45580 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
45581 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
45582 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
45583 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
45584 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45585 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45586 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45587 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45588 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45589 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45590 +4 4 4 4 4 4
45591 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45592 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45593 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45594 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
45595 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
45596 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
45597 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
45598 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45599 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45600 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45601 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45602 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45603 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45604 +4 4 4 4 4 4
45605 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45606 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45607 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45608 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
45609 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
45610 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
45611 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
45612 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45613 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45614 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45615 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45616 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45617 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45618 +4 4 4 4 4 4
45619 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45620 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45621 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45622 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
45623 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45624 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
45625 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
45626 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45627 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45628 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45629 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45630 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45631 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45632 +4 4 4 4 4 4
45633 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45634 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45635 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45636 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
45637 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
45638 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
45639 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
45640 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45641 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45642 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45643 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45644 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45645 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45646 +4 4 4 4 4 4
45647 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45648 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45649 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45650 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45651 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
45652 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
45653 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45654 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45655 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45656 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45657 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45658 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45659 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45660 +4 4 4 4 4 4
45661 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45662 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45663 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45664 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45665 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
45666 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
45667 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
45668 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45669 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45670 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45671 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45672 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45673 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45674 +4 4 4 4 4 4
45675 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45676 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45677 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45678 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45679 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
45680 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
45681 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45682 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45683 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45684 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45685 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45686 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45687 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45688 +4 4 4 4 4 4
45689 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45690 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45691 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45692 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45693 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
45694 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
45695 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45696 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45697 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45698 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45699 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45700 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45701 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45702 +4 4 4 4 4 4
45703 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45704 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45705 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45706 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45707 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
45708 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
45709 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45710 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45711 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45712 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45713 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45714 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45715 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45716 +4 4 4 4 4 4
45717 diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
45718 index 443e3c8..c443d6a 100644
45719 --- a/drivers/video/nvidia/nv_backlight.c
45720 +++ b/drivers/video/nvidia/nv_backlight.c
45721 @@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(struct backlight_device *bd)
45722 return bd->props.brightness;
45723 }
45724
45725 -static struct backlight_ops nvidia_bl_ops = {
45726 +static const struct backlight_ops nvidia_bl_ops = {
45727 .get_brightness = nvidia_bl_get_brightness,
45728 .update_status = nvidia_bl_update_status,
45729 };
45730 diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
45731 index d94c57f..912984c 100644
45732 --- a/drivers/video/riva/fbdev.c
45733 +++ b/drivers/video/riva/fbdev.c
45734 @@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct backlight_device *bd)
45735 return bd->props.brightness;
45736 }
45737
45738 -static struct backlight_ops riva_bl_ops = {
45739 +static const struct backlight_ops riva_bl_ops = {
45740 .get_brightness = riva_bl_get_brightness,
45741 .update_status = riva_bl_update_status,
45742 };
45743 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
45744 index 54fbb29..2c108fc 100644
45745 --- a/drivers/video/uvesafb.c
45746 +++ b/drivers/video/uvesafb.c
45747 @@ -18,6 +18,7 @@
45748 #include <linux/fb.h>
45749 #include <linux/io.h>
45750 #include <linux/mutex.h>
45751 +#include <linux/moduleloader.h>
45752 #include <video/edid.h>
45753 #include <video/uvesafb.h>
45754 #ifdef CONFIG_X86
45755 @@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
45756 NULL,
45757 };
45758
45759 - return call_usermodehelper(v86d_path, argv, envp, 1);
45760 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
45761 }
45762
45763 /*
45764 @@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
45765 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
45766 par->pmi_setpal = par->ypan = 0;
45767 } else {
45768 +
45769 +#ifdef CONFIG_PAX_KERNEXEC
45770 +#ifdef CONFIG_MODULES
45771 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
45772 +#endif
45773 + if (!par->pmi_code) {
45774 + par->pmi_setpal = par->ypan = 0;
45775 + return 0;
45776 + }
45777 +#endif
45778 +
45779 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
45780 + task->t.regs.edi);
45781 +
45782 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45783 + pax_open_kernel();
45784 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
45785 + pax_close_kernel();
45786 +
45787 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
45788 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
45789 +#else
45790 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
45791 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
45792 +#endif
45793 +
45794 printk(KERN_INFO "uvesafb: protected mode interface info at "
45795 "%04x:%04x\n",
45796 (u16)task->t.regs.es, (u16)task->t.regs.edi);
45797 @@ -1799,6 +1822,11 @@ out:
45798 if (par->vbe_modes)
45799 kfree(par->vbe_modes);
45800
45801 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45802 + if (par->pmi_code)
45803 + module_free_exec(NULL, par->pmi_code);
45804 +#endif
45805 +
45806 framebuffer_release(info);
45807 return err;
45808 }
45809 @@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platform_device *dev)
45810 kfree(par->vbe_state_orig);
45811 if (par->vbe_state_saved)
45812 kfree(par->vbe_state_saved);
45813 +
45814 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45815 + if (par->pmi_code)
45816 + module_free_exec(NULL, par->pmi_code);
45817 +#endif
45818 +
45819 }
45820
45821 framebuffer_release(info);
45822 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
45823 index bd37ee1..cb827e8 100644
45824 --- a/drivers/video/vesafb.c
45825 +++ b/drivers/video/vesafb.c
45826 @@ -9,6 +9,7 @@
45827 */
45828
45829 #include <linux/module.h>
45830 +#include <linux/moduleloader.h>
45831 #include <linux/kernel.h>
45832 #include <linux/errno.h>
45833 #include <linux/string.h>
45834 @@ -53,8 +54,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
45835 static int vram_total __initdata; /* Set total amount of memory */
45836 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
45837 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
45838 -static void (*pmi_start)(void) __read_mostly;
45839 -static void (*pmi_pal) (void) __read_mostly;
45840 +static void (*pmi_start)(void) __read_only;
45841 +static void (*pmi_pal) (void) __read_only;
45842 static int depth __read_mostly;
45843 static int vga_compat __read_mostly;
45844 /* --------------------------------------------------------------------- */
45845 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
45846 unsigned int size_vmode;
45847 unsigned int size_remap;
45848 unsigned int size_total;
45849 + void *pmi_code = NULL;
45850
45851 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
45852 return -ENODEV;
45853 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
45854 size_remap = size_total;
45855 vesafb_fix.smem_len = size_remap;
45856
45857 -#ifndef __i386__
45858 - screen_info.vesapm_seg = 0;
45859 -#endif
45860 -
45861 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
45862 printk(KERN_WARNING
45863 "vesafb: cannot reserve video memory at 0x%lx\n",
45864 @@ -315,9 +313,21 @@ static int __init vesafb_probe(struct platform_device *dev)
45865 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
45866 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
45867
45868 +#ifdef __i386__
45869 +
45870 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45871 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
45872 + if (!pmi_code)
45873 +#elif !defined(CONFIG_PAX_KERNEXEC)
45874 + if (0)
45875 +#endif
45876 +
45877 +#endif
45878 + screen_info.vesapm_seg = 0;
45879 +
45880 if (screen_info.vesapm_seg) {
45881 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
45882 - screen_info.vesapm_seg,screen_info.vesapm_off);
45883 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
45884 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
45885 }
45886
45887 if (screen_info.vesapm_seg < 0xc000)
45888 @@ -325,9 +335,25 @@ static int __init vesafb_probe(struct platform_device *dev)
45889
45890 if (ypan || pmi_setpal) {
45891 unsigned short *pmi_base;
45892 +
45893 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
45894 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
45895 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
45896 +
45897 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45898 + pax_open_kernel();
45899 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
45900 +#else
45901 + pmi_code = pmi_base;
45902 +#endif
45903 +
45904 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
45905 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
45906 +
45907 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45908 + pmi_start = ktva_ktla(pmi_start);
45909 + pmi_pal = ktva_ktla(pmi_pal);
45910 + pax_close_kernel();
45911 +#endif
45912 +
45913 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
45914 if (pmi_base[3]) {
45915 printk(KERN_INFO "vesafb: pmi: ports = ");
45916 @@ -469,6 +495,11 @@ static int __init vesafb_probe(struct platform_device *dev)
45917 info->node, info->fix.id);
45918 return 0;
45919 err:
45920 +
45921 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45922 + module_free_exec(NULL, pmi_code);
45923 +#endif
45924 +
45925 if (info->screen_base)
45926 iounmap(info->screen_base);
45927 framebuffer_release(info);
45928 diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
45929 index 88a60e0..6783cc2 100644
45930 --- a/drivers/xen/sys-hypervisor.c
45931 +++ b/drivers/xen/sys-hypervisor.c
45932 @@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct kobject *kobj,
45933 return 0;
45934 }
45935
45936 -static struct sysfs_ops hyp_sysfs_ops = {
45937 +static const struct sysfs_ops hyp_sysfs_ops = {
45938 .show = hyp_sysfs_show,
45939 .store = hyp_sysfs_store,
45940 };
45941 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
45942 index 18f74ec..3227009 100644
45943 --- a/fs/9p/vfs_inode.c
45944 +++ b/fs/9p/vfs_inode.c
45945 @@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
45946 static void
45947 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
45948 {
45949 - char *s = nd_get_link(nd);
45950 + const char *s = nd_get_link(nd);
45951
45952 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
45953 IS_ERR(s) ? "<error>" : s);
45954 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
45955 index bb4cc5b..df5eaa0 100644
45956 --- a/fs/Kconfig.binfmt
45957 +++ b/fs/Kconfig.binfmt
45958 @@ -86,7 +86,7 @@ config HAVE_AOUT
45959
45960 config BINFMT_AOUT
45961 tristate "Kernel support for a.out and ECOFF binaries"
45962 - depends on HAVE_AOUT
45963 + depends on HAVE_AOUT && BROKEN
45964 ---help---
45965 A.out (Assembler.OUTput) is a set of formats for libraries and
45966 executables used in the earliest versions of UNIX. Linux used
45967 diff --git a/fs/aio.c b/fs/aio.c
45968 index 22a19ad..d484e5b 100644
45969 --- a/fs/aio.c
45970 +++ b/fs/aio.c
45971 @@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx *ctx)
45972 size += sizeof(struct io_event) * nr_events;
45973 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
45974
45975 - if (nr_pages < 0)
45976 + if (nr_pages <= 0)
45977 return -EINVAL;
45978
45979 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
45980 @@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ctx,
45981 struct aio_timeout to;
45982 int retry = 0;
45983
45984 + pax_track_stack();
45985 +
45986 /* needed to zero any padding within an entry (there shouldn't be
45987 * any, but C is fun!
45988 */
45989 @@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *iocb)
45990 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
45991 {
45992 ssize_t ret;
45993 + struct iovec iovstack;
45994
45995 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
45996 kiocb->ki_nbytes, 1,
45997 - &kiocb->ki_inline_vec, &kiocb->ki_iovec);
45998 + &iovstack, &kiocb->ki_iovec);
45999 if (ret < 0)
46000 goto out;
46001
46002 + if (kiocb->ki_iovec == &iovstack) {
46003 + kiocb->ki_inline_vec = iovstack;
46004 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
46005 + }
46006 kiocb->ki_nr_segs = kiocb->ki_nbytes;
46007 kiocb->ki_cur_seg = 0;
46008 /* ki_nbytes/left now reflect bytes instead of segs */
46009 diff --git a/fs/attr.c b/fs/attr.c
46010 index 96d394b..33cf5b4 100644
46011 --- a/fs/attr.c
46012 +++ b/fs/attr.c
46013 @@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
46014 unsigned long limit;
46015
46016 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
46017 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
46018 if (limit != RLIM_INFINITY && offset > limit)
46019 goto out_sig;
46020 if (offset > inode->i_sb->s_maxbytes)
46021 diff --git a/fs/autofs/root.c b/fs/autofs/root.c
46022 index 4a1401c..05eb5ca 100644
46023 --- a/fs/autofs/root.c
46024 +++ b/fs/autofs/root.c
46025 @@ -299,7 +299,8 @@ static int autofs_root_symlink(struct inode *dir, struct dentry *dentry, const c
46026 set_bit(n,sbi->symlink_bitmap);
46027 sl = &sbi->symlink[n];
46028 sl->len = strlen(symname);
46029 - sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
46030 + slsize = sl->len+1;
46031 + sl->data = kmalloc(slsize, GFP_KERNEL);
46032 if (!sl->data) {
46033 clear_bit(n,sbi->symlink_bitmap);
46034 unlock_kernel();
46035 diff --git a/fs/autofs4/symlink.c b/fs/autofs4/symlink.c
46036 index b4ea829..e63ef18 100644
46037 --- a/fs/autofs4/symlink.c
46038 +++ b/fs/autofs4/symlink.c
46039 @@ -15,7 +15,7 @@
46040 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
46041 {
46042 struct autofs_info *ino = autofs4_dentry_ino(dentry);
46043 - nd_set_link(nd, (char *)ino->u.symlink);
46044 + nd_set_link(nd, ino->u.symlink);
46045 return NULL;
46046 }
46047
46048 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
46049 index 2341375..df9d1c2 100644
46050 --- a/fs/autofs4/waitq.c
46051 +++ b/fs/autofs4/waitq.c
46052 @@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
46053 {
46054 unsigned long sigpipe, flags;
46055 mm_segment_t fs;
46056 - const char *data = (const char *)addr;
46057 + const char __user *data = (const char __force_user *)addr;
46058 ssize_t wr = 0;
46059
46060 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
46061 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
46062 index 9158c07..3f06659 100644
46063 --- a/fs/befs/linuxvfs.c
46064 +++ b/fs/befs/linuxvfs.c
46065 @@ -498,7 +498,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46066 {
46067 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
46068 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
46069 - char *link = nd_get_link(nd);
46070 + const char *link = nd_get_link(nd);
46071 if (!IS_ERR(link))
46072 kfree(link);
46073 }
46074 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
46075 index 0133b5a..b3baa9f 100644
46076 --- a/fs/binfmt_aout.c
46077 +++ b/fs/binfmt_aout.c
46078 @@ -16,6 +16,7 @@
46079 #include <linux/string.h>
46080 #include <linux/fs.h>
46081 #include <linux/file.h>
46082 +#include <linux/security.h>
46083 #include <linux/stat.h>
46084 #include <linux/fcntl.h>
46085 #include <linux/ptrace.h>
46086 @@ -102,6 +103,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46087 #endif
46088 # define START_STACK(u) (u.start_stack)
46089
46090 + memset(&dump, 0, sizeof(dump));
46091 +
46092 fs = get_fs();
46093 set_fs(KERNEL_DS);
46094 has_dumped = 1;
46095 @@ -113,10 +116,12 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46096
46097 /* If the size of the dump file exceeds the rlimit, then see what would happen
46098 if we wrote the stack, but not the data area. */
46099 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
46100 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
46101 dump.u_dsize = 0;
46102
46103 /* Make sure we have enough room to write the stack and data areas. */
46104 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
46105 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
46106 dump.u_ssize = 0;
46107
46108 @@ -146,9 +151,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46109 dump_size = dump.u_ssize << PAGE_SHIFT;
46110 DUMP_WRITE(dump_start,dump_size);
46111 }
46112 -/* Finally dump the task struct. Not be used by gdb, but could be useful */
46113 - set_fs(KERNEL_DS);
46114 - DUMP_WRITE(current,sizeof(*current));
46115 +/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
46116 end_coredump:
46117 set_fs(fs);
46118 return has_dumped;
46119 @@ -249,6 +252,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46120 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
46121 if (rlim >= RLIM_INFINITY)
46122 rlim = ~0;
46123 +
46124 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
46125 if (ex.a_data + ex.a_bss > rlim)
46126 return -ENOMEM;
46127
46128 @@ -277,6 +282,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46129 install_exec_creds(bprm);
46130 current->flags &= ~PF_FORKNOEXEC;
46131
46132 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46133 + current->mm->pax_flags = 0UL;
46134 +#endif
46135 +
46136 +#ifdef CONFIG_PAX_PAGEEXEC
46137 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
46138 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
46139 +
46140 +#ifdef CONFIG_PAX_EMUTRAMP
46141 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
46142 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
46143 +#endif
46144 +
46145 +#ifdef CONFIG_PAX_MPROTECT
46146 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
46147 + current->mm->pax_flags |= MF_PAX_MPROTECT;
46148 +#endif
46149 +
46150 + }
46151 +#endif
46152 +
46153 if (N_MAGIC(ex) == OMAGIC) {
46154 unsigned long text_addr, map_size;
46155 loff_t pos;
46156 @@ -349,7 +375,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46157
46158 down_write(&current->mm->mmap_sem);
46159 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
46160 - PROT_READ | PROT_WRITE | PROT_EXEC,
46161 + PROT_READ | PROT_WRITE,
46162 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
46163 fd_offset + ex.a_text);
46164 up_write(&current->mm->mmap_sem);
46165 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
46166 index 1ed37ba..efcdc04 100644
46167 --- a/fs/binfmt_elf.c
46168 +++ b/fs/binfmt_elf.c
46169 @@ -31,6 +31,7 @@
46170 #include <linux/random.h>
46171 #include <linux/elf.h>
46172 #include <linux/utsname.h>
46173 +#include <linux/xattr.h>
46174 #include <asm/uaccess.h>
46175 #include <asm/param.h>
46176 #include <asm/page.h>
46177 @@ -50,6 +51,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
46178 #define elf_core_dump NULL
46179 #endif
46180
46181 +#ifdef CONFIG_PAX_MPROTECT
46182 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
46183 +#endif
46184 +
46185 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
46186 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
46187 #else
46188 @@ -69,6 +74,11 @@ static struct linux_binfmt elf_format = {
46189 .load_binary = load_elf_binary,
46190 .load_shlib = load_elf_library,
46191 .core_dump = elf_core_dump,
46192 +
46193 +#ifdef CONFIG_PAX_MPROTECT
46194 + .handle_mprotect= elf_handle_mprotect,
46195 +#endif
46196 +
46197 .min_coredump = ELF_EXEC_PAGESIZE,
46198 .hasvdso = 1
46199 };
46200 @@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
46201
46202 static int set_brk(unsigned long start, unsigned long end)
46203 {
46204 + unsigned long e = end;
46205 +
46206 start = ELF_PAGEALIGN(start);
46207 end = ELF_PAGEALIGN(end);
46208 if (end > start) {
46209 @@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
46210 if (BAD_ADDR(addr))
46211 return addr;
46212 }
46213 - current->mm->start_brk = current->mm->brk = end;
46214 + current->mm->start_brk = current->mm->brk = e;
46215 return 0;
46216 }
46217
46218 @@ -148,12 +160,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46219 elf_addr_t __user *u_rand_bytes;
46220 const char *k_platform = ELF_PLATFORM;
46221 const char *k_base_platform = ELF_BASE_PLATFORM;
46222 - unsigned char k_rand_bytes[16];
46223 + u32 k_rand_bytes[4];
46224 int items;
46225 elf_addr_t *elf_info;
46226 int ei_index = 0;
46227 const struct cred *cred = current_cred();
46228 struct vm_area_struct *vma;
46229 + unsigned long saved_auxv[AT_VECTOR_SIZE];
46230 +
46231 + pax_track_stack();
46232
46233 /*
46234 * In some cases (e.g. Hyper-Threading), we want to avoid L1
46235 @@ -195,8 +210,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46236 * Generate 16 random bytes for userspace PRNG seeding.
46237 */
46238 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
46239 - u_rand_bytes = (elf_addr_t __user *)
46240 - STACK_ALLOC(p, sizeof(k_rand_bytes));
46241 + srandom32(k_rand_bytes[0] ^ random32());
46242 + srandom32(k_rand_bytes[1] ^ random32());
46243 + srandom32(k_rand_bytes[2] ^ random32());
46244 + srandom32(k_rand_bytes[3] ^ random32());
46245 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
46246 + u_rand_bytes = (elf_addr_t __user *) p;
46247 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
46248 return -EFAULT;
46249
46250 @@ -308,9 +327,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46251 return -EFAULT;
46252 current->mm->env_end = p;
46253
46254 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
46255 +
46256 /* Put the elf_info on the stack in the right place. */
46257 sp = (elf_addr_t __user *)envp + 1;
46258 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
46259 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
46260 return -EFAULT;
46261 return 0;
46262 }
46263 @@ -385,10 +406,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46264 {
46265 struct elf_phdr *elf_phdata;
46266 struct elf_phdr *eppnt;
46267 - unsigned long load_addr = 0;
46268 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
46269 int load_addr_set = 0;
46270 unsigned long last_bss = 0, elf_bss = 0;
46271 - unsigned long error = ~0UL;
46272 + unsigned long error = -EINVAL;
46273 unsigned long total_size;
46274 int retval, i, size;
46275
46276 @@ -434,6 +455,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46277 goto out_close;
46278 }
46279
46280 +#ifdef CONFIG_PAX_SEGMEXEC
46281 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
46282 + pax_task_size = SEGMEXEC_TASK_SIZE;
46283 +#endif
46284 +
46285 eppnt = elf_phdata;
46286 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
46287 if (eppnt->p_type == PT_LOAD) {
46288 @@ -477,8 +503,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46289 k = load_addr + eppnt->p_vaddr;
46290 if (BAD_ADDR(k) ||
46291 eppnt->p_filesz > eppnt->p_memsz ||
46292 - eppnt->p_memsz > TASK_SIZE ||
46293 - TASK_SIZE - eppnt->p_memsz < k) {
46294 + eppnt->p_memsz > pax_task_size ||
46295 + pax_task_size - eppnt->p_memsz < k) {
46296 error = -ENOMEM;
46297 goto out_close;
46298 }
46299 @@ -532,6 +558,363 @@ out:
46300 return error;
46301 }
46302
46303 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
46304 +{
46305 + unsigned long pax_flags = 0UL;
46306 +
46307 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
46308 +
46309 +#ifdef CONFIG_PAX_PAGEEXEC
46310 + if (elf_phdata->p_flags & PF_PAGEEXEC)
46311 + pax_flags |= MF_PAX_PAGEEXEC;
46312 +#endif
46313 +
46314 +#ifdef CONFIG_PAX_SEGMEXEC
46315 + if (elf_phdata->p_flags & PF_SEGMEXEC)
46316 + pax_flags |= MF_PAX_SEGMEXEC;
46317 +#endif
46318 +
46319 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46320 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46321 + if (nx_enabled)
46322 + pax_flags &= ~MF_PAX_SEGMEXEC;
46323 + else
46324 + pax_flags &= ~MF_PAX_PAGEEXEC;
46325 + }
46326 +#endif
46327 +
46328 +#ifdef CONFIG_PAX_EMUTRAMP
46329 + if (elf_phdata->p_flags & PF_EMUTRAMP)
46330 + pax_flags |= MF_PAX_EMUTRAMP;
46331 +#endif
46332 +
46333 +#ifdef CONFIG_PAX_MPROTECT
46334 + if (elf_phdata->p_flags & PF_MPROTECT)
46335 + pax_flags |= MF_PAX_MPROTECT;
46336 +#endif
46337 +
46338 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46339 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
46340 + pax_flags |= MF_PAX_RANDMMAP;
46341 +#endif
46342 +
46343 +#endif
46344 +
46345 + return pax_flags;
46346 +}
46347 +
46348 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
46349 +{
46350 + unsigned long pax_flags = 0UL;
46351 +
46352 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
46353 +
46354 +#ifdef CONFIG_PAX_PAGEEXEC
46355 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
46356 + pax_flags |= MF_PAX_PAGEEXEC;
46357 +#endif
46358 +
46359 +#ifdef CONFIG_PAX_SEGMEXEC
46360 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
46361 + pax_flags |= MF_PAX_SEGMEXEC;
46362 +#endif
46363 +
46364 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46365 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46366 + if (nx_enabled)
46367 + pax_flags &= ~MF_PAX_SEGMEXEC;
46368 + else
46369 + pax_flags &= ~MF_PAX_PAGEEXEC;
46370 + }
46371 +#endif
46372 +
46373 +#ifdef CONFIG_PAX_EMUTRAMP
46374 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
46375 + pax_flags |= MF_PAX_EMUTRAMP;
46376 +#endif
46377 +
46378 +#ifdef CONFIG_PAX_MPROTECT
46379 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
46380 + pax_flags |= MF_PAX_MPROTECT;
46381 +#endif
46382 +
46383 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46384 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
46385 + pax_flags |= MF_PAX_RANDMMAP;
46386 +#endif
46387 +
46388 +#endif
46389 +
46390 + return pax_flags;
46391 +}
46392 +
46393 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
46394 +{
46395 + unsigned long pax_flags = 0UL;
46396 +
46397 +#ifdef CONFIG_PAX_EI_PAX
46398 +
46399 +#ifdef CONFIG_PAX_PAGEEXEC
46400 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
46401 + pax_flags |= MF_PAX_PAGEEXEC;
46402 +#endif
46403 +
46404 +#ifdef CONFIG_PAX_SEGMEXEC
46405 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
46406 + pax_flags |= MF_PAX_SEGMEXEC;
46407 +#endif
46408 +
46409 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46410 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46411 + if (nx_enabled)
46412 + pax_flags &= ~MF_PAX_SEGMEXEC;
46413 + else
46414 + pax_flags &= ~MF_PAX_PAGEEXEC;
46415 + }
46416 +#endif
46417 +
46418 +#ifdef CONFIG_PAX_EMUTRAMP
46419 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
46420 + pax_flags |= MF_PAX_EMUTRAMP;
46421 +#endif
46422 +
46423 +#ifdef CONFIG_PAX_MPROTECT
46424 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
46425 + pax_flags |= MF_PAX_MPROTECT;
46426 +#endif
46427 +
46428 +#ifdef CONFIG_PAX_ASLR
46429 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
46430 + pax_flags |= MF_PAX_RANDMMAP;
46431 +#endif
46432 +
46433 +#else
46434 +
46435 +#ifdef CONFIG_PAX_PAGEEXEC
46436 + pax_flags |= MF_PAX_PAGEEXEC;
46437 +#endif
46438 +
46439 +#ifdef CONFIG_PAX_MPROTECT
46440 + pax_flags |= MF_PAX_MPROTECT;
46441 +#endif
46442 +
46443 +#ifdef CONFIG_PAX_RANDMMAP
46444 + pax_flags |= MF_PAX_RANDMMAP;
46445 +#endif
46446 +
46447 +#ifdef CONFIG_PAX_SEGMEXEC
46448 + if (!(__supported_pte_mask & _PAGE_NX)) {
46449 + pax_flags &= ~MF_PAX_PAGEEXEC;
46450 + pax_flags |= MF_PAX_SEGMEXEC;
46451 + }
46452 +#endif
46453 +
46454 +#endif
46455 +
46456 + return pax_flags;
46457 +}
46458 +
46459 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
46460 +{
46461 +
46462 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
46463 + unsigned long i;
46464 +
46465 + for (i = 0UL; i < elf_ex->e_phnum; i++)
46466 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
46467 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
46468 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
46469 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
46470 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
46471 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
46472 + return ~0UL;
46473 +
46474 +#ifdef CONFIG_PAX_SOFTMODE
46475 + if (pax_softmode)
46476 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
46477 + else
46478 +#endif
46479 +
46480 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
46481 + break;
46482 + }
46483 +#endif
46484 +
46485 + return ~0UL;
46486 +}
46487 +
46488 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
46489 +{
46490 + unsigned long pax_flags = 0UL;
46491 +
46492 +#ifdef CONFIG_PAX_PAGEEXEC
46493 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
46494 + pax_flags |= MF_PAX_PAGEEXEC;
46495 +#endif
46496 +
46497 +#ifdef CONFIG_PAX_SEGMEXEC
46498 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
46499 + pax_flags |= MF_PAX_SEGMEXEC;
46500 +#endif
46501 +
46502 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46503 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46504 + if ((__supported_pte_mask & _PAGE_NX))
46505 + pax_flags &= ~MF_PAX_SEGMEXEC;
46506 + else
46507 + pax_flags &= ~MF_PAX_PAGEEXEC;
46508 + }
46509 +#endif
46510 +
46511 +#ifdef CONFIG_PAX_EMUTRAMP
46512 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
46513 + pax_flags |= MF_PAX_EMUTRAMP;
46514 +#endif
46515 +
46516 +#ifdef CONFIG_PAX_MPROTECT
46517 + if (pax_flags_softmode & MF_PAX_MPROTECT)
46518 + pax_flags |= MF_PAX_MPROTECT;
46519 +#endif
46520 +
46521 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46522 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
46523 + pax_flags |= MF_PAX_RANDMMAP;
46524 +#endif
46525 +
46526 + return pax_flags;
46527 +}
46528 +
46529 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
46530 +{
46531 + unsigned long pax_flags = 0UL;
46532 +
46533 +#ifdef CONFIG_PAX_PAGEEXEC
46534 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
46535 + pax_flags |= MF_PAX_PAGEEXEC;
46536 +#endif
46537 +
46538 +#ifdef CONFIG_PAX_SEGMEXEC
46539 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
46540 + pax_flags |= MF_PAX_SEGMEXEC;
46541 +#endif
46542 +
46543 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46544 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46545 + if ((__supported_pte_mask & _PAGE_NX))
46546 + pax_flags &= ~MF_PAX_SEGMEXEC;
46547 + else
46548 + pax_flags &= ~MF_PAX_PAGEEXEC;
46549 + }
46550 +#endif
46551 +
46552 +#ifdef CONFIG_PAX_EMUTRAMP
46553 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
46554 + pax_flags |= MF_PAX_EMUTRAMP;
46555 +#endif
46556 +
46557 +#ifdef CONFIG_PAX_MPROTECT
46558 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
46559 + pax_flags |= MF_PAX_MPROTECT;
46560 +#endif
46561 +
46562 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46563 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
46564 + pax_flags |= MF_PAX_RANDMMAP;
46565 +#endif
46566 +
46567 + return pax_flags;
46568 +}
46569 +
46570 +static unsigned long pax_parse_xattr_pax(struct file * const file)
46571 +{
46572 +
46573 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
46574 + ssize_t xattr_size, i;
46575 + unsigned char xattr_value[5];
46576 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
46577 +
46578 + xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
46579 + if (xattr_size <= 0)
46580 + return ~0UL;
46581 +
46582 + for (i = 0; i < xattr_size; i++)
46583 + switch (xattr_value[i]) {
46584 + default:
46585 + return ~0UL;
46586 +
46587 +#define parse_flag(option1, option2, flag) \
46588 + case option1: \
46589 + pax_flags_hardmode |= MF_PAX_##flag; \
46590 + break; \
46591 + case option2: \
46592 + pax_flags_softmode |= MF_PAX_##flag; \
46593 + break;
46594 +
46595 + parse_flag('p', 'P', PAGEEXEC);
46596 + parse_flag('e', 'E', EMUTRAMP);
46597 + parse_flag('m', 'M', MPROTECT);
46598 + parse_flag('r', 'R', RANDMMAP);
46599 + parse_flag('s', 'S', SEGMEXEC);
46600 +
46601 +#undef parse_flag
46602 + }
46603 +
46604 + if (pax_flags_hardmode & pax_flags_softmode)
46605 + return ~0UL;
46606 +
46607 +#ifdef CONFIG_PAX_SOFTMODE
46608 + if (pax_softmode)
46609 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
46610 + else
46611 +#endif
46612 +
46613 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
46614 +#else
46615 + return ~0UL;
46616 +#endif
46617 +}
46618 +
46619 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
46620 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
46621 +{
46622 + unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
46623 +
46624 + pax_flags = pax_parse_ei_pax(elf_ex);
46625 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
46626 + xattr_pax_flags = pax_parse_xattr_pax(file);
46627 +
46628 + if (pt_pax_flags == ~0UL)
46629 + pt_pax_flags = xattr_pax_flags;
46630 + else if (xattr_pax_flags == ~0UL)
46631 + xattr_pax_flags = pt_pax_flags;
46632 + if (pt_pax_flags != xattr_pax_flags)
46633 + return -EINVAL;
46634 + if (pt_pax_flags != ~0UL)
46635 + pax_flags = pt_pax_flags;
46636 +
46637 +#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
46638 + if (found_flags == 0) {
46639 + struct elf_phdr phdr;
46640 + memset(&phdr, 0, sizeof(phdr));
46641 + phdr.p_flags = PF_NOEMUTRAMP;
46642 +#ifdef CONFIG_PAX_SOFTMODE
46643 + if (pax_softmode)
46644 + pax_flags = pax_parse_softmode(&phdr);
46645 + else
46646 +#endif
46647 + pax_flags = pax_parse_hardmode(&phdr);
46648 + }
46649 +#endif
46650 +
46651 +
46652 + if (0 > pax_check_flags(&pax_flags))
46653 + return -EINVAL;
46654 +
46655 + current->mm->pax_flags = pax_flags;
46656 + return 0;
46657 +}
46658 +#endif
46659 +
46660 /*
46661 * These are the functions used to load ELF style executables and shared
46662 * libraries. There is no binary dependent code anywhere else.
46663 @@ -548,6 +931,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
46664 {
46665 unsigned int random_variable = 0;
46666
46667 +#ifdef CONFIG_PAX_RANDUSTACK
46668 + if (randomize_va_space)
46669 + return stack_top - current->mm->delta_stack;
46670 +#endif
46671 +
46672 if ((current->flags & PF_RANDOMIZE) &&
46673 !(current->personality & ADDR_NO_RANDOMIZE)) {
46674 random_variable = get_random_int() & STACK_RND_MASK;
46675 @@ -566,7 +954,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46676 unsigned long load_addr = 0, load_bias = 0;
46677 int load_addr_set = 0;
46678 char * elf_interpreter = NULL;
46679 - unsigned long error;
46680 + unsigned long error = 0;
46681 struct elf_phdr *elf_ppnt, *elf_phdata;
46682 unsigned long elf_bss, elf_brk;
46683 int retval, i;
46684 @@ -576,11 +964,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46685 unsigned long start_code, end_code, start_data, end_data;
46686 unsigned long reloc_func_desc = 0;
46687 int executable_stack = EXSTACK_DEFAULT;
46688 - unsigned long def_flags = 0;
46689 struct {
46690 struct elfhdr elf_ex;
46691 struct elfhdr interp_elf_ex;
46692 } *loc;
46693 + unsigned long pax_task_size = TASK_SIZE;
46694
46695 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
46696 if (!loc) {
46697 @@ -718,11 +1106,80 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46698
46699 /* OK, This is the point of no return */
46700 current->flags &= ~PF_FORKNOEXEC;
46701 - current->mm->def_flags = def_flags;
46702 +
46703 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46704 + current->mm->pax_flags = 0UL;
46705 +#endif
46706 +
46707 +#ifdef CONFIG_PAX_DLRESOLVE
46708 + current->mm->call_dl_resolve = 0UL;
46709 +#endif
46710 +
46711 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
46712 + current->mm->call_syscall = 0UL;
46713 +#endif
46714 +
46715 +#ifdef CONFIG_PAX_ASLR
46716 + current->mm->delta_mmap = 0UL;
46717 + current->mm->delta_stack = 0UL;
46718 +#endif
46719 +
46720 + current->mm->def_flags = 0;
46721 +
46722 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
46723 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
46724 + send_sig(SIGKILL, current, 0);
46725 + goto out_free_dentry;
46726 + }
46727 +#endif
46728 +
46729 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
46730 + pax_set_initial_flags(bprm);
46731 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
46732 + if (pax_set_initial_flags_func)
46733 + (pax_set_initial_flags_func)(bprm);
46734 +#endif
46735 +
46736 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46737 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
46738 + current->mm->context.user_cs_limit = PAGE_SIZE;
46739 + current->mm->def_flags |= VM_PAGEEXEC;
46740 + }
46741 +#endif
46742 +
46743 +#ifdef CONFIG_PAX_SEGMEXEC
46744 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
46745 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
46746 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
46747 + pax_task_size = SEGMEXEC_TASK_SIZE;
46748 + }
46749 +#endif
46750 +
46751 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
46752 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46753 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
46754 + put_cpu();
46755 + }
46756 +#endif
46757
46758 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
46759 may depend on the personality. */
46760 SET_PERSONALITY(loc->elf_ex);
46761 +
46762 +#ifdef CONFIG_PAX_ASLR
46763 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
46764 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
46765 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
46766 + }
46767 +#endif
46768 +
46769 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
46770 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46771 + executable_stack = EXSTACK_DISABLE_X;
46772 + current->personality &= ~READ_IMPLIES_EXEC;
46773 + } else
46774 +#endif
46775 +
46776 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
46777 current->personality |= READ_IMPLIES_EXEC;
46778
46779 @@ -800,10 +1257,27 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46780 * might try to exec. This is because the brk will
46781 * follow the loader, and is not movable. */
46782 #ifdef CONFIG_X86
46783 - load_bias = 0;
46784 + if (current->flags & PF_RANDOMIZE)
46785 + load_bias = 0;
46786 + else
46787 + load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
46788 #else
46789 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
46790 #endif
46791 +
46792 +#ifdef CONFIG_PAX_RANDMMAP
46793 + /* PaX: randomize base address at the default exe base if requested */
46794 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
46795 +#ifdef CONFIG_SPARC64
46796 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
46797 +#else
46798 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
46799 +#endif
46800 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
46801 + elf_flags |= MAP_FIXED;
46802 + }
46803 +#endif
46804 +
46805 }
46806
46807 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
46808 @@ -836,9 +1310,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46809 * allowed task size. Note that p_filesz must always be
46810 * <= p_memsz so it is only necessary to check p_memsz.
46811 */
46812 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
46813 - elf_ppnt->p_memsz > TASK_SIZE ||
46814 - TASK_SIZE - elf_ppnt->p_memsz < k) {
46815 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
46816 + elf_ppnt->p_memsz > pax_task_size ||
46817 + pax_task_size - elf_ppnt->p_memsz < k) {
46818 /* set_brk can never work. Avoid overflows. */
46819 send_sig(SIGKILL, current, 0);
46820 retval = -EINVAL;
46821 @@ -866,6 +1340,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46822 start_data += load_bias;
46823 end_data += load_bias;
46824
46825 +#ifdef CONFIG_PAX_RANDMMAP
46826 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
46827 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
46828 +#endif
46829 +
46830 /* Calling set_brk effectively mmaps the pages that we need
46831 * for the bss and break sections. We must do this before
46832 * mapping in the interpreter, to make sure it doesn't wind
46833 @@ -877,9 +1356,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46834 goto out_free_dentry;
46835 }
46836 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
46837 - send_sig(SIGSEGV, current, 0);
46838 - retval = -EFAULT; /* Nobody gets to see this, but.. */
46839 - goto out_free_dentry;
46840 + /*
46841 + * This bss-zeroing can fail if the ELF
46842 + * file specifies odd protections. So
46843 + * we don't check the return value
46844 + */
46845 }
46846
46847 if (elf_interpreter) {
46848 @@ -1112,8 +1593,10 @@ static int dump_seek(struct file *file, loff_t off)
46849 unsigned long n = off;
46850 if (n > PAGE_SIZE)
46851 n = PAGE_SIZE;
46852 - if (!dump_write(file, buf, n))
46853 + if (!dump_write(file, buf, n)) {
46854 + free_page((unsigned long)buf);
46855 return 0;
46856 + }
46857 off -= n;
46858 }
46859 free_page((unsigned long)buf);
46860 @@ -1125,7 +1608,7 @@ static int dump_seek(struct file *file, loff_t off)
46861 * Decide what to dump of a segment, part, all or none.
46862 */
46863 static unsigned long vma_dump_size(struct vm_area_struct *vma,
46864 - unsigned long mm_flags)
46865 + unsigned long mm_flags, long signr)
46866 {
46867 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
46868
46869 @@ -1159,7 +1642,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
46870 if (vma->vm_file == NULL)
46871 return 0;
46872
46873 - if (FILTER(MAPPED_PRIVATE))
46874 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
46875 goto whole;
46876
46877 /*
46878 @@ -1255,8 +1738,11 @@ static int writenote(struct memelfnote *men, struct file *file,
46879 #undef DUMP_WRITE
46880
46881 #define DUMP_WRITE(addr, nr) \
46882 + do { \
46883 + gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
46884 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
46885 - goto end_coredump;
46886 + goto end_coredump; \
46887 + } while (0);
46888
46889 static void fill_elf_header(struct elfhdr *elf, int segs,
46890 u16 machine, u32 flags, u8 osabi)
46891 @@ -1385,9 +1871,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
46892 {
46893 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
46894 int i = 0;
46895 - do
46896 + do {
46897 i += 2;
46898 - while (auxv[i - 2] != AT_NULL);
46899 + } while (auxv[i - 2] != AT_NULL);
46900 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
46901 }
46902
46903 @@ -1973,7 +2459,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
46904 phdr.p_offset = offset;
46905 phdr.p_vaddr = vma->vm_start;
46906 phdr.p_paddr = 0;
46907 - phdr.p_filesz = vma_dump_size(vma, mm_flags);
46908 + phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
46909 phdr.p_memsz = vma->vm_end - vma->vm_start;
46910 offset += phdr.p_filesz;
46911 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
46912 @@ -2006,7 +2492,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
46913 unsigned long addr;
46914 unsigned long end;
46915
46916 - end = vma->vm_start + vma_dump_size(vma, mm_flags);
46917 + end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
46918
46919 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
46920 struct page *page;
46921 @@ -2015,6 +2501,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
46922 page = get_dump_page(addr);
46923 if (page) {
46924 void *kaddr = kmap(page);
46925 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
46926 stop = ((size += PAGE_SIZE) > limit) ||
46927 !dump_write(file, kaddr, PAGE_SIZE);
46928 kunmap(page);
46929 @@ -2042,6 +2529,97 @@ out:
46930
46931 #endif /* USE_ELF_CORE_DUMP */
46932
46933 +#ifdef CONFIG_PAX_MPROTECT
46934 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
46935 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
46936 + * we'll remove VM_MAYWRITE for good on RELRO segments.
46937 + *
46938 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
46939 + * basis because we want to allow the common case and not the special ones.
46940 + */
46941 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
46942 +{
46943 + struct elfhdr elf_h;
46944 + struct elf_phdr elf_p;
46945 + unsigned long i;
46946 + unsigned long oldflags;
46947 + bool is_textrel_rw, is_textrel_rx, is_relro;
46948 +
46949 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
46950 + return;
46951 +
46952 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
46953 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
46954 +
46955 +#ifdef CONFIG_PAX_ELFRELOCS
46956 + /* possible TEXTREL */
46957 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
46958 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
46959 +#else
46960 + is_textrel_rw = false;
46961 + is_textrel_rx = false;
46962 +#endif
46963 +
46964 + /* possible RELRO */
46965 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
46966 +
46967 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
46968 + return;
46969 +
46970 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
46971 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
46972 +
46973 +#ifdef CONFIG_PAX_ETEXECRELOCS
46974 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
46975 +#else
46976 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
46977 +#endif
46978 +
46979 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
46980 + !elf_check_arch(&elf_h) ||
46981 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
46982 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
46983 + return;
46984 +
46985 + for (i = 0UL; i < elf_h.e_phnum; i++) {
46986 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
46987 + return;
46988 + switch (elf_p.p_type) {
46989 + case PT_DYNAMIC:
46990 + if (!is_textrel_rw && !is_textrel_rx)
46991 + continue;
46992 + i = 0UL;
46993 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
46994 + elf_dyn dyn;
46995 +
46996 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
46997 + return;
46998 + if (dyn.d_tag == DT_NULL)
46999 + return;
47000 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
47001 + gr_log_textrel(vma);
47002 + if (is_textrel_rw)
47003 + vma->vm_flags |= VM_MAYWRITE;
47004 + else
47005 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
47006 + vma->vm_flags &= ~VM_MAYWRITE;
47007 + return;
47008 + }
47009 + i++;
47010 + }
47011 + return;
47012 +
47013 + case PT_GNU_RELRO:
47014 + if (!is_relro)
47015 + continue;
47016 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
47017 + vma->vm_flags &= ~VM_MAYWRITE;
47018 + return;
47019 + }
47020 + }
47021 +}
47022 +#endif
47023 +
47024 static int __init init_elf_binfmt(void)
47025 {
47026 return register_binfmt(&elf_format);
47027 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
47028 index ca88c46..f155a60 100644
47029 --- a/fs/binfmt_flat.c
47030 +++ b/fs/binfmt_flat.c
47031 @@ -564,7 +564,9 @@ static int load_flat_file(struct linux_binprm * bprm,
47032 realdatastart = (unsigned long) -ENOMEM;
47033 printk("Unable to allocate RAM for process data, errno %d\n",
47034 (int)-realdatastart);
47035 + down_write(&current->mm->mmap_sem);
47036 do_munmap(current->mm, textpos, text_len);
47037 + up_write(&current->mm->mmap_sem);
47038 ret = realdatastart;
47039 goto err;
47040 }
47041 @@ -588,8 +590,10 @@ static int load_flat_file(struct linux_binprm * bprm,
47042 }
47043 if (IS_ERR_VALUE(result)) {
47044 printk("Unable to read data+bss, errno %d\n", (int)-result);
47045 + down_write(&current->mm->mmap_sem);
47046 do_munmap(current->mm, textpos, text_len);
47047 do_munmap(current->mm, realdatastart, data_len + extra);
47048 + up_write(&current->mm->mmap_sem);
47049 ret = result;
47050 goto err;
47051 }
47052 @@ -658,8 +662,10 @@ static int load_flat_file(struct linux_binprm * bprm,
47053 }
47054 if (IS_ERR_VALUE(result)) {
47055 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
47056 + down_write(&current->mm->mmap_sem);
47057 do_munmap(current->mm, textpos, text_len + data_len + extra +
47058 MAX_SHARED_LIBS * sizeof(unsigned long));
47059 + up_write(&current->mm->mmap_sem);
47060 ret = result;
47061 goto err;
47062 }
47063 diff --git a/fs/bio.c b/fs/bio.c
47064 index e696713..83de133 100644
47065 --- a/fs/bio.c
47066 +++ b/fs/bio.c
47067 @@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
47068
47069 i = 0;
47070 while (i < bio_slab_nr) {
47071 - struct bio_slab *bslab = &bio_slabs[i];
47072 + bslab = &bio_slabs[i];
47073
47074 if (!bslab->slab && entry == -1)
47075 entry = i;
47076 @@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
47077 const int read = bio_data_dir(bio) == READ;
47078 struct bio_map_data *bmd = bio->bi_private;
47079 int i;
47080 - char *p = bmd->sgvecs[0].iov_base;
47081 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
47082
47083 __bio_for_each_segment(bvec, bio, i, 0) {
47084 char *addr = page_address(bvec->bv_page);
47085 diff --git a/fs/block_dev.c b/fs/block_dev.c
47086 index e65efa2..04fae57 100644
47087 --- a/fs/block_dev.c
47088 +++ b/fs/block_dev.c
47089 @@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev, void *holder)
47090 else if (bdev->bd_contains == bdev)
47091 res = 0; /* is a whole device which isn't held */
47092
47093 - else if (bdev->bd_contains->bd_holder == bd_claim)
47094 + else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
47095 res = 0; /* is a partition of a device that is being partitioned */
47096 else if (bdev->bd_contains->bd_holder != NULL)
47097 res = -EBUSY; /* is a partition of a held device */
47098 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
47099 index c4bc570..42acd8d 100644
47100 --- a/fs/btrfs/ctree.c
47101 +++ b/fs/btrfs/ctree.c
47102 @@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
47103 free_extent_buffer(buf);
47104 add_root_to_dirty_list(root);
47105 } else {
47106 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
47107 - parent_start = parent->start;
47108 - else
47109 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
47110 + if (parent)
47111 + parent_start = parent->start;
47112 + else
47113 + parent_start = 0;
47114 + } else
47115 parent_start = 0;
47116
47117 WARN_ON(trans->transid != btrfs_header_generation(parent));
47118 @@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_trans_handle *trans,
47119
47120 ret = 0;
47121 if (slot == 0) {
47122 - struct btrfs_disk_key disk_key;
47123 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
47124 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
47125 }
47126 diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
47127 index f447188..59c17c5 100644
47128 --- a/fs/btrfs/disk-io.c
47129 +++ b/fs/btrfs/disk-io.c
47130 @@ -39,7 +39,7 @@
47131 #include "tree-log.h"
47132 #include "free-space-cache.h"
47133
47134 -static struct extent_io_ops btree_extent_io_ops;
47135 +static const struct extent_io_ops btree_extent_io_ops;
47136 static void end_workqueue_fn(struct btrfs_work *work);
47137 static void free_fs_root(struct btrfs_root *root);
47138
47139 @@ -2607,7 +2607,7 @@ out:
47140 return 0;
47141 }
47142
47143 -static struct extent_io_ops btree_extent_io_ops = {
47144 +static const struct extent_io_ops btree_extent_io_ops = {
47145 .write_cache_pages_lock_hook = btree_lock_page_hook,
47146 .readpage_end_io_hook = btree_readpage_end_io_hook,
47147 .submit_bio_hook = btree_submit_bio_hook,
47148 diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
47149 index 559f724..a026171 100644
47150 --- a/fs/btrfs/extent-tree.c
47151 +++ b/fs/btrfs/extent-tree.c
47152 @@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,
47153 u64 group_start = group->key.objectid;
47154 new_extents = kmalloc(sizeof(*new_extents),
47155 GFP_NOFS);
47156 + if (!new_extents) {
47157 + ret = -ENOMEM;
47158 + goto out;
47159 + }
47160 nr_extents = 1;
47161 ret = get_new_locations(reloc_inode,
47162 extent_key,
47163 diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
47164 index 36de250..7ec75c7 100644
47165 --- a/fs/btrfs/extent_io.h
47166 +++ b/fs/btrfs/extent_io.h
47167 @@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw,
47168 struct bio *bio, int mirror_num,
47169 unsigned long bio_flags);
47170 struct extent_io_ops {
47171 - int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
47172 + int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
47173 u64 start, u64 end, int *page_started,
47174 unsigned long *nr_written);
47175 - int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
47176 - int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
47177 + int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
47178 + int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
47179 extent_submit_bio_hook_t *submit_bio_hook;
47180 - int (*merge_bio_hook)(struct page *page, unsigned long offset,
47181 + int (* const merge_bio_hook)(struct page *page, unsigned long offset,
47182 size_t size, struct bio *bio,
47183 unsigned long bio_flags);
47184 - int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
47185 - int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
47186 + int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
47187 + int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
47188 u64 start, u64 end,
47189 struct extent_state *state);
47190 - int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
47191 + int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
47192 u64 start, u64 end,
47193 struct extent_state *state);
47194 - int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
47195 + int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
47196 struct extent_state *state);
47197 - int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
47198 + int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
47199 struct extent_state *state, int uptodate);
47200 - int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
47201 + int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
47202 unsigned long old, unsigned long bits);
47203 - int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
47204 + int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
47205 unsigned long bits);
47206 - int (*merge_extent_hook)(struct inode *inode,
47207 + int (* const merge_extent_hook)(struct inode *inode,
47208 struct extent_state *new,
47209 struct extent_state *other);
47210 - int (*split_extent_hook)(struct inode *inode,
47211 + int (* const split_extent_hook)(struct inode *inode,
47212 struct extent_state *orig, u64 split);
47213 - int (*write_cache_pages_lock_hook)(struct page *page);
47214 + int (* const write_cache_pages_lock_hook)(struct page *page);
47215 };
47216
47217 struct extent_io_tree {
47218 @@ -88,7 +88,7 @@ struct extent_io_tree {
47219 u64 dirty_bytes;
47220 spinlock_t lock;
47221 spinlock_t buffer_lock;
47222 - struct extent_io_ops *ops;
47223 + const struct extent_io_ops *ops;
47224 };
47225
47226 struct extent_state {
47227 diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
47228 index cb2849f..3718fb4 100644
47229 --- a/fs/btrfs/free-space-cache.c
47230 +++ b/fs/btrfs/free-space-cache.c
47231 @@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
47232
47233 while(1) {
47234 if (entry->bytes < bytes || entry->offset < min_start) {
47235 - struct rb_node *node;
47236 -
47237 node = rb_next(&entry->offset_index);
47238 if (!node)
47239 break;
47240 @@ -1226,7 +1224,7 @@ again:
47241 */
47242 while (entry->bitmap || found_bitmap ||
47243 (!entry->bitmap && entry->bytes < min_bytes)) {
47244 - struct rb_node *node = rb_next(&entry->offset_index);
47245 + node = rb_next(&entry->offset_index);
47246
47247 if (entry->bitmap && entry->bytes > bytes + empty_size) {
47248 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
47249 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
47250 index e03a836..323837e 100644
47251 --- a/fs/btrfs/inode.c
47252 +++ b/fs/btrfs/inode.c
47253 @@ -63,7 +63,7 @@ static const struct inode_operations btrfs_file_inode_operations;
47254 static const struct address_space_operations btrfs_aops;
47255 static const struct address_space_operations btrfs_symlink_aops;
47256 static const struct file_operations btrfs_dir_file_operations;
47257 -static struct extent_io_ops btrfs_extent_io_ops;
47258 +static const struct extent_io_ops btrfs_extent_io_ops;
47259
47260 static struct kmem_cache *btrfs_inode_cachep;
47261 struct kmem_cache *btrfs_trans_handle_cachep;
47262 @@ -925,6 +925,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
47263 1, 0, NULL, GFP_NOFS);
47264 while (start < end) {
47265 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
47266 + BUG_ON(!async_cow);
47267 async_cow->inode = inode;
47268 async_cow->root = root;
47269 async_cow->locked_page = locked_page;
47270 @@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(struct btrfs_path *path,
47271 inline_size = btrfs_file_extent_inline_item_len(leaf,
47272 btrfs_item_nr(leaf, path->slots[0]));
47273 tmp = kmalloc(inline_size, GFP_NOFS);
47274 + if (!tmp)
47275 + return -ENOMEM;
47276 ptr = btrfs_file_extent_inline_start(item);
47277
47278 read_extent_buffer(leaf, tmp, ptr, inline_size);
47279 @@ -5410,7 +5413,7 @@ fail:
47280 return -ENOMEM;
47281 }
47282
47283 -static int btrfs_getattr(struct vfsmount *mnt,
47284 +int btrfs_getattr(struct vfsmount *mnt,
47285 struct dentry *dentry, struct kstat *stat)
47286 {
47287 struct inode *inode = dentry->d_inode;
47288 @@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
47289 return 0;
47290 }
47291
47292 +EXPORT_SYMBOL(btrfs_getattr);
47293 +
47294 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
47295 +{
47296 + return BTRFS_I(inode)->root->anon_super.s_dev;
47297 +}
47298 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
47299 +
47300 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
47301 struct inode *new_dir, struct dentry *new_dentry)
47302 {
47303 @@ -5972,7 +5983,7 @@ static const struct file_operations btrfs_dir_file_operations = {
47304 .fsync = btrfs_sync_file,
47305 };
47306
47307 -static struct extent_io_ops btrfs_extent_io_ops = {
47308 +static const struct extent_io_ops btrfs_extent_io_ops = {
47309 .fill_delalloc = run_delalloc_range,
47310 .submit_bio_hook = btrfs_submit_bio_hook,
47311 .merge_bio_hook = btrfs_merge_bio_hook,
47312 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
47313 index ab7ab53..94e0781 100644
47314 --- a/fs/btrfs/relocation.c
47315 +++ b/fs/btrfs/relocation.c
47316 @@ -884,7 +884,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
47317 }
47318 spin_unlock(&rc->reloc_root_tree.lock);
47319
47320 - BUG_ON((struct btrfs_root *)node->data != root);
47321 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
47322
47323 if (!del) {
47324 spin_lock(&rc->reloc_root_tree.lock);
47325 diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
47326 index a240b6f..4ce16ef 100644
47327 --- a/fs/btrfs/sysfs.c
47328 +++ b/fs/btrfs/sysfs.c
47329 @@ -164,12 +164,12 @@ static void btrfs_root_release(struct kobject *kobj)
47330 complete(&root->kobj_unregister);
47331 }
47332
47333 -static struct sysfs_ops btrfs_super_attr_ops = {
47334 +static const struct sysfs_ops btrfs_super_attr_ops = {
47335 .show = btrfs_super_attr_show,
47336 .store = btrfs_super_attr_store,
47337 };
47338
47339 -static struct sysfs_ops btrfs_root_attr_ops = {
47340 +static const struct sysfs_ops btrfs_root_attr_ops = {
47341 .show = btrfs_root_attr_show,
47342 .store = btrfs_root_attr_store,
47343 };
47344 diff --git a/fs/buffer.c b/fs/buffer.c
47345 index 6fa5302..395d9f6 100644
47346 --- a/fs/buffer.c
47347 +++ b/fs/buffer.c
47348 @@ -25,6 +25,7 @@
47349 #include <linux/percpu.h>
47350 #include <linux/slab.h>
47351 #include <linux/capability.h>
47352 +#include <linux/security.h>
47353 #include <linux/blkdev.h>
47354 #include <linux/file.h>
47355 #include <linux/quotaops.h>
47356 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
47357 index 3797e00..ce776f6 100644
47358 --- a/fs/cachefiles/bind.c
47359 +++ b/fs/cachefiles/bind.c
47360 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
47361 args);
47362
47363 /* start by checking things over */
47364 - ASSERT(cache->fstop_percent >= 0 &&
47365 - cache->fstop_percent < cache->fcull_percent &&
47366 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
47367 cache->fcull_percent < cache->frun_percent &&
47368 cache->frun_percent < 100);
47369
47370 - ASSERT(cache->bstop_percent >= 0 &&
47371 - cache->bstop_percent < cache->bcull_percent &&
47372 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
47373 cache->bcull_percent < cache->brun_percent &&
47374 cache->brun_percent < 100);
47375
47376 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
47377 index 4618516..bb30d01 100644
47378 --- a/fs/cachefiles/daemon.c
47379 +++ b/fs/cachefiles/daemon.c
47380 @@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
47381 if (test_bit(CACHEFILES_DEAD, &cache->flags))
47382 return -EIO;
47383
47384 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
47385 + if (datalen > PAGE_SIZE - 1)
47386 return -EOPNOTSUPP;
47387
47388 /* drag the command string into the kernel so we can parse it */
47389 @@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
47390 if (args[0] != '%' || args[1] != '\0')
47391 return -EINVAL;
47392
47393 - if (fstop < 0 || fstop >= cache->fcull_percent)
47394 + if (fstop >= cache->fcull_percent)
47395 return cachefiles_daemon_range_error(cache, args);
47396
47397 cache->fstop_percent = fstop;
47398 @@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
47399 if (args[0] != '%' || args[1] != '\0')
47400 return -EINVAL;
47401
47402 - if (bstop < 0 || bstop >= cache->bcull_percent)
47403 + if (bstop >= cache->bcull_percent)
47404 return cachefiles_daemon_range_error(cache, args);
47405
47406 cache->bstop_percent = bstop;
47407 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
47408 index f7c255f..fcd61de 100644
47409 --- a/fs/cachefiles/internal.h
47410 +++ b/fs/cachefiles/internal.h
47411 @@ -56,7 +56,7 @@ struct cachefiles_cache {
47412 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
47413 struct rb_root active_nodes; /* active nodes (can't be culled) */
47414 rwlock_t active_lock; /* lock for active_nodes */
47415 - atomic_t gravecounter; /* graveyard uniquifier */
47416 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
47417 unsigned frun_percent; /* when to stop culling (% files) */
47418 unsigned fcull_percent; /* when to start culling (% files) */
47419 unsigned fstop_percent; /* when to stop allocating (% files) */
47420 @@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
47421 * proc.c
47422 */
47423 #ifdef CONFIG_CACHEFILES_HISTOGRAM
47424 -extern atomic_t cachefiles_lookup_histogram[HZ];
47425 -extern atomic_t cachefiles_mkdir_histogram[HZ];
47426 -extern atomic_t cachefiles_create_histogram[HZ];
47427 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
47428 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
47429 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
47430
47431 extern int __init cachefiles_proc_init(void);
47432 extern void cachefiles_proc_cleanup(void);
47433 static inline
47434 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
47435 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
47436 {
47437 unsigned long jif = jiffies - start_jif;
47438 if (jif >= HZ)
47439 jif = HZ - 1;
47440 - atomic_inc(&histogram[jif]);
47441 + atomic_inc_unchecked(&histogram[jif]);
47442 }
47443
47444 #else
47445 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
47446 index 14ac480..a62766c 100644
47447 --- a/fs/cachefiles/namei.c
47448 +++ b/fs/cachefiles/namei.c
47449 @@ -250,7 +250,7 @@ try_again:
47450 /* first step is to make up a grave dentry in the graveyard */
47451 sprintf(nbuffer, "%08x%08x",
47452 (uint32_t) get_seconds(),
47453 - (uint32_t) atomic_inc_return(&cache->gravecounter));
47454 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
47455
47456 /* do the multiway lock magic */
47457 trap = lock_rename(cache->graveyard, dir);
47458 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
47459 index eccd339..4c1d995 100644
47460 --- a/fs/cachefiles/proc.c
47461 +++ b/fs/cachefiles/proc.c
47462 @@ -14,9 +14,9 @@
47463 #include <linux/seq_file.h>
47464 #include "internal.h"
47465
47466 -atomic_t cachefiles_lookup_histogram[HZ];
47467 -atomic_t cachefiles_mkdir_histogram[HZ];
47468 -atomic_t cachefiles_create_histogram[HZ];
47469 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
47470 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
47471 +atomic_unchecked_t cachefiles_create_histogram[HZ];
47472
47473 /*
47474 * display the latency histogram
47475 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
47476 return 0;
47477 default:
47478 index = (unsigned long) v - 3;
47479 - x = atomic_read(&cachefiles_lookup_histogram[index]);
47480 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
47481 - z = atomic_read(&cachefiles_create_histogram[index]);
47482 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
47483 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
47484 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
47485 if (x == 0 && y == 0 && z == 0)
47486 return 0;
47487
47488 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
47489 index a6c8c6f..5cf8517 100644
47490 --- a/fs/cachefiles/rdwr.c
47491 +++ b/fs/cachefiles/rdwr.c
47492 @@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
47493 old_fs = get_fs();
47494 set_fs(KERNEL_DS);
47495 ret = file->f_op->write(
47496 - file, (const void __user *) data, len, &pos);
47497 + file, (const void __force_user *) data, len, &pos);
47498 set_fs(old_fs);
47499 kunmap(page);
47500 if (ret != len)
47501 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
47502 index 42cec2a..2aba466 100644
47503 --- a/fs/cifs/cifs_debug.c
47504 +++ b/fs/cifs/cifs_debug.c
47505 @@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
47506 tcon = list_entry(tmp3,
47507 struct cifsTconInfo,
47508 tcon_list);
47509 - atomic_set(&tcon->num_smbs_sent, 0);
47510 - atomic_set(&tcon->num_writes, 0);
47511 - atomic_set(&tcon->num_reads, 0);
47512 - atomic_set(&tcon->num_oplock_brks, 0);
47513 - atomic_set(&tcon->num_opens, 0);
47514 - atomic_set(&tcon->num_posixopens, 0);
47515 - atomic_set(&tcon->num_posixmkdirs, 0);
47516 - atomic_set(&tcon->num_closes, 0);
47517 - atomic_set(&tcon->num_deletes, 0);
47518 - atomic_set(&tcon->num_mkdirs, 0);
47519 - atomic_set(&tcon->num_rmdirs, 0);
47520 - atomic_set(&tcon->num_renames, 0);
47521 - atomic_set(&tcon->num_t2renames, 0);
47522 - atomic_set(&tcon->num_ffirst, 0);
47523 - atomic_set(&tcon->num_fnext, 0);
47524 - atomic_set(&tcon->num_fclose, 0);
47525 - atomic_set(&tcon->num_hardlinks, 0);
47526 - atomic_set(&tcon->num_symlinks, 0);
47527 - atomic_set(&tcon->num_locks, 0);
47528 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
47529 + atomic_set_unchecked(&tcon->num_writes, 0);
47530 + atomic_set_unchecked(&tcon->num_reads, 0);
47531 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
47532 + atomic_set_unchecked(&tcon->num_opens, 0);
47533 + atomic_set_unchecked(&tcon->num_posixopens, 0);
47534 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
47535 + atomic_set_unchecked(&tcon->num_closes, 0);
47536 + atomic_set_unchecked(&tcon->num_deletes, 0);
47537 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
47538 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
47539 + atomic_set_unchecked(&tcon->num_renames, 0);
47540 + atomic_set_unchecked(&tcon->num_t2renames, 0);
47541 + atomic_set_unchecked(&tcon->num_ffirst, 0);
47542 + atomic_set_unchecked(&tcon->num_fnext, 0);
47543 + atomic_set_unchecked(&tcon->num_fclose, 0);
47544 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
47545 + atomic_set_unchecked(&tcon->num_symlinks, 0);
47546 + atomic_set_unchecked(&tcon->num_locks, 0);
47547 }
47548 }
47549 }
47550 @@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
47551 if (tcon->need_reconnect)
47552 seq_puts(m, "\tDISCONNECTED ");
47553 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
47554 - atomic_read(&tcon->num_smbs_sent),
47555 - atomic_read(&tcon->num_oplock_brks));
47556 + atomic_read_unchecked(&tcon->num_smbs_sent),
47557 + atomic_read_unchecked(&tcon->num_oplock_brks));
47558 seq_printf(m, "\nReads: %d Bytes: %lld",
47559 - atomic_read(&tcon->num_reads),
47560 + atomic_read_unchecked(&tcon->num_reads),
47561 (long long)(tcon->bytes_read));
47562 seq_printf(m, "\nWrites: %d Bytes: %lld",
47563 - atomic_read(&tcon->num_writes),
47564 + atomic_read_unchecked(&tcon->num_writes),
47565 (long long)(tcon->bytes_written));
47566 seq_printf(m, "\nFlushes: %d",
47567 - atomic_read(&tcon->num_flushes));
47568 + atomic_read_unchecked(&tcon->num_flushes));
47569 seq_printf(m, "\nLocks: %d HardLinks: %d "
47570 "Symlinks: %d",
47571 - atomic_read(&tcon->num_locks),
47572 - atomic_read(&tcon->num_hardlinks),
47573 - atomic_read(&tcon->num_symlinks));
47574 + atomic_read_unchecked(&tcon->num_locks),
47575 + atomic_read_unchecked(&tcon->num_hardlinks),
47576 + atomic_read_unchecked(&tcon->num_symlinks));
47577 seq_printf(m, "\nOpens: %d Closes: %d "
47578 "Deletes: %d",
47579 - atomic_read(&tcon->num_opens),
47580 - atomic_read(&tcon->num_closes),
47581 - atomic_read(&tcon->num_deletes));
47582 + atomic_read_unchecked(&tcon->num_opens),
47583 + atomic_read_unchecked(&tcon->num_closes),
47584 + atomic_read_unchecked(&tcon->num_deletes));
47585 seq_printf(m, "\nPosix Opens: %d "
47586 "Posix Mkdirs: %d",
47587 - atomic_read(&tcon->num_posixopens),
47588 - atomic_read(&tcon->num_posixmkdirs));
47589 + atomic_read_unchecked(&tcon->num_posixopens),
47590 + atomic_read_unchecked(&tcon->num_posixmkdirs));
47591 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
47592 - atomic_read(&tcon->num_mkdirs),
47593 - atomic_read(&tcon->num_rmdirs));
47594 + atomic_read_unchecked(&tcon->num_mkdirs),
47595 + atomic_read_unchecked(&tcon->num_rmdirs));
47596 seq_printf(m, "\nRenames: %d T2 Renames %d",
47597 - atomic_read(&tcon->num_renames),
47598 - atomic_read(&tcon->num_t2renames));
47599 + atomic_read_unchecked(&tcon->num_renames),
47600 + atomic_read_unchecked(&tcon->num_t2renames));
47601 seq_printf(m, "\nFindFirst: %d FNext %d "
47602 "FClose %d",
47603 - atomic_read(&tcon->num_ffirst),
47604 - atomic_read(&tcon->num_fnext),
47605 - atomic_read(&tcon->num_fclose));
47606 + atomic_read_unchecked(&tcon->num_ffirst),
47607 + atomic_read_unchecked(&tcon->num_fnext),
47608 + atomic_read_unchecked(&tcon->num_fclose));
47609 }
47610 }
47611 }
47612 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
47613 index 1445407..68cb0dc 100644
47614 --- a/fs/cifs/cifsfs.c
47615 +++ b/fs/cifs/cifsfs.c
47616 @@ -869,7 +869,7 @@ cifs_init_request_bufs(void)
47617 cifs_req_cachep = kmem_cache_create("cifs_request",
47618 CIFSMaxBufSize +
47619 MAX_CIFS_HDR_SIZE, 0,
47620 - SLAB_HWCACHE_ALIGN, NULL);
47621 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
47622 if (cifs_req_cachep == NULL)
47623 return -ENOMEM;
47624
47625 @@ -896,7 +896,7 @@ cifs_init_request_bufs(void)
47626 efficient to alloc 1 per page off the slab compared to 17K (5page)
47627 alloc of large cifs buffers even when page debugging is on */
47628 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
47629 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
47630 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
47631 NULL);
47632 if (cifs_sm_req_cachep == NULL) {
47633 mempool_destroy(cifs_req_poolp);
47634 @@ -991,8 +991,8 @@ init_cifs(void)
47635 atomic_set(&bufAllocCount, 0);
47636 atomic_set(&smBufAllocCount, 0);
47637 #ifdef CONFIG_CIFS_STATS2
47638 - atomic_set(&totBufAllocCount, 0);
47639 - atomic_set(&totSmBufAllocCount, 0);
47640 + atomic_set_unchecked(&totBufAllocCount, 0);
47641 + atomic_set_unchecked(&totSmBufAllocCount, 0);
47642 #endif /* CONFIG_CIFS_STATS2 */
47643
47644 atomic_set(&midCount, 0);
47645 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
47646 index e29581e..1c22bab 100644
47647 --- a/fs/cifs/cifsglob.h
47648 +++ b/fs/cifs/cifsglob.h
47649 @@ -252,28 +252,28 @@ struct cifsTconInfo {
47650 __u16 Flags; /* optional support bits */
47651 enum statusEnum tidStatus;
47652 #ifdef CONFIG_CIFS_STATS
47653 - atomic_t num_smbs_sent;
47654 - atomic_t num_writes;
47655 - atomic_t num_reads;
47656 - atomic_t num_flushes;
47657 - atomic_t num_oplock_brks;
47658 - atomic_t num_opens;
47659 - atomic_t num_closes;
47660 - atomic_t num_deletes;
47661 - atomic_t num_mkdirs;
47662 - atomic_t num_posixopens;
47663 - atomic_t num_posixmkdirs;
47664 - atomic_t num_rmdirs;
47665 - atomic_t num_renames;
47666 - atomic_t num_t2renames;
47667 - atomic_t num_ffirst;
47668 - atomic_t num_fnext;
47669 - atomic_t num_fclose;
47670 - atomic_t num_hardlinks;
47671 - atomic_t num_symlinks;
47672 - atomic_t num_locks;
47673 - atomic_t num_acl_get;
47674 - atomic_t num_acl_set;
47675 + atomic_unchecked_t num_smbs_sent;
47676 + atomic_unchecked_t num_writes;
47677 + atomic_unchecked_t num_reads;
47678 + atomic_unchecked_t num_flushes;
47679 + atomic_unchecked_t num_oplock_brks;
47680 + atomic_unchecked_t num_opens;
47681 + atomic_unchecked_t num_closes;
47682 + atomic_unchecked_t num_deletes;
47683 + atomic_unchecked_t num_mkdirs;
47684 + atomic_unchecked_t num_posixopens;
47685 + atomic_unchecked_t num_posixmkdirs;
47686 + atomic_unchecked_t num_rmdirs;
47687 + atomic_unchecked_t num_renames;
47688 + atomic_unchecked_t num_t2renames;
47689 + atomic_unchecked_t num_ffirst;
47690 + atomic_unchecked_t num_fnext;
47691 + atomic_unchecked_t num_fclose;
47692 + atomic_unchecked_t num_hardlinks;
47693 + atomic_unchecked_t num_symlinks;
47694 + atomic_unchecked_t num_locks;
47695 + atomic_unchecked_t num_acl_get;
47696 + atomic_unchecked_t num_acl_set;
47697 #ifdef CONFIG_CIFS_STATS2
47698 unsigned long long time_writes;
47699 unsigned long long time_reads;
47700 @@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const struct cifs_sb_info *cifs_sb)
47701 }
47702
47703 #ifdef CONFIG_CIFS_STATS
47704 -#define cifs_stats_inc atomic_inc
47705 +#define cifs_stats_inc atomic_inc_unchecked
47706
47707 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
47708 unsigned int bytes)
47709 @@ -701,8 +701,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
47710 /* Various Debug counters */
47711 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
47712 #ifdef CONFIG_CIFS_STATS2
47713 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
47714 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
47715 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
47716 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
47717 #endif
47718 GLOBAL_EXTERN atomic_t smBufAllocCount;
47719 GLOBAL_EXTERN atomic_t midCount;
47720 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
47721 index fc1e048..28b3441 100644
47722 --- a/fs/cifs/link.c
47723 +++ b/fs/cifs/link.c
47724 @@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
47725
47726 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
47727 {
47728 - char *p = nd_get_link(nd);
47729 + const char *p = nd_get_link(nd);
47730 if (!IS_ERR(p))
47731 kfree(p);
47732 }
47733 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
47734 index 95b82e8..12a538d 100644
47735 --- a/fs/cifs/misc.c
47736 +++ b/fs/cifs/misc.c
47737 @@ -155,7 +155,7 @@ cifs_buf_get(void)
47738 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
47739 atomic_inc(&bufAllocCount);
47740 #ifdef CONFIG_CIFS_STATS2
47741 - atomic_inc(&totBufAllocCount);
47742 + atomic_inc_unchecked(&totBufAllocCount);
47743 #endif /* CONFIG_CIFS_STATS2 */
47744 }
47745
47746 @@ -190,7 +190,7 @@ cifs_small_buf_get(void)
47747 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
47748 atomic_inc(&smBufAllocCount);
47749 #ifdef CONFIG_CIFS_STATS2
47750 - atomic_inc(&totSmBufAllocCount);
47751 + atomic_inc_unchecked(&totSmBufAllocCount);
47752 #endif /* CONFIG_CIFS_STATS2 */
47753
47754 }
47755 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
47756 index a5bf577..6d19845 100644
47757 --- a/fs/coda/cache.c
47758 +++ b/fs/coda/cache.c
47759 @@ -24,14 +24,14 @@
47760 #include <linux/coda_fs_i.h>
47761 #include <linux/coda_cache.h>
47762
47763 -static atomic_t permission_epoch = ATOMIC_INIT(0);
47764 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
47765
47766 /* replace or extend an acl cache hit */
47767 void coda_cache_enter(struct inode *inode, int mask)
47768 {
47769 struct coda_inode_info *cii = ITOC(inode);
47770
47771 - cii->c_cached_epoch = atomic_read(&permission_epoch);
47772 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
47773 if (cii->c_uid != current_fsuid()) {
47774 cii->c_uid = current_fsuid();
47775 cii->c_cached_perm = mask;
47776 @@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inode, int mask)
47777 void coda_cache_clear_inode(struct inode *inode)
47778 {
47779 struct coda_inode_info *cii = ITOC(inode);
47780 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
47781 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
47782 }
47783
47784 /* remove all acl caches */
47785 void coda_cache_clear_all(struct super_block *sb)
47786 {
47787 - atomic_inc(&permission_epoch);
47788 + atomic_inc_unchecked(&permission_epoch);
47789 }
47790
47791
47792 @@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode, int mask)
47793
47794 hit = (mask & cii->c_cached_perm) == mask &&
47795 cii->c_uid == current_fsuid() &&
47796 - cii->c_cached_epoch == atomic_read(&permission_epoch);
47797 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
47798
47799 return hit;
47800 }
47801 diff --git a/fs/compat.c b/fs/compat.c
47802 index d1e2411..27064e4 100644
47803 --- a/fs/compat.c
47804 +++ b/fs/compat.c
47805 @@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(char __user *filename, struct compat_timeval _
47806 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
47807 {
47808 compat_ino_t ino = stat->ino;
47809 - typeof(ubuf->st_uid) uid = 0;
47810 - typeof(ubuf->st_gid) gid = 0;
47811 + typeof(((struct compat_stat *)0)->st_uid) uid = 0;
47812 + typeof(((struct compat_stat *)0)->st_gid) gid = 0;
47813 int err;
47814
47815 SET_UID(uid, stat->uid);
47816 @@ -533,7 +533,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
47817
47818 set_fs(KERNEL_DS);
47819 /* The __user pointer cast is valid because of the set_fs() */
47820 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
47821 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
47822 set_fs(oldfs);
47823 /* truncating is ok because it's a user address */
47824 if (!ret)
47825 @@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
47826
47827 struct compat_readdir_callback {
47828 struct compat_old_linux_dirent __user *dirent;
47829 + struct file * file;
47830 int result;
47831 };
47832
47833 @@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
47834 buf->result = -EOVERFLOW;
47835 return -EOVERFLOW;
47836 }
47837 +
47838 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
47839 + return 0;
47840 +
47841 buf->result++;
47842 dirent = buf->dirent;
47843 if (!access_ok(VERIFY_WRITE, dirent,
47844 @@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
47845
47846 buf.result = 0;
47847 buf.dirent = dirent;
47848 + buf.file = file;
47849
47850 error = vfs_readdir(file, compat_fillonedir, &buf);
47851 if (buf.result)
47852 @@ -899,6 +905,7 @@ struct compat_linux_dirent {
47853 struct compat_getdents_callback {
47854 struct compat_linux_dirent __user *current_dir;
47855 struct compat_linux_dirent __user *previous;
47856 + struct file * file;
47857 int count;
47858 int error;
47859 };
47860 @@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
47861 buf->error = -EOVERFLOW;
47862 return -EOVERFLOW;
47863 }
47864 +
47865 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
47866 + return 0;
47867 +
47868 dirent = buf->previous;
47869 if (dirent) {
47870 if (__put_user(offset, &dirent->d_off))
47871 @@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
47872 buf.previous = NULL;
47873 buf.count = count;
47874 buf.error = 0;
47875 + buf.file = file;
47876
47877 error = vfs_readdir(file, compat_filldir, &buf);
47878 if (error >= 0)
47879 @@ -987,6 +999,7 @@ out:
47880 struct compat_getdents_callback64 {
47881 struct linux_dirent64 __user *current_dir;
47882 struct linux_dirent64 __user *previous;
47883 + struct file * file;
47884 int count;
47885 int error;
47886 };
47887 @@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
47888 buf->error = -EINVAL; /* only used if we fail.. */
47889 if (reclen > buf->count)
47890 return -EINVAL;
47891 +
47892 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
47893 + return 0;
47894 +
47895 dirent = buf->previous;
47896
47897 if (dirent) {
47898 @@ -1054,13 +1071,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
47899 buf.previous = NULL;
47900 buf.count = count;
47901 buf.error = 0;
47902 + buf.file = file;
47903
47904 error = vfs_readdir(file, compat_filldir64, &buf);
47905 if (error >= 0)
47906 error = buf.error;
47907 lastdirent = buf.previous;
47908 if (lastdirent) {
47909 - typeof(lastdirent->d_off) d_off = file->f_pos;
47910 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
47911 if (__put_user_unaligned(d_off, &lastdirent->d_off))
47912 error = -EFAULT;
47913 else
47914 @@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
47915 * verify all the pointers
47916 */
47917 ret = -EINVAL;
47918 - if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
47919 + if (nr_segs > UIO_MAXIOV)
47920 goto out;
47921 if (!file->f_op)
47922 goto out;
47923 @@ -1463,11 +1481,35 @@ int compat_do_execve(char * filename,
47924 compat_uptr_t __user *envp,
47925 struct pt_regs * regs)
47926 {
47927 +#ifdef CONFIG_GRKERNSEC
47928 + struct file *old_exec_file;
47929 + struct acl_subject_label *old_acl;
47930 + struct rlimit old_rlim[RLIM_NLIMITS];
47931 +#endif
47932 struct linux_binprm *bprm;
47933 struct file *file;
47934 struct files_struct *displaced;
47935 bool clear_in_exec;
47936 int retval;
47937 + const struct cred *cred = current_cred();
47938 +
47939 + /*
47940 + * We move the actual failure in case of RLIMIT_NPROC excess from
47941 + * set*uid() to execve() because too many poorly written programs
47942 + * don't check setuid() return code. Here we additionally recheck
47943 + * whether NPROC limit is still exceeded.
47944 + */
47945 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
47946 +
47947 + if ((current->flags & PF_NPROC_EXCEEDED) &&
47948 + atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
47949 + retval = -EAGAIN;
47950 + goto out_ret;
47951 + }
47952 +
47953 + /* We're below the limit (still or again), so we don't want to make
47954 + * further execve() calls fail. */
47955 + current->flags &= ~PF_NPROC_EXCEEDED;
47956
47957 retval = unshare_files(&displaced);
47958 if (retval)
47959 @@ -1499,6 +1541,15 @@ int compat_do_execve(char * filename,
47960 bprm->filename = filename;
47961 bprm->interp = filename;
47962
47963 + if (gr_process_user_ban()) {
47964 + retval = -EPERM;
47965 + goto out_file;
47966 + }
47967 +
47968 + retval = -EACCES;
47969 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
47970 + goto out_file;
47971 +
47972 retval = bprm_mm_init(bprm);
47973 if (retval)
47974 goto out_file;
47975 @@ -1528,9 +1579,40 @@ int compat_do_execve(char * filename,
47976 if (retval < 0)
47977 goto out;
47978
47979 + if (!gr_tpe_allow(file)) {
47980 + retval = -EACCES;
47981 + goto out;
47982 + }
47983 +
47984 + if (gr_check_crash_exec(file)) {
47985 + retval = -EACCES;
47986 + goto out;
47987 + }
47988 +
47989 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
47990 +
47991 + gr_handle_exec_args_compat(bprm, argv);
47992 +
47993 +#ifdef CONFIG_GRKERNSEC
47994 + old_acl = current->acl;
47995 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
47996 + old_exec_file = current->exec_file;
47997 + get_file(file);
47998 + current->exec_file = file;
47999 +#endif
48000 +
48001 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
48002 + bprm->unsafe & LSM_UNSAFE_SHARE);
48003 + if (retval < 0)
48004 + goto out_fail;
48005 +
48006 retval = search_binary_handler(bprm, regs);
48007 if (retval < 0)
48008 - goto out;
48009 + goto out_fail;
48010 +#ifdef CONFIG_GRKERNSEC
48011 + if (old_exec_file)
48012 + fput(old_exec_file);
48013 +#endif
48014
48015 /* execve succeeded */
48016 current->fs->in_exec = 0;
48017 @@ -1541,6 +1623,14 @@ int compat_do_execve(char * filename,
48018 put_files_struct(displaced);
48019 return retval;
48020
48021 +out_fail:
48022 +#ifdef CONFIG_GRKERNSEC
48023 + current->acl = old_acl;
48024 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
48025 + fput(current->exec_file);
48026 + current->exec_file = old_exec_file;
48027 +#endif
48028 +
48029 out:
48030 if (bprm->mm) {
48031 acct_arg_size(bprm, 0);
48032 @@ -1711,6 +1801,8 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp,
48033 struct fdtable *fdt;
48034 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
48035
48036 + pax_track_stack();
48037 +
48038 if (n < 0)
48039 goto out_nofds;
48040
48041 @@ -2151,7 +2243,7 @@ asmlinkage long compat_sys_nfsservctl(int cmd,
48042 oldfs = get_fs();
48043 set_fs(KERNEL_DS);
48044 /* The __user pointer casts are valid because of the set_fs() */
48045 - err = sys_nfsservctl(cmd, (void __user *) karg, (void __user *) kres);
48046 + err = sys_nfsservctl(cmd, (void __force_user *) karg, (void __force_user *) kres);
48047 set_fs(oldfs);
48048
48049 if (err)
48050 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
48051 index 0adced2..bbb1b0d 100644
48052 --- a/fs/compat_binfmt_elf.c
48053 +++ b/fs/compat_binfmt_elf.c
48054 @@ -29,10 +29,12 @@
48055 #undef elfhdr
48056 #undef elf_phdr
48057 #undef elf_note
48058 +#undef elf_dyn
48059 #undef elf_addr_t
48060 #define elfhdr elf32_hdr
48061 #define elf_phdr elf32_phdr
48062 #define elf_note elf32_note
48063 +#define elf_dyn Elf32_Dyn
48064 #define elf_addr_t Elf32_Addr
48065
48066 /*
48067 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
48068 index d84e705..d8c364c 100644
48069 --- a/fs/compat_ioctl.c
48070 +++ b/fs/compat_ioctl.c
48071 @@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd, unsigned
48072 up = (struct compat_video_spu_palette __user *) arg;
48073 err = get_user(palp, &up->palette);
48074 err |= get_user(length, &up->length);
48075 + if (err)
48076 + return -EFAULT;
48077
48078 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
48079 err = put_user(compat_ptr(palp), &up_native->palette);
48080 @@ -1513,7 +1515,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
48081 return -EFAULT;
48082 if (__get_user(udata, &ss32->iomem_base))
48083 return -EFAULT;
48084 - ss.iomem_base = compat_ptr(udata);
48085 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
48086 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
48087 __get_user(ss.port_high, &ss32->port_high))
48088 return -EFAULT;
48089 @@ -1809,7 +1811,7 @@ static int compat_ioctl_preallocate(struct file *file, unsigned long arg)
48090 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
48091 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
48092 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
48093 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
48094 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
48095 return -EFAULT;
48096
48097 return ioctl_preallocate(file, p);
48098 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
48099 index 8e48b52..f01ed91 100644
48100 --- a/fs/configfs/dir.c
48101 +++ b/fs/configfs/dir.c
48102 @@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
48103 }
48104 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
48105 struct configfs_dirent *next;
48106 - const char * name;
48107 + const unsigned char * name;
48108 + char d_name[sizeof(next->s_dentry->d_iname)];
48109 int len;
48110
48111 next = list_entry(p, struct configfs_dirent,
48112 @@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
48113 continue;
48114
48115 name = configfs_get_name(next);
48116 - len = strlen(name);
48117 + if (next->s_dentry && name == next->s_dentry->d_iname) {
48118 + len = next->s_dentry->d_name.len;
48119 + memcpy(d_name, name, len);
48120 + name = d_name;
48121 + } else
48122 + len = strlen(name);
48123 if (next->s_dentry)
48124 ino = next->s_dentry->d_inode->i_ino;
48125 else
48126 diff --git a/fs/dcache.c b/fs/dcache.c
48127 index 44c0aea..2529092 100644
48128 --- a/fs/dcache.c
48129 +++ b/fs/dcache.c
48130 @@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
48131
48132 static struct kmem_cache *dentry_cache __read_mostly;
48133
48134 -#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
48135 -
48136 /*
48137 * This is the single most critical data structure when it comes
48138 * to the dcache: the hashtable for lookups. Somebody should try
48139 @@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned long mempages)
48140 mempages -= reserve;
48141
48142 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
48143 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
48144 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
48145
48146 dcache_init();
48147 inode_init();
48148 diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
48149 index c010ecf..a8d8c59 100644
48150 --- a/fs/dlm/lockspace.c
48151 +++ b/fs/dlm/lockspace.c
48152 @@ -148,7 +148,7 @@ static void lockspace_kobj_release(struct kobject *k)
48153 kfree(ls);
48154 }
48155
48156 -static struct sysfs_ops dlm_attr_ops = {
48157 +static const struct sysfs_ops dlm_attr_ops = {
48158 .show = dlm_attr_show,
48159 .store = dlm_attr_store,
48160 };
48161 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
48162 index 88ba4d4..073f003 100644
48163 --- a/fs/ecryptfs/inode.c
48164 +++ b/fs/ecryptfs/inode.c
48165 @@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
48166 old_fs = get_fs();
48167 set_fs(get_ds());
48168 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
48169 - (char __user *)lower_buf,
48170 + (char __force_user *)lower_buf,
48171 lower_bufsiz);
48172 set_fs(old_fs);
48173 if (rc < 0)
48174 @@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
48175 }
48176 old_fs = get_fs();
48177 set_fs(get_ds());
48178 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
48179 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
48180 set_fs(old_fs);
48181 if (rc < 0)
48182 goto out_free;
48183 diff --git a/fs/exec.c b/fs/exec.c
48184 index 86fafc6..6d33cbb 100644
48185 --- a/fs/exec.c
48186 +++ b/fs/exec.c
48187 @@ -56,12 +56,28 @@
48188 #include <linux/fsnotify.h>
48189 #include <linux/fs_struct.h>
48190 #include <linux/pipe_fs_i.h>
48191 +#include <linux/random.h>
48192 +#include <linux/seq_file.h>
48193 +
48194 +#ifdef CONFIG_PAX_REFCOUNT
48195 +#include <linux/kallsyms.h>
48196 +#include <linux/kdebug.h>
48197 +#endif
48198
48199 #include <asm/uaccess.h>
48200 #include <asm/mmu_context.h>
48201 #include <asm/tlb.h>
48202 #include "internal.h"
48203
48204 +#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
48205 +void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
48206 +#endif
48207 +
48208 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
48209 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
48210 +EXPORT_SYMBOL(pax_set_initial_flags_func);
48211 +#endif
48212 +
48213 int core_uses_pid;
48214 char core_pattern[CORENAME_MAX_SIZE] = "core";
48215 unsigned int core_pipe_limit;
48216 @@ -178,18 +194,10 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
48217 int write)
48218 {
48219 struct page *page;
48220 - int ret;
48221
48222 -#ifdef CONFIG_STACK_GROWSUP
48223 - if (write) {
48224 - ret = expand_stack_downwards(bprm->vma, pos);
48225 - if (ret < 0)
48226 - return NULL;
48227 - }
48228 -#endif
48229 - ret = get_user_pages(current, bprm->mm, pos,
48230 - 1, write, 1, &page, NULL);
48231 - if (ret <= 0)
48232 + if (0 > expand_stack_downwards(bprm->vma, pos))
48233 + return NULL;
48234 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
48235 return NULL;
48236
48237 if (write) {
48238 @@ -263,6 +271,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
48239 vma->vm_end = STACK_TOP_MAX;
48240 vma->vm_start = vma->vm_end - PAGE_SIZE;
48241 vma->vm_flags = VM_STACK_FLAGS;
48242 +
48243 +#ifdef CONFIG_PAX_SEGMEXEC
48244 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
48245 +#endif
48246 +
48247 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
48248
48249 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
48250 @@ -276,6 +289,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
48251 mm->stack_vm = mm->total_vm = 1;
48252 up_write(&mm->mmap_sem);
48253 bprm->p = vma->vm_end - sizeof(void *);
48254 +
48255 +#ifdef CONFIG_PAX_RANDUSTACK
48256 + if (randomize_va_space)
48257 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
48258 +#endif
48259 +
48260 return 0;
48261 err:
48262 up_write(&mm->mmap_sem);
48263 @@ -510,7 +529,7 @@ int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
48264 int r;
48265 mm_segment_t oldfs = get_fs();
48266 set_fs(KERNEL_DS);
48267 - r = copy_strings(argc, (char __user * __user *)argv, bprm);
48268 + r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
48269 set_fs(oldfs);
48270 return r;
48271 }
48272 @@ -540,7 +559,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
48273 unsigned long new_end = old_end - shift;
48274 struct mmu_gather *tlb;
48275
48276 - BUG_ON(new_start > new_end);
48277 + if (new_start >= new_end || new_start < mmap_min_addr)
48278 + return -ENOMEM;
48279
48280 /*
48281 * ensure there are no vmas between where we want to go
48282 @@ -549,6 +569,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
48283 if (vma != find_vma(mm, new_start))
48284 return -EFAULT;
48285
48286 +#ifdef CONFIG_PAX_SEGMEXEC
48287 + BUG_ON(pax_find_mirror_vma(vma));
48288 +#endif
48289 +
48290 /*
48291 * cover the whole range: [new_start, old_end)
48292 */
48293 @@ -630,10 +654,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
48294 stack_top = arch_align_stack(stack_top);
48295 stack_top = PAGE_ALIGN(stack_top);
48296
48297 - if (unlikely(stack_top < mmap_min_addr) ||
48298 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
48299 - return -ENOMEM;
48300 -
48301 stack_shift = vma->vm_end - stack_top;
48302
48303 bprm->p -= stack_shift;
48304 @@ -645,6 +665,14 @@ int setup_arg_pages(struct linux_binprm *bprm,
48305 bprm->exec -= stack_shift;
48306
48307 down_write(&mm->mmap_sem);
48308 +
48309 + /* Move stack pages down in memory. */
48310 + if (stack_shift) {
48311 + ret = shift_arg_pages(vma, stack_shift);
48312 + if (ret)
48313 + goto out_unlock;
48314 + }
48315 +
48316 vm_flags = VM_STACK_FLAGS;
48317
48318 /*
48319 @@ -658,19 +686,24 @@ int setup_arg_pages(struct linux_binprm *bprm,
48320 vm_flags &= ~VM_EXEC;
48321 vm_flags |= mm->def_flags;
48322
48323 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
48324 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
48325 + vm_flags &= ~VM_EXEC;
48326 +
48327 +#ifdef CONFIG_PAX_MPROTECT
48328 + if (mm->pax_flags & MF_PAX_MPROTECT)
48329 + vm_flags &= ~VM_MAYEXEC;
48330 +#endif
48331 +
48332 + }
48333 +#endif
48334 +
48335 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
48336 vm_flags);
48337 if (ret)
48338 goto out_unlock;
48339 BUG_ON(prev != vma);
48340
48341 - /* Move stack pages down in memory. */
48342 - if (stack_shift) {
48343 - ret = shift_arg_pages(vma, stack_shift);
48344 - if (ret)
48345 - goto out_unlock;
48346 - }
48347 -
48348 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
48349 stack_size = vma->vm_end - vma->vm_start;
48350 /*
48351 @@ -744,7 +777,7 @@ int kernel_read(struct file *file, loff_t offset,
48352 old_fs = get_fs();
48353 set_fs(get_ds());
48354 /* The cast to a user pointer is valid due to the set_fs() */
48355 - result = vfs_read(file, (void __user *)addr, count, &pos);
48356 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
48357 set_fs(old_fs);
48358 return result;
48359 }
48360 @@ -1152,7 +1185,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
48361 }
48362 rcu_read_unlock();
48363
48364 - if (p->fs->users > n_fs) {
48365 + if (atomic_read(&p->fs->users) > n_fs) {
48366 bprm->unsafe |= LSM_UNSAFE_SHARE;
48367 } else {
48368 res = -EAGAIN;
48369 @@ -1347,11 +1380,35 @@ int do_execve(char * filename,
48370 char __user *__user *envp,
48371 struct pt_regs * regs)
48372 {
48373 +#ifdef CONFIG_GRKERNSEC
48374 + struct file *old_exec_file;
48375 + struct acl_subject_label *old_acl;
48376 + struct rlimit old_rlim[RLIM_NLIMITS];
48377 +#endif
48378 struct linux_binprm *bprm;
48379 struct file *file;
48380 struct files_struct *displaced;
48381 bool clear_in_exec;
48382 int retval;
48383 + const struct cred *cred = current_cred();
48384 +
48385 + /*
48386 + * We move the actual failure in case of RLIMIT_NPROC excess from
48387 + * set*uid() to execve() because too many poorly written programs
48388 + * don't check setuid() return code. Here we additionally recheck
48389 + * whether NPROC limit is still exceeded.
48390 + */
48391 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
48392 +
48393 + if ((current->flags & PF_NPROC_EXCEEDED) &&
48394 + atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
48395 + retval = -EAGAIN;
48396 + goto out_ret;
48397 + }
48398 +
48399 + /* We're below the limit (still or again), so we don't want to make
48400 + * further execve() calls fail. */
48401 + current->flags &= ~PF_NPROC_EXCEEDED;
48402
48403 retval = unshare_files(&displaced);
48404 if (retval)
48405 @@ -1383,6 +1440,16 @@ int do_execve(char * filename,
48406 bprm->filename = filename;
48407 bprm->interp = filename;
48408
48409 + if (gr_process_user_ban()) {
48410 + retval = -EPERM;
48411 + goto out_file;
48412 + }
48413 +
48414 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
48415 + retval = -EACCES;
48416 + goto out_file;
48417 + }
48418 +
48419 retval = bprm_mm_init(bprm);
48420 if (retval)
48421 goto out_file;
48422 @@ -1412,10 +1479,41 @@ int do_execve(char * filename,
48423 if (retval < 0)
48424 goto out;
48425
48426 + if (!gr_tpe_allow(file)) {
48427 + retval = -EACCES;
48428 + goto out;
48429 + }
48430 +
48431 + if (gr_check_crash_exec(file)) {
48432 + retval = -EACCES;
48433 + goto out;
48434 + }
48435 +
48436 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
48437 +
48438 + gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
48439 +
48440 +#ifdef CONFIG_GRKERNSEC
48441 + old_acl = current->acl;
48442 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
48443 + old_exec_file = current->exec_file;
48444 + get_file(file);
48445 + current->exec_file = file;
48446 +#endif
48447 +
48448 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
48449 + bprm->unsafe & LSM_UNSAFE_SHARE);
48450 + if (retval < 0)
48451 + goto out_fail;
48452 +
48453 current->flags &= ~PF_KTHREAD;
48454 retval = search_binary_handler(bprm,regs);
48455 if (retval < 0)
48456 - goto out;
48457 + goto out_fail;
48458 +#ifdef CONFIG_GRKERNSEC
48459 + if (old_exec_file)
48460 + fput(old_exec_file);
48461 +#endif
48462
48463 /* execve succeeded */
48464 current->fs->in_exec = 0;
48465 @@ -1426,6 +1524,14 @@ int do_execve(char * filename,
48466 put_files_struct(displaced);
48467 return retval;
48468
48469 +out_fail:
48470 +#ifdef CONFIG_GRKERNSEC
48471 + current->acl = old_acl;
48472 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
48473 + fput(current->exec_file);
48474 + current->exec_file = old_exec_file;
48475 +#endif
48476 +
48477 out:
48478 if (bprm->mm) {
48479 acct_arg_size(bprm, 0);
48480 @@ -1591,6 +1697,220 @@ out:
48481 return ispipe;
48482 }
48483
48484 +int pax_check_flags(unsigned long *flags)
48485 +{
48486 + int retval = 0;
48487 +
48488 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
48489 + if (*flags & MF_PAX_SEGMEXEC)
48490 + {
48491 + *flags &= ~MF_PAX_SEGMEXEC;
48492 + retval = -EINVAL;
48493 + }
48494 +#endif
48495 +
48496 + if ((*flags & MF_PAX_PAGEEXEC)
48497 +
48498 +#ifdef CONFIG_PAX_PAGEEXEC
48499 + && (*flags & MF_PAX_SEGMEXEC)
48500 +#endif
48501 +
48502 + )
48503 + {
48504 + *flags &= ~MF_PAX_PAGEEXEC;
48505 + retval = -EINVAL;
48506 + }
48507 +
48508 + if ((*flags & MF_PAX_MPROTECT)
48509 +
48510 +#ifdef CONFIG_PAX_MPROTECT
48511 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
48512 +#endif
48513 +
48514 + )
48515 + {
48516 + *flags &= ~MF_PAX_MPROTECT;
48517 + retval = -EINVAL;
48518 + }
48519 +
48520 + if ((*flags & MF_PAX_EMUTRAMP)
48521 +
48522 +#ifdef CONFIG_PAX_EMUTRAMP
48523 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
48524 +#endif
48525 +
48526 + )
48527 + {
48528 + *flags &= ~MF_PAX_EMUTRAMP;
48529 + retval = -EINVAL;
48530 + }
48531 +
48532 + return retval;
48533 +}
48534 +
48535 +EXPORT_SYMBOL(pax_check_flags);
48536 +
48537 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
48538 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
48539 +{
48540 + struct task_struct *tsk = current;
48541 + struct mm_struct *mm = current->mm;
48542 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
48543 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
48544 + char *path_exec = NULL;
48545 + char *path_fault = NULL;
48546 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
48547 +
48548 + if (buffer_exec && buffer_fault) {
48549 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
48550 +
48551 + down_read(&mm->mmap_sem);
48552 + vma = mm->mmap;
48553 + while (vma && (!vma_exec || !vma_fault)) {
48554 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
48555 + vma_exec = vma;
48556 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
48557 + vma_fault = vma;
48558 + vma = vma->vm_next;
48559 + }
48560 + if (vma_exec) {
48561 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
48562 + if (IS_ERR(path_exec))
48563 + path_exec = "<path too long>";
48564 + else {
48565 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
48566 + if (path_exec) {
48567 + *path_exec = 0;
48568 + path_exec = buffer_exec;
48569 + } else
48570 + path_exec = "<path too long>";
48571 + }
48572 + }
48573 + if (vma_fault) {
48574 + start = vma_fault->vm_start;
48575 + end = vma_fault->vm_end;
48576 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
48577 + if (vma_fault->vm_file) {
48578 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
48579 + if (IS_ERR(path_fault))
48580 + path_fault = "<path too long>";
48581 + else {
48582 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
48583 + if (path_fault) {
48584 + *path_fault = 0;
48585 + path_fault = buffer_fault;
48586 + } else
48587 + path_fault = "<path too long>";
48588 + }
48589 + } else
48590 + path_fault = "<anonymous mapping>";
48591 + }
48592 + up_read(&mm->mmap_sem);
48593 + }
48594 + if (tsk->signal->curr_ip)
48595 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
48596 + else
48597 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
48598 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
48599 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
48600 + task_uid(tsk), task_euid(tsk), pc, sp);
48601 + free_page((unsigned long)buffer_exec);
48602 + free_page((unsigned long)buffer_fault);
48603 + pax_report_insns(regs, pc, sp);
48604 + do_coredump(SIGKILL, SIGKILL, regs);
48605 +}
48606 +#endif
48607 +
48608 +#ifdef CONFIG_PAX_REFCOUNT
48609 +void pax_report_refcount_overflow(struct pt_regs *regs)
48610 +{
48611 + if (current->signal->curr_ip)
48612 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
48613 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
48614 + else
48615 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
48616 + current->comm, task_pid_nr(current), current_uid(), current_euid());
48617 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
48618 + show_regs(regs);
48619 + force_sig_specific(SIGKILL, current);
48620 +}
48621 +#endif
48622 +
48623 +#ifdef CONFIG_PAX_USERCOPY
48624 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
48625 +int object_is_on_stack(const void *obj, unsigned long len)
48626 +{
48627 + const void * const stack = task_stack_page(current);
48628 + const void * const stackend = stack + THREAD_SIZE;
48629 +
48630 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
48631 + const void *frame = NULL;
48632 + const void *oldframe;
48633 +#endif
48634 +
48635 + if (obj + len < obj)
48636 + return -1;
48637 +
48638 + if (obj + len <= stack || stackend <= obj)
48639 + return 0;
48640 +
48641 + if (obj < stack || stackend < obj + len)
48642 + return -1;
48643 +
48644 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
48645 + oldframe = __builtin_frame_address(1);
48646 + if (oldframe)
48647 + frame = __builtin_frame_address(2);
48648 + /*
48649 + low ----------------------------------------------> high
48650 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
48651 + ^----------------^
48652 + allow copies only within here
48653 + */
48654 + while (stack <= frame && frame < stackend) {
48655 + /* if obj + len extends past the last frame, this
48656 + check won't pass and the next frame will be 0,
48657 + causing us to bail out and correctly report
48658 + the copy as invalid
48659 + */
48660 + if (obj + len <= frame)
48661 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
48662 + oldframe = frame;
48663 + frame = *(const void * const *)frame;
48664 + }
48665 + return -1;
48666 +#else
48667 + return 1;
48668 +#endif
48669 +}
48670 +
48671 +
48672 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
48673 +{
48674 + if (current->signal->curr_ip)
48675 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
48676 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
48677 + else
48678 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
48679 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
48680 +
48681 + dump_stack();
48682 + gr_handle_kernel_exploit();
48683 + do_group_exit(SIGKILL);
48684 +}
48685 +#endif
48686 +
48687 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
48688 +void pax_track_stack(void)
48689 +{
48690 + unsigned long sp = (unsigned long)&sp;
48691 + if (sp < current_thread_info()->lowest_stack &&
48692 + sp > (unsigned long)task_stack_page(current))
48693 + current_thread_info()->lowest_stack = sp;
48694 +}
48695 +EXPORT_SYMBOL(pax_track_stack);
48696 +#endif
48697 +
48698 static int zap_process(struct task_struct *start)
48699 {
48700 struct task_struct *t;
48701 @@ -1793,17 +2113,17 @@ static void wait_for_dump_helpers(struct file *file)
48702 pipe = file->f_path.dentry->d_inode->i_pipe;
48703
48704 pipe_lock(pipe);
48705 - pipe->readers++;
48706 - pipe->writers--;
48707 + atomic_inc(&pipe->readers);
48708 + atomic_dec(&pipe->writers);
48709
48710 - while ((pipe->readers > 1) && (!signal_pending(current))) {
48711 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
48712 wake_up_interruptible_sync(&pipe->wait);
48713 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
48714 pipe_wait(pipe);
48715 }
48716
48717 - pipe->readers--;
48718 - pipe->writers++;
48719 + atomic_dec(&pipe->readers);
48720 + atomic_inc(&pipe->writers);
48721 pipe_unlock(pipe);
48722
48723 }
48724 @@ -1826,10 +2146,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
48725 char **helper_argv = NULL;
48726 int helper_argc = 0;
48727 int dump_count = 0;
48728 - static atomic_t core_dump_count = ATOMIC_INIT(0);
48729 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
48730
48731 audit_core_dumps(signr);
48732
48733 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
48734 + gr_handle_brute_attach(current, mm->flags);
48735 +
48736 binfmt = mm->binfmt;
48737 if (!binfmt || !binfmt->core_dump)
48738 goto fail;
48739 @@ -1874,6 +2197,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
48740 */
48741 clear_thread_flag(TIF_SIGPENDING);
48742
48743 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
48744 +
48745 /*
48746 * lock_kernel() because format_corename() is controlled by sysctl, which
48747 * uses lock_kernel()
48748 @@ -1908,7 +2233,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
48749 goto fail_unlock;
48750 }
48751
48752 - dump_count = atomic_inc_return(&core_dump_count);
48753 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
48754 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
48755 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
48756 task_tgid_vnr(current), current->comm);
48757 @@ -1972,7 +2297,7 @@ close_fail:
48758 filp_close(file, NULL);
48759 fail_dropcount:
48760 if (dump_count)
48761 - atomic_dec(&core_dump_count);
48762 + atomic_dec_unchecked(&core_dump_count);
48763 fail_unlock:
48764 if (helper_argv)
48765 argv_free(helper_argv);
48766 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
48767 index 7f8d2e5..a1abdbb 100644
48768 --- a/fs/ext2/balloc.c
48769 +++ b/fs/ext2/balloc.c
48770 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
48771
48772 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
48773 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
48774 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
48775 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
48776 sbi->s_resuid != current_fsuid() &&
48777 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
48778 return 0;
48779 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
48780 index 27967f9..9f2a5fb 100644
48781 --- a/fs/ext3/balloc.c
48782 +++ b/fs/ext3/balloc.c
48783 @@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
48784
48785 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
48786 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
48787 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
48788 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
48789 sbi->s_resuid != current_fsuid() &&
48790 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
48791 return 0;
48792 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
48793 index e85b63c..80398e6 100644
48794 --- a/fs/ext4/balloc.c
48795 +++ b/fs/ext4/balloc.c
48796 @@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
48797 /* Hm, nope. Are (enough) root reserved blocks available? */
48798 if (sbi->s_resuid == current_fsuid() ||
48799 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
48800 - capable(CAP_SYS_RESOURCE)) {
48801 + capable_nolog(CAP_SYS_RESOURCE)) {
48802 if (free_blocks >= (nblocks + dirty_blocks))
48803 return 1;
48804 }
48805 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
48806 index 67c46ed..1f237e5 100644
48807 --- a/fs/ext4/ext4.h
48808 +++ b/fs/ext4/ext4.h
48809 @@ -1077,19 +1077,19 @@ struct ext4_sb_info {
48810
48811 /* stats for buddy allocator */
48812 spinlock_t s_mb_pa_lock;
48813 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
48814 - atomic_t s_bal_success; /* we found long enough chunks */
48815 - atomic_t s_bal_allocated; /* in blocks */
48816 - atomic_t s_bal_ex_scanned; /* total extents scanned */
48817 - atomic_t s_bal_goals; /* goal hits */
48818 - atomic_t s_bal_breaks; /* too long searches */
48819 - atomic_t s_bal_2orders; /* 2^order hits */
48820 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
48821 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
48822 + atomic_unchecked_t s_bal_allocated; /* in blocks */
48823 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
48824 + atomic_unchecked_t s_bal_goals; /* goal hits */
48825 + atomic_unchecked_t s_bal_breaks; /* too long searches */
48826 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
48827 spinlock_t s_bal_lock;
48828 unsigned long s_mb_buddies_generated;
48829 unsigned long long s_mb_generation_time;
48830 - atomic_t s_mb_lost_chunks;
48831 - atomic_t s_mb_preallocated;
48832 - atomic_t s_mb_discarded;
48833 + atomic_unchecked_t s_mb_lost_chunks;
48834 + atomic_unchecked_t s_mb_preallocated;
48835 + atomic_unchecked_t s_mb_discarded;
48836 atomic_t s_lock_busy;
48837
48838 /* locality groups */
48839 diff --git a/fs/ext4/file.c b/fs/ext4/file.c
48840 index 2a60541..7439d61 100644
48841 --- a/fs/ext4/file.c
48842 +++ b/fs/ext4/file.c
48843 @@ -122,8 +122,8 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
48844 cp = d_path(&path, buf, sizeof(buf));
48845 path_put(&path);
48846 if (!IS_ERR(cp)) {
48847 - memcpy(sbi->s_es->s_last_mounted, cp,
48848 - sizeof(sbi->s_es->s_last_mounted));
48849 + strlcpy(sbi->s_es->s_last_mounted, cp,
48850 + sizeof(sbi->s_es->s_last_mounted));
48851 sb->s_dirt = 1;
48852 }
48853 }
48854 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
48855 index 42bac1b..0aab9d8 100644
48856 --- a/fs/ext4/mballoc.c
48857 +++ b/fs/ext4/mballoc.c
48858 @@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
48859 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
48860
48861 if (EXT4_SB(sb)->s_mb_stats)
48862 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
48863 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
48864
48865 break;
48866 }
48867 @@ -2131,7 +2131,7 @@ repeat:
48868 ac->ac_status = AC_STATUS_CONTINUE;
48869 ac->ac_flags |= EXT4_MB_HINT_FIRST;
48870 cr = 3;
48871 - atomic_inc(&sbi->s_mb_lost_chunks);
48872 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
48873 goto repeat;
48874 }
48875 }
48876 @@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
48877 ext4_grpblk_t counters[16];
48878 } sg;
48879
48880 + pax_track_stack();
48881 +
48882 group--;
48883 if (group == 0)
48884 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
48885 @@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *sb)
48886 if (sbi->s_mb_stats) {
48887 printk(KERN_INFO
48888 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
48889 - atomic_read(&sbi->s_bal_allocated),
48890 - atomic_read(&sbi->s_bal_reqs),
48891 - atomic_read(&sbi->s_bal_success));
48892 + atomic_read_unchecked(&sbi->s_bal_allocated),
48893 + atomic_read_unchecked(&sbi->s_bal_reqs),
48894 + atomic_read_unchecked(&sbi->s_bal_success));
48895 printk(KERN_INFO
48896 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
48897 "%u 2^N hits, %u breaks, %u lost\n",
48898 - atomic_read(&sbi->s_bal_ex_scanned),
48899 - atomic_read(&sbi->s_bal_goals),
48900 - atomic_read(&sbi->s_bal_2orders),
48901 - atomic_read(&sbi->s_bal_breaks),
48902 - atomic_read(&sbi->s_mb_lost_chunks));
48903 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
48904 + atomic_read_unchecked(&sbi->s_bal_goals),
48905 + atomic_read_unchecked(&sbi->s_bal_2orders),
48906 + atomic_read_unchecked(&sbi->s_bal_breaks),
48907 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
48908 printk(KERN_INFO
48909 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
48910 sbi->s_mb_buddies_generated++,
48911 sbi->s_mb_generation_time);
48912 printk(KERN_INFO
48913 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
48914 - atomic_read(&sbi->s_mb_preallocated),
48915 - atomic_read(&sbi->s_mb_discarded));
48916 + atomic_read_unchecked(&sbi->s_mb_preallocated),
48917 + atomic_read_unchecked(&sbi->s_mb_discarded));
48918 }
48919
48920 free_percpu(sbi->s_locality_groups);
48921 @@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
48922 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
48923
48924 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
48925 - atomic_inc(&sbi->s_bal_reqs);
48926 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
48927 + atomic_inc_unchecked(&sbi->s_bal_reqs);
48928 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
48929 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
48930 - atomic_inc(&sbi->s_bal_success);
48931 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
48932 + atomic_inc_unchecked(&sbi->s_bal_success);
48933 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
48934 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
48935 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
48936 - atomic_inc(&sbi->s_bal_goals);
48937 + atomic_inc_unchecked(&sbi->s_bal_goals);
48938 if (ac->ac_found > sbi->s_mb_max_to_scan)
48939 - atomic_inc(&sbi->s_bal_breaks);
48940 + atomic_inc_unchecked(&sbi->s_bal_breaks);
48941 }
48942
48943 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
48944 @@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
48945 trace_ext4_mb_new_inode_pa(ac, pa);
48946
48947 ext4_mb_use_inode_pa(ac, pa);
48948 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
48949 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
48950
48951 ei = EXT4_I(ac->ac_inode);
48952 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
48953 @@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
48954 trace_ext4_mb_new_group_pa(ac, pa);
48955
48956 ext4_mb_use_group_pa(ac, pa);
48957 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
48958 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
48959
48960 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
48961 lg = ac->ac_lg;
48962 @@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
48963 * from the bitmap and continue.
48964 */
48965 }
48966 - atomic_add(free, &sbi->s_mb_discarded);
48967 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
48968
48969 return err;
48970 }
48971 @@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
48972 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
48973 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
48974 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
48975 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
48976 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
48977
48978 if (ac) {
48979 ac->ac_sb = sb;
48980 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
48981 index f27e045..be5a1c3 100644
48982 --- a/fs/ext4/super.c
48983 +++ b/fs/ext4/super.c
48984 @@ -2287,7 +2287,7 @@ static void ext4_sb_release(struct kobject *kobj)
48985 }
48986
48987
48988 -static struct sysfs_ops ext4_attr_ops = {
48989 +static const struct sysfs_ops ext4_attr_ops = {
48990 .show = ext4_attr_show,
48991 .store = ext4_attr_store,
48992 };
48993 diff --git a/fs/fcntl.c b/fs/fcntl.c
48994 index 97e01dc..e9aab2d 100644
48995 --- a/fs/fcntl.c
48996 +++ b/fs/fcntl.c
48997 @@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
48998 if (err)
48999 return err;
49000
49001 + if (gr_handle_chroot_fowner(pid, type))
49002 + return -ENOENT;
49003 + if (gr_check_protected_task_fowner(pid, type))
49004 + return -EACCES;
49005 +
49006 f_modown(filp, pid, type, force);
49007 return 0;
49008 }
49009 @@ -265,7 +270,7 @@ pid_t f_getown(struct file *filp)
49010
49011 static int f_setown_ex(struct file *filp, unsigned long arg)
49012 {
49013 - struct f_owner_ex * __user owner_p = (void * __user)arg;
49014 + struct f_owner_ex __user *owner_p = (void __user *)arg;
49015 struct f_owner_ex owner;
49016 struct pid *pid;
49017 int type;
49018 @@ -305,7 +310,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
49019
49020 static int f_getown_ex(struct file *filp, unsigned long arg)
49021 {
49022 - struct f_owner_ex * __user owner_p = (void * __user)arg;
49023 + struct f_owner_ex __user *owner_p = (void __user *)arg;
49024 struct f_owner_ex owner;
49025 int ret = 0;
49026
49027 @@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
49028 switch (cmd) {
49029 case F_DUPFD:
49030 case F_DUPFD_CLOEXEC:
49031 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
49032 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
49033 break;
49034 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
49035 diff --git a/fs/fifo.c b/fs/fifo.c
49036 index f8f97b8..b1f2259 100644
49037 --- a/fs/fifo.c
49038 +++ b/fs/fifo.c
49039 @@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
49040 */
49041 filp->f_op = &read_pipefifo_fops;
49042 pipe->r_counter++;
49043 - if (pipe->readers++ == 0)
49044 + if (atomic_inc_return(&pipe->readers) == 1)
49045 wake_up_partner(inode);
49046
49047 - if (!pipe->writers) {
49048 + if (!atomic_read(&pipe->writers)) {
49049 if ((filp->f_flags & O_NONBLOCK)) {
49050 /* suppress POLLHUP until we have
49051 * seen a writer */
49052 @@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
49053 * errno=ENXIO when there is no process reading the FIFO.
49054 */
49055 ret = -ENXIO;
49056 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
49057 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
49058 goto err;
49059
49060 filp->f_op = &write_pipefifo_fops;
49061 pipe->w_counter++;
49062 - if (!pipe->writers++)
49063 + if (atomic_inc_return(&pipe->writers) == 1)
49064 wake_up_partner(inode);
49065
49066 - if (!pipe->readers) {
49067 + if (!atomic_read(&pipe->readers)) {
49068 wait_for_partner(inode, &pipe->r_counter);
49069 if (signal_pending(current))
49070 goto err_wr;
49071 @@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
49072 */
49073 filp->f_op = &rdwr_pipefifo_fops;
49074
49075 - pipe->readers++;
49076 - pipe->writers++;
49077 + atomic_inc(&pipe->readers);
49078 + atomic_inc(&pipe->writers);
49079 pipe->r_counter++;
49080 pipe->w_counter++;
49081 - if (pipe->readers == 1 || pipe->writers == 1)
49082 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
49083 wake_up_partner(inode);
49084 break;
49085
49086 @@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
49087 return 0;
49088
49089 err_rd:
49090 - if (!--pipe->readers)
49091 + if (atomic_dec_and_test(&pipe->readers))
49092 wake_up_interruptible(&pipe->wait);
49093 ret = -ERESTARTSYS;
49094 goto err;
49095
49096 err_wr:
49097 - if (!--pipe->writers)
49098 + if (atomic_dec_and_test(&pipe->writers))
49099 wake_up_interruptible(&pipe->wait);
49100 ret = -ERESTARTSYS;
49101 goto err;
49102
49103 err:
49104 - if (!pipe->readers && !pipe->writers)
49105 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
49106 free_pipe_info(inode);
49107
49108 err_nocleanup:
49109 diff --git a/fs/file.c b/fs/file.c
49110 index 87e1290..a930cc4 100644
49111 --- a/fs/file.c
49112 +++ b/fs/file.c
49113 @@ -14,6 +14,7 @@
49114 #include <linux/slab.h>
49115 #include <linux/vmalloc.h>
49116 #include <linux/file.h>
49117 +#include <linux/security.h>
49118 #include <linux/fdtable.h>
49119 #include <linux/bitops.h>
49120 #include <linux/interrupt.h>
49121 @@ -257,6 +258,8 @@ int expand_files(struct files_struct *files, int nr)
49122 * N.B. For clone tasks sharing a files structure, this test
49123 * will limit the total number of files that can be opened.
49124 */
49125 +
49126 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
49127 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
49128 return -EMFILE;
49129
49130 diff --git a/fs/filesystems.c b/fs/filesystems.c
49131 index a24c58e..53f91ee 100644
49132 --- a/fs/filesystems.c
49133 +++ b/fs/filesystems.c
49134 @@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(const char *name)
49135 int len = dot ? dot - name : strlen(name);
49136
49137 fs = __get_fs_type(name, len);
49138 +
49139 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
49140 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
49141 +#else
49142 if (!fs && (request_module("%.*s", len, name) == 0))
49143 +#endif
49144 fs = __get_fs_type(name, len);
49145
49146 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
49147 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
49148 index eee0590..ef5bc0e 100644
49149 --- a/fs/fs_struct.c
49150 +++ b/fs/fs_struct.c
49151 @@ -4,6 +4,7 @@
49152 #include <linux/path.h>
49153 #include <linux/slab.h>
49154 #include <linux/fs_struct.h>
49155 +#include <linux/grsecurity.h>
49156
49157 /*
49158 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
49159 @@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
49160 old_root = fs->root;
49161 fs->root = *path;
49162 path_get(path);
49163 + gr_set_chroot_entries(current, path);
49164 write_unlock(&fs->lock);
49165 if (old_root.dentry)
49166 path_put(&old_root);
49167 @@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
49168 && fs->root.mnt == old_root->mnt) {
49169 path_get(new_root);
49170 fs->root = *new_root;
49171 + gr_set_chroot_entries(p, new_root);
49172 count++;
49173 }
49174 if (fs->pwd.dentry == old_root->dentry
49175 @@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
49176 task_lock(tsk);
49177 write_lock(&fs->lock);
49178 tsk->fs = NULL;
49179 - kill = !--fs->users;
49180 + gr_clear_chroot_entries(tsk);
49181 + kill = !atomic_dec_return(&fs->users);
49182 write_unlock(&fs->lock);
49183 task_unlock(tsk);
49184 if (kill)
49185 @@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
49186 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
49187 /* We don't need to lock fs - think why ;-) */
49188 if (fs) {
49189 - fs->users = 1;
49190 + atomic_set(&fs->users, 1);
49191 fs->in_exec = 0;
49192 rwlock_init(&fs->lock);
49193 fs->umask = old->umask;
49194 @@ -127,8 +131,9 @@ int unshare_fs_struct(void)
49195
49196 task_lock(current);
49197 write_lock(&fs->lock);
49198 - kill = !--fs->users;
49199 + kill = !atomic_dec_return(&fs->users);
49200 current->fs = new_fs;
49201 + gr_set_chroot_entries(current, &new_fs->root);
49202 write_unlock(&fs->lock);
49203 task_unlock(current);
49204
49205 @@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask);
49206
49207 /* to be mentioned only in INIT_TASK */
49208 struct fs_struct init_fs = {
49209 - .users = 1,
49210 + .users = ATOMIC_INIT(1),
49211 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
49212 .umask = 0022,
49213 };
49214 @@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
49215 task_lock(current);
49216
49217 write_lock(&init_fs.lock);
49218 - init_fs.users++;
49219 + atomic_inc(&init_fs.users);
49220 write_unlock(&init_fs.lock);
49221
49222 write_lock(&fs->lock);
49223 current->fs = &init_fs;
49224 - kill = !--fs->users;
49225 + gr_set_chroot_entries(current, &current->fs->root);
49226 + kill = !atomic_dec_return(&fs->users);
49227 write_unlock(&fs->lock);
49228
49229 task_unlock(current);
49230 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
49231 index 9905350..02eaec4 100644
49232 --- a/fs/fscache/cookie.c
49233 +++ b/fs/fscache/cookie.c
49234 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
49235 parent ? (char *) parent->def->name : "<no-parent>",
49236 def->name, netfs_data);
49237
49238 - fscache_stat(&fscache_n_acquires);
49239 + fscache_stat_unchecked(&fscache_n_acquires);
49240
49241 /* if there's no parent cookie, then we don't create one here either */
49242 if (!parent) {
49243 - fscache_stat(&fscache_n_acquires_null);
49244 + fscache_stat_unchecked(&fscache_n_acquires_null);
49245 _leave(" [no parent]");
49246 return NULL;
49247 }
49248 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
49249 /* allocate and initialise a cookie */
49250 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
49251 if (!cookie) {
49252 - fscache_stat(&fscache_n_acquires_oom);
49253 + fscache_stat_unchecked(&fscache_n_acquires_oom);
49254 _leave(" [ENOMEM]");
49255 return NULL;
49256 }
49257 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
49258
49259 switch (cookie->def->type) {
49260 case FSCACHE_COOKIE_TYPE_INDEX:
49261 - fscache_stat(&fscache_n_cookie_index);
49262 + fscache_stat_unchecked(&fscache_n_cookie_index);
49263 break;
49264 case FSCACHE_COOKIE_TYPE_DATAFILE:
49265 - fscache_stat(&fscache_n_cookie_data);
49266 + fscache_stat_unchecked(&fscache_n_cookie_data);
49267 break;
49268 default:
49269 - fscache_stat(&fscache_n_cookie_special);
49270 + fscache_stat_unchecked(&fscache_n_cookie_special);
49271 break;
49272 }
49273
49274 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
49275 if (fscache_acquire_non_index_cookie(cookie) < 0) {
49276 atomic_dec(&parent->n_children);
49277 __fscache_cookie_put(cookie);
49278 - fscache_stat(&fscache_n_acquires_nobufs);
49279 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
49280 _leave(" = NULL");
49281 return NULL;
49282 }
49283 }
49284
49285 - fscache_stat(&fscache_n_acquires_ok);
49286 + fscache_stat_unchecked(&fscache_n_acquires_ok);
49287 _leave(" = %p", cookie);
49288 return cookie;
49289 }
49290 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
49291 cache = fscache_select_cache_for_object(cookie->parent);
49292 if (!cache) {
49293 up_read(&fscache_addremove_sem);
49294 - fscache_stat(&fscache_n_acquires_no_cache);
49295 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
49296 _leave(" = -ENOMEDIUM [no cache]");
49297 return -ENOMEDIUM;
49298 }
49299 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
49300 object = cache->ops->alloc_object(cache, cookie);
49301 fscache_stat_d(&fscache_n_cop_alloc_object);
49302 if (IS_ERR(object)) {
49303 - fscache_stat(&fscache_n_object_no_alloc);
49304 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
49305 ret = PTR_ERR(object);
49306 goto error;
49307 }
49308
49309 - fscache_stat(&fscache_n_object_alloc);
49310 + fscache_stat_unchecked(&fscache_n_object_alloc);
49311
49312 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
49313
49314 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
49315 struct fscache_object *object;
49316 struct hlist_node *_p;
49317
49318 - fscache_stat(&fscache_n_updates);
49319 + fscache_stat_unchecked(&fscache_n_updates);
49320
49321 if (!cookie) {
49322 - fscache_stat(&fscache_n_updates_null);
49323 + fscache_stat_unchecked(&fscache_n_updates_null);
49324 _leave(" [no cookie]");
49325 return;
49326 }
49327 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
49328 struct fscache_object *object;
49329 unsigned long event;
49330
49331 - fscache_stat(&fscache_n_relinquishes);
49332 + fscache_stat_unchecked(&fscache_n_relinquishes);
49333 if (retire)
49334 - fscache_stat(&fscache_n_relinquishes_retire);
49335 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
49336
49337 if (!cookie) {
49338 - fscache_stat(&fscache_n_relinquishes_null);
49339 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
49340 _leave(" [no cookie]");
49341 return;
49342 }
49343 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
49344
49345 /* wait for the cookie to finish being instantiated (or to fail) */
49346 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
49347 - fscache_stat(&fscache_n_relinquishes_waitcrt);
49348 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
49349 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
49350 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
49351 }
49352 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
49353 index edd7434..0725e66 100644
49354 --- a/fs/fscache/internal.h
49355 +++ b/fs/fscache/internal.h
49356 @@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
49357 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
49358 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
49359
49360 -extern atomic_t fscache_n_op_pend;
49361 -extern atomic_t fscache_n_op_run;
49362 -extern atomic_t fscache_n_op_enqueue;
49363 -extern atomic_t fscache_n_op_deferred_release;
49364 -extern atomic_t fscache_n_op_release;
49365 -extern atomic_t fscache_n_op_gc;
49366 -extern atomic_t fscache_n_op_cancelled;
49367 -extern atomic_t fscache_n_op_rejected;
49368 +extern atomic_unchecked_t fscache_n_op_pend;
49369 +extern atomic_unchecked_t fscache_n_op_run;
49370 +extern atomic_unchecked_t fscache_n_op_enqueue;
49371 +extern atomic_unchecked_t fscache_n_op_deferred_release;
49372 +extern atomic_unchecked_t fscache_n_op_release;
49373 +extern atomic_unchecked_t fscache_n_op_gc;
49374 +extern atomic_unchecked_t fscache_n_op_cancelled;
49375 +extern atomic_unchecked_t fscache_n_op_rejected;
49376
49377 -extern atomic_t fscache_n_attr_changed;
49378 -extern atomic_t fscache_n_attr_changed_ok;
49379 -extern atomic_t fscache_n_attr_changed_nobufs;
49380 -extern atomic_t fscache_n_attr_changed_nomem;
49381 -extern atomic_t fscache_n_attr_changed_calls;
49382 +extern atomic_unchecked_t fscache_n_attr_changed;
49383 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
49384 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
49385 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
49386 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
49387
49388 -extern atomic_t fscache_n_allocs;
49389 -extern atomic_t fscache_n_allocs_ok;
49390 -extern atomic_t fscache_n_allocs_wait;
49391 -extern atomic_t fscache_n_allocs_nobufs;
49392 -extern atomic_t fscache_n_allocs_intr;
49393 -extern atomic_t fscache_n_allocs_object_dead;
49394 -extern atomic_t fscache_n_alloc_ops;
49395 -extern atomic_t fscache_n_alloc_op_waits;
49396 +extern atomic_unchecked_t fscache_n_allocs;
49397 +extern atomic_unchecked_t fscache_n_allocs_ok;
49398 +extern atomic_unchecked_t fscache_n_allocs_wait;
49399 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
49400 +extern atomic_unchecked_t fscache_n_allocs_intr;
49401 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
49402 +extern atomic_unchecked_t fscache_n_alloc_ops;
49403 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
49404
49405 -extern atomic_t fscache_n_retrievals;
49406 -extern atomic_t fscache_n_retrievals_ok;
49407 -extern atomic_t fscache_n_retrievals_wait;
49408 -extern atomic_t fscache_n_retrievals_nodata;
49409 -extern atomic_t fscache_n_retrievals_nobufs;
49410 -extern atomic_t fscache_n_retrievals_intr;
49411 -extern atomic_t fscache_n_retrievals_nomem;
49412 -extern atomic_t fscache_n_retrievals_object_dead;
49413 -extern atomic_t fscache_n_retrieval_ops;
49414 -extern atomic_t fscache_n_retrieval_op_waits;
49415 +extern atomic_unchecked_t fscache_n_retrievals;
49416 +extern atomic_unchecked_t fscache_n_retrievals_ok;
49417 +extern atomic_unchecked_t fscache_n_retrievals_wait;
49418 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
49419 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
49420 +extern atomic_unchecked_t fscache_n_retrievals_intr;
49421 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
49422 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
49423 +extern atomic_unchecked_t fscache_n_retrieval_ops;
49424 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
49425
49426 -extern atomic_t fscache_n_stores;
49427 -extern atomic_t fscache_n_stores_ok;
49428 -extern atomic_t fscache_n_stores_again;
49429 -extern atomic_t fscache_n_stores_nobufs;
49430 -extern atomic_t fscache_n_stores_oom;
49431 -extern atomic_t fscache_n_store_ops;
49432 -extern atomic_t fscache_n_store_calls;
49433 -extern atomic_t fscache_n_store_pages;
49434 -extern atomic_t fscache_n_store_radix_deletes;
49435 -extern atomic_t fscache_n_store_pages_over_limit;
49436 +extern atomic_unchecked_t fscache_n_stores;
49437 +extern atomic_unchecked_t fscache_n_stores_ok;
49438 +extern atomic_unchecked_t fscache_n_stores_again;
49439 +extern atomic_unchecked_t fscache_n_stores_nobufs;
49440 +extern atomic_unchecked_t fscache_n_stores_oom;
49441 +extern atomic_unchecked_t fscache_n_store_ops;
49442 +extern atomic_unchecked_t fscache_n_store_calls;
49443 +extern atomic_unchecked_t fscache_n_store_pages;
49444 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
49445 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
49446
49447 -extern atomic_t fscache_n_store_vmscan_not_storing;
49448 -extern atomic_t fscache_n_store_vmscan_gone;
49449 -extern atomic_t fscache_n_store_vmscan_busy;
49450 -extern atomic_t fscache_n_store_vmscan_cancelled;
49451 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
49452 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
49453 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
49454 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
49455
49456 -extern atomic_t fscache_n_marks;
49457 -extern atomic_t fscache_n_uncaches;
49458 +extern atomic_unchecked_t fscache_n_marks;
49459 +extern atomic_unchecked_t fscache_n_uncaches;
49460
49461 -extern atomic_t fscache_n_acquires;
49462 -extern atomic_t fscache_n_acquires_null;
49463 -extern atomic_t fscache_n_acquires_no_cache;
49464 -extern atomic_t fscache_n_acquires_ok;
49465 -extern atomic_t fscache_n_acquires_nobufs;
49466 -extern atomic_t fscache_n_acquires_oom;
49467 +extern atomic_unchecked_t fscache_n_acquires;
49468 +extern atomic_unchecked_t fscache_n_acquires_null;
49469 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
49470 +extern atomic_unchecked_t fscache_n_acquires_ok;
49471 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
49472 +extern atomic_unchecked_t fscache_n_acquires_oom;
49473
49474 -extern atomic_t fscache_n_updates;
49475 -extern atomic_t fscache_n_updates_null;
49476 -extern atomic_t fscache_n_updates_run;
49477 +extern atomic_unchecked_t fscache_n_updates;
49478 +extern atomic_unchecked_t fscache_n_updates_null;
49479 +extern atomic_unchecked_t fscache_n_updates_run;
49480
49481 -extern atomic_t fscache_n_relinquishes;
49482 -extern atomic_t fscache_n_relinquishes_null;
49483 -extern atomic_t fscache_n_relinquishes_waitcrt;
49484 -extern atomic_t fscache_n_relinquishes_retire;
49485 +extern atomic_unchecked_t fscache_n_relinquishes;
49486 +extern atomic_unchecked_t fscache_n_relinquishes_null;
49487 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
49488 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
49489
49490 -extern atomic_t fscache_n_cookie_index;
49491 -extern atomic_t fscache_n_cookie_data;
49492 -extern atomic_t fscache_n_cookie_special;
49493 +extern atomic_unchecked_t fscache_n_cookie_index;
49494 +extern atomic_unchecked_t fscache_n_cookie_data;
49495 +extern atomic_unchecked_t fscache_n_cookie_special;
49496
49497 -extern atomic_t fscache_n_object_alloc;
49498 -extern atomic_t fscache_n_object_no_alloc;
49499 -extern atomic_t fscache_n_object_lookups;
49500 -extern atomic_t fscache_n_object_lookups_negative;
49501 -extern atomic_t fscache_n_object_lookups_positive;
49502 -extern atomic_t fscache_n_object_lookups_timed_out;
49503 -extern atomic_t fscache_n_object_created;
49504 -extern atomic_t fscache_n_object_avail;
49505 -extern atomic_t fscache_n_object_dead;
49506 +extern atomic_unchecked_t fscache_n_object_alloc;
49507 +extern atomic_unchecked_t fscache_n_object_no_alloc;
49508 +extern atomic_unchecked_t fscache_n_object_lookups;
49509 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
49510 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
49511 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
49512 +extern atomic_unchecked_t fscache_n_object_created;
49513 +extern atomic_unchecked_t fscache_n_object_avail;
49514 +extern atomic_unchecked_t fscache_n_object_dead;
49515
49516 -extern atomic_t fscache_n_checkaux_none;
49517 -extern atomic_t fscache_n_checkaux_okay;
49518 -extern atomic_t fscache_n_checkaux_update;
49519 -extern atomic_t fscache_n_checkaux_obsolete;
49520 +extern atomic_unchecked_t fscache_n_checkaux_none;
49521 +extern atomic_unchecked_t fscache_n_checkaux_okay;
49522 +extern atomic_unchecked_t fscache_n_checkaux_update;
49523 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
49524
49525 extern atomic_t fscache_n_cop_alloc_object;
49526 extern atomic_t fscache_n_cop_lookup_object;
49527 @@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t *stat)
49528 atomic_inc(stat);
49529 }
49530
49531 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
49532 +{
49533 + atomic_inc_unchecked(stat);
49534 +}
49535 +
49536 static inline void fscache_stat_d(atomic_t *stat)
49537 {
49538 atomic_dec(stat);
49539 @@ -259,6 +264,7 @@ extern const struct file_operations fscache_stats_fops;
49540
49541 #define __fscache_stat(stat) (NULL)
49542 #define fscache_stat(stat) do {} while (0)
49543 +#define fscache_stat_unchecked(stat) do {} while (0)
49544 #define fscache_stat_d(stat) do {} while (0)
49545 #endif
49546
49547 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
49548 index e513ac5..e888d34 100644
49549 --- a/fs/fscache/object.c
49550 +++ b/fs/fscache/object.c
49551 @@ -144,7 +144,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
49552 /* update the object metadata on disk */
49553 case FSCACHE_OBJECT_UPDATING:
49554 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
49555 - fscache_stat(&fscache_n_updates_run);
49556 + fscache_stat_unchecked(&fscache_n_updates_run);
49557 fscache_stat(&fscache_n_cop_update_object);
49558 object->cache->ops->update_object(object);
49559 fscache_stat_d(&fscache_n_cop_update_object);
49560 @@ -233,7 +233,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
49561 spin_lock(&object->lock);
49562 object->state = FSCACHE_OBJECT_DEAD;
49563 spin_unlock(&object->lock);
49564 - fscache_stat(&fscache_n_object_dead);
49565 + fscache_stat_unchecked(&fscache_n_object_dead);
49566 goto terminal_transit;
49567
49568 /* handle the parent cache of this object being withdrawn from
49569 @@ -248,7 +248,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
49570 spin_lock(&object->lock);
49571 object->state = FSCACHE_OBJECT_DEAD;
49572 spin_unlock(&object->lock);
49573 - fscache_stat(&fscache_n_object_dead);
49574 + fscache_stat_unchecked(&fscache_n_object_dead);
49575 goto terminal_transit;
49576
49577 /* complain about the object being woken up once it is
49578 @@ -492,7 +492,7 @@ static void fscache_lookup_object(struct fscache_object *object)
49579 parent->cookie->def->name, cookie->def->name,
49580 object->cache->tag->name);
49581
49582 - fscache_stat(&fscache_n_object_lookups);
49583 + fscache_stat_unchecked(&fscache_n_object_lookups);
49584 fscache_stat(&fscache_n_cop_lookup_object);
49585 ret = object->cache->ops->lookup_object(object);
49586 fscache_stat_d(&fscache_n_cop_lookup_object);
49587 @@ -503,7 +503,7 @@ static void fscache_lookup_object(struct fscache_object *object)
49588 if (ret == -ETIMEDOUT) {
49589 /* probably stuck behind another object, so move this one to
49590 * the back of the queue */
49591 - fscache_stat(&fscache_n_object_lookups_timed_out);
49592 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
49593 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
49594 }
49595
49596 @@ -526,7 +526,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
49597
49598 spin_lock(&object->lock);
49599 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
49600 - fscache_stat(&fscache_n_object_lookups_negative);
49601 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
49602
49603 /* transit here to allow write requests to begin stacking up
49604 * and read requests to begin returning ENODATA */
49605 @@ -572,7 +572,7 @@ void fscache_obtained_object(struct fscache_object *object)
49606 * result, in which case there may be data available */
49607 spin_lock(&object->lock);
49608 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
49609 - fscache_stat(&fscache_n_object_lookups_positive);
49610 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
49611
49612 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
49613
49614 @@ -586,7 +586,7 @@ void fscache_obtained_object(struct fscache_object *object)
49615 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
49616 } else {
49617 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
49618 - fscache_stat(&fscache_n_object_created);
49619 + fscache_stat_unchecked(&fscache_n_object_created);
49620
49621 object->state = FSCACHE_OBJECT_AVAILABLE;
49622 spin_unlock(&object->lock);
49623 @@ -633,7 +633,7 @@ static void fscache_object_available(struct fscache_object *object)
49624 fscache_enqueue_dependents(object);
49625
49626 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
49627 - fscache_stat(&fscache_n_object_avail);
49628 + fscache_stat_unchecked(&fscache_n_object_avail);
49629
49630 _leave("");
49631 }
49632 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
49633 enum fscache_checkaux result;
49634
49635 if (!object->cookie->def->check_aux) {
49636 - fscache_stat(&fscache_n_checkaux_none);
49637 + fscache_stat_unchecked(&fscache_n_checkaux_none);
49638 return FSCACHE_CHECKAUX_OKAY;
49639 }
49640
49641 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
49642 switch (result) {
49643 /* entry okay as is */
49644 case FSCACHE_CHECKAUX_OKAY:
49645 - fscache_stat(&fscache_n_checkaux_okay);
49646 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
49647 break;
49648
49649 /* entry requires update */
49650 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
49651 - fscache_stat(&fscache_n_checkaux_update);
49652 + fscache_stat_unchecked(&fscache_n_checkaux_update);
49653 break;
49654
49655 /* entry requires deletion */
49656 case FSCACHE_CHECKAUX_OBSOLETE:
49657 - fscache_stat(&fscache_n_checkaux_obsolete);
49658 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
49659 break;
49660
49661 default:
49662 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
49663 index 313e79a..775240f 100644
49664 --- a/fs/fscache/operation.c
49665 +++ b/fs/fscache/operation.c
49666 @@ -16,7 +16,7 @@
49667 #include <linux/seq_file.h>
49668 #include "internal.h"
49669
49670 -atomic_t fscache_op_debug_id;
49671 +atomic_unchecked_t fscache_op_debug_id;
49672 EXPORT_SYMBOL(fscache_op_debug_id);
49673
49674 /**
49675 @@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
49676 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
49677 ASSERTCMP(atomic_read(&op->usage), >, 0);
49678
49679 - fscache_stat(&fscache_n_op_enqueue);
49680 + fscache_stat_unchecked(&fscache_n_op_enqueue);
49681 switch (op->flags & FSCACHE_OP_TYPE) {
49682 case FSCACHE_OP_FAST:
49683 _debug("queue fast");
49684 @@ -76,7 +76,7 @@ static void fscache_run_op(struct fscache_object *object,
49685 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
49686 if (op->processor)
49687 fscache_enqueue_operation(op);
49688 - fscache_stat(&fscache_n_op_run);
49689 + fscache_stat_unchecked(&fscache_n_op_run);
49690 }
49691
49692 /*
49693 @@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
49694 if (object->n_ops > 0) {
49695 atomic_inc(&op->usage);
49696 list_add_tail(&op->pend_link, &object->pending_ops);
49697 - fscache_stat(&fscache_n_op_pend);
49698 + fscache_stat_unchecked(&fscache_n_op_pend);
49699 } else if (!list_empty(&object->pending_ops)) {
49700 atomic_inc(&op->usage);
49701 list_add_tail(&op->pend_link, &object->pending_ops);
49702 - fscache_stat(&fscache_n_op_pend);
49703 + fscache_stat_unchecked(&fscache_n_op_pend);
49704 fscache_start_operations(object);
49705 } else {
49706 ASSERTCMP(object->n_in_progress, ==, 0);
49707 @@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
49708 object->n_exclusive++; /* reads and writes must wait */
49709 atomic_inc(&op->usage);
49710 list_add_tail(&op->pend_link, &object->pending_ops);
49711 - fscache_stat(&fscache_n_op_pend);
49712 + fscache_stat_unchecked(&fscache_n_op_pend);
49713 ret = 0;
49714 } else {
49715 /* not allowed to submit ops in any other state */
49716 @@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_object *object,
49717 if (object->n_exclusive > 0) {
49718 atomic_inc(&op->usage);
49719 list_add_tail(&op->pend_link, &object->pending_ops);
49720 - fscache_stat(&fscache_n_op_pend);
49721 + fscache_stat_unchecked(&fscache_n_op_pend);
49722 } else if (!list_empty(&object->pending_ops)) {
49723 atomic_inc(&op->usage);
49724 list_add_tail(&op->pend_link, &object->pending_ops);
49725 - fscache_stat(&fscache_n_op_pend);
49726 + fscache_stat_unchecked(&fscache_n_op_pend);
49727 fscache_start_operations(object);
49728 } else {
49729 ASSERTCMP(object->n_exclusive, ==, 0);
49730 @@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_object *object,
49731 object->n_ops++;
49732 atomic_inc(&op->usage);
49733 list_add_tail(&op->pend_link, &object->pending_ops);
49734 - fscache_stat(&fscache_n_op_pend);
49735 + fscache_stat_unchecked(&fscache_n_op_pend);
49736 ret = 0;
49737 } else if (object->state == FSCACHE_OBJECT_DYING ||
49738 object->state == FSCACHE_OBJECT_LC_DYING ||
49739 object->state == FSCACHE_OBJECT_WITHDRAWING) {
49740 - fscache_stat(&fscache_n_op_rejected);
49741 + fscache_stat_unchecked(&fscache_n_op_rejected);
49742 ret = -ENOBUFS;
49743 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
49744 fscache_report_unexpected_submission(object, op, ostate);
49745 @@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_operation *op)
49746
49747 ret = -EBUSY;
49748 if (!list_empty(&op->pend_link)) {
49749 - fscache_stat(&fscache_n_op_cancelled);
49750 + fscache_stat_unchecked(&fscache_n_op_cancelled);
49751 list_del_init(&op->pend_link);
49752 object->n_ops--;
49753 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
49754 @@ -344,7 +344,7 @@ void fscache_put_operation(struct fscache_operation *op)
49755 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
49756 BUG();
49757
49758 - fscache_stat(&fscache_n_op_release);
49759 + fscache_stat_unchecked(&fscache_n_op_release);
49760
49761 if (op->release) {
49762 op->release(op);
49763 @@ -361,7 +361,7 @@ void fscache_put_operation(struct fscache_operation *op)
49764 * lock, and defer it otherwise */
49765 if (!spin_trylock(&object->lock)) {
49766 _debug("defer put");
49767 - fscache_stat(&fscache_n_op_deferred_release);
49768 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
49769
49770 cache = object->cache;
49771 spin_lock(&cache->op_gc_list_lock);
49772 @@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_struct *work)
49773
49774 _debug("GC DEFERRED REL OBJ%x OP%x",
49775 object->debug_id, op->debug_id);
49776 - fscache_stat(&fscache_n_op_gc);
49777 + fscache_stat_unchecked(&fscache_n_op_gc);
49778
49779 ASSERTCMP(atomic_read(&op->usage), ==, 0);
49780
49781 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
49782 index c598ea4..6aac13e 100644
49783 --- a/fs/fscache/page.c
49784 +++ b/fs/fscache/page.c
49785 @@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
49786 val = radix_tree_lookup(&cookie->stores, page->index);
49787 if (!val) {
49788 rcu_read_unlock();
49789 - fscache_stat(&fscache_n_store_vmscan_not_storing);
49790 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
49791 __fscache_uncache_page(cookie, page);
49792 return true;
49793 }
49794 @@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
49795 spin_unlock(&cookie->stores_lock);
49796
49797 if (xpage) {
49798 - fscache_stat(&fscache_n_store_vmscan_cancelled);
49799 - fscache_stat(&fscache_n_store_radix_deletes);
49800 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
49801 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
49802 ASSERTCMP(xpage, ==, page);
49803 } else {
49804 - fscache_stat(&fscache_n_store_vmscan_gone);
49805 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
49806 }
49807
49808 wake_up_bit(&cookie->flags, 0);
49809 @@ -106,7 +106,7 @@ page_busy:
49810 /* we might want to wait here, but that could deadlock the allocator as
49811 * the slow-work threads writing to the cache may all end up sleeping
49812 * on memory allocation */
49813 - fscache_stat(&fscache_n_store_vmscan_busy);
49814 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
49815 return false;
49816 }
49817 EXPORT_SYMBOL(__fscache_maybe_release_page);
49818 @@ -130,7 +130,7 @@ static void fscache_end_page_write(struct fscache_object *object,
49819 FSCACHE_COOKIE_STORING_TAG);
49820 if (!radix_tree_tag_get(&cookie->stores, page->index,
49821 FSCACHE_COOKIE_PENDING_TAG)) {
49822 - fscache_stat(&fscache_n_store_radix_deletes);
49823 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
49824 xpage = radix_tree_delete(&cookie->stores, page->index);
49825 }
49826 spin_unlock(&cookie->stores_lock);
49827 @@ -151,7 +151,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
49828
49829 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
49830
49831 - fscache_stat(&fscache_n_attr_changed_calls);
49832 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
49833
49834 if (fscache_object_is_active(object)) {
49835 fscache_set_op_state(op, "CallFS");
49836 @@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
49837
49838 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
49839
49840 - fscache_stat(&fscache_n_attr_changed);
49841 + fscache_stat_unchecked(&fscache_n_attr_changed);
49842
49843 op = kzalloc(sizeof(*op), GFP_KERNEL);
49844 if (!op) {
49845 - fscache_stat(&fscache_n_attr_changed_nomem);
49846 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
49847 _leave(" = -ENOMEM");
49848 return -ENOMEM;
49849 }
49850 @@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
49851 if (fscache_submit_exclusive_op(object, op) < 0)
49852 goto nobufs;
49853 spin_unlock(&cookie->lock);
49854 - fscache_stat(&fscache_n_attr_changed_ok);
49855 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
49856 fscache_put_operation(op);
49857 _leave(" = 0");
49858 return 0;
49859 @@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
49860 nobufs:
49861 spin_unlock(&cookie->lock);
49862 kfree(op);
49863 - fscache_stat(&fscache_n_attr_changed_nobufs);
49864 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
49865 _leave(" = %d", -ENOBUFS);
49866 return -ENOBUFS;
49867 }
49868 @@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
49869 /* allocate a retrieval operation and attempt to submit it */
49870 op = kzalloc(sizeof(*op), GFP_NOIO);
49871 if (!op) {
49872 - fscache_stat(&fscache_n_retrievals_nomem);
49873 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
49874 return NULL;
49875 }
49876
49877 @@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
49878 return 0;
49879 }
49880
49881 - fscache_stat(&fscache_n_retrievals_wait);
49882 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
49883
49884 jif = jiffies;
49885 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
49886 fscache_wait_bit_interruptible,
49887 TASK_INTERRUPTIBLE) != 0) {
49888 - fscache_stat(&fscache_n_retrievals_intr);
49889 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
49890 _leave(" = -ERESTARTSYS");
49891 return -ERESTARTSYS;
49892 }
49893 @@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
49894 */
49895 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
49896 struct fscache_retrieval *op,
49897 - atomic_t *stat_op_waits,
49898 - atomic_t *stat_object_dead)
49899 + atomic_unchecked_t *stat_op_waits,
49900 + atomic_unchecked_t *stat_object_dead)
49901 {
49902 int ret;
49903
49904 @@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
49905 goto check_if_dead;
49906
49907 _debug(">>> WT");
49908 - fscache_stat(stat_op_waits);
49909 + fscache_stat_unchecked(stat_op_waits);
49910 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
49911 fscache_wait_bit_interruptible,
49912 TASK_INTERRUPTIBLE) < 0) {
49913 @@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
49914
49915 check_if_dead:
49916 if (unlikely(fscache_object_is_dead(object))) {
49917 - fscache_stat(stat_object_dead);
49918 + fscache_stat_unchecked(stat_object_dead);
49919 return -ENOBUFS;
49920 }
49921 return 0;
49922 @@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
49923
49924 _enter("%p,%p,,,", cookie, page);
49925
49926 - fscache_stat(&fscache_n_retrievals);
49927 + fscache_stat_unchecked(&fscache_n_retrievals);
49928
49929 if (hlist_empty(&cookie->backing_objects))
49930 goto nobufs;
49931 @@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
49932 goto nobufs_unlock;
49933 spin_unlock(&cookie->lock);
49934
49935 - fscache_stat(&fscache_n_retrieval_ops);
49936 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
49937
49938 /* pin the netfs read context in case we need to do the actual netfs
49939 * read because we've encountered a cache read failure */
49940 @@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
49941
49942 error:
49943 if (ret == -ENOMEM)
49944 - fscache_stat(&fscache_n_retrievals_nomem);
49945 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
49946 else if (ret == -ERESTARTSYS)
49947 - fscache_stat(&fscache_n_retrievals_intr);
49948 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
49949 else if (ret == -ENODATA)
49950 - fscache_stat(&fscache_n_retrievals_nodata);
49951 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
49952 else if (ret < 0)
49953 - fscache_stat(&fscache_n_retrievals_nobufs);
49954 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
49955 else
49956 - fscache_stat(&fscache_n_retrievals_ok);
49957 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
49958
49959 fscache_put_retrieval(op);
49960 _leave(" = %d", ret);
49961 @@ -453,7 +453,7 @@ nobufs_unlock:
49962 spin_unlock(&cookie->lock);
49963 kfree(op);
49964 nobufs:
49965 - fscache_stat(&fscache_n_retrievals_nobufs);
49966 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
49967 _leave(" = -ENOBUFS");
49968 return -ENOBUFS;
49969 }
49970 @@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
49971
49972 _enter("%p,,%d,,,", cookie, *nr_pages);
49973
49974 - fscache_stat(&fscache_n_retrievals);
49975 + fscache_stat_unchecked(&fscache_n_retrievals);
49976
49977 if (hlist_empty(&cookie->backing_objects))
49978 goto nobufs;
49979 @@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
49980 goto nobufs_unlock;
49981 spin_unlock(&cookie->lock);
49982
49983 - fscache_stat(&fscache_n_retrieval_ops);
49984 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
49985
49986 /* pin the netfs read context in case we need to do the actual netfs
49987 * read because we've encountered a cache read failure */
49988 @@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
49989
49990 error:
49991 if (ret == -ENOMEM)
49992 - fscache_stat(&fscache_n_retrievals_nomem);
49993 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
49994 else if (ret == -ERESTARTSYS)
49995 - fscache_stat(&fscache_n_retrievals_intr);
49996 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
49997 else if (ret == -ENODATA)
49998 - fscache_stat(&fscache_n_retrievals_nodata);
49999 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
50000 else if (ret < 0)
50001 - fscache_stat(&fscache_n_retrievals_nobufs);
50002 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50003 else
50004 - fscache_stat(&fscache_n_retrievals_ok);
50005 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
50006
50007 fscache_put_retrieval(op);
50008 _leave(" = %d", ret);
50009 @@ -570,7 +570,7 @@ nobufs_unlock:
50010 spin_unlock(&cookie->lock);
50011 kfree(op);
50012 nobufs:
50013 - fscache_stat(&fscache_n_retrievals_nobufs);
50014 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50015 _leave(" = -ENOBUFS");
50016 return -ENOBUFS;
50017 }
50018 @@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50019
50020 _enter("%p,%p,,,", cookie, page);
50021
50022 - fscache_stat(&fscache_n_allocs);
50023 + fscache_stat_unchecked(&fscache_n_allocs);
50024
50025 if (hlist_empty(&cookie->backing_objects))
50026 goto nobufs;
50027 @@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50028 goto nobufs_unlock;
50029 spin_unlock(&cookie->lock);
50030
50031 - fscache_stat(&fscache_n_alloc_ops);
50032 + fscache_stat_unchecked(&fscache_n_alloc_ops);
50033
50034 ret = fscache_wait_for_retrieval_activation(
50035 object, op,
50036 @@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50037
50038 error:
50039 if (ret == -ERESTARTSYS)
50040 - fscache_stat(&fscache_n_allocs_intr);
50041 + fscache_stat_unchecked(&fscache_n_allocs_intr);
50042 else if (ret < 0)
50043 - fscache_stat(&fscache_n_allocs_nobufs);
50044 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
50045 else
50046 - fscache_stat(&fscache_n_allocs_ok);
50047 + fscache_stat_unchecked(&fscache_n_allocs_ok);
50048
50049 fscache_put_retrieval(op);
50050 _leave(" = %d", ret);
50051 @@ -651,7 +651,7 @@ nobufs_unlock:
50052 spin_unlock(&cookie->lock);
50053 kfree(op);
50054 nobufs:
50055 - fscache_stat(&fscache_n_allocs_nobufs);
50056 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
50057 _leave(" = -ENOBUFS");
50058 return -ENOBUFS;
50059 }
50060 @@ -694,7 +694,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50061
50062 spin_lock(&cookie->stores_lock);
50063
50064 - fscache_stat(&fscache_n_store_calls);
50065 + fscache_stat_unchecked(&fscache_n_store_calls);
50066
50067 /* find a page to store */
50068 page = NULL;
50069 @@ -705,7 +705,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50070 page = results[0];
50071 _debug("gang %d [%lx]", n, page->index);
50072 if (page->index > op->store_limit) {
50073 - fscache_stat(&fscache_n_store_pages_over_limit);
50074 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
50075 goto superseded;
50076 }
50077
50078 @@ -721,7 +721,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50079
50080 if (page) {
50081 fscache_set_op_state(&op->op, "Store");
50082 - fscache_stat(&fscache_n_store_pages);
50083 + fscache_stat_unchecked(&fscache_n_store_pages);
50084 fscache_stat(&fscache_n_cop_write_page);
50085 ret = object->cache->ops->write_page(op, page);
50086 fscache_stat_d(&fscache_n_cop_write_page);
50087 @@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50088 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
50089 ASSERT(PageFsCache(page));
50090
50091 - fscache_stat(&fscache_n_stores);
50092 + fscache_stat_unchecked(&fscache_n_stores);
50093
50094 op = kzalloc(sizeof(*op), GFP_NOIO);
50095 if (!op)
50096 @@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50097 spin_unlock(&cookie->stores_lock);
50098 spin_unlock(&object->lock);
50099
50100 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
50101 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
50102 op->store_limit = object->store_limit;
50103
50104 if (fscache_submit_op(object, &op->op) < 0)
50105 @@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50106
50107 spin_unlock(&cookie->lock);
50108 radix_tree_preload_end();
50109 - fscache_stat(&fscache_n_store_ops);
50110 - fscache_stat(&fscache_n_stores_ok);
50111 + fscache_stat_unchecked(&fscache_n_store_ops);
50112 + fscache_stat_unchecked(&fscache_n_stores_ok);
50113
50114 /* the slow work queue now carries its own ref on the object */
50115 fscache_put_operation(&op->op);
50116 @@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50117 return 0;
50118
50119 already_queued:
50120 - fscache_stat(&fscache_n_stores_again);
50121 + fscache_stat_unchecked(&fscache_n_stores_again);
50122 already_pending:
50123 spin_unlock(&cookie->stores_lock);
50124 spin_unlock(&object->lock);
50125 spin_unlock(&cookie->lock);
50126 radix_tree_preload_end();
50127 kfree(op);
50128 - fscache_stat(&fscache_n_stores_ok);
50129 + fscache_stat_unchecked(&fscache_n_stores_ok);
50130 _leave(" = 0");
50131 return 0;
50132
50133 @@ -886,14 +886,14 @@ nobufs:
50134 spin_unlock(&cookie->lock);
50135 radix_tree_preload_end();
50136 kfree(op);
50137 - fscache_stat(&fscache_n_stores_nobufs);
50138 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
50139 _leave(" = -ENOBUFS");
50140 return -ENOBUFS;
50141
50142 nomem_free:
50143 kfree(op);
50144 nomem:
50145 - fscache_stat(&fscache_n_stores_oom);
50146 + fscache_stat_unchecked(&fscache_n_stores_oom);
50147 _leave(" = -ENOMEM");
50148 return -ENOMEM;
50149 }
50150 @@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
50151 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
50152 ASSERTCMP(page, !=, NULL);
50153
50154 - fscache_stat(&fscache_n_uncaches);
50155 + fscache_stat_unchecked(&fscache_n_uncaches);
50156
50157 /* cache withdrawal may beat us to it */
50158 if (!PageFsCache(page))
50159 @@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
50160 unsigned long loop;
50161
50162 #ifdef CONFIG_FSCACHE_STATS
50163 - atomic_add(pagevec->nr, &fscache_n_marks);
50164 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
50165 #endif
50166
50167 for (loop = 0; loop < pagevec->nr; loop++) {
50168 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
50169 index 46435f3..8cddf18 100644
50170 --- a/fs/fscache/stats.c
50171 +++ b/fs/fscache/stats.c
50172 @@ -18,95 +18,95 @@
50173 /*
50174 * operation counters
50175 */
50176 -atomic_t fscache_n_op_pend;
50177 -atomic_t fscache_n_op_run;
50178 -atomic_t fscache_n_op_enqueue;
50179 -atomic_t fscache_n_op_requeue;
50180 -atomic_t fscache_n_op_deferred_release;
50181 -atomic_t fscache_n_op_release;
50182 -atomic_t fscache_n_op_gc;
50183 -atomic_t fscache_n_op_cancelled;
50184 -atomic_t fscache_n_op_rejected;
50185 +atomic_unchecked_t fscache_n_op_pend;
50186 +atomic_unchecked_t fscache_n_op_run;
50187 +atomic_unchecked_t fscache_n_op_enqueue;
50188 +atomic_unchecked_t fscache_n_op_requeue;
50189 +atomic_unchecked_t fscache_n_op_deferred_release;
50190 +atomic_unchecked_t fscache_n_op_release;
50191 +atomic_unchecked_t fscache_n_op_gc;
50192 +atomic_unchecked_t fscache_n_op_cancelled;
50193 +atomic_unchecked_t fscache_n_op_rejected;
50194
50195 -atomic_t fscache_n_attr_changed;
50196 -atomic_t fscache_n_attr_changed_ok;
50197 -atomic_t fscache_n_attr_changed_nobufs;
50198 -atomic_t fscache_n_attr_changed_nomem;
50199 -atomic_t fscache_n_attr_changed_calls;
50200 +atomic_unchecked_t fscache_n_attr_changed;
50201 +atomic_unchecked_t fscache_n_attr_changed_ok;
50202 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
50203 +atomic_unchecked_t fscache_n_attr_changed_nomem;
50204 +atomic_unchecked_t fscache_n_attr_changed_calls;
50205
50206 -atomic_t fscache_n_allocs;
50207 -atomic_t fscache_n_allocs_ok;
50208 -atomic_t fscache_n_allocs_wait;
50209 -atomic_t fscache_n_allocs_nobufs;
50210 -atomic_t fscache_n_allocs_intr;
50211 -atomic_t fscache_n_allocs_object_dead;
50212 -atomic_t fscache_n_alloc_ops;
50213 -atomic_t fscache_n_alloc_op_waits;
50214 +atomic_unchecked_t fscache_n_allocs;
50215 +atomic_unchecked_t fscache_n_allocs_ok;
50216 +atomic_unchecked_t fscache_n_allocs_wait;
50217 +atomic_unchecked_t fscache_n_allocs_nobufs;
50218 +atomic_unchecked_t fscache_n_allocs_intr;
50219 +atomic_unchecked_t fscache_n_allocs_object_dead;
50220 +atomic_unchecked_t fscache_n_alloc_ops;
50221 +atomic_unchecked_t fscache_n_alloc_op_waits;
50222
50223 -atomic_t fscache_n_retrievals;
50224 -atomic_t fscache_n_retrievals_ok;
50225 -atomic_t fscache_n_retrievals_wait;
50226 -atomic_t fscache_n_retrievals_nodata;
50227 -atomic_t fscache_n_retrievals_nobufs;
50228 -atomic_t fscache_n_retrievals_intr;
50229 -atomic_t fscache_n_retrievals_nomem;
50230 -atomic_t fscache_n_retrievals_object_dead;
50231 -atomic_t fscache_n_retrieval_ops;
50232 -atomic_t fscache_n_retrieval_op_waits;
50233 +atomic_unchecked_t fscache_n_retrievals;
50234 +atomic_unchecked_t fscache_n_retrievals_ok;
50235 +atomic_unchecked_t fscache_n_retrievals_wait;
50236 +atomic_unchecked_t fscache_n_retrievals_nodata;
50237 +atomic_unchecked_t fscache_n_retrievals_nobufs;
50238 +atomic_unchecked_t fscache_n_retrievals_intr;
50239 +atomic_unchecked_t fscache_n_retrievals_nomem;
50240 +atomic_unchecked_t fscache_n_retrievals_object_dead;
50241 +atomic_unchecked_t fscache_n_retrieval_ops;
50242 +atomic_unchecked_t fscache_n_retrieval_op_waits;
50243
50244 -atomic_t fscache_n_stores;
50245 -atomic_t fscache_n_stores_ok;
50246 -atomic_t fscache_n_stores_again;
50247 -atomic_t fscache_n_stores_nobufs;
50248 -atomic_t fscache_n_stores_oom;
50249 -atomic_t fscache_n_store_ops;
50250 -atomic_t fscache_n_store_calls;
50251 -atomic_t fscache_n_store_pages;
50252 -atomic_t fscache_n_store_radix_deletes;
50253 -atomic_t fscache_n_store_pages_over_limit;
50254 +atomic_unchecked_t fscache_n_stores;
50255 +atomic_unchecked_t fscache_n_stores_ok;
50256 +atomic_unchecked_t fscache_n_stores_again;
50257 +atomic_unchecked_t fscache_n_stores_nobufs;
50258 +atomic_unchecked_t fscache_n_stores_oom;
50259 +atomic_unchecked_t fscache_n_store_ops;
50260 +atomic_unchecked_t fscache_n_store_calls;
50261 +atomic_unchecked_t fscache_n_store_pages;
50262 +atomic_unchecked_t fscache_n_store_radix_deletes;
50263 +atomic_unchecked_t fscache_n_store_pages_over_limit;
50264
50265 -atomic_t fscache_n_store_vmscan_not_storing;
50266 -atomic_t fscache_n_store_vmscan_gone;
50267 -atomic_t fscache_n_store_vmscan_busy;
50268 -atomic_t fscache_n_store_vmscan_cancelled;
50269 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
50270 +atomic_unchecked_t fscache_n_store_vmscan_gone;
50271 +atomic_unchecked_t fscache_n_store_vmscan_busy;
50272 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
50273
50274 -atomic_t fscache_n_marks;
50275 -atomic_t fscache_n_uncaches;
50276 +atomic_unchecked_t fscache_n_marks;
50277 +atomic_unchecked_t fscache_n_uncaches;
50278
50279 -atomic_t fscache_n_acquires;
50280 -atomic_t fscache_n_acquires_null;
50281 -atomic_t fscache_n_acquires_no_cache;
50282 -atomic_t fscache_n_acquires_ok;
50283 -atomic_t fscache_n_acquires_nobufs;
50284 -atomic_t fscache_n_acquires_oom;
50285 +atomic_unchecked_t fscache_n_acquires;
50286 +atomic_unchecked_t fscache_n_acquires_null;
50287 +atomic_unchecked_t fscache_n_acquires_no_cache;
50288 +atomic_unchecked_t fscache_n_acquires_ok;
50289 +atomic_unchecked_t fscache_n_acquires_nobufs;
50290 +atomic_unchecked_t fscache_n_acquires_oom;
50291
50292 -atomic_t fscache_n_updates;
50293 -atomic_t fscache_n_updates_null;
50294 -atomic_t fscache_n_updates_run;
50295 +atomic_unchecked_t fscache_n_updates;
50296 +atomic_unchecked_t fscache_n_updates_null;
50297 +atomic_unchecked_t fscache_n_updates_run;
50298
50299 -atomic_t fscache_n_relinquishes;
50300 -atomic_t fscache_n_relinquishes_null;
50301 -atomic_t fscache_n_relinquishes_waitcrt;
50302 -atomic_t fscache_n_relinquishes_retire;
50303 +atomic_unchecked_t fscache_n_relinquishes;
50304 +atomic_unchecked_t fscache_n_relinquishes_null;
50305 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
50306 +atomic_unchecked_t fscache_n_relinquishes_retire;
50307
50308 -atomic_t fscache_n_cookie_index;
50309 -atomic_t fscache_n_cookie_data;
50310 -atomic_t fscache_n_cookie_special;
50311 +atomic_unchecked_t fscache_n_cookie_index;
50312 +atomic_unchecked_t fscache_n_cookie_data;
50313 +atomic_unchecked_t fscache_n_cookie_special;
50314
50315 -atomic_t fscache_n_object_alloc;
50316 -atomic_t fscache_n_object_no_alloc;
50317 -atomic_t fscache_n_object_lookups;
50318 -atomic_t fscache_n_object_lookups_negative;
50319 -atomic_t fscache_n_object_lookups_positive;
50320 -atomic_t fscache_n_object_lookups_timed_out;
50321 -atomic_t fscache_n_object_created;
50322 -atomic_t fscache_n_object_avail;
50323 -atomic_t fscache_n_object_dead;
50324 +atomic_unchecked_t fscache_n_object_alloc;
50325 +atomic_unchecked_t fscache_n_object_no_alloc;
50326 +atomic_unchecked_t fscache_n_object_lookups;
50327 +atomic_unchecked_t fscache_n_object_lookups_negative;
50328 +atomic_unchecked_t fscache_n_object_lookups_positive;
50329 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
50330 +atomic_unchecked_t fscache_n_object_created;
50331 +atomic_unchecked_t fscache_n_object_avail;
50332 +atomic_unchecked_t fscache_n_object_dead;
50333
50334 -atomic_t fscache_n_checkaux_none;
50335 -atomic_t fscache_n_checkaux_okay;
50336 -atomic_t fscache_n_checkaux_update;
50337 -atomic_t fscache_n_checkaux_obsolete;
50338 +atomic_unchecked_t fscache_n_checkaux_none;
50339 +atomic_unchecked_t fscache_n_checkaux_okay;
50340 +atomic_unchecked_t fscache_n_checkaux_update;
50341 +atomic_unchecked_t fscache_n_checkaux_obsolete;
50342
50343 atomic_t fscache_n_cop_alloc_object;
50344 atomic_t fscache_n_cop_lookup_object;
50345 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
50346 seq_puts(m, "FS-Cache statistics\n");
50347
50348 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
50349 - atomic_read(&fscache_n_cookie_index),
50350 - atomic_read(&fscache_n_cookie_data),
50351 - atomic_read(&fscache_n_cookie_special));
50352 + atomic_read_unchecked(&fscache_n_cookie_index),
50353 + atomic_read_unchecked(&fscache_n_cookie_data),
50354 + atomic_read_unchecked(&fscache_n_cookie_special));
50355
50356 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
50357 - atomic_read(&fscache_n_object_alloc),
50358 - atomic_read(&fscache_n_object_no_alloc),
50359 - atomic_read(&fscache_n_object_avail),
50360 - atomic_read(&fscache_n_object_dead));
50361 + atomic_read_unchecked(&fscache_n_object_alloc),
50362 + atomic_read_unchecked(&fscache_n_object_no_alloc),
50363 + atomic_read_unchecked(&fscache_n_object_avail),
50364 + atomic_read_unchecked(&fscache_n_object_dead));
50365 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
50366 - atomic_read(&fscache_n_checkaux_none),
50367 - atomic_read(&fscache_n_checkaux_okay),
50368 - atomic_read(&fscache_n_checkaux_update),
50369 - atomic_read(&fscache_n_checkaux_obsolete));
50370 + atomic_read_unchecked(&fscache_n_checkaux_none),
50371 + atomic_read_unchecked(&fscache_n_checkaux_okay),
50372 + atomic_read_unchecked(&fscache_n_checkaux_update),
50373 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
50374
50375 seq_printf(m, "Pages : mrk=%u unc=%u\n",
50376 - atomic_read(&fscache_n_marks),
50377 - atomic_read(&fscache_n_uncaches));
50378 + atomic_read_unchecked(&fscache_n_marks),
50379 + atomic_read_unchecked(&fscache_n_uncaches));
50380
50381 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
50382 " oom=%u\n",
50383 - atomic_read(&fscache_n_acquires),
50384 - atomic_read(&fscache_n_acquires_null),
50385 - atomic_read(&fscache_n_acquires_no_cache),
50386 - atomic_read(&fscache_n_acquires_ok),
50387 - atomic_read(&fscache_n_acquires_nobufs),
50388 - atomic_read(&fscache_n_acquires_oom));
50389 + atomic_read_unchecked(&fscache_n_acquires),
50390 + atomic_read_unchecked(&fscache_n_acquires_null),
50391 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
50392 + atomic_read_unchecked(&fscache_n_acquires_ok),
50393 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
50394 + atomic_read_unchecked(&fscache_n_acquires_oom));
50395
50396 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
50397 - atomic_read(&fscache_n_object_lookups),
50398 - atomic_read(&fscache_n_object_lookups_negative),
50399 - atomic_read(&fscache_n_object_lookups_positive),
50400 - atomic_read(&fscache_n_object_lookups_timed_out),
50401 - atomic_read(&fscache_n_object_created));
50402 + atomic_read_unchecked(&fscache_n_object_lookups),
50403 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
50404 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
50405 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
50406 + atomic_read_unchecked(&fscache_n_object_created));
50407
50408 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
50409 - atomic_read(&fscache_n_updates),
50410 - atomic_read(&fscache_n_updates_null),
50411 - atomic_read(&fscache_n_updates_run));
50412 + atomic_read_unchecked(&fscache_n_updates),
50413 + atomic_read_unchecked(&fscache_n_updates_null),
50414 + atomic_read_unchecked(&fscache_n_updates_run));
50415
50416 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
50417 - atomic_read(&fscache_n_relinquishes),
50418 - atomic_read(&fscache_n_relinquishes_null),
50419 - atomic_read(&fscache_n_relinquishes_waitcrt),
50420 - atomic_read(&fscache_n_relinquishes_retire));
50421 + atomic_read_unchecked(&fscache_n_relinquishes),
50422 + atomic_read_unchecked(&fscache_n_relinquishes_null),
50423 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
50424 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
50425
50426 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
50427 - atomic_read(&fscache_n_attr_changed),
50428 - atomic_read(&fscache_n_attr_changed_ok),
50429 - atomic_read(&fscache_n_attr_changed_nobufs),
50430 - atomic_read(&fscache_n_attr_changed_nomem),
50431 - atomic_read(&fscache_n_attr_changed_calls));
50432 + atomic_read_unchecked(&fscache_n_attr_changed),
50433 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
50434 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
50435 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
50436 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
50437
50438 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
50439 - atomic_read(&fscache_n_allocs),
50440 - atomic_read(&fscache_n_allocs_ok),
50441 - atomic_read(&fscache_n_allocs_wait),
50442 - atomic_read(&fscache_n_allocs_nobufs),
50443 - atomic_read(&fscache_n_allocs_intr));
50444 + atomic_read_unchecked(&fscache_n_allocs),
50445 + atomic_read_unchecked(&fscache_n_allocs_ok),
50446 + atomic_read_unchecked(&fscache_n_allocs_wait),
50447 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
50448 + atomic_read_unchecked(&fscache_n_allocs_intr));
50449 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
50450 - atomic_read(&fscache_n_alloc_ops),
50451 - atomic_read(&fscache_n_alloc_op_waits),
50452 - atomic_read(&fscache_n_allocs_object_dead));
50453 + atomic_read_unchecked(&fscache_n_alloc_ops),
50454 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
50455 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
50456
50457 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
50458 " int=%u oom=%u\n",
50459 - atomic_read(&fscache_n_retrievals),
50460 - atomic_read(&fscache_n_retrievals_ok),
50461 - atomic_read(&fscache_n_retrievals_wait),
50462 - atomic_read(&fscache_n_retrievals_nodata),
50463 - atomic_read(&fscache_n_retrievals_nobufs),
50464 - atomic_read(&fscache_n_retrievals_intr),
50465 - atomic_read(&fscache_n_retrievals_nomem));
50466 + atomic_read_unchecked(&fscache_n_retrievals),
50467 + atomic_read_unchecked(&fscache_n_retrievals_ok),
50468 + atomic_read_unchecked(&fscache_n_retrievals_wait),
50469 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
50470 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
50471 + atomic_read_unchecked(&fscache_n_retrievals_intr),
50472 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
50473 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
50474 - atomic_read(&fscache_n_retrieval_ops),
50475 - atomic_read(&fscache_n_retrieval_op_waits),
50476 - atomic_read(&fscache_n_retrievals_object_dead));
50477 + atomic_read_unchecked(&fscache_n_retrieval_ops),
50478 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
50479 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
50480
50481 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
50482 - atomic_read(&fscache_n_stores),
50483 - atomic_read(&fscache_n_stores_ok),
50484 - atomic_read(&fscache_n_stores_again),
50485 - atomic_read(&fscache_n_stores_nobufs),
50486 - atomic_read(&fscache_n_stores_oom));
50487 + atomic_read_unchecked(&fscache_n_stores),
50488 + atomic_read_unchecked(&fscache_n_stores_ok),
50489 + atomic_read_unchecked(&fscache_n_stores_again),
50490 + atomic_read_unchecked(&fscache_n_stores_nobufs),
50491 + atomic_read_unchecked(&fscache_n_stores_oom));
50492 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
50493 - atomic_read(&fscache_n_store_ops),
50494 - atomic_read(&fscache_n_store_calls),
50495 - atomic_read(&fscache_n_store_pages),
50496 - atomic_read(&fscache_n_store_radix_deletes),
50497 - atomic_read(&fscache_n_store_pages_over_limit));
50498 + atomic_read_unchecked(&fscache_n_store_ops),
50499 + atomic_read_unchecked(&fscache_n_store_calls),
50500 + atomic_read_unchecked(&fscache_n_store_pages),
50501 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
50502 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
50503
50504 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
50505 - atomic_read(&fscache_n_store_vmscan_not_storing),
50506 - atomic_read(&fscache_n_store_vmscan_gone),
50507 - atomic_read(&fscache_n_store_vmscan_busy),
50508 - atomic_read(&fscache_n_store_vmscan_cancelled));
50509 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
50510 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
50511 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
50512 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
50513
50514 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
50515 - atomic_read(&fscache_n_op_pend),
50516 - atomic_read(&fscache_n_op_run),
50517 - atomic_read(&fscache_n_op_enqueue),
50518 - atomic_read(&fscache_n_op_cancelled),
50519 - atomic_read(&fscache_n_op_rejected));
50520 + atomic_read_unchecked(&fscache_n_op_pend),
50521 + atomic_read_unchecked(&fscache_n_op_run),
50522 + atomic_read_unchecked(&fscache_n_op_enqueue),
50523 + atomic_read_unchecked(&fscache_n_op_cancelled),
50524 + atomic_read_unchecked(&fscache_n_op_rejected));
50525 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
50526 - atomic_read(&fscache_n_op_deferred_release),
50527 - atomic_read(&fscache_n_op_release),
50528 - atomic_read(&fscache_n_op_gc));
50529 + atomic_read_unchecked(&fscache_n_op_deferred_release),
50530 + atomic_read_unchecked(&fscache_n_op_release),
50531 + atomic_read_unchecked(&fscache_n_op_gc));
50532
50533 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
50534 atomic_read(&fscache_n_cop_alloc_object),
50535 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
50536 index de792dc..448b532 100644
50537 --- a/fs/fuse/cuse.c
50538 +++ b/fs/fuse/cuse.c
50539 @@ -576,10 +576,12 @@ static int __init cuse_init(void)
50540 INIT_LIST_HEAD(&cuse_conntbl[i]);
50541
50542 /* inherit and extend fuse_dev_operations */
50543 - cuse_channel_fops = fuse_dev_operations;
50544 - cuse_channel_fops.owner = THIS_MODULE;
50545 - cuse_channel_fops.open = cuse_channel_open;
50546 - cuse_channel_fops.release = cuse_channel_release;
50547 + pax_open_kernel();
50548 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
50549 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
50550 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
50551 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
50552 + pax_close_kernel();
50553
50554 cuse_class = class_create(THIS_MODULE, "cuse");
50555 if (IS_ERR(cuse_class))
50556 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
50557 index 1facb39..7f48557 100644
50558 --- a/fs/fuse/dev.c
50559 +++ b/fs/fuse/dev.c
50560 @@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
50561 {
50562 struct fuse_notify_inval_entry_out outarg;
50563 int err = -EINVAL;
50564 - char buf[FUSE_NAME_MAX+1];
50565 + char *buf = NULL;
50566 struct qstr name;
50567
50568 if (size < sizeof(outarg))
50569 @@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
50570 if (outarg.namelen > FUSE_NAME_MAX)
50571 goto err;
50572
50573 + err = -ENOMEM;
50574 + buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
50575 + if (!buf)
50576 + goto err;
50577 +
50578 err = -EINVAL;
50579 if (size != sizeof(outarg) + outarg.namelen + 1)
50580 goto err;
50581 @@ -914,17 +919,15 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
50582
50583 down_read(&fc->killsb);
50584 err = -ENOENT;
50585 - if (!fc->sb)
50586 - goto err_unlock;
50587 -
50588 - err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
50589 -
50590 -err_unlock:
50591 + if (fc->sb)
50592 + err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
50593 up_read(&fc->killsb);
50594 + kfree(buf);
50595 return err;
50596
50597 err:
50598 fuse_copy_finish(cs);
50599 + kfree(buf);
50600 return err;
50601 }
50602
50603 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
50604 index 4787ae6..73efff7 100644
50605 --- a/fs/fuse/dir.c
50606 +++ b/fs/fuse/dir.c
50607 @@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *dentry)
50608 return link;
50609 }
50610
50611 -static void free_link(char *link)
50612 +static void free_link(const char *link)
50613 {
50614 if (!IS_ERR(link))
50615 free_page((unsigned long) link);
50616 diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
50617 index 247436c..e650ccb 100644
50618 --- a/fs/gfs2/ops_inode.c
50619 +++ b/fs/gfs2/ops_inode.c
50620 @@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
50621 unsigned int x;
50622 int error;
50623
50624 + pax_track_stack();
50625 +
50626 if (ndentry->d_inode) {
50627 nip = GFS2_I(ndentry->d_inode);
50628 if (ip == nip)
50629 diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
50630 index 4463297..4fed53b 100644
50631 --- a/fs/gfs2/sys.c
50632 +++ b/fs/gfs2/sys.c
50633 @@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct kobject *kobj, struct attribute *attr,
50634 return a->store ? a->store(sdp, buf, len) : len;
50635 }
50636
50637 -static struct sysfs_ops gfs2_attr_ops = {
50638 +static const struct sysfs_ops gfs2_attr_ops = {
50639 .show = gfs2_attr_show,
50640 .store = gfs2_attr_store,
50641 };
50642 @@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
50643 return 0;
50644 }
50645
50646 -static struct kset_uevent_ops gfs2_uevent_ops = {
50647 +static const struct kset_uevent_ops gfs2_uevent_ops = {
50648 .uevent = gfs2_uevent,
50649 };
50650
50651 diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
50652 index f6874ac..7cd98a8 100644
50653 --- a/fs/hfsplus/catalog.c
50654 +++ b/fs/hfsplus/catalog.c
50655 @@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
50656 int err;
50657 u16 type;
50658
50659 + pax_track_stack();
50660 +
50661 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
50662 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
50663 if (err)
50664 @@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir, struct qstr *str, struct ino
50665 int entry_size;
50666 int err;
50667
50668 + pax_track_stack();
50669 +
50670 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
50671 sb = dir->i_sb;
50672 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
50673 @@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
50674 int entry_size, type;
50675 int err = 0;
50676
50677 + pax_track_stack();
50678 +
50679 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
50680 dst_dir->i_ino, dst_name->name);
50681 sb = src_dir->i_sb;
50682 diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
50683 index 5f40236..dac3421 100644
50684 --- a/fs/hfsplus/dir.c
50685 +++ b/fs/hfsplus/dir.c
50686 @@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
50687 struct hfsplus_readdir_data *rd;
50688 u16 type;
50689
50690 + pax_track_stack();
50691 +
50692 if (filp->f_pos >= inode->i_size)
50693 return 0;
50694
50695 diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
50696 index 1bcf597..905a251 100644
50697 --- a/fs/hfsplus/inode.c
50698 +++ b/fs/hfsplus/inode.c
50699 @@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
50700 int res = 0;
50701 u16 type;
50702
50703 + pax_track_stack();
50704 +
50705 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
50706
50707 HFSPLUS_I(inode).dev = 0;
50708 @@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode *inode)
50709 struct hfs_find_data fd;
50710 hfsplus_cat_entry entry;
50711
50712 + pax_track_stack();
50713 +
50714 if (HFSPLUS_IS_RSRC(inode))
50715 main_inode = HFSPLUS_I(inode).rsrc_inode;
50716
50717 diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
50718 index f457d2c..7ef4ad5 100644
50719 --- a/fs/hfsplus/ioctl.c
50720 +++ b/fs/hfsplus/ioctl.c
50721 @@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dentry, const char *name,
50722 struct hfsplus_cat_file *file;
50723 int res;
50724
50725 + pax_track_stack();
50726 +
50727 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
50728 return -EOPNOTSUPP;
50729
50730 @@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
50731 struct hfsplus_cat_file *file;
50732 ssize_t res = 0;
50733
50734 + pax_track_stack();
50735 +
50736 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
50737 return -EOPNOTSUPP;
50738
50739 diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
50740 index 43022f3..7298079 100644
50741 --- a/fs/hfsplus/super.c
50742 +++ b/fs/hfsplus/super.c
50743 @@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
50744 struct nls_table *nls = NULL;
50745 int err = -EINVAL;
50746
50747 + pax_track_stack();
50748 +
50749 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
50750 if (!sbi)
50751 return -ENOMEM;
50752 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
50753 index 87a1258..5694d91 100644
50754 --- a/fs/hugetlbfs/inode.c
50755 +++ b/fs/hugetlbfs/inode.c
50756 @@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs_fs_type = {
50757 .kill_sb = kill_litter_super,
50758 };
50759
50760 -static struct vfsmount *hugetlbfs_vfsmount;
50761 +struct vfsmount *hugetlbfs_vfsmount;
50762
50763 static int can_do_hugetlb_shm(void)
50764 {
50765 diff --git a/fs/ioctl.c b/fs/ioctl.c
50766 index 6c75110..19d2c3c 100644
50767 --- a/fs/ioctl.c
50768 +++ b/fs/ioctl.c
50769 @@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
50770 u64 phys, u64 len, u32 flags)
50771 {
50772 struct fiemap_extent extent;
50773 - struct fiemap_extent *dest = fieinfo->fi_extents_start;
50774 + struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
50775
50776 /* only count the extents */
50777 if (fieinfo->fi_extents_max == 0) {
50778 @@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
50779
50780 fieinfo.fi_flags = fiemap.fm_flags;
50781 fieinfo.fi_extents_max = fiemap.fm_extent_count;
50782 - fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
50783 + fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
50784
50785 if (fiemap.fm_extent_count != 0 &&
50786 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
50787 @@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
50788 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
50789 fiemap.fm_flags = fieinfo.fi_flags;
50790 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
50791 - if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
50792 + if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
50793 error = -EFAULT;
50794
50795 return error;
50796 diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
50797 index b0435dd..81ee0be 100644
50798 --- a/fs/jbd/checkpoint.c
50799 +++ b/fs/jbd/checkpoint.c
50800 @@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal)
50801 tid_t this_tid;
50802 int result;
50803
50804 + pax_track_stack();
50805 +
50806 jbd_debug(1, "Start checkpoint\n");
50807
50808 /*
50809 diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
50810 index 546d153..736896c 100644
50811 --- a/fs/jffs2/compr_rtime.c
50812 +++ b/fs/jffs2/compr_rtime.c
50813 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned char *data_in,
50814 int outpos = 0;
50815 int pos=0;
50816
50817 + pax_track_stack();
50818 +
50819 memset(positions,0,sizeof(positions));
50820
50821 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
50822 @@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
50823 int outpos = 0;
50824 int pos=0;
50825
50826 + pax_track_stack();
50827 +
50828 memset(positions,0,sizeof(positions));
50829
50830 while (outpos<destlen) {
50831 diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
50832 index 170d289..3254b98 100644
50833 --- a/fs/jffs2/compr_rubin.c
50834 +++ b/fs/jffs2/compr_rubin.c
50835 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsigned char *data_in,
50836 int ret;
50837 uint32_t mysrclen, mydstlen;
50838
50839 + pax_track_stack();
50840 +
50841 mysrclen = *sourcelen;
50842 mydstlen = *dstlen - 8;
50843
50844 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
50845 index b47679b..00d65d3 100644
50846 --- a/fs/jffs2/erase.c
50847 +++ b/fs/jffs2/erase.c
50848 @@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
50849 struct jffs2_unknown_node marker = {
50850 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
50851 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
50852 - .totlen = cpu_to_je32(c->cleanmarker_size)
50853 + .totlen = cpu_to_je32(c->cleanmarker_size),
50854 + .hdr_crc = cpu_to_je32(0)
50855 };
50856
50857 jffs2_prealloc_raw_node_refs(c, jeb, 1);
50858 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
50859 index 5ef7bac..4fd1e3c 100644
50860 --- a/fs/jffs2/wbuf.c
50861 +++ b/fs/jffs2/wbuf.c
50862 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
50863 {
50864 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
50865 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
50866 - .totlen = constant_cpu_to_je32(8)
50867 + .totlen = constant_cpu_to_je32(8),
50868 + .hdr_crc = constant_cpu_to_je32(0)
50869 };
50870
50871 /*
50872 diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
50873 index 082e844..52012a1 100644
50874 --- a/fs/jffs2/xattr.c
50875 +++ b/fs/jffs2/xattr.c
50876 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
50877
50878 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
50879
50880 + pax_track_stack();
50881 +
50882 /* Phase.1 : Merge same xref */
50883 for (i=0; i < XREF_TMPHASH_SIZE; i++)
50884 xref_tmphash[i] = NULL;
50885 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
50886 index 2234c73..f6e6e6b 100644
50887 --- a/fs/jfs/super.c
50888 +++ b/fs/jfs/super.c
50889 @@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
50890
50891 jfs_inode_cachep =
50892 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
50893 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
50894 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
50895 init_once);
50896 if (jfs_inode_cachep == NULL)
50897 return -ENOMEM;
50898 diff --git a/fs/libfs.c b/fs/libfs.c
50899 index ba36e93..3153fce 100644
50900 --- a/fs/libfs.c
50901 +++ b/fs/libfs.c
50902 @@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
50903
50904 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
50905 struct dentry *next;
50906 + char d_name[sizeof(next->d_iname)];
50907 + const unsigned char *name;
50908 +
50909 next = list_entry(p, struct dentry, d_u.d_child);
50910 if (d_unhashed(next) || !next->d_inode)
50911 continue;
50912
50913 spin_unlock(&dcache_lock);
50914 - if (filldir(dirent, next->d_name.name,
50915 + name = next->d_name.name;
50916 + if (name == next->d_iname) {
50917 + memcpy(d_name, name, next->d_name.len);
50918 + name = d_name;
50919 + }
50920 + if (filldir(dirent, name,
50921 next->d_name.len, filp->f_pos,
50922 next->d_inode->i_ino,
50923 dt_type(next->d_inode)) < 0)
50924 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
50925 index c325a83..d15b07b 100644
50926 --- a/fs/lockd/clntproc.c
50927 +++ b/fs/lockd/clntproc.c
50928 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
50929 /*
50930 * Cookie counter for NLM requests
50931 */
50932 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
50933 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
50934
50935 void nlmclnt_next_cookie(struct nlm_cookie *c)
50936 {
50937 - u32 cookie = atomic_inc_return(&nlm_cookie);
50938 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
50939
50940 memcpy(c->data, &cookie, 4);
50941 c->len=4;
50942 @@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
50943 struct nlm_rqst reqst, *req;
50944 int status;
50945
50946 + pax_track_stack();
50947 +
50948 req = &reqst;
50949 memset(req, 0, sizeof(*req));
50950 locks_init_lock(&req->a_args.lock.fl);
50951 diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
50952 index 1a54ae1..6a16c27 100644
50953 --- a/fs/lockd/svc.c
50954 +++ b/fs/lockd/svc.c
50955 @@ -43,7 +43,7 @@
50956
50957 static struct svc_program nlmsvc_program;
50958
50959 -struct nlmsvc_binding * nlmsvc_ops;
50960 +const struct nlmsvc_binding * nlmsvc_ops;
50961 EXPORT_SYMBOL_GPL(nlmsvc_ops);
50962
50963 static DEFINE_MUTEX(nlmsvc_mutex);
50964 diff --git a/fs/locks.c b/fs/locks.c
50965 index a8794f2..4041e55 100644
50966 --- a/fs/locks.c
50967 +++ b/fs/locks.c
50968 @@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
50969
50970 static struct kmem_cache *filelock_cache __read_mostly;
50971
50972 +static void locks_init_lock_always(struct file_lock *fl)
50973 +{
50974 + fl->fl_next = NULL;
50975 + fl->fl_fasync = NULL;
50976 + fl->fl_owner = NULL;
50977 + fl->fl_pid = 0;
50978 + fl->fl_nspid = NULL;
50979 + fl->fl_file = NULL;
50980 + fl->fl_flags = 0;
50981 + fl->fl_type = 0;
50982 + fl->fl_start = fl->fl_end = 0;
50983 +}
50984 +
50985 /* Allocate an empty lock structure. */
50986 static struct file_lock *locks_alloc_lock(void)
50987 {
50988 - return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
50989 + struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
50990 +
50991 + if (fl)
50992 + locks_init_lock_always(fl);
50993 +
50994 + return fl;
50995 }
50996
50997 void locks_release_private(struct file_lock *fl)
50998 @@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *fl)
50999 INIT_LIST_HEAD(&fl->fl_link);
51000 INIT_LIST_HEAD(&fl->fl_block);
51001 init_waitqueue_head(&fl->fl_wait);
51002 - fl->fl_next = NULL;
51003 - fl->fl_fasync = NULL;
51004 - fl->fl_owner = NULL;
51005 - fl->fl_pid = 0;
51006 - fl->fl_nspid = NULL;
51007 - fl->fl_file = NULL;
51008 - fl->fl_flags = 0;
51009 - fl->fl_type = 0;
51010 - fl->fl_start = fl->fl_end = 0;
51011 fl->fl_ops = NULL;
51012 fl->fl_lmops = NULL;
51013 + locks_init_lock_always(fl);
51014 }
51015
51016 EXPORT_SYMBOL(locks_init_lock);
51017 @@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *filp)
51018 return;
51019
51020 if (filp->f_op && filp->f_op->flock) {
51021 - struct file_lock fl = {
51022 + struct file_lock flock = {
51023 .fl_pid = current->tgid,
51024 .fl_file = filp,
51025 .fl_flags = FL_FLOCK,
51026 .fl_type = F_UNLCK,
51027 .fl_end = OFFSET_MAX,
51028 };
51029 - filp->f_op->flock(filp, F_SETLKW, &fl);
51030 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
51031 - fl.fl_ops->fl_release_private(&fl);
51032 + filp->f_op->flock(filp, F_SETLKW, &flock);
51033 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
51034 + flock.fl_ops->fl_release_private(&flock);
51035 }
51036
51037 lock_kernel();
51038 diff --git a/fs/mbcache.c b/fs/mbcache.c
51039 index ec88ff3..b843a82 100644
51040 --- a/fs/mbcache.c
51041 +++ b/fs/mbcache.c
51042 @@ -266,9 +266,9 @@ mb_cache_create(const char *name, struct mb_cache_op *cache_op,
51043 if (!cache)
51044 goto fail;
51045 cache->c_name = name;
51046 - cache->c_op.free = NULL;
51047 + *(void **)&cache->c_op.free = NULL;
51048 if (cache_op)
51049 - cache->c_op.free = cache_op->free;
51050 + *(void **)&cache->c_op.free = cache_op->free;
51051 atomic_set(&cache->c_entry_count, 0);
51052 cache->c_bucket_bits = bucket_bits;
51053 #ifdef MB_CACHE_INDEXES_COUNT
51054 diff --git a/fs/namei.c b/fs/namei.c
51055 index b0afbd4..8d065a1 100644
51056 --- a/fs/namei.c
51057 +++ b/fs/namei.c
51058 @@ -224,6 +224,14 @@ int generic_permission(struct inode *inode, int mask,
51059 return ret;
51060
51061 /*
51062 + * Searching includes executable on directories, else just read.
51063 + */
51064 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
51065 + if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
51066 + if (capable(CAP_DAC_READ_SEARCH))
51067 + return 0;
51068 +
51069 + /*
51070 * Read/write DACs are always overridable.
51071 * Executable DACs are overridable if at least one exec bit is set.
51072 */
51073 @@ -231,14 +239,6 @@ int generic_permission(struct inode *inode, int mask,
51074 if (capable(CAP_DAC_OVERRIDE))
51075 return 0;
51076
51077 - /*
51078 - * Searching includes executable on directories, else just read.
51079 - */
51080 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
51081 - if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
51082 - if (capable(CAP_DAC_READ_SEARCH))
51083 - return 0;
51084 -
51085 return -EACCES;
51086 }
51087
51088 @@ -458,7 +458,8 @@ static int exec_permission_lite(struct inode *inode)
51089 if (!ret)
51090 goto ok;
51091
51092 - if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
51093 + if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
51094 + capable(CAP_DAC_OVERRIDE))
51095 goto ok;
51096
51097 return ret;
51098 @@ -638,7 +639,7 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
51099 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
51100 error = PTR_ERR(cookie);
51101 if (!IS_ERR(cookie)) {
51102 - char *s = nd_get_link(nd);
51103 + const char *s = nd_get_link(nd);
51104 error = 0;
51105 if (s)
51106 error = __vfs_follow_link(nd, s);
51107 @@ -669,6 +670,13 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd)
51108 err = security_inode_follow_link(path->dentry, nd);
51109 if (err)
51110 goto loop;
51111 +
51112 + if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
51113 + path->dentry->d_inode, path->dentry, nd->path.mnt)) {
51114 + err = -EACCES;
51115 + goto loop;
51116 + }
51117 +
51118 current->link_count++;
51119 current->total_link_count++;
51120 nd->depth++;
51121 @@ -1016,11 +1024,19 @@ return_reval:
51122 break;
51123 }
51124 return_base:
51125 + if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT)) &&
51126 + !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
51127 + path_put(&nd->path);
51128 + return -ENOENT;
51129 + }
51130 return 0;
51131 out_dput:
51132 path_put_conditional(&next, nd);
51133 break;
51134 }
51135 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
51136 + err = -ENOENT;
51137 +
51138 path_put(&nd->path);
51139 return_err:
51140 return err;
51141 @@ -1091,13 +1107,20 @@ static int do_path_lookup(int dfd, const char *name,
51142 int retval = path_init(dfd, name, flags, nd);
51143 if (!retval)
51144 retval = path_walk(name, nd);
51145 - if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
51146 - nd->path.dentry->d_inode))
51147 - audit_inode(name, nd->path.dentry);
51148 +
51149 + if (likely(!retval)) {
51150 + if (nd->path.dentry && nd->path.dentry->d_inode) {
51151 + if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
51152 + retval = -ENOENT;
51153 + if (!audit_dummy_context())
51154 + audit_inode(name, nd->path.dentry);
51155 + }
51156 + }
51157 if (nd->root.mnt) {
51158 path_put(&nd->root);
51159 nd->root.mnt = NULL;
51160 }
51161 +
51162 return retval;
51163 }
51164
51165 @@ -1576,6 +1599,20 @@ int may_open(struct path *path, int acc_mode, int flag)
51166 if (error)
51167 goto err_out;
51168
51169 +
51170 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
51171 + error = -EPERM;
51172 + goto err_out;
51173 + }
51174 + if (gr_handle_rawio(inode)) {
51175 + error = -EPERM;
51176 + goto err_out;
51177 + }
51178 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode)) {
51179 + error = -EACCES;
51180 + goto err_out;
51181 + }
51182 +
51183 if (flag & O_TRUNC) {
51184 error = get_write_access(inode);
51185 if (error)
51186 @@ -1620,6 +1657,17 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
51187 {
51188 int error;
51189 struct dentry *dir = nd->path.dentry;
51190 + int acc_mode = ACC_MODE(flag);
51191 +
51192 + if (flag & O_TRUNC)
51193 + acc_mode |= MAY_WRITE;
51194 + if (flag & O_APPEND)
51195 + acc_mode |= MAY_APPEND;
51196 +
51197 + if (!gr_acl_handle_creat(path->dentry, dir, nd->path.mnt, flag, acc_mode, mode)) {
51198 + error = -EACCES;
51199 + goto out_unlock;
51200 + }
51201
51202 if (!IS_POSIXACL(dir->d_inode))
51203 mode &= ~current_umask();
51204 @@ -1627,6 +1675,8 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
51205 if (error)
51206 goto out_unlock;
51207 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
51208 + if (!error)
51209 + gr_handle_create(path->dentry, nd->path.mnt);
51210 out_unlock:
51211 mutex_unlock(&dir->d_inode->i_mutex);
51212 dput(nd->path.dentry);
51213 @@ -1709,6 +1759,22 @@ struct file *do_filp_open(int dfd, const char *pathname,
51214 &nd, flag);
51215 if (error)
51216 return ERR_PTR(error);
51217 +
51218 + if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
51219 + error = -EPERM;
51220 + goto exit;
51221 + }
51222 +
51223 + if (gr_handle_rawio(nd.path.dentry->d_inode)) {
51224 + error = -EPERM;
51225 + goto exit;
51226 + }
51227 +
51228 + if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, acc_mode)) {
51229 + error = -EACCES;
51230 + goto exit;
51231 + }
51232 +
51233 goto ok;
51234 }
51235
51236 @@ -1795,6 +1861,19 @@ do_last:
51237 /*
51238 * It already exists.
51239 */
51240 +
51241 + if (!gr_acl_handle_hidden_file(path.dentry, path.mnt)) {
51242 + error = -ENOENT;
51243 + goto exit_mutex_unlock;
51244 + }
51245 +
51246 + /* only check if O_CREAT is specified, all other checks need
51247 + to go into may_open */
51248 + if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
51249 + error = -EACCES;
51250 + goto exit_mutex_unlock;
51251 + }
51252 +
51253 mutex_unlock(&dir->d_inode->i_mutex);
51254 audit_inode(pathname, path.dentry);
51255
51256 @@ -1887,6 +1966,13 @@ do_link:
51257 error = security_inode_follow_link(path.dentry, &nd);
51258 if (error)
51259 goto exit_dput;
51260 +
51261 + if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
51262 + path.dentry, nd.path.mnt)) {
51263 + error = -EACCES;
51264 + goto exit_dput;
51265 + }
51266 +
51267 error = __do_follow_link(&path, &nd);
51268 if (error) {
51269 /* Does someone understand code flow here? Or it is only
51270 @@ -1984,6 +2070,10 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir)
51271 }
51272 return dentry;
51273 eexist:
51274 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
51275 + dput(dentry);
51276 + return ERR_PTR(-ENOENT);
51277 + }
51278 dput(dentry);
51279 dentry = ERR_PTR(-EEXIST);
51280 fail:
51281 @@ -2061,6 +2151,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
51282 error = may_mknod(mode);
51283 if (error)
51284 goto out_dput;
51285 +
51286 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
51287 + error = -EPERM;
51288 + goto out_dput;
51289 + }
51290 +
51291 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
51292 + error = -EACCES;
51293 + goto out_dput;
51294 + }
51295 +
51296 error = mnt_want_write(nd.path.mnt);
51297 if (error)
51298 goto out_dput;
51299 @@ -2081,6 +2182,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
51300 }
51301 out_drop_write:
51302 mnt_drop_write(nd.path.mnt);
51303 +
51304 + if (!error)
51305 + gr_handle_create(dentry, nd.path.mnt);
51306 out_dput:
51307 dput(dentry);
51308 out_unlock:
51309 @@ -2134,6 +2238,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
51310 if (IS_ERR(dentry))
51311 goto out_unlock;
51312
51313 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
51314 + error = -EACCES;
51315 + goto out_dput;
51316 + }
51317 +
51318 if (!IS_POSIXACL(nd.path.dentry->d_inode))
51319 mode &= ~current_umask();
51320 error = mnt_want_write(nd.path.mnt);
51321 @@ -2145,6 +2254,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
51322 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
51323 out_drop_write:
51324 mnt_drop_write(nd.path.mnt);
51325 +
51326 + if (!error)
51327 + gr_handle_create(dentry, nd.path.mnt);
51328 +
51329 out_dput:
51330 dput(dentry);
51331 out_unlock:
51332 @@ -2226,6 +2339,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
51333 char * name;
51334 struct dentry *dentry;
51335 struct nameidata nd;
51336 + ino_t saved_ino = 0;
51337 + dev_t saved_dev = 0;
51338
51339 error = user_path_parent(dfd, pathname, &nd, &name);
51340 if (error)
51341 @@ -2250,6 +2365,17 @@ static long do_rmdir(int dfd, const char __user *pathname)
51342 error = PTR_ERR(dentry);
51343 if (IS_ERR(dentry))
51344 goto exit2;
51345 +
51346 + if (dentry->d_inode != NULL) {
51347 + saved_ino = dentry->d_inode->i_ino;
51348 + saved_dev = gr_get_dev_from_dentry(dentry);
51349 +
51350 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
51351 + error = -EACCES;
51352 + goto exit3;
51353 + }
51354 + }
51355 +
51356 error = mnt_want_write(nd.path.mnt);
51357 if (error)
51358 goto exit3;
51359 @@ -2257,6 +2383,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
51360 if (error)
51361 goto exit4;
51362 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
51363 + if (!error && (saved_dev || saved_ino))
51364 + gr_handle_delete(saved_ino, saved_dev);
51365 exit4:
51366 mnt_drop_write(nd.path.mnt);
51367 exit3:
51368 @@ -2318,6 +2446,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
51369 struct dentry *dentry;
51370 struct nameidata nd;
51371 struct inode *inode = NULL;
51372 + ino_t saved_ino = 0;
51373 + dev_t saved_dev = 0;
51374
51375 error = user_path_parent(dfd, pathname, &nd, &name);
51376 if (error)
51377 @@ -2337,8 +2467,19 @@ static long do_unlinkat(int dfd, const char __user *pathname)
51378 if (nd.last.name[nd.last.len])
51379 goto slashes;
51380 inode = dentry->d_inode;
51381 - if (inode)
51382 + if (inode) {
51383 + if (inode->i_nlink <= 1) {
51384 + saved_ino = inode->i_ino;
51385 + saved_dev = gr_get_dev_from_dentry(dentry);
51386 + }
51387 +
51388 atomic_inc(&inode->i_count);
51389 +
51390 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
51391 + error = -EACCES;
51392 + goto exit2;
51393 + }
51394 + }
51395 error = mnt_want_write(nd.path.mnt);
51396 if (error)
51397 goto exit2;
51398 @@ -2346,6 +2487,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
51399 if (error)
51400 goto exit3;
51401 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
51402 + if (!error && (saved_ino || saved_dev))
51403 + gr_handle_delete(saved_ino, saved_dev);
51404 exit3:
51405 mnt_drop_write(nd.path.mnt);
51406 exit2:
51407 @@ -2424,6 +2567,11 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
51408 if (IS_ERR(dentry))
51409 goto out_unlock;
51410
51411 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
51412 + error = -EACCES;
51413 + goto out_dput;
51414 + }
51415 +
51416 error = mnt_want_write(nd.path.mnt);
51417 if (error)
51418 goto out_dput;
51419 @@ -2431,6 +2579,8 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
51420 if (error)
51421 goto out_drop_write;
51422 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
51423 + if (!error)
51424 + gr_handle_create(dentry, nd.path.mnt);
51425 out_drop_write:
51426 mnt_drop_write(nd.path.mnt);
51427 out_dput:
51428 @@ -2524,6 +2674,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
51429 error = PTR_ERR(new_dentry);
51430 if (IS_ERR(new_dentry))
51431 goto out_unlock;
51432 +
51433 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
51434 + old_path.dentry->d_inode,
51435 + old_path.dentry->d_inode->i_mode, to)) {
51436 + error = -EACCES;
51437 + goto out_dput;
51438 + }
51439 +
51440 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
51441 + old_path.dentry, old_path.mnt, to)) {
51442 + error = -EACCES;
51443 + goto out_dput;
51444 + }
51445 +
51446 error = mnt_want_write(nd.path.mnt);
51447 if (error)
51448 goto out_dput;
51449 @@ -2531,6 +2695,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
51450 if (error)
51451 goto out_drop_write;
51452 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
51453 + if (!error)
51454 + gr_handle_create(new_dentry, nd.path.mnt);
51455 out_drop_write:
51456 mnt_drop_write(nd.path.mnt);
51457 out_dput:
51458 @@ -2708,6 +2874,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
51459 char *to;
51460 int error;
51461
51462 + pax_track_stack();
51463 +
51464 error = user_path_parent(olddfd, oldname, &oldnd, &from);
51465 if (error)
51466 goto exit;
51467 @@ -2764,6 +2932,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
51468 if (new_dentry == trap)
51469 goto exit5;
51470
51471 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
51472 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
51473 + to);
51474 + if (error)
51475 + goto exit5;
51476 +
51477 error = mnt_want_write(oldnd.path.mnt);
51478 if (error)
51479 goto exit5;
51480 @@ -2773,6 +2947,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
51481 goto exit6;
51482 error = vfs_rename(old_dir->d_inode, old_dentry,
51483 new_dir->d_inode, new_dentry);
51484 + if (!error)
51485 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
51486 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
51487 exit6:
51488 mnt_drop_write(oldnd.path.mnt);
51489 exit5:
51490 @@ -2798,6 +2975,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
51491
51492 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
51493 {
51494 + char tmpbuf[64];
51495 + const char *newlink;
51496 int len;
51497
51498 len = PTR_ERR(link);
51499 @@ -2807,7 +2986,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
51500 len = strlen(link);
51501 if (len > (unsigned) buflen)
51502 len = buflen;
51503 - if (copy_to_user(buffer, link, len))
51504 +
51505 + if (len < sizeof(tmpbuf)) {
51506 + memcpy(tmpbuf, link, len);
51507 + newlink = tmpbuf;
51508 + } else
51509 + newlink = link;
51510 +
51511 + if (copy_to_user(buffer, newlink, len))
51512 len = -EFAULT;
51513 out:
51514 return len;
51515 diff --git a/fs/namespace.c b/fs/namespace.c
51516 index 2beb0fb..11a95a5 100644
51517 --- a/fs/namespace.c
51518 +++ b/fs/namespace.c
51519 @@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
51520 if (!(sb->s_flags & MS_RDONLY))
51521 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
51522 up_write(&sb->s_umount);
51523 +
51524 + gr_log_remount(mnt->mnt_devname, retval);
51525 +
51526 return retval;
51527 }
51528
51529 @@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
51530 security_sb_umount_busy(mnt);
51531 up_write(&namespace_sem);
51532 release_mounts(&umount_list);
51533 +
51534 + gr_log_unmount(mnt->mnt_devname, retval);
51535 +
51536 return retval;
51537 }
51538
51539 @@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
51540 if (retval)
51541 goto dput_out;
51542
51543 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
51544 + retval = -EPERM;
51545 + goto dput_out;
51546 + }
51547 +
51548 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
51549 + retval = -EPERM;
51550 + goto dput_out;
51551 + }
51552 +
51553 if (flags & MS_REMOUNT)
51554 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
51555 data_page);
51556 @@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
51557 dev_name, data_page);
51558 dput_out:
51559 path_put(&path);
51560 +
51561 + gr_log_mount(dev_name, dir_name, retval);
51562 +
51563 return retval;
51564 }
51565
51566 @@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
51567 goto out1;
51568 }
51569
51570 + if (gr_handle_chroot_pivot()) {
51571 + error = -EPERM;
51572 + path_put(&old);
51573 + goto out1;
51574 + }
51575 +
51576 read_lock(&current->fs->lock);
51577 root = current->fs->root;
51578 path_get(&current->fs->root);
51579 diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
51580 index b8b5b30..2bd9ccb 100644
51581 --- a/fs/ncpfs/dir.c
51582 +++ b/fs/ncpfs/dir.c
51583 @@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *dentry)
51584 int res, val = 0, len;
51585 __u8 __name[NCP_MAXPATHLEN + 1];
51586
51587 + pax_track_stack();
51588 +
51589 parent = dget_parent(dentry);
51590 dir = parent->d_inode;
51591
51592 @@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struc
51593 int error, res, len;
51594 __u8 __name[NCP_MAXPATHLEN + 1];
51595
51596 + pax_track_stack();
51597 +
51598 lock_kernel();
51599 error = -EIO;
51600 if (!ncp_conn_valid(server))
51601 @@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, int mode,
51602 int error, result, len;
51603 int opmode;
51604 __u8 __name[NCP_MAXPATHLEN + 1];
51605 -
51606 +
51607 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
51608 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
51609
51610 + pax_track_stack();
51611 +
51612 error = -EIO;
51613 lock_kernel();
51614 if (!ncp_conn_valid(server))
51615 @@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
51616 int error, len;
51617 __u8 __name[NCP_MAXPATHLEN + 1];
51618
51619 + pax_track_stack();
51620 +
51621 DPRINTK("ncp_mkdir: making %s/%s\n",
51622 dentry->d_parent->d_name.name, dentry->d_name.name);
51623
51624 @@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
51625 if (!ncp_conn_valid(server))
51626 goto out;
51627
51628 + pax_track_stack();
51629 +
51630 ncp_age_dentry(server, dentry);
51631 len = sizeof(__name);
51632 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
51633 @@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
51634 int old_len, new_len;
51635 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
51636
51637 + pax_track_stack();
51638 +
51639 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
51640 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
51641 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
51642 diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
51643 index cf98da1..da890a9 100644
51644 --- a/fs/ncpfs/inode.c
51645 +++ b/fs/ncpfs/inode.c
51646 @@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
51647 #endif
51648 struct ncp_entry_info finfo;
51649
51650 + pax_track_stack();
51651 +
51652 data.wdog_pid = NULL;
51653 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
51654 if (!server)
51655 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
51656 index bfaef7b..e9d03ca 100644
51657 --- a/fs/nfs/inode.c
51658 +++ b/fs/nfs/inode.c
51659 @@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
51660 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
51661 nfsi->attrtimeo_timestamp = jiffies;
51662
51663 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
51664 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
51665 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
51666 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
51667 else
51668 @@ -973,16 +973,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
51669 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
51670 }
51671
51672 -static atomic_long_t nfs_attr_generation_counter;
51673 +static atomic_long_unchecked_t nfs_attr_generation_counter;
51674
51675 static unsigned long nfs_read_attr_generation_counter(void)
51676 {
51677 - return atomic_long_read(&nfs_attr_generation_counter);
51678 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
51679 }
51680
51681 unsigned long nfs_inc_attr_generation_counter(void)
51682 {
51683 - return atomic_long_inc_return(&nfs_attr_generation_counter);
51684 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
51685 }
51686
51687 void nfs_fattr_init(struct nfs_fattr *fattr)
51688 diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
51689 index cc2f505..f6a236f 100644
51690 --- a/fs/nfsd/lockd.c
51691 +++ b/fs/nfsd/lockd.c
51692 @@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
51693 fput(filp);
51694 }
51695
51696 -static struct nlmsvc_binding nfsd_nlm_ops = {
51697 +static const struct nlmsvc_binding nfsd_nlm_ops = {
51698 .fopen = nlm_fopen, /* open file for locking */
51699 .fclose = nlm_fclose, /* close file */
51700 };
51701 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
51702 index cfc3391..dcc083a 100644
51703 --- a/fs/nfsd/nfs4state.c
51704 +++ b/fs/nfsd/nfs4state.c
51705 @@ -3459,6 +3459,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
51706 unsigned int cmd;
51707 int err;
51708
51709 + pax_track_stack();
51710 +
51711 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
51712 (long long) lock->lk_offset,
51713 (long long) lock->lk_length);
51714 diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
51715 index 4a82a96..0d5fb49 100644
51716 --- a/fs/nfsd/nfs4xdr.c
51717 +++ b/fs/nfsd/nfs4xdr.c
51718 @@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
51719 struct nfsd4_compoundres *resp = rqstp->rq_resp;
51720 u32 minorversion = resp->cstate.minorversion;
51721
51722 + pax_track_stack();
51723 +
51724 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
51725 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
51726 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
51727 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
51728 index 2e09588..596421d 100644
51729 --- a/fs/nfsd/vfs.c
51730 +++ b/fs/nfsd/vfs.c
51731 @@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
51732 } else {
51733 oldfs = get_fs();
51734 set_fs(KERNEL_DS);
51735 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
51736 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
51737 set_fs(oldfs);
51738 }
51739
51740 @@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
51741
51742 /* Write the data. */
51743 oldfs = get_fs(); set_fs(KERNEL_DS);
51744 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
51745 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
51746 set_fs(oldfs);
51747 if (host_err < 0)
51748 goto out_nfserr;
51749 @@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
51750 */
51751
51752 oldfs = get_fs(); set_fs(KERNEL_DS);
51753 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
51754 + host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
51755 set_fs(oldfs);
51756
51757 if (host_err < 0)
51758 diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
51759 index f6af760..d6b2b83 100644
51760 --- a/fs/nilfs2/ioctl.c
51761 +++ b/fs/nilfs2/ioctl.c
51762 @@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
51763 unsigned int cmd, void __user *argp)
51764 {
51765 struct nilfs_argv argv[5];
51766 - const static size_t argsz[5] = {
51767 + static const size_t argsz[5] = {
51768 sizeof(struct nilfs_vdesc),
51769 sizeof(struct nilfs_period),
51770 sizeof(__u64),
51771 diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
51772 index 7e54e52..9337248 100644
51773 --- a/fs/notify/dnotify/dnotify.c
51774 +++ b/fs/notify/dnotify/dnotify.c
51775 @@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsnotify_mark_entry *entry)
51776 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
51777 }
51778
51779 -static struct fsnotify_ops dnotify_fsnotify_ops = {
51780 +static const struct fsnotify_ops dnotify_fsnotify_ops = {
51781 .handle_event = dnotify_handle_event,
51782 .should_send_event = dnotify_should_send_event,
51783 .free_group_priv = NULL,
51784 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
51785 index b8bf53b..c518688 100644
51786 --- a/fs/notify/notification.c
51787 +++ b/fs/notify/notification.c
51788 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
51789 * get set to 0 so it will never get 'freed'
51790 */
51791 static struct fsnotify_event q_overflow_event;
51792 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
51793 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
51794
51795 /**
51796 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
51797 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
51798 */
51799 u32 fsnotify_get_cookie(void)
51800 {
51801 - return atomic_inc_return(&fsnotify_sync_cookie);
51802 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
51803 }
51804 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
51805
51806 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
51807 index 5a9e344..0f8cd28 100644
51808 --- a/fs/ntfs/dir.c
51809 +++ b/fs/ntfs/dir.c
51810 @@ -1328,7 +1328,7 @@ find_next_index_buffer:
51811 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
51812 ~(s64)(ndir->itype.index.block_size - 1)));
51813 /* Bounds checks. */
51814 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
51815 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
51816 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
51817 "inode 0x%lx or driver bug.", vdir->i_ino);
51818 goto err_out;
51819 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
51820 index 663c0e3..b6868e9 100644
51821 --- a/fs/ntfs/file.c
51822 +++ b/fs/ntfs/file.c
51823 @@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_inode_ops = {
51824 #endif /* NTFS_RW */
51825 };
51826
51827 -const struct file_operations ntfs_empty_file_ops = {};
51828 +const struct file_operations ntfs_empty_file_ops __read_only;
51829
51830 -const struct inode_operations ntfs_empty_inode_ops = {};
51831 +const struct inode_operations ntfs_empty_inode_ops __read_only;
51832 diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
51833 index 1cd2934..880b5d2 100644
51834 --- a/fs/ocfs2/cluster/masklog.c
51835 +++ b/fs/ocfs2/cluster/masklog.c
51836 @@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject *obj, struct attribute *attr,
51837 return mlog_mask_store(mlog_attr->mask, buf, count);
51838 }
51839
51840 -static struct sysfs_ops mlog_attr_ops = {
51841 +static const struct sysfs_ops mlog_attr_ops = {
51842 .show = mlog_show,
51843 .store = mlog_store,
51844 };
51845 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
51846 index ac10f83..2cd2607 100644
51847 --- a/fs/ocfs2/localalloc.c
51848 +++ b/fs/ocfs2/localalloc.c
51849 @@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
51850 goto bail;
51851 }
51852
51853 - atomic_inc(&osb->alloc_stats.moves);
51854 + atomic_inc_unchecked(&osb->alloc_stats.moves);
51855
51856 status = 0;
51857 bail:
51858 diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
51859 index f010b22..9f9ed34 100644
51860 --- a/fs/ocfs2/namei.c
51861 +++ b/fs/ocfs2/namei.c
51862 @@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *old_dir,
51863 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
51864 struct ocfs2_dir_lookup_result target_insert = { NULL, };
51865
51866 + pax_track_stack();
51867 +
51868 /* At some point it might be nice to break this function up a
51869 * bit. */
51870
51871 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
51872 index d963d86..914cfbd 100644
51873 --- a/fs/ocfs2/ocfs2.h
51874 +++ b/fs/ocfs2/ocfs2.h
51875 @@ -217,11 +217,11 @@ enum ocfs2_vol_state
51876
51877 struct ocfs2_alloc_stats
51878 {
51879 - atomic_t moves;
51880 - atomic_t local_data;
51881 - atomic_t bitmap_data;
51882 - atomic_t bg_allocs;
51883 - atomic_t bg_extends;
51884 + atomic_unchecked_t moves;
51885 + atomic_unchecked_t local_data;
51886 + atomic_unchecked_t bitmap_data;
51887 + atomic_unchecked_t bg_allocs;
51888 + atomic_unchecked_t bg_extends;
51889 };
51890
51891 enum ocfs2_local_alloc_state
51892 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
51893 index 79b5dac..d322952 100644
51894 --- a/fs/ocfs2/suballoc.c
51895 +++ b/fs/ocfs2/suballoc.c
51896 @@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
51897 mlog_errno(status);
51898 goto bail;
51899 }
51900 - atomic_inc(&osb->alloc_stats.bg_extends);
51901 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
51902
51903 /* You should never ask for this much metadata */
51904 BUG_ON(bits_wanted >
51905 @@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_super *osb,
51906 mlog_errno(status);
51907 goto bail;
51908 }
51909 - atomic_inc(&osb->alloc_stats.bg_allocs);
51910 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
51911
51912 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
51913 ac->ac_bits_given += (*num_bits);
51914 @@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_super *osb,
51915 mlog_errno(status);
51916 goto bail;
51917 }
51918 - atomic_inc(&osb->alloc_stats.bg_allocs);
51919 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
51920
51921 BUG_ON(num_bits != 1);
51922
51923 @@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
51924 cluster_start,
51925 num_clusters);
51926 if (!status)
51927 - atomic_inc(&osb->alloc_stats.local_data);
51928 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
51929 } else {
51930 if (min_clusters > (osb->bitmap_cpg - 1)) {
51931 /* The only paths asking for contiguousness
51932 @@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
51933 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
51934 bg_blkno,
51935 bg_bit_off);
51936 - atomic_inc(&osb->alloc_stats.bitmap_data);
51937 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
51938 }
51939 }
51940 if (status < 0) {
51941 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
51942 index 9f55be4..a3f8048 100644
51943 --- a/fs/ocfs2/super.c
51944 +++ b/fs/ocfs2/super.c
51945 @@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
51946 "%10s => GlobalAllocs: %d LocalAllocs: %d "
51947 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
51948 "Stats",
51949 - atomic_read(&osb->alloc_stats.bitmap_data),
51950 - atomic_read(&osb->alloc_stats.local_data),
51951 - atomic_read(&osb->alloc_stats.bg_allocs),
51952 - atomic_read(&osb->alloc_stats.moves),
51953 - atomic_read(&osb->alloc_stats.bg_extends));
51954 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
51955 + atomic_read_unchecked(&osb->alloc_stats.local_data),
51956 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
51957 + atomic_read_unchecked(&osb->alloc_stats.moves),
51958 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
51959
51960 out += snprintf(buf + out, len - out,
51961 "%10s => State: %u Descriptor: %llu Size: %u bits "
51962 @@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
51963 spin_lock_init(&osb->osb_xattr_lock);
51964 ocfs2_init_inode_steal_slot(osb);
51965
51966 - atomic_set(&osb->alloc_stats.moves, 0);
51967 - atomic_set(&osb->alloc_stats.local_data, 0);
51968 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
51969 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
51970 - atomic_set(&osb->alloc_stats.bg_extends, 0);
51971 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
51972 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
51973 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
51974 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
51975 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
51976
51977 /* Copy the blockcheck stats from the superblock probe */
51978 osb->osb_ecc_stats = *stats;
51979 diff --git a/fs/open.c b/fs/open.c
51980 index 4f01e06..091f6c3 100644
51981 --- a/fs/open.c
51982 +++ b/fs/open.c
51983 @@ -275,6 +275,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
51984 error = locks_verify_truncate(inode, NULL, length);
51985 if (!error)
51986 error = security_path_truncate(&path, length, 0);
51987 +
51988 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
51989 + error = -EACCES;
51990 +
51991 if (!error) {
51992 vfs_dq_init(inode);
51993 error = do_truncate(path.dentry, length, 0, NULL);
51994 @@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
51995 if (__mnt_is_readonly(path.mnt))
51996 res = -EROFS;
51997
51998 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
51999 + res = -EACCES;
52000 +
52001 out_path_release:
52002 path_put(&path);
52003 out:
52004 @@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
52005 if (error)
52006 goto dput_and_out;
52007
52008 + gr_log_chdir(path.dentry, path.mnt);
52009 +
52010 set_fs_pwd(current->fs, &path);
52011
52012 dput_and_out:
52013 @@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
52014 goto out_putf;
52015
52016 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
52017 +
52018 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
52019 + error = -EPERM;
52020 +
52021 + if (!error)
52022 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
52023 +
52024 if (!error)
52025 set_fs_pwd(current->fs, &file->f_path);
52026 out_putf:
52027 @@ -588,7 +604,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
52028 if (!capable(CAP_SYS_CHROOT))
52029 goto dput_and_out;
52030
52031 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
52032 + goto dput_and_out;
52033 +
52034 set_fs_root(current->fs, &path);
52035 +
52036 + gr_handle_chroot_chdir(&path);
52037 +
52038 error = 0;
52039 dput_and_out:
52040 path_put(&path);
52041 @@ -616,12 +638,27 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd, mode_t, mode)
52042 err = mnt_want_write_file(file);
52043 if (err)
52044 goto out_putf;
52045 +
52046 mutex_lock(&inode->i_mutex);
52047 +
52048 + if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
52049 + err = -EACCES;
52050 + goto out_unlock;
52051 + }
52052 +
52053 if (mode == (mode_t) -1)
52054 mode = inode->i_mode;
52055 +
52056 + if (gr_handle_chroot_chmod(dentry, file->f_path.mnt, mode)) {
52057 + err = -EPERM;
52058 + goto out_unlock;
52059 + }
52060 +
52061 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
52062 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
52063 err = notify_change(dentry, &newattrs);
52064 +
52065 +out_unlock:
52066 mutex_unlock(&inode->i_mutex);
52067 mnt_drop_write(file->f_path.mnt);
52068 out_putf:
52069 @@ -645,12 +682,27 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, mode_t, mode)
52070 error = mnt_want_write(path.mnt);
52071 if (error)
52072 goto dput_and_out;
52073 +
52074 mutex_lock(&inode->i_mutex);
52075 +
52076 + if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
52077 + error = -EACCES;
52078 + goto out_unlock;
52079 + }
52080 +
52081 if (mode == (mode_t) -1)
52082 mode = inode->i_mode;
52083 +
52084 + if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
52085 + error = -EACCES;
52086 + goto out_unlock;
52087 + }
52088 +
52089 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
52090 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
52091 error = notify_change(path.dentry, &newattrs);
52092 +
52093 +out_unlock:
52094 mutex_unlock(&inode->i_mutex);
52095 mnt_drop_write(path.mnt);
52096 dput_and_out:
52097 @@ -664,12 +716,15 @@ SYSCALL_DEFINE2(chmod, const char __user *, filename, mode_t, mode)
52098 return sys_fchmodat(AT_FDCWD, filename, mode);
52099 }
52100
52101 -static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
52102 +static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
52103 {
52104 struct inode *inode = dentry->d_inode;
52105 int error;
52106 struct iattr newattrs;
52107
52108 + if (!gr_acl_handle_chown(dentry, mnt))
52109 + return -EACCES;
52110 +
52111 newattrs.ia_valid = ATTR_CTIME;
52112 if (user != (uid_t) -1) {
52113 newattrs.ia_valid |= ATTR_UID;
52114 @@ -700,7 +755,7 @@ SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group)
52115 error = mnt_want_write(path.mnt);
52116 if (error)
52117 goto out_release;
52118 - error = chown_common(path.dentry, user, group);
52119 + error = chown_common(path.dentry, user, group, path.mnt);
52120 mnt_drop_write(path.mnt);
52121 out_release:
52122 path_put(&path);
52123 @@ -725,7 +780,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user,
52124 error = mnt_want_write(path.mnt);
52125 if (error)
52126 goto out_release;
52127 - error = chown_common(path.dentry, user, group);
52128 + error = chown_common(path.dentry, user, group, path.mnt);
52129 mnt_drop_write(path.mnt);
52130 out_release:
52131 path_put(&path);
52132 @@ -744,7 +799,7 @@ SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group
52133 error = mnt_want_write(path.mnt);
52134 if (error)
52135 goto out_release;
52136 - error = chown_common(path.dentry, user, group);
52137 + error = chown_common(path.dentry, user, group, path.mnt);
52138 mnt_drop_write(path.mnt);
52139 out_release:
52140 path_put(&path);
52141 @@ -767,7 +822,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
52142 goto out_fput;
52143 dentry = file->f_path.dentry;
52144 audit_inode(NULL, dentry);
52145 - error = chown_common(dentry, user, group);
52146 + error = chown_common(dentry, user, group, file->f_path.mnt);
52147 mnt_drop_write(file->f_path.mnt);
52148 out_fput:
52149 fput(file);
52150 @@ -1036,7 +1091,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
52151 if (!IS_ERR(tmp)) {
52152 fd = get_unused_fd_flags(flags);
52153 if (fd >= 0) {
52154 - struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
52155 + struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
52156 if (IS_ERR(f)) {
52157 put_unused_fd(fd);
52158 fd = PTR_ERR(f);
52159 diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
52160 index 6ab70f4..f4103d1 100644
52161 --- a/fs/partitions/efi.c
52162 +++ b/fs/partitions/efi.c
52163 @@ -231,14 +231,14 @@ alloc_read_gpt_entries(struct block_device *bdev, gpt_header *gpt)
52164 if (!bdev || !gpt)
52165 return NULL;
52166
52167 + if (!le32_to_cpu(gpt->num_partition_entries))
52168 + return NULL;
52169 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
52170 + if (!pte)
52171 + return NULL;
52172 +
52173 count = le32_to_cpu(gpt->num_partition_entries) *
52174 le32_to_cpu(gpt->sizeof_partition_entry);
52175 - if (!count)
52176 - return NULL;
52177 - pte = kzalloc(count, GFP_KERNEL);
52178 - if (!pte)
52179 - return NULL;
52180 -
52181 if (read_lba(bdev, le64_to_cpu(gpt->partition_entry_lba),
52182 (u8 *) pte,
52183 count) < count) {
52184 diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
52185 index dd6efdb..3babc6c 100644
52186 --- a/fs/partitions/ldm.c
52187 +++ b/fs/partitions/ldm.c
52188 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
52189 ldm_error ("A VBLK claims to have %d parts.", num);
52190 return false;
52191 }
52192 +
52193 if (rec >= num) {
52194 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
52195 return false;
52196 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
52197 goto found;
52198 }
52199
52200 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
52201 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
52202 if (!f) {
52203 ldm_crit ("Out of memory.");
52204 return false;
52205 diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c
52206 index 5765198..7f8e9e0 100644
52207 --- a/fs/partitions/mac.c
52208 +++ b/fs/partitions/mac.c
52209 @@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitions *state, struct block_device *bdev)
52210 return 0; /* not a MacOS disk */
52211 }
52212 blocks_in_map = be32_to_cpu(part->map_count);
52213 - if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
52214 - put_dev_sector(sect);
52215 - return 0;
52216 - }
52217 printk(" [mac]");
52218 + if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
52219 + put_dev_sector(sect);
52220 + return 0;
52221 + }
52222 for (slot = 1; slot <= blocks_in_map; ++slot) {
52223 int pos = slot * secsize;
52224 put_dev_sector(sect);
52225 diff --git a/fs/pipe.c b/fs/pipe.c
52226 index d0cc080..8a6f211 100644
52227 --- a/fs/pipe.c
52228 +++ b/fs/pipe.c
52229 @@ -401,9 +401,9 @@ redo:
52230 }
52231 if (bufs) /* More to do? */
52232 continue;
52233 - if (!pipe->writers)
52234 + if (!atomic_read(&pipe->writers))
52235 break;
52236 - if (!pipe->waiting_writers) {
52237 + if (!atomic_read(&pipe->waiting_writers)) {
52238 /* syscall merging: Usually we must not sleep
52239 * if O_NONBLOCK is set, or if we got some data.
52240 * But if a writer sleeps in kernel space, then
52241 @@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
52242 mutex_lock(&inode->i_mutex);
52243 pipe = inode->i_pipe;
52244
52245 - if (!pipe->readers) {
52246 + if (!atomic_read(&pipe->readers)) {
52247 send_sig(SIGPIPE, current, 0);
52248 ret = -EPIPE;
52249 goto out;
52250 @@ -511,7 +511,7 @@ redo1:
52251 for (;;) {
52252 int bufs;
52253
52254 - if (!pipe->readers) {
52255 + if (!atomic_read(&pipe->readers)) {
52256 send_sig(SIGPIPE, current, 0);
52257 if (!ret)
52258 ret = -EPIPE;
52259 @@ -597,9 +597,9 @@ redo2:
52260 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
52261 do_wakeup = 0;
52262 }
52263 - pipe->waiting_writers++;
52264 + atomic_inc(&pipe->waiting_writers);
52265 pipe_wait(pipe);
52266 - pipe->waiting_writers--;
52267 + atomic_dec(&pipe->waiting_writers);
52268 }
52269 out:
52270 mutex_unlock(&inode->i_mutex);
52271 @@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table *wait)
52272 mask = 0;
52273 if (filp->f_mode & FMODE_READ) {
52274 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
52275 - if (!pipe->writers && filp->f_version != pipe->w_counter)
52276 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
52277 mask |= POLLHUP;
52278 }
52279
52280 @@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table *wait)
52281 * Most Unices do not set POLLERR for FIFOs but on Linux they
52282 * behave exactly like pipes for poll().
52283 */
52284 - if (!pipe->readers)
52285 + if (!atomic_read(&pipe->readers))
52286 mask |= POLLERR;
52287 }
52288
52289 @@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int decr, int decw)
52290
52291 mutex_lock(&inode->i_mutex);
52292 pipe = inode->i_pipe;
52293 - pipe->readers -= decr;
52294 - pipe->writers -= decw;
52295 + atomic_sub(decr, &pipe->readers);
52296 + atomic_sub(decw, &pipe->writers);
52297
52298 - if (!pipe->readers && !pipe->writers) {
52299 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
52300 free_pipe_info(inode);
52301 } else {
52302 wake_up_interruptible_sync(&pipe->wait);
52303 @@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
52304
52305 if (inode->i_pipe) {
52306 ret = 0;
52307 - inode->i_pipe->readers++;
52308 + atomic_inc(&inode->i_pipe->readers);
52309 }
52310
52311 mutex_unlock(&inode->i_mutex);
52312 @@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
52313
52314 if (inode->i_pipe) {
52315 ret = 0;
52316 - inode->i_pipe->writers++;
52317 + atomic_inc(&inode->i_pipe->writers);
52318 }
52319
52320 mutex_unlock(&inode->i_mutex);
52321 @@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
52322 if (inode->i_pipe) {
52323 ret = 0;
52324 if (filp->f_mode & FMODE_READ)
52325 - inode->i_pipe->readers++;
52326 + atomic_inc(&inode->i_pipe->readers);
52327 if (filp->f_mode & FMODE_WRITE)
52328 - inode->i_pipe->writers++;
52329 + atomic_inc(&inode->i_pipe->writers);
52330 }
52331
52332 mutex_unlock(&inode->i_mutex);
52333 @@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
52334 inode->i_pipe = NULL;
52335 }
52336
52337 -static struct vfsmount *pipe_mnt __read_mostly;
52338 +struct vfsmount *pipe_mnt __read_mostly;
52339 static int pipefs_delete_dentry(struct dentry *dentry)
52340 {
52341 /*
52342 @@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(void)
52343 goto fail_iput;
52344 inode->i_pipe = pipe;
52345
52346 - pipe->readers = pipe->writers = 1;
52347 + atomic_set(&pipe->readers, 1);
52348 + atomic_set(&pipe->writers, 1);
52349 inode->i_fop = &rdwr_pipefifo_fops;
52350
52351 /*
52352 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
52353 index 50f8f06..c5755df 100644
52354 --- a/fs/proc/Kconfig
52355 +++ b/fs/proc/Kconfig
52356 @@ -30,12 +30,12 @@ config PROC_FS
52357
52358 config PROC_KCORE
52359 bool "/proc/kcore support" if !ARM
52360 - depends on PROC_FS && MMU
52361 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
52362
52363 config PROC_VMCORE
52364 bool "/proc/vmcore support (EXPERIMENTAL)"
52365 - depends on PROC_FS && CRASH_DUMP
52366 - default y
52367 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
52368 + default n
52369 help
52370 Exports the dump image of crashed kernel in ELF format.
52371
52372 @@ -59,8 +59,8 @@ config PROC_SYSCTL
52373 limited in memory.
52374
52375 config PROC_PAGE_MONITOR
52376 - default y
52377 - depends on PROC_FS && MMU
52378 + default n
52379 + depends on PROC_FS && MMU && !GRKERNSEC
52380 bool "Enable /proc page monitoring" if EMBEDDED
52381 help
52382 Various /proc files exist to monitor process memory utilization:
52383 diff --git a/fs/proc/array.c b/fs/proc/array.c
52384 index c5ef152..1363194 100644
52385 --- a/fs/proc/array.c
52386 +++ b/fs/proc/array.c
52387 @@ -60,6 +60,7 @@
52388 #include <linux/tty.h>
52389 #include <linux/string.h>
52390 #include <linux/mman.h>
52391 +#include <linux/grsecurity.h>
52392 #include <linux/proc_fs.h>
52393 #include <linux/ioport.h>
52394 #include <linux/uaccess.h>
52395 @@ -321,6 +322,21 @@ static inline void task_context_switch_counts(struct seq_file *m,
52396 p->nivcsw);
52397 }
52398
52399 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
52400 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
52401 +{
52402 + if (p->mm)
52403 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
52404 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
52405 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
52406 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
52407 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
52408 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
52409 + else
52410 + seq_printf(m, "PaX:\t-----\n");
52411 +}
52412 +#endif
52413 +
52414 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
52415 struct pid *pid, struct task_struct *task)
52416 {
52417 @@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
52418 task_cap(m, task);
52419 cpuset_task_status_allowed(m, task);
52420 task_context_switch_counts(m, task);
52421 +
52422 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
52423 + task_pax(m, task);
52424 +#endif
52425 +
52426 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
52427 + task_grsec_rbac(m, task);
52428 +#endif
52429 +
52430 return 0;
52431 }
52432
52433 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52434 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
52435 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
52436 + _mm->pax_flags & MF_PAX_SEGMEXEC))
52437 +#endif
52438 +
52439 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52440 struct pid *pid, struct task_struct *task, int whole)
52441 {
52442 @@ -358,9 +389,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52443 cputime_t cutime, cstime, utime, stime;
52444 cputime_t cgtime, gtime;
52445 unsigned long rsslim = 0;
52446 - char tcomm[sizeof(task->comm)];
52447 + char tcomm[sizeof(task->comm)] = { 0 };
52448 unsigned long flags;
52449
52450 + pax_track_stack();
52451 +
52452 state = *get_task_state(task);
52453 vsize = eip = esp = 0;
52454 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
52455 @@ -433,6 +466,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52456 gtime = task_gtime(task);
52457 }
52458
52459 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52460 + if (PAX_RAND_FLAGS(mm)) {
52461 + eip = 0;
52462 + esp = 0;
52463 + wchan = 0;
52464 + }
52465 +#endif
52466 +#ifdef CONFIG_GRKERNSEC_HIDESYM
52467 + wchan = 0;
52468 + eip =0;
52469 + esp =0;
52470 +#endif
52471 +
52472 /* scale priority and nice values from timeslices to -20..20 */
52473 /* to make it look like a "normal" Unix priority/nice value */
52474 priority = task_prio(task);
52475 @@ -473,9 +519,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52476 vsize,
52477 mm ? get_mm_rss(mm) : 0,
52478 rsslim,
52479 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52480 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
52481 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
52482 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
52483 +#else
52484 mm ? (permitted ? mm->start_code : 1) : 0,
52485 mm ? (permitted ? mm->end_code : 1) : 0,
52486 (permitted && mm) ? mm->start_stack : 0,
52487 +#endif
52488 esp,
52489 eip,
52490 /* The signal information here is obsolete.
52491 @@ -528,3 +580,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
52492
52493 return 0;
52494 }
52495 +
52496 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
52497 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
52498 +{
52499 + u32 curr_ip = 0;
52500 + unsigned long flags;
52501 +
52502 + if (lock_task_sighand(task, &flags)) {
52503 + curr_ip = task->signal->curr_ip;
52504 + unlock_task_sighand(task, &flags);
52505 + }
52506 +
52507 + return sprintf(buffer, "%pI4\n", &curr_ip);
52508 +}
52509 +#endif
52510 diff --git a/fs/proc/base.c b/fs/proc/base.c
52511 index 67f7dc0..e95ea4f 100644
52512 --- a/fs/proc/base.c
52513 +++ b/fs/proc/base.c
52514 @@ -102,6 +102,22 @@ struct pid_entry {
52515 union proc_op op;
52516 };
52517
52518 +struct getdents_callback {
52519 + struct linux_dirent __user * current_dir;
52520 + struct linux_dirent __user * previous;
52521 + struct file * file;
52522 + int count;
52523 + int error;
52524 +};
52525 +
52526 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
52527 + loff_t offset, u64 ino, unsigned int d_type)
52528 +{
52529 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
52530 + buf->error = -EINVAL;
52531 + return 0;
52532 +}
52533 +
52534 #define NOD(NAME, MODE, IOP, FOP, OP) { \
52535 .name = (NAME), \
52536 .len = sizeof(NAME) - 1, \
52537 @@ -213,6 +229,9 @@ static int check_mem_permission(struct task_struct *task)
52538 if (task == current)
52539 return 0;
52540
52541 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
52542 + return -EPERM;
52543 +
52544 /*
52545 * If current is actively ptrace'ing, and would also be
52546 * permitted to freshly attach with ptrace now, permit it.
52547 @@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
52548 if (!mm->arg_end)
52549 goto out_mm; /* Shh! No looking before we're done */
52550
52551 + if (gr_acl_handle_procpidmem(task))
52552 + goto out_mm;
52553 +
52554 len = mm->arg_end - mm->arg_start;
52555
52556 if (len > PAGE_SIZE)
52557 @@ -287,12 +309,28 @@ out:
52558 return res;
52559 }
52560
52561 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52562 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
52563 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
52564 + _mm->pax_flags & MF_PAX_SEGMEXEC))
52565 +#endif
52566 +
52567 static int proc_pid_auxv(struct task_struct *task, char *buffer)
52568 {
52569 int res = 0;
52570 struct mm_struct *mm = get_task_mm(task);
52571 if (mm) {
52572 unsigned int nwords = 0;
52573 +
52574 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52575 + /* allow if we're currently ptracing this task */
52576 + if (PAX_RAND_FLAGS(mm) &&
52577 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
52578 + mmput(mm);
52579 + return 0;
52580 + }
52581 +#endif
52582 +
52583 do {
52584 nwords += 2;
52585 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
52586 @@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
52587 }
52588
52589
52590 -#ifdef CONFIG_KALLSYMS
52591 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
52592 /*
52593 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
52594 * Returns the resolved symbol. If that fails, simply return the address.
52595 @@ -345,7 +383,7 @@ static void unlock_trace(struct task_struct *task)
52596 mutex_unlock(&task->cred_guard_mutex);
52597 }
52598
52599 -#ifdef CONFIG_STACKTRACE
52600 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
52601
52602 #define MAX_STACK_TRACE_DEPTH 64
52603
52604 @@ -545,7 +583,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
52605 return count;
52606 }
52607
52608 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
52609 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
52610 static int proc_pid_syscall(struct task_struct *task, char *buffer)
52611 {
52612 long nr;
52613 @@ -574,7 +612,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
52614 /************************************************************************/
52615
52616 /* permission checks */
52617 -static int proc_fd_access_allowed(struct inode *inode)
52618 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
52619 {
52620 struct task_struct *task;
52621 int allowed = 0;
52622 @@ -584,7 +622,10 @@ static int proc_fd_access_allowed(struct inode *inode)
52623 */
52624 task = get_proc_task(inode);
52625 if (task) {
52626 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
52627 + if (log)
52628 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
52629 + else
52630 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
52631 put_task_struct(task);
52632 }
52633 return allowed;
52634 @@ -963,6 +1004,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
52635 if (!task)
52636 goto out_no_task;
52637
52638 + if (gr_acl_handle_procpidmem(task))
52639 + goto out;
52640 +
52641 if (!ptrace_may_access(task, PTRACE_MODE_READ))
52642 goto out;
52643
52644 @@ -1377,7 +1421,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
52645 path_put(&nd->path);
52646
52647 /* Are we allowed to snoop on the tasks file descriptors? */
52648 - if (!proc_fd_access_allowed(inode))
52649 + if (!proc_fd_access_allowed(inode,0))
52650 goto out;
52651
52652 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
52653 @@ -1417,8 +1461,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
52654 struct path path;
52655
52656 /* Are we allowed to snoop on the tasks file descriptors? */
52657 - if (!proc_fd_access_allowed(inode))
52658 - goto out;
52659 + /* logging this is needed for learning on chromium to work properly,
52660 + but we don't want to flood the logs from 'ps' which does a readlink
52661 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
52662 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
52663 + */
52664 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
52665 + if (!proc_fd_access_allowed(inode,0))
52666 + goto out;
52667 + } else {
52668 + if (!proc_fd_access_allowed(inode,1))
52669 + goto out;
52670 + }
52671
52672 error = PROC_I(inode)->op.proc_get_link(inode, &path);
52673 if (error)
52674 @@ -1483,7 +1537,11 @@ static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_st
52675 rcu_read_lock();
52676 cred = __task_cred(task);
52677 inode->i_uid = cred->euid;
52678 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
52679 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
52680 +#else
52681 inode->i_gid = cred->egid;
52682 +#endif
52683 rcu_read_unlock();
52684 }
52685 security_task_to_inode(task, inode);
52686 @@ -1501,6 +1559,9 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
52687 struct inode *inode = dentry->d_inode;
52688 struct task_struct *task;
52689 const struct cred *cred;
52690 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52691 + const struct cred *tmpcred = current_cred();
52692 +#endif
52693
52694 generic_fillattr(inode, stat);
52695
52696 @@ -1508,13 +1569,41 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
52697 stat->uid = 0;
52698 stat->gid = 0;
52699 task = pid_task(proc_pid(inode), PIDTYPE_PID);
52700 +
52701 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
52702 + rcu_read_unlock();
52703 + return -ENOENT;
52704 + }
52705 +
52706 if (task) {
52707 + cred = __task_cred(task);
52708 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52709 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
52710 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
52711 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
52712 +#endif
52713 + ) {
52714 +#endif
52715 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
52716 +#ifdef CONFIG_GRKERNSEC_PROC_USER
52717 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
52718 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52719 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
52720 +#endif
52721 task_dumpable(task)) {
52722 - cred = __task_cred(task);
52723 stat->uid = cred->euid;
52724 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
52725 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
52726 +#else
52727 stat->gid = cred->egid;
52728 +#endif
52729 }
52730 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52731 + } else {
52732 + rcu_read_unlock();
52733 + return -ENOENT;
52734 + }
52735 +#endif
52736 }
52737 rcu_read_unlock();
52738 return 0;
52739 @@ -1545,11 +1634,20 @@ static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
52740
52741 if (task) {
52742 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
52743 +#ifdef CONFIG_GRKERNSEC_PROC_USER
52744 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
52745 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52746 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
52747 +#endif
52748 task_dumpable(task)) {
52749 rcu_read_lock();
52750 cred = __task_cred(task);
52751 inode->i_uid = cred->euid;
52752 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
52753 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
52754 +#else
52755 inode->i_gid = cred->egid;
52756 +#endif
52757 rcu_read_unlock();
52758 } else {
52759 inode->i_uid = 0;
52760 @@ -1670,7 +1768,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
52761 int fd = proc_fd(inode);
52762
52763 if (task) {
52764 - files = get_files_struct(task);
52765 + if (!gr_acl_handle_procpidmem(task))
52766 + files = get_files_struct(task);
52767 put_task_struct(task);
52768 }
52769 if (files) {
52770 @@ -1922,12 +2021,22 @@ static const struct file_operations proc_fd_operations = {
52771 static int proc_fd_permission(struct inode *inode, int mask)
52772 {
52773 int rv;
52774 + struct task_struct *task;
52775
52776 rv = generic_permission(inode, mask, NULL);
52777 - if (rv == 0)
52778 - return 0;
52779 +
52780 if (task_pid(current) == proc_pid(inode))
52781 rv = 0;
52782 +
52783 + task = get_proc_task(inode);
52784 + if (task == NULL)
52785 + return rv;
52786 +
52787 + if (gr_acl_handle_procpidmem(task))
52788 + rv = -EACCES;
52789 +
52790 + put_task_struct(task);
52791 +
52792 return rv;
52793 }
52794
52795 @@ -2036,6 +2145,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
52796 if (!task)
52797 goto out_no_task;
52798
52799 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
52800 + goto out;
52801 +
52802 /*
52803 * Yes, it does not scale. And it should not. Don't add
52804 * new entries into /proc/<tgid>/ without very good reasons.
52805 @@ -2080,6 +2192,9 @@ static int proc_pident_readdir(struct file *filp,
52806 if (!task)
52807 goto out_no_task;
52808
52809 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
52810 + goto out;
52811 +
52812 ret = 0;
52813 i = filp->f_pos;
52814 switch (i) {
52815 @@ -2347,7 +2462,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
52816 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
52817 void *cookie)
52818 {
52819 - char *s = nd_get_link(nd);
52820 + const char *s = nd_get_link(nd);
52821 if (!IS_ERR(s))
52822 __putname(s);
52823 }
52824 @@ -2553,7 +2668,7 @@ static const struct pid_entry tgid_base_stuff[] = {
52825 #ifdef CONFIG_SCHED_DEBUG
52826 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
52827 #endif
52828 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
52829 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
52830 INF("syscall", S_IRUGO, proc_pid_syscall),
52831 #endif
52832 INF("cmdline", S_IRUGO, proc_pid_cmdline),
52833 @@ -2578,10 +2693,10 @@ static const struct pid_entry tgid_base_stuff[] = {
52834 #ifdef CONFIG_SECURITY
52835 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
52836 #endif
52837 -#ifdef CONFIG_KALLSYMS
52838 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
52839 INF("wchan", S_IRUGO, proc_pid_wchan),
52840 #endif
52841 -#ifdef CONFIG_STACKTRACE
52842 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
52843 ONE("stack", S_IRUGO, proc_pid_stack),
52844 #endif
52845 #ifdef CONFIG_SCHEDSTATS
52846 @@ -2611,6 +2726,9 @@ static const struct pid_entry tgid_base_stuff[] = {
52847 #ifdef CONFIG_TASK_IO_ACCOUNTING
52848 INF("io", S_IRUSR, proc_tgid_io_accounting),
52849 #endif
52850 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
52851 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
52852 +#endif
52853 };
52854
52855 static int proc_tgid_base_readdir(struct file * filp,
52856 @@ -2735,7 +2853,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
52857 if (!inode)
52858 goto out;
52859
52860 +#ifdef CONFIG_GRKERNSEC_PROC_USER
52861 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
52862 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52863 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
52864 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
52865 +#else
52866 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
52867 +#endif
52868 inode->i_op = &proc_tgid_base_inode_operations;
52869 inode->i_fop = &proc_tgid_base_operations;
52870 inode->i_flags|=S_IMMUTABLE;
52871 @@ -2777,7 +2902,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
52872 if (!task)
52873 goto out;
52874
52875 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
52876 + goto out_put_task;
52877 +
52878 result = proc_pid_instantiate(dir, dentry, task, NULL);
52879 +out_put_task:
52880 put_task_struct(task);
52881 out:
52882 return result;
52883 @@ -2842,6 +2971,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
52884 {
52885 unsigned int nr;
52886 struct task_struct *reaper;
52887 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52888 + const struct cred *tmpcred = current_cred();
52889 + const struct cred *itercred;
52890 +#endif
52891 + filldir_t __filldir = filldir;
52892 struct tgid_iter iter;
52893 struct pid_namespace *ns;
52894
52895 @@ -2865,8 +2999,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
52896 for (iter = next_tgid(ns, iter);
52897 iter.task;
52898 iter.tgid += 1, iter = next_tgid(ns, iter)) {
52899 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52900 + rcu_read_lock();
52901 + itercred = __task_cred(iter.task);
52902 +#endif
52903 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
52904 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52905 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
52906 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
52907 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
52908 +#endif
52909 + )
52910 +#endif
52911 + )
52912 + __filldir = &gr_fake_filldir;
52913 + else
52914 + __filldir = filldir;
52915 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52916 + rcu_read_unlock();
52917 +#endif
52918 filp->f_pos = iter.tgid + TGID_OFFSET;
52919 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
52920 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
52921 put_task_struct(iter.task);
52922 goto out;
52923 }
52924 @@ -2892,7 +3045,7 @@ static const struct pid_entry tid_base_stuff[] = {
52925 #ifdef CONFIG_SCHED_DEBUG
52926 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
52927 #endif
52928 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
52929 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
52930 INF("syscall", S_IRUGO, proc_pid_syscall),
52931 #endif
52932 INF("cmdline", S_IRUGO, proc_pid_cmdline),
52933 @@ -2916,10 +3069,10 @@ static const struct pid_entry tid_base_stuff[] = {
52934 #ifdef CONFIG_SECURITY
52935 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
52936 #endif
52937 -#ifdef CONFIG_KALLSYMS
52938 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
52939 INF("wchan", S_IRUGO, proc_pid_wchan),
52940 #endif
52941 -#ifdef CONFIG_STACKTRACE
52942 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
52943 ONE("stack", S_IRUGO, proc_pid_stack),
52944 #endif
52945 #ifdef CONFIG_SCHEDSTATS
52946 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
52947 index 82676e3..5f8518a 100644
52948 --- a/fs/proc/cmdline.c
52949 +++ b/fs/proc/cmdline.c
52950 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
52951
52952 static int __init proc_cmdline_init(void)
52953 {
52954 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
52955 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
52956 +#else
52957 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
52958 +#endif
52959 return 0;
52960 }
52961 module_init(proc_cmdline_init);
52962 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
52963 index 59ee7da..469b4b6 100644
52964 --- a/fs/proc/devices.c
52965 +++ b/fs/proc/devices.c
52966 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
52967
52968 static int __init proc_devices_init(void)
52969 {
52970 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
52971 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
52972 +#else
52973 proc_create("devices", 0, NULL, &proc_devinfo_operations);
52974 +#endif
52975 return 0;
52976 }
52977 module_init(proc_devices_init);
52978 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
52979 index d78ade3..81767f9 100644
52980 --- a/fs/proc/inode.c
52981 +++ b/fs/proc/inode.c
52982 @@ -18,12 +18,19 @@
52983 #include <linux/module.h>
52984 #include <linux/smp_lock.h>
52985 #include <linux/sysctl.h>
52986 +#include <linux/grsecurity.h>
52987
52988 #include <asm/system.h>
52989 #include <asm/uaccess.h>
52990
52991 #include "internal.h"
52992
52993 +#ifdef CONFIG_PROC_SYSCTL
52994 +extern const struct inode_operations proc_sys_inode_operations;
52995 +extern const struct inode_operations proc_sys_dir_operations;
52996 +#endif
52997 +
52998 +
52999 struct proc_dir_entry *de_get(struct proc_dir_entry *de)
53000 {
53001 atomic_inc(&de->count);
53002 @@ -62,6 +69,13 @@ static void proc_delete_inode(struct inode *inode)
53003 de_put(de);
53004 if (PROC_I(inode)->sysctl)
53005 sysctl_head_put(PROC_I(inode)->sysctl);
53006 +
53007 +#ifdef CONFIG_PROC_SYSCTL
53008 + if (inode->i_op == &proc_sys_inode_operations ||
53009 + inode->i_op == &proc_sys_dir_operations)
53010 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
53011 +#endif
53012 +
53013 clear_inode(inode);
53014 }
53015
53016 @@ -457,7 +471,11 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
53017 if (de->mode) {
53018 inode->i_mode = de->mode;
53019 inode->i_uid = de->uid;
53020 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53021 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53022 +#else
53023 inode->i_gid = de->gid;
53024 +#endif
53025 }
53026 if (de->size)
53027 inode->i_size = de->size;
53028 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
53029 index 753ca37..26bcf3b 100644
53030 --- a/fs/proc/internal.h
53031 +++ b/fs/proc/internal.h
53032 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
53033 struct pid *pid, struct task_struct *task);
53034 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
53035 struct pid *pid, struct task_struct *task);
53036 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
53037 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
53038 +#endif
53039 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
53040
53041 extern const struct file_operations proc_maps_operations;
53042 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
53043 index b442dac..aab29cb 100644
53044 --- a/fs/proc/kcore.c
53045 +++ b/fs/proc/kcore.c
53046 @@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
53047 off_t offset = 0;
53048 struct kcore_list *m;
53049
53050 + pax_track_stack();
53051 +
53052 /* setup ELF header */
53053 elf = (struct elfhdr *) bufp;
53054 bufp += sizeof(struct elfhdr);
53055 @@ -477,9 +479,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
53056 * the addresses in the elf_phdr on our list.
53057 */
53058 start = kc_offset_to_vaddr(*fpos - elf_buflen);
53059 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
53060 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
53061 + if (tsz > buflen)
53062 tsz = buflen;
53063 -
53064 +
53065 while (buflen) {
53066 struct kcore_list *m;
53067
53068 @@ -508,20 +511,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
53069 kfree(elf_buf);
53070 } else {
53071 if (kern_addr_valid(start)) {
53072 - unsigned long n;
53073 + char *elf_buf;
53074 + mm_segment_t oldfs;
53075
53076 - n = copy_to_user(buffer, (char *)start, tsz);
53077 - /*
53078 - * We cannot distingush between fault on source
53079 - * and fault on destination. When this happens
53080 - * we clear too and hope it will trigger the
53081 - * EFAULT again.
53082 - */
53083 - if (n) {
53084 - if (clear_user(buffer + tsz - n,
53085 - n))
53086 + elf_buf = kmalloc(tsz, GFP_KERNEL);
53087 + if (!elf_buf)
53088 + return -ENOMEM;
53089 + oldfs = get_fs();
53090 + set_fs(KERNEL_DS);
53091 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
53092 + set_fs(oldfs);
53093 + if (copy_to_user(buffer, elf_buf, tsz)) {
53094 + kfree(elf_buf);
53095 return -EFAULT;
53096 + }
53097 }
53098 + set_fs(oldfs);
53099 + kfree(elf_buf);
53100 } else {
53101 if (clear_user(buffer, tsz))
53102 return -EFAULT;
53103 @@ -541,6 +547,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
53104
53105 static int open_kcore(struct inode *inode, struct file *filp)
53106 {
53107 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
53108 + return -EPERM;
53109 +#endif
53110 if (!capable(CAP_SYS_RAWIO))
53111 return -EPERM;
53112 if (kcore_need_update)
53113 diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
53114 index 7ca7834..cfe90a4 100644
53115 --- a/fs/proc/kmsg.c
53116 +++ b/fs/proc/kmsg.c
53117 @@ -12,37 +12,37 @@
53118 #include <linux/poll.h>
53119 #include <linux/proc_fs.h>
53120 #include <linux/fs.h>
53121 +#include <linux/syslog.h>
53122
53123 #include <asm/uaccess.h>
53124 #include <asm/io.h>
53125
53126 extern wait_queue_head_t log_wait;
53127
53128 -extern int do_syslog(int type, char __user *bug, int count);
53129 -
53130 static int kmsg_open(struct inode * inode, struct file * file)
53131 {
53132 - return do_syslog(1,NULL,0);
53133 + return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_FILE);
53134 }
53135
53136 static int kmsg_release(struct inode * inode, struct file * file)
53137 {
53138 - (void) do_syslog(0,NULL,0);
53139 + (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_FILE);
53140 return 0;
53141 }
53142
53143 static ssize_t kmsg_read(struct file *file, char __user *buf,
53144 size_t count, loff_t *ppos)
53145 {
53146 - if ((file->f_flags & O_NONBLOCK) && !do_syslog(9, NULL, 0))
53147 + if ((file->f_flags & O_NONBLOCK) &&
53148 + !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
53149 return -EAGAIN;
53150 - return do_syslog(2, buf, count);
53151 + return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_FILE);
53152 }
53153
53154 static unsigned int kmsg_poll(struct file *file, poll_table *wait)
53155 {
53156 poll_wait(file, &log_wait, wait);
53157 - if (do_syslog(9, NULL, 0))
53158 + if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
53159 return POLLIN | POLLRDNORM;
53160 return 0;
53161 }
53162 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
53163 index a65239c..ad1182a 100644
53164 --- a/fs/proc/meminfo.c
53165 +++ b/fs/proc/meminfo.c
53166 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
53167 unsigned long pages[NR_LRU_LISTS];
53168 int lru;
53169
53170 + pax_track_stack();
53171 +
53172 /*
53173 * display in kilobytes.
53174 */
53175 @@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
53176 vmi.used >> 10,
53177 vmi.largest_chunk >> 10
53178 #ifdef CONFIG_MEMORY_FAILURE
53179 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
53180 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
53181 #endif
53182 );
53183
53184 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
53185 index 9fe7d7e..cdb62c9 100644
53186 --- a/fs/proc/nommu.c
53187 +++ b/fs/proc/nommu.c
53188 @@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
53189 if (len < 1)
53190 len = 1;
53191 seq_printf(m, "%*c", len, ' ');
53192 - seq_path(m, &file->f_path, "");
53193 + seq_path(m, &file->f_path, "\n\\");
53194 }
53195
53196 seq_putc(m, '\n');
53197 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
53198 index 04d1270..25e1173 100644
53199 --- a/fs/proc/proc_net.c
53200 +++ b/fs/proc/proc_net.c
53201 @@ -104,6 +104,17 @@ static struct net *get_proc_task_net(struct inode *dir)
53202 struct task_struct *task;
53203 struct nsproxy *ns;
53204 struct net *net = NULL;
53205 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53206 + const struct cred *cred = current_cred();
53207 +#endif
53208 +
53209 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53210 + if (cred->fsuid)
53211 + return net;
53212 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53213 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
53214 + return net;
53215 +#endif
53216
53217 rcu_read_lock();
53218 task = pid_task(proc_pid(dir), PIDTYPE_PID);
53219 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
53220 index f667e8a..55f4d96 100644
53221 --- a/fs/proc/proc_sysctl.c
53222 +++ b/fs/proc/proc_sysctl.c
53223 @@ -7,11 +7,13 @@
53224 #include <linux/security.h>
53225 #include "internal.h"
53226
53227 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
53228 +
53229 static const struct dentry_operations proc_sys_dentry_operations;
53230 static const struct file_operations proc_sys_file_operations;
53231 -static const struct inode_operations proc_sys_inode_operations;
53232 +const struct inode_operations proc_sys_inode_operations;
53233 static const struct file_operations proc_sys_dir_file_operations;
53234 -static const struct inode_operations proc_sys_dir_operations;
53235 +const struct inode_operations proc_sys_dir_operations;
53236
53237 static struct inode *proc_sys_make_inode(struct super_block *sb,
53238 struct ctl_table_header *head, struct ctl_table *table)
53239 @@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
53240 if (!p)
53241 goto out;
53242
53243 + if (gr_handle_sysctl(p, MAY_EXEC))
53244 + goto out;
53245 +
53246 err = ERR_PTR(-ENOMEM);
53247 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
53248 if (h)
53249 @@ -119,6 +124,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
53250
53251 err = NULL;
53252 dentry->d_op = &proc_sys_dentry_operations;
53253 +
53254 + gr_handle_proc_create(dentry, inode);
53255 +
53256 d_add(dentry, inode);
53257
53258 out:
53259 @@ -200,6 +208,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
53260 return -ENOMEM;
53261 } else {
53262 child->d_op = &proc_sys_dentry_operations;
53263 +
53264 + gr_handle_proc_create(child, inode);
53265 +
53266 d_add(child, inode);
53267 }
53268 } else {
53269 @@ -228,6 +239,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
53270 if (*pos < file->f_pos)
53271 continue;
53272
53273 + if (gr_handle_sysctl(table, 0))
53274 + continue;
53275 +
53276 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
53277 if (res)
53278 return res;
53279 @@ -344,6 +358,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
53280 if (IS_ERR(head))
53281 return PTR_ERR(head);
53282
53283 + if (table && gr_handle_sysctl(table, MAY_EXEC))
53284 + return -ENOENT;
53285 +
53286 generic_fillattr(inode, stat);
53287 if (table)
53288 stat->mode = (stat->mode & S_IFMT) | table->mode;
53289 @@ -358,17 +375,18 @@ static const struct file_operations proc_sys_file_operations = {
53290 };
53291
53292 static const struct file_operations proc_sys_dir_file_operations = {
53293 + .read = generic_read_dir,
53294 .readdir = proc_sys_readdir,
53295 .llseek = generic_file_llseek,
53296 };
53297
53298 -static const struct inode_operations proc_sys_inode_operations = {
53299 +const struct inode_operations proc_sys_inode_operations = {
53300 .permission = proc_sys_permission,
53301 .setattr = proc_sys_setattr,
53302 .getattr = proc_sys_getattr,
53303 };
53304
53305 -static const struct inode_operations proc_sys_dir_operations = {
53306 +const struct inode_operations proc_sys_dir_operations = {
53307 .lookup = proc_sys_lookup,
53308 .permission = proc_sys_permission,
53309 .setattr = proc_sys_setattr,
53310 diff --git a/fs/proc/root.c b/fs/proc/root.c
53311 index b080b79..d957e63 100644
53312 --- a/fs/proc/root.c
53313 +++ b/fs/proc/root.c
53314 @@ -134,7 +134,15 @@ void __init proc_root_init(void)
53315 #ifdef CONFIG_PROC_DEVICETREE
53316 proc_device_tree_init();
53317 #endif
53318 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
53319 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53320 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
53321 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53322 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
53323 +#endif
53324 +#else
53325 proc_mkdir("bus", NULL);
53326 +#endif
53327 proc_sys_init();
53328 }
53329
53330 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
53331 index 3b7b82a..7dbb571 100644
53332 --- a/fs/proc/task_mmu.c
53333 +++ b/fs/proc/task_mmu.c
53334 @@ -46,15 +46,26 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
53335 "VmStk:\t%8lu kB\n"
53336 "VmExe:\t%8lu kB\n"
53337 "VmLib:\t%8lu kB\n"
53338 - "VmPTE:\t%8lu kB\n",
53339 - hiwater_vm << (PAGE_SHIFT-10),
53340 + "VmPTE:\t%8lu kB\n"
53341 +
53342 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
53343 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
53344 +#endif
53345 +
53346 + ,hiwater_vm << (PAGE_SHIFT-10),
53347 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
53348 mm->locked_vm << (PAGE_SHIFT-10),
53349 hiwater_rss << (PAGE_SHIFT-10),
53350 total_rss << (PAGE_SHIFT-10),
53351 data << (PAGE_SHIFT-10),
53352 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
53353 - (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
53354 + (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
53355 +
53356 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
53357 + , mm->context.user_cs_base, mm->context.user_cs_limit
53358 +#endif
53359 +
53360 + );
53361 }
53362
53363 unsigned long task_vsize(struct mm_struct *mm)
53364 @@ -175,7 +186,8 @@ static void m_stop(struct seq_file *m, void *v)
53365 struct proc_maps_private *priv = m->private;
53366 struct vm_area_struct *vma = v;
53367
53368 - vma_stop(priv, vma);
53369 + if (!IS_ERR(vma))
53370 + vma_stop(priv, vma);
53371 if (priv->task)
53372 put_task_struct(priv->task);
53373 }
53374 @@ -199,6 +211,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
53375 return ret;
53376 }
53377
53378 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53379 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
53380 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
53381 + _mm->pax_flags & MF_PAX_SEGMEXEC))
53382 +#endif
53383 +
53384 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53385 {
53386 struct mm_struct *mm = vma->vm_mm;
53387 @@ -206,7 +224,6 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53388 int flags = vma->vm_flags;
53389 unsigned long ino = 0;
53390 unsigned long long pgoff = 0;
53391 - unsigned long start;
53392 dev_t dev = 0;
53393 int len;
53394
53395 @@ -217,20 +234,23 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53396 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
53397 }
53398
53399 - /* We don't show the stack guard page in /proc/maps */
53400 - start = vma->vm_start;
53401 - if (vma->vm_flags & VM_GROWSDOWN)
53402 - if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
53403 - start += PAGE_SIZE;
53404 -
53405 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
53406 - start,
53407 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53408 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
53409 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
53410 +#else
53411 + vma->vm_start,
53412 vma->vm_end,
53413 +#endif
53414 flags & VM_READ ? 'r' : '-',
53415 flags & VM_WRITE ? 'w' : '-',
53416 flags & VM_EXEC ? 'x' : '-',
53417 flags & VM_MAYSHARE ? 's' : 'p',
53418 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53419 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
53420 +#else
53421 pgoff,
53422 +#endif
53423 MAJOR(dev), MINOR(dev), ino, &len);
53424
53425 /*
53426 @@ -239,7 +259,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53427 */
53428 if (file) {
53429 pad_len_spaces(m, len);
53430 - seq_path(m, &file->f_path, "\n");
53431 + seq_path(m, &file->f_path, "\n\\");
53432 } else {
53433 const char *name = arch_vma_name(vma);
53434 if (!name) {
53435 @@ -247,8 +267,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53436 if (vma->vm_start <= mm->brk &&
53437 vma->vm_end >= mm->start_brk) {
53438 name = "[heap]";
53439 - } else if (vma->vm_start <= mm->start_stack &&
53440 - vma->vm_end >= mm->start_stack) {
53441 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
53442 + (vma->vm_start <= mm->start_stack &&
53443 + vma->vm_end >= mm->start_stack)) {
53444 name = "[stack]";
53445 }
53446 } else {
53447 @@ -391,9 +412,16 @@ static int show_smap(struct seq_file *m, void *v)
53448 };
53449
53450 memset(&mss, 0, sizeof mss);
53451 - mss.vma = vma;
53452 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
53453 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
53454 +
53455 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53456 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
53457 +#endif
53458 + mss.vma = vma;
53459 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
53460 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
53461 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53462 + }
53463 +#endif
53464
53465 show_map_vma(m, vma);
53466
53467 @@ -409,7 +437,11 @@ static int show_smap(struct seq_file *m, void *v)
53468 "Swap: %8lu kB\n"
53469 "KernelPageSize: %8lu kB\n"
53470 "MMUPageSize: %8lu kB\n",
53471 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53472 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
53473 +#else
53474 (vma->vm_end - vma->vm_start) >> 10,
53475 +#endif
53476 mss.resident >> 10,
53477 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
53478 mss.shared_clean >> 10,
53479 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
53480 index 8f5c05d..c99c76d 100644
53481 --- a/fs/proc/task_nommu.c
53482 +++ b/fs/proc/task_nommu.c
53483 @@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
53484 else
53485 bytes += kobjsize(mm);
53486
53487 - if (current->fs && current->fs->users > 1)
53488 + if (current->fs && atomic_read(&current->fs->users) > 1)
53489 sbytes += kobjsize(current->fs);
53490 else
53491 bytes += kobjsize(current->fs);
53492 @@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
53493 if (len < 1)
53494 len = 1;
53495 seq_printf(m, "%*c", len, ' ');
53496 - seq_path(m, &file->f_path, "");
53497 + seq_path(m, &file->f_path, "\n\\");
53498 }
53499
53500 seq_putc(m, '\n');
53501 diff --git a/fs/readdir.c b/fs/readdir.c
53502 index 7723401..30059a6 100644
53503 --- a/fs/readdir.c
53504 +++ b/fs/readdir.c
53505 @@ -16,6 +16,7 @@
53506 #include <linux/security.h>
53507 #include <linux/syscalls.h>
53508 #include <linux/unistd.h>
53509 +#include <linux/namei.h>
53510
53511 #include <asm/uaccess.h>
53512
53513 @@ -67,6 +68,7 @@ struct old_linux_dirent {
53514
53515 struct readdir_callback {
53516 struct old_linux_dirent __user * dirent;
53517 + struct file * file;
53518 int result;
53519 };
53520
53521 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
53522 buf->result = -EOVERFLOW;
53523 return -EOVERFLOW;
53524 }
53525 +
53526 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
53527 + return 0;
53528 +
53529 buf->result++;
53530 dirent = buf->dirent;
53531 if (!access_ok(VERIFY_WRITE, dirent,
53532 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
53533
53534 buf.result = 0;
53535 buf.dirent = dirent;
53536 + buf.file = file;
53537
53538 error = vfs_readdir(file, fillonedir, &buf);
53539 if (buf.result)
53540 @@ -142,6 +149,7 @@ struct linux_dirent {
53541 struct getdents_callback {
53542 struct linux_dirent __user * current_dir;
53543 struct linux_dirent __user * previous;
53544 + struct file * file;
53545 int count;
53546 int error;
53547 };
53548 @@ -162,6 +170,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
53549 buf->error = -EOVERFLOW;
53550 return -EOVERFLOW;
53551 }
53552 +
53553 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
53554 + return 0;
53555 +
53556 dirent = buf->previous;
53557 if (dirent) {
53558 if (__put_user(offset, &dirent->d_off))
53559 @@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
53560 buf.previous = NULL;
53561 buf.count = count;
53562 buf.error = 0;
53563 + buf.file = file;
53564
53565 error = vfs_readdir(file, filldir, &buf);
53566 if (error >= 0)
53567 @@ -228,6 +241,7 @@ out:
53568 struct getdents_callback64 {
53569 struct linux_dirent64 __user * current_dir;
53570 struct linux_dirent64 __user * previous;
53571 + struct file *file;
53572 int count;
53573 int error;
53574 };
53575 @@ -242,6 +256,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
53576 buf->error = -EINVAL; /* only used if we fail.. */
53577 if (reclen > buf->count)
53578 return -EINVAL;
53579 +
53580 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
53581 + return 0;
53582 +
53583 dirent = buf->previous;
53584 if (dirent) {
53585 if (__put_user(offset, &dirent->d_off))
53586 @@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
53587
53588 buf.current_dir = dirent;
53589 buf.previous = NULL;
53590 + buf.file = file;
53591 buf.count = count;
53592 buf.error = 0;
53593
53594 @@ -297,7 +316,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
53595 error = buf.error;
53596 lastdirent = buf.previous;
53597 if (lastdirent) {
53598 - typeof(lastdirent->d_off) d_off = file->f_pos;
53599 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
53600 if (__put_user(d_off, &lastdirent->d_off))
53601 error = -EFAULT;
53602 else
53603 diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
53604 index d42c30c..4fd8718 100644
53605 --- a/fs/reiserfs/dir.c
53606 +++ b/fs/reiserfs/dir.c
53607 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
53608 struct reiserfs_dir_entry de;
53609 int ret = 0;
53610
53611 + pax_track_stack();
53612 +
53613 reiserfs_write_lock(inode->i_sb);
53614
53615 reiserfs_check_lock_depth(inode->i_sb, "readdir");
53616 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
53617 index 128d3f7..8840d44 100644
53618 --- a/fs/reiserfs/do_balan.c
53619 +++ b/fs/reiserfs/do_balan.c
53620 @@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
53621 return;
53622 }
53623
53624 - atomic_inc(&(fs_generation(tb->tb_sb)));
53625 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
53626 do_balance_starts(tb);
53627
53628 /* balance leaf returns 0 except if combining L R and S into
53629 diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
53630 index 72cb1cc..d0e3181 100644
53631 --- a/fs/reiserfs/item_ops.c
53632 +++ b/fs/reiserfs/item_ops.c
53633 @@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_item *vi)
53634 vi->vi_index, vi->vi_type, vi->vi_ih);
53635 }
53636
53637 -static struct item_operations stat_data_ops = {
53638 +static const struct item_operations stat_data_ops = {
53639 .bytes_number = sd_bytes_number,
53640 .decrement_key = sd_decrement_key,
53641 .is_left_mergeable = sd_is_left_mergeable,
53642 @@ -196,7 +196,7 @@ static void direct_print_vi(struct virtual_item *vi)
53643 vi->vi_index, vi->vi_type, vi->vi_ih);
53644 }
53645
53646 -static struct item_operations direct_ops = {
53647 +static const struct item_operations direct_ops = {
53648 .bytes_number = direct_bytes_number,
53649 .decrement_key = direct_decrement_key,
53650 .is_left_mergeable = direct_is_left_mergeable,
53651 @@ -341,7 +341,7 @@ static void indirect_print_vi(struct virtual_item *vi)
53652 vi->vi_index, vi->vi_type, vi->vi_ih);
53653 }
53654
53655 -static struct item_operations indirect_ops = {
53656 +static const struct item_operations indirect_ops = {
53657 .bytes_number = indirect_bytes_number,
53658 .decrement_key = indirect_decrement_key,
53659 .is_left_mergeable = indirect_is_left_mergeable,
53660 @@ -628,7 +628,7 @@ static void direntry_print_vi(struct virtual_item *vi)
53661 printk("\n");
53662 }
53663
53664 -static struct item_operations direntry_ops = {
53665 +static const struct item_operations direntry_ops = {
53666 .bytes_number = direntry_bytes_number,
53667 .decrement_key = direntry_decrement_key,
53668 .is_left_mergeable = direntry_is_left_mergeable,
53669 @@ -724,7 +724,7 @@ static void errcatch_print_vi(struct virtual_item *vi)
53670 "Invalid item type observed, run fsck ASAP");
53671 }
53672
53673 -static struct item_operations errcatch_ops = {
53674 +static const struct item_operations errcatch_ops = {
53675 errcatch_bytes_number,
53676 errcatch_decrement_key,
53677 errcatch_is_left_mergeable,
53678 @@ -746,7 +746,7 @@ static struct item_operations errcatch_ops = {
53679 #error Item types must use disk-format assigned values.
53680 #endif
53681
53682 -struct item_operations *item_ops[TYPE_ANY + 1] = {
53683 +const struct item_operations * const item_ops[TYPE_ANY + 1] = {
53684 &stat_data_ops,
53685 &indirect_ops,
53686 &direct_ops,
53687 diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
53688 index b5fe0aa..e0e25c4 100644
53689 --- a/fs/reiserfs/journal.c
53690 +++ b/fs/reiserfs/journal.c
53691 @@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
53692 struct buffer_head *bh;
53693 int i, j;
53694
53695 + pax_track_stack();
53696 +
53697 bh = __getblk(dev, block, bufsize);
53698 if (buffer_uptodate(bh))
53699 return (bh);
53700 diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
53701 index 2715791..b8996db 100644
53702 --- a/fs/reiserfs/namei.c
53703 +++ b/fs/reiserfs/namei.c
53704 @@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
53705 unsigned long savelink = 1;
53706 struct timespec ctime;
53707
53708 + pax_track_stack();
53709 +
53710 /* three balancings: (1) old name removal, (2) new name insertion
53711 and (3) maybe "save" link insertion
53712 stat data updates: (1) old directory,
53713 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
53714 index 9229e55..3d2e3b7 100644
53715 --- a/fs/reiserfs/procfs.c
53716 +++ b/fs/reiserfs/procfs.c
53717 @@ -123,7 +123,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
53718 "SMALL_TAILS " : "NO_TAILS ",
53719 replay_only(sb) ? "REPLAY_ONLY " : "",
53720 convert_reiserfs(sb) ? "CONV " : "",
53721 - atomic_read(&r->s_generation_counter),
53722 + atomic_read_unchecked(&r->s_generation_counter),
53723 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
53724 SF(s_do_balance), SF(s_unneeded_left_neighbor),
53725 SF(s_good_search_by_key_reada), SF(s_bmaps),
53726 @@ -309,6 +309,8 @@ static int show_journal(struct seq_file *m, struct super_block *sb)
53727 struct journal_params *jp = &rs->s_v1.s_journal;
53728 char b[BDEVNAME_SIZE];
53729
53730 + pax_track_stack();
53731 +
53732 seq_printf(m, /* on-disk fields */
53733 "jp_journal_1st_block: \t%i\n"
53734 "jp_journal_dev: \t%s[%x]\n"
53735 diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
53736 index d036ee5..4c7dca1 100644
53737 --- a/fs/reiserfs/stree.c
53738 +++ b/fs/reiserfs/stree.c
53739 @@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
53740 int iter = 0;
53741 #endif
53742
53743 + pax_track_stack();
53744 +
53745 BUG_ON(!th->t_trans_id);
53746
53747 init_tb_struct(th, &s_del_balance, sb, path,
53748 @@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
53749 int retval;
53750 int quota_cut_bytes = 0;
53751
53752 + pax_track_stack();
53753 +
53754 BUG_ON(!th->t_trans_id);
53755
53756 le_key2cpu_key(&cpu_key, key);
53757 @@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
53758 int quota_cut_bytes;
53759 loff_t tail_pos = 0;
53760
53761 + pax_track_stack();
53762 +
53763 BUG_ON(!th->t_trans_id);
53764
53765 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
53766 @@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
53767 int retval;
53768 int fs_gen;
53769
53770 + pax_track_stack();
53771 +
53772 BUG_ON(!th->t_trans_id);
53773
53774 fs_gen = get_generation(inode->i_sb);
53775 @@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
53776 int fs_gen = 0;
53777 int quota_bytes = 0;
53778
53779 + pax_track_stack();
53780 +
53781 BUG_ON(!th->t_trans_id);
53782
53783 if (inode) { /* Do we count quotas for item? */
53784 diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
53785 index f0ad05f..af3306f 100644
53786 --- a/fs/reiserfs/super.c
53787 +++ b/fs/reiserfs/super.c
53788 @@ -912,6 +912,8 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
53789 {.option_name = NULL}
53790 };
53791
53792 + pax_track_stack();
53793 +
53794 *blocks = 0;
53795 if (!options || !*options)
53796 /* use default configuration: create tails, journaling on, no
53797 diff --git a/fs/select.c b/fs/select.c
53798 index fd38ce2..f5381b8 100644
53799 --- a/fs/select.c
53800 +++ b/fs/select.c
53801 @@ -20,6 +20,7 @@
53802 #include <linux/module.h>
53803 #include <linux/slab.h>
53804 #include <linux/poll.h>
53805 +#include <linux/security.h>
53806 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
53807 #include <linux/file.h>
53808 #include <linux/fdtable.h>
53809 @@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
53810 int retval, i, timed_out = 0;
53811 unsigned long slack = 0;
53812
53813 + pax_track_stack();
53814 +
53815 rcu_read_lock();
53816 retval = max_select_fd(n, fds);
53817 rcu_read_unlock();
53818 @@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
53819 /* Allocate small arguments on the stack to save memory and be faster */
53820 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
53821
53822 + pax_track_stack();
53823 +
53824 ret = -EINVAL;
53825 if (n < 0)
53826 goto out_nofds;
53827 @@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
53828 struct poll_list *walk = head;
53829 unsigned long todo = nfds;
53830
53831 + pax_track_stack();
53832 +
53833 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
53834 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
53835 return -EINVAL;
53836
53837 diff --git a/fs/seq_file.c b/fs/seq_file.c
53838 index eae7d9d..679f099 100644
53839 --- a/fs/seq_file.c
53840 +++ b/fs/seq_file.c
53841 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m, loff_t offset)
53842 return 0;
53843 }
53844 if (!m->buf) {
53845 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
53846 + m->size = PAGE_SIZE;
53847 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
53848 if (!m->buf)
53849 return -ENOMEM;
53850 }
53851 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m, loff_t offset)
53852 Eoverflow:
53853 m->op->stop(m, p);
53854 kfree(m->buf);
53855 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
53856 + m->size <<= 1;
53857 + m->buf = kmalloc(m->size, GFP_KERNEL);
53858 return !m->buf ? -ENOMEM : -EAGAIN;
53859 }
53860
53861 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
53862 m->version = file->f_version;
53863 /* grab buffer if we didn't have one */
53864 if (!m->buf) {
53865 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
53866 + m->size = PAGE_SIZE;
53867 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
53868 if (!m->buf)
53869 goto Enomem;
53870 }
53871 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
53872 goto Fill;
53873 m->op->stop(m, p);
53874 kfree(m->buf);
53875 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
53876 + m->size <<= 1;
53877 + m->buf = kmalloc(m->size, GFP_KERNEL);
53878 if (!m->buf)
53879 goto Enomem;
53880 m->count = 0;
53881 @@ -551,7 +555,7 @@ static void single_stop(struct seq_file *p, void *v)
53882 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
53883 void *data)
53884 {
53885 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
53886 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
53887 int res = -ENOMEM;
53888
53889 if (op) {
53890 diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c
53891 index 71c29b6..54694dd 100644
53892 --- a/fs/smbfs/proc.c
53893 +++ b/fs/smbfs/proc.c
53894 @@ -266,9 +266,9 @@ int smb_setcodepage(struct smb_sb_info *server, struct smb_nls_codepage *cp)
53895
53896 out:
53897 if (server->local_nls != NULL && server->remote_nls != NULL)
53898 - server->ops->convert = convert_cp;
53899 + *(void **)&server->ops->convert = convert_cp;
53900 else
53901 - server->ops->convert = convert_memcpy;
53902 + *(void **)&server->ops->convert = convert_memcpy;
53903
53904 smb_unlock_server(server);
53905 return n;
53906 @@ -933,9 +933,9 @@ smb_newconn(struct smb_sb_info *server, struct smb_conn_opt *opt)
53907
53908 /* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
53909 if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
53910 - server->ops->getattr = smb_proc_getattr_core;
53911 + *(void **)&server->ops->getattr = smb_proc_getattr_core;
53912 } else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
53913 - server->ops->getattr = smb_proc_getattr_ff;
53914 + *(void **)&server->ops->getattr = smb_proc_getattr_ff;
53915 }
53916
53917 /* Decode server capabilities */
53918 @@ -3439,7 +3439,7 @@ out:
53919 static void
53920 install_ops(struct smb_ops *dst, struct smb_ops *src)
53921 {
53922 - memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
53923 + memcpy((void *)dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
53924 }
53925
53926 /* < LANMAN2 */
53927 diff --git a/fs/smbfs/symlink.c b/fs/smbfs/symlink.c
53928 index 00b2909..2ace383 100644
53929 --- a/fs/smbfs/symlink.c
53930 +++ b/fs/smbfs/symlink.c
53931 @@ -55,7 +55,7 @@ static void *smb_follow_link(struct dentry *dentry, struct nameidata *nd)
53932
53933 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
53934 {
53935 - char *s = nd_get_link(nd);
53936 + const char *s = nd_get_link(nd);
53937 if (!IS_ERR(s))
53938 __putname(s);
53939 }
53940 diff --git a/fs/splice.c b/fs/splice.c
53941 index bb92b7c..5aa72b0 100644
53942 --- a/fs/splice.c
53943 +++ b/fs/splice.c
53944 @@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
53945 pipe_lock(pipe);
53946
53947 for (;;) {
53948 - if (!pipe->readers) {
53949 + if (!atomic_read(&pipe->readers)) {
53950 send_sig(SIGPIPE, current, 0);
53951 if (!ret)
53952 ret = -EPIPE;
53953 @@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
53954 do_wakeup = 0;
53955 }
53956
53957 - pipe->waiting_writers++;
53958 + atomic_inc(&pipe->waiting_writers);
53959 pipe_wait(pipe);
53960 - pipe->waiting_writers--;
53961 + atomic_dec(&pipe->waiting_writers);
53962 }
53963
53964 pipe_unlock(pipe);
53965 @@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
53966 .spd_release = spd_release_page,
53967 };
53968
53969 + pax_track_stack();
53970 +
53971 index = *ppos >> PAGE_CACHE_SHIFT;
53972 loff = *ppos & ~PAGE_CACHE_MASK;
53973 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
53974 @@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
53975 old_fs = get_fs();
53976 set_fs(get_ds());
53977 /* The cast to a user pointer is valid due to the set_fs() */
53978 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
53979 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
53980 set_fs(old_fs);
53981
53982 return res;
53983 @@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
53984 old_fs = get_fs();
53985 set_fs(get_ds());
53986 /* The cast to a user pointer is valid due to the set_fs() */
53987 - res = vfs_write(file, (const char __user *)buf, count, &pos);
53988 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
53989 set_fs(old_fs);
53990
53991 return res;
53992 @@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
53993 .spd_release = spd_release_page,
53994 };
53995
53996 + pax_track_stack();
53997 +
53998 index = *ppos >> PAGE_CACHE_SHIFT;
53999 offset = *ppos & ~PAGE_CACHE_MASK;
54000 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
54001 @@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
54002 goto err;
54003
54004 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
54005 - vec[i].iov_base = (void __user *) page_address(page);
54006 + vec[i].iov_base = (__force void __user *) page_address(page);
54007 vec[i].iov_len = this_len;
54008 pages[i] = page;
54009 spd.nr_pages++;
54010 @@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
54011 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
54012 {
54013 while (!pipe->nrbufs) {
54014 - if (!pipe->writers)
54015 + if (!atomic_read(&pipe->writers))
54016 return 0;
54017
54018 - if (!pipe->waiting_writers && sd->num_spliced)
54019 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
54020 return 0;
54021
54022 if (sd->flags & SPLICE_F_NONBLOCK)
54023 @@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
54024 * out of the pipe right after the splice_to_pipe(). So set
54025 * PIPE_READERS appropriately.
54026 */
54027 - pipe->readers = 1;
54028 + atomic_set(&pipe->readers, 1);
54029
54030 current->splice_pipe = pipe;
54031 }
54032 @@ -1593,6 +1597,8 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
54033 .spd_release = spd_release_page,
54034 };
54035
54036 + pax_track_stack();
54037 +
54038 pipe = pipe_info(file->f_path.dentry->d_inode);
54039 if (!pipe)
54040 return -EBADF;
54041 @@ -1701,9 +1707,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
54042 ret = -ERESTARTSYS;
54043 break;
54044 }
54045 - if (!pipe->writers)
54046 + if (!atomic_read(&pipe->writers))
54047 break;
54048 - if (!pipe->waiting_writers) {
54049 + if (!atomic_read(&pipe->waiting_writers)) {
54050 if (flags & SPLICE_F_NONBLOCK) {
54051 ret = -EAGAIN;
54052 break;
54053 @@ -1735,7 +1741,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
54054 pipe_lock(pipe);
54055
54056 while (pipe->nrbufs >= PIPE_BUFFERS) {
54057 - if (!pipe->readers) {
54058 + if (!atomic_read(&pipe->readers)) {
54059 send_sig(SIGPIPE, current, 0);
54060 ret = -EPIPE;
54061 break;
54062 @@ -1748,9 +1754,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
54063 ret = -ERESTARTSYS;
54064 break;
54065 }
54066 - pipe->waiting_writers++;
54067 + atomic_inc(&pipe->waiting_writers);
54068 pipe_wait(pipe);
54069 - pipe->waiting_writers--;
54070 + atomic_dec(&pipe->waiting_writers);
54071 }
54072
54073 pipe_unlock(pipe);
54074 @@ -1786,14 +1792,14 @@ retry:
54075 pipe_double_lock(ipipe, opipe);
54076
54077 do {
54078 - if (!opipe->readers) {
54079 + if (!atomic_read(&opipe->readers)) {
54080 send_sig(SIGPIPE, current, 0);
54081 if (!ret)
54082 ret = -EPIPE;
54083 break;
54084 }
54085
54086 - if (!ipipe->nrbufs && !ipipe->writers)
54087 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
54088 break;
54089
54090 /*
54091 @@ -1893,7 +1899,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
54092 pipe_double_lock(ipipe, opipe);
54093
54094 do {
54095 - if (!opipe->readers) {
54096 + if (!atomic_read(&opipe->readers)) {
54097 send_sig(SIGPIPE, current, 0);
54098 if (!ret)
54099 ret = -EPIPE;
54100 @@ -1938,7 +1944,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
54101 * return EAGAIN if we have the potential of some data in the
54102 * future, otherwise just return 0
54103 */
54104 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
54105 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
54106 ret = -EAGAIN;
54107
54108 pipe_unlock(ipipe);
54109 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
54110 index 7118a38..70af853 100644
54111 --- a/fs/sysfs/file.c
54112 +++ b/fs/sysfs/file.c
54113 @@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
54114
54115 struct sysfs_open_dirent {
54116 atomic_t refcnt;
54117 - atomic_t event;
54118 + atomic_unchecked_t event;
54119 wait_queue_head_t poll;
54120 struct list_head buffers; /* goes through sysfs_buffer.list */
54121 };
54122 @@ -53,7 +53,7 @@ struct sysfs_buffer {
54123 size_t count;
54124 loff_t pos;
54125 char * page;
54126 - struct sysfs_ops * ops;
54127 + const struct sysfs_ops * ops;
54128 struct mutex mutex;
54129 int needs_read_fill;
54130 int event;
54131 @@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
54132 {
54133 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
54134 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
54135 - struct sysfs_ops * ops = buffer->ops;
54136 + const struct sysfs_ops * ops = buffer->ops;
54137 int ret = 0;
54138 ssize_t count;
54139
54140 @@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
54141 if (!sysfs_get_active_two(attr_sd))
54142 return -ENODEV;
54143
54144 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
54145 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
54146 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
54147
54148 sysfs_put_active_two(attr_sd);
54149 @@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentry, struct sysfs_buffer * buffer, size_t
54150 {
54151 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
54152 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
54153 - struct sysfs_ops * ops = buffer->ops;
54154 + const struct sysfs_ops * ops = buffer->ops;
54155 int rc;
54156
54157 /* need attr_sd for attr and ops, its parent for kobj */
54158 @@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
54159 return -ENOMEM;
54160
54161 atomic_set(&new_od->refcnt, 0);
54162 - atomic_set(&new_od->event, 1);
54163 + atomic_set_unchecked(&new_od->event, 1);
54164 init_waitqueue_head(&new_od->poll);
54165 INIT_LIST_HEAD(&new_od->buffers);
54166 goto retry;
54167 @@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
54168 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
54169 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
54170 struct sysfs_buffer *buffer;
54171 - struct sysfs_ops *ops;
54172 + const struct sysfs_ops *ops;
54173 int error = -EACCES;
54174 char *p;
54175
54176 @@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
54177
54178 sysfs_put_active_two(attr_sd);
54179
54180 - if (buffer->event != atomic_read(&od->event))
54181 + if (buffer->event != atomic_read_unchecked(&od->event))
54182 goto trigger;
54183
54184 return DEFAULT_POLLMASK;
54185 @@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
54186
54187 od = sd->s_attr.open;
54188 if (od) {
54189 - atomic_inc(&od->event);
54190 + atomic_inc_unchecked(&od->event);
54191 wake_up_interruptible(&od->poll);
54192 }
54193
54194 diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
54195 index 4974995..c26609c 100644
54196 --- a/fs/sysfs/mount.c
54197 +++ b/fs/sysfs/mount.c
54198 @@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
54199 .s_name = "",
54200 .s_count = ATOMIC_INIT(1),
54201 .s_flags = SYSFS_DIR,
54202 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
54203 + .s_mode = S_IFDIR | S_IRWXU,
54204 +#else
54205 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
54206 +#endif
54207 .s_ino = 1,
54208 };
54209
54210 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
54211 index c5081ad..342ea86 100644
54212 --- a/fs/sysfs/symlink.c
54213 +++ b/fs/sysfs/symlink.c
54214 @@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
54215
54216 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
54217 {
54218 - char *page = nd_get_link(nd);
54219 + const char *page = nd_get_link(nd);
54220 if (!IS_ERR(page))
54221 free_page((unsigned long)page);
54222 }
54223 diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
54224 index 1e06853..b06d325 100644
54225 --- a/fs/udf/balloc.c
54226 +++ b/fs/udf/balloc.c
54227 @@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
54228
54229 mutex_lock(&sbi->s_alloc_mutex);
54230 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
54231 - if (bloc->logicalBlockNum < 0 ||
54232 - (bloc->logicalBlockNum + count) >
54233 - partmap->s_partition_len) {
54234 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
54235 udf_debug("%d < %d || %d + %d > %d\n",
54236 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
54237 count, partmap->s_partition_len);
54238 @@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct super_block *sb,
54239
54240 mutex_lock(&sbi->s_alloc_mutex);
54241 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
54242 - if (bloc->logicalBlockNum < 0 ||
54243 - (bloc->logicalBlockNum + count) >
54244 - partmap->s_partition_len) {
54245 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
54246 udf_debug("%d < %d || %d + %d > %d\n",
54247 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
54248 partmap->s_partition_len);
54249 diff --git a/fs/udf/inode.c b/fs/udf/inode.c
54250 index 6d24c2c..fff470f 100644
54251 --- a/fs/udf/inode.c
54252 +++ b/fs/udf/inode.c
54253 @@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
54254 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
54255 int lastblock = 0;
54256
54257 + pax_track_stack();
54258 +
54259 prev_epos.offset = udf_file_entry_alloc_offset(inode);
54260 prev_epos.block = iinfo->i_location;
54261 prev_epos.bh = NULL;
54262 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
54263 index 9215700..bf1f68e 100644
54264 --- a/fs/udf/misc.c
54265 +++ b/fs/udf/misc.c
54266 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
54267
54268 u8 udf_tag_checksum(const struct tag *t)
54269 {
54270 - u8 *data = (u8 *)t;
54271 + const u8 *data = (const u8 *)t;
54272 u8 checksum = 0;
54273 int i;
54274 for (i = 0; i < sizeof(struct tag); ++i)
54275 diff --git a/fs/utimes.c b/fs/utimes.c
54276 index e4c75db..b4df0e0 100644
54277 --- a/fs/utimes.c
54278 +++ b/fs/utimes.c
54279 @@ -1,6 +1,7 @@
54280 #include <linux/compiler.h>
54281 #include <linux/file.h>
54282 #include <linux/fs.h>
54283 +#include <linux/security.h>
54284 #include <linux/linkage.h>
54285 #include <linux/mount.h>
54286 #include <linux/namei.h>
54287 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
54288 goto mnt_drop_write_and_out;
54289 }
54290 }
54291 +
54292 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
54293 + error = -EACCES;
54294 + goto mnt_drop_write_and_out;
54295 + }
54296 +
54297 mutex_lock(&inode->i_mutex);
54298 error = notify_change(path->dentry, &newattrs);
54299 mutex_unlock(&inode->i_mutex);
54300 diff --git a/fs/xattr.c b/fs/xattr.c
54301 index 6d4f6d3..cda3958 100644
54302 --- a/fs/xattr.c
54303 +++ b/fs/xattr.c
54304 @@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
54305 * Extended attribute SET operations
54306 */
54307 static long
54308 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
54309 +setxattr(struct path *path, const char __user *name, const void __user *value,
54310 size_t size, int flags)
54311 {
54312 int error;
54313 @@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
54314 return PTR_ERR(kvalue);
54315 }
54316
54317 - error = vfs_setxattr(d, kname, kvalue, size, flags);
54318 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
54319 + error = -EACCES;
54320 + goto out;
54321 + }
54322 +
54323 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
54324 +out:
54325 kfree(kvalue);
54326 return error;
54327 }
54328 @@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
54329 return error;
54330 error = mnt_want_write(path.mnt);
54331 if (!error) {
54332 - error = setxattr(path.dentry, name, value, size, flags);
54333 + error = setxattr(&path, name, value, size, flags);
54334 mnt_drop_write(path.mnt);
54335 }
54336 path_put(&path);
54337 @@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
54338 return error;
54339 error = mnt_want_write(path.mnt);
54340 if (!error) {
54341 - error = setxattr(path.dentry, name, value, size, flags);
54342 + error = setxattr(&path, name, value, size, flags);
54343 mnt_drop_write(path.mnt);
54344 }
54345 path_put(&path);
54346 @@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
54347 const void __user *,value, size_t, size, int, flags)
54348 {
54349 struct file *f;
54350 - struct dentry *dentry;
54351 int error = -EBADF;
54352
54353 f = fget(fd);
54354 if (!f)
54355 return error;
54356 - dentry = f->f_path.dentry;
54357 - audit_inode(NULL, dentry);
54358 + audit_inode(NULL, f->f_path.dentry);
54359 error = mnt_want_write_file(f);
54360 if (!error) {
54361 - error = setxattr(dentry, name, value, size, flags);
54362 + error = setxattr(&f->f_path, name, value, size, flags);
54363 mnt_drop_write(f->f_path.mnt);
54364 }
54365 fput(f);
54366 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
54367 index c6ad7c7..f2847a7 100644
54368 --- a/fs/xattr_acl.c
54369 +++ b/fs/xattr_acl.c
54370 @@ -17,8 +17,8 @@
54371 struct posix_acl *
54372 posix_acl_from_xattr(const void *value, size_t size)
54373 {
54374 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
54375 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
54376 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
54377 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
54378 int count;
54379 struct posix_acl *acl;
54380 struct posix_acl_entry *acl_e;
54381 diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
54382 index 942362f..88f96f5 100644
54383 --- a/fs/xfs/linux-2.6/xfs_ioctl.c
54384 +++ b/fs/xfs/linux-2.6/xfs_ioctl.c
54385 @@ -134,7 +134,7 @@ xfs_find_handle(
54386 }
54387
54388 error = -EFAULT;
54389 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
54390 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
54391 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
54392 goto out_put;
54393
54394 @@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
54395 if (IS_ERR(dentry))
54396 return PTR_ERR(dentry);
54397
54398 - kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
54399 + kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
54400 if (!kbuf)
54401 goto out_dput;
54402
54403 @@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
54404 xfs_mount_t *mp,
54405 void __user *arg)
54406 {
54407 - xfs_fsop_geom_t fsgeo;
54408 + xfs_fsop_geom_t fsgeo;
54409 int error;
54410
54411 error = xfs_fs_geometry(mp, &fsgeo, 3);
54412 diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
54413 index bad485a..479bd32 100644
54414 --- a/fs/xfs/linux-2.6/xfs_ioctl32.c
54415 +++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
54416 @@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
54417 xfs_fsop_geom_t fsgeo;
54418 int error;
54419
54420 + memset(&fsgeo, 0, sizeof(fsgeo));
54421 error = xfs_fs_geometry(mp, &fsgeo, 3);
54422 if (error)
54423 return -error;
54424 diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
54425 index 1f3b4b8..6102f6d 100644
54426 --- a/fs/xfs/linux-2.6/xfs_iops.c
54427 +++ b/fs/xfs/linux-2.6/xfs_iops.c
54428 @@ -468,7 +468,7 @@ xfs_vn_put_link(
54429 struct nameidata *nd,
54430 void *p)
54431 {
54432 - char *s = nd_get_link(nd);
54433 + const char *s = nd_get_link(nd);
54434
54435 if (!IS_ERR(s))
54436 kfree(s);
54437 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
54438 index 8971fb0..5fc1eb2 100644
54439 --- a/fs/xfs/xfs_bmap.c
54440 +++ b/fs/xfs/xfs_bmap.c
54441 @@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
54442 int nmap,
54443 int ret_nmap);
54444 #else
54445 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
54446 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
54447 #endif /* DEBUG */
54448
54449 #if defined(XFS_RW_TRACE)
54450 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
54451 index e89734e..5e84d8d 100644
54452 --- a/fs/xfs/xfs_dir2_sf.c
54453 +++ b/fs/xfs/xfs_dir2_sf.c
54454 @@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
54455 }
54456
54457 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
54458 - if (filldir(dirent, sfep->name, sfep->namelen,
54459 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
54460 + char name[sfep->namelen];
54461 + memcpy(name, sfep->name, sfep->namelen);
54462 + if (filldir(dirent, name, sfep->namelen,
54463 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
54464 + *offset = off & 0x7fffffff;
54465 + return 0;
54466 + }
54467 + } else if (filldir(dirent, sfep->name, sfep->namelen,
54468 off & 0x7fffffff, ino, DT_UNKNOWN)) {
54469 *offset = off & 0x7fffffff;
54470 return 0;
54471 diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
54472 index 8f32f50..859e8a3 100644
54473 --- a/fs/xfs/xfs_vnodeops.c
54474 +++ b/fs/xfs/xfs_vnodeops.c
54475 @@ -564,13 +564,17 @@ xfs_readlink(
54476
54477 xfs_ilock(ip, XFS_ILOCK_SHARED);
54478
54479 - ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFLNK);
54480 - ASSERT(ip->i_d.di_size <= MAXPATHLEN);
54481 -
54482 pathlen = ip->i_d.di_size;
54483 if (!pathlen)
54484 goto out;
54485
54486 + if (pathlen > MAXPATHLEN) {
54487 + xfs_fs_cmn_err(CE_ALERT, mp, "%s: inode (%llu) symlink length (%d) too long",
54488 + __func__, (unsigned long long)ip->i_ino, pathlen);
54489 + ASSERT(0);
54490 + return XFS_ERROR(EFSCORRUPTED);
54491 + }
54492 +
54493 if (ip->i_df.if_flags & XFS_IFINLINE) {
54494 memcpy(link, ip->i_df.if_u1.if_data, pathlen);
54495 link[pathlen] = '\0';
54496 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
54497 new file mode 100644
54498 index 0000000..f27a8e8
54499 --- /dev/null
54500 +++ b/grsecurity/Kconfig
54501 @@ -0,0 +1,1036 @@
54502 +#
54503 +# grecurity configuration
54504 +#
54505 +
54506 +menu "Grsecurity"
54507 +
54508 +config GRKERNSEC
54509 + bool "Grsecurity"
54510 + select CRYPTO
54511 + select CRYPTO_SHA256
54512 + help
54513 + If you say Y here, you will be able to configure many features
54514 + that will enhance the security of your system. It is highly
54515 + recommended that you say Y here and read through the help
54516 + for each option so that you fully understand the features and
54517 + can evaluate their usefulness for your machine.
54518 +
54519 +choice
54520 + prompt "Security Level"
54521 + depends on GRKERNSEC
54522 + default GRKERNSEC_CUSTOM
54523 +
54524 +config GRKERNSEC_LOW
54525 + bool "Low"
54526 + select GRKERNSEC_LINK
54527 + select GRKERNSEC_FIFO
54528 + select GRKERNSEC_RANDNET
54529 + select GRKERNSEC_DMESG
54530 + select GRKERNSEC_CHROOT
54531 + select GRKERNSEC_CHROOT_CHDIR
54532 +
54533 + help
54534 + If you choose this option, several of the grsecurity options will
54535 + be enabled that will give you greater protection against a number
54536 + of attacks, while assuring that none of your software will have any
54537 + conflicts with the additional security measures. If you run a lot
54538 + of unusual software, or you are having problems with the higher
54539 + security levels, you should say Y here. With this option, the
54540 + following features are enabled:
54541 +
54542 + - Linking restrictions
54543 + - FIFO restrictions
54544 + - Restricted dmesg
54545 + - Enforced chdir("/") on chroot
54546 + - Runtime module disabling
54547 +
54548 +config GRKERNSEC_MEDIUM
54549 + bool "Medium"
54550 + select PAX
54551 + select PAX_EI_PAX
54552 + select PAX_PT_PAX_FLAGS
54553 + select PAX_HAVE_ACL_FLAGS
54554 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
54555 + select GRKERNSEC_CHROOT
54556 + select GRKERNSEC_CHROOT_SYSCTL
54557 + select GRKERNSEC_LINK
54558 + select GRKERNSEC_FIFO
54559 + select GRKERNSEC_DMESG
54560 + select GRKERNSEC_RANDNET
54561 + select GRKERNSEC_FORKFAIL
54562 + select GRKERNSEC_TIME
54563 + select GRKERNSEC_SIGNAL
54564 + select GRKERNSEC_CHROOT
54565 + select GRKERNSEC_CHROOT_UNIX
54566 + select GRKERNSEC_CHROOT_MOUNT
54567 + select GRKERNSEC_CHROOT_PIVOT
54568 + select GRKERNSEC_CHROOT_DOUBLE
54569 + select GRKERNSEC_CHROOT_CHDIR
54570 + select GRKERNSEC_CHROOT_MKNOD
54571 + select GRKERNSEC_PROC
54572 + select GRKERNSEC_PROC_USERGROUP
54573 + select PAX_RANDUSTACK
54574 + select PAX_ASLR
54575 + select PAX_RANDMMAP
54576 + select PAX_REFCOUNT if (X86 || SPARC64)
54577 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
54578 +
54579 + help
54580 + If you say Y here, several features in addition to those included
54581 + in the low additional security level will be enabled. These
54582 + features provide even more security to your system, though in rare
54583 + cases they may be incompatible with very old or poorly written
54584 + software. If you enable this option, make sure that your auth
54585 + service (identd) is running as gid 1001. With this option,
54586 + the following features (in addition to those provided in the
54587 + low additional security level) will be enabled:
54588 +
54589 + - Failed fork logging
54590 + - Time change logging
54591 + - Signal logging
54592 + - Deny mounts in chroot
54593 + - Deny double chrooting
54594 + - Deny sysctl writes in chroot
54595 + - Deny mknod in chroot
54596 + - Deny access to abstract AF_UNIX sockets out of chroot
54597 + - Deny pivot_root in chroot
54598 + - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
54599 + - /proc restrictions with special GID set to 10 (usually wheel)
54600 + - Address Space Layout Randomization (ASLR)
54601 + - Prevent exploitation of most refcount overflows
54602 + - Bounds checking of copying between the kernel and userland
54603 +
54604 +config GRKERNSEC_HIGH
54605 + bool "High"
54606 + select GRKERNSEC_LINK
54607 + select GRKERNSEC_FIFO
54608 + select GRKERNSEC_DMESG
54609 + select GRKERNSEC_FORKFAIL
54610 + select GRKERNSEC_TIME
54611 + select GRKERNSEC_SIGNAL
54612 + select GRKERNSEC_CHROOT
54613 + select GRKERNSEC_CHROOT_SHMAT
54614 + select GRKERNSEC_CHROOT_UNIX
54615 + select GRKERNSEC_CHROOT_MOUNT
54616 + select GRKERNSEC_CHROOT_FCHDIR
54617 + select GRKERNSEC_CHROOT_PIVOT
54618 + select GRKERNSEC_CHROOT_DOUBLE
54619 + select GRKERNSEC_CHROOT_CHDIR
54620 + select GRKERNSEC_CHROOT_MKNOD
54621 + select GRKERNSEC_CHROOT_CAPS
54622 + select GRKERNSEC_CHROOT_SYSCTL
54623 + select GRKERNSEC_CHROOT_FINDTASK
54624 + select GRKERNSEC_SYSFS_RESTRICT
54625 + select GRKERNSEC_PROC
54626 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
54627 + select GRKERNSEC_HIDESYM
54628 + select GRKERNSEC_BRUTE
54629 + select GRKERNSEC_PROC_USERGROUP
54630 + select GRKERNSEC_KMEM
54631 + select GRKERNSEC_RESLOG
54632 + select GRKERNSEC_RANDNET
54633 + select GRKERNSEC_PROC_ADD
54634 + select GRKERNSEC_CHROOT_CHMOD
54635 + select GRKERNSEC_CHROOT_NICE
54636 + select GRKERNSEC_AUDIT_MOUNT
54637 + select GRKERNSEC_MODHARDEN if (MODULES)
54638 + select GRKERNSEC_HARDEN_PTRACE
54639 + select GRKERNSEC_VM86 if (X86_32)
54640 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
54641 + select PAX
54642 + select PAX_RANDUSTACK
54643 + select PAX_ASLR
54644 + select PAX_RANDMMAP
54645 + select PAX_NOEXEC
54646 + select PAX_MPROTECT
54647 + select PAX_EI_PAX
54648 + select PAX_PT_PAX_FLAGS
54649 + select PAX_HAVE_ACL_FLAGS
54650 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
54651 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
54652 + select PAX_RANDKSTACK if (X86_TSC && X86)
54653 + select PAX_SEGMEXEC if (X86_32)
54654 + select PAX_PAGEEXEC
54655 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
54656 + select PAX_EMUTRAMP if (PARISC)
54657 + select PAX_EMUSIGRT if (PARISC)
54658 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
54659 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
54660 + select PAX_REFCOUNT if (X86 || SPARC64)
54661 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
54662 + help
54663 + If you say Y here, many of the features of grsecurity will be
54664 + enabled, which will protect you against many kinds of attacks
54665 + against your system. The heightened security comes at a cost
54666 + of an increased chance of incompatibilities with rare software
54667 + on your machine. Since this security level enables PaX, you should
54668 + view <http://pax.grsecurity.net> and read about the PaX
54669 + project. While you are there, download chpax and run it on
54670 + binaries that cause problems with PaX. Also remember that
54671 + since the /proc restrictions are enabled, you must run your
54672 + identd as gid 1001. This security level enables the following
54673 + features in addition to those listed in the low and medium
54674 + security levels:
54675 +
54676 + - Additional /proc restrictions
54677 + - Chmod restrictions in chroot
54678 + - No signals, ptrace, or viewing of processes outside of chroot
54679 + - Capability restrictions in chroot
54680 + - Deny fchdir out of chroot
54681 + - Priority restrictions in chroot
54682 + - Segmentation-based implementation of PaX
54683 + - Mprotect restrictions
54684 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
54685 + - Kernel stack randomization
54686 + - Mount/unmount/remount logging
54687 + - Kernel symbol hiding
54688 + - Hardening of module auto-loading
54689 + - Ptrace restrictions
54690 + - Restricted vm86 mode
54691 + - Restricted sysfs/debugfs
54692 + - Active kernel exploit response
54693 +
54694 +config GRKERNSEC_CUSTOM
54695 + bool "Custom"
54696 + help
54697 + If you say Y here, you will be able to configure every grsecurity
54698 + option, which allows you to enable many more features that aren't
54699 + covered in the basic security levels. These additional features
54700 + include TPE, socket restrictions, and the sysctl system for
54701 + grsecurity. It is advised that you read through the help for
54702 + each option to determine its usefulness in your situation.
54703 +
54704 +endchoice
54705 +
54706 +menu "Address Space Protection"
54707 +depends on GRKERNSEC
54708 +
54709 +config GRKERNSEC_KMEM
54710 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
54711 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
54712 + help
54713 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
54714 + be written to or read from to modify or leak the contents of the running
54715 + kernel. /dev/port will also not be allowed to be opened. If you have module
54716 + support disabled, enabling this will close up four ways that are
54717 + currently used to insert malicious code into the running kernel.
54718 + Even with all these features enabled, we still highly recommend that
54719 + you use the RBAC system, as it is still possible for an attacker to
54720 + modify the running kernel through privileged I/O granted by ioperm/iopl.
54721 + If you are not using XFree86, you may be able to stop this additional
54722 + case by enabling the 'Disable privileged I/O' option. Though nothing
54723 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
54724 + but only to video memory, which is the only writing we allow in this
54725 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
54726 + not be allowed to mprotect it with PROT_WRITE later.
54727 + It is highly recommended that you say Y here if you meet all the
54728 + conditions above.
54729 +
54730 +config GRKERNSEC_VM86
54731 + bool "Restrict VM86 mode"
54732 + depends on X86_32
54733 +
54734 + help
54735 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
54736 + make use of a special execution mode on 32bit x86 processors called
54737 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
54738 + video cards and will still work with this option enabled. The purpose
54739 + of the option is to prevent exploitation of emulation errors in
54740 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
54741 + Nearly all users should be able to enable this option.
54742 +
54743 +config GRKERNSEC_IO
54744 + bool "Disable privileged I/O"
54745 + depends on X86
54746 + select RTC_CLASS
54747 + select RTC_INTF_DEV
54748 + select RTC_DRV_CMOS
54749 +
54750 + help
54751 + If you say Y here, all ioperm and iopl calls will return an error.
54752 + Ioperm and iopl can be used to modify the running kernel.
54753 + Unfortunately, some programs need this access to operate properly,
54754 + the most notable of which are XFree86 and hwclock. hwclock can be
54755 + remedied by having RTC support in the kernel, so real-time
54756 + clock support is enabled if this option is enabled, to ensure
54757 + that hwclock operates correctly. XFree86 still will not
54758 + operate correctly with this option enabled, so DO NOT CHOOSE Y
54759 + IF YOU USE XFree86. If you use XFree86 and you still want to
54760 + protect your kernel against modification, use the RBAC system.
54761 +
54762 +config GRKERNSEC_PROC_MEMMAP
54763 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
54764 + default y if (PAX_NOEXEC || PAX_ASLR)
54765 + depends on PAX_NOEXEC || PAX_ASLR
54766 + help
54767 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
54768 + give no information about the addresses of its mappings if
54769 + PaX features that rely on random addresses are enabled on the task.
54770 + If you use PaX it is greatly recommended that you say Y here as it
54771 + closes up a hole that makes the full ASLR useless for suid
54772 + binaries.
54773 +
54774 +config GRKERNSEC_BRUTE
54775 + bool "Deter exploit bruteforcing"
54776 + help
54777 + If you say Y here, attempts to bruteforce exploits against forking
54778 + daemons such as apache or sshd, as well as against suid/sgid binaries
54779 + will be deterred. When a child of a forking daemon is killed by PaX
54780 + or crashes due to an illegal instruction or other suspicious signal,
54781 + the parent process will be delayed 30 seconds upon every subsequent
54782 + fork until the administrator is able to assess the situation and
54783 + restart the daemon.
54784 + In the suid/sgid case, the attempt is logged, the user has all their
54785 + processes terminated, and they are prevented from executing any further
54786 + processes for 15 minutes.
54787 + It is recommended that you also enable signal logging in the auditing
54788 + section so that logs are generated when a process triggers a suspicious
54789 + signal.
54790 + If the sysctl option is enabled, a sysctl option with name
54791 + "deter_bruteforce" is created.
54792 +
54793 +config GRKERNSEC_MODHARDEN
54794 + bool "Harden module auto-loading"
54795 + depends on MODULES
54796 + help
54797 + If you say Y here, module auto-loading in response to use of some
54798 + feature implemented by an unloaded module will be restricted to
54799 + root users. Enabling this option helps defend against attacks
54800 + by unprivileged users who abuse the auto-loading behavior to
54801 + cause a vulnerable module to load that is then exploited.
54802 +
54803 + If this option prevents a legitimate use of auto-loading for a
54804 + non-root user, the administrator can execute modprobe manually
54805 + with the exact name of the module mentioned in the alert log.
54806 + Alternatively, the administrator can add the module to the list
54807 + of modules loaded at boot by modifying init scripts.
54808 +
54809 + Modification of init scripts will most likely be needed on
54810 + Ubuntu servers with encrypted home directory support enabled,
54811 + as the first non-root user logging in will cause the ecb(aes),
54812 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
54813 +
54814 +config GRKERNSEC_HIDESYM
54815 + bool "Hide kernel symbols"
54816 + help
54817 + If you say Y here, getting information on loaded modules, and
54818 + displaying all kernel symbols through a syscall will be restricted
54819 + to users with CAP_SYS_MODULE. For software compatibility reasons,
54820 + /proc/kallsyms will be restricted to the root user. The RBAC
54821 + system can hide that entry even from root.
54822 +
54823 + This option also prevents leaking of kernel addresses through
54824 + several /proc entries.
54825 +
54826 + Note that this option is only effective provided the following
54827 + conditions are met:
54828 + 1) The kernel using grsecurity is not precompiled by some distribution
54829 + 2) You have also enabled GRKERNSEC_DMESG
54830 + 3) You are using the RBAC system and hiding other files such as your
54831 + kernel image and System.map. Alternatively, enabling this option
54832 + causes the permissions on /boot, /lib/modules, and the kernel
54833 + source directory to change at compile time to prevent
54834 + reading by non-root users.
54835 + If the above conditions are met, this option will aid in providing a
54836 + useful protection against local kernel exploitation of overflows
54837 + and arbitrary read/write vulnerabilities.
54838 +
54839 +config GRKERNSEC_KERN_LOCKOUT
54840 + bool "Active kernel exploit response"
54841 + depends on X86 || ARM || PPC || SPARC
54842 + help
54843 + If you say Y here, when a PaX alert is triggered due to suspicious
54844 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
54845 + or an OOPs occurs due to bad memory accesses, instead of just
54846 + terminating the offending process (and potentially allowing
54847 + a subsequent exploit from the same user), we will take one of two
54848 + actions:
54849 + If the user was root, we will panic the system
54850 + If the user was non-root, we will log the attempt, terminate
54851 + all processes owned by the user, then prevent them from creating
54852 + any new processes until the system is restarted
54853 + This deters repeated kernel exploitation/bruteforcing attempts
54854 + and is useful for later forensics.
54855 +
54856 +endmenu
54857 +menu "Role Based Access Control Options"
54858 +depends on GRKERNSEC
54859 +
54860 +config GRKERNSEC_RBAC_DEBUG
54861 + bool
54862 +
54863 +config GRKERNSEC_NO_RBAC
54864 + bool "Disable RBAC system"
54865 + help
54866 + If you say Y here, the /dev/grsec device will be removed from the kernel,
54867 + preventing the RBAC system from being enabled. You should only say Y
54868 + here if you have no intention of using the RBAC system, so as to prevent
54869 + an attacker with root access from misusing the RBAC system to hide files
54870 + and processes when loadable module support and /dev/[k]mem have been
54871 + locked down.
54872 +
54873 +config GRKERNSEC_ACL_HIDEKERN
54874 + bool "Hide kernel processes"
54875 + help
54876 + If you say Y here, all kernel threads will be hidden to all
54877 + processes but those whose subject has the "view hidden processes"
54878 + flag.
54879 +
54880 +config GRKERNSEC_ACL_MAXTRIES
54881 + int "Maximum tries before password lockout"
54882 + default 3
54883 + help
54884 + This option enforces the maximum number of times a user can attempt
54885 + to authorize themselves with the grsecurity RBAC system before being
54886 + denied the ability to attempt authorization again for a specified time.
54887 + The lower the number, the harder it will be to brute-force a password.
54888 +
54889 +config GRKERNSEC_ACL_TIMEOUT
54890 + int "Time to wait after max password tries, in seconds"
54891 + default 30
54892 + help
54893 + This option specifies the time the user must wait after attempting to
54894 + authorize to the RBAC system with the maximum number of invalid
54895 + passwords. The higher the number, the harder it will be to brute-force
54896 + a password.
54897 +
54898 +endmenu
54899 +menu "Filesystem Protections"
54900 +depends on GRKERNSEC
54901 +
54902 +config GRKERNSEC_PROC
54903 + bool "Proc restrictions"
54904 + help
54905 + If you say Y here, the permissions of the /proc filesystem
54906 + will be altered to enhance system security and privacy. You MUST
54907 + choose either a user only restriction or a user and group restriction.
54908 + Depending upon the option you choose, you can either restrict users to
54909 + see only the processes they themselves run, or choose a group that can
54910 + view all processes and files normally restricted to root if you choose
54911 + the "restrict to user only" option. NOTE: If you're running identd as
54912 + a non-root user, you will have to run it as the group you specify here.
54913 +
54914 +config GRKERNSEC_PROC_USER
54915 + bool "Restrict /proc to user only"
54916 + depends on GRKERNSEC_PROC
54917 + help
54918 + If you say Y here, non-root users will only be able to view their own
54919 + processes, and restricts them from viewing network-related information,
54920 + and viewing kernel symbol and module information.
54921 +
54922 +config GRKERNSEC_PROC_USERGROUP
54923 + bool "Allow special group"
54924 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
54925 + help
54926 + If you say Y here, you will be able to select a group that will be
54927 + able to view all processes and network-related information. If you've
54928 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
54929 + remain hidden. This option is useful if you want to run identd as
54930 + a non-root user.
54931 +
54932 +config GRKERNSEC_PROC_GID
54933 + int "GID for special group"
54934 + depends on GRKERNSEC_PROC_USERGROUP
54935 + default 1001
54936 +
54937 +config GRKERNSEC_PROC_ADD
54938 + bool "Additional restrictions"
54939 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
54940 + help
54941 + If you say Y here, additional restrictions will be placed on
54942 + /proc that keep normal users from viewing device information and
54943 + slabinfo information that could be useful for exploits.
54944 +
54945 +config GRKERNSEC_LINK
54946 + bool "Linking restrictions"
54947 + help
54948 + If you say Y here, /tmp race exploits will be prevented, since users
54949 + will no longer be able to follow symlinks owned by other users in
54950 + world-writable +t directories (e.g. /tmp), unless the owner of the
54951 + symlink is the owner of the directory. Users will also not be
54952 + able to hardlink to files they do not own. If the sysctl option is
54953 + enabled, a sysctl option with name "linking_restrictions" is created.
54954 +
54955 +config GRKERNSEC_FIFO
54956 + bool "FIFO restrictions"
54957 + help
54958 + If you say Y here, users will not be able to write to FIFOs they don't
54959 + own in world-writable +t directories (e.g. /tmp), unless the owner of
54960 + the FIFO is the same owner of the directory it's held in. If the sysctl
54961 + option is enabled, a sysctl option with name "fifo_restrictions" is
54962 + created.
54963 +
54964 +config GRKERNSEC_SYSFS_RESTRICT
54965 + bool "Sysfs/debugfs restriction"
54966 + depends on SYSFS
54967 + help
54968 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
54969 + any filesystem normally mounted under it (e.g. debugfs) will only
54970 + be accessible by root. These filesystems generally provide access
54971 + to hardware and debug information that isn't appropriate for unprivileged
54972 + users of the system. Sysfs and debugfs have also become a large source
54973 + of new vulnerabilities, ranging from infoleaks to local compromise.
54974 + There has been very little oversight with an eye toward security involved
54975 + in adding new exporters of information to these filesystems, so their
54976 + use is discouraged.
54977 + This option is equivalent to a chmod 0700 of the mount paths.
54978 +
54979 +config GRKERNSEC_ROFS
54980 + bool "Runtime read-only mount protection"
54981 + help
54982 + If you say Y here, a sysctl option with name "romount_protect" will
54983 + be created. By setting this option to 1 at runtime, filesystems
54984 + will be protected in the following ways:
54985 + * No new writable mounts will be allowed
54986 + * Existing read-only mounts won't be able to be remounted read/write
54987 + * Write operations will be denied on all block devices
54988 + This option acts independently of grsec_lock: once it is set to 1,
54989 + it cannot be turned off. Therefore, please be mindful of the resulting
54990 + behavior if this option is enabled in an init script on a read-only
54991 + filesystem. This feature is mainly intended for secure embedded systems.
54992 +
54993 +config GRKERNSEC_CHROOT
54994 + bool "Chroot jail restrictions"
54995 + help
54996 + If you say Y here, you will be able to choose several options that will
54997 + make breaking out of a chrooted jail much more difficult. If you
54998 + encounter no software incompatibilities with the following options, it
54999 + is recommended that you enable each one.
55000 +
55001 +config GRKERNSEC_CHROOT_MOUNT
55002 + bool "Deny mounts"
55003 + depends on GRKERNSEC_CHROOT
55004 + help
55005 + If you say Y here, processes inside a chroot will not be able to
55006 + mount or remount filesystems. If the sysctl option is enabled, a
55007 + sysctl option with name "chroot_deny_mount" is created.
55008 +
55009 +config GRKERNSEC_CHROOT_DOUBLE
55010 + bool "Deny double-chroots"
55011 + depends on GRKERNSEC_CHROOT
55012 + help
55013 + If you say Y here, processes inside a chroot will not be able to chroot
55014 + again outside the chroot. This is a widely used method of breaking
55015 + out of a chroot jail and should not be allowed. If the sysctl
55016 + option is enabled, a sysctl option with name
55017 + "chroot_deny_chroot" is created.
55018 +
55019 +config GRKERNSEC_CHROOT_PIVOT
55020 + bool "Deny pivot_root in chroot"
55021 + depends on GRKERNSEC_CHROOT
55022 + help
55023 + If you say Y here, processes inside a chroot will not be able to use
55024 + a function called pivot_root() that was introduced in Linux 2.3.41. It
55025 + works similar to chroot in that it changes the root filesystem. This
55026 + function could be misused in a chrooted process to attempt to break out
55027 + of the chroot, and therefore should not be allowed. If the sysctl
55028 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
55029 + created.
55030 +
55031 +config GRKERNSEC_CHROOT_CHDIR
55032 + bool "Enforce chdir(\"/\") on all chroots"
55033 + depends on GRKERNSEC_CHROOT
55034 + help
55035 + If you say Y here, the current working directory of all newly-chrooted
55036 + applications will be set to the root directory of the chroot.
55037 + The man page on chroot(2) states:
55038 + Note that this call does not change the current working
55039 + directory, so that `.' can be outside the tree rooted at
55040 + `/'. In particular, the super-user can escape from a
55041 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
55042 +
55043 + It is recommended that you say Y here, since it's not known to break
55044 + any software. If the sysctl option is enabled, a sysctl option with
55045 + name "chroot_enforce_chdir" is created.
55046 +
55047 +config GRKERNSEC_CHROOT_CHMOD
55048 + bool "Deny (f)chmod +s"
55049 + depends on GRKERNSEC_CHROOT
55050 + help
55051 + If you say Y here, processes inside a chroot will not be able to chmod
55052 + or fchmod files to make them have suid or sgid bits. This protects
55053 + against another published method of breaking a chroot. If the sysctl
55054 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
55055 + created.
55056 +
55057 +config GRKERNSEC_CHROOT_FCHDIR
55058 + bool "Deny fchdir out of chroot"
55059 + depends on GRKERNSEC_CHROOT
55060 + help
55061 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
55062 + to a file descriptor of the chrooting process that points to a directory
55063 + outside the filesystem will be stopped. If the sysctl option
55064 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
55065 +
55066 +config GRKERNSEC_CHROOT_MKNOD
55067 + bool "Deny mknod"
55068 + depends on GRKERNSEC_CHROOT
55069 + help
55070 + If you say Y here, processes inside a chroot will not be allowed to
55071 + mknod. The problem with using mknod inside a chroot is that it
55072 + would allow an attacker to create a device entry that is the same
55073 + as one on the physical root of your system, which could range from
55074 + anything from the console device to a device for your harddrive (which
55075 + they could then use to wipe the drive or steal data). It is recommended
55076 + that you say Y here, unless you run into software incompatibilities.
55077 + If the sysctl option is enabled, a sysctl option with name
55078 + "chroot_deny_mknod" is created.
55079 +
55080 +config GRKERNSEC_CHROOT_SHMAT
55081 + bool "Deny shmat() out of chroot"
55082 + depends on GRKERNSEC_CHROOT
55083 + help
55084 + If you say Y here, processes inside a chroot will not be able to attach
55085 + to shared memory segments that were created outside of the chroot jail.
55086 + It is recommended that you say Y here. If the sysctl option is enabled,
55087 + a sysctl option with name "chroot_deny_shmat" is created.
55088 +
55089 +config GRKERNSEC_CHROOT_UNIX
55090 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
55091 + depends on GRKERNSEC_CHROOT
55092 + help
55093 + If you say Y here, processes inside a chroot will not be able to
55094 + connect to abstract (meaning not belonging to a filesystem) Unix
55095 + domain sockets that were bound outside of a chroot. It is recommended
55096 + that you say Y here. If the sysctl option is enabled, a sysctl option
55097 + with name "chroot_deny_unix" is created.
55098 +
55099 +config GRKERNSEC_CHROOT_FINDTASK
55100 + bool "Protect outside processes"
55101 + depends on GRKERNSEC_CHROOT
55102 + help
55103 + If you say Y here, processes inside a chroot will not be able to
55104 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
55105 + getsid, or view any process outside of the chroot. If the sysctl
55106 + option is enabled, a sysctl option with name "chroot_findtask" is
55107 + created.
55108 +
55109 +config GRKERNSEC_CHROOT_NICE
55110 + bool "Restrict priority changes"
55111 + depends on GRKERNSEC_CHROOT
55112 + help
55113 + If you say Y here, processes inside a chroot will not be able to raise
55114 + the priority of processes in the chroot, or alter the priority of
55115 + processes outside the chroot. This provides more security than simply
55116 + removing CAP_SYS_NICE from the process' capability set. If the
55117 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
55118 + is created.
55119 +
55120 +config GRKERNSEC_CHROOT_SYSCTL
55121 + bool "Deny sysctl writes"
55122 + depends on GRKERNSEC_CHROOT
55123 + help
55124 + If you say Y here, an attacker in a chroot will not be able to
55125 + write to sysctl entries, either by sysctl(2) or through a /proc
55126 + interface. It is strongly recommended that you say Y here. If the
55127 + sysctl option is enabled, a sysctl option with name
55128 + "chroot_deny_sysctl" is created.
55129 +
55130 +config GRKERNSEC_CHROOT_CAPS
55131 + bool "Capability restrictions"
55132 + depends on GRKERNSEC_CHROOT
55133 + help
55134 + If you say Y here, the capabilities on all processes within a
55135 + chroot jail will be lowered to stop module insertion, raw i/o,
55136 + system and net admin tasks, rebooting the system, modifying immutable
55137 + files, modifying IPC owned by another, and changing the system time.
55138 + This is left an option because it can break some apps. Disable this
55139 + if your chrooted apps are having problems performing those kinds of
55140 + tasks. If the sysctl option is enabled, a sysctl option with
55141 + name "chroot_caps" is created.
55142 +
55143 +endmenu
55144 +menu "Kernel Auditing"
55145 +depends on GRKERNSEC
55146 +
55147 +config GRKERNSEC_AUDIT_GROUP
55148 + bool "Single group for auditing"
55149 + help
55150 + If you say Y here, the exec, chdir, and (un)mount logging features
55151 + will only operate on a group you specify. This option is recommended
55152 + if you only want to watch certain users instead of having a large
55153 + amount of logs from the entire system. If the sysctl option is enabled,
55154 + a sysctl option with name "audit_group" is created.
55155 +
55156 +config GRKERNSEC_AUDIT_GID
55157 + int "GID for auditing"
55158 + depends on GRKERNSEC_AUDIT_GROUP
55159 + default 1007
55160 +
55161 +config GRKERNSEC_EXECLOG
55162 + bool "Exec logging"
55163 + help
55164 + If you say Y here, all execve() calls will be logged (since the
55165 + other exec*() calls are frontends to execve(), all execution
55166 + will be logged). Useful for shell-servers that like to keep track
55167 + of their users. If the sysctl option is enabled, a sysctl option with
55168 + name "exec_logging" is created.
55169 + WARNING: This option when enabled will produce a LOT of logs, especially
55170 + on an active system.
55171 +
55172 +config GRKERNSEC_RESLOG
55173 + bool "Resource logging"
55174 + help
55175 + If you say Y here, all attempts to overstep resource limits will
55176 + be logged with the resource name, the requested size, and the current
55177 + limit. It is highly recommended that you say Y here. If the sysctl
55178 + option is enabled, a sysctl option with name "resource_logging" is
55179 + created. If the RBAC system is enabled, the sysctl value is ignored.
55180 +
55181 +config GRKERNSEC_CHROOT_EXECLOG
55182 + bool "Log execs within chroot"
55183 + help
55184 + If you say Y here, all executions inside a chroot jail will be logged
55185 + to syslog. This can cause a large amount of logs if certain
55186 + applications (eg. djb's daemontools) are installed on the system, and
55187 + is therefore left as an option. If the sysctl option is enabled, a
55188 + sysctl option with name "chroot_execlog" is created.
55189 +
55190 +config GRKERNSEC_AUDIT_PTRACE
55191 + bool "Ptrace logging"
55192 + help
55193 + If you say Y here, all attempts to attach to a process via ptrace
55194 + will be logged. If the sysctl option is enabled, a sysctl option
55195 + with name "audit_ptrace" is created.
55196 +
55197 +config GRKERNSEC_AUDIT_CHDIR
55198 + bool "Chdir logging"
55199 + help
55200 + If you say Y here, all chdir() calls will be logged. If the sysctl
55201 + option is enabled, a sysctl option with name "audit_chdir" is created.
55202 +
55203 +config GRKERNSEC_AUDIT_MOUNT
55204 + bool "(Un)Mount logging"
55205 + help
55206 + If you say Y here, all mounts and unmounts will be logged. If the
55207 + sysctl option is enabled, a sysctl option with name "audit_mount" is
55208 + created.
55209 +
55210 +config GRKERNSEC_SIGNAL
55211 + bool "Signal logging"
55212 + help
55213 + If you say Y here, certain important signals will be logged, such as
55214 + SIGSEGV, which will as a result inform you of when an error in a program
55215 + occurred, which in some cases could mean a possible exploit attempt.
55216 + If the sysctl option is enabled, a sysctl option with name
55217 + "signal_logging" is created.
55218 +
55219 +config GRKERNSEC_FORKFAIL
55220 + bool "Fork failure logging"
55221 + help
55222 + If you say Y here, all failed fork() attempts will be logged.
55223 + This could suggest a fork bomb, or someone attempting to overstep
55224 + their process limit. If the sysctl option is enabled, a sysctl option
55225 + with name "forkfail_logging" is created.
55226 +
55227 +config GRKERNSEC_TIME
55228 + bool "Time change logging"
55229 + help
55230 + If you say Y here, any changes of the system clock will be logged.
55231 + If the sysctl option is enabled, a sysctl option with name
55232 + "timechange_logging" is created.
55233 +
55234 +config GRKERNSEC_PROC_IPADDR
55235 + bool "/proc/<pid>/ipaddr support"
55236 + help
55237 + If you say Y here, a new entry will be added to each /proc/<pid>
55238 + directory that contains the IP address of the person using the task.
55239 + The IP is carried across local TCP and AF_UNIX stream sockets.
55240 + This information can be useful for IDS/IPSes to perform remote response
55241 + to a local attack. The entry is readable by only the owner of the
55242 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
55243 + the RBAC system), and thus does not create privacy concerns.
55244 +
55245 +config GRKERNSEC_RWXMAP_LOG
55246 + bool 'Denied RWX mmap/mprotect logging'
55247 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
55248 + help
55249 + If you say Y here, calls to mmap() and mprotect() with explicit
55250 + usage of PROT_WRITE and PROT_EXEC together will be logged when
55251 + denied by the PAX_MPROTECT feature. If the sysctl option is
55252 + enabled, a sysctl option with name "rwxmap_logging" is created.
55253 +
55254 +config GRKERNSEC_AUDIT_TEXTREL
55255 + bool 'ELF text relocations logging (READ HELP)'
55256 + depends on PAX_MPROTECT
55257 + help
55258 + If you say Y here, text relocations will be logged with the filename
55259 + of the offending library or binary. The purpose of the feature is
55260 + to help Linux distribution developers get rid of libraries and
55261 + binaries that need text relocations which hinder the future progress
55262 + of PaX. Only Linux distribution developers should say Y here, and
55263 + never on a production machine, as this option creates an information
55264 + leak that could aid an attacker in defeating the randomization of
55265 + a single memory region. If the sysctl option is enabled, a sysctl
55266 + option with name "audit_textrel" is created.
55267 +
55268 +endmenu
55269 +
55270 +menu "Executable Protections"
55271 +depends on GRKERNSEC
55272 +
55273 +config GRKERNSEC_DMESG
55274 + bool "Dmesg(8) restriction"
55275 + help
55276 + If you say Y here, non-root users will not be able to use dmesg(8)
55277 + to view up to the last 4kb of messages in the kernel's log buffer.
55278 + The kernel's log buffer often contains kernel addresses and other
55279 + identifying information useful to an attacker in fingerprinting a
55280 + system for a targeted exploit.
55281 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
55282 + created.
55283 +
55284 +config GRKERNSEC_HARDEN_PTRACE
55285 + bool "Deter ptrace-based process snooping"
55286 + help
55287 + If you say Y here, TTY sniffers and other malicious monitoring
55288 + programs implemented through ptrace will be defeated. If you
55289 + have been using the RBAC system, this option has already been
55290 + enabled for several years for all users, with the ability to make
55291 + fine-grained exceptions.
55292 +
55293 + This option only affects the ability of non-root users to ptrace
55294 + processes that are not a descendent of the ptracing process.
55295 + This means that strace ./binary and gdb ./binary will still work,
55296 + but attaching to arbitrary processes will not. If the sysctl
55297 + option is enabled, a sysctl option with name "harden_ptrace" is
55298 + created.
55299 +
55300 +config GRKERNSEC_TPE
55301 + bool "Trusted Path Execution (TPE)"
55302 + help
55303 + If you say Y here, you will be able to choose a gid to add to the
55304 + supplementary groups of users you want to mark as "untrusted."
55305 + These users will not be able to execute any files that are not in
55306 + root-owned directories writable only by root. If the sysctl option
55307 + is enabled, a sysctl option with name "tpe" is created.
55308 +
55309 +config GRKERNSEC_TPE_ALL
55310 + bool "Partially restrict all non-root users"
55311 + depends on GRKERNSEC_TPE
55312 + help
55313 + If you say Y here, all non-root users will be covered under
55314 + a weaker TPE restriction. This is separate from, and in addition to,
55315 + the main TPE options that you have selected elsewhere. Thus, if a
55316 + "trusted" GID is chosen, this restriction applies to even that GID.
55317 + Under this restriction, all non-root users will only be allowed to
55318 + execute files in directories they own that are not group or
55319 + world-writable, or in directories owned by root and writable only by
55320 + root. If the sysctl option is enabled, a sysctl option with name
55321 + "tpe_restrict_all" is created.
55322 +
55323 +config GRKERNSEC_TPE_INVERT
55324 + bool "Invert GID option"
55325 + depends on GRKERNSEC_TPE
55326 + help
55327 + If you say Y here, the group you specify in the TPE configuration will
55328 + decide what group TPE restrictions will be *disabled* for. This
55329 + option is useful if you want TPE restrictions to be applied to most
55330 + users on the system. If the sysctl option is enabled, a sysctl option
55331 + with name "tpe_invert" is created. Unlike other sysctl options, this
55332 + entry will default to on for backward-compatibility.
55333 +
55334 +config GRKERNSEC_TPE_GID
55335 + int "GID for untrusted users"
55336 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
55337 + default 1005
55338 + help
55339 + Setting this GID determines what group TPE restrictions will be
55340 + *enabled* for. If the sysctl option is enabled, a sysctl option
55341 + with name "tpe_gid" is created.
55342 +
55343 +config GRKERNSEC_TPE_GID
55344 + int "GID for trusted users"
55345 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
55346 + default 1005
55347 + help
55348 + Setting this GID determines what group TPE restrictions will be
55349 + *disabled* for. If the sysctl option is enabled, a sysctl option
55350 + with name "tpe_gid" is created.
55351 +
55352 +endmenu
55353 +menu "Network Protections"
55354 +depends on GRKERNSEC
55355 +
55356 +config GRKERNSEC_RANDNET
55357 + bool "Larger entropy pools"
55358 + help
55359 + If you say Y here, the entropy pools used for many features of Linux
55360 + and grsecurity will be doubled in size. Since several grsecurity
55361 + features use additional randomness, it is recommended that you say Y
55362 + here. Saying Y here has a similar effect as modifying
55363 + /proc/sys/kernel/random/poolsize.
55364 +
55365 +config GRKERNSEC_BLACKHOLE
55366 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
55367 + depends on NET
55368 + help
55369 + If you say Y here, neither TCP resets nor ICMP
55370 + destination-unreachable packets will be sent in response to packets
55371 + sent to ports for which no associated listening process exists.
55372 + This feature supports both IPV4 and IPV6 and exempts the
55373 + loopback interface from blackholing. Enabling this feature
55374 + makes a host more resilient to DoS attacks and reduces network
55375 + visibility against scanners.
55376 +
55377 + The blackhole feature as-implemented is equivalent to the FreeBSD
55378 + blackhole feature, as it prevents RST responses to all packets, not
55379 + just SYNs. Under most application behavior this causes no
55380 + problems, but applications (like haproxy) may not close certain
55381 + connections in a way that cleanly terminates them on the remote
55382 + end, leaving the remote host in LAST_ACK state. Because of this
55383 + side-effect and to prevent intentional LAST_ACK DoSes, this
55384 + feature also adds automatic mitigation against such attacks.
55385 + The mitigation drastically reduces the amount of time a socket
55386 + can spend in LAST_ACK state. If you're using haproxy and not
55387 + all servers it connects to have this option enabled, consider
55388 + disabling this feature on the haproxy host.
55389 +
55390 + If the sysctl option is enabled, two sysctl options with names
55391 + "ip_blackhole" and "lastack_retries" will be created.
55392 + While "ip_blackhole" takes the standard zero/non-zero on/off
55393 + toggle, "lastack_retries" uses the same kinds of values as
55394 + "tcp_retries1" and "tcp_retries2". The default value of 4
55395 + prevents a socket from lasting more than 45 seconds in LAST_ACK
55396 + state.
55397 +
55398 +config GRKERNSEC_SOCKET
55399 + bool "Socket restrictions"
55400 + depends on NET
55401 + help
55402 + If you say Y here, you will be able to choose from several options.
55403 + If you assign a GID on your system and add it to the supplementary
55404 + groups of users you want to restrict socket access to, this patch
55405 + will perform up to three things, based on the option(s) you choose.
55406 +
55407 +config GRKERNSEC_SOCKET_ALL
55408 + bool "Deny any sockets to group"
55409 + depends on GRKERNSEC_SOCKET
55410 + help
55411 + If you say Y here, you will be able to choose a GID whose users will
55412 + be unable to connect to other hosts from your machine or run server
55413 + applications from your machine. If the sysctl option is enabled, a
55414 + sysctl option with name "socket_all" is created.
55415 +
55416 +config GRKERNSEC_SOCKET_ALL_GID
55417 + int "GID to deny all sockets for"
55418 + depends on GRKERNSEC_SOCKET_ALL
55419 + default 1004
55420 + help
55421 + Here you can choose the GID to disable socket access for. Remember to
55422 + add the users you want socket access disabled for to the GID
55423 + specified here. If the sysctl option is enabled, a sysctl option
55424 + with name "socket_all_gid" is created.
55425 +
55426 +config GRKERNSEC_SOCKET_CLIENT
55427 + bool "Deny client sockets to group"
55428 + depends on GRKERNSEC_SOCKET
55429 + help
55430 + If you say Y here, you will be able to choose a GID whose users will
55431 + be unable to connect to other hosts from your machine, but will be
55432 + able to run servers. If this option is enabled, all users in the group
55433 + you specify will have to use passive mode when initiating ftp transfers
55434 + from the shell on your machine. If the sysctl option is enabled, a
55435 + sysctl option with name "socket_client" is created.
55436 +
55437 +config GRKERNSEC_SOCKET_CLIENT_GID
55438 + int "GID to deny client sockets for"
55439 + depends on GRKERNSEC_SOCKET_CLIENT
55440 + default 1003
55441 + help
55442 + Here you can choose the GID to disable client socket access for.
55443 + Remember to add the users you want client socket access disabled for to
55444 + the GID specified here. If the sysctl option is enabled, a sysctl
55445 + option with name "socket_client_gid" is created.
55446 +
55447 +config GRKERNSEC_SOCKET_SERVER
55448 + bool "Deny server sockets to group"
55449 + depends on GRKERNSEC_SOCKET
55450 + help
55451 + If you say Y here, you will be able to choose a GID whose users will
55452 + be unable to run server applications from your machine. If the sysctl
55453 + option is enabled, a sysctl option with name "socket_server" is created.
55454 +
55455 +config GRKERNSEC_SOCKET_SERVER_GID
55456 + int "GID to deny server sockets for"
55457 + depends on GRKERNSEC_SOCKET_SERVER
55458 + default 1002
55459 + help
55460 + Here you can choose the GID to disable server socket access for.
55461 + Remember to add the users you want server socket access disabled for to
55462 + the GID specified here. If the sysctl option is enabled, a sysctl
55463 + option with name "socket_server_gid" is created.
55464 +
55465 +endmenu
55466 +menu "Sysctl support"
55467 +depends on GRKERNSEC && SYSCTL
55468 +
55469 +config GRKERNSEC_SYSCTL
55470 + bool "Sysctl support"
55471 + help
55472 + If you say Y here, you will be able to change the options that
55473 + grsecurity runs with at bootup, without having to recompile your
55474 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
55475 + to enable (1) or disable (0) various features. All the sysctl entries
55476 + are mutable until the "grsec_lock" entry is set to a non-zero value.
55477 + All features enabled in the kernel configuration are disabled at boot
55478 + if you do not say Y to the "Turn on features by default" option.
55479 + All options should be set at startup, and the grsec_lock entry should
55480 + be set to a non-zero value after all the options are set.
55481 + *THIS IS EXTREMELY IMPORTANT*
55482 +
55483 +config GRKERNSEC_SYSCTL_DISTRO
55484 + bool "Extra sysctl support for distro makers (READ HELP)"
55485 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
55486 + help
55487 + If you say Y here, additional sysctl options will be created
55488 + for features that affect processes running as root. Therefore,
55489 + it is critical when using this option that the grsec_lock entry be
55490 + enabled after boot. Only distros with prebuilt kernel packages
55491 + with this option enabled that can ensure grsec_lock is enabled
55492 + after boot should use this option.
55493 + *Failure to set grsec_lock after boot makes all grsec features
55494 + this option covers useless*
55495 +
55496 + Currently this option creates the following sysctl entries:
55497 + "Disable Privileged I/O": "disable_priv_io"
55498 +
55499 +config GRKERNSEC_SYSCTL_ON
55500 + bool "Turn on features by default"
55501 + depends on GRKERNSEC_SYSCTL
55502 + help
55503 + If you say Y here, instead of having all features enabled in the
55504 + kernel configuration disabled at boot time, the features will be
55505 + enabled at boot time. It is recommended you say Y here unless
55506 + there is some reason you would want all sysctl-tunable features to
55507 + be disabled by default. As mentioned elsewhere, it is important
55508 + to enable the grsec_lock entry once you have finished modifying
55509 + the sysctl entries.
55510 +
55511 +endmenu
55512 +menu "Logging Options"
55513 +depends on GRKERNSEC
55514 +
55515 +config GRKERNSEC_FLOODTIME
55516 + int "Seconds in between log messages (minimum)"
55517 + default 10
55518 + help
55519 + This option allows you to enforce the number of seconds between
55520 + grsecurity log messages. The default should be suitable for most
55521 + people, however, if you choose to change it, choose a value small enough
55522 + to allow informative logs to be produced, but large enough to
55523 + prevent flooding.
55524 +
55525 +config GRKERNSEC_FLOODBURST
55526 + int "Number of messages in a burst (maximum)"
55527 + default 6
55528 + help
55529 + This option allows you to choose the maximum number of messages allowed
55530 + within the flood time interval you chose in a separate option. The
55531 + default should be suitable for most people, however if you find that
55532 + many of your logs are being interpreted as flooding, you may want to
55533 + raise this value.
55534 +
55535 +endmenu
55536 +
55537 +endmenu
55538 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
55539 new file mode 100644
55540 index 0000000..be9ae3a
55541 --- /dev/null
55542 +++ b/grsecurity/Makefile
55543 @@ -0,0 +1,36 @@
55544 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
55545 +# during 2001-2009 it has been completely redesigned by Brad Spengler
55546 +# into an RBAC system
55547 +#
55548 +# All code in this directory and various hooks inserted throughout the kernel
55549 +# are copyright Brad Spengler - Open Source Security, Inc., and released
55550 +# under the GPL v2 or higher
55551 +
55552 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
55553 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
55554 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
55555 +
55556 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
55557 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
55558 + gracl_learn.o grsec_log.o
55559 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
55560 +
55561 +ifdef CONFIG_NET
55562 +obj-y += grsec_sock.o
55563 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
55564 +endif
55565 +
55566 +ifndef CONFIG_GRKERNSEC
55567 +obj-y += grsec_disabled.o
55568 +endif
55569 +
55570 +ifdef CONFIG_GRKERNSEC_HIDESYM
55571 +extra-y := grsec_hidesym.o
55572 +$(obj)/grsec_hidesym.o:
55573 + @-chmod -f 500 /boot
55574 + @-chmod -f 500 /lib/modules
55575 + @-chmod -f 500 /lib64/modules
55576 + @-chmod -f 500 /lib32/modules
55577 + @-chmod -f 700 .
55578 + @echo ' grsec: protected kernel image paths'
55579 +endif
55580 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
55581 new file mode 100644
55582 index 0000000..6bd68d6
55583 --- /dev/null
55584 +++ b/grsecurity/gracl.c
55585 @@ -0,0 +1,4141 @@
55586 +#include <linux/kernel.h>
55587 +#include <linux/module.h>
55588 +#include <linux/sched.h>
55589 +#include <linux/mm.h>
55590 +#include <linux/file.h>
55591 +#include <linux/fs.h>
55592 +#include <linux/namei.h>
55593 +#include <linux/mount.h>
55594 +#include <linux/tty.h>
55595 +#include <linux/proc_fs.h>
55596 +#include <linux/smp_lock.h>
55597 +#include <linux/slab.h>
55598 +#include <linux/vmalloc.h>
55599 +#include <linux/types.h>
55600 +#include <linux/sysctl.h>
55601 +#include <linux/netdevice.h>
55602 +#include <linux/ptrace.h>
55603 +#include <linux/gracl.h>
55604 +#include <linux/gralloc.h>
55605 +#include <linux/grsecurity.h>
55606 +#include <linux/grinternal.h>
55607 +#include <linux/pid_namespace.h>
55608 +#include <linux/fdtable.h>
55609 +#include <linux/percpu.h>
55610 +
55611 +#include <asm/uaccess.h>
55612 +#include <asm/errno.h>
55613 +#include <asm/mman.h>
55614 +
55615 +static struct acl_role_db acl_role_set;
55616 +static struct name_db name_set;
55617 +static struct inodev_db inodev_set;
55618 +
55619 +/* for keeping track of userspace pointers used for subjects, so we
55620 + can share references in the kernel as well
55621 +*/
55622 +
55623 +static struct dentry *real_root;
55624 +static struct vfsmount *real_root_mnt;
55625 +
55626 +static struct acl_subj_map_db subj_map_set;
55627 +
55628 +static struct acl_role_label *default_role;
55629 +
55630 +static struct acl_role_label *role_list;
55631 +
55632 +static u16 acl_sp_role_value;
55633 +
55634 +extern char *gr_shared_page[4];
55635 +static DEFINE_MUTEX(gr_dev_mutex);
55636 +DEFINE_RWLOCK(gr_inode_lock);
55637 +
55638 +struct gr_arg *gr_usermode;
55639 +
55640 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
55641 +
55642 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
55643 +extern void gr_clear_learn_entries(void);
55644 +
55645 +#ifdef CONFIG_GRKERNSEC_RESLOG
55646 +extern void gr_log_resource(const struct task_struct *task,
55647 + const int res, const unsigned long wanted, const int gt);
55648 +#endif
55649 +
55650 +unsigned char *gr_system_salt;
55651 +unsigned char *gr_system_sum;
55652 +
55653 +static struct sprole_pw **acl_special_roles = NULL;
55654 +static __u16 num_sprole_pws = 0;
55655 +
55656 +static struct acl_role_label *kernel_role = NULL;
55657 +
55658 +static unsigned int gr_auth_attempts = 0;
55659 +static unsigned long gr_auth_expires = 0UL;
55660 +
55661 +#ifdef CONFIG_NET
55662 +extern struct vfsmount *sock_mnt;
55663 +#endif
55664 +extern struct vfsmount *pipe_mnt;
55665 +extern struct vfsmount *shm_mnt;
55666 +#ifdef CONFIG_HUGETLBFS
55667 +extern struct vfsmount *hugetlbfs_vfsmount;
55668 +#endif
55669 +
55670 +static struct acl_object_label *fakefs_obj_rw;
55671 +static struct acl_object_label *fakefs_obj_rwx;
55672 +
55673 +extern int gr_init_uidset(void);
55674 +extern void gr_free_uidset(void);
55675 +extern void gr_remove_uid(uid_t uid);
55676 +extern int gr_find_uid(uid_t uid);
55677 +
55678 +__inline__ int
55679 +gr_acl_is_enabled(void)
55680 +{
55681 + return (gr_status & GR_READY);
55682 +}
55683 +
55684 +#ifdef CONFIG_BTRFS_FS
55685 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
55686 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
55687 +#endif
55688 +
55689 +static inline dev_t __get_dev(const struct dentry *dentry)
55690 +{
55691 +#ifdef CONFIG_BTRFS_FS
55692 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
55693 + return get_btrfs_dev_from_inode(dentry->d_inode);
55694 + else
55695 +#endif
55696 + return dentry->d_inode->i_sb->s_dev;
55697 +}
55698 +
55699 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
55700 +{
55701 + return __get_dev(dentry);
55702 +}
55703 +
55704 +static char gr_task_roletype_to_char(struct task_struct *task)
55705 +{
55706 + switch (task->role->roletype &
55707 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
55708 + GR_ROLE_SPECIAL)) {
55709 + case GR_ROLE_DEFAULT:
55710 + return 'D';
55711 + case GR_ROLE_USER:
55712 + return 'U';
55713 + case GR_ROLE_GROUP:
55714 + return 'G';
55715 + case GR_ROLE_SPECIAL:
55716 + return 'S';
55717 + }
55718 +
55719 + return 'X';
55720 +}
55721 +
55722 +char gr_roletype_to_char(void)
55723 +{
55724 + return gr_task_roletype_to_char(current);
55725 +}
55726 +
55727 +__inline__ int
55728 +gr_acl_tpe_check(void)
55729 +{
55730 + if (unlikely(!(gr_status & GR_READY)))
55731 + return 0;
55732 + if (current->role->roletype & GR_ROLE_TPE)
55733 + return 1;
55734 + else
55735 + return 0;
55736 +}
55737 +
55738 +int
55739 +gr_handle_rawio(const struct inode *inode)
55740 +{
55741 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55742 + if (inode && S_ISBLK(inode->i_mode) &&
55743 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
55744 + !capable(CAP_SYS_RAWIO))
55745 + return 1;
55746 +#endif
55747 + return 0;
55748 +}
55749 +
55750 +static int
55751 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
55752 +{
55753 + if (likely(lena != lenb))
55754 + return 0;
55755 +
55756 + return !memcmp(a, b, lena);
55757 +}
55758 +
55759 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
55760 +{
55761 + *buflen -= namelen;
55762 + if (*buflen < 0)
55763 + return -ENAMETOOLONG;
55764 + *buffer -= namelen;
55765 + memcpy(*buffer, str, namelen);
55766 + return 0;
55767 +}
55768 +
55769 +/* this must be called with vfsmount_lock and dcache_lock held */
55770 +
55771 +static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
55772 + struct dentry *root, struct vfsmount *rootmnt,
55773 + char *buffer, int buflen)
55774 +{
55775 + char * end = buffer+buflen;
55776 + char * retval;
55777 + int namelen;
55778 +
55779 + *--end = '\0';
55780 + buflen--;
55781 +
55782 + if (buflen < 1)
55783 + goto Elong;
55784 + /* Get '/' right */
55785 + retval = end-1;
55786 + *retval = '/';
55787 +
55788 + for (;;) {
55789 + struct dentry * parent;
55790 +
55791 + if (dentry == root && vfsmnt == rootmnt)
55792 + break;
55793 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
55794 + /* Global root? */
55795 + if (vfsmnt->mnt_parent == vfsmnt)
55796 + goto global_root;
55797 + dentry = vfsmnt->mnt_mountpoint;
55798 + vfsmnt = vfsmnt->mnt_parent;
55799 + continue;
55800 + }
55801 + parent = dentry->d_parent;
55802 + prefetch(parent);
55803 + namelen = dentry->d_name.len;
55804 + buflen -= namelen + 1;
55805 + if (buflen < 0)
55806 + goto Elong;
55807 + end -= namelen;
55808 + memcpy(end, dentry->d_name.name, namelen);
55809 + *--end = '/';
55810 + retval = end;
55811 + dentry = parent;
55812 + }
55813 +
55814 +out:
55815 + return retval;
55816 +
55817 +global_root:
55818 + namelen = dentry->d_name.len;
55819 + buflen -= namelen;
55820 + if (buflen < 0)
55821 + goto Elong;
55822 + retval -= namelen-1; /* hit the slash */
55823 + memcpy(retval, dentry->d_name.name, namelen);
55824 + goto out;
55825 +Elong:
55826 + retval = ERR_PTR(-ENAMETOOLONG);
55827 + goto out;
55828 +}
55829 +
55830 +static char *
55831 +gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
55832 + struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
55833 +{
55834 + char *retval;
55835 +
55836 + retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
55837 + if (unlikely(IS_ERR(retval)))
55838 + retval = strcpy(buf, "<path too long>");
55839 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
55840 + retval[1] = '\0';
55841 +
55842 + return retval;
55843 +}
55844 +
55845 +static char *
55846 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
55847 + char *buf, int buflen)
55848 +{
55849 + char *res;
55850 +
55851 + /* we can use real_root, real_root_mnt, because this is only called
55852 + by the RBAC system */
55853 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
55854 +
55855 + return res;
55856 +}
55857 +
55858 +static char *
55859 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
55860 + char *buf, int buflen)
55861 +{
55862 + char *res;
55863 + struct dentry *root;
55864 + struct vfsmount *rootmnt;
55865 + struct task_struct *reaper = &init_task;
55866 +
55867 + /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
55868 + read_lock(&reaper->fs->lock);
55869 + root = dget(reaper->fs->root.dentry);
55870 + rootmnt = mntget(reaper->fs->root.mnt);
55871 + read_unlock(&reaper->fs->lock);
55872 +
55873 + spin_lock(&dcache_lock);
55874 + spin_lock(&vfsmount_lock);
55875 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
55876 + spin_unlock(&vfsmount_lock);
55877 + spin_unlock(&dcache_lock);
55878 +
55879 + dput(root);
55880 + mntput(rootmnt);
55881 + return res;
55882 +}
55883 +
55884 +static char *
55885 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
55886 +{
55887 + char *ret;
55888 + spin_lock(&dcache_lock);
55889 + spin_lock(&vfsmount_lock);
55890 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
55891 + PAGE_SIZE);
55892 + spin_unlock(&vfsmount_lock);
55893 + spin_unlock(&dcache_lock);
55894 + return ret;
55895 +}
55896 +
55897 +static char *
55898 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
55899 +{
55900 + char *ret;
55901 + char *buf;
55902 + int buflen;
55903 +
55904 + spin_lock(&dcache_lock);
55905 + spin_lock(&vfsmount_lock);
55906 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
55907 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
55908 + buflen = (int)(ret - buf);
55909 + if (buflen >= 5)
55910 + prepend(&ret, &buflen, "/proc", 5);
55911 + else
55912 + ret = strcpy(buf, "<path too long>");
55913 + spin_unlock(&vfsmount_lock);
55914 + spin_unlock(&dcache_lock);
55915 + return ret;
55916 +}
55917 +
55918 +char *
55919 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
55920 +{
55921 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
55922 + PAGE_SIZE);
55923 +}
55924 +
55925 +char *
55926 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
55927 +{
55928 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
55929 + PAGE_SIZE);
55930 +}
55931 +
55932 +char *
55933 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
55934 +{
55935 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
55936 + PAGE_SIZE);
55937 +}
55938 +
55939 +char *
55940 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
55941 +{
55942 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
55943 + PAGE_SIZE);
55944 +}
55945 +
55946 +char *
55947 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
55948 +{
55949 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
55950 + PAGE_SIZE);
55951 +}
55952 +
55953 +__inline__ __u32
55954 +to_gr_audit(const __u32 reqmode)
55955 +{
55956 + /* masks off auditable permission flags, then shifts them to create
55957 + auditing flags, and adds the special case of append auditing if
55958 + we're requesting write */
55959 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
55960 +}
55961 +
55962 +struct acl_subject_label *
55963 +lookup_subject_map(const struct acl_subject_label *userp)
55964 +{
55965 + unsigned int index = shash(userp, subj_map_set.s_size);
55966 + struct subject_map *match;
55967 +
55968 + match = subj_map_set.s_hash[index];
55969 +
55970 + while (match && match->user != userp)
55971 + match = match->next;
55972 +
55973 + if (match != NULL)
55974 + return match->kernel;
55975 + else
55976 + return NULL;
55977 +}
55978 +
55979 +static void
55980 +insert_subj_map_entry(struct subject_map *subjmap)
55981 +{
55982 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
55983 + struct subject_map **curr;
55984 +
55985 + subjmap->prev = NULL;
55986 +
55987 + curr = &subj_map_set.s_hash[index];
55988 + if (*curr != NULL)
55989 + (*curr)->prev = subjmap;
55990 +
55991 + subjmap->next = *curr;
55992 + *curr = subjmap;
55993 +
55994 + return;
55995 +}
55996 +
55997 +static struct acl_role_label *
55998 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
55999 + const gid_t gid)
56000 +{
56001 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
56002 + struct acl_role_label *match;
56003 + struct role_allowed_ip *ipp;
56004 + unsigned int x;
56005 + u32 curr_ip = task->signal->curr_ip;
56006 +
56007 + task->signal->saved_ip = curr_ip;
56008 +
56009 + match = acl_role_set.r_hash[index];
56010 +
56011 + while (match) {
56012 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
56013 + for (x = 0; x < match->domain_child_num; x++) {
56014 + if (match->domain_children[x] == uid)
56015 + goto found;
56016 + }
56017 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
56018 + break;
56019 + match = match->next;
56020 + }
56021 +found:
56022 + if (match == NULL) {
56023 + try_group:
56024 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
56025 + match = acl_role_set.r_hash[index];
56026 +
56027 + while (match) {
56028 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
56029 + for (x = 0; x < match->domain_child_num; x++) {
56030 + if (match->domain_children[x] == gid)
56031 + goto found2;
56032 + }
56033 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
56034 + break;
56035 + match = match->next;
56036 + }
56037 +found2:
56038 + if (match == NULL)
56039 + match = default_role;
56040 + if (match->allowed_ips == NULL)
56041 + return match;
56042 + else {
56043 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
56044 + if (likely
56045 + ((ntohl(curr_ip) & ipp->netmask) ==
56046 + (ntohl(ipp->addr) & ipp->netmask)))
56047 + return match;
56048 + }
56049 + match = default_role;
56050 + }
56051 + } else if (match->allowed_ips == NULL) {
56052 + return match;
56053 + } else {
56054 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
56055 + if (likely
56056 + ((ntohl(curr_ip) & ipp->netmask) ==
56057 + (ntohl(ipp->addr) & ipp->netmask)))
56058 + return match;
56059 + }
56060 + goto try_group;
56061 + }
56062 +
56063 + return match;
56064 +}
56065 +
56066 +struct acl_subject_label *
56067 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
56068 + const struct acl_role_label *role)
56069 +{
56070 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
56071 + struct acl_subject_label *match;
56072 +
56073 + match = role->subj_hash[index];
56074 +
56075 + while (match && (match->inode != ino || match->device != dev ||
56076 + (match->mode & GR_DELETED))) {
56077 + match = match->next;
56078 + }
56079 +
56080 + if (match && !(match->mode & GR_DELETED))
56081 + return match;
56082 + else
56083 + return NULL;
56084 +}
56085 +
56086 +struct acl_subject_label *
56087 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
56088 + const struct acl_role_label *role)
56089 +{
56090 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
56091 + struct acl_subject_label *match;
56092 +
56093 + match = role->subj_hash[index];
56094 +
56095 + while (match && (match->inode != ino || match->device != dev ||
56096 + !(match->mode & GR_DELETED))) {
56097 + match = match->next;
56098 + }
56099 +
56100 + if (match && (match->mode & GR_DELETED))
56101 + return match;
56102 + else
56103 + return NULL;
56104 +}
56105 +
56106 +static struct acl_object_label *
56107 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
56108 + const struct acl_subject_label *subj)
56109 +{
56110 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
56111 + struct acl_object_label *match;
56112 +
56113 + match = subj->obj_hash[index];
56114 +
56115 + while (match && (match->inode != ino || match->device != dev ||
56116 + (match->mode & GR_DELETED))) {
56117 + match = match->next;
56118 + }
56119 +
56120 + if (match && !(match->mode & GR_DELETED))
56121 + return match;
56122 + else
56123 + return NULL;
56124 +}
56125 +
56126 +static struct acl_object_label *
56127 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
56128 + const struct acl_subject_label *subj)
56129 +{
56130 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
56131 + struct acl_object_label *match;
56132 +
56133 + match = subj->obj_hash[index];
56134 +
56135 + while (match && (match->inode != ino || match->device != dev ||
56136 + !(match->mode & GR_DELETED))) {
56137 + match = match->next;
56138 + }
56139 +
56140 + if (match && (match->mode & GR_DELETED))
56141 + return match;
56142 +
56143 + match = subj->obj_hash[index];
56144 +
56145 + while (match && (match->inode != ino || match->device != dev ||
56146 + (match->mode & GR_DELETED))) {
56147 + match = match->next;
56148 + }
56149 +
56150 + if (match && !(match->mode & GR_DELETED))
56151 + return match;
56152 + else
56153 + return NULL;
56154 +}
56155 +
56156 +static struct name_entry *
56157 +lookup_name_entry(const char *name)
56158 +{
56159 + unsigned int len = strlen(name);
56160 + unsigned int key = full_name_hash(name, len);
56161 + unsigned int index = key % name_set.n_size;
56162 + struct name_entry *match;
56163 +
56164 + match = name_set.n_hash[index];
56165 +
56166 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
56167 + match = match->next;
56168 +
56169 + return match;
56170 +}
56171 +
56172 +static struct name_entry *
56173 +lookup_name_entry_create(const char *name)
56174 +{
56175 + unsigned int len = strlen(name);
56176 + unsigned int key = full_name_hash(name, len);
56177 + unsigned int index = key % name_set.n_size;
56178 + struct name_entry *match;
56179 +
56180 + match = name_set.n_hash[index];
56181 +
56182 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
56183 + !match->deleted))
56184 + match = match->next;
56185 +
56186 + if (match && match->deleted)
56187 + return match;
56188 +
56189 + match = name_set.n_hash[index];
56190 +
56191 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
56192 + match->deleted))
56193 + match = match->next;
56194 +
56195 + if (match && !match->deleted)
56196 + return match;
56197 + else
56198 + return NULL;
56199 +}
56200 +
56201 +static struct inodev_entry *
56202 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
56203 +{
56204 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
56205 + struct inodev_entry *match;
56206 +
56207 + match = inodev_set.i_hash[index];
56208 +
56209 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
56210 + match = match->next;
56211 +
56212 + return match;
56213 +}
56214 +
56215 +static void
56216 +insert_inodev_entry(struct inodev_entry *entry)
56217 +{
56218 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
56219 + inodev_set.i_size);
56220 + struct inodev_entry **curr;
56221 +
56222 + entry->prev = NULL;
56223 +
56224 + curr = &inodev_set.i_hash[index];
56225 + if (*curr != NULL)
56226 + (*curr)->prev = entry;
56227 +
56228 + entry->next = *curr;
56229 + *curr = entry;
56230 +
56231 + return;
56232 +}
56233 +
56234 +static void
56235 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
56236 +{
56237 + unsigned int index =
56238 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
56239 + struct acl_role_label **curr;
56240 + struct acl_role_label *tmp;
56241 +
56242 + curr = &acl_role_set.r_hash[index];
56243 +
56244 + /* if role was already inserted due to domains and already has
56245 + a role in the same bucket as it attached, then we need to
56246 + combine these two buckets
56247 + */
56248 + if (role->next) {
56249 + tmp = role->next;
56250 + while (tmp->next)
56251 + tmp = tmp->next;
56252 + tmp->next = *curr;
56253 + } else
56254 + role->next = *curr;
56255 + *curr = role;
56256 +
56257 + return;
56258 +}
56259 +
56260 +static void
56261 +insert_acl_role_label(struct acl_role_label *role)
56262 +{
56263 + int i;
56264 +
56265 + if (role_list == NULL) {
56266 + role_list = role;
56267 + role->prev = NULL;
56268 + } else {
56269 + role->prev = role_list;
56270 + role_list = role;
56271 + }
56272 +
56273 + /* used for hash chains */
56274 + role->next = NULL;
56275 +
56276 + if (role->roletype & GR_ROLE_DOMAIN) {
56277 + for (i = 0; i < role->domain_child_num; i++)
56278 + __insert_acl_role_label(role, role->domain_children[i]);
56279 + } else
56280 + __insert_acl_role_label(role, role->uidgid);
56281 +}
56282 +
56283 +static int
56284 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
56285 +{
56286 + struct name_entry **curr, *nentry;
56287 + struct inodev_entry *ientry;
56288 + unsigned int len = strlen(name);
56289 + unsigned int key = full_name_hash(name, len);
56290 + unsigned int index = key % name_set.n_size;
56291 +
56292 + curr = &name_set.n_hash[index];
56293 +
56294 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
56295 + curr = &((*curr)->next);
56296 +
56297 + if (*curr != NULL)
56298 + return 1;
56299 +
56300 + nentry = acl_alloc(sizeof (struct name_entry));
56301 + if (nentry == NULL)
56302 + return 0;
56303 + ientry = acl_alloc(sizeof (struct inodev_entry));
56304 + if (ientry == NULL)
56305 + return 0;
56306 + ientry->nentry = nentry;
56307 +
56308 + nentry->key = key;
56309 + nentry->name = name;
56310 + nentry->inode = inode;
56311 + nentry->device = device;
56312 + nentry->len = len;
56313 + nentry->deleted = deleted;
56314 +
56315 + nentry->prev = NULL;
56316 + curr = &name_set.n_hash[index];
56317 + if (*curr != NULL)
56318 + (*curr)->prev = nentry;
56319 + nentry->next = *curr;
56320 + *curr = nentry;
56321 +
56322 + /* insert us into the table searchable by inode/dev */
56323 + insert_inodev_entry(ientry);
56324 +
56325 + return 1;
56326 +}
56327 +
56328 +static void
56329 +insert_acl_obj_label(struct acl_object_label *obj,
56330 + struct acl_subject_label *subj)
56331 +{
56332 + unsigned int index =
56333 + fhash(obj->inode, obj->device, subj->obj_hash_size);
56334 + struct acl_object_label **curr;
56335 +
56336 +
56337 + obj->prev = NULL;
56338 +
56339 + curr = &subj->obj_hash[index];
56340 + if (*curr != NULL)
56341 + (*curr)->prev = obj;
56342 +
56343 + obj->next = *curr;
56344 + *curr = obj;
56345 +
56346 + return;
56347 +}
56348 +
56349 +static void
56350 +insert_acl_subj_label(struct acl_subject_label *obj,
56351 + struct acl_role_label *role)
56352 +{
56353 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
56354 + struct acl_subject_label **curr;
56355 +
56356 + obj->prev = NULL;
56357 +
56358 + curr = &role->subj_hash[index];
56359 + if (*curr != NULL)
56360 + (*curr)->prev = obj;
56361 +
56362 + obj->next = *curr;
56363 + *curr = obj;
56364 +
56365 + return;
56366 +}
56367 +
56368 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
56369 +
56370 +static void *
56371 +create_table(__u32 * len, int elementsize)
56372 +{
56373 + unsigned int table_sizes[] = {
56374 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
56375 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
56376 + 4194301, 8388593, 16777213, 33554393, 67108859
56377 + };
56378 + void *newtable = NULL;
56379 + unsigned int pwr = 0;
56380 +
56381 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
56382 + table_sizes[pwr] <= *len)
56383 + pwr++;
56384 +
56385 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
56386 + return newtable;
56387 +
56388 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
56389 + newtable =
56390 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
56391 + else
56392 + newtable = vmalloc(table_sizes[pwr] * elementsize);
56393 +
56394 + *len = table_sizes[pwr];
56395 +
56396 + return newtable;
56397 +}
56398 +
56399 +static int
56400 +init_variables(const struct gr_arg *arg)
56401 +{
56402 + struct task_struct *reaper = &init_task;
56403 + unsigned int stacksize;
56404 +
56405 + subj_map_set.s_size = arg->role_db.num_subjects;
56406 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
56407 + name_set.n_size = arg->role_db.num_objects;
56408 + inodev_set.i_size = arg->role_db.num_objects;
56409 +
56410 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
56411 + !name_set.n_size || !inodev_set.i_size)
56412 + return 1;
56413 +
56414 + if (!gr_init_uidset())
56415 + return 1;
56416 +
56417 + /* set up the stack that holds allocation info */
56418 +
56419 + stacksize = arg->role_db.num_pointers + 5;
56420 +
56421 + if (!acl_alloc_stack_init(stacksize))
56422 + return 1;
56423 +
56424 + /* grab reference for the real root dentry and vfsmount */
56425 + read_lock(&reaper->fs->lock);
56426 + real_root = dget(reaper->fs->root.dentry);
56427 + real_root_mnt = mntget(reaper->fs->root.mnt);
56428 + read_unlock(&reaper->fs->lock);
56429 +
56430 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
56431 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
56432 +#endif
56433 +
56434 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
56435 + if (fakefs_obj_rw == NULL)
56436 + return 1;
56437 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
56438 +
56439 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
56440 + if (fakefs_obj_rwx == NULL)
56441 + return 1;
56442 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
56443 +
56444 + subj_map_set.s_hash =
56445 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
56446 + acl_role_set.r_hash =
56447 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
56448 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
56449 + inodev_set.i_hash =
56450 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
56451 +
56452 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
56453 + !name_set.n_hash || !inodev_set.i_hash)
56454 + return 1;
56455 +
56456 + memset(subj_map_set.s_hash, 0,
56457 + sizeof(struct subject_map *) * subj_map_set.s_size);
56458 + memset(acl_role_set.r_hash, 0,
56459 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
56460 + memset(name_set.n_hash, 0,
56461 + sizeof (struct name_entry *) * name_set.n_size);
56462 + memset(inodev_set.i_hash, 0,
56463 + sizeof (struct inodev_entry *) * inodev_set.i_size);
56464 +
56465 + return 0;
56466 +}
56467 +
56468 +/* free information not needed after startup
56469 + currently contains user->kernel pointer mappings for subjects
56470 +*/
56471 +
56472 +static void
56473 +free_init_variables(void)
56474 +{
56475 + __u32 i;
56476 +
56477 + if (subj_map_set.s_hash) {
56478 + for (i = 0; i < subj_map_set.s_size; i++) {
56479 + if (subj_map_set.s_hash[i]) {
56480 + kfree(subj_map_set.s_hash[i]);
56481 + subj_map_set.s_hash[i] = NULL;
56482 + }
56483 + }
56484 +
56485 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
56486 + PAGE_SIZE)
56487 + kfree(subj_map_set.s_hash);
56488 + else
56489 + vfree(subj_map_set.s_hash);
56490 + }
56491 +
56492 + return;
56493 +}
56494 +
56495 +static void
56496 +free_variables(void)
56497 +{
56498 + struct acl_subject_label *s;
56499 + struct acl_role_label *r;
56500 + struct task_struct *task, *task2;
56501 + unsigned int x;
56502 +
56503 + gr_clear_learn_entries();
56504 +
56505 + read_lock(&tasklist_lock);
56506 + do_each_thread(task2, task) {
56507 + task->acl_sp_role = 0;
56508 + task->acl_role_id = 0;
56509 + task->acl = NULL;
56510 + task->role = NULL;
56511 + } while_each_thread(task2, task);
56512 + read_unlock(&tasklist_lock);
56513 +
56514 + /* release the reference to the real root dentry and vfsmount */
56515 + if (real_root)
56516 + dput(real_root);
56517 + real_root = NULL;
56518 + if (real_root_mnt)
56519 + mntput(real_root_mnt);
56520 + real_root_mnt = NULL;
56521 +
56522 + /* free all object hash tables */
56523 +
56524 + FOR_EACH_ROLE_START(r)
56525 + if (r->subj_hash == NULL)
56526 + goto next_role;
56527 + FOR_EACH_SUBJECT_START(r, s, x)
56528 + if (s->obj_hash == NULL)
56529 + break;
56530 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
56531 + kfree(s->obj_hash);
56532 + else
56533 + vfree(s->obj_hash);
56534 + FOR_EACH_SUBJECT_END(s, x)
56535 + FOR_EACH_NESTED_SUBJECT_START(r, s)
56536 + if (s->obj_hash == NULL)
56537 + break;
56538 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
56539 + kfree(s->obj_hash);
56540 + else
56541 + vfree(s->obj_hash);
56542 + FOR_EACH_NESTED_SUBJECT_END(s)
56543 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
56544 + kfree(r->subj_hash);
56545 + else
56546 + vfree(r->subj_hash);
56547 + r->subj_hash = NULL;
56548 +next_role:
56549 + FOR_EACH_ROLE_END(r)
56550 +
56551 + acl_free_all();
56552 +
56553 + if (acl_role_set.r_hash) {
56554 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
56555 + PAGE_SIZE)
56556 + kfree(acl_role_set.r_hash);
56557 + else
56558 + vfree(acl_role_set.r_hash);
56559 + }
56560 + if (name_set.n_hash) {
56561 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
56562 + PAGE_SIZE)
56563 + kfree(name_set.n_hash);
56564 + else
56565 + vfree(name_set.n_hash);
56566 + }
56567 +
56568 + if (inodev_set.i_hash) {
56569 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
56570 + PAGE_SIZE)
56571 + kfree(inodev_set.i_hash);
56572 + else
56573 + vfree(inodev_set.i_hash);
56574 + }
56575 +
56576 + gr_free_uidset();
56577 +
56578 + memset(&name_set, 0, sizeof (struct name_db));
56579 + memset(&inodev_set, 0, sizeof (struct inodev_db));
56580 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
56581 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
56582 +
56583 + default_role = NULL;
56584 + role_list = NULL;
56585 +
56586 + return;
56587 +}
56588 +
56589 +static __u32
56590 +count_user_objs(struct acl_object_label *userp)
56591 +{
56592 + struct acl_object_label o_tmp;
56593 + __u32 num = 0;
56594 +
56595 + while (userp) {
56596 + if (copy_from_user(&o_tmp, userp,
56597 + sizeof (struct acl_object_label)))
56598 + break;
56599 +
56600 + userp = o_tmp.prev;
56601 + num++;
56602 + }
56603 +
56604 + return num;
56605 +}
56606 +
56607 +static struct acl_subject_label *
56608 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
56609 +
56610 +static int
56611 +copy_user_glob(struct acl_object_label *obj)
56612 +{
56613 + struct acl_object_label *g_tmp, **guser;
56614 + unsigned int len;
56615 + char *tmp;
56616 +
56617 + if (obj->globbed == NULL)
56618 + return 0;
56619 +
56620 + guser = &obj->globbed;
56621 + while (*guser) {
56622 + g_tmp = (struct acl_object_label *)
56623 + acl_alloc(sizeof (struct acl_object_label));
56624 + if (g_tmp == NULL)
56625 + return -ENOMEM;
56626 +
56627 + if (copy_from_user(g_tmp, *guser,
56628 + sizeof (struct acl_object_label)))
56629 + return -EFAULT;
56630 +
56631 + len = strnlen_user(g_tmp->filename, PATH_MAX);
56632 +
56633 + if (!len || len >= PATH_MAX)
56634 + return -EINVAL;
56635 +
56636 + if ((tmp = (char *) acl_alloc(len)) == NULL)
56637 + return -ENOMEM;
56638 +
56639 + if (copy_from_user(tmp, g_tmp->filename, len))
56640 + return -EFAULT;
56641 + tmp[len-1] = '\0';
56642 + g_tmp->filename = tmp;
56643 +
56644 + *guser = g_tmp;
56645 + guser = &(g_tmp->next);
56646 + }
56647 +
56648 + return 0;
56649 +}
56650 +
56651 +static int
56652 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
56653 + struct acl_role_label *role)
56654 +{
56655 + struct acl_object_label *o_tmp;
56656 + unsigned int len;
56657 + int ret;
56658 + char *tmp;
56659 +
56660 + while (userp) {
56661 + if ((o_tmp = (struct acl_object_label *)
56662 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
56663 + return -ENOMEM;
56664 +
56665 + if (copy_from_user(o_tmp, userp,
56666 + sizeof (struct acl_object_label)))
56667 + return -EFAULT;
56668 +
56669 + userp = o_tmp->prev;
56670 +
56671 + len = strnlen_user(o_tmp->filename, PATH_MAX);
56672 +
56673 + if (!len || len >= PATH_MAX)
56674 + return -EINVAL;
56675 +
56676 + if ((tmp = (char *) acl_alloc(len)) == NULL)
56677 + return -ENOMEM;
56678 +
56679 + if (copy_from_user(tmp, o_tmp->filename, len))
56680 + return -EFAULT;
56681 + tmp[len-1] = '\0';
56682 + o_tmp->filename = tmp;
56683 +
56684 + insert_acl_obj_label(o_tmp, subj);
56685 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
56686 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
56687 + return -ENOMEM;
56688 +
56689 + ret = copy_user_glob(o_tmp);
56690 + if (ret)
56691 + return ret;
56692 +
56693 + if (o_tmp->nested) {
56694 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
56695 + if (IS_ERR(o_tmp->nested))
56696 + return PTR_ERR(o_tmp->nested);
56697 +
56698 + /* insert into nested subject list */
56699 + o_tmp->nested->next = role->hash->first;
56700 + role->hash->first = o_tmp->nested;
56701 + }
56702 + }
56703 +
56704 + return 0;
56705 +}
56706 +
56707 +static __u32
56708 +count_user_subjs(struct acl_subject_label *userp)
56709 +{
56710 + struct acl_subject_label s_tmp;
56711 + __u32 num = 0;
56712 +
56713 + while (userp) {
56714 + if (copy_from_user(&s_tmp, userp,
56715 + sizeof (struct acl_subject_label)))
56716 + break;
56717 +
56718 + userp = s_tmp.prev;
56719 + /* do not count nested subjects against this count, since
56720 + they are not included in the hash table, but are
56721 + attached to objects. We have already counted
56722 + the subjects in userspace for the allocation
56723 + stack
56724 + */
56725 + if (!(s_tmp.mode & GR_NESTED))
56726 + num++;
56727 + }
56728 +
56729 + return num;
56730 +}
56731 +
56732 +static int
56733 +copy_user_allowedips(struct acl_role_label *rolep)
56734 +{
56735 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
56736 +
56737 + ruserip = rolep->allowed_ips;
56738 +
56739 + while (ruserip) {
56740 + rlast = rtmp;
56741 +
56742 + if ((rtmp = (struct role_allowed_ip *)
56743 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
56744 + return -ENOMEM;
56745 +
56746 + if (copy_from_user(rtmp, ruserip,
56747 + sizeof (struct role_allowed_ip)))
56748 + return -EFAULT;
56749 +
56750 + ruserip = rtmp->prev;
56751 +
56752 + if (!rlast) {
56753 + rtmp->prev = NULL;
56754 + rolep->allowed_ips = rtmp;
56755 + } else {
56756 + rlast->next = rtmp;
56757 + rtmp->prev = rlast;
56758 + }
56759 +
56760 + if (!ruserip)
56761 + rtmp->next = NULL;
56762 + }
56763 +
56764 + return 0;
56765 +}
56766 +
56767 +static int
56768 +copy_user_transitions(struct acl_role_label *rolep)
56769 +{
56770 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
56771 +
56772 + unsigned int len;
56773 + char *tmp;
56774 +
56775 + rusertp = rolep->transitions;
56776 +
56777 + while (rusertp) {
56778 + rlast = rtmp;
56779 +
56780 + if ((rtmp = (struct role_transition *)
56781 + acl_alloc(sizeof (struct role_transition))) == NULL)
56782 + return -ENOMEM;
56783 +
56784 + if (copy_from_user(rtmp, rusertp,
56785 + sizeof (struct role_transition)))
56786 + return -EFAULT;
56787 +
56788 + rusertp = rtmp->prev;
56789 +
56790 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
56791 +
56792 + if (!len || len >= GR_SPROLE_LEN)
56793 + return -EINVAL;
56794 +
56795 + if ((tmp = (char *) acl_alloc(len)) == NULL)
56796 + return -ENOMEM;
56797 +
56798 + if (copy_from_user(tmp, rtmp->rolename, len))
56799 + return -EFAULT;
56800 + tmp[len-1] = '\0';
56801 + rtmp->rolename = tmp;
56802 +
56803 + if (!rlast) {
56804 + rtmp->prev = NULL;
56805 + rolep->transitions = rtmp;
56806 + } else {
56807 + rlast->next = rtmp;
56808 + rtmp->prev = rlast;
56809 + }
56810 +
56811 + if (!rusertp)
56812 + rtmp->next = NULL;
56813 + }
56814 +
56815 + return 0;
56816 +}
56817 +
56818 +static struct acl_subject_label *
56819 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
56820 +{
56821 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
56822 + unsigned int len;
56823 + char *tmp;
56824 + __u32 num_objs;
56825 + struct acl_ip_label **i_tmp, *i_utmp2;
56826 + struct gr_hash_struct ghash;
56827 + struct subject_map *subjmap;
56828 + unsigned int i_num;
56829 + int err;
56830 +
56831 + s_tmp = lookup_subject_map(userp);
56832 +
56833 + /* we've already copied this subject into the kernel, just return
56834 + the reference to it, and don't copy it over again
56835 + */
56836 + if (s_tmp)
56837 + return(s_tmp);
56838 +
56839 + if ((s_tmp = (struct acl_subject_label *)
56840 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
56841 + return ERR_PTR(-ENOMEM);
56842 +
56843 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
56844 + if (subjmap == NULL)
56845 + return ERR_PTR(-ENOMEM);
56846 +
56847 + subjmap->user = userp;
56848 + subjmap->kernel = s_tmp;
56849 + insert_subj_map_entry(subjmap);
56850 +
56851 + if (copy_from_user(s_tmp, userp,
56852 + sizeof (struct acl_subject_label)))
56853 + return ERR_PTR(-EFAULT);
56854 +
56855 + len = strnlen_user(s_tmp->filename, PATH_MAX);
56856 +
56857 + if (!len || len >= PATH_MAX)
56858 + return ERR_PTR(-EINVAL);
56859 +
56860 + if ((tmp = (char *) acl_alloc(len)) == NULL)
56861 + return ERR_PTR(-ENOMEM);
56862 +
56863 + if (copy_from_user(tmp, s_tmp->filename, len))
56864 + return ERR_PTR(-EFAULT);
56865 + tmp[len-1] = '\0';
56866 + s_tmp->filename = tmp;
56867 +
56868 + if (!strcmp(s_tmp->filename, "/"))
56869 + role->root_label = s_tmp;
56870 +
56871 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
56872 + return ERR_PTR(-EFAULT);
56873 +
56874 + /* copy user and group transition tables */
56875 +
56876 + if (s_tmp->user_trans_num) {
56877 + uid_t *uidlist;
56878 +
56879 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
56880 + if (uidlist == NULL)
56881 + return ERR_PTR(-ENOMEM);
56882 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
56883 + return ERR_PTR(-EFAULT);
56884 +
56885 + s_tmp->user_transitions = uidlist;
56886 + }
56887 +
56888 + if (s_tmp->group_trans_num) {
56889 + gid_t *gidlist;
56890 +
56891 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
56892 + if (gidlist == NULL)
56893 + return ERR_PTR(-ENOMEM);
56894 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
56895 + return ERR_PTR(-EFAULT);
56896 +
56897 + s_tmp->group_transitions = gidlist;
56898 + }
56899 +
56900 + /* set up object hash table */
56901 + num_objs = count_user_objs(ghash.first);
56902 +
56903 + s_tmp->obj_hash_size = num_objs;
56904 + s_tmp->obj_hash =
56905 + (struct acl_object_label **)
56906 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
56907 +
56908 + if (!s_tmp->obj_hash)
56909 + return ERR_PTR(-ENOMEM);
56910 +
56911 + memset(s_tmp->obj_hash, 0,
56912 + s_tmp->obj_hash_size *
56913 + sizeof (struct acl_object_label *));
56914 +
56915 + /* add in objects */
56916 + err = copy_user_objs(ghash.first, s_tmp, role);
56917 +
56918 + if (err)
56919 + return ERR_PTR(err);
56920 +
56921 + /* set pointer for parent subject */
56922 + if (s_tmp->parent_subject) {
56923 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
56924 +
56925 + if (IS_ERR(s_tmp2))
56926 + return s_tmp2;
56927 +
56928 + s_tmp->parent_subject = s_tmp2;
56929 + }
56930 +
56931 + /* add in ip acls */
56932 +
56933 + if (!s_tmp->ip_num) {
56934 + s_tmp->ips = NULL;
56935 + goto insert;
56936 + }
56937 +
56938 + i_tmp =
56939 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
56940 + sizeof (struct acl_ip_label *));
56941 +
56942 + if (!i_tmp)
56943 + return ERR_PTR(-ENOMEM);
56944 +
56945 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
56946 + *(i_tmp + i_num) =
56947 + (struct acl_ip_label *)
56948 + acl_alloc(sizeof (struct acl_ip_label));
56949 + if (!*(i_tmp + i_num))
56950 + return ERR_PTR(-ENOMEM);
56951 +
56952 + if (copy_from_user
56953 + (&i_utmp2, s_tmp->ips + i_num,
56954 + sizeof (struct acl_ip_label *)))
56955 + return ERR_PTR(-EFAULT);
56956 +
56957 + if (copy_from_user
56958 + (*(i_tmp + i_num), i_utmp2,
56959 + sizeof (struct acl_ip_label)))
56960 + return ERR_PTR(-EFAULT);
56961 +
56962 + if ((*(i_tmp + i_num))->iface == NULL)
56963 + continue;
56964 +
56965 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
56966 + if (!len || len >= IFNAMSIZ)
56967 + return ERR_PTR(-EINVAL);
56968 + tmp = acl_alloc(len);
56969 + if (tmp == NULL)
56970 + return ERR_PTR(-ENOMEM);
56971 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
56972 + return ERR_PTR(-EFAULT);
56973 + (*(i_tmp + i_num))->iface = tmp;
56974 + }
56975 +
56976 + s_tmp->ips = i_tmp;
56977 +
56978 +insert:
56979 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
56980 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
56981 + return ERR_PTR(-ENOMEM);
56982 +
56983 + return s_tmp;
56984 +}
56985 +
56986 +static int
56987 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
56988 +{
56989 + struct acl_subject_label s_pre;
56990 + struct acl_subject_label * ret;
56991 + int err;
56992 +
56993 + while (userp) {
56994 + if (copy_from_user(&s_pre, userp,
56995 + sizeof (struct acl_subject_label)))
56996 + return -EFAULT;
56997 +
56998 + /* do not add nested subjects here, add
56999 + while parsing objects
57000 + */
57001 +
57002 + if (s_pre.mode & GR_NESTED) {
57003 + userp = s_pre.prev;
57004 + continue;
57005 + }
57006 +
57007 + ret = do_copy_user_subj(userp, role);
57008 +
57009 + err = PTR_ERR(ret);
57010 + if (IS_ERR(ret))
57011 + return err;
57012 +
57013 + insert_acl_subj_label(ret, role);
57014 +
57015 + userp = s_pre.prev;
57016 + }
57017 +
57018 + return 0;
57019 +}
57020 +
57021 +static int
57022 +copy_user_acl(struct gr_arg *arg)
57023 +{
57024 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
57025 + struct sprole_pw *sptmp;
57026 + struct gr_hash_struct *ghash;
57027 + uid_t *domainlist;
57028 + unsigned int r_num;
57029 + unsigned int len;
57030 + char *tmp;
57031 + int err = 0;
57032 + __u16 i;
57033 + __u32 num_subjs;
57034 +
57035 + /* we need a default and kernel role */
57036 + if (arg->role_db.num_roles < 2)
57037 + return -EINVAL;
57038 +
57039 + /* copy special role authentication info from userspace */
57040 +
57041 + num_sprole_pws = arg->num_sprole_pws;
57042 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
57043 +
57044 + if (!acl_special_roles) {
57045 + err = -ENOMEM;
57046 + goto cleanup;
57047 + }
57048 +
57049 + for (i = 0; i < num_sprole_pws; i++) {
57050 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
57051 + if (!sptmp) {
57052 + err = -ENOMEM;
57053 + goto cleanup;
57054 + }
57055 + if (copy_from_user(sptmp, arg->sprole_pws + i,
57056 + sizeof (struct sprole_pw))) {
57057 + err = -EFAULT;
57058 + goto cleanup;
57059 + }
57060 +
57061 + len =
57062 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
57063 +
57064 + if (!len || len >= GR_SPROLE_LEN) {
57065 + err = -EINVAL;
57066 + goto cleanup;
57067 + }
57068 +
57069 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
57070 + err = -ENOMEM;
57071 + goto cleanup;
57072 + }
57073 +
57074 + if (copy_from_user(tmp, sptmp->rolename, len)) {
57075 + err = -EFAULT;
57076 + goto cleanup;
57077 + }
57078 + tmp[len-1] = '\0';
57079 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
57080 + printk(KERN_ALERT "Copying special role %s\n", tmp);
57081 +#endif
57082 + sptmp->rolename = tmp;
57083 + acl_special_roles[i] = sptmp;
57084 + }
57085 +
57086 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
57087 +
57088 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
57089 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
57090 +
57091 + if (!r_tmp) {
57092 + err = -ENOMEM;
57093 + goto cleanup;
57094 + }
57095 +
57096 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
57097 + sizeof (struct acl_role_label *))) {
57098 + err = -EFAULT;
57099 + goto cleanup;
57100 + }
57101 +
57102 + if (copy_from_user(r_tmp, r_utmp2,
57103 + sizeof (struct acl_role_label))) {
57104 + err = -EFAULT;
57105 + goto cleanup;
57106 + }
57107 +
57108 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
57109 +
57110 + if (!len || len >= PATH_MAX) {
57111 + err = -EINVAL;
57112 + goto cleanup;
57113 + }
57114 +
57115 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
57116 + err = -ENOMEM;
57117 + goto cleanup;
57118 + }
57119 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
57120 + err = -EFAULT;
57121 + goto cleanup;
57122 + }
57123 + tmp[len-1] = '\0';
57124 + r_tmp->rolename = tmp;
57125 +
57126 + if (!strcmp(r_tmp->rolename, "default")
57127 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
57128 + default_role = r_tmp;
57129 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
57130 + kernel_role = r_tmp;
57131 + }
57132 +
57133 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
57134 + err = -ENOMEM;
57135 + goto cleanup;
57136 + }
57137 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
57138 + err = -EFAULT;
57139 + goto cleanup;
57140 + }
57141 +
57142 + r_tmp->hash = ghash;
57143 +
57144 + num_subjs = count_user_subjs(r_tmp->hash->first);
57145 +
57146 + r_tmp->subj_hash_size = num_subjs;
57147 + r_tmp->subj_hash =
57148 + (struct acl_subject_label **)
57149 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
57150 +
57151 + if (!r_tmp->subj_hash) {
57152 + err = -ENOMEM;
57153 + goto cleanup;
57154 + }
57155 +
57156 + err = copy_user_allowedips(r_tmp);
57157 + if (err)
57158 + goto cleanup;
57159 +
57160 + /* copy domain info */
57161 + if (r_tmp->domain_children != NULL) {
57162 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
57163 + if (domainlist == NULL) {
57164 + err = -ENOMEM;
57165 + goto cleanup;
57166 + }
57167 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
57168 + err = -EFAULT;
57169 + goto cleanup;
57170 + }
57171 + r_tmp->domain_children = domainlist;
57172 + }
57173 +
57174 + err = copy_user_transitions(r_tmp);
57175 + if (err)
57176 + goto cleanup;
57177 +
57178 + memset(r_tmp->subj_hash, 0,
57179 + r_tmp->subj_hash_size *
57180 + sizeof (struct acl_subject_label *));
57181 +
57182 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
57183 +
57184 + if (err)
57185 + goto cleanup;
57186 +
57187 + /* set nested subject list to null */
57188 + r_tmp->hash->first = NULL;
57189 +
57190 + insert_acl_role_label(r_tmp);
57191 + }
57192 +
57193 + goto return_err;
57194 + cleanup:
57195 + free_variables();
57196 + return_err:
57197 + return err;
57198 +
57199 +}
57200 +
57201 +static int
57202 +gracl_init(struct gr_arg *args)
57203 +{
57204 + int error = 0;
57205 +
57206 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
57207 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
57208 +
57209 + if (init_variables(args)) {
57210 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
57211 + error = -ENOMEM;
57212 + free_variables();
57213 + goto out;
57214 + }
57215 +
57216 + error = copy_user_acl(args);
57217 + free_init_variables();
57218 + if (error) {
57219 + free_variables();
57220 + goto out;
57221 + }
57222 +
57223 + if ((error = gr_set_acls(0))) {
57224 + free_variables();
57225 + goto out;
57226 + }
57227 +
57228 + pax_open_kernel();
57229 + gr_status |= GR_READY;
57230 + pax_close_kernel();
57231 +
57232 + out:
57233 + return error;
57234 +}
57235 +
57236 +/* derived from glibc fnmatch() 0: match, 1: no match*/
57237 +
57238 +static int
57239 +glob_match(const char *p, const char *n)
57240 +{
57241 + char c;
57242 +
57243 + while ((c = *p++) != '\0') {
57244 + switch (c) {
57245 + case '?':
57246 + if (*n == '\0')
57247 + return 1;
57248 + else if (*n == '/')
57249 + return 1;
57250 + break;
57251 + case '\\':
57252 + if (*n != c)
57253 + return 1;
57254 + break;
57255 + case '*':
57256 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
57257 + if (*n == '/')
57258 + return 1;
57259 + else if (c == '?') {
57260 + if (*n == '\0')
57261 + return 1;
57262 + else
57263 + ++n;
57264 + }
57265 + }
57266 + if (c == '\0') {
57267 + return 0;
57268 + } else {
57269 + const char *endp;
57270 +
57271 + if ((endp = strchr(n, '/')) == NULL)
57272 + endp = n + strlen(n);
57273 +
57274 + if (c == '[') {
57275 + for (--p; n < endp; ++n)
57276 + if (!glob_match(p, n))
57277 + return 0;
57278 + } else if (c == '/') {
57279 + while (*n != '\0' && *n != '/')
57280 + ++n;
57281 + if (*n == '/' && !glob_match(p, n + 1))
57282 + return 0;
57283 + } else {
57284 + for (--p; n < endp; ++n)
57285 + if (*n == c && !glob_match(p, n))
57286 + return 0;
57287 + }
57288 +
57289 + return 1;
57290 + }
57291 + case '[':
57292 + {
57293 + int not;
57294 + char cold;
57295 +
57296 + if (*n == '\0' || *n == '/')
57297 + return 1;
57298 +
57299 + not = (*p == '!' || *p == '^');
57300 + if (not)
57301 + ++p;
57302 +
57303 + c = *p++;
57304 + for (;;) {
57305 + unsigned char fn = (unsigned char)*n;
57306 +
57307 + if (c == '\0')
57308 + return 1;
57309 + else {
57310 + if (c == fn)
57311 + goto matched;
57312 + cold = c;
57313 + c = *p++;
57314 +
57315 + if (c == '-' && *p != ']') {
57316 + unsigned char cend = *p++;
57317 +
57318 + if (cend == '\0')
57319 + return 1;
57320 +
57321 + if (cold <= fn && fn <= cend)
57322 + goto matched;
57323 +
57324 + c = *p++;
57325 + }
57326 + }
57327 +
57328 + if (c == ']')
57329 + break;
57330 + }
57331 + if (!not)
57332 + return 1;
57333 + break;
57334 + matched:
57335 + while (c != ']') {
57336 + if (c == '\0')
57337 + return 1;
57338 +
57339 + c = *p++;
57340 + }
57341 + if (not)
57342 + return 1;
57343 + }
57344 + break;
57345 + default:
57346 + if (c != *n)
57347 + return 1;
57348 + }
57349 +
57350 + ++n;
57351 + }
57352 +
57353 + if (*n == '\0')
57354 + return 0;
57355 +
57356 + if (*n == '/')
57357 + return 0;
57358 +
57359 + return 1;
57360 +}
57361 +
57362 +static struct acl_object_label *
57363 +chk_glob_label(struct acl_object_label *globbed,
57364 + struct dentry *dentry, struct vfsmount *mnt, char **path)
57365 +{
57366 + struct acl_object_label *tmp;
57367 +
57368 + if (*path == NULL)
57369 + *path = gr_to_filename_nolock(dentry, mnt);
57370 +
57371 + tmp = globbed;
57372 +
57373 + while (tmp) {
57374 + if (!glob_match(tmp->filename, *path))
57375 + return tmp;
57376 + tmp = tmp->next;
57377 + }
57378 +
57379 + return NULL;
57380 +}
57381 +
57382 +static struct acl_object_label *
57383 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
57384 + const ino_t curr_ino, const dev_t curr_dev,
57385 + const struct acl_subject_label *subj, char **path, const int checkglob)
57386 +{
57387 + struct acl_subject_label *tmpsubj;
57388 + struct acl_object_label *retval;
57389 + struct acl_object_label *retval2;
57390 +
57391 + tmpsubj = (struct acl_subject_label *) subj;
57392 + read_lock(&gr_inode_lock);
57393 + do {
57394 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
57395 + if (retval) {
57396 + if (checkglob && retval->globbed) {
57397 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
57398 + (struct vfsmount *)orig_mnt, path);
57399 + if (retval2)
57400 + retval = retval2;
57401 + }
57402 + break;
57403 + }
57404 + } while ((tmpsubj = tmpsubj->parent_subject));
57405 + read_unlock(&gr_inode_lock);
57406 +
57407 + return retval;
57408 +}
57409 +
57410 +static __inline__ struct acl_object_label *
57411 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
57412 + const struct dentry *curr_dentry,
57413 + const struct acl_subject_label *subj, char **path, const int checkglob)
57414 +{
57415 + int newglob = checkglob;
57416 +
57417 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
57418 + as we don't want a / * rule to match instead of the / object
57419 + don't do this for create lookups that call this function though, since they're looking up
57420 + on the parent and thus need globbing checks on all paths
57421 + */
57422 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
57423 + newglob = GR_NO_GLOB;
57424 +
57425 + return __full_lookup(orig_dentry, orig_mnt,
57426 + curr_dentry->d_inode->i_ino,
57427 + __get_dev(curr_dentry), subj, path, newglob);
57428 +}
57429 +
57430 +static struct acl_object_label *
57431 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57432 + const struct acl_subject_label *subj, char *path, const int checkglob)
57433 +{
57434 + struct dentry *dentry = (struct dentry *) l_dentry;
57435 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
57436 + struct acl_object_label *retval;
57437 +
57438 + spin_lock(&dcache_lock);
57439 + spin_lock(&vfsmount_lock);
57440 +
57441 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
57442 +#ifdef CONFIG_NET
57443 + mnt == sock_mnt ||
57444 +#endif
57445 +#ifdef CONFIG_HUGETLBFS
57446 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
57447 +#endif
57448 + /* ignore Eric Biederman */
57449 + IS_PRIVATE(l_dentry->d_inode))) {
57450 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
57451 + goto out;
57452 + }
57453 +
57454 + for (;;) {
57455 + if (dentry == real_root && mnt == real_root_mnt)
57456 + break;
57457 +
57458 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
57459 + if (mnt->mnt_parent == mnt)
57460 + break;
57461 +
57462 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
57463 + if (retval != NULL)
57464 + goto out;
57465 +
57466 + dentry = mnt->mnt_mountpoint;
57467 + mnt = mnt->mnt_parent;
57468 + continue;
57469 + }
57470 +
57471 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
57472 + if (retval != NULL)
57473 + goto out;
57474 +
57475 + dentry = dentry->d_parent;
57476 + }
57477 +
57478 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
57479 +
57480 + if (retval == NULL)
57481 + retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
57482 +out:
57483 + spin_unlock(&vfsmount_lock);
57484 + spin_unlock(&dcache_lock);
57485 +
57486 + BUG_ON(retval == NULL);
57487 +
57488 + return retval;
57489 +}
57490 +
57491 +static __inline__ struct acl_object_label *
57492 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57493 + const struct acl_subject_label *subj)
57494 +{
57495 + char *path = NULL;
57496 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
57497 +}
57498 +
57499 +static __inline__ struct acl_object_label *
57500 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57501 + const struct acl_subject_label *subj)
57502 +{
57503 + char *path = NULL;
57504 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
57505 +}
57506 +
57507 +static __inline__ struct acl_object_label *
57508 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57509 + const struct acl_subject_label *subj, char *path)
57510 +{
57511 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
57512 +}
57513 +
57514 +static struct acl_subject_label *
57515 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57516 + const struct acl_role_label *role)
57517 +{
57518 + struct dentry *dentry = (struct dentry *) l_dentry;
57519 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
57520 + struct acl_subject_label *retval;
57521 +
57522 + spin_lock(&dcache_lock);
57523 + spin_lock(&vfsmount_lock);
57524 +
57525 + for (;;) {
57526 + if (dentry == real_root && mnt == real_root_mnt)
57527 + break;
57528 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
57529 + if (mnt->mnt_parent == mnt)
57530 + break;
57531 +
57532 + read_lock(&gr_inode_lock);
57533 + retval =
57534 + lookup_acl_subj_label(dentry->d_inode->i_ino,
57535 + __get_dev(dentry), role);
57536 + read_unlock(&gr_inode_lock);
57537 + if (retval != NULL)
57538 + goto out;
57539 +
57540 + dentry = mnt->mnt_mountpoint;
57541 + mnt = mnt->mnt_parent;
57542 + continue;
57543 + }
57544 +
57545 + read_lock(&gr_inode_lock);
57546 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
57547 + __get_dev(dentry), role);
57548 + read_unlock(&gr_inode_lock);
57549 + if (retval != NULL)
57550 + goto out;
57551 +
57552 + dentry = dentry->d_parent;
57553 + }
57554 +
57555 + read_lock(&gr_inode_lock);
57556 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
57557 + __get_dev(dentry), role);
57558 + read_unlock(&gr_inode_lock);
57559 +
57560 + if (unlikely(retval == NULL)) {
57561 + read_lock(&gr_inode_lock);
57562 + retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
57563 + __get_dev(real_root), role);
57564 + read_unlock(&gr_inode_lock);
57565 + }
57566 +out:
57567 + spin_unlock(&vfsmount_lock);
57568 + spin_unlock(&dcache_lock);
57569 +
57570 + BUG_ON(retval == NULL);
57571 +
57572 + return retval;
57573 +}
57574 +
57575 +static void
57576 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
57577 +{
57578 + struct task_struct *task = current;
57579 + const struct cred *cred = current_cred();
57580 +
57581 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
57582 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
57583 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
57584 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
57585 +
57586 + return;
57587 +}
57588 +
57589 +static void
57590 +gr_log_learn_sysctl(const char *path, const __u32 mode)
57591 +{
57592 + struct task_struct *task = current;
57593 + const struct cred *cred = current_cred();
57594 +
57595 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
57596 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
57597 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
57598 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
57599 +
57600 + return;
57601 +}
57602 +
57603 +static void
57604 +gr_log_learn_id_change(const char type, const unsigned int real,
57605 + const unsigned int effective, const unsigned int fs)
57606 +{
57607 + struct task_struct *task = current;
57608 + const struct cred *cred = current_cred();
57609 +
57610 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
57611 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
57612 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
57613 + type, real, effective, fs, &task->signal->saved_ip);
57614 +
57615 + return;
57616 +}
57617 +
57618 +__u32
57619 +gr_search_file(const struct dentry * dentry, const __u32 mode,
57620 + const struct vfsmount * mnt)
57621 +{
57622 + __u32 retval = mode;
57623 + struct acl_subject_label *curracl;
57624 + struct acl_object_label *currobj;
57625 +
57626 + if (unlikely(!(gr_status & GR_READY)))
57627 + return (mode & ~GR_AUDITS);
57628 +
57629 + curracl = current->acl;
57630 +
57631 + currobj = chk_obj_label(dentry, mnt, curracl);
57632 + retval = currobj->mode & mode;
57633 +
57634 + /* if we're opening a specified transfer file for writing
57635 + (e.g. /dev/initctl), then transfer our role to init
57636 + */
57637 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
57638 + current->role->roletype & GR_ROLE_PERSIST)) {
57639 + struct task_struct *task = init_pid_ns.child_reaper;
57640 +
57641 + if (task->role != current->role) {
57642 + task->acl_sp_role = 0;
57643 + task->acl_role_id = current->acl_role_id;
57644 + task->role = current->role;
57645 + rcu_read_lock();
57646 + read_lock(&grsec_exec_file_lock);
57647 + gr_apply_subject_to_task(task);
57648 + read_unlock(&grsec_exec_file_lock);
57649 + rcu_read_unlock();
57650 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
57651 + }
57652 + }
57653 +
57654 + if (unlikely
57655 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
57656 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
57657 + __u32 new_mode = mode;
57658 +
57659 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
57660 +
57661 + retval = new_mode;
57662 +
57663 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
57664 + new_mode |= GR_INHERIT;
57665 +
57666 + if (!(mode & GR_NOLEARN))
57667 + gr_log_learn(dentry, mnt, new_mode);
57668 + }
57669 +
57670 + return retval;
57671 +}
57672 +
57673 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
57674 + const struct dentry *parent,
57675 + const struct vfsmount *mnt)
57676 +{
57677 + struct name_entry *match;
57678 + struct acl_object_label *matchpo;
57679 + struct acl_subject_label *curracl;
57680 + char *path;
57681 +
57682 + if (unlikely(!(gr_status & GR_READY)))
57683 + return NULL;
57684 +
57685 + preempt_disable();
57686 + path = gr_to_filename_rbac(new_dentry, mnt);
57687 + match = lookup_name_entry_create(path);
57688 +
57689 + curracl = current->acl;
57690 +
57691 + if (match) {
57692 + read_lock(&gr_inode_lock);
57693 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
57694 + read_unlock(&gr_inode_lock);
57695 +
57696 + if (matchpo) {
57697 + preempt_enable();
57698 + return matchpo;
57699 + }
57700 + }
57701 +
57702 + // lookup parent
57703 +
57704 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
57705 +
57706 + preempt_enable();
57707 + return matchpo;
57708 +}
57709 +
57710 +__u32
57711 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
57712 + const struct vfsmount * mnt, const __u32 mode)
57713 +{
57714 + struct acl_object_label *matchpo;
57715 + __u32 retval;
57716 +
57717 + if (unlikely(!(gr_status & GR_READY)))
57718 + return (mode & ~GR_AUDITS);
57719 +
57720 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
57721 +
57722 + retval = matchpo->mode & mode;
57723 +
57724 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
57725 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
57726 + __u32 new_mode = mode;
57727 +
57728 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
57729 +
57730 + gr_log_learn(new_dentry, mnt, new_mode);
57731 + return new_mode;
57732 + }
57733 +
57734 + return retval;
57735 +}
57736 +
57737 +__u32
57738 +gr_check_link(const struct dentry * new_dentry,
57739 + const struct dentry * parent_dentry,
57740 + const struct vfsmount * parent_mnt,
57741 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
57742 +{
57743 + struct acl_object_label *obj;
57744 + __u32 oldmode, newmode;
57745 + __u32 needmode;
57746 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
57747 + GR_DELETE | GR_INHERIT;
57748 +
57749 + if (unlikely(!(gr_status & GR_READY)))
57750 + return (GR_CREATE | GR_LINK);
57751 +
57752 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
57753 + oldmode = obj->mode;
57754 +
57755 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
57756 + newmode = obj->mode;
57757 +
57758 + needmode = newmode & checkmodes;
57759 +
57760 + // old name for hardlink must have at least the permissions of the new name
57761 + if ((oldmode & needmode) != needmode)
57762 + goto bad;
57763 +
57764 + // if old name had restrictions/auditing, make sure the new name does as well
57765 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
57766 +
57767 + // don't allow hardlinking of suid/sgid files without permission
57768 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
57769 + needmode |= GR_SETID;
57770 +
57771 + if ((newmode & needmode) != needmode)
57772 + goto bad;
57773 +
57774 + // enforce minimum permissions
57775 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
57776 + return newmode;
57777 +bad:
57778 + needmode = oldmode;
57779 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
57780 + needmode |= GR_SETID;
57781 +
57782 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
57783 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
57784 + return (GR_CREATE | GR_LINK);
57785 + } else if (newmode & GR_SUPPRESS)
57786 + return GR_SUPPRESS;
57787 + else
57788 + return 0;
57789 +}
57790 +
57791 +int
57792 +gr_check_hidden_task(const struct task_struct *task)
57793 +{
57794 + if (unlikely(!(gr_status & GR_READY)))
57795 + return 0;
57796 +
57797 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
57798 + return 1;
57799 +
57800 + return 0;
57801 +}
57802 +
57803 +int
57804 +gr_check_protected_task(const struct task_struct *task)
57805 +{
57806 + if (unlikely(!(gr_status & GR_READY) || !task))
57807 + return 0;
57808 +
57809 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
57810 + task->acl != current->acl)
57811 + return 1;
57812 +
57813 + return 0;
57814 +}
57815 +
57816 +int
57817 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
57818 +{
57819 + struct task_struct *p;
57820 + int ret = 0;
57821 +
57822 + if (unlikely(!(gr_status & GR_READY) || !pid))
57823 + return ret;
57824 +
57825 + read_lock(&tasklist_lock);
57826 + do_each_pid_task(pid, type, p) {
57827 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
57828 + p->acl != current->acl) {
57829 + ret = 1;
57830 + goto out;
57831 + }
57832 + } while_each_pid_task(pid, type, p);
57833 +out:
57834 + read_unlock(&tasklist_lock);
57835 +
57836 + return ret;
57837 +}
57838 +
57839 +void
57840 +gr_copy_label(struct task_struct *tsk)
57841 +{
57842 + tsk->signal->used_accept = 0;
57843 + tsk->acl_sp_role = 0;
57844 + tsk->acl_role_id = current->acl_role_id;
57845 + tsk->acl = current->acl;
57846 + tsk->role = current->role;
57847 + tsk->signal->curr_ip = current->signal->curr_ip;
57848 + tsk->signal->saved_ip = current->signal->saved_ip;
57849 + if (current->exec_file)
57850 + get_file(current->exec_file);
57851 + tsk->exec_file = current->exec_file;
57852 + tsk->is_writable = current->is_writable;
57853 + if (unlikely(current->signal->used_accept)) {
57854 + current->signal->curr_ip = 0;
57855 + current->signal->saved_ip = 0;
57856 + }
57857 +
57858 + return;
57859 +}
57860 +
57861 +static void
57862 +gr_set_proc_res(struct task_struct *task)
57863 +{
57864 + struct acl_subject_label *proc;
57865 + unsigned short i;
57866 +
57867 + proc = task->acl;
57868 +
57869 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
57870 + return;
57871 +
57872 + for (i = 0; i < RLIM_NLIMITS; i++) {
57873 + if (!(proc->resmask & (1 << i)))
57874 + continue;
57875 +
57876 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
57877 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
57878 + }
57879 +
57880 + return;
57881 +}
57882 +
57883 +extern int __gr_process_user_ban(struct user_struct *user);
57884 +
57885 +int
57886 +gr_check_user_change(int real, int effective, int fs)
57887 +{
57888 + unsigned int i;
57889 + __u16 num;
57890 + uid_t *uidlist;
57891 + int curuid;
57892 + int realok = 0;
57893 + int effectiveok = 0;
57894 + int fsok = 0;
57895 +
57896 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
57897 + struct user_struct *user;
57898 +
57899 + if (real == -1)
57900 + goto skipit;
57901 +
57902 + user = find_user(real);
57903 + if (user == NULL)
57904 + goto skipit;
57905 +
57906 + if (__gr_process_user_ban(user)) {
57907 + /* for find_user */
57908 + free_uid(user);
57909 + return 1;
57910 + }
57911 +
57912 + /* for find_user */
57913 + free_uid(user);
57914 +
57915 +skipit:
57916 +#endif
57917 +
57918 + if (unlikely(!(gr_status & GR_READY)))
57919 + return 0;
57920 +
57921 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
57922 + gr_log_learn_id_change('u', real, effective, fs);
57923 +
57924 + num = current->acl->user_trans_num;
57925 + uidlist = current->acl->user_transitions;
57926 +
57927 + if (uidlist == NULL)
57928 + return 0;
57929 +
57930 + if (real == -1)
57931 + realok = 1;
57932 + if (effective == -1)
57933 + effectiveok = 1;
57934 + if (fs == -1)
57935 + fsok = 1;
57936 +
57937 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
57938 + for (i = 0; i < num; i++) {
57939 + curuid = (int)uidlist[i];
57940 + if (real == curuid)
57941 + realok = 1;
57942 + if (effective == curuid)
57943 + effectiveok = 1;
57944 + if (fs == curuid)
57945 + fsok = 1;
57946 + }
57947 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
57948 + for (i = 0; i < num; i++) {
57949 + curuid = (int)uidlist[i];
57950 + if (real == curuid)
57951 + break;
57952 + if (effective == curuid)
57953 + break;
57954 + if (fs == curuid)
57955 + break;
57956 + }
57957 + /* not in deny list */
57958 + if (i == num) {
57959 + realok = 1;
57960 + effectiveok = 1;
57961 + fsok = 1;
57962 + }
57963 + }
57964 +
57965 + if (realok && effectiveok && fsok)
57966 + return 0;
57967 + else {
57968 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
57969 + return 1;
57970 + }
57971 +}
57972 +
57973 +int
57974 +gr_check_group_change(int real, int effective, int fs)
57975 +{
57976 + unsigned int i;
57977 + __u16 num;
57978 + gid_t *gidlist;
57979 + int curgid;
57980 + int realok = 0;
57981 + int effectiveok = 0;
57982 + int fsok = 0;
57983 +
57984 + if (unlikely(!(gr_status & GR_READY)))
57985 + return 0;
57986 +
57987 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
57988 + gr_log_learn_id_change('g', real, effective, fs);
57989 +
57990 + num = current->acl->group_trans_num;
57991 + gidlist = current->acl->group_transitions;
57992 +
57993 + if (gidlist == NULL)
57994 + return 0;
57995 +
57996 + if (real == -1)
57997 + realok = 1;
57998 + if (effective == -1)
57999 + effectiveok = 1;
58000 + if (fs == -1)
58001 + fsok = 1;
58002 +
58003 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
58004 + for (i = 0; i < num; i++) {
58005 + curgid = (int)gidlist[i];
58006 + if (real == curgid)
58007 + realok = 1;
58008 + if (effective == curgid)
58009 + effectiveok = 1;
58010 + if (fs == curgid)
58011 + fsok = 1;
58012 + }
58013 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
58014 + for (i = 0; i < num; i++) {
58015 + curgid = (int)gidlist[i];
58016 + if (real == curgid)
58017 + break;
58018 + if (effective == curgid)
58019 + break;
58020 + if (fs == curgid)
58021 + break;
58022 + }
58023 + /* not in deny list */
58024 + if (i == num) {
58025 + realok = 1;
58026 + effectiveok = 1;
58027 + fsok = 1;
58028 + }
58029 + }
58030 +
58031 + if (realok && effectiveok && fsok)
58032 + return 0;
58033 + else {
58034 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
58035 + return 1;
58036 + }
58037 +}
58038 +
58039 +void
58040 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
58041 +{
58042 + struct acl_role_label *role = task->role;
58043 + struct acl_subject_label *subj = NULL;
58044 + struct acl_object_label *obj;
58045 + struct file *filp;
58046 +
58047 + if (unlikely(!(gr_status & GR_READY)))
58048 + return;
58049 +
58050 + filp = task->exec_file;
58051 +
58052 + /* kernel process, we'll give them the kernel role */
58053 + if (unlikely(!filp)) {
58054 + task->role = kernel_role;
58055 + task->acl = kernel_role->root_label;
58056 + return;
58057 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
58058 + role = lookup_acl_role_label(task, uid, gid);
58059 +
58060 + /* perform subject lookup in possibly new role
58061 + we can use this result below in the case where role == task->role
58062 + */
58063 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
58064 +
58065 + /* if we changed uid/gid, but result in the same role
58066 + and are using inheritance, don't lose the inherited subject
58067 + if current subject is other than what normal lookup
58068 + would result in, we arrived via inheritance, don't
58069 + lose subject
58070 + */
58071 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
58072 + (subj == task->acl)))
58073 + task->acl = subj;
58074 +
58075 + task->role = role;
58076 +
58077 + task->is_writable = 0;
58078 +
58079 + /* ignore additional mmap checks for processes that are writable
58080 + by the default ACL */
58081 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
58082 + if (unlikely(obj->mode & GR_WRITE))
58083 + task->is_writable = 1;
58084 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
58085 + if (unlikely(obj->mode & GR_WRITE))
58086 + task->is_writable = 1;
58087 +
58088 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58089 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
58090 +#endif
58091 +
58092 + gr_set_proc_res(task);
58093 +
58094 + return;
58095 +}
58096 +
58097 +int
58098 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
58099 + const int unsafe_share)
58100 +{
58101 + struct task_struct *task = current;
58102 + struct acl_subject_label *newacl;
58103 + struct acl_object_label *obj;
58104 + __u32 retmode;
58105 +
58106 + if (unlikely(!(gr_status & GR_READY)))
58107 + return 0;
58108 +
58109 + newacl = chk_subj_label(dentry, mnt, task->role);
58110 +
58111 + task_lock(task);
58112 + if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
58113 + !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
58114 + !(task->role->roletype & GR_ROLE_GOD) &&
58115 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
58116 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
58117 + task_unlock(task);
58118 + if (unsafe_share)
58119 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
58120 + else
58121 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
58122 + return -EACCES;
58123 + }
58124 + task_unlock(task);
58125 +
58126 + obj = chk_obj_label(dentry, mnt, task->acl);
58127 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
58128 +
58129 + if (!(task->acl->mode & GR_INHERITLEARN) &&
58130 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
58131 + if (obj->nested)
58132 + task->acl = obj->nested;
58133 + else
58134 + task->acl = newacl;
58135 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
58136 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
58137 +
58138 + task->is_writable = 0;
58139 +
58140 + /* ignore additional mmap checks for processes that are writable
58141 + by the default ACL */
58142 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
58143 + if (unlikely(obj->mode & GR_WRITE))
58144 + task->is_writable = 1;
58145 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
58146 + if (unlikely(obj->mode & GR_WRITE))
58147 + task->is_writable = 1;
58148 +
58149 + gr_set_proc_res(task);
58150 +
58151 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58152 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
58153 +#endif
58154 + return 0;
58155 +}
58156 +
58157 +/* always called with valid inodev ptr */
58158 +static void
58159 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
58160 +{
58161 + struct acl_object_label *matchpo;
58162 + struct acl_subject_label *matchps;
58163 + struct acl_subject_label *subj;
58164 + struct acl_role_label *role;
58165 + unsigned int x;
58166 +
58167 + FOR_EACH_ROLE_START(role)
58168 + FOR_EACH_SUBJECT_START(role, subj, x)
58169 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
58170 + matchpo->mode |= GR_DELETED;
58171 + FOR_EACH_SUBJECT_END(subj,x)
58172 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
58173 + if (subj->inode == ino && subj->device == dev)
58174 + subj->mode |= GR_DELETED;
58175 + FOR_EACH_NESTED_SUBJECT_END(subj)
58176 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
58177 + matchps->mode |= GR_DELETED;
58178 + FOR_EACH_ROLE_END(role)
58179 +
58180 + inodev->nentry->deleted = 1;
58181 +
58182 + return;
58183 +}
58184 +
58185 +void
58186 +gr_handle_delete(const ino_t ino, const dev_t dev)
58187 +{
58188 + struct inodev_entry *inodev;
58189 +
58190 + if (unlikely(!(gr_status & GR_READY)))
58191 + return;
58192 +
58193 + write_lock(&gr_inode_lock);
58194 + inodev = lookup_inodev_entry(ino, dev);
58195 + if (inodev != NULL)
58196 + do_handle_delete(inodev, ino, dev);
58197 + write_unlock(&gr_inode_lock);
58198 +
58199 + return;
58200 +}
58201 +
58202 +static void
58203 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
58204 + const ino_t newinode, const dev_t newdevice,
58205 + struct acl_subject_label *subj)
58206 +{
58207 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
58208 + struct acl_object_label *match;
58209 +
58210 + match = subj->obj_hash[index];
58211 +
58212 + while (match && (match->inode != oldinode ||
58213 + match->device != olddevice ||
58214 + !(match->mode & GR_DELETED)))
58215 + match = match->next;
58216 +
58217 + if (match && (match->inode == oldinode)
58218 + && (match->device == olddevice)
58219 + && (match->mode & GR_DELETED)) {
58220 + if (match->prev == NULL) {
58221 + subj->obj_hash[index] = match->next;
58222 + if (match->next != NULL)
58223 + match->next->prev = NULL;
58224 + } else {
58225 + match->prev->next = match->next;
58226 + if (match->next != NULL)
58227 + match->next->prev = match->prev;
58228 + }
58229 + match->prev = NULL;
58230 + match->next = NULL;
58231 + match->inode = newinode;
58232 + match->device = newdevice;
58233 + match->mode &= ~GR_DELETED;
58234 +
58235 + insert_acl_obj_label(match, subj);
58236 + }
58237 +
58238 + return;
58239 +}
58240 +
58241 +static void
58242 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
58243 + const ino_t newinode, const dev_t newdevice,
58244 + struct acl_role_label *role)
58245 +{
58246 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
58247 + struct acl_subject_label *match;
58248 +
58249 + match = role->subj_hash[index];
58250 +
58251 + while (match && (match->inode != oldinode ||
58252 + match->device != olddevice ||
58253 + !(match->mode & GR_DELETED)))
58254 + match = match->next;
58255 +
58256 + if (match && (match->inode == oldinode)
58257 + && (match->device == olddevice)
58258 + && (match->mode & GR_DELETED)) {
58259 + if (match->prev == NULL) {
58260 + role->subj_hash[index] = match->next;
58261 + if (match->next != NULL)
58262 + match->next->prev = NULL;
58263 + } else {
58264 + match->prev->next = match->next;
58265 + if (match->next != NULL)
58266 + match->next->prev = match->prev;
58267 + }
58268 + match->prev = NULL;
58269 + match->next = NULL;
58270 + match->inode = newinode;
58271 + match->device = newdevice;
58272 + match->mode &= ~GR_DELETED;
58273 +
58274 + insert_acl_subj_label(match, role);
58275 + }
58276 +
58277 + return;
58278 +}
58279 +
58280 +static void
58281 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
58282 + const ino_t newinode, const dev_t newdevice)
58283 +{
58284 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
58285 + struct inodev_entry *match;
58286 +
58287 + match = inodev_set.i_hash[index];
58288 +
58289 + while (match && (match->nentry->inode != oldinode ||
58290 + match->nentry->device != olddevice || !match->nentry->deleted))
58291 + match = match->next;
58292 +
58293 + if (match && (match->nentry->inode == oldinode)
58294 + && (match->nentry->device == olddevice) &&
58295 + match->nentry->deleted) {
58296 + if (match->prev == NULL) {
58297 + inodev_set.i_hash[index] = match->next;
58298 + if (match->next != NULL)
58299 + match->next->prev = NULL;
58300 + } else {
58301 + match->prev->next = match->next;
58302 + if (match->next != NULL)
58303 + match->next->prev = match->prev;
58304 + }
58305 + match->prev = NULL;
58306 + match->next = NULL;
58307 + match->nentry->inode = newinode;
58308 + match->nentry->device = newdevice;
58309 + match->nentry->deleted = 0;
58310 +
58311 + insert_inodev_entry(match);
58312 + }
58313 +
58314 + return;
58315 +}
58316 +
58317 +static void
58318 +__do_handle_create(const struct name_entry *matchn, ino_t inode, dev_t dev)
58319 +{
58320 + struct acl_subject_label *subj;
58321 + struct acl_role_label *role;
58322 + unsigned int x;
58323 +
58324 + FOR_EACH_ROLE_START(role)
58325 + update_acl_subj_label(matchn->inode, matchn->device,
58326 + inode, dev, role);
58327 +
58328 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
58329 + if ((subj->inode == inode) && (subj->device == dev)) {
58330 + subj->inode = inode;
58331 + subj->device = dev;
58332 + }
58333 + FOR_EACH_NESTED_SUBJECT_END(subj)
58334 + FOR_EACH_SUBJECT_START(role, subj, x)
58335 + update_acl_obj_label(matchn->inode, matchn->device,
58336 + inode, dev, subj);
58337 + FOR_EACH_SUBJECT_END(subj,x)
58338 + FOR_EACH_ROLE_END(role)
58339 +
58340 + update_inodev_entry(matchn->inode, matchn->device, inode, dev);
58341 +
58342 + return;
58343 +}
58344 +
58345 +static void
58346 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
58347 + const struct vfsmount *mnt)
58348 +{
58349 + ino_t ino = dentry->d_inode->i_ino;
58350 + dev_t dev = __get_dev(dentry);
58351 +
58352 + __do_handle_create(matchn, ino, dev);
58353 +
58354 + return;
58355 +}
58356 +
58357 +void
58358 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
58359 +{
58360 + struct name_entry *matchn;
58361 +
58362 + if (unlikely(!(gr_status & GR_READY)))
58363 + return;
58364 +
58365 + preempt_disable();
58366 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
58367 +
58368 + if (unlikely((unsigned long)matchn)) {
58369 + write_lock(&gr_inode_lock);
58370 + do_handle_create(matchn, dentry, mnt);
58371 + write_unlock(&gr_inode_lock);
58372 + }
58373 + preempt_enable();
58374 +
58375 + return;
58376 +}
58377 +
58378 +void
58379 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
58380 +{
58381 + struct name_entry *matchn;
58382 +
58383 + if (unlikely(!(gr_status & GR_READY)))
58384 + return;
58385 +
58386 + preempt_disable();
58387 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
58388 +
58389 + if (unlikely((unsigned long)matchn)) {
58390 + write_lock(&gr_inode_lock);
58391 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
58392 + write_unlock(&gr_inode_lock);
58393 + }
58394 + preempt_enable();
58395 +
58396 + return;
58397 +}
58398 +
58399 +void
58400 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
58401 + struct dentry *old_dentry,
58402 + struct dentry *new_dentry,
58403 + struct vfsmount *mnt, const __u8 replace)
58404 +{
58405 + struct name_entry *matchn;
58406 + struct inodev_entry *inodev;
58407 + struct inode *inode = new_dentry->d_inode;
58408 + ino_t oldinode = old_dentry->d_inode->i_ino;
58409 + dev_t olddev = __get_dev(old_dentry);
58410 +
58411 + /* vfs_rename swaps the name and parent link for old_dentry and
58412 + new_dentry
58413 + at this point, old_dentry has the new name, parent link, and inode
58414 + for the renamed file
58415 + if a file is being replaced by a rename, new_dentry has the inode
58416 + and name for the replaced file
58417 + */
58418 +
58419 + if (unlikely(!(gr_status & GR_READY)))
58420 + return;
58421 +
58422 + preempt_disable();
58423 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
58424 +
58425 + /* we wouldn't have to check d_inode if it weren't for
58426 + NFS silly-renaming
58427 + */
58428 +
58429 + write_lock(&gr_inode_lock);
58430 + if (unlikely(replace && inode)) {
58431 + ino_t newinode = inode->i_ino;
58432 + dev_t newdev = __get_dev(new_dentry);
58433 + inodev = lookup_inodev_entry(newinode, newdev);
58434 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
58435 + do_handle_delete(inodev, newinode, newdev);
58436 + }
58437 +
58438 + inodev = lookup_inodev_entry(oldinode, olddev);
58439 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
58440 + do_handle_delete(inodev, oldinode, olddev);
58441 +
58442 + if (unlikely((unsigned long)matchn))
58443 + do_handle_create(matchn, old_dentry, mnt);
58444 +
58445 + write_unlock(&gr_inode_lock);
58446 + preempt_enable();
58447 +
58448 + return;
58449 +}
58450 +
58451 +static int
58452 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
58453 + unsigned char **sum)
58454 +{
58455 + struct acl_role_label *r;
58456 + struct role_allowed_ip *ipp;
58457 + struct role_transition *trans;
58458 + unsigned int i;
58459 + int found = 0;
58460 + u32 curr_ip = current->signal->curr_ip;
58461 +
58462 + current->signal->saved_ip = curr_ip;
58463 +
58464 + /* check transition table */
58465 +
58466 + for (trans = current->role->transitions; trans; trans = trans->next) {
58467 + if (!strcmp(rolename, trans->rolename)) {
58468 + found = 1;
58469 + break;
58470 + }
58471 + }
58472 +
58473 + if (!found)
58474 + return 0;
58475 +
58476 + /* handle special roles that do not require authentication
58477 + and check ip */
58478 +
58479 + FOR_EACH_ROLE_START(r)
58480 + if (!strcmp(rolename, r->rolename) &&
58481 + (r->roletype & GR_ROLE_SPECIAL)) {
58482 + found = 0;
58483 + if (r->allowed_ips != NULL) {
58484 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
58485 + if ((ntohl(curr_ip) & ipp->netmask) ==
58486 + (ntohl(ipp->addr) & ipp->netmask))
58487 + found = 1;
58488 + }
58489 + } else
58490 + found = 2;
58491 + if (!found)
58492 + return 0;
58493 +
58494 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
58495 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
58496 + *salt = NULL;
58497 + *sum = NULL;
58498 + return 1;
58499 + }
58500 + }
58501 + FOR_EACH_ROLE_END(r)
58502 +
58503 + for (i = 0; i < num_sprole_pws; i++) {
58504 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
58505 + *salt = acl_special_roles[i]->salt;
58506 + *sum = acl_special_roles[i]->sum;
58507 + return 1;
58508 + }
58509 + }
58510 +
58511 + return 0;
58512 +}
58513 +
58514 +static void
58515 +assign_special_role(char *rolename)
58516 +{
58517 + struct acl_object_label *obj;
58518 + struct acl_role_label *r;
58519 + struct acl_role_label *assigned = NULL;
58520 + struct task_struct *tsk;
58521 + struct file *filp;
58522 +
58523 + FOR_EACH_ROLE_START(r)
58524 + if (!strcmp(rolename, r->rolename) &&
58525 + (r->roletype & GR_ROLE_SPECIAL)) {
58526 + assigned = r;
58527 + break;
58528 + }
58529 + FOR_EACH_ROLE_END(r)
58530 +
58531 + if (!assigned)
58532 + return;
58533 +
58534 + read_lock(&tasklist_lock);
58535 + read_lock(&grsec_exec_file_lock);
58536 +
58537 + tsk = current->real_parent;
58538 + if (tsk == NULL)
58539 + goto out_unlock;
58540 +
58541 + filp = tsk->exec_file;
58542 + if (filp == NULL)
58543 + goto out_unlock;
58544 +
58545 + tsk->is_writable = 0;
58546 +
58547 + tsk->acl_sp_role = 1;
58548 + tsk->acl_role_id = ++acl_sp_role_value;
58549 + tsk->role = assigned;
58550 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
58551 +
58552 + /* ignore additional mmap checks for processes that are writable
58553 + by the default ACL */
58554 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
58555 + if (unlikely(obj->mode & GR_WRITE))
58556 + tsk->is_writable = 1;
58557 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
58558 + if (unlikely(obj->mode & GR_WRITE))
58559 + tsk->is_writable = 1;
58560 +
58561 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58562 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
58563 +#endif
58564 +
58565 +out_unlock:
58566 + read_unlock(&grsec_exec_file_lock);
58567 + read_unlock(&tasklist_lock);
58568 + return;
58569 +}
58570 +
58571 +int gr_check_secure_terminal(struct task_struct *task)
58572 +{
58573 + struct task_struct *p, *p2, *p3;
58574 + struct files_struct *files;
58575 + struct fdtable *fdt;
58576 + struct file *our_file = NULL, *file;
58577 + int i;
58578 +
58579 + if (task->signal->tty == NULL)
58580 + return 1;
58581 +
58582 + files = get_files_struct(task);
58583 + if (files != NULL) {
58584 + rcu_read_lock();
58585 + fdt = files_fdtable(files);
58586 + for (i=0; i < fdt->max_fds; i++) {
58587 + file = fcheck_files(files, i);
58588 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
58589 + get_file(file);
58590 + our_file = file;
58591 + }
58592 + }
58593 + rcu_read_unlock();
58594 + put_files_struct(files);
58595 + }
58596 +
58597 + if (our_file == NULL)
58598 + return 1;
58599 +
58600 + read_lock(&tasklist_lock);
58601 + do_each_thread(p2, p) {
58602 + files = get_files_struct(p);
58603 + if (files == NULL ||
58604 + (p->signal && p->signal->tty == task->signal->tty)) {
58605 + if (files != NULL)
58606 + put_files_struct(files);
58607 + continue;
58608 + }
58609 + rcu_read_lock();
58610 + fdt = files_fdtable(files);
58611 + for (i=0; i < fdt->max_fds; i++) {
58612 + file = fcheck_files(files, i);
58613 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
58614 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
58615 + p3 = task;
58616 + while (p3->pid > 0) {
58617 + if (p3 == p)
58618 + break;
58619 + p3 = p3->real_parent;
58620 + }
58621 + if (p3 == p)
58622 + break;
58623 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
58624 + gr_handle_alertkill(p);
58625 + rcu_read_unlock();
58626 + put_files_struct(files);
58627 + read_unlock(&tasklist_lock);
58628 + fput(our_file);
58629 + return 0;
58630 + }
58631 + }
58632 + rcu_read_unlock();
58633 + put_files_struct(files);
58634 + } while_each_thread(p2, p);
58635 + read_unlock(&tasklist_lock);
58636 +
58637 + fput(our_file);
58638 + return 1;
58639 +}
58640 +
58641 +ssize_t
58642 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
58643 +{
58644 + struct gr_arg_wrapper uwrap;
58645 + unsigned char *sprole_salt = NULL;
58646 + unsigned char *sprole_sum = NULL;
58647 + int error = sizeof (struct gr_arg_wrapper);
58648 + int error2 = 0;
58649 +
58650 + mutex_lock(&gr_dev_mutex);
58651 +
58652 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
58653 + error = -EPERM;
58654 + goto out;
58655 + }
58656 +
58657 + if (count != sizeof (struct gr_arg_wrapper)) {
58658 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
58659 + error = -EINVAL;
58660 + goto out;
58661 + }
58662 +
58663 +
58664 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
58665 + gr_auth_expires = 0;
58666 + gr_auth_attempts = 0;
58667 + }
58668 +
58669 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
58670 + error = -EFAULT;
58671 + goto out;
58672 + }
58673 +
58674 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
58675 + error = -EINVAL;
58676 + goto out;
58677 + }
58678 +
58679 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
58680 + error = -EFAULT;
58681 + goto out;
58682 + }
58683 +
58684 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
58685 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
58686 + time_after(gr_auth_expires, get_seconds())) {
58687 + error = -EBUSY;
58688 + goto out;
58689 + }
58690 +
58691 + /* if non-root trying to do anything other than use a special role,
58692 + do not attempt authentication, do not count towards authentication
58693 + locking
58694 + */
58695 +
58696 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
58697 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
58698 + current_uid()) {
58699 + error = -EPERM;
58700 + goto out;
58701 + }
58702 +
58703 + /* ensure pw and special role name are null terminated */
58704 +
58705 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
58706 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
58707 +
58708 + /* Okay.
58709 + * We have our enough of the argument structure..(we have yet
58710 + * to copy_from_user the tables themselves) . Copy the tables
58711 + * only if we need them, i.e. for loading operations. */
58712 +
58713 + switch (gr_usermode->mode) {
58714 + case GR_STATUS:
58715 + if (gr_status & GR_READY) {
58716 + error = 1;
58717 + if (!gr_check_secure_terminal(current))
58718 + error = 3;
58719 + } else
58720 + error = 2;
58721 + goto out;
58722 + case GR_SHUTDOWN:
58723 + if ((gr_status & GR_READY)
58724 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
58725 + pax_open_kernel();
58726 + gr_status &= ~GR_READY;
58727 + pax_close_kernel();
58728 +
58729 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
58730 + free_variables();
58731 + memset(gr_usermode, 0, sizeof (struct gr_arg));
58732 + memset(gr_system_salt, 0, GR_SALT_LEN);
58733 + memset(gr_system_sum, 0, GR_SHA_LEN);
58734 + } else if (gr_status & GR_READY) {
58735 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
58736 + error = -EPERM;
58737 + } else {
58738 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
58739 + error = -EAGAIN;
58740 + }
58741 + break;
58742 + case GR_ENABLE:
58743 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
58744 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
58745 + else {
58746 + if (gr_status & GR_READY)
58747 + error = -EAGAIN;
58748 + else
58749 + error = error2;
58750 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
58751 + }
58752 + break;
58753 + case GR_RELOAD:
58754 + if (!(gr_status & GR_READY)) {
58755 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
58756 + error = -EAGAIN;
58757 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
58758 + lock_kernel();
58759 +
58760 + pax_open_kernel();
58761 + gr_status &= ~GR_READY;
58762 + pax_close_kernel();
58763 +
58764 + free_variables();
58765 + if (!(error2 = gracl_init(gr_usermode))) {
58766 + unlock_kernel();
58767 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
58768 + } else {
58769 + unlock_kernel();
58770 + error = error2;
58771 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
58772 + }
58773 + } else {
58774 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
58775 + error = -EPERM;
58776 + }
58777 + break;
58778 + case GR_SEGVMOD:
58779 + if (unlikely(!(gr_status & GR_READY))) {
58780 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
58781 + error = -EAGAIN;
58782 + break;
58783 + }
58784 +
58785 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
58786 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
58787 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
58788 + struct acl_subject_label *segvacl;
58789 + segvacl =
58790 + lookup_acl_subj_label(gr_usermode->segv_inode,
58791 + gr_usermode->segv_device,
58792 + current->role);
58793 + if (segvacl) {
58794 + segvacl->crashes = 0;
58795 + segvacl->expires = 0;
58796 + }
58797 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
58798 + gr_remove_uid(gr_usermode->segv_uid);
58799 + }
58800 + } else {
58801 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
58802 + error = -EPERM;
58803 + }
58804 + break;
58805 + case GR_SPROLE:
58806 + case GR_SPROLEPAM:
58807 + if (unlikely(!(gr_status & GR_READY))) {
58808 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
58809 + error = -EAGAIN;
58810 + break;
58811 + }
58812 +
58813 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
58814 + current->role->expires = 0;
58815 + current->role->auth_attempts = 0;
58816 + }
58817 +
58818 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
58819 + time_after(current->role->expires, get_seconds())) {
58820 + error = -EBUSY;
58821 + goto out;
58822 + }
58823 +
58824 + if (lookup_special_role_auth
58825 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
58826 + && ((!sprole_salt && !sprole_sum)
58827 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
58828 + char *p = "";
58829 + assign_special_role(gr_usermode->sp_role);
58830 + read_lock(&tasklist_lock);
58831 + if (current->real_parent)
58832 + p = current->real_parent->role->rolename;
58833 + read_unlock(&tasklist_lock);
58834 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
58835 + p, acl_sp_role_value);
58836 + } else {
58837 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
58838 + error = -EPERM;
58839 + if(!(current->role->auth_attempts++))
58840 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
58841 +
58842 + goto out;
58843 + }
58844 + break;
58845 + case GR_UNSPROLE:
58846 + if (unlikely(!(gr_status & GR_READY))) {
58847 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
58848 + error = -EAGAIN;
58849 + break;
58850 + }
58851 +
58852 + if (current->role->roletype & GR_ROLE_SPECIAL) {
58853 + char *p = "";
58854 + int i = 0;
58855 +
58856 + read_lock(&tasklist_lock);
58857 + if (current->real_parent) {
58858 + p = current->real_parent->role->rolename;
58859 + i = current->real_parent->acl_role_id;
58860 + }
58861 + read_unlock(&tasklist_lock);
58862 +
58863 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
58864 + gr_set_acls(1);
58865 + } else {
58866 + error = -EPERM;
58867 + goto out;
58868 + }
58869 + break;
58870 + default:
58871 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
58872 + error = -EINVAL;
58873 + break;
58874 + }
58875 +
58876 + if (error != -EPERM)
58877 + goto out;
58878 +
58879 + if(!(gr_auth_attempts++))
58880 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
58881 +
58882 + out:
58883 + mutex_unlock(&gr_dev_mutex);
58884 + return error;
58885 +}
58886 +
58887 +/* must be called with
58888 + rcu_read_lock();
58889 + read_lock(&tasklist_lock);
58890 + read_lock(&grsec_exec_file_lock);
58891 +*/
58892 +int gr_apply_subject_to_task(struct task_struct *task)
58893 +{
58894 + struct acl_object_label *obj;
58895 + char *tmpname;
58896 + struct acl_subject_label *tmpsubj;
58897 + struct file *filp;
58898 + struct name_entry *nmatch;
58899 +
58900 + filp = task->exec_file;
58901 + if (filp == NULL)
58902 + return 0;
58903 +
58904 + /* the following is to apply the correct subject
58905 + on binaries running when the RBAC system
58906 + is enabled, when the binaries have been
58907 + replaced or deleted since their execution
58908 + -----
58909 + when the RBAC system starts, the inode/dev
58910 + from exec_file will be one the RBAC system
58911 + is unaware of. It only knows the inode/dev
58912 + of the present file on disk, or the absence
58913 + of it.
58914 + */
58915 + preempt_disable();
58916 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
58917 +
58918 + nmatch = lookup_name_entry(tmpname);
58919 + preempt_enable();
58920 + tmpsubj = NULL;
58921 + if (nmatch) {
58922 + if (nmatch->deleted)
58923 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
58924 + else
58925 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
58926 + if (tmpsubj != NULL)
58927 + task->acl = tmpsubj;
58928 + }
58929 + if (tmpsubj == NULL)
58930 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
58931 + task->role);
58932 + if (task->acl) {
58933 + task->is_writable = 0;
58934 + /* ignore additional mmap checks for processes that are writable
58935 + by the default ACL */
58936 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
58937 + if (unlikely(obj->mode & GR_WRITE))
58938 + task->is_writable = 1;
58939 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
58940 + if (unlikely(obj->mode & GR_WRITE))
58941 + task->is_writable = 1;
58942 +
58943 + gr_set_proc_res(task);
58944 +
58945 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58946 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
58947 +#endif
58948 + } else {
58949 + return 1;
58950 + }
58951 +
58952 + return 0;
58953 +}
58954 +
58955 +int
58956 +gr_set_acls(const int type)
58957 +{
58958 + struct task_struct *task, *task2;
58959 + struct acl_role_label *role = current->role;
58960 + __u16 acl_role_id = current->acl_role_id;
58961 + const struct cred *cred;
58962 + int ret;
58963 +
58964 + rcu_read_lock();
58965 + read_lock(&tasklist_lock);
58966 + read_lock(&grsec_exec_file_lock);
58967 + do_each_thread(task2, task) {
58968 + /* check to see if we're called from the exit handler,
58969 + if so, only replace ACLs that have inherited the admin
58970 + ACL */
58971 +
58972 + if (type && (task->role != role ||
58973 + task->acl_role_id != acl_role_id))
58974 + continue;
58975 +
58976 + task->acl_role_id = 0;
58977 + task->acl_sp_role = 0;
58978 +
58979 + if (task->exec_file) {
58980 + cred = __task_cred(task);
58981 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
58982 +
58983 + ret = gr_apply_subject_to_task(task);
58984 + if (ret) {
58985 + read_unlock(&grsec_exec_file_lock);
58986 + read_unlock(&tasklist_lock);
58987 + rcu_read_unlock();
58988 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
58989 + return ret;
58990 + }
58991 + } else {
58992 + // it's a kernel process
58993 + task->role = kernel_role;
58994 + task->acl = kernel_role->root_label;
58995 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
58996 + task->acl->mode &= ~GR_PROCFIND;
58997 +#endif
58998 + }
58999 + } while_each_thread(task2, task);
59000 + read_unlock(&grsec_exec_file_lock);
59001 + read_unlock(&tasklist_lock);
59002 + rcu_read_unlock();
59003 +
59004 + return 0;
59005 +}
59006 +
59007 +void
59008 +gr_learn_resource(const struct task_struct *task,
59009 + const int res, const unsigned long wanted, const int gt)
59010 +{
59011 + struct acl_subject_label *acl;
59012 + const struct cred *cred;
59013 +
59014 + if (unlikely((gr_status & GR_READY) &&
59015 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
59016 + goto skip_reslog;
59017 +
59018 +#ifdef CONFIG_GRKERNSEC_RESLOG
59019 + gr_log_resource(task, res, wanted, gt);
59020 +#endif
59021 + skip_reslog:
59022 +
59023 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
59024 + return;
59025 +
59026 + acl = task->acl;
59027 +
59028 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
59029 + !(acl->resmask & (1 << (unsigned short) res))))
59030 + return;
59031 +
59032 + if (wanted >= acl->res[res].rlim_cur) {
59033 + unsigned long res_add;
59034 +
59035 + res_add = wanted;
59036 + switch (res) {
59037 + case RLIMIT_CPU:
59038 + res_add += GR_RLIM_CPU_BUMP;
59039 + break;
59040 + case RLIMIT_FSIZE:
59041 + res_add += GR_RLIM_FSIZE_BUMP;
59042 + break;
59043 + case RLIMIT_DATA:
59044 + res_add += GR_RLIM_DATA_BUMP;
59045 + break;
59046 + case RLIMIT_STACK:
59047 + res_add += GR_RLIM_STACK_BUMP;
59048 + break;
59049 + case RLIMIT_CORE:
59050 + res_add += GR_RLIM_CORE_BUMP;
59051 + break;
59052 + case RLIMIT_RSS:
59053 + res_add += GR_RLIM_RSS_BUMP;
59054 + break;
59055 + case RLIMIT_NPROC:
59056 + res_add += GR_RLIM_NPROC_BUMP;
59057 + break;
59058 + case RLIMIT_NOFILE:
59059 + res_add += GR_RLIM_NOFILE_BUMP;
59060 + break;
59061 + case RLIMIT_MEMLOCK:
59062 + res_add += GR_RLIM_MEMLOCK_BUMP;
59063 + break;
59064 + case RLIMIT_AS:
59065 + res_add += GR_RLIM_AS_BUMP;
59066 + break;
59067 + case RLIMIT_LOCKS:
59068 + res_add += GR_RLIM_LOCKS_BUMP;
59069 + break;
59070 + case RLIMIT_SIGPENDING:
59071 + res_add += GR_RLIM_SIGPENDING_BUMP;
59072 + break;
59073 + case RLIMIT_MSGQUEUE:
59074 + res_add += GR_RLIM_MSGQUEUE_BUMP;
59075 + break;
59076 + case RLIMIT_NICE:
59077 + res_add += GR_RLIM_NICE_BUMP;
59078 + break;
59079 + case RLIMIT_RTPRIO:
59080 + res_add += GR_RLIM_RTPRIO_BUMP;
59081 + break;
59082 + case RLIMIT_RTTIME:
59083 + res_add += GR_RLIM_RTTIME_BUMP;
59084 + break;
59085 + }
59086 +
59087 + acl->res[res].rlim_cur = res_add;
59088 +
59089 + if (wanted > acl->res[res].rlim_max)
59090 + acl->res[res].rlim_max = res_add;
59091 +
59092 + /* only log the subject filename, since resource logging is supported for
59093 + single-subject learning only */
59094 + rcu_read_lock();
59095 + cred = __task_cred(task);
59096 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
59097 + task->role->roletype, cred->uid, cred->gid, acl->filename,
59098 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
59099 + "", (unsigned long) res, &task->signal->saved_ip);
59100 + rcu_read_unlock();
59101 + }
59102 +
59103 + return;
59104 +}
59105 +
59106 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
59107 +void
59108 +pax_set_initial_flags(struct linux_binprm *bprm)
59109 +{
59110 + struct task_struct *task = current;
59111 + struct acl_subject_label *proc;
59112 + unsigned long flags;
59113 +
59114 + if (unlikely(!(gr_status & GR_READY)))
59115 + return;
59116 +
59117 + flags = pax_get_flags(task);
59118 +
59119 + proc = task->acl;
59120 +
59121 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
59122 + flags &= ~MF_PAX_PAGEEXEC;
59123 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
59124 + flags &= ~MF_PAX_SEGMEXEC;
59125 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
59126 + flags &= ~MF_PAX_RANDMMAP;
59127 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
59128 + flags &= ~MF_PAX_EMUTRAMP;
59129 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
59130 + flags &= ~MF_PAX_MPROTECT;
59131 +
59132 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
59133 + flags |= MF_PAX_PAGEEXEC;
59134 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
59135 + flags |= MF_PAX_SEGMEXEC;
59136 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
59137 + flags |= MF_PAX_RANDMMAP;
59138 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
59139 + flags |= MF_PAX_EMUTRAMP;
59140 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
59141 + flags |= MF_PAX_MPROTECT;
59142 +
59143 + pax_set_flags(task, flags);
59144 +
59145 + return;
59146 +}
59147 +#endif
59148 +
59149 +#ifdef CONFIG_SYSCTL
59150 +/* Eric Biederman likes breaking userland ABI and every inode-based security
59151 + system to save 35kb of memory */
59152 +
59153 +/* we modify the passed in filename, but adjust it back before returning */
59154 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
59155 +{
59156 + struct name_entry *nmatch;
59157 + char *p, *lastp = NULL;
59158 + struct acl_object_label *obj = NULL, *tmp;
59159 + struct acl_subject_label *tmpsubj;
59160 + char c = '\0';
59161 +
59162 + read_lock(&gr_inode_lock);
59163 +
59164 + p = name + len - 1;
59165 + do {
59166 + nmatch = lookup_name_entry(name);
59167 + if (lastp != NULL)
59168 + *lastp = c;
59169 +
59170 + if (nmatch == NULL)
59171 + goto next_component;
59172 + tmpsubj = current->acl;
59173 + do {
59174 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
59175 + if (obj != NULL) {
59176 + tmp = obj->globbed;
59177 + while (tmp) {
59178 + if (!glob_match(tmp->filename, name)) {
59179 + obj = tmp;
59180 + goto found_obj;
59181 + }
59182 + tmp = tmp->next;
59183 + }
59184 + goto found_obj;
59185 + }
59186 + } while ((tmpsubj = tmpsubj->parent_subject));
59187 +next_component:
59188 + /* end case */
59189 + if (p == name)
59190 + break;
59191 +
59192 + while (*p != '/')
59193 + p--;
59194 + if (p == name)
59195 + lastp = p + 1;
59196 + else {
59197 + lastp = p;
59198 + p--;
59199 + }
59200 + c = *lastp;
59201 + *lastp = '\0';
59202 + } while (1);
59203 +found_obj:
59204 + read_unlock(&gr_inode_lock);
59205 + /* obj returned will always be non-null */
59206 + return obj;
59207 +}
59208 +
59209 +/* returns 0 when allowing, non-zero on error
59210 + op of 0 is used for readdir, so we don't log the names of hidden files
59211 +*/
59212 +__u32
59213 +gr_handle_sysctl(const struct ctl_table *table, const int op)
59214 +{
59215 + ctl_table *tmp;
59216 + const char *proc_sys = "/proc/sys";
59217 + char *path;
59218 + struct acl_object_label *obj;
59219 + unsigned short len = 0, pos = 0, depth = 0, i;
59220 + __u32 err = 0;
59221 + __u32 mode = 0;
59222 +
59223 + if (unlikely(!(gr_status & GR_READY)))
59224 + return 0;
59225 +
59226 + /* for now, ignore operations on non-sysctl entries if it's not a
59227 + readdir*/
59228 + if (table->child != NULL && op != 0)
59229 + return 0;
59230 +
59231 + mode |= GR_FIND;
59232 + /* it's only a read if it's an entry, read on dirs is for readdir */
59233 + if (op & MAY_READ)
59234 + mode |= GR_READ;
59235 + if (op & MAY_WRITE)
59236 + mode |= GR_WRITE;
59237 +
59238 + preempt_disable();
59239 +
59240 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
59241 +
59242 + /* it's only a read/write if it's an actual entry, not a dir
59243 + (which are opened for readdir)
59244 + */
59245 +
59246 + /* convert the requested sysctl entry into a pathname */
59247 +
59248 + for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
59249 + len += strlen(tmp->procname);
59250 + len++;
59251 + depth++;
59252 + }
59253 +
59254 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
59255 + /* deny */
59256 + goto out;
59257 + }
59258 +
59259 + memset(path, 0, PAGE_SIZE);
59260 +
59261 + memcpy(path, proc_sys, strlen(proc_sys));
59262 +
59263 + pos += strlen(proc_sys);
59264 +
59265 + for (; depth > 0; depth--) {
59266 + path[pos] = '/';
59267 + pos++;
59268 + for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
59269 + if (depth == i) {
59270 + memcpy(path + pos, tmp->procname,
59271 + strlen(tmp->procname));
59272 + pos += strlen(tmp->procname);
59273 + }
59274 + i++;
59275 + }
59276 + }
59277 +
59278 + obj = gr_lookup_by_name(path, pos);
59279 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
59280 +
59281 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
59282 + ((err & mode) != mode))) {
59283 + __u32 new_mode = mode;
59284 +
59285 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
59286 +
59287 + err = 0;
59288 + gr_log_learn_sysctl(path, new_mode);
59289 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
59290 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
59291 + err = -ENOENT;
59292 + } else if (!(err & GR_FIND)) {
59293 + err = -ENOENT;
59294 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
59295 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
59296 + path, (mode & GR_READ) ? " reading" : "",
59297 + (mode & GR_WRITE) ? " writing" : "");
59298 + err = -EACCES;
59299 + } else if ((err & mode) != mode) {
59300 + err = -EACCES;
59301 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
59302 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
59303 + path, (mode & GR_READ) ? " reading" : "",
59304 + (mode & GR_WRITE) ? " writing" : "");
59305 + err = 0;
59306 + } else
59307 + err = 0;
59308 +
59309 + out:
59310 + preempt_enable();
59311 +
59312 + return err;
59313 +}
59314 +#endif
59315 +
59316 +int
59317 +gr_handle_proc_ptrace(struct task_struct *task)
59318 +{
59319 + struct file *filp;
59320 + struct task_struct *tmp = task;
59321 + struct task_struct *curtemp = current;
59322 + __u32 retmode;
59323 +
59324 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
59325 + if (unlikely(!(gr_status & GR_READY)))
59326 + return 0;
59327 +#endif
59328 +
59329 + read_lock(&tasklist_lock);
59330 + read_lock(&grsec_exec_file_lock);
59331 + filp = task->exec_file;
59332 +
59333 + while (tmp->pid > 0) {
59334 + if (tmp == curtemp)
59335 + break;
59336 + tmp = tmp->real_parent;
59337 + }
59338 +
59339 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
59340 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
59341 + read_unlock(&grsec_exec_file_lock);
59342 + read_unlock(&tasklist_lock);
59343 + return 1;
59344 + }
59345 +
59346 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
59347 + if (!(gr_status & GR_READY)) {
59348 + read_unlock(&grsec_exec_file_lock);
59349 + read_unlock(&tasklist_lock);
59350 + return 0;
59351 + }
59352 +#endif
59353 +
59354 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
59355 + read_unlock(&grsec_exec_file_lock);
59356 + read_unlock(&tasklist_lock);
59357 +
59358 + if (retmode & GR_NOPTRACE)
59359 + return 1;
59360 +
59361 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
59362 + && (current->acl != task->acl || (current->acl != current->role->root_label
59363 + && current->pid != task->pid)))
59364 + return 1;
59365 +
59366 + return 0;
59367 +}
59368 +
59369 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
59370 +{
59371 + if (unlikely(!(gr_status & GR_READY)))
59372 + return;
59373 +
59374 + if (!(current->role->roletype & GR_ROLE_GOD))
59375 + return;
59376 +
59377 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
59378 + p->role->rolename, gr_task_roletype_to_char(p),
59379 + p->acl->filename);
59380 +}
59381 +
59382 +int
59383 +gr_handle_ptrace(struct task_struct *task, const long request)
59384 +{
59385 + struct task_struct *tmp = task;
59386 + struct task_struct *curtemp = current;
59387 + __u32 retmode;
59388 +
59389 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
59390 + if (unlikely(!(gr_status & GR_READY)))
59391 + return 0;
59392 +#endif
59393 +
59394 + read_lock(&tasklist_lock);
59395 + while (tmp->pid > 0) {
59396 + if (tmp == curtemp)
59397 + break;
59398 + tmp = tmp->real_parent;
59399 + }
59400 +
59401 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
59402 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
59403 + read_unlock(&tasklist_lock);
59404 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
59405 + return 1;
59406 + }
59407 + read_unlock(&tasklist_lock);
59408 +
59409 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
59410 + if (!(gr_status & GR_READY))
59411 + return 0;
59412 +#endif
59413 +
59414 + read_lock(&grsec_exec_file_lock);
59415 + if (unlikely(!task->exec_file)) {
59416 + read_unlock(&grsec_exec_file_lock);
59417 + return 0;
59418 + }
59419 +
59420 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
59421 + read_unlock(&grsec_exec_file_lock);
59422 +
59423 + if (retmode & GR_NOPTRACE) {
59424 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
59425 + return 1;
59426 + }
59427 +
59428 + if (retmode & GR_PTRACERD) {
59429 + switch (request) {
59430 + case PTRACE_POKETEXT:
59431 + case PTRACE_POKEDATA:
59432 + case PTRACE_POKEUSR:
59433 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
59434 + case PTRACE_SETREGS:
59435 + case PTRACE_SETFPREGS:
59436 +#endif
59437 +#ifdef CONFIG_X86
59438 + case PTRACE_SETFPXREGS:
59439 +#endif
59440 +#ifdef CONFIG_ALTIVEC
59441 + case PTRACE_SETVRREGS:
59442 +#endif
59443 + return 1;
59444 + default:
59445 + return 0;
59446 + }
59447 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
59448 + !(current->role->roletype & GR_ROLE_GOD) &&
59449 + (current->acl != task->acl)) {
59450 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
59451 + return 1;
59452 + }
59453 +
59454 + return 0;
59455 +}
59456 +
59457 +static int is_writable_mmap(const struct file *filp)
59458 +{
59459 + struct task_struct *task = current;
59460 + struct acl_object_label *obj, *obj2;
59461 +
59462 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
59463 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
59464 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
59465 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
59466 + task->role->root_label);
59467 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
59468 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
59469 + return 1;
59470 + }
59471 + }
59472 + return 0;
59473 +}
59474 +
59475 +int
59476 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
59477 +{
59478 + __u32 mode;
59479 +
59480 + if (unlikely(!file || !(prot & PROT_EXEC)))
59481 + return 1;
59482 +
59483 + if (is_writable_mmap(file))
59484 + return 0;
59485 +
59486 + mode =
59487 + gr_search_file(file->f_path.dentry,
59488 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
59489 + file->f_path.mnt);
59490 +
59491 + if (!gr_tpe_allow(file))
59492 + return 0;
59493 +
59494 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
59495 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
59496 + return 0;
59497 + } else if (unlikely(!(mode & GR_EXEC))) {
59498 + return 0;
59499 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
59500 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
59501 + return 1;
59502 + }
59503 +
59504 + return 1;
59505 +}
59506 +
59507 +int
59508 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
59509 +{
59510 + __u32 mode;
59511 +
59512 + if (unlikely(!file || !(prot & PROT_EXEC)))
59513 + return 1;
59514 +
59515 + if (is_writable_mmap(file))
59516 + return 0;
59517 +
59518 + mode =
59519 + gr_search_file(file->f_path.dentry,
59520 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
59521 + file->f_path.mnt);
59522 +
59523 + if (!gr_tpe_allow(file))
59524 + return 0;
59525 +
59526 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
59527 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
59528 + return 0;
59529 + } else if (unlikely(!(mode & GR_EXEC))) {
59530 + return 0;
59531 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
59532 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
59533 + return 1;
59534 + }
59535 +
59536 + return 1;
59537 +}
59538 +
59539 +void
59540 +gr_acl_handle_psacct(struct task_struct *task, const long code)
59541 +{
59542 + unsigned long runtime;
59543 + unsigned long cputime;
59544 + unsigned int wday, cday;
59545 + __u8 whr, chr;
59546 + __u8 wmin, cmin;
59547 + __u8 wsec, csec;
59548 + struct timespec timeval;
59549 +
59550 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
59551 + !(task->acl->mode & GR_PROCACCT)))
59552 + return;
59553 +
59554 + do_posix_clock_monotonic_gettime(&timeval);
59555 + runtime = timeval.tv_sec - task->start_time.tv_sec;
59556 + wday = runtime / (3600 * 24);
59557 + runtime -= wday * (3600 * 24);
59558 + whr = runtime / 3600;
59559 + runtime -= whr * 3600;
59560 + wmin = runtime / 60;
59561 + runtime -= wmin * 60;
59562 + wsec = runtime;
59563 +
59564 + cputime = (task->utime + task->stime) / HZ;
59565 + cday = cputime / (3600 * 24);
59566 + cputime -= cday * (3600 * 24);
59567 + chr = cputime / 3600;
59568 + cputime -= chr * 3600;
59569 + cmin = cputime / 60;
59570 + cputime -= cmin * 60;
59571 + csec = cputime;
59572 +
59573 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
59574 +
59575 + return;
59576 +}
59577 +
59578 +void gr_set_kernel_label(struct task_struct *task)
59579 +{
59580 + if (gr_status & GR_READY) {
59581 + task->role = kernel_role;
59582 + task->acl = kernel_role->root_label;
59583 + }
59584 + return;
59585 +}
59586 +
59587 +#ifdef CONFIG_TASKSTATS
59588 +int gr_is_taskstats_denied(int pid)
59589 +{
59590 + struct task_struct *task;
59591 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59592 + const struct cred *cred;
59593 +#endif
59594 + int ret = 0;
59595 +
59596 + /* restrict taskstats viewing to un-chrooted root users
59597 + who have the 'view' subject flag if the RBAC system is enabled
59598 + */
59599 +
59600 + rcu_read_lock();
59601 + read_lock(&tasklist_lock);
59602 + task = find_task_by_vpid(pid);
59603 + if (task) {
59604 +#ifdef CONFIG_GRKERNSEC_CHROOT
59605 + if (proc_is_chrooted(task))
59606 + ret = -EACCES;
59607 +#endif
59608 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59609 + cred = __task_cred(task);
59610 +#ifdef CONFIG_GRKERNSEC_PROC_USER
59611 + if (cred->uid != 0)
59612 + ret = -EACCES;
59613 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59614 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
59615 + ret = -EACCES;
59616 +#endif
59617 +#endif
59618 + if (gr_status & GR_READY) {
59619 + if (!(task->acl->mode & GR_VIEW))
59620 + ret = -EACCES;
59621 + }
59622 + } else
59623 + ret = -ENOENT;
59624 +
59625 + read_unlock(&tasklist_lock);
59626 + rcu_read_unlock();
59627 +
59628 + return ret;
59629 +}
59630 +#endif
59631 +
59632 +/* AUXV entries are filled via a descendant of search_binary_handler
59633 + after we've already applied the subject for the target
59634 +*/
59635 +int gr_acl_enable_at_secure(void)
59636 +{
59637 + if (unlikely(!(gr_status & GR_READY)))
59638 + return 0;
59639 +
59640 + if (current->acl->mode & GR_ATSECURE)
59641 + return 1;
59642 +
59643 + return 0;
59644 +}
59645 +
59646 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
59647 +{
59648 + struct task_struct *task = current;
59649 + struct dentry *dentry = file->f_path.dentry;
59650 + struct vfsmount *mnt = file->f_path.mnt;
59651 + struct acl_object_label *obj, *tmp;
59652 + struct acl_subject_label *subj;
59653 + unsigned int bufsize;
59654 + int is_not_root;
59655 + char *path;
59656 + dev_t dev = __get_dev(dentry);
59657 +
59658 + if (unlikely(!(gr_status & GR_READY)))
59659 + return 1;
59660 +
59661 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
59662 + return 1;
59663 +
59664 + /* ignore Eric Biederman */
59665 + if (IS_PRIVATE(dentry->d_inode))
59666 + return 1;
59667 +
59668 + subj = task->acl;
59669 + do {
59670 + obj = lookup_acl_obj_label(ino, dev, subj);
59671 + if (obj != NULL)
59672 + return (obj->mode & GR_FIND) ? 1 : 0;
59673 + } while ((subj = subj->parent_subject));
59674 +
59675 + /* this is purely an optimization since we're looking for an object
59676 + for the directory we're doing a readdir on
59677 + if it's possible for any globbed object to match the entry we're
59678 + filling into the directory, then the object we find here will be
59679 + an anchor point with attached globbed objects
59680 + */
59681 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
59682 + if (obj->globbed == NULL)
59683 + return (obj->mode & GR_FIND) ? 1 : 0;
59684 +
59685 + is_not_root = ((obj->filename[0] == '/') &&
59686 + (obj->filename[1] == '\0')) ? 0 : 1;
59687 + bufsize = PAGE_SIZE - namelen - is_not_root;
59688 +
59689 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
59690 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
59691 + return 1;
59692 +
59693 + preempt_disable();
59694 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
59695 + bufsize);
59696 +
59697 + bufsize = strlen(path);
59698 +
59699 + /* if base is "/", don't append an additional slash */
59700 + if (is_not_root)
59701 + *(path + bufsize) = '/';
59702 + memcpy(path + bufsize + is_not_root, name, namelen);
59703 + *(path + bufsize + namelen + is_not_root) = '\0';
59704 +
59705 + tmp = obj->globbed;
59706 + while (tmp) {
59707 + if (!glob_match(tmp->filename, path)) {
59708 + preempt_enable();
59709 + return (tmp->mode & GR_FIND) ? 1 : 0;
59710 + }
59711 + tmp = tmp->next;
59712 + }
59713 + preempt_enable();
59714 + return (obj->mode & GR_FIND) ? 1 : 0;
59715 +}
59716 +
59717 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
59718 +EXPORT_SYMBOL(gr_acl_is_enabled);
59719 +#endif
59720 +EXPORT_SYMBOL(gr_learn_resource);
59721 +EXPORT_SYMBOL(gr_set_kernel_label);
59722 +#ifdef CONFIG_SECURITY
59723 +EXPORT_SYMBOL(gr_check_user_change);
59724 +EXPORT_SYMBOL(gr_check_group_change);
59725 +#endif
59726 +
59727 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
59728 new file mode 100644
59729 index 0000000..34fefda
59730 --- /dev/null
59731 +++ b/grsecurity/gracl_alloc.c
59732 @@ -0,0 +1,105 @@
59733 +#include <linux/kernel.h>
59734 +#include <linux/mm.h>
59735 +#include <linux/slab.h>
59736 +#include <linux/vmalloc.h>
59737 +#include <linux/gracl.h>
59738 +#include <linux/grsecurity.h>
59739 +
59740 +static unsigned long alloc_stack_next = 1;
59741 +static unsigned long alloc_stack_size = 1;
59742 +static void **alloc_stack;
59743 +
59744 +static __inline__ int
59745 +alloc_pop(void)
59746 +{
59747 + if (alloc_stack_next == 1)
59748 + return 0;
59749 +
59750 + kfree(alloc_stack[alloc_stack_next - 2]);
59751 +
59752 + alloc_stack_next--;
59753 +
59754 + return 1;
59755 +}
59756 +
59757 +static __inline__ int
59758 +alloc_push(void *buf)
59759 +{
59760 + if (alloc_stack_next >= alloc_stack_size)
59761 + return 1;
59762 +
59763 + alloc_stack[alloc_stack_next - 1] = buf;
59764 +
59765 + alloc_stack_next++;
59766 +
59767 + return 0;
59768 +}
59769 +
59770 +void *
59771 +acl_alloc(unsigned long len)
59772 +{
59773 + void *ret = NULL;
59774 +
59775 + if (!len || len > PAGE_SIZE)
59776 + goto out;
59777 +
59778 + ret = kmalloc(len, GFP_KERNEL);
59779 +
59780 + if (ret) {
59781 + if (alloc_push(ret)) {
59782 + kfree(ret);
59783 + ret = NULL;
59784 + }
59785 + }
59786 +
59787 +out:
59788 + return ret;
59789 +}
59790 +
59791 +void *
59792 +acl_alloc_num(unsigned long num, unsigned long len)
59793 +{
59794 + if (!len || (num > (PAGE_SIZE / len)))
59795 + return NULL;
59796 +
59797 + return acl_alloc(num * len);
59798 +}
59799 +
59800 +void
59801 +acl_free_all(void)
59802 +{
59803 + if (gr_acl_is_enabled() || !alloc_stack)
59804 + return;
59805 +
59806 + while (alloc_pop()) ;
59807 +
59808 + if (alloc_stack) {
59809 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
59810 + kfree(alloc_stack);
59811 + else
59812 + vfree(alloc_stack);
59813 + }
59814 +
59815 + alloc_stack = NULL;
59816 + alloc_stack_size = 1;
59817 + alloc_stack_next = 1;
59818 +
59819 + return;
59820 +}
59821 +
59822 +int
59823 +acl_alloc_stack_init(unsigned long size)
59824 +{
59825 + if ((size * sizeof (void *)) <= PAGE_SIZE)
59826 + alloc_stack =
59827 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
59828 + else
59829 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
59830 +
59831 + alloc_stack_size = size;
59832 +
59833 + if (!alloc_stack)
59834 + return 0;
59835 + else
59836 + return 1;
59837 +}
59838 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
59839 new file mode 100644
59840 index 0000000..955ddfb
59841 --- /dev/null
59842 +++ b/grsecurity/gracl_cap.c
59843 @@ -0,0 +1,101 @@
59844 +#include <linux/kernel.h>
59845 +#include <linux/module.h>
59846 +#include <linux/sched.h>
59847 +#include <linux/gracl.h>
59848 +#include <linux/grsecurity.h>
59849 +#include <linux/grinternal.h>
59850 +
59851 +extern const char *captab_log[];
59852 +extern int captab_log_entries;
59853 +
59854 +int
59855 +gr_acl_is_capable(const int cap)
59856 +{
59857 + struct task_struct *task = current;
59858 + const struct cred *cred = current_cred();
59859 + struct acl_subject_label *curracl;
59860 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
59861 + kernel_cap_t cap_audit = __cap_empty_set;
59862 +
59863 + if (!gr_acl_is_enabled())
59864 + return 1;
59865 +
59866 + curracl = task->acl;
59867 +
59868 + cap_drop = curracl->cap_lower;
59869 + cap_mask = curracl->cap_mask;
59870 + cap_audit = curracl->cap_invert_audit;
59871 +
59872 + while ((curracl = curracl->parent_subject)) {
59873 + /* if the cap isn't specified in the current computed mask but is specified in the
59874 + current level subject, and is lowered in the current level subject, then add
59875 + it to the set of dropped capabilities
59876 + otherwise, add the current level subject's mask to the current computed mask
59877 + */
59878 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
59879 + cap_raise(cap_mask, cap);
59880 + if (cap_raised(curracl->cap_lower, cap))
59881 + cap_raise(cap_drop, cap);
59882 + if (cap_raised(curracl->cap_invert_audit, cap))
59883 + cap_raise(cap_audit, cap);
59884 + }
59885 + }
59886 +
59887 + if (!cap_raised(cap_drop, cap)) {
59888 + if (cap_raised(cap_audit, cap))
59889 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
59890 + return 1;
59891 + }
59892 +
59893 + curracl = task->acl;
59894 +
59895 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
59896 + && cap_raised(cred->cap_effective, cap)) {
59897 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
59898 + task->role->roletype, cred->uid,
59899 + cred->gid, task->exec_file ?
59900 + gr_to_filename(task->exec_file->f_path.dentry,
59901 + task->exec_file->f_path.mnt) : curracl->filename,
59902 + curracl->filename, 0UL,
59903 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
59904 + return 1;
59905 + }
59906 +
59907 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
59908 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
59909 + return 0;
59910 +}
59911 +
59912 +int
59913 +gr_acl_is_capable_nolog(const int cap)
59914 +{
59915 + struct acl_subject_label *curracl;
59916 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
59917 +
59918 + if (!gr_acl_is_enabled())
59919 + return 1;
59920 +
59921 + curracl = current->acl;
59922 +
59923 + cap_drop = curracl->cap_lower;
59924 + cap_mask = curracl->cap_mask;
59925 +
59926 + while ((curracl = curracl->parent_subject)) {
59927 + /* if the cap isn't specified in the current computed mask but is specified in the
59928 + current level subject, and is lowered in the current level subject, then add
59929 + it to the set of dropped capabilities
59930 + otherwise, add the current level subject's mask to the current computed mask
59931 + */
59932 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
59933 + cap_raise(cap_mask, cap);
59934 + if (cap_raised(curracl->cap_lower, cap))
59935 + cap_raise(cap_drop, cap);
59936 + }
59937 + }
59938 +
59939 + if (!cap_raised(cap_drop, cap))
59940 + return 1;
59941 +
59942 + return 0;
59943 +}
59944 +
59945 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
59946 new file mode 100644
59947 index 0000000..d5f210c
59948 --- /dev/null
59949 +++ b/grsecurity/gracl_fs.c
59950 @@ -0,0 +1,433 @@
59951 +#include <linux/kernel.h>
59952 +#include <linux/sched.h>
59953 +#include <linux/types.h>
59954 +#include <linux/fs.h>
59955 +#include <linux/file.h>
59956 +#include <linux/stat.h>
59957 +#include <linux/grsecurity.h>
59958 +#include <linux/grinternal.h>
59959 +#include <linux/gracl.h>
59960 +
59961 +__u32
59962 +gr_acl_handle_hidden_file(const struct dentry * dentry,
59963 + const struct vfsmount * mnt)
59964 +{
59965 + __u32 mode;
59966 +
59967 + if (unlikely(!dentry->d_inode))
59968 + return GR_FIND;
59969 +
59970 + mode =
59971 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
59972 +
59973 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
59974 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
59975 + return mode;
59976 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
59977 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
59978 + return 0;
59979 + } else if (unlikely(!(mode & GR_FIND)))
59980 + return 0;
59981 +
59982 + return GR_FIND;
59983 +}
59984 +
59985 +__u32
59986 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
59987 + int acc_mode)
59988 +{
59989 + __u32 reqmode = GR_FIND;
59990 + __u32 mode;
59991 +
59992 + if (unlikely(!dentry->d_inode))
59993 + return reqmode;
59994 +
59995 + if (acc_mode & MAY_APPEND)
59996 + reqmode |= GR_APPEND;
59997 + else if (acc_mode & MAY_WRITE)
59998 + reqmode |= GR_WRITE;
59999 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
60000 + reqmode |= GR_READ;
60001 +
60002 + mode =
60003 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
60004 + mnt);
60005 +
60006 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
60007 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
60008 + reqmode & GR_READ ? " reading" : "",
60009 + reqmode & GR_WRITE ? " writing" : reqmode &
60010 + GR_APPEND ? " appending" : "");
60011 + return reqmode;
60012 + } else
60013 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
60014 + {
60015 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
60016 + reqmode & GR_READ ? " reading" : "",
60017 + reqmode & GR_WRITE ? " writing" : reqmode &
60018 + GR_APPEND ? " appending" : "");
60019 + return 0;
60020 + } else if (unlikely((mode & reqmode) != reqmode))
60021 + return 0;
60022 +
60023 + return reqmode;
60024 +}
60025 +
60026 +__u32
60027 +gr_acl_handle_creat(const struct dentry * dentry,
60028 + const struct dentry * p_dentry,
60029 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
60030 + const int imode)
60031 +{
60032 + __u32 reqmode = GR_WRITE | GR_CREATE;
60033 + __u32 mode;
60034 +
60035 + if (acc_mode & MAY_APPEND)
60036 + reqmode |= GR_APPEND;
60037 + // if a directory was required or the directory already exists, then
60038 + // don't count this open as a read
60039 + if ((acc_mode & MAY_READ) &&
60040 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
60041 + reqmode |= GR_READ;
60042 + if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
60043 + reqmode |= GR_SETID;
60044 +
60045 + mode =
60046 + gr_check_create(dentry, p_dentry, p_mnt,
60047 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
60048 +
60049 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
60050 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
60051 + reqmode & GR_READ ? " reading" : "",
60052 + reqmode & GR_WRITE ? " writing" : reqmode &
60053 + GR_APPEND ? " appending" : "");
60054 + return reqmode;
60055 + } else
60056 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
60057 + {
60058 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
60059 + reqmode & GR_READ ? " reading" : "",
60060 + reqmode & GR_WRITE ? " writing" : reqmode &
60061 + GR_APPEND ? " appending" : "");
60062 + return 0;
60063 + } else if (unlikely((mode & reqmode) != reqmode))
60064 + return 0;
60065 +
60066 + return reqmode;
60067 +}
60068 +
60069 +__u32
60070 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
60071 + const int fmode)
60072 +{
60073 + __u32 mode, reqmode = GR_FIND;
60074 +
60075 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
60076 + reqmode |= GR_EXEC;
60077 + if (fmode & S_IWOTH)
60078 + reqmode |= GR_WRITE;
60079 + if (fmode & S_IROTH)
60080 + reqmode |= GR_READ;
60081 +
60082 + mode =
60083 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
60084 + mnt);
60085 +
60086 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
60087 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
60088 + reqmode & GR_READ ? " reading" : "",
60089 + reqmode & GR_WRITE ? " writing" : "",
60090 + reqmode & GR_EXEC ? " executing" : "");
60091 + return reqmode;
60092 + } else
60093 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
60094 + {
60095 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
60096 + reqmode & GR_READ ? " reading" : "",
60097 + reqmode & GR_WRITE ? " writing" : "",
60098 + reqmode & GR_EXEC ? " executing" : "");
60099 + return 0;
60100 + } else if (unlikely((mode & reqmode) != reqmode))
60101 + return 0;
60102 +
60103 + return reqmode;
60104 +}
60105 +
60106 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
60107 +{
60108 + __u32 mode;
60109 +
60110 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
60111 +
60112 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
60113 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
60114 + return mode;
60115 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
60116 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
60117 + return 0;
60118 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
60119 + return 0;
60120 +
60121 + return (reqmode);
60122 +}
60123 +
60124 +__u32
60125 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
60126 +{
60127 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
60128 +}
60129 +
60130 +__u32
60131 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
60132 +{
60133 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
60134 +}
60135 +
60136 +__u32
60137 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
60138 +{
60139 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
60140 +}
60141 +
60142 +__u32
60143 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
60144 +{
60145 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
60146 +}
60147 +
60148 +__u32
60149 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
60150 + mode_t mode)
60151 +{
60152 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
60153 + return 1;
60154 +
60155 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
60156 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
60157 + GR_FCHMOD_ACL_MSG);
60158 + } else {
60159 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
60160 + }
60161 +}
60162 +
60163 +__u32
60164 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
60165 + mode_t mode)
60166 +{
60167 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
60168 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
60169 + GR_CHMOD_ACL_MSG);
60170 + } else {
60171 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
60172 + }
60173 +}
60174 +
60175 +__u32
60176 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
60177 +{
60178 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
60179 +}
60180 +
60181 +__u32
60182 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
60183 +{
60184 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
60185 +}
60186 +
60187 +__u32
60188 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
60189 +{
60190 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
60191 +}
60192 +
60193 +__u32
60194 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
60195 +{
60196 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
60197 + GR_UNIXCONNECT_ACL_MSG);
60198 +}
60199 +
60200 +/* hardlinks require at minimum create and link permission,
60201 + any additional privilege required is based on the
60202 + privilege of the file being linked to
60203 +*/
60204 +__u32
60205 +gr_acl_handle_link(const struct dentry * new_dentry,
60206 + const struct dentry * parent_dentry,
60207 + const struct vfsmount * parent_mnt,
60208 + const struct dentry * old_dentry,
60209 + const struct vfsmount * old_mnt, const char *to)
60210 +{
60211 + __u32 mode;
60212 + __u32 needmode = GR_CREATE | GR_LINK;
60213 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
60214 +
60215 + mode =
60216 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
60217 + old_mnt);
60218 +
60219 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
60220 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
60221 + return mode;
60222 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
60223 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
60224 + return 0;
60225 + } else if (unlikely((mode & needmode) != needmode))
60226 + return 0;
60227 +
60228 + return 1;
60229 +}
60230 +
60231 +__u32
60232 +gr_acl_handle_symlink(const struct dentry * new_dentry,
60233 + const struct dentry * parent_dentry,
60234 + const struct vfsmount * parent_mnt, const char *from)
60235 +{
60236 + __u32 needmode = GR_WRITE | GR_CREATE;
60237 + __u32 mode;
60238 +
60239 + mode =
60240 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
60241 + GR_CREATE | GR_AUDIT_CREATE |
60242 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
60243 +
60244 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
60245 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
60246 + return mode;
60247 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
60248 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
60249 + return 0;
60250 + } else if (unlikely((mode & needmode) != needmode))
60251 + return 0;
60252 +
60253 + return (GR_WRITE | GR_CREATE);
60254 +}
60255 +
60256 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
60257 +{
60258 + __u32 mode;
60259 +
60260 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
60261 +
60262 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
60263 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
60264 + return mode;
60265 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
60266 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
60267 + return 0;
60268 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
60269 + return 0;
60270 +
60271 + return (reqmode);
60272 +}
60273 +
60274 +__u32
60275 +gr_acl_handle_mknod(const struct dentry * new_dentry,
60276 + const struct dentry * parent_dentry,
60277 + const struct vfsmount * parent_mnt,
60278 + const int mode)
60279 +{
60280 + __u32 reqmode = GR_WRITE | GR_CREATE;
60281 + if (unlikely(mode & (S_ISUID | S_ISGID)))
60282 + reqmode |= GR_SETID;
60283 +
60284 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
60285 + reqmode, GR_MKNOD_ACL_MSG);
60286 +}
60287 +
60288 +__u32
60289 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
60290 + const struct dentry *parent_dentry,
60291 + const struct vfsmount *parent_mnt)
60292 +{
60293 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
60294 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
60295 +}
60296 +
60297 +#define RENAME_CHECK_SUCCESS(old, new) \
60298 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
60299 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
60300 +
60301 +int
60302 +gr_acl_handle_rename(struct dentry *new_dentry,
60303 + struct dentry *parent_dentry,
60304 + const struct vfsmount *parent_mnt,
60305 + struct dentry *old_dentry,
60306 + struct inode *old_parent_inode,
60307 + struct vfsmount *old_mnt, const char *newname)
60308 +{
60309 + __u32 comp1, comp2;
60310 + int error = 0;
60311 +
60312 + if (unlikely(!gr_acl_is_enabled()))
60313 + return 0;
60314 +
60315 + if (!new_dentry->d_inode) {
60316 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
60317 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
60318 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
60319 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
60320 + GR_DELETE | GR_AUDIT_DELETE |
60321 + GR_AUDIT_READ | GR_AUDIT_WRITE |
60322 + GR_SUPPRESS, old_mnt);
60323 + } else {
60324 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
60325 + GR_CREATE | GR_DELETE |
60326 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
60327 + GR_AUDIT_READ | GR_AUDIT_WRITE |
60328 + GR_SUPPRESS, parent_mnt);
60329 + comp2 =
60330 + gr_search_file(old_dentry,
60331 + GR_READ | GR_WRITE | GR_AUDIT_READ |
60332 + GR_DELETE | GR_AUDIT_DELETE |
60333 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
60334 + }
60335 +
60336 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
60337 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
60338 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
60339 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
60340 + && !(comp2 & GR_SUPPRESS)) {
60341 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
60342 + error = -EACCES;
60343 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
60344 + error = -EACCES;
60345 +
60346 + return error;
60347 +}
60348 +
60349 +void
60350 +gr_acl_handle_exit(void)
60351 +{
60352 + u16 id;
60353 + char *rolename;
60354 + struct file *exec_file;
60355 +
60356 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
60357 + !(current->role->roletype & GR_ROLE_PERSIST))) {
60358 + id = current->acl_role_id;
60359 + rolename = current->role->rolename;
60360 + gr_set_acls(1);
60361 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
60362 + }
60363 +
60364 + write_lock(&grsec_exec_file_lock);
60365 + exec_file = current->exec_file;
60366 + current->exec_file = NULL;
60367 + write_unlock(&grsec_exec_file_lock);
60368 +
60369 + if (exec_file)
60370 + fput(exec_file);
60371 +}
60372 +
60373 +int
60374 +gr_acl_handle_procpidmem(const struct task_struct *task)
60375 +{
60376 + if (unlikely(!gr_acl_is_enabled()))
60377 + return 0;
60378 +
60379 + if (task != current && task->acl->mode & GR_PROTPROCFD)
60380 + return -EACCES;
60381 +
60382 + return 0;
60383 +}
60384 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
60385 new file mode 100644
60386 index 0000000..cd07b96
60387 --- /dev/null
60388 +++ b/grsecurity/gracl_ip.c
60389 @@ -0,0 +1,382 @@
60390 +#include <linux/kernel.h>
60391 +#include <asm/uaccess.h>
60392 +#include <asm/errno.h>
60393 +#include <net/sock.h>
60394 +#include <linux/file.h>
60395 +#include <linux/fs.h>
60396 +#include <linux/net.h>
60397 +#include <linux/in.h>
60398 +#include <linux/skbuff.h>
60399 +#include <linux/ip.h>
60400 +#include <linux/udp.h>
60401 +#include <linux/smp_lock.h>
60402 +#include <linux/types.h>
60403 +#include <linux/sched.h>
60404 +#include <linux/netdevice.h>
60405 +#include <linux/inetdevice.h>
60406 +#include <linux/gracl.h>
60407 +#include <linux/grsecurity.h>
60408 +#include <linux/grinternal.h>
60409 +
60410 +#define GR_BIND 0x01
60411 +#define GR_CONNECT 0x02
60412 +#define GR_INVERT 0x04
60413 +#define GR_BINDOVERRIDE 0x08
60414 +#define GR_CONNECTOVERRIDE 0x10
60415 +#define GR_SOCK_FAMILY 0x20
60416 +
60417 +static const char * gr_protocols[IPPROTO_MAX] = {
60418 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
60419 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
60420 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
60421 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
60422 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
60423 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
60424 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
60425 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
60426 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
60427 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
60428 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
60429 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
60430 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
60431 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
60432 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
60433 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
60434 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
60435 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
60436 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
60437 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
60438 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
60439 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
60440 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
60441 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
60442 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
60443 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
60444 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
60445 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
60446 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
60447 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
60448 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
60449 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
60450 + };
60451 +
60452 +static const char * gr_socktypes[SOCK_MAX] = {
60453 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
60454 + "unknown:7", "unknown:8", "unknown:9", "packet"
60455 + };
60456 +
60457 +static const char * gr_sockfamilies[AF_MAX+1] = {
60458 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
60459 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
60460 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
60461 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
60462 + };
60463 +
60464 +const char *
60465 +gr_proto_to_name(unsigned char proto)
60466 +{
60467 + return gr_protocols[proto];
60468 +}
60469 +
60470 +const char *
60471 +gr_socktype_to_name(unsigned char type)
60472 +{
60473 + return gr_socktypes[type];
60474 +}
60475 +
60476 +const char *
60477 +gr_sockfamily_to_name(unsigned char family)
60478 +{
60479 + return gr_sockfamilies[family];
60480 +}
60481 +
60482 +int
60483 +gr_search_socket(const int domain, const int type, const int protocol)
60484 +{
60485 + struct acl_subject_label *curr;
60486 + const struct cred *cred = current_cred();
60487 +
60488 + if (unlikely(!gr_acl_is_enabled()))
60489 + goto exit;
60490 +
60491 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
60492 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
60493 + goto exit; // let the kernel handle it
60494 +
60495 + curr = current->acl;
60496 +
60497 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
60498 + /* the family is allowed, if this is PF_INET allow it only if
60499 + the extra sock type/protocol checks pass */
60500 + if (domain == PF_INET)
60501 + goto inet_check;
60502 + goto exit;
60503 + } else {
60504 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
60505 + __u32 fakeip = 0;
60506 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
60507 + current->role->roletype, cred->uid,
60508 + cred->gid, current->exec_file ?
60509 + gr_to_filename(current->exec_file->f_path.dentry,
60510 + current->exec_file->f_path.mnt) :
60511 + curr->filename, curr->filename,
60512 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
60513 + &current->signal->saved_ip);
60514 + goto exit;
60515 + }
60516 + goto exit_fail;
60517 + }
60518 +
60519 +inet_check:
60520 + /* the rest of this checking is for IPv4 only */
60521 + if (!curr->ips)
60522 + goto exit;
60523 +
60524 + if ((curr->ip_type & (1 << type)) &&
60525 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
60526 + goto exit;
60527 +
60528 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
60529 + /* we don't place acls on raw sockets , and sometimes
60530 + dgram/ip sockets are opened for ioctl and not
60531 + bind/connect, so we'll fake a bind learn log */
60532 + if (type == SOCK_RAW || type == SOCK_PACKET) {
60533 + __u32 fakeip = 0;
60534 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
60535 + current->role->roletype, cred->uid,
60536 + cred->gid, current->exec_file ?
60537 + gr_to_filename(current->exec_file->f_path.dentry,
60538 + current->exec_file->f_path.mnt) :
60539 + curr->filename, curr->filename,
60540 + &fakeip, 0, type,
60541 + protocol, GR_CONNECT, &current->signal->saved_ip);
60542 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
60543 + __u32 fakeip = 0;
60544 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
60545 + current->role->roletype, cred->uid,
60546 + cred->gid, current->exec_file ?
60547 + gr_to_filename(current->exec_file->f_path.dentry,
60548 + current->exec_file->f_path.mnt) :
60549 + curr->filename, curr->filename,
60550 + &fakeip, 0, type,
60551 + protocol, GR_BIND, &current->signal->saved_ip);
60552 + }
60553 + /* we'll log when they use connect or bind */
60554 + goto exit;
60555 + }
60556 +
60557 +exit_fail:
60558 + if (domain == PF_INET)
60559 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
60560 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
60561 + else
60562 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
60563 + gr_socktype_to_name(type), protocol);
60564 +
60565 + return 0;
60566 +exit:
60567 + return 1;
60568 +}
60569 +
60570 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
60571 +{
60572 + if ((ip->mode & mode) &&
60573 + (ip_port >= ip->low) &&
60574 + (ip_port <= ip->high) &&
60575 + ((ntohl(ip_addr) & our_netmask) ==
60576 + (ntohl(our_addr) & our_netmask))
60577 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
60578 + && (ip->type & (1 << type))) {
60579 + if (ip->mode & GR_INVERT)
60580 + return 2; // specifically denied
60581 + else
60582 + return 1; // allowed
60583 + }
60584 +
60585 + return 0; // not specifically allowed, may continue parsing
60586 +}
60587 +
60588 +static int
60589 +gr_search_connectbind(const int full_mode, struct sock *sk,
60590 + struct sockaddr_in *addr, const int type)
60591 +{
60592 + char iface[IFNAMSIZ] = {0};
60593 + struct acl_subject_label *curr;
60594 + struct acl_ip_label *ip;
60595 + struct inet_sock *isk;
60596 + struct net_device *dev;
60597 + struct in_device *idev;
60598 + unsigned long i;
60599 + int ret;
60600 + int mode = full_mode & (GR_BIND | GR_CONNECT);
60601 + __u32 ip_addr = 0;
60602 + __u32 our_addr;
60603 + __u32 our_netmask;
60604 + char *p;
60605 + __u16 ip_port = 0;
60606 + const struct cred *cred = current_cred();
60607 +
60608 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
60609 + return 0;
60610 +
60611 + curr = current->acl;
60612 + isk = inet_sk(sk);
60613 +
60614 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
60615 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
60616 + addr->sin_addr.s_addr = curr->inaddr_any_override;
60617 + if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
60618 + struct sockaddr_in saddr;
60619 + int err;
60620 +
60621 + saddr.sin_family = AF_INET;
60622 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
60623 + saddr.sin_port = isk->sport;
60624 +
60625 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
60626 + if (err)
60627 + return err;
60628 +
60629 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
60630 + if (err)
60631 + return err;
60632 + }
60633 +
60634 + if (!curr->ips)
60635 + return 0;
60636 +
60637 + ip_addr = addr->sin_addr.s_addr;
60638 + ip_port = ntohs(addr->sin_port);
60639 +
60640 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
60641 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
60642 + current->role->roletype, cred->uid,
60643 + cred->gid, current->exec_file ?
60644 + gr_to_filename(current->exec_file->f_path.dentry,
60645 + current->exec_file->f_path.mnt) :
60646 + curr->filename, curr->filename,
60647 + &ip_addr, ip_port, type,
60648 + sk->sk_protocol, mode, &current->signal->saved_ip);
60649 + return 0;
60650 + }
60651 +
60652 + for (i = 0; i < curr->ip_num; i++) {
60653 + ip = *(curr->ips + i);
60654 + if (ip->iface != NULL) {
60655 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
60656 + p = strchr(iface, ':');
60657 + if (p != NULL)
60658 + *p = '\0';
60659 + dev = dev_get_by_name(sock_net(sk), iface);
60660 + if (dev == NULL)
60661 + continue;
60662 + idev = in_dev_get(dev);
60663 + if (idev == NULL) {
60664 + dev_put(dev);
60665 + continue;
60666 + }
60667 + rcu_read_lock();
60668 + for_ifa(idev) {
60669 + if (!strcmp(ip->iface, ifa->ifa_label)) {
60670 + our_addr = ifa->ifa_address;
60671 + our_netmask = 0xffffffff;
60672 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
60673 + if (ret == 1) {
60674 + rcu_read_unlock();
60675 + in_dev_put(idev);
60676 + dev_put(dev);
60677 + return 0;
60678 + } else if (ret == 2) {
60679 + rcu_read_unlock();
60680 + in_dev_put(idev);
60681 + dev_put(dev);
60682 + goto denied;
60683 + }
60684 + }
60685 + } endfor_ifa(idev);
60686 + rcu_read_unlock();
60687 + in_dev_put(idev);
60688 + dev_put(dev);
60689 + } else {
60690 + our_addr = ip->addr;
60691 + our_netmask = ip->netmask;
60692 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
60693 + if (ret == 1)
60694 + return 0;
60695 + else if (ret == 2)
60696 + goto denied;
60697 + }
60698 + }
60699 +
60700 +denied:
60701 + if (mode == GR_BIND)
60702 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
60703 + else if (mode == GR_CONNECT)
60704 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
60705 +
60706 + return -EACCES;
60707 +}
60708 +
60709 +int
60710 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
60711 +{
60712 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
60713 +}
60714 +
60715 +int
60716 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
60717 +{
60718 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
60719 +}
60720 +
60721 +int gr_search_listen(struct socket *sock)
60722 +{
60723 + struct sock *sk = sock->sk;
60724 + struct sockaddr_in addr;
60725 +
60726 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
60727 + addr.sin_port = inet_sk(sk)->sport;
60728 +
60729 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
60730 +}
60731 +
60732 +int gr_search_accept(struct socket *sock)
60733 +{
60734 + struct sock *sk = sock->sk;
60735 + struct sockaddr_in addr;
60736 +
60737 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
60738 + addr.sin_port = inet_sk(sk)->sport;
60739 +
60740 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
60741 +}
60742 +
60743 +int
60744 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
60745 +{
60746 + if (addr)
60747 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
60748 + else {
60749 + struct sockaddr_in sin;
60750 + const struct inet_sock *inet = inet_sk(sk);
60751 +
60752 + sin.sin_addr.s_addr = inet->daddr;
60753 + sin.sin_port = inet->dport;
60754 +
60755 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
60756 + }
60757 +}
60758 +
60759 +int
60760 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
60761 +{
60762 + struct sockaddr_in sin;
60763 +
60764 + if (unlikely(skb->len < sizeof (struct udphdr)))
60765 + return 0; // skip this packet
60766 +
60767 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
60768 + sin.sin_port = udp_hdr(skb)->source;
60769 +
60770 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
60771 +}
60772 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
60773 new file mode 100644
60774 index 0000000..34bdd46
60775 --- /dev/null
60776 +++ b/grsecurity/gracl_learn.c
60777 @@ -0,0 +1,208 @@
60778 +#include <linux/kernel.h>
60779 +#include <linux/mm.h>
60780 +#include <linux/sched.h>
60781 +#include <linux/poll.h>
60782 +#include <linux/smp_lock.h>
60783 +#include <linux/string.h>
60784 +#include <linux/file.h>
60785 +#include <linux/types.h>
60786 +#include <linux/vmalloc.h>
60787 +#include <linux/grinternal.h>
60788 +
60789 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
60790 + size_t count, loff_t *ppos);
60791 +extern int gr_acl_is_enabled(void);
60792 +
60793 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
60794 +static int gr_learn_attached;
60795 +
60796 +/* use a 512k buffer */
60797 +#define LEARN_BUFFER_SIZE (512 * 1024)
60798 +
60799 +static DEFINE_SPINLOCK(gr_learn_lock);
60800 +static DEFINE_MUTEX(gr_learn_user_mutex);
60801 +
60802 +/* we need to maintain two buffers, so that the kernel context of grlearn
60803 + uses a semaphore around the userspace copying, and the other kernel contexts
60804 + use a spinlock when copying into the buffer, since they cannot sleep
60805 +*/
60806 +static char *learn_buffer;
60807 +static char *learn_buffer_user;
60808 +static int learn_buffer_len;
60809 +static int learn_buffer_user_len;
60810 +
60811 +static ssize_t
60812 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
60813 +{
60814 + DECLARE_WAITQUEUE(wait, current);
60815 + ssize_t retval = 0;
60816 +
60817 + add_wait_queue(&learn_wait, &wait);
60818 + set_current_state(TASK_INTERRUPTIBLE);
60819 + do {
60820 + mutex_lock(&gr_learn_user_mutex);
60821 + spin_lock(&gr_learn_lock);
60822 + if (learn_buffer_len)
60823 + break;
60824 + spin_unlock(&gr_learn_lock);
60825 + mutex_unlock(&gr_learn_user_mutex);
60826 + if (file->f_flags & O_NONBLOCK) {
60827 + retval = -EAGAIN;
60828 + goto out;
60829 + }
60830 + if (signal_pending(current)) {
60831 + retval = -ERESTARTSYS;
60832 + goto out;
60833 + }
60834 +
60835 + schedule();
60836 + } while (1);
60837 +
60838 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
60839 + learn_buffer_user_len = learn_buffer_len;
60840 + retval = learn_buffer_len;
60841 + learn_buffer_len = 0;
60842 +
60843 + spin_unlock(&gr_learn_lock);
60844 +
60845 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
60846 + retval = -EFAULT;
60847 +
60848 + mutex_unlock(&gr_learn_user_mutex);
60849 +out:
60850 + set_current_state(TASK_RUNNING);
60851 + remove_wait_queue(&learn_wait, &wait);
60852 + return retval;
60853 +}
60854 +
60855 +static unsigned int
60856 +poll_learn(struct file * file, poll_table * wait)
60857 +{
60858 + poll_wait(file, &learn_wait, wait);
60859 +
60860 + if (learn_buffer_len)
60861 + return (POLLIN | POLLRDNORM);
60862 +
60863 + return 0;
60864 +}
60865 +
60866 +void
60867 +gr_clear_learn_entries(void)
60868 +{
60869 + char *tmp;
60870 +
60871 + mutex_lock(&gr_learn_user_mutex);
60872 + spin_lock(&gr_learn_lock);
60873 + tmp = learn_buffer;
60874 + learn_buffer = NULL;
60875 + spin_unlock(&gr_learn_lock);
60876 + if (tmp)
60877 + vfree(tmp);
60878 + if (learn_buffer_user != NULL) {
60879 + vfree(learn_buffer_user);
60880 + learn_buffer_user = NULL;
60881 + }
60882 + learn_buffer_len = 0;
60883 + mutex_unlock(&gr_learn_user_mutex);
60884 +
60885 + return;
60886 +}
60887 +
60888 +void
60889 +gr_add_learn_entry(const char *fmt, ...)
60890 +{
60891 + va_list args;
60892 + unsigned int len;
60893 +
60894 + if (!gr_learn_attached)
60895 + return;
60896 +
60897 + spin_lock(&gr_learn_lock);
60898 +
60899 + /* leave a gap at the end so we know when it's "full" but don't have to
60900 + compute the exact length of the string we're trying to append
60901 + */
60902 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
60903 + spin_unlock(&gr_learn_lock);
60904 + wake_up_interruptible(&learn_wait);
60905 + return;
60906 + }
60907 + if (learn_buffer == NULL) {
60908 + spin_unlock(&gr_learn_lock);
60909 + return;
60910 + }
60911 +
60912 + va_start(args, fmt);
60913 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
60914 + va_end(args);
60915 +
60916 + learn_buffer_len += len + 1;
60917 +
60918 + spin_unlock(&gr_learn_lock);
60919 + wake_up_interruptible(&learn_wait);
60920 +
60921 + return;
60922 +}
60923 +
60924 +static int
60925 +open_learn(struct inode *inode, struct file *file)
60926 +{
60927 + if (file->f_mode & FMODE_READ && gr_learn_attached)
60928 + return -EBUSY;
60929 + if (file->f_mode & FMODE_READ) {
60930 + int retval = 0;
60931 + mutex_lock(&gr_learn_user_mutex);
60932 + if (learn_buffer == NULL)
60933 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
60934 + if (learn_buffer_user == NULL)
60935 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
60936 + if (learn_buffer == NULL) {
60937 + retval = -ENOMEM;
60938 + goto out_error;
60939 + }
60940 + if (learn_buffer_user == NULL) {
60941 + retval = -ENOMEM;
60942 + goto out_error;
60943 + }
60944 + learn_buffer_len = 0;
60945 + learn_buffer_user_len = 0;
60946 + gr_learn_attached = 1;
60947 +out_error:
60948 + mutex_unlock(&gr_learn_user_mutex);
60949 + return retval;
60950 + }
60951 + return 0;
60952 +}
60953 +
60954 +static int
60955 +close_learn(struct inode *inode, struct file *file)
60956 +{
60957 + if (file->f_mode & FMODE_READ) {
60958 + char *tmp = NULL;
60959 + mutex_lock(&gr_learn_user_mutex);
60960 + spin_lock(&gr_learn_lock);
60961 + tmp = learn_buffer;
60962 + learn_buffer = NULL;
60963 + spin_unlock(&gr_learn_lock);
60964 + if (tmp)
60965 + vfree(tmp);
60966 + if (learn_buffer_user != NULL) {
60967 + vfree(learn_buffer_user);
60968 + learn_buffer_user = NULL;
60969 + }
60970 + learn_buffer_len = 0;
60971 + learn_buffer_user_len = 0;
60972 + gr_learn_attached = 0;
60973 + mutex_unlock(&gr_learn_user_mutex);
60974 + }
60975 +
60976 + return 0;
60977 +}
60978 +
60979 +const struct file_operations grsec_fops = {
60980 + .read = read_learn,
60981 + .write = write_grsec_handler,
60982 + .open = open_learn,
60983 + .release = close_learn,
60984 + .poll = poll_learn,
60985 +};
60986 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
60987 new file mode 100644
60988 index 0000000..70b2179
60989 --- /dev/null
60990 +++ b/grsecurity/gracl_res.c
60991 @@ -0,0 +1,67 @@
60992 +#include <linux/kernel.h>
60993 +#include <linux/sched.h>
60994 +#include <linux/gracl.h>
60995 +#include <linux/grinternal.h>
60996 +
60997 +static const char *restab_log[] = {
60998 + [RLIMIT_CPU] = "RLIMIT_CPU",
60999 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
61000 + [RLIMIT_DATA] = "RLIMIT_DATA",
61001 + [RLIMIT_STACK] = "RLIMIT_STACK",
61002 + [RLIMIT_CORE] = "RLIMIT_CORE",
61003 + [RLIMIT_RSS] = "RLIMIT_RSS",
61004 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
61005 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
61006 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
61007 + [RLIMIT_AS] = "RLIMIT_AS",
61008 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
61009 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
61010 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
61011 + [RLIMIT_NICE] = "RLIMIT_NICE",
61012 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
61013 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
61014 + [GR_CRASH_RES] = "RLIMIT_CRASH"
61015 +};
61016 +
61017 +void
61018 +gr_log_resource(const struct task_struct *task,
61019 + const int res, const unsigned long wanted, const int gt)
61020 +{
61021 + const struct cred *cred;
61022 + unsigned long rlim;
61023 +
61024 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
61025 + return;
61026 +
61027 + // not yet supported resource
61028 + if (unlikely(!restab_log[res]))
61029 + return;
61030 +
61031 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
61032 + rlim = task->signal->rlim[res].rlim_max;
61033 + else
61034 + rlim = task->signal->rlim[res].rlim_cur;
61035 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
61036 + return;
61037 +
61038 + rcu_read_lock();
61039 + cred = __task_cred(task);
61040 +
61041 + if (res == RLIMIT_NPROC &&
61042 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
61043 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
61044 + goto out_rcu_unlock;
61045 + else if (res == RLIMIT_MEMLOCK &&
61046 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
61047 + goto out_rcu_unlock;
61048 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
61049 + goto out_rcu_unlock;
61050 + rcu_read_unlock();
61051 +
61052 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
61053 +
61054 + return;
61055 +out_rcu_unlock:
61056 + rcu_read_unlock();
61057 + return;
61058 +}
61059 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
61060 new file mode 100644
61061 index 0000000..1d1b734
61062 --- /dev/null
61063 +++ b/grsecurity/gracl_segv.c
61064 @@ -0,0 +1,284 @@
61065 +#include <linux/kernel.h>
61066 +#include <linux/mm.h>
61067 +#include <asm/uaccess.h>
61068 +#include <asm/errno.h>
61069 +#include <asm/mman.h>
61070 +#include <net/sock.h>
61071 +#include <linux/file.h>
61072 +#include <linux/fs.h>
61073 +#include <linux/net.h>
61074 +#include <linux/in.h>
61075 +#include <linux/smp_lock.h>
61076 +#include <linux/slab.h>
61077 +#include <linux/types.h>
61078 +#include <linux/sched.h>
61079 +#include <linux/timer.h>
61080 +#include <linux/gracl.h>
61081 +#include <linux/grsecurity.h>
61082 +#include <linux/grinternal.h>
61083 +
61084 +static struct crash_uid *uid_set;
61085 +static unsigned short uid_used;
61086 +static DEFINE_SPINLOCK(gr_uid_lock);
61087 +extern rwlock_t gr_inode_lock;
61088 +extern struct acl_subject_label *
61089 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
61090 + struct acl_role_label *role);
61091 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
61092 +
61093 +int
61094 +gr_init_uidset(void)
61095 +{
61096 + uid_set =
61097 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
61098 + uid_used = 0;
61099 +
61100 + return uid_set ? 1 : 0;
61101 +}
61102 +
61103 +void
61104 +gr_free_uidset(void)
61105 +{
61106 + if (uid_set)
61107 + kfree(uid_set);
61108 +
61109 + return;
61110 +}
61111 +
61112 +int
61113 +gr_find_uid(const uid_t uid)
61114 +{
61115 + struct crash_uid *tmp = uid_set;
61116 + uid_t buid;
61117 + int low = 0, high = uid_used - 1, mid;
61118 +
61119 + while (high >= low) {
61120 + mid = (low + high) >> 1;
61121 + buid = tmp[mid].uid;
61122 + if (buid == uid)
61123 + return mid;
61124 + if (buid > uid)
61125 + high = mid - 1;
61126 + if (buid < uid)
61127 + low = mid + 1;
61128 + }
61129 +
61130 + return -1;
61131 +}
61132 +
61133 +static __inline__ void
61134 +gr_insertsort(void)
61135 +{
61136 + unsigned short i, j;
61137 + struct crash_uid index;
61138 +
61139 + for (i = 1; i < uid_used; i++) {
61140 + index = uid_set[i];
61141 + j = i;
61142 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
61143 + uid_set[j] = uid_set[j - 1];
61144 + j--;
61145 + }
61146 + uid_set[j] = index;
61147 + }
61148 +
61149 + return;
61150 +}
61151 +
61152 +static __inline__ void
61153 +gr_insert_uid(const uid_t uid, const unsigned long expires)
61154 +{
61155 + int loc;
61156 +
61157 + if (uid_used == GR_UIDTABLE_MAX)
61158 + return;
61159 +
61160 + loc = gr_find_uid(uid);
61161 +
61162 + if (loc >= 0) {
61163 + uid_set[loc].expires = expires;
61164 + return;
61165 + }
61166 +
61167 + uid_set[uid_used].uid = uid;
61168 + uid_set[uid_used].expires = expires;
61169 + uid_used++;
61170 +
61171 + gr_insertsort();
61172 +
61173 + return;
61174 +}
61175 +
61176 +void
61177 +gr_remove_uid(const unsigned short loc)
61178 +{
61179 + unsigned short i;
61180 +
61181 + for (i = loc + 1; i < uid_used; i++)
61182 + uid_set[i - 1] = uid_set[i];
61183 +
61184 + uid_used--;
61185 +
61186 + return;
61187 +}
61188 +
61189 +int
61190 +gr_check_crash_uid(const uid_t uid)
61191 +{
61192 + int loc;
61193 + int ret = 0;
61194 +
61195 + if (unlikely(!gr_acl_is_enabled()))
61196 + return 0;
61197 +
61198 + spin_lock(&gr_uid_lock);
61199 + loc = gr_find_uid(uid);
61200 +
61201 + if (loc < 0)
61202 + goto out_unlock;
61203 +
61204 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
61205 + gr_remove_uid(loc);
61206 + else
61207 + ret = 1;
61208 +
61209 +out_unlock:
61210 + spin_unlock(&gr_uid_lock);
61211 + return ret;
61212 +}
61213 +
61214 +static __inline__ int
61215 +proc_is_setxid(const struct cred *cred)
61216 +{
61217 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
61218 + cred->uid != cred->fsuid)
61219 + return 1;
61220 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
61221 + cred->gid != cred->fsgid)
61222 + return 1;
61223 +
61224 + return 0;
61225 +}
61226 +
61227 +void
61228 +gr_handle_crash(struct task_struct *task, const int sig)
61229 +{
61230 + struct acl_subject_label *curr;
61231 + struct task_struct *tsk, *tsk2;
61232 + const struct cred *cred;
61233 + const struct cred *cred2;
61234 +
61235 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
61236 + return;
61237 +
61238 + if (unlikely(!gr_acl_is_enabled()))
61239 + return;
61240 +
61241 + curr = task->acl;
61242 +
61243 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
61244 + return;
61245 +
61246 + if (time_before_eq(curr->expires, get_seconds())) {
61247 + curr->expires = 0;
61248 + curr->crashes = 0;
61249 + }
61250 +
61251 + curr->crashes++;
61252 +
61253 + if (!curr->expires)
61254 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
61255 +
61256 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
61257 + time_after(curr->expires, get_seconds())) {
61258 + rcu_read_lock();
61259 + cred = __task_cred(task);
61260 + if (cred->uid && proc_is_setxid(cred)) {
61261 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
61262 + spin_lock(&gr_uid_lock);
61263 + gr_insert_uid(cred->uid, curr->expires);
61264 + spin_unlock(&gr_uid_lock);
61265 + curr->expires = 0;
61266 + curr->crashes = 0;
61267 + read_lock(&tasklist_lock);
61268 + do_each_thread(tsk2, tsk) {
61269 + cred2 = __task_cred(tsk);
61270 + if (tsk != task && cred2->uid == cred->uid)
61271 + gr_fake_force_sig(SIGKILL, tsk);
61272 + } while_each_thread(tsk2, tsk);
61273 + read_unlock(&tasklist_lock);
61274 + } else {
61275 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
61276 + read_lock(&tasklist_lock);
61277 + read_lock(&grsec_exec_file_lock);
61278 + do_each_thread(tsk2, tsk) {
61279 + if (likely(tsk != task)) {
61280 + // if this thread has the same subject as the one that triggered
61281 + // RES_CRASH and it's the same binary, kill it
61282 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
61283 + gr_fake_force_sig(SIGKILL, tsk);
61284 + }
61285 + } while_each_thread(tsk2, tsk);
61286 + read_unlock(&grsec_exec_file_lock);
61287 + read_unlock(&tasklist_lock);
61288 + }
61289 + rcu_read_unlock();
61290 + }
61291 +
61292 + return;
61293 +}
61294 +
61295 +int
61296 +gr_check_crash_exec(const struct file *filp)
61297 +{
61298 + struct acl_subject_label *curr;
61299 +
61300 + if (unlikely(!gr_acl_is_enabled()))
61301 + return 0;
61302 +
61303 + read_lock(&gr_inode_lock);
61304 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
61305 + filp->f_path.dentry->d_inode->i_sb->s_dev,
61306 + current->role);
61307 + read_unlock(&gr_inode_lock);
61308 +
61309 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
61310 + (!curr->crashes && !curr->expires))
61311 + return 0;
61312 +
61313 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
61314 + time_after(curr->expires, get_seconds()))
61315 + return 1;
61316 + else if (time_before_eq(curr->expires, get_seconds())) {
61317 + curr->crashes = 0;
61318 + curr->expires = 0;
61319 + }
61320 +
61321 + return 0;
61322 +}
61323 +
61324 +void
61325 +gr_handle_alertkill(struct task_struct *task)
61326 +{
61327 + struct acl_subject_label *curracl;
61328 + __u32 curr_ip;
61329 + struct task_struct *p, *p2;
61330 +
61331 + if (unlikely(!gr_acl_is_enabled()))
61332 + return;
61333 +
61334 + curracl = task->acl;
61335 + curr_ip = task->signal->curr_ip;
61336 +
61337 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
61338 + read_lock(&tasklist_lock);
61339 + do_each_thread(p2, p) {
61340 + if (p->signal->curr_ip == curr_ip)
61341 + gr_fake_force_sig(SIGKILL, p);
61342 + } while_each_thread(p2, p);
61343 + read_unlock(&tasklist_lock);
61344 + } else if (curracl->mode & GR_KILLPROC)
61345 + gr_fake_force_sig(SIGKILL, task);
61346 +
61347 + return;
61348 +}
61349 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
61350 new file mode 100644
61351 index 0000000..9d83a69
61352 --- /dev/null
61353 +++ b/grsecurity/gracl_shm.c
61354 @@ -0,0 +1,40 @@
61355 +#include <linux/kernel.h>
61356 +#include <linux/mm.h>
61357 +#include <linux/sched.h>
61358 +#include <linux/file.h>
61359 +#include <linux/ipc.h>
61360 +#include <linux/gracl.h>
61361 +#include <linux/grsecurity.h>
61362 +#include <linux/grinternal.h>
61363 +
61364 +int
61365 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
61366 + const time_t shm_createtime, const uid_t cuid, const int shmid)
61367 +{
61368 + struct task_struct *task;
61369 +
61370 + if (!gr_acl_is_enabled())
61371 + return 1;
61372 +
61373 + rcu_read_lock();
61374 + read_lock(&tasklist_lock);
61375 +
61376 + task = find_task_by_vpid(shm_cprid);
61377 +
61378 + if (unlikely(!task))
61379 + task = find_task_by_vpid(shm_lapid);
61380 +
61381 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
61382 + (task->pid == shm_lapid)) &&
61383 + (task->acl->mode & GR_PROTSHM) &&
61384 + (task->acl != current->acl))) {
61385 + read_unlock(&tasklist_lock);
61386 + rcu_read_unlock();
61387 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
61388 + return 0;
61389 + }
61390 + read_unlock(&tasklist_lock);
61391 + rcu_read_unlock();
61392 +
61393 + return 1;
61394 +}
61395 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
61396 new file mode 100644
61397 index 0000000..bc0be01
61398 --- /dev/null
61399 +++ b/grsecurity/grsec_chdir.c
61400 @@ -0,0 +1,19 @@
61401 +#include <linux/kernel.h>
61402 +#include <linux/sched.h>
61403 +#include <linux/fs.h>
61404 +#include <linux/file.h>
61405 +#include <linux/grsecurity.h>
61406 +#include <linux/grinternal.h>
61407 +
61408 +void
61409 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
61410 +{
61411 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
61412 + if ((grsec_enable_chdir && grsec_enable_group &&
61413 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
61414 + !grsec_enable_group)) {
61415 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
61416 + }
61417 +#endif
61418 + return;
61419 +}
61420 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
61421 new file mode 100644
61422 index 0000000..197bdd5
61423 --- /dev/null
61424 +++ b/grsecurity/grsec_chroot.c
61425 @@ -0,0 +1,386 @@
61426 +#include <linux/kernel.h>
61427 +#include <linux/module.h>
61428 +#include <linux/sched.h>
61429 +#include <linux/file.h>
61430 +#include <linux/fs.h>
61431 +#include <linux/mount.h>
61432 +#include <linux/types.h>
61433 +#include <linux/pid_namespace.h>
61434 +#include <linux/grsecurity.h>
61435 +#include <linux/grinternal.h>
61436 +
61437 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
61438 +{
61439 +#ifdef CONFIG_GRKERNSEC
61440 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
61441 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
61442 + task->gr_is_chrooted = 1;
61443 + else
61444 + task->gr_is_chrooted = 0;
61445 +
61446 + task->gr_chroot_dentry = path->dentry;
61447 +#endif
61448 + return;
61449 +}
61450 +
61451 +void gr_clear_chroot_entries(struct task_struct *task)
61452 +{
61453 +#ifdef CONFIG_GRKERNSEC
61454 + task->gr_is_chrooted = 0;
61455 + task->gr_chroot_dentry = NULL;
61456 +#endif
61457 + return;
61458 +}
61459 +
61460 +int
61461 +gr_handle_chroot_unix(const pid_t pid)
61462 +{
61463 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
61464 + struct task_struct *p;
61465 +
61466 + if (unlikely(!grsec_enable_chroot_unix))
61467 + return 1;
61468 +
61469 + if (likely(!proc_is_chrooted(current)))
61470 + return 1;
61471 +
61472 + rcu_read_lock();
61473 + read_lock(&tasklist_lock);
61474 +
61475 + p = find_task_by_vpid_unrestricted(pid);
61476 + if (unlikely(p && !have_same_root(current, p))) {
61477 + read_unlock(&tasklist_lock);
61478 + rcu_read_unlock();
61479 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
61480 + return 0;
61481 + }
61482 + read_unlock(&tasklist_lock);
61483 + rcu_read_unlock();
61484 +#endif
61485 + return 1;
61486 +}
61487 +
61488 +int
61489 +gr_handle_chroot_nice(void)
61490 +{
61491 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
61492 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
61493 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
61494 + return -EPERM;
61495 + }
61496 +#endif
61497 + return 0;
61498 +}
61499 +
61500 +int
61501 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
61502 +{
61503 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
61504 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
61505 + && proc_is_chrooted(current)) {
61506 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
61507 + return -EACCES;
61508 + }
61509 +#endif
61510 + return 0;
61511 +}
61512 +
61513 +int
61514 +gr_handle_chroot_rawio(const struct inode *inode)
61515 +{
61516 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
61517 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
61518 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
61519 + return 1;
61520 +#endif
61521 + return 0;
61522 +}
61523 +
61524 +int
61525 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
61526 +{
61527 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61528 + struct task_struct *p;
61529 + int ret = 0;
61530 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
61531 + return ret;
61532 +
61533 + read_lock(&tasklist_lock);
61534 + do_each_pid_task(pid, type, p) {
61535 + if (!have_same_root(current, p)) {
61536 + ret = 1;
61537 + goto out;
61538 + }
61539 + } while_each_pid_task(pid, type, p);
61540 +out:
61541 + read_unlock(&tasklist_lock);
61542 + return ret;
61543 +#endif
61544 + return 0;
61545 +}
61546 +
61547 +int
61548 +gr_pid_is_chrooted(struct task_struct *p)
61549 +{
61550 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61551 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
61552 + return 0;
61553 +
61554 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
61555 + !have_same_root(current, p)) {
61556 + return 1;
61557 + }
61558 +#endif
61559 + return 0;
61560 +}
61561 +
61562 +EXPORT_SYMBOL(gr_pid_is_chrooted);
61563 +
61564 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
61565 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
61566 +{
61567 + struct dentry *dentry = (struct dentry *)u_dentry;
61568 + struct vfsmount *mnt = (struct vfsmount *)u_mnt;
61569 + struct dentry *realroot;
61570 + struct vfsmount *realrootmnt;
61571 + struct dentry *currentroot;
61572 + struct vfsmount *currentmnt;
61573 + struct task_struct *reaper = &init_task;
61574 + int ret = 1;
61575 +
61576 + read_lock(&reaper->fs->lock);
61577 + realrootmnt = mntget(reaper->fs->root.mnt);
61578 + realroot = dget(reaper->fs->root.dentry);
61579 + read_unlock(&reaper->fs->lock);
61580 +
61581 + read_lock(&current->fs->lock);
61582 + currentmnt = mntget(current->fs->root.mnt);
61583 + currentroot = dget(current->fs->root.dentry);
61584 + read_unlock(&current->fs->lock);
61585 +
61586 + spin_lock(&dcache_lock);
61587 + for (;;) {
61588 + if (unlikely((dentry == realroot && mnt == realrootmnt)
61589 + || (dentry == currentroot && mnt == currentmnt)))
61590 + break;
61591 + if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
61592 + if (mnt->mnt_parent == mnt)
61593 + break;
61594 + dentry = mnt->mnt_mountpoint;
61595 + mnt = mnt->mnt_parent;
61596 + continue;
61597 + }
61598 + dentry = dentry->d_parent;
61599 + }
61600 + spin_unlock(&dcache_lock);
61601 +
61602 + dput(currentroot);
61603 + mntput(currentmnt);
61604 +
61605 + /* access is outside of chroot */
61606 + if (dentry == realroot && mnt == realrootmnt)
61607 + ret = 0;
61608 +
61609 + dput(realroot);
61610 + mntput(realrootmnt);
61611 + return ret;
61612 +}
61613 +#endif
61614 +
61615 +int
61616 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
61617 +{
61618 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
61619 + if (!grsec_enable_chroot_fchdir)
61620 + return 1;
61621 +
61622 + if (!proc_is_chrooted(current))
61623 + return 1;
61624 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
61625 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
61626 + return 0;
61627 + }
61628 +#endif
61629 + return 1;
61630 +}
61631 +
61632 +int
61633 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
61634 + const time_t shm_createtime)
61635 +{
61636 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
61637 + struct task_struct *p;
61638 + time_t starttime;
61639 +
61640 + if (unlikely(!grsec_enable_chroot_shmat))
61641 + return 1;
61642 +
61643 + if (likely(!proc_is_chrooted(current)))
61644 + return 1;
61645 +
61646 + rcu_read_lock();
61647 + read_lock(&tasklist_lock);
61648 +
61649 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
61650 + starttime = p->start_time.tv_sec;
61651 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
61652 + if (have_same_root(current, p)) {
61653 + goto allow;
61654 + } else {
61655 + read_unlock(&tasklist_lock);
61656 + rcu_read_unlock();
61657 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
61658 + return 0;
61659 + }
61660 + }
61661 + /* creator exited, pid reuse, fall through to next check */
61662 + }
61663 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
61664 + if (unlikely(!have_same_root(current, p))) {
61665 + read_unlock(&tasklist_lock);
61666 + rcu_read_unlock();
61667 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
61668 + return 0;
61669 + }
61670 + }
61671 +
61672 +allow:
61673 + read_unlock(&tasklist_lock);
61674 + rcu_read_unlock();
61675 +#endif
61676 + return 1;
61677 +}
61678 +
61679 +void
61680 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
61681 +{
61682 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
61683 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
61684 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
61685 +#endif
61686 + return;
61687 +}
61688 +
61689 +int
61690 +gr_handle_chroot_mknod(const struct dentry *dentry,
61691 + const struct vfsmount *mnt, const int mode)
61692 +{
61693 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
61694 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
61695 + proc_is_chrooted(current)) {
61696 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
61697 + return -EPERM;
61698 + }
61699 +#endif
61700 + return 0;
61701 +}
61702 +
61703 +int
61704 +gr_handle_chroot_mount(const struct dentry *dentry,
61705 + const struct vfsmount *mnt, const char *dev_name)
61706 +{
61707 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
61708 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
61709 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
61710 + return -EPERM;
61711 + }
61712 +#endif
61713 + return 0;
61714 +}
61715 +
61716 +int
61717 +gr_handle_chroot_pivot(void)
61718 +{
61719 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
61720 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
61721 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
61722 + return -EPERM;
61723 + }
61724 +#endif
61725 + return 0;
61726 +}
61727 +
61728 +int
61729 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
61730 +{
61731 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
61732 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
61733 + !gr_is_outside_chroot(dentry, mnt)) {
61734 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
61735 + return -EPERM;
61736 + }
61737 +#endif
61738 + return 0;
61739 +}
61740 +
61741 +extern const char *captab_log[];
61742 +extern int captab_log_entries;
61743 +
61744 +int
61745 +gr_chroot_is_capable(const int cap)
61746 +{
61747 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
61748 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
61749 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
61750 + if (cap_raised(chroot_caps, cap)) {
61751 + const struct cred *creds = current_cred();
61752 + if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
61753 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
61754 + }
61755 + return 0;
61756 + }
61757 + }
61758 +#endif
61759 + return 1;
61760 +}
61761 +
61762 +int
61763 +gr_chroot_is_capable_nolog(const int cap)
61764 +{
61765 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
61766 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
61767 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
61768 + if (cap_raised(chroot_caps, cap)) {
61769 + return 0;
61770 + }
61771 + }
61772 +#endif
61773 + return 1;
61774 +}
61775 +
61776 +int
61777 +gr_handle_chroot_sysctl(const int op)
61778 +{
61779 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
61780 + if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
61781 + && (op & MAY_WRITE))
61782 + return -EACCES;
61783 +#endif
61784 + return 0;
61785 +}
61786 +
61787 +void
61788 +gr_handle_chroot_chdir(struct path *path)
61789 +{
61790 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
61791 + if (grsec_enable_chroot_chdir)
61792 + set_fs_pwd(current->fs, path);
61793 +#endif
61794 + return;
61795 +}
61796 +
61797 +int
61798 +gr_handle_chroot_chmod(const struct dentry *dentry,
61799 + const struct vfsmount *mnt, const int mode)
61800 +{
61801 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
61802 + /* allow chmod +s on directories, but not on files */
61803 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
61804 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
61805 + proc_is_chrooted(current)) {
61806 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
61807 + return -EPERM;
61808 + }
61809 +#endif
61810 + return 0;
61811 +}
61812 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
61813 new file mode 100644
61814 index 0000000..b81db5b
61815 --- /dev/null
61816 +++ b/grsecurity/grsec_disabled.c
61817 @@ -0,0 +1,439 @@
61818 +#include <linux/kernel.h>
61819 +#include <linux/module.h>
61820 +#include <linux/sched.h>
61821 +#include <linux/file.h>
61822 +#include <linux/fs.h>
61823 +#include <linux/kdev_t.h>
61824 +#include <linux/net.h>
61825 +#include <linux/in.h>
61826 +#include <linux/ip.h>
61827 +#include <linux/skbuff.h>
61828 +#include <linux/sysctl.h>
61829 +
61830 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
61831 +void
61832 +pax_set_initial_flags(struct linux_binprm *bprm)
61833 +{
61834 + return;
61835 +}
61836 +#endif
61837 +
61838 +#ifdef CONFIG_SYSCTL
61839 +__u32
61840 +gr_handle_sysctl(const struct ctl_table * table, const int op)
61841 +{
61842 + return 0;
61843 +}
61844 +#endif
61845 +
61846 +#ifdef CONFIG_TASKSTATS
61847 +int gr_is_taskstats_denied(int pid)
61848 +{
61849 + return 0;
61850 +}
61851 +#endif
61852 +
61853 +int
61854 +gr_acl_is_enabled(void)
61855 +{
61856 + return 0;
61857 +}
61858 +
61859 +void
61860 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
61861 +{
61862 + return;
61863 +}
61864 +
61865 +int
61866 +gr_handle_rawio(const struct inode *inode)
61867 +{
61868 + return 0;
61869 +}
61870 +
61871 +void
61872 +gr_acl_handle_psacct(struct task_struct *task, const long code)
61873 +{
61874 + return;
61875 +}
61876 +
61877 +int
61878 +gr_handle_ptrace(struct task_struct *task, const long request)
61879 +{
61880 + return 0;
61881 +}
61882 +
61883 +int
61884 +gr_handle_proc_ptrace(struct task_struct *task)
61885 +{
61886 + return 0;
61887 +}
61888 +
61889 +void
61890 +gr_learn_resource(const struct task_struct *task,
61891 + const int res, const unsigned long wanted, const int gt)
61892 +{
61893 + return;
61894 +}
61895 +
61896 +int
61897 +gr_set_acls(const int type)
61898 +{
61899 + return 0;
61900 +}
61901 +
61902 +int
61903 +gr_check_hidden_task(const struct task_struct *tsk)
61904 +{
61905 + return 0;
61906 +}
61907 +
61908 +int
61909 +gr_check_protected_task(const struct task_struct *task)
61910 +{
61911 + return 0;
61912 +}
61913 +
61914 +int
61915 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
61916 +{
61917 + return 0;
61918 +}
61919 +
61920 +void
61921 +gr_copy_label(struct task_struct *tsk)
61922 +{
61923 + return;
61924 +}
61925 +
61926 +void
61927 +gr_set_pax_flags(struct task_struct *task)
61928 +{
61929 + return;
61930 +}
61931 +
61932 +int
61933 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
61934 + const int unsafe_share)
61935 +{
61936 + return 0;
61937 +}
61938 +
61939 +void
61940 +gr_handle_delete(const ino_t ino, const dev_t dev)
61941 +{
61942 + return;
61943 +}
61944 +
61945 +void
61946 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
61947 +{
61948 + return;
61949 +}
61950 +
61951 +void
61952 +gr_handle_crash(struct task_struct *task, const int sig)
61953 +{
61954 + return;
61955 +}
61956 +
61957 +int
61958 +gr_check_crash_exec(const struct file *filp)
61959 +{
61960 + return 0;
61961 +}
61962 +
61963 +int
61964 +gr_check_crash_uid(const uid_t uid)
61965 +{
61966 + return 0;
61967 +}
61968 +
61969 +void
61970 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
61971 + struct dentry *old_dentry,
61972 + struct dentry *new_dentry,
61973 + struct vfsmount *mnt, const __u8 replace)
61974 +{
61975 + return;
61976 +}
61977 +
61978 +int
61979 +gr_search_socket(const int family, const int type, const int protocol)
61980 +{
61981 + return 1;
61982 +}
61983 +
61984 +int
61985 +gr_search_connectbind(const int mode, const struct socket *sock,
61986 + const struct sockaddr_in *addr)
61987 +{
61988 + return 0;
61989 +}
61990 +
61991 +void
61992 +gr_handle_alertkill(struct task_struct *task)
61993 +{
61994 + return;
61995 +}
61996 +
61997 +__u32
61998 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
61999 +{
62000 + return 1;
62001 +}
62002 +
62003 +__u32
62004 +gr_acl_handle_hidden_file(const struct dentry * dentry,
62005 + const struct vfsmount * mnt)
62006 +{
62007 + return 1;
62008 +}
62009 +
62010 +__u32
62011 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
62012 + int acc_mode)
62013 +{
62014 + return 1;
62015 +}
62016 +
62017 +__u32
62018 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
62019 +{
62020 + return 1;
62021 +}
62022 +
62023 +__u32
62024 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
62025 +{
62026 + return 1;
62027 +}
62028 +
62029 +int
62030 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
62031 + unsigned int *vm_flags)
62032 +{
62033 + return 1;
62034 +}
62035 +
62036 +__u32
62037 +gr_acl_handle_truncate(const struct dentry * dentry,
62038 + const struct vfsmount * mnt)
62039 +{
62040 + return 1;
62041 +}
62042 +
62043 +__u32
62044 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
62045 +{
62046 + return 1;
62047 +}
62048 +
62049 +__u32
62050 +gr_acl_handle_access(const struct dentry * dentry,
62051 + const struct vfsmount * mnt, const int fmode)
62052 +{
62053 + return 1;
62054 +}
62055 +
62056 +__u32
62057 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
62058 + mode_t mode)
62059 +{
62060 + return 1;
62061 +}
62062 +
62063 +__u32
62064 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
62065 + mode_t mode)
62066 +{
62067 + return 1;
62068 +}
62069 +
62070 +__u32
62071 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
62072 +{
62073 + return 1;
62074 +}
62075 +
62076 +__u32
62077 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
62078 +{
62079 + return 1;
62080 +}
62081 +
62082 +void
62083 +grsecurity_init(void)
62084 +{
62085 + return;
62086 +}
62087 +
62088 +__u32
62089 +gr_acl_handle_mknod(const struct dentry * new_dentry,
62090 + const struct dentry * parent_dentry,
62091 + const struct vfsmount * parent_mnt,
62092 + const int mode)
62093 +{
62094 + return 1;
62095 +}
62096 +
62097 +__u32
62098 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
62099 + const struct dentry * parent_dentry,
62100 + const struct vfsmount * parent_mnt)
62101 +{
62102 + return 1;
62103 +}
62104 +
62105 +__u32
62106 +gr_acl_handle_symlink(const struct dentry * new_dentry,
62107 + const struct dentry * parent_dentry,
62108 + const struct vfsmount * parent_mnt, const char *from)
62109 +{
62110 + return 1;
62111 +}
62112 +
62113 +__u32
62114 +gr_acl_handle_link(const struct dentry * new_dentry,
62115 + const struct dentry * parent_dentry,
62116 + const struct vfsmount * parent_mnt,
62117 + const struct dentry * old_dentry,
62118 + const struct vfsmount * old_mnt, const char *to)
62119 +{
62120 + return 1;
62121 +}
62122 +
62123 +int
62124 +gr_acl_handle_rename(const struct dentry *new_dentry,
62125 + const struct dentry *parent_dentry,
62126 + const struct vfsmount *parent_mnt,
62127 + const struct dentry *old_dentry,
62128 + const struct inode *old_parent_inode,
62129 + const struct vfsmount *old_mnt, const char *newname)
62130 +{
62131 + return 0;
62132 +}
62133 +
62134 +int
62135 +gr_acl_handle_filldir(const struct file *file, const char *name,
62136 + const int namelen, const ino_t ino)
62137 +{
62138 + return 1;
62139 +}
62140 +
62141 +int
62142 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62143 + const time_t shm_createtime, const uid_t cuid, const int shmid)
62144 +{
62145 + return 1;
62146 +}
62147 +
62148 +int
62149 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
62150 +{
62151 + return 0;
62152 +}
62153 +
62154 +int
62155 +gr_search_accept(const struct socket *sock)
62156 +{
62157 + return 0;
62158 +}
62159 +
62160 +int
62161 +gr_search_listen(const struct socket *sock)
62162 +{
62163 + return 0;
62164 +}
62165 +
62166 +int
62167 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
62168 +{
62169 + return 0;
62170 +}
62171 +
62172 +__u32
62173 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
62174 +{
62175 + return 1;
62176 +}
62177 +
62178 +__u32
62179 +gr_acl_handle_creat(const struct dentry * dentry,
62180 + const struct dentry * p_dentry,
62181 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
62182 + const int imode)
62183 +{
62184 + return 1;
62185 +}
62186 +
62187 +void
62188 +gr_acl_handle_exit(void)
62189 +{
62190 + return;
62191 +}
62192 +
62193 +int
62194 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
62195 +{
62196 + return 1;
62197 +}
62198 +
62199 +void
62200 +gr_set_role_label(const uid_t uid, const gid_t gid)
62201 +{
62202 + return;
62203 +}
62204 +
62205 +int
62206 +gr_acl_handle_procpidmem(const struct task_struct *task)
62207 +{
62208 + return 0;
62209 +}
62210 +
62211 +int
62212 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
62213 +{
62214 + return 0;
62215 +}
62216 +
62217 +int
62218 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
62219 +{
62220 + return 0;
62221 +}
62222 +
62223 +void
62224 +gr_set_kernel_label(struct task_struct *task)
62225 +{
62226 + return;
62227 +}
62228 +
62229 +int
62230 +gr_check_user_change(int real, int effective, int fs)
62231 +{
62232 + return 0;
62233 +}
62234 +
62235 +int
62236 +gr_check_group_change(int real, int effective, int fs)
62237 +{
62238 + return 0;
62239 +}
62240 +
62241 +int gr_acl_enable_at_secure(void)
62242 +{
62243 + return 0;
62244 +}
62245 +
62246 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
62247 +{
62248 + return dentry->d_inode->i_sb->s_dev;
62249 +}
62250 +
62251 +EXPORT_SYMBOL(gr_learn_resource);
62252 +EXPORT_SYMBOL(gr_set_kernel_label);
62253 +#ifdef CONFIG_SECURITY
62254 +EXPORT_SYMBOL(gr_check_user_change);
62255 +EXPORT_SYMBOL(gr_check_group_change);
62256 +#endif
62257 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
62258 new file mode 100644
62259 index 0000000..a96e155
62260 --- /dev/null
62261 +++ b/grsecurity/grsec_exec.c
62262 @@ -0,0 +1,204 @@
62263 +#include <linux/kernel.h>
62264 +#include <linux/sched.h>
62265 +#include <linux/file.h>
62266 +#include <linux/binfmts.h>
62267 +#include <linux/smp_lock.h>
62268 +#include <linux/fs.h>
62269 +#include <linux/types.h>
62270 +#include <linux/grdefs.h>
62271 +#include <linux/grinternal.h>
62272 +#include <linux/capability.h>
62273 +#include <linux/compat.h>
62274 +#include <linux/module.h>
62275 +
62276 +#include <asm/uaccess.h>
62277 +
62278 +#ifdef CONFIG_GRKERNSEC_EXECLOG
62279 +static char gr_exec_arg_buf[132];
62280 +static DEFINE_MUTEX(gr_exec_arg_mutex);
62281 +#endif
62282 +
62283 +void
62284 +gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
62285 +{
62286 +#ifdef CONFIG_GRKERNSEC_EXECLOG
62287 + char *grarg = gr_exec_arg_buf;
62288 + unsigned int i, x, execlen = 0;
62289 + char c;
62290 +
62291 + if (!((grsec_enable_execlog && grsec_enable_group &&
62292 + in_group_p(grsec_audit_gid))
62293 + || (grsec_enable_execlog && !grsec_enable_group)))
62294 + return;
62295 +
62296 + mutex_lock(&gr_exec_arg_mutex);
62297 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
62298 +
62299 + if (unlikely(argv == NULL))
62300 + goto log;
62301 +
62302 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
62303 + const char __user *p;
62304 + unsigned int len;
62305 +
62306 + if (copy_from_user(&p, argv + i, sizeof(p)))
62307 + goto log;
62308 + if (!p)
62309 + goto log;
62310 + len = strnlen_user(p, 128 - execlen);
62311 + if (len > 128 - execlen)
62312 + len = 128 - execlen;
62313 + else if (len > 0)
62314 + len--;
62315 + if (copy_from_user(grarg + execlen, p, len))
62316 + goto log;
62317 +
62318 + /* rewrite unprintable characters */
62319 + for (x = 0; x < len; x++) {
62320 + c = *(grarg + execlen + x);
62321 + if (c < 32 || c > 126)
62322 + *(grarg + execlen + x) = ' ';
62323 + }
62324 +
62325 + execlen += len;
62326 + *(grarg + execlen) = ' ';
62327 + *(grarg + execlen + 1) = '\0';
62328 + execlen++;
62329 + }
62330 +
62331 + log:
62332 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
62333 + bprm->file->f_path.mnt, grarg);
62334 + mutex_unlock(&gr_exec_arg_mutex);
62335 +#endif
62336 + return;
62337 +}
62338 +
62339 +#ifdef CONFIG_COMPAT
62340 +void
62341 +gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
62342 +{
62343 +#ifdef CONFIG_GRKERNSEC_EXECLOG
62344 + char *grarg = gr_exec_arg_buf;
62345 + unsigned int i, x, execlen = 0;
62346 + char c;
62347 +
62348 + if (!((grsec_enable_execlog && grsec_enable_group &&
62349 + in_group_p(grsec_audit_gid))
62350 + || (grsec_enable_execlog && !grsec_enable_group)))
62351 + return;
62352 +
62353 + mutex_lock(&gr_exec_arg_mutex);
62354 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
62355 +
62356 + if (unlikely(argv == NULL))
62357 + goto log;
62358 +
62359 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
62360 + compat_uptr_t p;
62361 + unsigned int len;
62362 +
62363 + if (get_user(p, argv + i))
62364 + goto log;
62365 + len = strnlen_user(compat_ptr(p), 128 - execlen);
62366 + if (len > 128 - execlen)
62367 + len = 128 - execlen;
62368 + else if (len > 0)
62369 + len--;
62370 + else
62371 + goto log;
62372 + if (copy_from_user(grarg + execlen, compat_ptr(p), len))
62373 + goto log;
62374 +
62375 + /* rewrite unprintable characters */
62376 + for (x = 0; x < len; x++) {
62377 + c = *(grarg + execlen + x);
62378 + if (c < 32 || c > 126)
62379 + *(grarg + execlen + x) = ' ';
62380 + }
62381 +
62382 + execlen += len;
62383 + *(grarg + execlen) = ' ';
62384 + *(grarg + execlen + 1) = '\0';
62385 + execlen++;
62386 + }
62387 +
62388 + log:
62389 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
62390 + bprm->file->f_path.mnt, grarg);
62391 + mutex_unlock(&gr_exec_arg_mutex);
62392 +#endif
62393 + return;
62394 +}
62395 +#endif
62396 +
62397 +#ifdef CONFIG_GRKERNSEC
62398 +extern int gr_acl_is_capable(const int cap);
62399 +extern int gr_acl_is_capable_nolog(const int cap);
62400 +extern int gr_chroot_is_capable(const int cap);
62401 +extern int gr_chroot_is_capable_nolog(const int cap);
62402 +#endif
62403 +
62404 +const char *captab_log[] = {
62405 + "CAP_CHOWN",
62406 + "CAP_DAC_OVERRIDE",
62407 + "CAP_DAC_READ_SEARCH",
62408 + "CAP_FOWNER",
62409 + "CAP_FSETID",
62410 + "CAP_KILL",
62411 + "CAP_SETGID",
62412 + "CAP_SETUID",
62413 + "CAP_SETPCAP",
62414 + "CAP_LINUX_IMMUTABLE",
62415 + "CAP_NET_BIND_SERVICE",
62416 + "CAP_NET_BROADCAST",
62417 + "CAP_NET_ADMIN",
62418 + "CAP_NET_RAW",
62419 + "CAP_IPC_LOCK",
62420 + "CAP_IPC_OWNER",
62421 + "CAP_SYS_MODULE",
62422 + "CAP_SYS_RAWIO",
62423 + "CAP_SYS_CHROOT",
62424 + "CAP_SYS_PTRACE",
62425 + "CAP_SYS_PACCT",
62426 + "CAP_SYS_ADMIN",
62427 + "CAP_SYS_BOOT",
62428 + "CAP_SYS_NICE",
62429 + "CAP_SYS_RESOURCE",
62430 + "CAP_SYS_TIME",
62431 + "CAP_SYS_TTY_CONFIG",
62432 + "CAP_MKNOD",
62433 + "CAP_LEASE",
62434 + "CAP_AUDIT_WRITE",
62435 + "CAP_AUDIT_CONTROL",
62436 + "CAP_SETFCAP",
62437 + "CAP_MAC_OVERRIDE",
62438 + "CAP_MAC_ADMIN"
62439 +};
62440 +
62441 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
62442 +
62443 +int gr_is_capable(const int cap)
62444 +{
62445 +#ifdef CONFIG_GRKERNSEC
62446 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
62447 + return 1;
62448 + return 0;
62449 +#else
62450 + return 1;
62451 +#endif
62452 +}
62453 +
62454 +int gr_is_capable_nolog(const int cap)
62455 +{
62456 +#ifdef CONFIG_GRKERNSEC
62457 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
62458 + return 1;
62459 + return 0;
62460 +#else
62461 + return 1;
62462 +#endif
62463 +}
62464 +
62465 +EXPORT_SYMBOL(gr_is_capable);
62466 +EXPORT_SYMBOL(gr_is_capable_nolog);
62467 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
62468 new file mode 100644
62469 index 0000000..d3ee748
62470 --- /dev/null
62471 +++ b/grsecurity/grsec_fifo.c
62472 @@ -0,0 +1,24 @@
62473 +#include <linux/kernel.h>
62474 +#include <linux/sched.h>
62475 +#include <linux/fs.h>
62476 +#include <linux/file.h>
62477 +#include <linux/grinternal.h>
62478 +
62479 +int
62480 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
62481 + const struct dentry *dir, const int flag, const int acc_mode)
62482 +{
62483 +#ifdef CONFIG_GRKERNSEC_FIFO
62484 + const struct cred *cred = current_cred();
62485 +
62486 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
62487 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
62488 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
62489 + (cred->fsuid != dentry->d_inode->i_uid)) {
62490 + if (!inode_permission(dentry->d_inode, acc_mode))
62491 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
62492 + return -EACCES;
62493 + }
62494 +#endif
62495 + return 0;
62496 +}
62497 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
62498 new file mode 100644
62499 index 0000000..8ca18bf
62500 --- /dev/null
62501 +++ b/grsecurity/grsec_fork.c
62502 @@ -0,0 +1,23 @@
62503 +#include <linux/kernel.h>
62504 +#include <linux/sched.h>
62505 +#include <linux/grsecurity.h>
62506 +#include <linux/grinternal.h>
62507 +#include <linux/errno.h>
62508 +
62509 +void
62510 +gr_log_forkfail(const int retval)
62511 +{
62512 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
62513 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
62514 + switch (retval) {
62515 + case -EAGAIN:
62516 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
62517 + break;
62518 + case -ENOMEM:
62519 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
62520 + break;
62521 + }
62522 + }
62523 +#endif
62524 + return;
62525 +}
62526 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
62527 new file mode 100644
62528 index 0000000..f813c26
62529 --- /dev/null
62530 +++ b/grsecurity/grsec_init.c
62531 @@ -0,0 +1,270 @@
62532 +#include <linux/kernel.h>
62533 +#include <linux/sched.h>
62534 +#include <linux/mm.h>
62535 +#include <linux/smp_lock.h>
62536 +#include <linux/gracl.h>
62537 +#include <linux/slab.h>
62538 +#include <linux/vmalloc.h>
62539 +#include <linux/percpu.h>
62540 +#include <linux/module.h>
62541 +
62542 +int grsec_enable_brute;
62543 +int grsec_enable_link;
62544 +int grsec_enable_dmesg;
62545 +int grsec_enable_harden_ptrace;
62546 +int grsec_enable_fifo;
62547 +int grsec_enable_execlog;
62548 +int grsec_enable_signal;
62549 +int grsec_enable_forkfail;
62550 +int grsec_enable_audit_ptrace;
62551 +int grsec_enable_time;
62552 +int grsec_enable_audit_textrel;
62553 +int grsec_enable_group;
62554 +int grsec_audit_gid;
62555 +int grsec_enable_chdir;
62556 +int grsec_enable_mount;
62557 +int grsec_enable_rofs;
62558 +int grsec_enable_chroot_findtask;
62559 +int grsec_enable_chroot_mount;
62560 +int grsec_enable_chroot_shmat;
62561 +int grsec_enable_chroot_fchdir;
62562 +int grsec_enable_chroot_double;
62563 +int grsec_enable_chroot_pivot;
62564 +int grsec_enable_chroot_chdir;
62565 +int grsec_enable_chroot_chmod;
62566 +int grsec_enable_chroot_mknod;
62567 +int grsec_enable_chroot_nice;
62568 +int grsec_enable_chroot_execlog;
62569 +int grsec_enable_chroot_caps;
62570 +int grsec_enable_chroot_sysctl;
62571 +int grsec_enable_chroot_unix;
62572 +int grsec_enable_tpe;
62573 +int grsec_tpe_gid;
62574 +int grsec_enable_blackhole;
62575 +#ifdef CONFIG_IPV6_MODULE
62576 +EXPORT_SYMBOL(grsec_enable_blackhole);
62577 +#endif
62578 +int grsec_lastack_retries;
62579 +int grsec_enable_tpe_all;
62580 +int grsec_enable_tpe_invert;
62581 +int grsec_enable_socket_all;
62582 +int grsec_socket_all_gid;
62583 +int grsec_enable_socket_client;
62584 +int grsec_socket_client_gid;
62585 +int grsec_enable_socket_server;
62586 +int grsec_socket_server_gid;
62587 +int grsec_resource_logging;
62588 +int grsec_disable_privio;
62589 +int grsec_enable_log_rwxmaps;
62590 +int grsec_lock;
62591 +
62592 +DEFINE_SPINLOCK(grsec_alert_lock);
62593 +unsigned long grsec_alert_wtime = 0;
62594 +unsigned long grsec_alert_fyet = 0;
62595 +
62596 +DEFINE_SPINLOCK(grsec_audit_lock);
62597 +
62598 +DEFINE_RWLOCK(grsec_exec_file_lock);
62599 +
62600 +char *gr_shared_page[4];
62601 +
62602 +char *gr_alert_log_fmt;
62603 +char *gr_audit_log_fmt;
62604 +char *gr_alert_log_buf;
62605 +char *gr_audit_log_buf;
62606 +
62607 +extern struct gr_arg *gr_usermode;
62608 +extern unsigned char *gr_system_salt;
62609 +extern unsigned char *gr_system_sum;
62610 +
62611 +void __init
62612 +grsecurity_init(void)
62613 +{
62614 + int j;
62615 + /* create the per-cpu shared pages */
62616 +
62617 +#ifdef CONFIG_X86
62618 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
62619 +#endif
62620 +
62621 + for (j = 0; j < 4; j++) {
62622 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
62623 + if (gr_shared_page[j] == NULL) {
62624 + panic("Unable to allocate grsecurity shared page");
62625 + return;
62626 + }
62627 + }
62628 +
62629 + /* allocate log buffers */
62630 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
62631 + if (!gr_alert_log_fmt) {
62632 + panic("Unable to allocate grsecurity alert log format buffer");
62633 + return;
62634 + }
62635 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
62636 + if (!gr_audit_log_fmt) {
62637 + panic("Unable to allocate grsecurity audit log format buffer");
62638 + return;
62639 + }
62640 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
62641 + if (!gr_alert_log_buf) {
62642 + panic("Unable to allocate grsecurity alert log buffer");
62643 + return;
62644 + }
62645 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
62646 + if (!gr_audit_log_buf) {
62647 + panic("Unable to allocate grsecurity audit log buffer");
62648 + return;
62649 + }
62650 +
62651 + /* allocate memory for authentication structure */
62652 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
62653 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
62654 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
62655 +
62656 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
62657 + panic("Unable to allocate grsecurity authentication structure");
62658 + return;
62659 + }
62660 +
62661 +
62662 +#ifdef CONFIG_GRKERNSEC_IO
62663 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
62664 + grsec_disable_privio = 1;
62665 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
62666 + grsec_disable_privio = 1;
62667 +#else
62668 + grsec_disable_privio = 0;
62669 +#endif
62670 +#endif
62671 +
62672 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
62673 + /* for backward compatibility, tpe_invert always defaults to on if
62674 + enabled in the kernel
62675 + */
62676 + grsec_enable_tpe_invert = 1;
62677 +#endif
62678 +
62679 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
62680 +#ifndef CONFIG_GRKERNSEC_SYSCTL
62681 + grsec_lock = 1;
62682 +#endif
62683 +
62684 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
62685 + grsec_enable_audit_textrel = 1;
62686 +#endif
62687 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
62688 + grsec_enable_log_rwxmaps = 1;
62689 +#endif
62690 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
62691 + grsec_enable_group = 1;
62692 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
62693 +#endif
62694 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
62695 + grsec_enable_chdir = 1;
62696 +#endif
62697 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
62698 + grsec_enable_harden_ptrace = 1;
62699 +#endif
62700 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
62701 + grsec_enable_mount = 1;
62702 +#endif
62703 +#ifdef CONFIG_GRKERNSEC_LINK
62704 + grsec_enable_link = 1;
62705 +#endif
62706 +#ifdef CONFIG_GRKERNSEC_BRUTE
62707 + grsec_enable_brute = 1;
62708 +#endif
62709 +#ifdef CONFIG_GRKERNSEC_DMESG
62710 + grsec_enable_dmesg = 1;
62711 +#endif
62712 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
62713 + grsec_enable_blackhole = 1;
62714 + grsec_lastack_retries = 4;
62715 +#endif
62716 +#ifdef CONFIG_GRKERNSEC_FIFO
62717 + grsec_enable_fifo = 1;
62718 +#endif
62719 +#ifdef CONFIG_GRKERNSEC_EXECLOG
62720 + grsec_enable_execlog = 1;
62721 +#endif
62722 +#ifdef CONFIG_GRKERNSEC_SIGNAL
62723 + grsec_enable_signal = 1;
62724 +#endif
62725 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
62726 + grsec_enable_forkfail = 1;
62727 +#endif
62728 +#ifdef CONFIG_GRKERNSEC_TIME
62729 + grsec_enable_time = 1;
62730 +#endif
62731 +#ifdef CONFIG_GRKERNSEC_RESLOG
62732 + grsec_resource_logging = 1;
62733 +#endif
62734 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62735 + grsec_enable_chroot_findtask = 1;
62736 +#endif
62737 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
62738 + grsec_enable_chroot_unix = 1;
62739 +#endif
62740 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
62741 + grsec_enable_chroot_mount = 1;
62742 +#endif
62743 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
62744 + grsec_enable_chroot_fchdir = 1;
62745 +#endif
62746 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
62747 + grsec_enable_chroot_shmat = 1;
62748 +#endif
62749 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
62750 + grsec_enable_audit_ptrace = 1;
62751 +#endif
62752 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
62753 + grsec_enable_chroot_double = 1;
62754 +#endif
62755 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
62756 + grsec_enable_chroot_pivot = 1;
62757 +#endif
62758 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
62759 + grsec_enable_chroot_chdir = 1;
62760 +#endif
62761 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
62762 + grsec_enable_chroot_chmod = 1;
62763 +#endif
62764 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
62765 + grsec_enable_chroot_mknod = 1;
62766 +#endif
62767 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
62768 + grsec_enable_chroot_nice = 1;
62769 +#endif
62770 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
62771 + grsec_enable_chroot_execlog = 1;
62772 +#endif
62773 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62774 + grsec_enable_chroot_caps = 1;
62775 +#endif
62776 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
62777 + grsec_enable_chroot_sysctl = 1;
62778 +#endif
62779 +#ifdef CONFIG_GRKERNSEC_TPE
62780 + grsec_enable_tpe = 1;
62781 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
62782 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
62783 + grsec_enable_tpe_all = 1;
62784 +#endif
62785 +#endif
62786 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
62787 + grsec_enable_socket_all = 1;
62788 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
62789 +#endif
62790 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
62791 + grsec_enable_socket_client = 1;
62792 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
62793 +#endif
62794 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
62795 + grsec_enable_socket_server = 1;
62796 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
62797 +#endif
62798 +#endif
62799 +
62800 + return;
62801 +}
62802 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
62803 new file mode 100644
62804 index 0000000..3efe141
62805 --- /dev/null
62806 +++ b/grsecurity/grsec_link.c
62807 @@ -0,0 +1,43 @@
62808 +#include <linux/kernel.h>
62809 +#include <linux/sched.h>
62810 +#include <linux/fs.h>
62811 +#include <linux/file.h>
62812 +#include <linux/grinternal.h>
62813 +
62814 +int
62815 +gr_handle_follow_link(const struct inode *parent,
62816 + const struct inode *inode,
62817 + const struct dentry *dentry, const struct vfsmount *mnt)
62818 +{
62819 +#ifdef CONFIG_GRKERNSEC_LINK
62820 + const struct cred *cred = current_cred();
62821 +
62822 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
62823 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
62824 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
62825 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
62826 + return -EACCES;
62827 + }
62828 +#endif
62829 + return 0;
62830 +}
62831 +
62832 +int
62833 +gr_handle_hardlink(const struct dentry *dentry,
62834 + const struct vfsmount *mnt,
62835 + struct inode *inode, const int mode, const char *to)
62836 +{
62837 +#ifdef CONFIG_GRKERNSEC_LINK
62838 + const struct cred *cred = current_cred();
62839 +
62840 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
62841 + (!S_ISREG(mode) || (mode & S_ISUID) ||
62842 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
62843 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
62844 + !capable(CAP_FOWNER) && cred->uid) {
62845 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
62846 + return -EPERM;
62847 + }
62848 +#endif
62849 + return 0;
62850 +}
62851 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
62852 new file mode 100644
62853 index 0000000..a45d2e9
62854 --- /dev/null
62855 +++ b/grsecurity/grsec_log.c
62856 @@ -0,0 +1,322 @@
62857 +#include <linux/kernel.h>
62858 +#include <linux/sched.h>
62859 +#include <linux/file.h>
62860 +#include <linux/tty.h>
62861 +#include <linux/fs.h>
62862 +#include <linux/grinternal.h>
62863 +
62864 +#ifdef CONFIG_TREE_PREEMPT_RCU
62865 +#define DISABLE_PREEMPT() preempt_disable()
62866 +#define ENABLE_PREEMPT() preempt_enable()
62867 +#else
62868 +#define DISABLE_PREEMPT()
62869 +#define ENABLE_PREEMPT()
62870 +#endif
62871 +
62872 +#define BEGIN_LOCKS(x) \
62873 + DISABLE_PREEMPT(); \
62874 + rcu_read_lock(); \
62875 + read_lock(&tasklist_lock); \
62876 + read_lock(&grsec_exec_file_lock); \
62877 + if (x != GR_DO_AUDIT) \
62878 + spin_lock(&grsec_alert_lock); \
62879 + else \
62880 + spin_lock(&grsec_audit_lock)
62881 +
62882 +#define END_LOCKS(x) \
62883 + if (x != GR_DO_AUDIT) \
62884 + spin_unlock(&grsec_alert_lock); \
62885 + else \
62886 + spin_unlock(&grsec_audit_lock); \
62887 + read_unlock(&grsec_exec_file_lock); \
62888 + read_unlock(&tasklist_lock); \
62889 + rcu_read_unlock(); \
62890 + ENABLE_PREEMPT(); \
62891 + if (x == GR_DONT_AUDIT) \
62892 + gr_handle_alertkill(current)
62893 +
62894 +enum {
62895 + FLOODING,
62896 + NO_FLOODING
62897 +};
62898 +
62899 +extern char *gr_alert_log_fmt;
62900 +extern char *gr_audit_log_fmt;
62901 +extern char *gr_alert_log_buf;
62902 +extern char *gr_audit_log_buf;
62903 +
62904 +static int gr_log_start(int audit)
62905 +{
62906 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
62907 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
62908 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
62909 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
62910 + unsigned long curr_secs = get_seconds();
62911 +
62912 + if (audit == GR_DO_AUDIT)
62913 + goto set_fmt;
62914 +
62915 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
62916 + grsec_alert_wtime = curr_secs;
62917 + grsec_alert_fyet = 0;
62918 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
62919 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
62920 + grsec_alert_fyet++;
62921 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
62922 + grsec_alert_wtime = curr_secs;
62923 + grsec_alert_fyet++;
62924 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
62925 + return FLOODING;
62926 + }
62927 + else return FLOODING;
62928 +
62929 +set_fmt:
62930 +#endif
62931 + memset(buf, 0, PAGE_SIZE);
62932 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
62933 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
62934 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
62935 + } else if (current->signal->curr_ip) {
62936 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
62937 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
62938 + } else if (gr_acl_is_enabled()) {
62939 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
62940 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
62941 + } else {
62942 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
62943 + strcpy(buf, fmt);
62944 + }
62945 +
62946 + return NO_FLOODING;
62947 +}
62948 +
62949 +static void gr_log_middle(int audit, const char *msg, va_list ap)
62950 + __attribute__ ((format (printf, 2, 0)));
62951 +
62952 +static void gr_log_middle(int audit, const char *msg, va_list ap)
62953 +{
62954 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
62955 + unsigned int len = strlen(buf);
62956 +
62957 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
62958 +
62959 + return;
62960 +}
62961 +
62962 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
62963 + __attribute__ ((format (printf, 2, 3)));
62964 +
62965 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
62966 +{
62967 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
62968 + unsigned int len = strlen(buf);
62969 + va_list ap;
62970 +
62971 + va_start(ap, msg);
62972 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
62973 + va_end(ap);
62974 +
62975 + return;
62976 +}
62977 +
62978 +static void gr_log_end(int audit, int append_default)
62979 +{
62980 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
62981 +
62982 + if (append_default) {
62983 + unsigned int len = strlen(buf);
62984 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
62985 + }
62986 +
62987 + printk("%s\n", buf);
62988 +
62989 + return;
62990 +}
62991 +
62992 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
62993 +{
62994 + int logtype;
62995 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
62996 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
62997 + void *voidptr = NULL;
62998 + int num1 = 0, num2 = 0;
62999 + unsigned long ulong1 = 0, ulong2 = 0;
63000 + struct dentry *dentry = NULL;
63001 + struct vfsmount *mnt = NULL;
63002 + struct file *file = NULL;
63003 + struct task_struct *task = NULL;
63004 + const struct cred *cred, *pcred;
63005 + va_list ap;
63006 +
63007 + BEGIN_LOCKS(audit);
63008 + logtype = gr_log_start(audit);
63009 + if (logtype == FLOODING) {
63010 + END_LOCKS(audit);
63011 + return;
63012 + }
63013 + va_start(ap, argtypes);
63014 + switch (argtypes) {
63015 + case GR_TTYSNIFF:
63016 + task = va_arg(ap, struct task_struct *);
63017 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
63018 + break;
63019 + case GR_SYSCTL_HIDDEN:
63020 + str1 = va_arg(ap, char *);
63021 + gr_log_middle_varargs(audit, msg, result, str1);
63022 + break;
63023 + case GR_RBAC:
63024 + dentry = va_arg(ap, struct dentry *);
63025 + mnt = va_arg(ap, struct vfsmount *);
63026 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
63027 + break;
63028 + case GR_RBAC_STR:
63029 + dentry = va_arg(ap, struct dentry *);
63030 + mnt = va_arg(ap, struct vfsmount *);
63031 + str1 = va_arg(ap, char *);
63032 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
63033 + break;
63034 + case GR_STR_RBAC:
63035 + str1 = va_arg(ap, char *);
63036 + dentry = va_arg(ap, struct dentry *);
63037 + mnt = va_arg(ap, struct vfsmount *);
63038 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
63039 + break;
63040 + case GR_RBAC_MODE2:
63041 + dentry = va_arg(ap, struct dentry *);
63042 + mnt = va_arg(ap, struct vfsmount *);
63043 + str1 = va_arg(ap, char *);
63044 + str2 = va_arg(ap, char *);
63045 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
63046 + break;
63047 + case GR_RBAC_MODE3:
63048 + dentry = va_arg(ap, struct dentry *);
63049 + mnt = va_arg(ap, struct vfsmount *);
63050 + str1 = va_arg(ap, char *);
63051 + str2 = va_arg(ap, char *);
63052 + str3 = va_arg(ap, char *);
63053 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
63054 + break;
63055 + case GR_FILENAME:
63056 + dentry = va_arg(ap, struct dentry *);
63057 + mnt = va_arg(ap, struct vfsmount *);
63058 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
63059 + break;
63060 + case GR_STR_FILENAME:
63061 + str1 = va_arg(ap, char *);
63062 + dentry = va_arg(ap, struct dentry *);
63063 + mnt = va_arg(ap, struct vfsmount *);
63064 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
63065 + break;
63066 + case GR_FILENAME_STR:
63067 + dentry = va_arg(ap, struct dentry *);
63068 + mnt = va_arg(ap, struct vfsmount *);
63069 + str1 = va_arg(ap, char *);
63070 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
63071 + break;
63072 + case GR_FILENAME_TWO_INT:
63073 + dentry = va_arg(ap, struct dentry *);
63074 + mnt = va_arg(ap, struct vfsmount *);
63075 + num1 = va_arg(ap, int);
63076 + num2 = va_arg(ap, int);
63077 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
63078 + break;
63079 + case GR_FILENAME_TWO_INT_STR:
63080 + dentry = va_arg(ap, struct dentry *);
63081 + mnt = va_arg(ap, struct vfsmount *);
63082 + num1 = va_arg(ap, int);
63083 + num2 = va_arg(ap, int);
63084 + str1 = va_arg(ap, char *);
63085 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
63086 + break;
63087 + case GR_TEXTREL:
63088 + file = va_arg(ap, struct file *);
63089 + ulong1 = va_arg(ap, unsigned long);
63090 + ulong2 = va_arg(ap, unsigned long);
63091 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
63092 + break;
63093 + case GR_PTRACE:
63094 + task = va_arg(ap, struct task_struct *);
63095 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
63096 + break;
63097 + case GR_RESOURCE:
63098 + task = va_arg(ap, struct task_struct *);
63099 + cred = __task_cred(task);
63100 + pcred = __task_cred(task->real_parent);
63101 + ulong1 = va_arg(ap, unsigned long);
63102 + str1 = va_arg(ap, char *);
63103 + ulong2 = va_arg(ap, unsigned long);
63104 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
63105 + break;
63106 + case GR_CAP:
63107 + task = va_arg(ap, struct task_struct *);
63108 + cred = __task_cred(task);
63109 + pcred = __task_cred(task->real_parent);
63110 + str1 = va_arg(ap, char *);
63111 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
63112 + break;
63113 + case GR_SIG:
63114 + str1 = va_arg(ap, char *);
63115 + voidptr = va_arg(ap, void *);
63116 + gr_log_middle_varargs(audit, msg, str1, voidptr);
63117 + break;
63118 + case GR_SIG2:
63119 + task = va_arg(ap, struct task_struct *);
63120 + cred = __task_cred(task);
63121 + pcred = __task_cred(task->real_parent);
63122 + num1 = va_arg(ap, int);
63123 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
63124 + break;
63125 + case GR_CRASH1:
63126 + task = va_arg(ap, struct task_struct *);
63127 + cred = __task_cred(task);
63128 + pcred = __task_cred(task->real_parent);
63129 + ulong1 = va_arg(ap, unsigned long);
63130 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
63131 + break;
63132 + case GR_CRASH2:
63133 + task = va_arg(ap, struct task_struct *);
63134 + cred = __task_cred(task);
63135 + pcred = __task_cred(task->real_parent);
63136 + ulong1 = va_arg(ap, unsigned long);
63137 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
63138 + break;
63139 + case GR_RWXMAP:
63140 + file = va_arg(ap, struct file *);
63141 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
63142 + break;
63143 + case GR_PSACCT:
63144 + {
63145 + unsigned int wday, cday;
63146 + __u8 whr, chr;
63147 + __u8 wmin, cmin;
63148 + __u8 wsec, csec;
63149 + char cur_tty[64] = { 0 };
63150 + char parent_tty[64] = { 0 };
63151 +
63152 + task = va_arg(ap, struct task_struct *);
63153 + wday = va_arg(ap, unsigned int);
63154 + cday = va_arg(ap, unsigned int);
63155 + whr = va_arg(ap, int);
63156 + chr = va_arg(ap, int);
63157 + wmin = va_arg(ap, int);
63158 + cmin = va_arg(ap, int);
63159 + wsec = va_arg(ap, int);
63160 + csec = va_arg(ap, int);
63161 + ulong1 = va_arg(ap, unsigned long);
63162 + cred = __task_cred(task);
63163 + pcred = __task_cred(task->real_parent);
63164 +
63165 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
63166 + }
63167 + break;
63168 + default:
63169 + gr_log_middle(audit, msg, ap);
63170 + }
63171 + va_end(ap);
63172 + // these don't need DEFAULTSECARGS printed on the end
63173 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
63174 + gr_log_end(audit, 0);
63175 + else
63176 + gr_log_end(audit, 1);
63177 + END_LOCKS(audit);
63178 +}
63179 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
63180 new file mode 100644
63181 index 0000000..6c0416b
63182 --- /dev/null
63183 +++ b/grsecurity/grsec_mem.c
63184 @@ -0,0 +1,33 @@
63185 +#include <linux/kernel.h>
63186 +#include <linux/sched.h>
63187 +#include <linux/mm.h>
63188 +#include <linux/mman.h>
63189 +#include <linux/grinternal.h>
63190 +
63191 +void
63192 +gr_handle_ioperm(void)
63193 +{
63194 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
63195 + return;
63196 +}
63197 +
63198 +void
63199 +gr_handle_iopl(void)
63200 +{
63201 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
63202 + return;
63203 +}
63204 +
63205 +void
63206 +gr_handle_mem_readwrite(u64 from, u64 to)
63207 +{
63208 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
63209 + return;
63210 +}
63211 +
63212 +void
63213 +gr_handle_vm86(void)
63214 +{
63215 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
63216 + return;
63217 +}
63218 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
63219 new file mode 100644
63220 index 0000000..2131422
63221 --- /dev/null
63222 +++ b/grsecurity/grsec_mount.c
63223 @@ -0,0 +1,62 @@
63224 +#include <linux/kernel.h>
63225 +#include <linux/sched.h>
63226 +#include <linux/mount.h>
63227 +#include <linux/grsecurity.h>
63228 +#include <linux/grinternal.h>
63229 +
63230 +void
63231 +gr_log_remount(const char *devname, const int retval)
63232 +{
63233 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
63234 + if (grsec_enable_mount && (retval >= 0))
63235 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
63236 +#endif
63237 + return;
63238 +}
63239 +
63240 +void
63241 +gr_log_unmount(const char *devname, const int retval)
63242 +{
63243 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
63244 + if (grsec_enable_mount && (retval >= 0))
63245 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
63246 +#endif
63247 + return;
63248 +}
63249 +
63250 +void
63251 +gr_log_mount(const char *from, const char *to, const int retval)
63252 +{
63253 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
63254 + if (grsec_enable_mount && (retval >= 0))
63255 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
63256 +#endif
63257 + return;
63258 +}
63259 +
63260 +int
63261 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
63262 +{
63263 +#ifdef CONFIG_GRKERNSEC_ROFS
63264 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
63265 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
63266 + return -EPERM;
63267 + } else
63268 + return 0;
63269 +#endif
63270 + return 0;
63271 +}
63272 +
63273 +int
63274 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
63275 +{
63276 +#ifdef CONFIG_GRKERNSEC_ROFS
63277 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
63278 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
63279 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
63280 + return -EPERM;
63281 + } else
63282 + return 0;
63283 +#endif
63284 + return 0;
63285 +}
63286 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
63287 new file mode 100644
63288 index 0000000..a3b12a0
63289 --- /dev/null
63290 +++ b/grsecurity/grsec_pax.c
63291 @@ -0,0 +1,36 @@
63292 +#include <linux/kernel.h>
63293 +#include <linux/sched.h>
63294 +#include <linux/mm.h>
63295 +#include <linux/file.h>
63296 +#include <linux/grinternal.h>
63297 +#include <linux/grsecurity.h>
63298 +
63299 +void
63300 +gr_log_textrel(struct vm_area_struct * vma)
63301 +{
63302 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
63303 + if (grsec_enable_audit_textrel)
63304 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
63305 +#endif
63306 + return;
63307 +}
63308 +
63309 +void
63310 +gr_log_rwxmmap(struct file *file)
63311 +{
63312 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
63313 + if (grsec_enable_log_rwxmaps)
63314 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
63315 +#endif
63316 + return;
63317 +}
63318 +
63319 +void
63320 +gr_log_rwxmprotect(struct file *file)
63321 +{
63322 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
63323 + if (grsec_enable_log_rwxmaps)
63324 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
63325 +#endif
63326 + return;
63327 +}
63328 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
63329 new file mode 100644
63330 index 0000000..472c1d6
63331 --- /dev/null
63332 +++ b/grsecurity/grsec_ptrace.c
63333 @@ -0,0 +1,14 @@
63334 +#include <linux/kernel.h>
63335 +#include <linux/sched.h>
63336 +#include <linux/grinternal.h>
63337 +#include <linux/grsecurity.h>
63338 +
63339 +void
63340 +gr_audit_ptrace(struct task_struct *task)
63341 +{
63342 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
63343 + if (grsec_enable_audit_ptrace)
63344 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
63345 +#endif
63346 + return;
63347 +}
63348 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
63349 new file mode 100644
63350 index 0000000..dc73fe9
63351 --- /dev/null
63352 +++ b/grsecurity/grsec_sig.c
63353 @@ -0,0 +1,205 @@
63354 +#include <linux/kernel.h>
63355 +#include <linux/sched.h>
63356 +#include <linux/delay.h>
63357 +#include <linux/grsecurity.h>
63358 +#include <linux/grinternal.h>
63359 +#include <linux/hardirq.h>
63360 +
63361 +char *signames[] = {
63362 + [SIGSEGV] = "Segmentation fault",
63363 + [SIGILL] = "Illegal instruction",
63364 + [SIGABRT] = "Abort",
63365 + [SIGBUS] = "Invalid alignment/Bus error"
63366 +};
63367 +
63368 +void
63369 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
63370 +{
63371 +#ifdef CONFIG_GRKERNSEC_SIGNAL
63372 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
63373 + (sig == SIGABRT) || (sig == SIGBUS))) {
63374 + if (t->pid == current->pid) {
63375 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
63376 + } else {
63377 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
63378 + }
63379 + }
63380 +#endif
63381 + return;
63382 +}
63383 +
63384 +int
63385 +gr_handle_signal(const struct task_struct *p, const int sig)
63386 +{
63387 +#ifdef CONFIG_GRKERNSEC
63388 + if (current->pid > 1 && gr_check_protected_task(p)) {
63389 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
63390 + return -EPERM;
63391 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
63392 + return -EPERM;
63393 + }
63394 +#endif
63395 + return 0;
63396 +}
63397 +
63398 +#ifdef CONFIG_GRKERNSEC
63399 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
63400 +
63401 +int gr_fake_force_sig(int sig, struct task_struct *t)
63402 +{
63403 + unsigned long int flags;
63404 + int ret, blocked, ignored;
63405 + struct k_sigaction *action;
63406 +
63407 + spin_lock_irqsave(&t->sighand->siglock, flags);
63408 + action = &t->sighand->action[sig-1];
63409 + ignored = action->sa.sa_handler == SIG_IGN;
63410 + blocked = sigismember(&t->blocked, sig);
63411 + if (blocked || ignored) {
63412 + action->sa.sa_handler = SIG_DFL;
63413 + if (blocked) {
63414 + sigdelset(&t->blocked, sig);
63415 + recalc_sigpending_and_wake(t);
63416 + }
63417 + }
63418 + if (action->sa.sa_handler == SIG_DFL)
63419 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
63420 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
63421 +
63422 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
63423 +
63424 + return ret;
63425 +}
63426 +#endif
63427 +
63428 +#ifdef CONFIG_GRKERNSEC_BRUTE
63429 +#define GR_USER_BAN_TIME (15 * 60)
63430 +
63431 +static int __get_dumpable(unsigned long mm_flags)
63432 +{
63433 + int ret;
63434 +
63435 + ret = mm_flags & MMF_DUMPABLE_MASK;
63436 + return (ret >= 2) ? 2 : ret;
63437 +}
63438 +#endif
63439 +
63440 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
63441 +{
63442 +#ifdef CONFIG_GRKERNSEC_BRUTE
63443 + uid_t uid = 0;
63444 +
63445 + if (!grsec_enable_brute)
63446 + return;
63447 +
63448 + rcu_read_lock();
63449 + read_lock(&tasklist_lock);
63450 + read_lock(&grsec_exec_file_lock);
63451 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
63452 + p->real_parent->brute = 1;
63453 + else {
63454 + const struct cred *cred = __task_cred(p), *cred2;
63455 + struct task_struct *tsk, *tsk2;
63456 +
63457 + if (!__get_dumpable(mm_flags) && cred->uid) {
63458 + struct user_struct *user;
63459 +
63460 + uid = cred->uid;
63461 +
63462 + /* this is put upon execution past expiration */
63463 + user = find_user(uid);
63464 + if (user == NULL)
63465 + goto unlock;
63466 + user->banned = 1;
63467 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
63468 + if (user->ban_expires == ~0UL)
63469 + user->ban_expires--;
63470 +
63471 + do_each_thread(tsk2, tsk) {
63472 + cred2 = __task_cred(tsk);
63473 + if (tsk != p && cred2->uid == uid)
63474 + gr_fake_force_sig(SIGKILL, tsk);
63475 + } while_each_thread(tsk2, tsk);
63476 + }
63477 + }
63478 +unlock:
63479 + read_unlock(&grsec_exec_file_lock);
63480 + read_unlock(&tasklist_lock);
63481 + rcu_read_unlock();
63482 +
63483 + if (uid)
63484 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
63485 +#endif
63486 + return;
63487 +}
63488 +
63489 +void gr_handle_brute_check(void)
63490 +{
63491 +#ifdef CONFIG_GRKERNSEC_BRUTE
63492 + if (current->brute)
63493 + msleep(30 * 1000);
63494 +#endif
63495 + return;
63496 +}
63497 +
63498 +void gr_handle_kernel_exploit(void)
63499 +{
63500 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
63501 + const struct cred *cred;
63502 + struct task_struct *tsk, *tsk2;
63503 + struct user_struct *user;
63504 + uid_t uid;
63505 +
63506 + if (in_irq() || in_serving_softirq() || in_nmi())
63507 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
63508 +
63509 + uid = current_uid();
63510 +
63511 + if (uid == 0)
63512 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
63513 + else {
63514 + /* kill all the processes of this user, hold a reference
63515 + to their creds struct, and prevent them from creating
63516 + another process until system reset
63517 + */
63518 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
63519 + /* we intentionally leak this ref */
63520 + user = get_uid(current->cred->user);
63521 + if (user) {
63522 + user->banned = 1;
63523 + user->ban_expires = ~0UL;
63524 + }
63525 +
63526 + read_lock(&tasklist_lock);
63527 + do_each_thread(tsk2, tsk) {
63528 + cred = __task_cred(tsk);
63529 + if (cred->uid == uid)
63530 + gr_fake_force_sig(SIGKILL, tsk);
63531 + } while_each_thread(tsk2, tsk);
63532 + read_unlock(&tasklist_lock);
63533 + }
63534 +#endif
63535 +}
63536 +
63537 +int __gr_process_user_ban(struct user_struct *user)
63538 +{
63539 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
63540 + if (unlikely(user->banned)) {
63541 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
63542 + user->banned = 0;
63543 + user->ban_expires = 0;
63544 + free_uid(user);
63545 + } else
63546 + return -EPERM;
63547 + }
63548 +#endif
63549 + return 0;
63550 +}
63551 +
63552 +int gr_process_user_ban(void)
63553 +{
63554 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
63555 + return __gr_process_user_ban(current->cred->user);
63556 +#endif
63557 + return 0;
63558 +}
63559 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
63560 new file mode 100644
63561 index 0000000..7512ea9
63562 --- /dev/null
63563 +++ b/grsecurity/grsec_sock.c
63564 @@ -0,0 +1,275 @@
63565 +#include <linux/kernel.h>
63566 +#include <linux/module.h>
63567 +#include <linux/sched.h>
63568 +#include <linux/file.h>
63569 +#include <linux/net.h>
63570 +#include <linux/in.h>
63571 +#include <linux/ip.h>
63572 +#include <net/sock.h>
63573 +#include <net/inet_sock.h>
63574 +#include <linux/grsecurity.h>
63575 +#include <linux/grinternal.h>
63576 +#include <linux/gracl.h>
63577 +
63578 +kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
63579 +EXPORT_SYMBOL(gr_cap_rtnetlink);
63580 +
63581 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
63582 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
63583 +
63584 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
63585 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
63586 +
63587 +#ifdef CONFIG_UNIX_MODULE
63588 +EXPORT_SYMBOL(gr_acl_handle_unix);
63589 +EXPORT_SYMBOL(gr_acl_handle_mknod);
63590 +EXPORT_SYMBOL(gr_handle_chroot_unix);
63591 +EXPORT_SYMBOL(gr_handle_create);
63592 +#endif
63593 +
63594 +#ifdef CONFIG_GRKERNSEC
63595 +#define gr_conn_table_size 32749
63596 +struct conn_table_entry {
63597 + struct conn_table_entry *next;
63598 + struct signal_struct *sig;
63599 +};
63600 +
63601 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
63602 +DEFINE_SPINLOCK(gr_conn_table_lock);
63603 +
63604 +extern const char * gr_socktype_to_name(unsigned char type);
63605 +extern const char * gr_proto_to_name(unsigned char proto);
63606 +extern const char * gr_sockfamily_to_name(unsigned char family);
63607 +
63608 +static __inline__ int
63609 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
63610 +{
63611 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
63612 +}
63613 +
63614 +static __inline__ int
63615 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
63616 + __u16 sport, __u16 dport)
63617 +{
63618 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
63619 + sig->gr_sport == sport && sig->gr_dport == dport))
63620 + return 1;
63621 + else
63622 + return 0;
63623 +}
63624 +
63625 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
63626 +{
63627 + struct conn_table_entry **match;
63628 + unsigned int index;
63629 +
63630 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
63631 + sig->gr_sport, sig->gr_dport,
63632 + gr_conn_table_size);
63633 +
63634 + newent->sig = sig;
63635 +
63636 + match = &gr_conn_table[index];
63637 + newent->next = *match;
63638 + *match = newent;
63639 +
63640 + return;
63641 +}
63642 +
63643 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
63644 +{
63645 + struct conn_table_entry *match, *last = NULL;
63646 + unsigned int index;
63647 +
63648 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
63649 + sig->gr_sport, sig->gr_dport,
63650 + gr_conn_table_size);
63651 +
63652 + match = gr_conn_table[index];
63653 + while (match && !conn_match(match->sig,
63654 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
63655 + sig->gr_dport)) {
63656 + last = match;
63657 + match = match->next;
63658 + }
63659 +
63660 + if (match) {
63661 + if (last)
63662 + last->next = match->next;
63663 + else
63664 + gr_conn_table[index] = NULL;
63665 + kfree(match);
63666 + }
63667 +
63668 + return;
63669 +}
63670 +
63671 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
63672 + __u16 sport, __u16 dport)
63673 +{
63674 + struct conn_table_entry *match;
63675 + unsigned int index;
63676 +
63677 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
63678 +
63679 + match = gr_conn_table[index];
63680 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
63681 + match = match->next;
63682 +
63683 + if (match)
63684 + return match->sig;
63685 + else
63686 + return NULL;
63687 +}
63688 +
63689 +#endif
63690 +
63691 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
63692 +{
63693 +#ifdef CONFIG_GRKERNSEC
63694 + struct signal_struct *sig = task->signal;
63695 + struct conn_table_entry *newent;
63696 +
63697 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
63698 + if (newent == NULL)
63699 + return;
63700 + /* no bh lock needed since we are called with bh disabled */
63701 + spin_lock(&gr_conn_table_lock);
63702 + gr_del_task_from_ip_table_nolock(sig);
63703 + sig->gr_saddr = inet->rcv_saddr;
63704 + sig->gr_daddr = inet->daddr;
63705 + sig->gr_sport = inet->sport;
63706 + sig->gr_dport = inet->dport;
63707 + gr_add_to_task_ip_table_nolock(sig, newent);
63708 + spin_unlock(&gr_conn_table_lock);
63709 +#endif
63710 + return;
63711 +}
63712 +
63713 +void gr_del_task_from_ip_table(struct task_struct *task)
63714 +{
63715 +#ifdef CONFIG_GRKERNSEC
63716 + spin_lock_bh(&gr_conn_table_lock);
63717 + gr_del_task_from_ip_table_nolock(task->signal);
63718 + spin_unlock_bh(&gr_conn_table_lock);
63719 +#endif
63720 + return;
63721 +}
63722 +
63723 +void
63724 +gr_attach_curr_ip(const struct sock *sk)
63725 +{
63726 +#ifdef CONFIG_GRKERNSEC
63727 + struct signal_struct *p, *set;
63728 + const struct inet_sock *inet = inet_sk(sk);
63729 +
63730 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
63731 + return;
63732 +
63733 + set = current->signal;
63734 +
63735 + spin_lock_bh(&gr_conn_table_lock);
63736 + p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
63737 + inet->dport, inet->sport);
63738 + if (unlikely(p != NULL)) {
63739 + set->curr_ip = p->curr_ip;
63740 + set->used_accept = 1;
63741 + gr_del_task_from_ip_table_nolock(p);
63742 + spin_unlock_bh(&gr_conn_table_lock);
63743 + return;
63744 + }
63745 + spin_unlock_bh(&gr_conn_table_lock);
63746 +
63747 + set->curr_ip = inet->daddr;
63748 + set->used_accept = 1;
63749 +#endif
63750 + return;
63751 +}
63752 +
63753 +int
63754 +gr_handle_sock_all(const int family, const int type, const int protocol)
63755 +{
63756 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
63757 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
63758 + (family != AF_UNIX)) {
63759 + if (family == AF_INET)
63760 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
63761 + else
63762 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
63763 + return -EACCES;
63764 + }
63765 +#endif
63766 + return 0;
63767 +}
63768 +
63769 +int
63770 +gr_handle_sock_server(const struct sockaddr *sck)
63771 +{
63772 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
63773 + if (grsec_enable_socket_server &&
63774 + in_group_p(grsec_socket_server_gid) &&
63775 + sck && (sck->sa_family != AF_UNIX) &&
63776 + (sck->sa_family != AF_LOCAL)) {
63777 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
63778 + return -EACCES;
63779 + }
63780 +#endif
63781 + return 0;
63782 +}
63783 +
63784 +int
63785 +gr_handle_sock_server_other(const struct sock *sck)
63786 +{
63787 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
63788 + if (grsec_enable_socket_server &&
63789 + in_group_p(grsec_socket_server_gid) &&
63790 + sck && (sck->sk_family != AF_UNIX) &&
63791 + (sck->sk_family != AF_LOCAL)) {
63792 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
63793 + return -EACCES;
63794 + }
63795 +#endif
63796 + return 0;
63797 +}
63798 +
63799 +int
63800 +gr_handle_sock_client(const struct sockaddr *sck)
63801 +{
63802 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
63803 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
63804 + sck && (sck->sa_family != AF_UNIX) &&
63805 + (sck->sa_family != AF_LOCAL)) {
63806 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
63807 + return -EACCES;
63808 + }
63809 +#endif
63810 + return 0;
63811 +}
63812 +
63813 +kernel_cap_t
63814 +gr_cap_rtnetlink(struct sock *sock)
63815 +{
63816 +#ifdef CONFIG_GRKERNSEC
63817 + if (!gr_acl_is_enabled())
63818 + return current_cap();
63819 + else if (sock->sk_protocol == NETLINK_ISCSI &&
63820 + cap_raised(current_cap(), CAP_SYS_ADMIN) &&
63821 + gr_is_capable(CAP_SYS_ADMIN))
63822 + return current_cap();
63823 + else if (sock->sk_protocol == NETLINK_AUDIT &&
63824 + cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
63825 + gr_is_capable(CAP_AUDIT_WRITE) &&
63826 + cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
63827 + gr_is_capable(CAP_AUDIT_CONTROL))
63828 + return current_cap();
63829 + else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
63830 + ((sock->sk_protocol == NETLINK_ROUTE) ?
63831 + gr_is_capable_nolog(CAP_NET_ADMIN) :
63832 + gr_is_capable(CAP_NET_ADMIN)))
63833 + return current_cap();
63834 + else
63835 + return __cap_empty_set;
63836 +#else
63837 + return current_cap();
63838 +#endif
63839 +}
63840 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
63841 new file mode 100644
63842 index 0000000..2753505
63843 --- /dev/null
63844 +++ b/grsecurity/grsec_sysctl.c
63845 @@ -0,0 +1,479 @@
63846 +#include <linux/kernel.h>
63847 +#include <linux/sched.h>
63848 +#include <linux/sysctl.h>
63849 +#include <linux/grsecurity.h>
63850 +#include <linux/grinternal.h>
63851 +
63852 +int
63853 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
63854 +{
63855 +#ifdef CONFIG_GRKERNSEC_SYSCTL
63856 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
63857 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
63858 + return -EACCES;
63859 + }
63860 +#endif
63861 + return 0;
63862 +}
63863 +
63864 +#ifdef CONFIG_GRKERNSEC_ROFS
63865 +static int __maybe_unused one = 1;
63866 +#endif
63867 +
63868 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
63869 +ctl_table grsecurity_table[] = {
63870 +#ifdef CONFIG_GRKERNSEC_SYSCTL
63871 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
63872 +#ifdef CONFIG_GRKERNSEC_IO
63873 + {
63874 + .ctl_name = CTL_UNNUMBERED,
63875 + .procname = "disable_priv_io",
63876 + .data = &grsec_disable_privio,
63877 + .maxlen = sizeof(int),
63878 + .mode = 0600,
63879 + .proc_handler = &proc_dointvec,
63880 + },
63881 +#endif
63882 +#endif
63883 +#ifdef CONFIG_GRKERNSEC_LINK
63884 + {
63885 + .ctl_name = CTL_UNNUMBERED,
63886 + .procname = "linking_restrictions",
63887 + .data = &grsec_enable_link,
63888 + .maxlen = sizeof(int),
63889 + .mode = 0600,
63890 + .proc_handler = &proc_dointvec,
63891 + },
63892 +#endif
63893 +#ifdef CONFIG_GRKERNSEC_BRUTE
63894 + {
63895 + .ctl_name = CTL_UNNUMBERED,
63896 + .procname = "deter_bruteforce",
63897 + .data = &grsec_enable_brute,
63898 + .maxlen = sizeof(int),
63899 + .mode = 0600,
63900 + .proc_handler = &proc_dointvec,
63901 + },
63902 +#endif
63903 +#ifdef CONFIG_GRKERNSEC_FIFO
63904 + {
63905 + .ctl_name = CTL_UNNUMBERED,
63906 + .procname = "fifo_restrictions",
63907 + .data = &grsec_enable_fifo,
63908 + .maxlen = sizeof(int),
63909 + .mode = 0600,
63910 + .proc_handler = &proc_dointvec,
63911 + },
63912 +#endif
63913 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
63914 + {
63915 + .ctl_name = CTL_UNNUMBERED,
63916 + .procname = "ip_blackhole",
63917 + .data = &grsec_enable_blackhole,
63918 + .maxlen = sizeof(int),
63919 + .mode = 0600,
63920 + .proc_handler = &proc_dointvec,
63921 + },
63922 + {
63923 + .ctl_name = CTL_UNNUMBERED,
63924 + .procname = "lastack_retries",
63925 + .data = &grsec_lastack_retries,
63926 + .maxlen = sizeof(int),
63927 + .mode = 0600,
63928 + .proc_handler = &proc_dointvec,
63929 + },
63930 +#endif
63931 +#ifdef CONFIG_GRKERNSEC_EXECLOG
63932 + {
63933 + .ctl_name = CTL_UNNUMBERED,
63934 + .procname = "exec_logging",
63935 + .data = &grsec_enable_execlog,
63936 + .maxlen = sizeof(int),
63937 + .mode = 0600,
63938 + .proc_handler = &proc_dointvec,
63939 + },
63940 +#endif
63941 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
63942 + {
63943 + .ctl_name = CTL_UNNUMBERED,
63944 + .procname = "rwxmap_logging",
63945 + .data = &grsec_enable_log_rwxmaps,
63946 + .maxlen = sizeof(int),
63947 + .mode = 0600,
63948 + .proc_handler = &proc_dointvec,
63949 + },
63950 +#endif
63951 +#ifdef CONFIG_GRKERNSEC_SIGNAL
63952 + {
63953 + .ctl_name = CTL_UNNUMBERED,
63954 + .procname = "signal_logging",
63955 + .data = &grsec_enable_signal,
63956 + .maxlen = sizeof(int),
63957 + .mode = 0600,
63958 + .proc_handler = &proc_dointvec,
63959 + },
63960 +#endif
63961 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
63962 + {
63963 + .ctl_name = CTL_UNNUMBERED,
63964 + .procname = "forkfail_logging",
63965 + .data = &grsec_enable_forkfail,
63966 + .maxlen = sizeof(int),
63967 + .mode = 0600,
63968 + .proc_handler = &proc_dointvec,
63969 + },
63970 +#endif
63971 +#ifdef CONFIG_GRKERNSEC_TIME
63972 + {
63973 + .ctl_name = CTL_UNNUMBERED,
63974 + .procname = "timechange_logging",
63975 + .data = &grsec_enable_time,
63976 + .maxlen = sizeof(int),
63977 + .mode = 0600,
63978 + .proc_handler = &proc_dointvec,
63979 + },
63980 +#endif
63981 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
63982 + {
63983 + .ctl_name = CTL_UNNUMBERED,
63984 + .procname = "chroot_deny_shmat",
63985 + .data = &grsec_enable_chroot_shmat,
63986 + .maxlen = sizeof(int),
63987 + .mode = 0600,
63988 + .proc_handler = &proc_dointvec,
63989 + },
63990 +#endif
63991 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
63992 + {
63993 + .ctl_name = CTL_UNNUMBERED,
63994 + .procname = "chroot_deny_unix",
63995 + .data = &grsec_enable_chroot_unix,
63996 + .maxlen = sizeof(int),
63997 + .mode = 0600,
63998 + .proc_handler = &proc_dointvec,
63999 + },
64000 +#endif
64001 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
64002 + {
64003 + .ctl_name = CTL_UNNUMBERED,
64004 + .procname = "chroot_deny_mount",
64005 + .data = &grsec_enable_chroot_mount,
64006 + .maxlen = sizeof(int),
64007 + .mode = 0600,
64008 + .proc_handler = &proc_dointvec,
64009 + },
64010 +#endif
64011 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
64012 + {
64013 + .ctl_name = CTL_UNNUMBERED,
64014 + .procname = "chroot_deny_fchdir",
64015 + .data = &grsec_enable_chroot_fchdir,
64016 + .maxlen = sizeof(int),
64017 + .mode = 0600,
64018 + .proc_handler = &proc_dointvec,
64019 + },
64020 +#endif
64021 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
64022 + {
64023 + .ctl_name = CTL_UNNUMBERED,
64024 + .procname = "chroot_deny_chroot",
64025 + .data = &grsec_enable_chroot_double,
64026 + .maxlen = sizeof(int),
64027 + .mode = 0600,
64028 + .proc_handler = &proc_dointvec,
64029 + },
64030 +#endif
64031 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
64032 + {
64033 + .ctl_name = CTL_UNNUMBERED,
64034 + .procname = "chroot_deny_pivot",
64035 + .data = &grsec_enable_chroot_pivot,
64036 + .maxlen = sizeof(int),
64037 + .mode = 0600,
64038 + .proc_handler = &proc_dointvec,
64039 + },
64040 +#endif
64041 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
64042 + {
64043 + .ctl_name = CTL_UNNUMBERED,
64044 + .procname = "chroot_enforce_chdir",
64045 + .data = &grsec_enable_chroot_chdir,
64046 + .maxlen = sizeof(int),
64047 + .mode = 0600,
64048 + .proc_handler = &proc_dointvec,
64049 + },
64050 +#endif
64051 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
64052 + {
64053 + .ctl_name = CTL_UNNUMBERED,
64054 + .procname = "chroot_deny_chmod",
64055 + .data = &grsec_enable_chroot_chmod,
64056 + .maxlen = sizeof(int),
64057 + .mode = 0600,
64058 + .proc_handler = &proc_dointvec,
64059 + },
64060 +#endif
64061 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
64062 + {
64063 + .ctl_name = CTL_UNNUMBERED,
64064 + .procname = "chroot_deny_mknod",
64065 + .data = &grsec_enable_chroot_mknod,
64066 + .maxlen = sizeof(int),
64067 + .mode = 0600,
64068 + .proc_handler = &proc_dointvec,
64069 + },
64070 +#endif
64071 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
64072 + {
64073 + .ctl_name = CTL_UNNUMBERED,
64074 + .procname = "chroot_restrict_nice",
64075 + .data = &grsec_enable_chroot_nice,
64076 + .maxlen = sizeof(int),
64077 + .mode = 0600,
64078 + .proc_handler = &proc_dointvec,
64079 + },
64080 +#endif
64081 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
64082 + {
64083 + .ctl_name = CTL_UNNUMBERED,
64084 + .procname = "chroot_execlog",
64085 + .data = &grsec_enable_chroot_execlog,
64086 + .maxlen = sizeof(int),
64087 + .mode = 0600,
64088 + .proc_handler = &proc_dointvec,
64089 + },
64090 +#endif
64091 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
64092 + {
64093 + .ctl_name = CTL_UNNUMBERED,
64094 + .procname = "chroot_caps",
64095 + .data = &grsec_enable_chroot_caps,
64096 + .maxlen = sizeof(int),
64097 + .mode = 0600,
64098 + .proc_handler = &proc_dointvec,
64099 + },
64100 +#endif
64101 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
64102 + {
64103 + .ctl_name = CTL_UNNUMBERED,
64104 + .procname = "chroot_deny_sysctl",
64105 + .data = &grsec_enable_chroot_sysctl,
64106 + .maxlen = sizeof(int),
64107 + .mode = 0600,
64108 + .proc_handler = &proc_dointvec,
64109 + },
64110 +#endif
64111 +#ifdef CONFIG_GRKERNSEC_TPE
64112 + {
64113 + .ctl_name = CTL_UNNUMBERED,
64114 + .procname = "tpe",
64115 + .data = &grsec_enable_tpe,
64116 + .maxlen = sizeof(int),
64117 + .mode = 0600,
64118 + .proc_handler = &proc_dointvec,
64119 + },
64120 + {
64121 + .ctl_name = CTL_UNNUMBERED,
64122 + .procname = "tpe_gid",
64123 + .data = &grsec_tpe_gid,
64124 + .maxlen = sizeof(int),
64125 + .mode = 0600,
64126 + .proc_handler = &proc_dointvec,
64127 + },
64128 +#endif
64129 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
64130 + {
64131 + .ctl_name = CTL_UNNUMBERED,
64132 + .procname = "tpe_invert",
64133 + .data = &grsec_enable_tpe_invert,
64134 + .maxlen = sizeof(int),
64135 + .mode = 0600,
64136 + .proc_handler = &proc_dointvec,
64137 + },
64138 +#endif
64139 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
64140 + {
64141 + .ctl_name = CTL_UNNUMBERED,
64142 + .procname = "tpe_restrict_all",
64143 + .data = &grsec_enable_tpe_all,
64144 + .maxlen = sizeof(int),
64145 + .mode = 0600,
64146 + .proc_handler = &proc_dointvec,
64147 + },
64148 +#endif
64149 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
64150 + {
64151 + .ctl_name = CTL_UNNUMBERED,
64152 + .procname = "socket_all",
64153 + .data = &grsec_enable_socket_all,
64154 + .maxlen = sizeof(int),
64155 + .mode = 0600,
64156 + .proc_handler = &proc_dointvec,
64157 + },
64158 + {
64159 + .ctl_name = CTL_UNNUMBERED,
64160 + .procname = "socket_all_gid",
64161 + .data = &grsec_socket_all_gid,
64162 + .maxlen = sizeof(int),
64163 + .mode = 0600,
64164 + .proc_handler = &proc_dointvec,
64165 + },
64166 +#endif
64167 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
64168 + {
64169 + .ctl_name = CTL_UNNUMBERED,
64170 + .procname = "socket_client",
64171 + .data = &grsec_enable_socket_client,
64172 + .maxlen = sizeof(int),
64173 + .mode = 0600,
64174 + .proc_handler = &proc_dointvec,
64175 + },
64176 + {
64177 + .ctl_name = CTL_UNNUMBERED,
64178 + .procname = "socket_client_gid",
64179 + .data = &grsec_socket_client_gid,
64180 + .maxlen = sizeof(int),
64181 + .mode = 0600,
64182 + .proc_handler = &proc_dointvec,
64183 + },
64184 +#endif
64185 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64186 + {
64187 + .ctl_name = CTL_UNNUMBERED,
64188 + .procname = "socket_server",
64189 + .data = &grsec_enable_socket_server,
64190 + .maxlen = sizeof(int),
64191 + .mode = 0600,
64192 + .proc_handler = &proc_dointvec,
64193 + },
64194 + {
64195 + .ctl_name = CTL_UNNUMBERED,
64196 + .procname = "socket_server_gid",
64197 + .data = &grsec_socket_server_gid,
64198 + .maxlen = sizeof(int),
64199 + .mode = 0600,
64200 + .proc_handler = &proc_dointvec,
64201 + },
64202 +#endif
64203 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
64204 + {
64205 + .ctl_name = CTL_UNNUMBERED,
64206 + .procname = "audit_group",
64207 + .data = &grsec_enable_group,
64208 + .maxlen = sizeof(int),
64209 + .mode = 0600,
64210 + .proc_handler = &proc_dointvec,
64211 + },
64212 + {
64213 + .ctl_name = CTL_UNNUMBERED,
64214 + .procname = "audit_gid",
64215 + .data = &grsec_audit_gid,
64216 + .maxlen = sizeof(int),
64217 + .mode = 0600,
64218 + .proc_handler = &proc_dointvec,
64219 + },
64220 +#endif
64221 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
64222 + {
64223 + .ctl_name = CTL_UNNUMBERED,
64224 + .procname = "audit_chdir",
64225 + .data = &grsec_enable_chdir,
64226 + .maxlen = sizeof(int),
64227 + .mode = 0600,
64228 + .proc_handler = &proc_dointvec,
64229 + },
64230 +#endif
64231 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64232 + {
64233 + .ctl_name = CTL_UNNUMBERED,
64234 + .procname = "audit_mount",
64235 + .data = &grsec_enable_mount,
64236 + .maxlen = sizeof(int),
64237 + .mode = 0600,
64238 + .proc_handler = &proc_dointvec,
64239 + },
64240 +#endif
64241 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
64242 + {
64243 + .ctl_name = CTL_UNNUMBERED,
64244 + .procname = "audit_textrel",
64245 + .data = &grsec_enable_audit_textrel,
64246 + .maxlen = sizeof(int),
64247 + .mode = 0600,
64248 + .proc_handler = &proc_dointvec,
64249 + },
64250 +#endif
64251 +#ifdef CONFIG_GRKERNSEC_DMESG
64252 + {
64253 + .ctl_name = CTL_UNNUMBERED,
64254 + .procname = "dmesg",
64255 + .data = &grsec_enable_dmesg,
64256 + .maxlen = sizeof(int),
64257 + .mode = 0600,
64258 + .proc_handler = &proc_dointvec,
64259 + },
64260 +#endif
64261 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
64262 + {
64263 + .ctl_name = CTL_UNNUMBERED,
64264 + .procname = "chroot_findtask",
64265 + .data = &grsec_enable_chroot_findtask,
64266 + .maxlen = sizeof(int),
64267 + .mode = 0600,
64268 + .proc_handler = &proc_dointvec,
64269 + },
64270 +#endif
64271 +#ifdef CONFIG_GRKERNSEC_RESLOG
64272 + {
64273 + .ctl_name = CTL_UNNUMBERED,
64274 + .procname = "resource_logging",
64275 + .data = &grsec_resource_logging,
64276 + .maxlen = sizeof(int),
64277 + .mode = 0600,
64278 + .proc_handler = &proc_dointvec,
64279 + },
64280 +#endif
64281 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
64282 + {
64283 + .ctl_name = CTL_UNNUMBERED,
64284 + .procname = "audit_ptrace",
64285 + .data = &grsec_enable_audit_ptrace,
64286 + .maxlen = sizeof(int),
64287 + .mode = 0600,
64288 + .proc_handler = &proc_dointvec,
64289 + },
64290 +#endif
64291 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
64292 + {
64293 + .ctl_name = CTL_UNNUMBERED,
64294 + .procname = "harden_ptrace",
64295 + .data = &grsec_enable_harden_ptrace,
64296 + .maxlen = sizeof(int),
64297 + .mode = 0600,
64298 + .proc_handler = &proc_dointvec,
64299 + },
64300 +#endif
64301 + {
64302 + .ctl_name = CTL_UNNUMBERED,
64303 + .procname = "grsec_lock",
64304 + .data = &grsec_lock,
64305 + .maxlen = sizeof(int),
64306 + .mode = 0600,
64307 + .proc_handler = &proc_dointvec,
64308 + },
64309 +#endif
64310 +#ifdef CONFIG_GRKERNSEC_ROFS
64311 + {
64312 + .ctl_name = CTL_UNNUMBERED,
64313 + .procname = "romount_protect",
64314 + .data = &grsec_enable_rofs,
64315 + .maxlen = sizeof(int),
64316 + .mode = 0600,
64317 + .proc_handler = &proc_dointvec_minmax,
64318 + .extra1 = &one,
64319 + .extra2 = &one,
64320 + },
64321 +#endif
64322 + { .ctl_name = 0 }
64323 +};
64324 +#endif
64325 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
64326 new file mode 100644
64327 index 0000000..0dc13c3
64328 --- /dev/null
64329 +++ b/grsecurity/grsec_time.c
64330 @@ -0,0 +1,16 @@
64331 +#include <linux/kernel.h>
64332 +#include <linux/sched.h>
64333 +#include <linux/grinternal.h>
64334 +#include <linux/module.h>
64335 +
64336 +void
64337 +gr_log_timechange(void)
64338 +{
64339 +#ifdef CONFIG_GRKERNSEC_TIME
64340 + if (grsec_enable_time)
64341 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
64342 +#endif
64343 + return;
64344 +}
64345 +
64346 +EXPORT_SYMBOL(gr_log_timechange);
64347 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
64348 new file mode 100644
64349 index 0000000..4a78774
64350 --- /dev/null
64351 +++ b/grsecurity/grsec_tpe.c
64352 @@ -0,0 +1,39 @@
64353 +#include <linux/kernel.h>
64354 +#include <linux/sched.h>
64355 +#include <linux/file.h>
64356 +#include <linux/fs.h>
64357 +#include <linux/grinternal.h>
64358 +
64359 +extern int gr_acl_tpe_check(void);
64360 +
64361 +int
64362 +gr_tpe_allow(const struct file *file)
64363 +{
64364 +#ifdef CONFIG_GRKERNSEC
64365 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
64366 + const struct cred *cred = current_cred();
64367 +
64368 + if (cred->uid && ((grsec_enable_tpe &&
64369 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
64370 + ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
64371 + (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
64372 +#else
64373 + in_group_p(grsec_tpe_gid)
64374 +#endif
64375 + ) || gr_acl_tpe_check()) &&
64376 + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
64377 + (inode->i_mode & S_IWOTH))))) {
64378 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
64379 + return 0;
64380 + }
64381 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
64382 + if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
64383 + ((inode->i_uid && (inode->i_uid != cred->uid)) ||
64384 + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
64385 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
64386 + return 0;
64387 + }
64388 +#endif
64389 +#endif
64390 + return 1;
64391 +}
64392 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
64393 new file mode 100644
64394 index 0000000..9f7b1ac
64395 --- /dev/null
64396 +++ b/grsecurity/grsum.c
64397 @@ -0,0 +1,61 @@
64398 +#include <linux/err.h>
64399 +#include <linux/kernel.h>
64400 +#include <linux/sched.h>
64401 +#include <linux/mm.h>
64402 +#include <linux/scatterlist.h>
64403 +#include <linux/crypto.h>
64404 +#include <linux/gracl.h>
64405 +
64406 +
64407 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
64408 +#error "crypto and sha256 must be built into the kernel"
64409 +#endif
64410 +
64411 +int
64412 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
64413 +{
64414 + char *p;
64415 + struct crypto_hash *tfm;
64416 + struct hash_desc desc;
64417 + struct scatterlist sg;
64418 + unsigned char temp_sum[GR_SHA_LEN];
64419 + volatile int retval = 0;
64420 + volatile int dummy = 0;
64421 + unsigned int i;
64422 +
64423 + sg_init_table(&sg, 1);
64424 +
64425 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
64426 + if (IS_ERR(tfm)) {
64427 + /* should never happen, since sha256 should be built in */
64428 + return 1;
64429 + }
64430 +
64431 + desc.tfm = tfm;
64432 + desc.flags = 0;
64433 +
64434 + crypto_hash_init(&desc);
64435 +
64436 + p = salt;
64437 + sg_set_buf(&sg, p, GR_SALT_LEN);
64438 + crypto_hash_update(&desc, &sg, sg.length);
64439 +
64440 + p = entry->pw;
64441 + sg_set_buf(&sg, p, strlen(p));
64442 +
64443 + crypto_hash_update(&desc, &sg, sg.length);
64444 +
64445 + crypto_hash_final(&desc, temp_sum);
64446 +
64447 + memset(entry->pw, 0, GR_PW_LEN);
64448 +
64449 + for (i = 0; i < GR_SHA_LEN; i++)
64450 + if (sum[i] != temp_sum[i])
64451 + retval = 1;
64452 + else
64453 + dummy = 1; // waste a cycle
64454 +
64455 + crypto_free_hash(tfm);
64456 +
64457 + return retval;
64458 +}
64459 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
64460 index 3cd9ccd..fe16d47 100644
64461 --- a/include/acpi/acpi_bus.h
64462 +++ b/include/acpi/acpi_bus.h
64463 @@ -107,7 +107,7 @@ struct acpi_device_ops {
64464 acpi_op_bind bind;
64465 acpi_op_unbind unbind;
64466 acpi_op_notify notify;
64467 -};
64468 +} __no_const;
64469
64470 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
64471
64472 diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
64473 index f4906f6..71feb73 100644
64474 --- a/include/acpi/acpi_drivers.h
64475 +++ b/include/acpi/acpi_drivers.h
64476 @@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acpi_handle handle, int type);
64477 Dock Station
64478 -------------------------------------------------------------------------- */
64479 struct acpi_dock_ops {
64480 - acpi_notify_handler handler;
64481 - acpi_notify_handler uevent;
64482 + const acpi_notify_handler handler;
64483 + const acpi_notify_handler uevent;
64484 };
64485
64486 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
64487 @@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle handle);
64488 extern int register_dock_notifier(struct notifier_block *nb);
64489 extern void unregister_dock_notifier(struct notifier_block *nb);
64490 extern int register_hotplug_dock_device(acpi_handle handle,
64491 - struct acpi_dock_ops *ops,
64492 + const struct acpi_dock_ops *ops,
64493 void *context);
64494 extern void unregister_hotplug_dock_device(acpi_handle handle);
64495 #else
64496 @@ -144,7 +144,7 @@ static inline void unregister_dock_notifier(struct notifier_block *nb)
64497 {
64498 }
64499 static inline int register_hotplug_dock_device(acpi_handle handle,
64500 - struct acpi_dock_ops *ops,
64501 + const struct acpi_dock_ops *ops,
64502 void *context)
64503 {
64504 return -ENODEV;
64505 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
64506 index b7babf0..a9ac9fc 100644
64507 --- a/include/asm-generic/atomic-long.h
64508 +++ b/include/asm-generic/atomic-long.h
64509 @@ -22,6 +22,12 @@
64510
64511 typedef atomic64_t atomic_long_t;
64512
64513 +#ifdef CONFIG_PAX_REFCOUNT
64514 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
64515 +#else
64516 +typedef atomic64_t atomic_long_unchecked_t;
64517 +#endif
64518 +
64519 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
64520
64521 static inline long atomic_long_read(atomic_long_t *l)
64522 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
64523 return (long)atomic64_read(v);
64524 }
64525
64526 +#ifdef CONFIG_PAX_REFCOUNT
64527 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
64528 +{
64529 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
64530 +
64531 + return (long)atomic64_read_unchecked(v);
64532 +}
64533 +#endif
64534 +
64535 static inline void atomic_long_set(atomic_long_t *l, long i)
64536 {
64537 atomic64_t *v = (atomic64_t *)l;
64538 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
64539 atomic64_set(v, i);
64540 }
64541
64542 +#ifdef CONFIG_PAX_REFCOUNT
64543 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
64544 +{
64545 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
64546 +
64547 + atomic64_set_unchecked(v, i);
64548 +}
64549 +#endif
64550 +
64551 static inline void atomic_long_inc(atomic_long_t *l)
64552 {
64553 atomic64_t *v = (atomic64_t *)l;
64554 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
64555 atomic64_inc(v);
64556 }
64557
64558 +#ifdef CONFIG_PAX_REFCOUNT
64559 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
64560 +{
64561 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
64562 +
64563 + atomic64_inc_unchecked(v);
64564 +}
64565 +#endif
64566 +
64567 static inline void atomic_long_dec(atomic_long_t *l)
64568 {
64569 atomic64_t *v = (atomic64_t *)l;
64570 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
64571 atomic64_dec(v);
64572 }
64573
64574 +#ifdef CONFIG_PAX_REFCOUNT
64575 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
64576 +{
64577 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
64578 +
64579 + atomic64_dec_unchecked(v);
64580 +}
64581 +#endif
64582 +
64583 static inline void atomic_long_add(long i, atomic_long_t *l)
64584 {
64585 atomic64_t *v = (atomic64_t *)l;
64586 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
64587 atomic64_add(i, v);
64588 }
64589
64590 +#ifdef CONFIG_PAX_REFCOUNT
64591 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
64592 +{
64593 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
64594 +
64595 + atomic64_add_unchecked(i, v);
64596 +}
64597 +#endif
64598 +
64599 static inline void atomic_long_sub(long i, atomic_long_t *l)
64600 {
64601 atomic64_t *v = (atomic64_t *)l;
64602 @@ -115,6 +166,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
64603 return (long)atomic64_inc_return(v);
64604 }
64605
64606 +#ifdef CONFIG_PAX_REFCOUNT
64607 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
64608 +{
64609 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
64610 +
64611 + return (long)atomic64_inc_return_unchecked(v);
64612 +}
64613 +#endif
64614 +
64615 static inline long atomic_long_dec_return(atomic_long_t *l)
64616 {
64617 atomic64_t *v = (atomic64_t *)l;
64618 @@ -140,6 +200,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
64619
64620 typedef atomic_t atomic_long_t;
64621
64622 +#ifdef CONFIG_PAX_REFCOUNT
64623 +typedef atomic_unchecked_t atomic_long_unchecked_t;
64624 +#else
64625 +typedef atomic_t atomic_long_unchecked_t;
64626 +#endif
64627 +
64628 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
64629 static inline long atomic_long_read(atomic_long_t *l)
64630 {
64631 @@ -148,6 +214,15 @@ static inline long atomic_long_read(atomic_long_t *l)
64632 return (long)atomic_read(v);
64633 }
64634
64635 +#ifdef CONFIG_PAX_REFCOUNT
64636 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
64637 +{
64638 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
64639 +
64640 + return (long)atomic_read_unchecked(v);
64641 +}
64642 +#endif
64643 +
64644 static inline void atomic_long_set(atomic_long_t *l, long i)
64645 {
64646 atomic_t *v = (atomic_t *)l;
64647 @@ -155,6 +230,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
64648 atomic_set(v, i);
64649 }
64650
64651 +#ifdef CONFIG_PAX_REFCOUNT
64652 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
64653 +{
64654 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
64655 +
64656 + atomic_set_unchecked(v, i);
64657 +}
64658 +#endif
64659 +
64660 static inline void atomic_long_inc(atomic_long_t *l)
64661 {
64662 atomic_t *v = (atomic_t *)l;
64663 @@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
64664 atomic_inc(v);
64665 }
64666
64667 +#ifdef CONFIG_PAX_REFCOUNT
64668 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
64669 +{
64670 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
64671 +
64672 + atomic_inc_unchecked(v);
64673 +}
64674 +#endif
64675 +
64676 static inline void atomic_long_dec(atomic_long_t *l)
64677 {
64678 atomic_t *v = (atomic_t *)l;
64679 @@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
64680 atomic_dec(v);
64681 }
64682
64683 +#ifdef CONFIG_PAX_REFCOUNT
64684 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
64685 +{
64686 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
64687 +
64688 + atomic_dec_unchecked(v);
64689 +}
64690 +#endif
64691 +
64692 static inline void atomic_long_add(long i, atomic_long_t *l)
64693 {
64694 atomic_t *v = (atomic_t *)l;
64695 @@ -176,6 +278,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
64696 atomic_add(i, v);
64697 }
64698
64699 +#ifdef CONFIG_PAX_REFCOUNT
64700 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
64701 +{
64702 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
64703 +
64704 + atomic_add_unchecked(i, v);
64705 +}
64706 +#endif
64707 +
64708 static inline void atomic_long_sub(long i, atomic_long_t *l)
64709 {
64710 atomic_t *v = (atomic_t *)l;
64711 @@ -232,6 +343,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
64712 return (long)atomic_inc_return(v);
64713 }
64714
64715 +#ifdef CONFIG_PAX_REFCOUNT
64716 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
64717 +{
64718 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
64719 +
64720 + return (long)atomic_inc_return_unchecked(v);
64721 +}
64722 +#endif
64723 +
64724 static inline long atomic_long_dec_return(atomic_long_t *l)
64725 {
64726 atomic_t *v = (atomic_t *)l;
64727 @@ -255,4 +375,47 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
64728
64729 #endif /* BITS_PER_LONG == 64 */
64730
64731 +#ifdef CONFIG_PAX_REFCOUNT
64732 +static inline void pax_refcount_needs_these_functions(void)
64733 +{
64734 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
64735 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
64736 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
64737 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
64738 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
64739 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
64740 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
64741 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
64742 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
64743 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
64744 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
64745 +
64746 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
64747 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
64748 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
64749 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
64750 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
64751 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
64752 +}
64753 +#else
64754 +#define atomic_read_unchecked(v) atomic_read(v)
64755 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
64756 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
64757 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
64758 +#define atomic_inc_unchecked(v) atomic_inc(v)
64759 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
64760 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
64761 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
64762 +#define atomic_dec_unchecked(v) atomic_dec(v)
64763 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
64764 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
64765 +
64766 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
64767 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
64768 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
64769 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
64770 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
64771 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
64772 +#endif
64773 +
64774 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
64775 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
64776 index b18ce4f..2ee2843 100644
64777 --- a/include/asm-generic/atomic64.h
64778 +++ b/include/asm-generic/atomic64.h
64779 @@ -16,6 +16,8 @@ typedef struct {
64780 long long counter;
64781 } atomic64_t;
64782
64783 +typedef atomic64_t atomic64_unchecked_t;
64784 +
64785 #define ATOMIC64_INIT(i) { (i) }
64786
64787 extern long long atomic64_read(const atomic64_t *v);
64788 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
64789 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
64790 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
64791
64792 +#define atomic64_read_unchecked(v) atomic64_read(v)
64793 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
64794 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
64795 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
64796 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
64797 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
64798 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
64799 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
64800 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
64801 +
64802 #endif /* _ASM_GENERIC_ATOMIC64_H */
64803 diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
64804 index d48ddf0..656a0ac 100644
64805 --- a/include/asm-generic/bug.h
64806 +++ b/include/asm-generic/bug.h
64807 @@ -105,11 +105,11 @@ extern void warn_slowpath_null(const char *file, const int line);
64808
64809 #else /* !CONFIG_BUG */
64810 #ifndef HAVE_ARCH_BUG
64811 -#define BUG() do {} while(0)
64812 +#define BUG() do { for (;;) ; } while(0)
64813 #endif
64814
64815 #ifndef HAVE_ARCH_BUG_ON
64816 -#define BUG_ON(condition) do { if (condition) ; } while(0)
64817 +#define BUG_ON(condition) do { if (condition) for (;;) ; } while(0)
64818 #endif
64819
64820 #ifndef HAVE_ARCH_WARN_ON
64821 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
64822 index 1bfcfe5..e04c5c9 100644
64823 --- a/include/asm-generic/cache.h
64824 +++ b/include/asm-generic/cache.h
64825 @@ -6,7 +6,7 @@
64826 * cache lines need to provide their own cache.h.
64827 */
64828
64829 -#define L1_CACHE_SHIFT 5
64830 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
64831 +#define L1_CACHE_SHIFT 5UL
64832 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
64833
64834 #endif /* __ASM_GENERIC_CACHE_H */
64835 diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
64836 index 6920695..41038bc 100644
64837 --- a/include/asm-generic/dma-mapping-common.h
64838 +++ b/include/asm-generic/dma-mapping-common.h
64839 @@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
64840 enum dma_data_direction dir,
64841 struct dma_attrs *attrs)
64842 {
64843 - struct dma_map_ops *ops = get_dma_ops(dev);
64844 + const struct dma_map_ops *ops = get_dma_ops(dev);
64845 dma_addr_t addr;
64846
64847 kmemcheck_mark_initialized(ptr, size);
64848 @@ -30,7 +30,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
64849 enum dma_data_direction dir,
64850 struct dma_attrs *attrs)
64851 {
64852 - struct dma_map_ops *ops = get_dma_ops(dev);
64853 + const struct dma_map_ops *ops = get_dma_ops(dev);
64854
64855 BUG_ON(!valid_dma_direction(dir));
64856 if (ops->unmap_page)
64857 @@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
64858 int nents, enum dma_data_direction dir,
64859 struct dma_attrs *attrs)
64860 {
64861 - struct dma_map_ops *ops = get_dma_ops(dev);
64862 + const struct dma_map_ops *ops = get_dma_ops(dev);
64863 int i, ents;
64864 struct scatterlist *s;
64865
64866 @@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
64867 int nents, enum dma_data_direction dir,
64868 struct dma_attrs *attrs)
64869 {
64870 - struct dma_map_ops *ops = get_dma_ops(dev);
64871 + const struct dma_map_ops *ops = get_dma_ops(dev);
64872
64873 BUG_ON(!valid_dma_direction(dir));
64874 debug_dma_unmap_sg(dev, sg, nents, dir);
64875 @@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
64876 size_t offset, size_t size,
64877 enum dma_data_direction dir)
64878 {
64879 - struct dma_map_ops *ops = get_dma_ops(dev);
64880 + const struct dma_map_ops *ops = get_dma_ops(dev);
64881 dma_addr_t addr;
64882
64883 kmemcheck_mark_initialized(page_address(page) + offset, size);
64884 @@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
64885 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
64886 size_t size, enum dma_data_direction dir)
64887 {
64888 - struct dma_map_ops *ops = get_dma_ops(dev);
64889 + const struct dma_map_ops *ops = get_dma_ops(dev);
64890
64891 BUG_ON(!valid_dma_direction(dir));
64892 if (ops->unmap_page)
64893 @@ -97,7 +97,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
64894 size_t size,
64895 enum dma_data_direction dir)
64896 {
64897 - struct dma_map_ops *ops = get_dma_ops(dev);
64898 + const struct dma_map_ops *ops = get_dma_ops(dev);
64899
64900 BUG_ON(!valid_dma_direction(dir));
64901 if (ops->sync_single_for_cpu)
64902 @@ -109,7 +109,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
64903 dma_addr_t addr, size_t size,
64904 enum dma_data_direction dir)
64905 {
64906 - struct dma_map_ops *ops = get_dma_ops(dev);
64907 + const struct dma_map_ops *ops = get_dma_ops(dev);
64908
64909 BUG_ON(!valid_dma_direction(dir));
64910 if (ops->sync_single_for_device)
64911 @@ -123,7 +123,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
64912 size_t size,
64913 enum dma_data_direction dir)
64914 {
64915 - struct dma_map_ops *ops = get_dma_ops(dev);
64916 + const struct dma_map_ops *ops = get_dma_ops(dev);
64917
64918 BUG_ON(!valid_dma_direction(dir));
64919 if (ops->sync_single_range_for_cpu) {
64920 @@ -140,7 +140,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
64921 size_t size,
64922 enum dma_data_direction dir)
64923 {
64924 - struct dma_map_ops *ops = get_dma_ops(dev);
64925 + const struct dma_map_ops *ops = get_dma_ops(dev);
64926
64927 BUG_ON(!valid_dma_direction(dir));
64928 if (ops->sync_single_range_for_device) {
64929 @@ -155,7 +155,7 @@ static inline void
64930 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
64931 int nelems, enum dma_data_direction dir)
64932 {
64933 - struct dma_map_ops *ops = get_dma_ops(dev);
64934 + const struct dma_map_ops *ops = get_dma_ops(dev);
64935
64936 BUG_ON(!valid_dma_direction(dir));
64937 if (ops->sync_sg_for_cpu)
64938 @@ -167,7 +167,7 @@ static inline void
64939 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
64940 int nelems, enum dma_data_direction dir)
64941 {
64942 - struct dma_map_ops *ops = get_dma_ops(dev);
64943 + const struct dma_map_ops *ops = get_dma_ops(dev);
64944
64945 BUG_ON(!valid_dma_direction(dir));
64946 if (ops->sync_sg_for_device)
64947 diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
64948 index 0d68a1e..b74a761 100644
64949 --- a/include/asm-generic/emergency-restart.h
64950 +++ b/include/asm-generic/emergency-restart.h
64951 @@ -1,7 +1,7 @@
64952 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
64953 #define _ASM_GENERIC_EMERGENCY_RESTART_H
64954
64955 -static inline void machine_emergency_restart(void)
64956 +static inline __noreturn void machine_emergency_restart(void)
64957 {
64958 machine_restart(NULL);
64959 }
64960 diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
64961 index 3c2344f..4590a7d 100644
64962 --- a/include/asm-generic/futex.h
64963 +++ b/include/asm-generic/futex.h
64964 @@ -6,7 +6,7 @@
64965 #include <asm/errno.h>
64966
64967 static inline int
64968 -futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
64969 +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
64970 {
64971 int op = (encoded_op >> 28) & 7;
64972 int cmp = (encoded_op >> 24) & 15;
64973 @@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
64974 }
64975
64976 static inline int
64977 -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
64978 +futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
64979 {
64980 return -ENOSYS;
64981 }
64982 diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
64983 index 1ca3efc..e3dc852 100644
64984 --- a/include/asm-generic/int-l64.h
64985 +++ b/include/asm-generic/int-l64.h
64986 @@ -46,6 +46,8 @@ typedef unsigned int u32;
64987 typedef signed long s64;
64988 typedef unsigned long u64;
64989
64990 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
64991 +
64992 #define S8_C(x) x
64993 #define U8_C(x) x ## U
64994 #define S16_C(x) x
64995 diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
64996 index f394147..b6152b9 100644
64997 --- a/include/asm-generic/int-ll64.h
64998 +++ b/include/asm-generic/int-ll64.h
64999 @@ -51,6 +51,8 @@ typedef unsigned int u32;
65000 typedef signed long long s64;
65001 typedef unsigned long long u64;
65002
65003 +typedef unsigned long long intoverflow_t;
65004 +
65005 #define S8_C(x) x
65006 #define U8_C(x) x ## U
65007 #define S16_C(x) x
65008 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
65009 index e5f234a..cdb16b3 100644
65010 --- a/include/asm-generic/kmap_types.h
65011 +++ b/include/asm-generic/kmap_types.h
65012 @@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
65013 KMAP_D(16) KM_IRQ_PTE,
65014 KMAP_D(17) KM_NMI,
65015 KMAP_D(18) KM_NMI_PTE,
65016 -KMAP_D(19) KM_TYPE_NR
65017 +KMAP_D(19) KM_CLEARPAGE,
65018 +KMAP_D(20) KM_TYPE_NR
65019 };
65020
65021 #undef KMAP_D
65022 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
65023 index 725612b..9cc513a 100644
65024 --- a/include/asm-generic/pgtable-nopmd.h
65025 +++ b/include/asm-generic/pgtable-nopmd.h
65026 @@ -1,14 +1,19 @@
65027 #ifndef _PGTABLE_NOPMD_H
65028 #define _PGTABLE_NOPMD_H
65029
65030 -#ifndef __ASSEMBLY__
65031 -
65032 #include <asm-generic/pgtable-nopud.h>
65033
65034 -struct mm_struct;
65035 -
65036 #define __PAGETABLE_PMD_FOLDED
65037
65038 +#define PMD_SHIFT PUD_SHIFT
65039 +#define PTRS_PER_PMD 1
65040 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
65041 +#define PMD_MASK (~(PMD_SIZE-1))
65042 +
65043 +#ifndef __ASSEMBLY__
65044 +
65045 +struct mm_struct;
65046 +
65047 /*
65048 * Having the pmd type consist of a pud gets the size right, and allows
65049 * us to conceptually access the pud entry that this pmd is folded into
65050 @@ -16,11 +21,6 @@ struct mm_struct;
65051 */
65052 typedef struct { pud_t pud; } pmd_t;
65053
65054 -#define PMD_SHIFT PUD_SHIFT
65055 -#define PTRS_PER_PMD 1
65056 -#define PMD_SIZE (1UL << PMD_SHIFT)
65057 -#define PMD_MASK (~(PMD_SIZE-1))
65058 -
65059 /*
65060 * The "pud_xxx()" functions here are trivial for a folded two-level
65061 * setup: the pmd is never bad, and a pmd always exists (as it's folded
65062 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
65063 index 810431d..ccc3638 100644
65064 --- a/include/asm-generic/pgtable-nopud.h
65065 +++ b/include/asm-generic/pgtable-nopud.h
65066 @@ -1,10 +1,15 @@
65067 #ifndef _PGTABLE_NOPUD_H
65068 #define _PGTABLE_NOPUD_H
65069
65070 -#ifndef __ASSEMBLY__
65071 -
65072 #define __PAGETABLE_PUD_FOLDED
65073
65074 +#define PUD_SHIFT PGDIR_SHIFT
65075 +#define PTRS_PER_PUD 1
65076 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
65077 +#define PUD_MASK (~(PUD_SIZE-1))
65078 +
65079 +#ifndef __ASSEMBLY__
65080 +
65081 /*
65082 * Having the pud type consist of a pgd gets the size right, and allows
65083 * us to conceptually access the pgd entry that this pud is folded into
65084 @@ -12,11 +17,6 @@
65085 */
65086 typedef struct { pgd_t pgd; } pud_t;
65087
65088 -#define PUD_SHIFT PGDIR_SHIFT
65089 -#define PTRS_PER_PUD 1
65090 -#define PUD_SIZE (1UL << PUD_SHIFT)
65091 -#define PUD_MASK (~(PUD_SIZE-1))
65092 -
65093 /*
65094 * The "pgd_xxx()" functions here are trivial for a folded two-level
65095 * setup: the pud is never bad, and a pud always exists (as it's folded
65096 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
65097 index e2bd73e..fea8ed3 100644
65098 --- a/include/asm-generic/pgtable.h
65099 +++ b/include/asm-generic/pgtable.h
65100 @@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
65101 unsigned long size);
65102 #endif
65103
65104 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
65105 +static inline unsigned long pax_open_kernel(void) { return 0; }
65106 +#endif
65107 +
65108 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
65109 +static inline unsigned long pax_close_kernel(void) { return 0; }
65110 +#endif
65111 +
65112 #endif /* !__ASSEMBLY__ */
65113
65114 #endif /* _ASM_GENERIC_PGTABLE_H */
65115 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
65116 index b6e818f..21aa58a 100644
65117 --- a/include/asm-generic/vmlinux.lds.h
65118 +++ b/include/asm-generic/vmlinux.lds.h
65119 @@ -199,6 +199,7 @@
65120 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
65121 VMLINUX_SYMBOL(__start_rodata) = .; \
65122 *(.rodata) *(.rodata.*) \
65123 + *(.data.read_only) \
65124 *(__vermagic) /* Kernel version magic */ \
65125 *(__markers_strings) /* Markers: strings */ \
65126 *(__tracepoints_strings)/* Tracepoints: strings */ \
65127 @@ -656,22 +657,24 @@
65128 * section in the linker script will go there too. @phdr should have
65129 * a leading colon.
65130 *
65131 - * Note that this macros defines __per_cpu_load as an absolute symbol.
65132 + * Note that this macros defines per_cpu_load as an absolute symbol.
65133 * If there is no need to put the percpu section at a predetermined
65134 * address, use PERCPU().
65135 */
65136 #define PERCPU_VADDR(vaddr, phdr) \
65137 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
65138 - .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
65139 + per_cpu_load = .; \
65140 + .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
65141 - LOAD_OFFSET) { \
65142 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
65143 VMLINUX_SYMBOL(__per_cpu_start) = .; \
65144 *(.data.percpu.first) \
65145 - *(.data.percpu.page_aligned) \
65146 *(.data.percpu) \
65147 + . = ALIGN(PAGE_SIZE); \
65148 + *(.data.percpu.page_aligned) \
65149 *(.data.percpu.shared_aligned) \
65150 VMLINUX_SYMBOL(__per_cpu_end) = .; \
65151 } phdr \
65152 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
65153 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
65154
65155 /**
65156 * PERCPU - define output section for percpu area, simple version
65157 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
65158 index 66713c6..98c0460 100644
65159 --- a/include/drm/drmP.h
65160 +++ b/include/drm/drmP.h
65161 @@ -71,6 +71,7 @@
65162 #include <linux/workqueue.h>
65163 #include <linux/poll.h>
65164 #include <asm/pgalloc.h>
65165 +#include <asm/local.h>
65166 #include "drm.h"
65167
65168 #include <linux/idr.h>
65169 @@ -814,7 +815,7 @@ struct drm_driver {
65170 void (*vgaarb_irq)(struct drm_device *dev, bool state);
65171
65172 /* Driver private ops for this object */
65173 - struct vm_operations_struct *gem_vm_ops;
65174 + const struct vm_operations_struct *gem_vm_ops;
65175
65176 int major;
65177 int minor;
65178 @@ -917,7 +918,7 @@ struct drm_device {
65179
65180 /** \name Usage Counters */
65181 /*@{ */
65182 - int open_count; /**< Outstanding files open */
65183 + local_t open_count; /**< Outstanding files open */
65184 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
65185 atomic_t vma_count; /**< Outstanding vma areas open */
65186 int buf_use; /**< Buffers in use -- cannot alloc */
65187 @@ -928,7 +929,7 @@ struct drm_device {
65188 /*@{ */
65189 unsigned long counters;
65190 enum drm_stat_type types[15];
65191 - atomic_t counts[15];
65192 + atomic_unchecked_t counts[15];
65193 /*@} */
65194
65195 struct list_head filelist;
65196 @@ -1016,7 +1017,7 @@ struct drm_device {
65197 struct pci_controller *hose;
65198 #endif
65199 struct drm_sg_mem *sg; /**< Scatter gather memory */
65200 - unsigned int num_crtcs; /**< Number of CRTCs on this device */
65201 + unsigned int num_crtcs; /**< Number of CRTCs on this device */
65202 void *dev_private; /**< device private data */
65203 void *mm_private;
65204 struct address_space *dev_mapping;
65205 @@ -1042,11 +1043,11 @@ struct drm_device {
65206 spinlock_t object_name_lock;
65207 struct idr object_name_idr;
65208 atomic_t object_count;
65209 - atomic_t object_memory;
65210 + atomic_unchecked_t object_memory;
65211 atomic_t pin_count;
65212 - atomic_t pin_memory;
65213 + atomic_unchecked_t pin_memory;
65214 atomic_t gtt_count;
65215 - atomic_t gtt_memory;
65216 + atomic_unchecked_t gtt_memory;
65217 uint32_t gtt_total;
65218 uint32_t invalidate_domains; /* domains pending invalidation */
65219 uint32_t flush_domains; /* domains pending flush */
65220 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
65221 index b29e201..3413cc9 100644
65222 --- a/include/drm/drm_crtc_helper.h
65223 +++ b/include/drm/drm_crtc_helper.h
65224 @@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs {
65225
65226 /* reload the current crtc LUT */
65227 void (*load_lut)(struct drm_crtc *crtc);
65228 -};
65229 +} __no_const;
65230
65231 struct drm_encoder_helper_funcs {
65232 void (*dpms)(struct drm_encoder *encoder, int mode);
65233 @@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs {
65234 struct drm_connector *connector);
65235 /* disable encoder when not in use - more explicit than dpms off */
65236 void (*disable)(struct drm_encoder *encoder);
65237 -};
65238 +} __no_const;
65239
65240 struct drm_connector_helper_funcs {
65241 int (*get_modes)(struct drm_connector *connector);
65242 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
65243 index b199170..6f9e64c 100644
65244 --- a/include/drm/ttm/ttm_memory.h
65245 +++ b/include/drm/ttm/ttm_memory.h
65246 @@ -47,7 +47,7 @@
65247
65248 struct ttm_mem_shrink {
65249 int (*do_shrink) (struct ttm_mem_shrink *);
65250 -};
65251 +} __no_const;
65252
65253 /**
65254 * struct ttm_mem_global - Global memory accounting structure.
65255 diff --git a/include/linux/a.out.h b/include/linux/a.out.h
65256 index e86dfca..40cc55f 100644
65257 --- a/include/linux/a.out.h
65258 +++ b/include/linux/a.out.h
65259 @@ -39,6 +39,14 @@ enum machine_type {
65260 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
65261 };
65262
65263 +/* Constants for the N_FLAGS field */
65264 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
65265 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
65266 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
65267 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
65268 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
65269 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
65270 +
65271 #if !defined (N_MAGIC)
65272 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
65273 #endif
65274 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
65275 index 817b237..62c10bc 100644
65276 --- a/include/linux/atmdev.h
65277 +++ b/include/linux/atmdev.h
65278 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
65279 #endif
65280
65281 struct k_atm_aal_stats {
65282 -#define __HANDLE_ITEM(i) atomic_t i
65283 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
65284 __AAL_STAT_ITEMS
65285 #undef __HANDLE_ITEM
65286 };
65287 diff --git a/include/linux/backlight.h b/include/linux/backlight.h
65288 index 0f5f578..8c4f884 100644
65289 --- a/include/linux/backlight.h
65290 +++ b/include/linux/backlight.h
65291 @@ -36,18 +36,18 @@ struct backlight_device;
65292 struct fb_info;
65293
65294 struct backlight_ops {
65295 - unsigned int options;
65296 + const unsigned int options;
65297
65298 #define BL_CORE_SUSPENDRESUME (1 << 0)
65299
65300 /* Notify the backlight driver some property has changed */
65301 - int (*update_status)(struct backlight_device *);
65302 + int (* const update_status)(struct backlight_device *);
65303 /* Return the current backlight brightness (accounting for power,
65304 fb_blank etc.) */
65305 - int (*get_brightness)(struct backlight_device *);
65306 + int (* const get_brightness)(struct backlight_device *);
65307 /* Check if given framebuffer device is the one bound to this backlight;
65308 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
65309 - int (*check_fb)(struct fb_info *);
65310 + int (* const check_fb)(struct fb_info *);
65311 };
65312
65313 /* This structure defines all the properties of a backlight */
65314 @@ -86,7 +86,7 @@ struct backlight_device {
65315 registered this device has been unloaded, and if class_get_devdata()
65316 points to something in the body of that driver, it is also invalid. */
65317 struct mutex ops_lock;
65318 - struct backlight_ops *ops;
65319 + const struct backlight_ops *ops;
65320
65321 /* The framebuffer notifier block */
65322 struct notifier_block fb_notif;
65323 @@ -103,7 +103,7 @@ static inline void backlight_update_status(struct backlight_device *bd)
65324 }
65325
65326 extern struct backlight_device *backlight_device_register(const char *name,
65327 - struct device *dev, void *devdata, struct backlight_ops *ops);
65328 + struct device *dev, void *devdata, const struct backlight_ops *ops);
65329 extern void backlight_device_unregister(struct backlight_device *bd);
65330 extern void backlight_force_update(struct backlight_device *bd,
65331 enum backlight_update_reason reason);
65332 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
65333 index a3d802e..482f69c 100644
65334 --- a/include/linux/binfmts.h
65335 +++ b/include/linux/binfmts.h
65336 @@ -83,6 +83,7 @@ struct linux_binfmt {
65337 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
65338 int (*load_shlib)(struct file *);
65339 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
65340 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
65341 unsigned long min_coredump; /* minimal dump size */
65342 int hasvdso;
65343 };
65344 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
65345 index a06bfab..4fa38bb 100644
65346 --- a/include/linux/blkdev.h
65347 +++ b/include/linux/blkdev.h
65348 @@ -1278,7 +1278,7 @@ struct block_device_operations {
65349 int (*revalidate_disk) (struct gendisk *);
65350 int (*getgeo)(struct block_device *, struct hd_geometry *);
65351 struct module *owner;
65352 -};
65353 +} __do_const;
65354
65355 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
65356 unsigned long);
65357 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
65358 index 3b73b99..629d21b 100644
65359 --- a/include/linux/blktrace_api.h
65360 +++ b/include/linux/blktrace_api.h
65361 @@ -160,7 +160,7 @@ struct blk_trace {
65362 struct dentry *dir;
65363 struct dentry *dropped_file;
65364 struct dentry *msg_file;
65365 - atomic_t dropped;
65366 + atomic_unchecked_t dropped;
65367 };
65368
65369 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
65370 diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
65371 index 83195fb..0b0f77d 100644
65372 --- a/include/linux/byteorder/little_endian.h
65373 +++ b/include/linux/byteorder/little_endian.h
65374 @@ -42,51 +42,51 @@
65375
65376 static inline __le64 __cpu_to_le64p(const __u64 *p)
65377 {
65378 - return (__force __le64)*p;
65379 + return (__force const __le64)*p;
65380 }
65381 static inline __u64 __le64_to_cpup(const __le64 *p)
65382 {
65383 - return (__force __u64)*p;
65384 + return (__force const __u64)*p;
65385 }
65386 static inline __le32 __cpu_to_le32p(const __u32 *p)
65387 {
65388 - return (__force __le32)*p;
65389 + return (__force const __le32)*p;
65390 }
65391 static inline __u32 __le32_to_cpup(const __le32 *p)
65392 {
65393 - return (__force __u32)*p;
65394 + return (__force const __u32)*p;
65395 }
65396 static inline __le16 __cpu_to_le16p(const __u16 *p)
65397 {
65398 - return (__force __le16)*p;
65399 + return (__force const __le16)*p;
65400 }
65401 static inline __u16 __le16_to_cpup(const __le16 *p)
65402 {
65403 - return (__force __u16)*p;
65404 + return (__force const __u16)*p;
65405 }
65406 static inline __be64 __cpu_to_be64p(const __u64 *p)
65407 {
65408 - return (__force __be64)__swab64p(p);
65409 + return (__force const __be64)__swab64p(p);
65410 }
65411 static inline __u64 __be64_to_cpup(const __be64 *p)
65412 {
65413 - return __swab64p((__u64 *)p);
65414 + return __swab64p((const __u64 *)p);
65415 }
65416 static inline __be32 __cpu_to_be32p(const __u32 *p)
65417 {
65418 - return (__force __be32)__swab32p(p);
65419 + return (__force const __be32)__swab32p(p);
65420 }
65421 static inline __u32 __be32_to_cpup(const __be32 *p)
65422 {
65423 - return __swab32p((__u32 *)p);
65424 + return __swab32p((const __u32 *)p);
65425 }
65426 static inline __be16 __cpu_to_be16p(const __u16 *p)
65427 {
65428 - return (__force __be16)__swab16p(p);
65429 + return (__force const __be16)__swab16p(p);
65430 }
65431 static inline __u16 __be16_to_cpup(const __be16 *p)
65432 {
65433 - return __swab16p((__u16 *)p);
65434 + return __swab16p((const __u16 *)p);
65435 }
65436 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
65437 #define __le64_to_cpus(x) do { (void)(x); } while (0)
65438 diff --git a/include/linux/cache.h b/include/linux/cache.h
65439 index 97e2488..e7576b9 100644
65440 --- a/include/linux/cache.h
65441 +++ b/include/linux/cache.h
65442 @@ -16,6 +16,10 @@
65443 #define __read_mostly
65444 #endif
65445
65446 +#ifndef __read_only
65447 +#define __read_only __read_mostly
65448 +#endif
65449 +
65450 #ifndef ____cacheline_aligned
65451 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
65452 #endif
65453 diff --git a/include/linux/capability.h b/include/linux/capability.h
65454 index c8f2a5f7..1618a5c 100644
65455 --- a/include/linux/capability.h
65456 +++ b/include/linux/capability.h
65457 @@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff_set;
65458 (security_real_capable_noaudit((t), (cap)) == 0)
65459
65460 extern int capable(int cap);
65461 +int capable_nolog(int cap);
65462
65463 /* audit system wants to get cap info from files as well */
65464 struct dentry;
65465 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
65466 index 450fa59..86019fb 100644
65467 --- a/include/linux/compiler-gcc4.h
65468 +++ b/include/linux/compiler-gcc4.h
65469 @@ -36,4 +36,16 @@
65470 the kernel context */
65471 #define __cold __attribute__((__cold__))
65472
65473 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
65474 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
65475 +#define __bos0(ptr) __bos((ptr), 0)
65476 +#define __bos1(ptr) __bos((ptr), 1)
65477 +
65478 +#if __GNUC_MINOR__ >= 5
65479 +#ifdef CONSTIFY_PLUGIN
65480 +#define __no_const __attribute__((no_const))
65481 +#define __do_const __attribute__((do_const))
65482 +#endif
65483 +#endif
65484 +
65485 #endif
65486 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
65487 index 04fb513..fd6477b 100644
65488 --- a/include/linux/compiler.h
65489 +++ b/include/linux/compiler.h
65490 @@ -5,11 +5,14 @@
65491
65492 #ifdef __CHECKER__
65493 # define __user __attribute__((noderef, address_space(1)))
65494 +# define __force_user __force __user
65495 # define __kernel /* default address space */
65496 +# define __force_kernel __force __kernel
65497 # define __safe __attribute__((safe))
65498 # define __force __attribute__((force))
65499 # define __nocast __attribute__((nocast))
65500 # define __iomem __attribute__((noderef, address_space(2)))
65501 +# define __force_iomem __force __iomem
65502 # define __acquires(x) __attribute__((context(x,0,1)))
65503 # define __releases(x) __attribute__((context(x,1,0)))
65504 # define __acquire(x) __context__(x,1)
65505 @@ -17,13 +20,34 @@
65506 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
65507 extern void __chk_user_ptr(const volatile void __user *);
65508 extern void __chk_io_ptr(const volatile void __iomem *);
65509 +#elif defined(CHECKER_PLUGIN)
65510 +//# define __user
65511 +//# define __force_user
65512 +//# define __kernel
65513 +//# define __force_kernel
65514 +# define __safe
65515 +# define __force
65516 +# define __nocast
65517 +# define __iomem
65518 +# define __force_iomem
65519 +# define __chk_user_ptr(x) (void)0
65520 +# define __chk_io_ptr(x) (void)0
65521 +# define __builtin_warning(x, y...) (1)
65522 +# define __acquires(x)
65523 +# define __releases(x)
65524 +# define __acquire(x) (void)0
65525 +# define __release(x) (void)0
65526 +# define __cond_lock(x,c) (c)
65527 #else
65528 # define __user
65529 +# define __force_user
65530 # define __kernel
65531 +# define __force_kernel
65532 # define __safe
65533 # define __force
65534 # define __nocast
65535 # define __iomem
65536 +# define __force_iomem
65537 # define __chk_user_ptr(x) (void)0
65538 # define __chk_io_ptr(x) (void)0
65539 # define __builtin_warning(x, y...) (1)
65540 @@ -247,6 +271,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
65541 # define __attribute_const__ /* unimplemented */
65542 #endif
65543
65544 +#ifndef __no_const
65545 +# define __no_const
65546 +#endif
65547 +
65548 +#ifndef __do_const
65549 +# define __do_const
65550 +#endif
65551 +
65552 /*
65553 * Tell gcc if a function is cold. The compiler will assume any path
65554 * directly leading to the call is unlikely.
65555 @@ -256,6 +288,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
65556 #define __cold
65557 #endif
65558
65559 +#ifndef __alloc_size
65560 +#define __alloc_size(...)
65561 +#endif
65562 +
65563 +#ifndef __bos
65564 +#define __bos(ptr, arg)
65565 +#endif
65566 +
65567 +#ifndef __bos0
65568 +#define __bos0(ptr)
65569 +#endif
65570 +
65571 +#ifndef __bos1
65572 +#define __bos1(ptr)
65573 +#endif
65574 +
65575 /* Simple shorthand for a section definition */
65576 #ifndef __section
65577 # define __section(S) __attribute__ ((__section__(#S)))
65578 @@ -278,6 +326,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
65579 * use is to mediate communication between process-level code and irq/NMI
65580 * handlers, all running on the same CPU.
65581 */
65582 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
65583 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
65584 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
65585
65586 #endif /* __LINUX_COMPILER_H */
65587 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
65588 index fd92988..a3164bd 100644
65589 --- a/include/linux/crypto.h
65590 +++ b/include/linux/crypto.h
65591 @@ -394,7 +394,7 @@ struct cipher_tfm {
65592 const u8 *key, unsigned int keylen);
65593 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
65594 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
65595 -};
65596 +} __no_const;
65597
65598 struct hash_tfm {
65599 int (*init)(struct hash_desc *desc);
65600 @@ -415,13 +415,13 @@ struct compress_tfm {
65601 int (*cot_decompress)(struct crypto_tfm *tfm,
65602 const u8 *src, unsigned int slen,
65603 u8 *dst, unsigned int *dlen);
65604 -};
65605 +} __no_const;
65606
65607 struct rng_tfm {
65608 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
65609 unsigned int dlen);
65610 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
65611 -};
65612 +} __no_const;
65613
65614 #define crt_ablkcipher crt_u.ablkcipher
65615 #define crt_aead crt_u.aead
65616 diff --git a/include/linux/dcache.h b/include/linux/dcache.h
65617 index 30b93b2..cd7a8db 100644
65618 --- a/include/linux/dcache.h
65619 +++ b/include/linux/dcache.h
65620 @@ -119,6 +119,8 @@ struct dentry {
65621 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
65622 };
65623
65624 +#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
65625 +
65626 /*
65627 * dentry->d_lock spinlock nesting subclasses:
65628 *
65629 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
65630 index 3e9bd6a..f4e1aa0 100644
65631 --- a/include/linux/decompress/mm.h
65632 +++ b/include/linux/decompress/mm.h
65633 @@ -78,7 +78,7 @@ static void free(void *where)
65634 * warnings when not needed (indeed large_malloc / large_free are not
65635 * needed by inflate */
65636
65637 -#define malloc(a) kmalloc(a, GFP_KERNEL)
65638 +#define malloc(a) kmalloc((a), GFP_KERNEL)
65639 #define free(a) kfree(a)
65640
65641 #define large_malloc(a) vmalloc(a)
65642 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
65643 index 91b7618..92a93d32 100644
65644 --- a/include/linux/dma-mapping.h
65645 +++ b/include/linux/dma-mapping.h
65646 @@ -16,51 +16,51 @@ enum dma_data_direction {
65647 };
65648
65649 struct dma_map_ops {
65650 - void* (*alloc_coherent)(struct device *dev, size_t size,
65651 + void* (* const alloc_coherent)(struct device *dev, size_t size,
65652 dma_addr_t *dma_handle, gfp_t gfp);
65653 - void (*free_coherent)(struct device *dev, size_t size,
65654 + void (* const free_coherent)(struct device *dev, size_t size,
65655 void *vaddr, dma_addr_t dma_handle);
65656 - dma_addr_t (*map_page)(struct device *dev, struct page *page,
65657 + dma_addr_t (* const map_page)(struct device *dev, struct page *page,
65658 unsigned long offset, size_t size,
65659 enum dma_data_direction dir,
65660 struct dma_attrs *attrs);
65661 - void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
65662 + void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
65663 size_t size, enum dma_data_direction dir,
65664 struct dma_attrs *attrs);
65665 - int (*map_sg)(struct device *dev, struct scatterlist *sg,
65666 + int (* const map_sg)(struct device *dev, struct scatterlist *sg,
65667 int nents, enum dma_data_direction dir,
65668 struct dma_attrs *attrs);
65669 - void (*unmap_sg)(struct device *dev,
65670 + void (* const unmap_sg)(struct device *dev,
65671 struct scatterlist *sg, int nents,
65672 enum dma_data_direction dir,
65673 struct dma_attrs *attrs);
65674 - void (*sync_single_for_cpu)(struct device *dev,
65675 + void (* const sync_single_for_cpu)(struct device *dev,
65676 dma_addr_t dma_handle, size_t size,
65677 enum dma_data_direction dir);
65678 - void (*sync_single_for_device)(struct device *dev,
65679 + void (* const sync_single_for_device)(struct device *dev,
65680 dma_addr_t dma_handle, size_t size,
65681 enum dma_data_direction dir);
65682 - void (*sync_single_range_for_cpu)(struct device *dev,
65683 + void (* const sync_single_range_for_cpu)(struct device *dev,
65684 dma_addr_t dma_handle,
65685 unsigned long offset,
65686 size_t size,
65687 enum dma_data_direction dir);
65688 - void (*sync_single_range_for_device)(struct device *dev,
65689 + void (* const sync_single_range_for_device)(struct device *dev,
65690 dma_addr_t dma_handle,
65691 unsigned long offset,
65692 size_t size,
65693 enum dma_data_direction dir);
65694 - void (*sync_sg_for_cpu)(struct device *dev,
65695 + void (* const sync_sg_for_cpu)(struct device *dev,
65696 struct scatterlist *sg, int nents,
65697 enum dma_data_direction dir);
65698 - void (*sync_sg_for_device)(struct device *dev,
65699 + void (* const sync_sg_for_device)(struct device *dev,
65700 struct scatterlist *sg, int nents,
65701 enum dma_data_direction dir);
65702 - int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
65703 - int (*dma_supported)(struct device *dev, u64 mask);
65704 + int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
65705 + int (* const dma_supported)(struct device *dev, u64 mask);
65706 int (*set_dma_mask)(struct device *dev, u64 mask);
65707 int is_phys;
65708 -};
65709 +} __do_const;
65710
65711 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
65712
65713 diff --git a/include/linux/dst.h b/include/linux/dst.h
65714 index e26fed8..b976d9f 100644
65715 --- a/include/linux/dst.h
65716 +++ b/include/linux/dst.h
65717 @@ -380,7 +380,7 @@ struct dst_node
65718 struct thread_pool *pool;
65719
65720 /* Transaction IDs live here */
65721 - atomic_long_t gen;
65722 + atomic_long_unchecked_t gen;
65723
65724 /*
65725 * How frequently and how many times transaction
65726 diff --git a/include/linux/elf.h b/include/linux/elf.h
65727 index 90a4ed0..d652617 100644
65728 --- a/include/linux/elf.h
65729 +++ b/include/linux/elf.h
65730 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
65731 #define PT_GNU_EH_FRAME 0x6474e550
65732
65733 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
65734 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
65735 +
65736 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
65737 +
65738 +/* Constants for the e_flags field */
65739 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
65740 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
65741 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
65742 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
65743 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
65744 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
65745
65746 /* These constants define the different elf file types */
65747 #define ET_NONE 0
65748 @@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
65749 #define DT_DEBUG 21
65750 #define DT_TEXTREL 22
65751 #define DT_JMPREL 23
65752 +#define DT_FLAGS 30
65753 + #define DF_TEXTREL 0x00000004
65754 #define DT_ENCODING 32
65755 #define OLD_DT_LOOS 0x60000000
65756 #define DT_LOOS 0x6000000d
65757 @@ -230,6 +243,19 @@ typedef struct elf64_hdr {
65758 #define PF_W 0x2
65759 #define PF_X 0x1
65760
65761 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
65762 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
65763 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
65764 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
65765 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
65766 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
65767 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
65768 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
65769 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
65770 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
65771 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
65772 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
65773 +
65774 typedef struct elf32_phdr{
65775 Elf32_Word p_type;
65776 Elf32_Off p_offset;
65777 @@ -322,6 +348,8 @@ typedef struct elf64_shdr {
65778 #define EI_OSABI 7
65779 #define EI_PAD 8
65780
65781 +#define EI_PAX 14
65782 +
65783 #define ELFMAG0 0x7f /* EI_MAG */
65784 #define ELFMAG1 'E'
65785 #define ELFMAG2 'L'
65786 @@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
65787 #define elf_phdr elf32_phdr
65788 #define elf_note elf32_note
65789 #define elf_addr_t Elf32_Off
65790 +#define elf_dyn Elf32_Dyn
65791
65792 #else
65793
65794 @@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
65795 #define elf_phdr elf64_phdr
65796 #define elf_note elf64_note
65797 #define elf_addr_t Elf64_Off
65798 +#define elf_dyn Elf64_Dyn
65799
65800 #endif
65801
65802 diff --git a/include/linux/fs.h b/include/linux/fs.h
65803 index 1b9a47a..6fe2934 100644
65804 --- a/include/linux/fs.h
65805 +++ b/include/linux/fs.h
65806 @@ -568,41 +568,41 @@ typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
65807 unsigned long, unsigned long);
65808
65809 struct address_space_operations {
65810 - int (*writepage)(struct page *page, struct writeback_control *wbc);
65811 - int (*readpage)(struct file *, struct page *);
65812 - void (*sync_page)(struct page *);
65813 + int (* const writepage)(struct page *page, struct writeback_control *wbc);
65814 + int (* const readpage)(struct file *, struct page *);
65815 + void (* const sync_page)(struct page *);
65816
65817 /* Write back some dirty pages from this mapping. */
65818 - int (*writepages)(struct address_space *, struct writeback_control *);
65819 + int (* const writepages)(struct address_space *, struct writeback_control *);
65820
65821 /* Set a page dirty. Return true if this dirtied it */
65822 - int (*set_page_dirty)(struct page *page);
65823 + int (* const set_page_dirty)(struct page *page);
65824
65825 - int (*readpages)(struct file *filp, struct address_space *mapping,
65826 + int (* const readpages)(struct file *filp, struct address_space *mapping,
65827 struct list_head *pages, unsigned nr_pages);
65828
65829 - int (*write_begin)(struct file *, struct address_space *mapping,
65830 + int (* const write_begin)(struct file *, struct address_space *mapping,
65831 loff_t pos, unsigned len, unsigned flags,
65832 struct page **pagep, void **fsdata);
65833 - int (*write_end)(struct file *, struct address_space *mapping,
65834 + int (* const write_end)(struct file *, struct address_space *mapping,
65835 loff_t pos, unsigned len, unsigned copied,
65836 struct page *page, void *fsdata);
65837
65838 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
65839 - sector_t (*bmap)(struct address_space *, sector_t);
65840 - void (*invalidatepage) (struct page *, unsigned long);
65841 - int (*releasepage) (struct page *, gfp_t);
65842 - ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
65843 + sector_t (* const bmap)(struct address_space *, sector_t);
65844 + void (* const invalidatepage) (struct page *, unsigned long);
65845 + int (* const releasepage) (struct page *, gfp_t);
65846 + ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
65847 loff_t offset, unsigned long nr_segs);
65848 - int (*get_xip_mem)(struct address_space *, pgoff_t, int,
65849 + int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
65850 void **, unsigned long *);
65851 /* migrate the contents of a page to the specified target */
65852 - int (*migratepage) (struct address_space *,
65853 + int (* const migratepage) (struct address_space *,
65854 struct page *, struct page *);
65855 - int (*launder_page) (struct page *);
65856 - int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
65857 + int (* const launder_page) (struct page *);
65858 + int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
65859 unsigned long);
65860 - int (*error_remove_page)(struct address_space *, struct page *);
65861 + int (* const error_remove_page)(struct address_space *, struct page *);
65862 };
65863
65864 /*
65865 @@ -1031,19 +1031,19 @@ static inline int file_check_writeable(struct file *filp)
65866 typedef struct files_struct *fl_owner_t;
65867
65868 struct file_lock_operations {
65869 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
65870 - void (*fl_release_private)(struct file_lock *);
65871 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
65872 + void (* const fl_release_private)(struct file_lock *);
65873 };
65874
65875 struct lock_manager_operations {
65876 - int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
65877 - void (*fl_notify)(struct file_lock *); /* unblock callback */
65878 - int (*fl_grant)(struct file_lock *, struct file_lock *, int);
65879 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
65880 - void (*fl_release_private)(struct file_lock *);
65881 - void (*fl_break)(struct file_lock *);
65882 - int (*fl_mylease)(struct file_lock *, struct file_lock *);
65883 - int (*fl_change)(struct file_lock **, int);
65884 + int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
65885 + void (* const fl_notify)(struct file_lock *); /* unblock callback */
65886 + int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
65887 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
65888 + void (* const fl_release_private)(struct file_lock *);
65889 + void (* const fl_break)(struct file_lock *);
65890 + int (* const fl_mylease)(struct file_lock *, struct file_lock *);
65891 + int (* const fl_change)(struct file_lock **, int);
65892 };
65893
65894 struct lock_manager {
65895 @@ -1442,7 +1442,7 @@ struct fiemap_extent_info {
65896 unsigned int fi_flags; /* Flags as passed from user */
65897 unsigned int fi_extents_mapped; /* Number of mapped extents */
65898 unsigned int fi_extents_max; /* Size of fiemap_extent array */
65899 - struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
65900 + struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
65901 * array */
65902 };
65903 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
65904 @@ -1512,7 +1512,8 @@ struct file_operations {
65905 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
65906 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
65907 int (*setlease)(struct file *, long, struct file_lock **);
65908 -};
65909 +} __do_const;
65910 +typedef struct file_operations __no_const file_operations_no_const;
65911
65912 struct inode_operations {
65913 int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
65914 @@ -1559,30 +1560,30 @@ extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
65915 unsigned long, loff_t *);
65916
65917 struct super_operations {
65918 - struct inode *(*alloc_inode)(struct super_block *sb);
65919 - void (*destroy_inode)(struct inode *);
65920 + struct inode *(* const alloc_inode)(struct super_block *sb);
65921 + void (* const destroy_inode)(struct inode *);
65922
65923 - void (*dirty_inode) (struct inode *);
65924 - int (*write_inode) (struct inode *, int);
65925 - void (*drop_inode) (struct inode *);
65926 - void (*delete_inode) (struct inode *);
65927 - void (*put_super) (struct super_block *);
65928 - void (*write_super) (struct super_block *);
65929 - int (*sync_fs)(struct super_block *sb, int wait);
65930 - int (*freeze_fs) (struct super_block *);
65931 - int (*unfreeze_fs) (struct super_block *);
65932 - int (*statfs) (struct dentry *, struct kstatfs *);
65933 - int (*remount_fs) (struct super_block *, int *, char *);
65934 - void (*clear_inode) (struct inode *);
65935 - void (*umount_begin) (struct super_block *);
65936 + void (* const dirty_inode) (struct inode *);
65937 + int (* const write_inode) (struct inode *, int);
65938 + void (* const drop_inode) (struct inode *);
65939 + void (* const delete_inode) (struct inode *);
65940 + void (* const put_super) (struct super_block *);
65941 + void (* const write_super) (struct super_block *);
65942 + int (* const sync_fs)(struct super_block *sb, int wait);
65943 + int (* const freeze_fs) (struct super_block *);
65944 + int (* const unfreeze_fs) (struct super_block *);
65945 + int (* const statfs) (struct dentry *, struct kstatfs *);
65946 + int (* const remount_fs) (struct super_block *, int *, char *);
65947 + void (* const clear_inode) (struct inode *);
65948 + void (* const umount_begin) (struct super_block *);
65949
65950 - int (*show_options)(struct seq_file *, struct vfsmount *);
65951 - int (*show_stats)(struct seq_file *, struct vfsmount *);
65952 + int (* const show_options)(struct seq_file *, struct vfsmount *);
65953 + int (* const show_stats)(struct seq_file *, struct vfsmount *);
65954 #ifdef CONFIG_QUOTA
65955 - ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
65956 - ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
65957 + ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
65958 + ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
65959 #endif
65960 - int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
65961 + int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
65962 };
65963
65964 /*
65965 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
65966 index 78a05bf..2a7d3e1 100644
65967 --- a/include/linux/fs_struct.h
65968 +++ b/include/linux/fs_struct.h
65969 @@ -4,7 +4,7 @@
65970 #include <linux/path.h>
65971
65972 struct fs_struct {
65973 - int users;
65974 + atomic_t users;
65975 rwlock_t lock;
65976 int umask;
65977 int in_exec;
65978 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
65979 index 7be0c6f..2f63a2b 100644
65980 --- a/include/linux/fscache-cache.h
65981 +++ b/include/linux/fscache-cache.h
65982 @@ -116,7 +116,7 @@ struct fscache_operation {
65983 #endif
65984 };
65985
65986 -extern atomic_t fscache_op_debug_id;
65987 +extern atomic_unchecked_t fscache_op_debug_id;
65988 extern const struct slow_work_ops fscache_op_slow_work_ops;
65989
65990 extern void fscache_enqueue_operation(struct fscache_operation *);
65991 @@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
65992 fscache_operation_release_t release)
65993 {
65994 atomic_set(&op->usage, 1);
65995 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
65996 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
65997 op->release = release;
65998 INIT_LIST_HEAD(&op->pend_link);
65999 fscache_set_op_state(op, "Init");
66000 diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
66001 index 4d6f47b..00bcedb 100644
66002 --- a/include/linux/fsnotify_backend.h
66003 +++ b/include/linux/fsnotify_backend.h
66004 @@ -86,6 +86,7 @@ struct fsnotify_ops {
66005 void (*freeing_mark)(struct fsnotify_mark_entry *entry, struct fsnotify_group *group);
66006 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
66007 };
66008 +typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
66009
66010 /*
66011 * A group is a "thing" that wants to receive notification about filesystem
66012 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
66013 index 4ec5e67..42f1eb9 100644
66014 --- a/include/linux/ftrace_event.h
66015 +++ b/include/linux/ftrace_event.h
66016 @@ -163,7 +163,7 @@ extern int trace_define_field(struct ftrace_event_call *call,
66017 int filter_type);
66018 extern int trace_define_common_fields(struct ftrace_event_call *call);
66019
66020 -#define is_signed_type(type) (((type)(-1)) < 0)
66021 +#define is_signed_type(type) (((type)(-1)) < (type)1)
66022
66023 int trace_set_clr_event(const char *system, const char *event, int set);
66024
66025 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
66026 index 297df45..b6a74ff 100644
66027 --- a/include/linux/genhd.h
66028 +++ b/include/linux/genhd.h
66029 @@ -161,7 +161,7 @@ struct gendisk {
66030
66031 struct timer_rand_state *random;
66032
66033 - atomic_t sync_io; /* RAID */
66034 + atomic_unchecked_t sync_io; /* RAID */
66035 struct work_struct async_notify;
66036 #ifdef CONFIG_BLK_DEV_INTEGRITY
66037 struct blk_integrity *integrity;
66038 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
66039 new file mode 100644
66040 index 0000000..0dc3943
66041 --- /dev/null
66042 +++ b/include/linux/gracl.h
66043 @@ -0,0 +1,317 @@
66044 +#ifndef GR_ACL_H
66045 +#define GR_ACL_H
66046 +
66047 +#include <linux/grdefs.h>
66048 +#include <linux/resource.h>
66049 +#include <linux/capability.h>
66050 +#include <linux/dcache.h>
66051 +#include <asm/resource.h>
66052 +
66053 +/* Major status information */
66054 +
66055 +#define GR_VERSION "grsecurity 2.2.2"
66056 +#define GRSECURITY_VERSION 0x2202
66057 +
66058 +enum {
66059 + GR_SHUTDOWN = 0,
66060 + GR_ENABLE = 1,
66061 + GR_SPROLE = 2,
66062 + GR_RELOAD = 3,
66063 + GR_SEGVMOD = 4,
66064 + GR_STATUS = 5,
66065 + GR_UNSPROLE = 6,
66066 + GR_PASSSET = 7,
66067 + GR_SPROLEPAM = 8,
66068 +};
66069 +
66070 +/* Password setup definitions
66071 + * kernel/grhash.c */
66072 +enum {
66073 + GR_PW_LEN = 128,
66074 + GR_SALT_LEN = 16,
66075 + GR_SHA_LEN = 32,
66076 +};
66077 +
66078 +enum {
66079 + GR_SPROLE_LEN = 64,
66080 +};
66081 +
66082 +enum {
66083 + GR_NO_GLOB = 0,
66084 + GR_REG_GLOB,
66085 + GR_CREATE_GLOB
66086 +};
66087 +
66088 +#define GR_NLIMITS 32
66089 +
66090 +/* Begin Data Structures */
66091 +
66092 +struct sprole_pw {
66093 + unsigned char *rolename;
66094 + unsigned char salt[GR_SALT_LEN];
66095 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
66096 +};
66097 +
66098 +struct name_entry {
66099 + __u32 key;
66100 + ino_t inode;
66101 + dev_t device;
66102 + char *name;
66103 + __u16 len;
66104 + __u8 deleted;
66105 + struct name_entry *prev;
66106 + struct name_entry *next;
66107 +};
66108 +
66109 +struct inodev_entry {
66110 + struct name_entry *nentry;
66111 + struct inodev_entry *prev;
66112 + struct inodev_entry *next;
66113 +};
66114 +
66115 +struct acl_role_db {
66116 + struct acl_role_label **r_hash;
66117 + __u32 r_size;
66118 +};
66119 +
66120 +struct inodev_db {
66121 + struct inodev_entry **i_hash;
66122 + __u32 i_size;
66123 +};
66124 +
66125 +struct name_db {
66126 + struct name_entry **n_hash;
66127 + __u32 n_size;
66128 +};
66129 +
66130 +struct crash_uid {
66131 + uid_t uid;
66132 + unsigned long expires;
66133 +};
66134 +
66135 +struct gr_hash_struct {
66136 + void **table;
66137 + void **nametable;
66138 + void *first;
66139 + __u32 table_size;
66140 + __u32 used_size;
66141 + int type;
66142 +};
66143 +
66144 +/* Userspace Grsecurity ACL data structures */
66145 +
66146 +struct acl_subject_label {
66147 + char *filename;
66148 + ino_t inode;
66149 + dev_t device;
66150 + __u32 mode;
66151 + kernel_cap_t cap_mask;
66152 + kernel_cap_t cap_lower;
66153 + kernel_cap_t cap_invert_audit;
66154 +
66155 + struct rlimit res[GR_NLIMITS];
66156 + __u32 resmask;
66157 +
66158 + __u8 user_trans_type;
66159 + __u8 group_trans_type;
66160 + uid_t *user_transitions;
66161 + gid_t *group_transitions;
66162 + __u16 user_trans_num;
66163 + __u16 group_trans_num;
66164 +
66165 + __u32 sock_families[2];
66166 + __u32 ip_proto[8];
66167 + __u32 ip_type;
66168 + struct acl_ip_label **ips;
66169 + __u32 ip_num;
66170 + __u32 inaddr_any_override;
66171 +
66172 + __u32 crashes;
66173 + unsigned long expires;
66174 +
66175 + struct acl_subject_label *parent_subject;
66176 + struct gr_hash_struct *hash;
66177 + struct acl_subject_label *prev;
66178 + struct acl_subject_label *next;
66179 +
66180 + struct acl_object_label **obj_hash;
66181 + __u32 obj_hash_size;
66182 + __u16 pax_flags;
66183 +};
66184 +
66185 +struct role_allowed_ip {
66186 + __u32 addr;
66187 + __u32 netmask;
66188 +
66189 + struct role_allowed_ip *prev;
66190 + struct role_allowed_ip *next;
66191 +};
66192 +
66193 +struct role_transition {
66194 + char *rolename;
66195 +
66196 + struct role_transition *prev;
66197 + struct role_transition *next;
66198 +};
66199 +
66200 +struct acl_role_label {
66201 + char *rolename;
66202 + uid_t uidgid;
66203 + __u16 roletype;
66204 +
66205 + __u16 auth_attempts;
66206 + unsigned long expires;
66207 +
66208 + struct acl_subject_label *root_label;
66209 + struct gr_hash_struct *hash;
66210 +
66211 + struct acl_role_label *prev;
66212 + struct acl_role_label *next;
66213 +
66214 + struct role_transition *transitions;
66215 + struct role_allowed_ip *allowed_ips;
66216 + uid_t *domain_children;
66217 + __u16 domain_child_num;
66218 +
66219 + struct acl_subject_label **subj_hash;
66220 + __u32 subj_hash_size;
66221 +};
66222 +
66223 +struct user_acl_role_db {
66224 + struct acl_role_label **r_table;
66225 + __u32 num_pointers; /* Number of allocations to track */
66226 + __u32 num_roles; /* Number of roles */
66227 + __u32 num_domain_children; /* Number of domain children */
66228 + __u32 num_subjects; /* Number of subjects */
66229 + __u32 num_objects; /* Number of objects */
66230 +};
66231 +
66232 +struct acl_object_label {
66233 + char *filename;
66234 + ino_t inode;
66235 + dev_t device;
66236 + __u32 mode;
66237 +
66238 + struct acl_subject_label *nested;
66239 + struct acl_object_label *globbed;
66240 +
66241 + /* next two structures not used */
66242 +
66243 + struct acl_object_label *prev;
66244 + struct acl_object_label *next;
66245 +};
66246 +
66247 +struct acl_ip_label {
66248 + char *iface;
66249 + __u32 addr;
66250 + __u32 netmask;
66251 + __u16 low, high;
66252 + __u8 mode;
66253 + __u32 type;
66254 + __u32 proto[8];
66255 +
66256 + /* next two structures not used */
66257 +
66258 + struct acl_ip_label *prev;
66259 + struct acl_ip_label *next;
66260 +};
66261 +
66262 +struct gr_arg {
66263 + struct user_acl_role_db role_db;
66264 + unsigned char pw[GR_PW_LEN];
66265 + unsigned char salt[GR_SALT_LEN];
66266 + unsigned char sum[GR_SHA_LEN];
66267 + unsigned char sp_role[GR_SPROLE_LEN];
66268 + struct sprole_pw *sprole_pws;
66269 + dev_t segv_device;
66270 + ino_t segv_inode;
66271 + uid_t segv_uid;
66272 + __u16 num_sprole_pws;
66273 + __u16 mode;
66274 +};
66275 +
66276 +struct gr_arg_wrapper {
66277 + struct gr_arg *arg;
66278 + __u32 version;
66279 + __u32 size;
66280 +};
66281 +
66282 +struct subject_map {
66283 + struct acl_subject_label *user;
66284 + struct acl_subject_label *kernel;
66285 + struct subject_map *prev;
66286 + struct subject_map *next;
66287 +};
66288 +
66289 +struct acl_subj_map_db {
66290 + struct subject_map **s_hash;
66291 + __u32 s_size;
66292 +};
66293 +
66294 +/* End Data Structures Section */
66295 +
66296 +/* Hash functions generated by empirical testing by Brad Spengler
66297 + Makes good use of the low bits of the inode. Generally 0-1 times
66298 + in loop for successful match. 0-3 for unsuccessful match.
66299 + Shift/add algorithm with modulus of table size and an XOR*/
66300 +
66301 +static __inline__ unsigned int
66302 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
66303 +{
66304 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
66305 +}
66306 +
66307 + static __inline__ unsigned int
66308 +shash(const struct acl_subject_label *userp, const unsigned int sz)
66309 +{
66310 + return ((const unsigned long)userp % sz);
66311 +}
66312 +
66313 +static __inline__ unsigned int
66314 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
66315 +{
66316 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
66317 +}
66318 +
66319 +static __inline__ unsigned int
66320 +nhash(const char *name, const __u16 len, const unsigned int sz)
66321 +{
66322 + return full_name_hash((const unsigned char *)name, len) % sz;
66323 +}
66324 +
66325 +#define FOR_EACH_ROLE_START(role) \
66326 + role = role_list; \
66327 + while (role) {
66328 +
66329 +#define FOR_EACH_ROLE_END(role) \
66330 + role = role->prev; \
66331 + }
66332 +
66333 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
66334 + subj = NULL; \
66335 + iter = 0; \
66336 + while (iter < role->subj_hash_size) { \
66337 + if (subj == NULL) \
66338 + subj = role->subj_hash[iter]; \
66339 + if (subj == NULL) { \
66340 + iter++; \
66341 + continue; \
66342 + }
66343 +
66344 +#define FOR_EACH_SUBJECT_END(subj,iter) \
66345 + subj = subj->next; \
66346 + if (subj == NULL) \
66347 + iter++; \
66348 + }
66349 +
66350 +
66351 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
66352 + subj = role->hash->first; \
66353 + while (subj != NULL) {
66354 +
66355 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
66356 + subj = subj->next; \
66357 + }
66358 +
66359 +#endif
66360 +
66361 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
66362 new file mode 100644
66363 index 0000000..323ecf2
66364 --- /dev/null
66365 +++ b/include/linux/gralloc.h
66366 @@ -0,0 +1,9 @@
66367 +#ifndef __GRALLOC_H
66368 +#define __GRALLOC_H
66369 +
66370 +void acl_free_all(void);
66371 +int acl_alloc_stack_init(unsigned long size);
66372 +void *acl_alloc(unsigned long len);
66373 +void *acl_alloc_num(unsigned long num, unsigned long len);
66374 +
66375 +#endif
66376 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
66377 new file mode 100644
66378 index 0000000..70d6cd5
66379 --- /dev/null
66380 +++ b/include/linux/grdefs.h
66381 @@ -0,0 +1,140 @@
66382 +#ifndef GRDEFS_H
66383 +#define GRDEFS_H
66384 +
66385 +/* Begin grsecurity status declarations */
66386 +
66387 +enum {
66388 + GR_READY = 0x01,
66389 + GR_STATUS_INIT = 0x00 // disabled state
66390 +};
66391 +
66392 +/* Begin ACL declarations */
66393 +
66394 +/* Role flags */
66395 +
66396 +enum {
66397 + GR_ROLE_USER = 0x0001,
66398 + GR_ROLE_GROUP = 0x0002,
66399 + GR_ROLE_DEFAULT = 0x0004,
66400 + GR_ROLE_SPECIAL = 0x0008,
66401 + GR_ROLE_AUTH = 0x0010,
66402 + GR_ROLE_NOPW = 0x0020,
66403 + GR_ROLE_GOD = 0x0040,
66404 + GR_ROLE_LEARN = 0x0080,
66405 + GR_ROLE_TPE = 0x0100,
66406 + GR_ROLE_DOMAIN = 0x0200,
66407 + GR_ROLE_PAM = 0x0400,
66408 + GR_ROLE_PERSIST = 0x800
66409 +};
66410 +
66411 +/* ACL Subject and Object mode flags */
66412 +enum {
66413 + GR_DELETED = 0x80000000
66414 +};
66415 +
66416 +/* ACL Object-only mode flags */
66417 +enum {
66418 + GR_READ = 0x00000001,
66419 + GR_APPEND = 0x00000002,
66420 + GR_WRITE = 0x00000004,
66421 + GR_EXEC = 0x00000008,
66422 + GR_FIND = 0x00000010,
66423 + GR_INHERIT = 0x00000020,
66424 + GR_SETID = 0x00000040,
66425 + GR_CREATE = 0x00000080,
66426 + GR_DELETE = 0x00000100,
66427 + GR_LINK = 0x00000200,
66428 + GR_AUDIT_READ = 0x00000400,
66429 + GR_AUDIT_APPEND = 0x00000800,
66430 + GR_AUDIT_WRITE = 0x00001000,
66431 + GR_AUDIT_EXEC = 0x00002000,
66432 + GR_AUDIT_FIND = 0x00004000,
66433 + GR_AUDIT_INHERIT= 0x00008000,
66434 + GR_AUDIT_SETID = 0x00010000,
66435 + GR_AUDIT_CREATE = 0x00020000,
66436 + GR_AUDIT_DELETE = 0x00040000,
66437 + GR_AUDIT_LINK = 0x00080000,
66438 + GR_PTRACERD = 0x00100000,
66439 + GR_NOPTRACE = 0x00200000,
66440 + GR_SUPPRESS = 0x00400000,
66441 + GR_NOLEARN = 0x00800000,
66442 + GR_INIT_TRANSFER= 0x01000000
66443 +};
66444 +
66445 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
66446 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
66447 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
66448 +
66449 +/* ACL subject-only mode flags */
66450 +enum {
66451 + GR_KILL = 0x00000001,
66452 + GR_VIEW = 0x00000002,
66453 + GR_PROTECTED = 0x00000004,
66454 + GR_LEARN = 0x00000008,
66455 + GR_OVERRIDE = 0x00000010,
66456 + /* just a placeholder, this mode is only used in userspace */
66457 + GR_DUMMY = 0x00000020,
66458 + GR_PROTSHM = 0x00000040,
66459 + GR_KILLPROC = 0x00000080,
66460 + GR_KILLIPPROC = 0x00000100,
66461 + /* just a placeholder, this mode is only used in userspace */
66462 + GR_NOTROJAN = 0x00000200,
66463 + GR_PROTPROCFD = 0x00000400,
66464 + GR_PROCACCT = 0x00000800,
66465 + GR_RELAXPTRACE = 0x00001000,
66466 + GR_NESTED = 0x00002000,
66467 + GR_INHERITLEARN = 0x00004000,
66468 + GR_PROCFIND = 0x00008000,
66469 + GR_POVERRIDE = 0x00010000,
66470 + GR_KERNELAUTH = 0x00020000,
66471 + GR_ATSECURE = 0x00040000,
66472 + GR_SHMEXEC = 0x00080000
66473 +};
66474 +
66475 +enum {
66476 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
66477 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
66478 + GR_PAX_ENABLE_MPROTECT = 0x0004,
66479 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
66480 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
66481 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
66482 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
66483 + GR_PAX_DISABLE_MPROTECT = 0x0400,
66484 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
66485 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
66486 +};
66487 +
66488 +enum {
66489 + GR_ID_USER = 0x01,
66490 + GR_ID_GROUP = 0x02,
66491 +};
66492 +
66493 +enum {
66494 + GR_ID_ALLOW = 0x01,
66495 + GR_ID_DENY = 0x02,
66496 +};
66497 +
66498 +#define GR_CRASH_RES 31
66499 +#define GR_UIDTABLE_MAX 500
66500 +
66501 +/* begin resource learning section */
66502 +enum {
66503 + GR_RLIM_CPU_BUMP = 60,
66504 + GR_RLIM_FSIZE_BUMP = 50000,
66505 + GR_RLIM_DATA_BUMP = 10000,
66506 + GR_RLIM_STACK_BUMP = 1000,
66507 + GR_RLIM_CORE_BUMP = 10000,
66508 + GR_RLIM_RSS_BUMP = 500000,
66509 + GR_RLIM_NPROC_BUMP = 1,
66510 + GR_RLIM_NOFILE_BUMP = 5,
66511 + GR_RLIM_MEMLOCK_BUMP = 50000,
66512 + GR_RLIM_AS_BUMP = 500000,
66513 + GR_RLIM_LOCKS_BUMP = 2,
66514 + GR_RLIM_SIGPENDING_BUMP = 5,
66515 + GR_RLIM_MSGQUEUE_BUMP = 10000,
66516 + GR_RLIM_NICE_BUMP = 1,
66517 + GR_RLIM_RTPRIO_BUMP = 1,
66518 + GR_RLIM_RTTIME_BUMP = 1000000
66519 +};
66520 +
66521 +#endif
66522 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
66523 new file mode 100644
66524 index 0000000..e5817d7
66525 --- /dev/null
66526 +++ b/include/linux/grinternal.h
66527 @@ -0,0 +1,218 @@
66528 +#ifndef __GRINTERNAL_H
66529 +#define __GRINTERNAL_H
66530 +
66531 +#ifdef CONFIG_GRKERNSEC
66532 +
66533 +#include <linux/fs.h>
66534 +#include <linux/mnt_namespace.h>
66535 +#include <linux/nsproxy.h>
66536 +#include <linux/gracl.h>
66537 +#include <linux/grdefs.h>
66538 +#include <linux/grmsg.h>
66539 +
66540 +void gr_add_learn_entry(const char *fmt, ...)
66541 + __attribute__ ((format (printf, 1, 2)));
66542 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
66543 + const struct vfsmount *mnt);
66544 +__u32 gr_check_create(const struct dentry *new_dentry,
66545 + const struct dentry *parent,
66546 + const struct vfsmount *mnt, const __u32 mode);
66547 +int gr_check_protected_task(const struct task_struct *task);
66548 +__u32 to_gr_audit(const __u32 reqmode);
66549 +int gr_set_acls(const int type);
66550 +int gr_apply_subject_to_task(struct task_struct *task);
66551 +int gr_acl_is_enabled(void);
66552 +char gr_roletype_to_char(void);
66553 +
66554 +void gr_handle_alertkill(struct task_struct *task);
66555 +char *gr_to_filename(const struct dentry *dentry,
66556 + const struct vfsmount *mnt);
66557 +char *gr_to_filename1(const struct dentry *dentry,
66558 + const struct vfsmount *mnt);
66559 +char *gr_to_filename2(const struct dentry *dentry,
66560 + const struct vfsmount *mnt);
66561 +char *gr_to_filename3(const struct dentry *dentry,
66562 + const struct vfsmount *mnt);
66563 +
66564 +extern int grsec_enable_harden_ptrace;
66565 +extern int grsec_enable_link;
66566 +extern int grsec_enable_fifo;
66567 +extern int grsec_enable_shm;
66568 +extern int grsec_enable_execlog;
66569 +extern int grsec_enable_signal;
66570 +extern int grsec_enable_audit_ptrace;
66571 +extern int grsec_enable_forkfail;
66572 +extern int grsec_enable_time;
66573 +extern int grsec_enable_rofs;
66574 +extern int grsec_enable_chroot_shmat;
66575 +extern int grsec_enable_chroot_mount;
66576 +extern int grsec_enable_chroot_double;
66577 +extern int grsec_enable_chroot_pivot;
66578 +extern int grsec_enable_chroot_chdir;
66579 +extern int grsec_enable_chroot_chmod;
66580 +extern int grsec_enable_chroot_mknod;
66581 +extern int grsec_enable_chroot_fchdir;
66582 +extern int grsec_enable_chroot_nice;
66583 +extern int grsec_enable_chroot_execlog;
66584 +extern int grsec_enable_chroot_caps;
66585 +extern int grsec_enable_chroot_sysctl;
66586 +extern int grsec_enable_chroot_unix;
66587 +extern int grsec_enable_tpe;
66588 +extern int grsec_tpe_gid;
66589 +extern int grsec_enable_tpe_all;
66590 +extern int grsec_enable_tpe_invert;
66591 +extern int grsec_enable_socket_all;
66592 +extern int grsec_socket_all_gid;
66593 +extern int grsec_enable_socket_client;
66594 +extern int grsec_socket_client_gid;
66595 +extern int grsec_enable_socket_server;
66596 +extern int grsec_socket_server_gid;
66597 +extern int grsec_audit_gid;
66598 +extern int grsec_enable_group;
66599 +extern int grsec_enable_audit_textrel;
66600 +extern int grsec_enable_log_rwxmaps;
66601 +extern int grsec_enable_mount;
66602 +extern int grsec_enable_chdir;
66603 +extern int grsec_resource_logging;
66604 +extern int grsec_enable_blackhole;
66605 +extern int grsec_lastack_retries;
66606 +extern int grsec_enable_brute;
66607 +extern int grsec_lock;
66608 +
66609 +extern spinlock_t grsec_alert_lock;
66610 +extern unsigned long grsec_alert_wtime;
66611 +extern unsigned long grsec_alert_fyet;
66612 +
66613 +extern spinlock_t grsec_audit_lock;
66614 +
66615 +extern rwlock_t grsec_exec_file_lock;
66616 +
66617 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
66618 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
66619 + (tsk)->exec_file->f_vfsmnt) : "/")
66620 +
66621 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
66622 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
66623 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
66624 +
66625 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
66626 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
66627 + (tsk)->exec_file->f_vfsmnt) : "/")
66628 +
66629 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
66630 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
66631 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
66632 +
66633 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
66634 +
66635 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
66636 +
66637 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
66638 + (task)->pid, (cred)->uid, \
66639 + (cred)->euid, (cred)->gid, (cred)->egid, \
66640 + gr_parent_task_fullpath(task), \
66641 + (task)->real_parent->comm, (task)->real_parent->pid, \
66642 + (pcred)->uid, (pcred)->euid, \
66643 + (pcred)->gid, (pcred)->egid
66644 +
66645 +#define GR_CHROOT_CAPS {{ \
66646 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
66647 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
66648 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
66649 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
66650 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
66651 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
66652 + CAP_TO_MASK(CAP_MAC_ADMIN) }}
66653 +
66654 +#define security_learn(normal_msg,args...) \
66655 +({ \
66656 + read_lock(&grsec_exec_file_lock); \
66657 + gr_add_learn_entry(normal_msg "\n", ## args); \
66658 + read_unlock(&grsec_exec_file_lock); \
66659 +})
66660 +
66661 +enum {
66662 + GR_DO_AUDIT,
66663 + GR_DONT_AUDIT,
66664 + GR_DONT_AUDIT_GOOD
66665 +};
66666 +
66667 +enum {
66668 + GR_TTYSNIFF,
66669 + GR_RBAC,
66670 + GR_RBAC_STR,
66671 + GR_STR_RBAC,
66672 + GR_RBAC_MODE2,
66673 + GR_RBAC_MODE3,
66674 + GR_FILENAME,
66675 + GR_SYSCTL_HIDDEN,
66676 + GR_NOARGS,
66677 + GR_ONE_INT,
66678 + GR_ONE_INT_TWO_STR,
66679 + GR_ONE_STR,
66680 + GR_STR_INT,
66681 + GR_TWO_STR_INT,
66682 + GR_TWO_INT,
66683 + GR_TWO_U64,
66684 + GR_THREE_INT,
66685 + GR_FIVE_INT_TWO_STR,
66686 + GR_TWO_STR,
66687 + GR_THREE_STR,
66688 + GR_FOUR_STR,
66689 + GR_STR_FILENAME,
66690 + GR_FILENAME_STR,
66691 + GR_FILENAME_TWO_INT,
66692 + GR_FILENAME_TWO_INT_STR,
66693 + GR_TEXTREL,
66694 + GR_PTRACE,
66695 + GR_RESOURCE,
66696 + GR_CAP,
66697 + GR_SIG,
66698 + GR_SIG2,
66699 + GR_CRASH1,
66700 + GR_CRASH2,
66701 + GR_PSACCT,
66702 + GR_RWXMAP
66703 +};
66704 +
66705 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
66706 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
66707 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
66708 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
66709 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
66710 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
66711 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
66712 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
66713 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
66714 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
66715 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
66716 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
66717 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
66718 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
66719 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
66720 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
66721 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
66722 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
66723 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
66724 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
66725 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
66726 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
66727 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
66728 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
66729 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
66730 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
66731 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
66732 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
66733 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
66734 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
66735 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
66736 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
66737 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
66738 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
66739 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
66740 +
66741 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
66742 +
66743 +#endif
66744 +
66745 +#endif
66746 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
66747 new file mode 100644
66748 index 0000000..9d5fd4a
66749 --- /dev/null
66750 +++ b/include/linux/grmsg.h
66751 @@ -0,0 +1,108 @@
66752 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
66753 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
66754 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
66755 +#define GR_STOPMOD_MSG "denied modification of module state by "
66756 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
66757 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
66758 +#define GR_IOPERM_MSG "denied use of ioperm() by "
66759 +#define GR_IOPL_MSG "denied use of iopl() by "
66760 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
66761 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
66762 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
66763 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
66764 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
66765 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
66766 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
66767 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
66768 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
66769 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
66770 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
66771 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
66772 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
66773 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
66774 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
66775 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
66776 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
66777 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
66778 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
66779 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
66780 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
66781 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
66782 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
66783 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
66784 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
66785 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
66786 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
66787 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
66788 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
66789 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
66790 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
66791 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
66792 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
66793 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
66794 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
66795 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
66796 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
66797 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
66798 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
66799 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
66800 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
66801 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
66802 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
66803 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
66804 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
66805 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
66806 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
66807 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
66808 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
66809 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
66810 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
66811 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
66812 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
66813 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
66814 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
66815 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
66816 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
66817 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
66818 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
66819 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
66820 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
66821 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
66822 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
66823 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
66824 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
66825 +#define GR_NICE_CHROOT_MSG "denied priority change by "
66826 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
66827 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
66828 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
66829 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
66830 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
66831 +#define GR_TIME_MSG "time set by "
66832 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
66833 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
66834 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
66835 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
66836 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
66837 +#define GR_BIND_MSG "denied bind() by "
66838 +#define GR_CONNECT_MSG "denied connect() by "
66839 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
66840 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
66841 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
66842 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
66843 +#define GR_CAP_ACL_MSG "use of %s denied for "
66844 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
66845 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
66846 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
66847 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
66848 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
66849 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
66850 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
66851 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
66852 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
66853 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
66854 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
66855 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
66856 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
66857 +#define GR_VM86_MSG "denied use of vm86 by "
66858 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
66859 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
66860 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
66861 new file mode 100644
66862 index 0000000..24676f4
66863 --- /dev/null
66864 +++ b/include/linux/grsecurity.h
66865 @@ -0,0 +1,218 @@
66866 +#ifndef GR_SECURITY_H
66867 +#define GR_SECURITY_H
66868 +#include <linux/fs.h>
66869 +#include <linux/fs_struct.h>
66870 +#include <linux/binfmts.h>
66871 +#include <linux/gracl.h>
66872 +#include <linux/compat.h>
66873 +
66874 +/* notify of brain-dead configs */
66875 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66876 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
66877 +#endif
66878 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
66879 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
66880 +#endif
66881 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
66882 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
66883 +#endif
66884 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
66885 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
66886 +#endif
66887 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
66888 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
66889 +#endif
66890 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
66891 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
66892 +#endif
66893 +
66894 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
66895 +void gr_handle_brute_check(void);
66896 +void gr_handle_kernel_exploit(void);
66897 +int gr_process_user_ban(void);
66898 +
66899 +char gr_roletype_to_char(void);
66900 +
66901 +int gr_acl_enable_at_secure(void);
66902 +
66903 +int gr_check_user_change(int real, int effective, int fs);
66904 +int gr_check_group_change(int real, int effective, int fs);
66905 +
66906 +void gr_del_task_from_ip_table(struct task_struct *p);
66907 +
66908 +int gr_pid_is_chrooted(struct task_struct *p);
66909 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
66910 +int gr_handle_chroot_nice(void);
66911 +int gr_handle_chroot_sysctl(const int op);
66912 +int gr_handle_chroot_setpriority(struct task_struct *p,
66913 + const int niceval);
66914 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
66915 +int gr_handle_chroot_chroot(const struct dentry *dentry,
66916 + const struct vfsmount *mnt);
66917 +void gr_handle_chroot_chdir(struct path *path);
66918 +int gr_handle_chroot_chmod(const struct dentry *dentry,
66919 + const struct vfsmount *mnt, const int mode);
66920 +int gr_handle_chroot_mknod(const struct dentry *dentry,
66921 + const struct vfsmount *mnt, const int mode);
66922 +int gr_handle_chroot_mount(const struct dentry *dentry,
66923 + const struct vfsmount *mnt,
66924 + const char *dev_name);
66925 +int gr_handle_chroot_pivot(void);
66926 +int gr_handle_chroot_unix(const pid_t pid);
66927 +
66928 +int gr_handle_rawio(const struct inode *inode);
66929 +
66930 +void gr_handle_ioperm(void);
66931 +void gr_handle_iopl(void);
66932 +
66933 +int gr_tpe_allow(const struct file *file);
66934 +
66935 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
66936 +void gr_clear_chroot_entries(struct task_struct *task);
66937 +
66938 +void gr_log_forkfail(const int retval);
66939 +void gr_log_timechange(void);
66940 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
66941 +void gr_log_chdir(const struct dentry *dentry,
66942 + const struct vfsmount *mnt);
66943 +void gr_log_chroot_exec(const struct dentry *dentry,
66944 + const struct vfsmount *mnt);
66945 +void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
66946 +#ifdef CONFIG_COMPAT
66947 +void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
66948 +#endif
66949 +void gr_log_remount(const char *devname, const int retval);
66950 +void gr_log_unmount(const char *devname, const int retval);
66951 +void gr_log_mount(const char *from, const char *to, const int retval);
66952 +void gr_log_textrel(struct vm_area_struct *vma);
66953 +void gr_log_rwxmmap(struct file *file);
66954 +void gr_log_rwxmprotect(struct file *file);
66955 +
66956 +int gr_handle_follow_link(const struct inode *parent,
66957 + const struct inode *inode,
66958 + const struct dentry *dentry,
66959 + const struct vfsmount *mnt);
66960 +int gr_handle_fifo(const struct dentry *dentry,
66961 + const struct vfsmount *mnt,
66962 + const struct dentry *dir, const int flag,
66963 + const int acc_mode);
66964 +int gr_handle_hardlink(const struct dentry *dentry,
66965 + const struct vfsmount *mnt,
66966 + struct inode *inode,
66967 + const int mode, const char *to);
66968 +
66969 +int gr_is_capable(const int cap);
66970 +int gr_is_capable_nolog(const int cap);
66971 +void gr_learn_resource(const struct task_struct *task, const int limit,
66972 + const unsigned long wanted, const int gt);
66973 +void gr_copy_label(struct task_struct *tsk);
66974 +void gr_handle_crash(struct task_struct *task, const int sig);
66975 +int gr_handle_signal(const struct task_struct *p, const int sig);
66976 +int gr_check_crash_uid(const uid_t uid);
66977 +int gr_check_protected_task(const struct task_struct *task);
66978 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
66979 +int gr_acl_handle_mmap(const struct file *file,
66980 + const unsigned long prot);
66981 +int gr_acl_handle_mprotect(const struct file *file,
66982 + const unsigned long prot);
66983 +int gr_check_hidden_task(const struct task_struct *tsk);
66984 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
66985 + const struct vfsmount *mnt);
66986 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
66987 + const struct vfsmount *mnt);
66988 +__u32 gr_acl_handle_access(const struct dentry *dentry,
66989 + const struct vfsmount *mnt, const int fmode);
66990 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
66991 + const struct vfsmount *mnt, mode_t mode);
66992 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
66993 + const struct vfsmount *mnt, mode_t mode);
66994 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
66995 + const struct vfsmount *mnt);
66996 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
66997 + const struct vfsmount *mnt);
66998 +int gr_handle_ptrace(struct task_struct *task, const long request);
66999 +int gr_handle_proc_ptrace(struct task_struct *task);
67000 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
67001 + const struct vfsmount *mnt);
67002 +int gr_check_crash_exec(const struct file *filp);
67003 +int gr_acl_is_enabled(void);
67004 +void gr_set_kernel_label(struct task_struct *task);
67005 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
67006 + const gid_t gid);
67007 +int gr_set_proc_label(const struct dentry *dentry,
67008 + const struct vfsmount *mnt,
67009 + const int unsafe_share);
67010 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
67011 + const struct vfsmount *mnt);
67012 +__u32 gr_acl_handle_open(const struct dentry *dentry,
67013 + const struct vfsmount *mnt, int acc_mode);
67014 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
67015 + const struct dentry *p_dentry,
67016 + const struct vfsmount *p_mnt,
67017 + int open_flags, int acc_mode, const int imode);
67018 +void gr_handle_create(const struct dentry *dentry,
67019 + const struct vfsmount *mnt);
67020 +void gr_handle_proc_create(const struct dentry *dentry,
67021 + const struct inode *inode);
67022 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
67023 + const struct dentry *parent_dentry,
67024 + const struct vfsmount *parent_mnt,
67025 + const int mode);
67026 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
67027 + const struct dentry *parent_dentry,
67028 + const struct vfsmount *parent_mnt);
67029 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
67030 + const struct vfsmount *mnt);
67031 +void gr_handle_delete(const ino_t ino, const dev_t dev);
67032 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
67033 + const struct vfsmount *mnt);
67034 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
67035 + const struct dentry *parent_dentry,
67036 + const struct vfsmount *parent_mnt,
67037 + const char *from);
67038 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
67039 + const struct dentry *parent_dentry,
67040 + const struct vfsmount *parent_mnt,
67041 + const struct dentry *old_dentry,
67042 + const struct vfsmount *old_mnt, const char *to);
67043 +int gr_acl_handle_rename(struct dentry *new_dentry,
67044 + struct dentry *parent_dentry,
67045 + const struct vfsmount *parent_mnt,
67046 + struct dentry *old_dentry,
67047 + struct inode *old_parent_inode,
67048 + struct vfsmount *old_mnt, const char *newname);
67049 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
67050 + struct dentry *old_dentry,
67051 + struct dentry *new_dentry,
67052 + struct vfsmount *mnt, const __u8 replace);
67053 +__u32 gr_check_link(const struct dentry *new_dentry,
67054 + const struct dentry *parent_dentry,
67055 + const struct vfsmount *parent_mnt,
67056 + const struct dentry *old_dentry,
67057 + const struct vfsmount *old_mnt);
67058 +int gr_acl_handle_filldir(const struct file *file, const char *name,
67059 + const unsigned int namelen, const ino_t ino);
67060 +
67061 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
67062 + const struct vfsmount *mnt);
67063 +void gr_acl_handle_exit(void);
67064 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
67065 +int gr_acl_handle_procpidmem(const struct task_struct *task);
67066 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
67067 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
67068 +void gr_audit_ptrace(struct task_struct *task);
67069 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
67070 +
67071 +#ifdef CONFIG_GRKERNSEC
67072 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
67073 +void gr_handle_vm86(void);
67074 +void gr_handle_mem_readwrite(u64 from, u64 to);
67075 +
67076 +extern int grsec_enable_dmesg;
67077 +extern int grsec_disable_privio;
67078 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
67079 +extern int grsec_enable_chroot_findtask;
67080 +#endif
67081 +#endif
67082 +
67083 +#endif
67084 diff --git a/include/linux/hdpu_features.h b/include/linux/hdpu_features.h
67085 index 6a87154..a3ce57b 100644
67086 --- a/include/linux/hdpu_features.h
67087 +++ b/include/linux/hdpu_features.h
67088 @@ -3,7 +3,7 @@
67089 struct cpustate_t {
67090 spinlock_t lock;
67091 int excl;
67092 - int open_count;
67093 + atomic_t open_count;
67094 unsigned char cached_val;
67095 int inited;
67096 unsigned long *set_addr;
67097 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
67098 index 211ff44..00ab6d7 100644
67099 --- a/include/linux/highmem.h
67100 +++ b/include/linux/highmem.h
67101 @@ -137,6 +137,18 @@ static inline void clear_highpage(struct page *page)
67102 kunmap_atomic(kaddr, KM_USER0);
67103 }
67104
67105 +static inline void sanitize_highpage(struct page *page)
67106 +{
67107 + void *kaddr;
67108 + unsigned long flags;
67109 +
67110 + local_irq_save(flags);
67111 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
67112 + clear_page(kaddr);
67113 + kunmap_atomic(kaddr, KM_CLEARPAGE);
67114 + local_irq_restore(flags);
67115 +}
67116 +
67117 static inline void zero_user_segments(struct page *page,
67118 unsigned start1, unsigned end1,
67119 unsigned start2, unsigned end2)
67120 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
67121 index 7b40cda..24eb44e 100644
67122 --- a/include/linux/i2c.h
67123 +++ b/include/linux/i2c.h
67124 @@ -325,6 +325,7 @@ struct i2c_algorithm {
67125 /* To determine what the adapter supports */
67126 u32 (*functionality) (struct i2c_adapter *);
67127 };
67128 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
67129
67130 /*
67131 * i2c_adapter is the structure used to identify a physical i2c bus along
67132 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
67133 index 4c4e57d..f3c5303 100644
67134 --- a/include/linux/i2o.h
67135 +++ b/include/linux/i2o.h
67136 @@ -564,7 +564,7 @@ struct i2o_controller {
67137 struct i2o_device *exec; /* Executive */
67138 #if BITS_PER_LONG == 64
67139 spinlock_t context_list_lock; /* lock for context_list */
67140 - atomic_t context_list_counter; /* needed for unique contexts */
67141 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
67142 struct list_head context_list; /* list of context id's
67143 and pointers */
67144 #endif
67145 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
67146 index 21a6f5d..dc42eab 100644
67147 --- a/include/linux/init_task.h
67148 +++ b/include/linux/init_task.h
67149 @@ -83,6 +83,12 @@ extern struct group_info init_groups;
67150 #define INIT_IDS
67151 #endif
67152
67153 +#ifdef CONFIG_X86
67154 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
67155 +#else
67156 +#define INIT_TASK_THREAD_INFO
67157 +#endif
67158 +
67159 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
67160 /*
67161 * Because of the reduced scope of CAP_SETPCAP when filesystem
67162 @@ -156,6 +162,7 @@ extern struct cred init_cred;
67163 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
67164 .comm = "swapper", \
67165 .thread = INIT_THREAD, \
67166 + INIT_TASK_THREAD_INFO \
67167 .fs = &init_fs, \
67168 .files = &init_files, \
67169 .signal = &init_signals, \
67170 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
67171 index 4f0a72a..a849599 100644
67172 --- a/include/linux/intel-iommu.h
67173 +++ b/include/linux/intel-iommu.h
67174 @@ -296,7 +296,7 @@ struct iommu_flush {
67175 u8 fm, u64 type);
67176 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
67177 unsigned int size_order, u64 type);
67178 -};
67179 +} __no_const;
67180
67181 enum {
67182 SR_DMAR_FECTL_REG,
67183 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
67184 index c739150..be577b5 100644
67185 --- a/include/linux/interrupt.h
67186 +++ b/include/linux/interrupt.h
67187 @@ -369,7 +369,7 @@ enum
67188 /* map softirq index to softirq name. update 'softirq_to_name' in
67189 * kernel/softirq.c when adding a new softirq.
67190 */
67191 -extern char *softirq_to_name[NR_SOFTIRQS];
67192 +extern const char * const softirq_to_name[NR_SOFTIRQS];
67193
67194 /* softirq mask and active fields moved to irq_cpustat_t in
67195 * asm/hardirq.h to get better cache usage. KAO
67196 @@ -377,12 +377,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
67197
67198 struct softirq_action
67199 {
67200 - void (*action)(struct softirq_action *);
67201 + void (*action)(void);
67202 };
67203
67204 asmlinkage void do_softirq(void);
67205 asmlinkage void __do_softirq(void);
67206 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
67207 +extern void open_softirq(int nr, void (*action)(void));
67208 extern void softirq_init(void);
67209 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
67210 extern void raise_softirq_irqoff(unsigned int nr);
67211 diff --git a/include/linux/irq.h b/include/linux/irq.h
67212 index 9e5f45a..025865b 100644
67213 --- a/include/linux/irq.h
67214 +++ b/include/linux/irq.h
67215 @@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
67216 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
67217 bool boot)
67218 {
67219 +#ifdef CONFIG_CPUMASK_OFFSTACK
67220 gfp_t gfp = GFP_ATOMIC;
67221
67222 if (boot)
67223 gfp = GFP_NOWAIT;
67224
67225 -#ifdef CONFIG_CPUMASK_OFFSTACK
67226 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
67227 return false;
67228
67229 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
67230 index 7922742..27306a2 100644
67231 --- a/include/linux/kallsyms.h
67232 +++ b/include/linux/kallsyms.h
67233 @@ -15,7 +15,8 @@
67234
67235 struct module;
67236
67237 -#ifdef CONFIG_KALLSYMS
67238 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
67239 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67240 /* Lookup the address for a symbol. Returns 0 if not found. */
67241 unsigned long kallsyms_lookup_name(const char *name);
67242
67243 @@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
67244 /* Stupid that this does nothing, but I didn't create this mess. */
67245 #define __print_symbol(fmt, addr)
67246 #endif /*CONFIG_KALLSYMS*/
67247 +#else /* when included by kallsyms.c, vsnprintf.c, or
67248 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
67249 +extern void __print_symbol(const char *fmt, unsigned long address);
67250 +extern int sprint_symbol(char *buffer, unsigned long address);
67251 +const char *kallsyms_lookup(unsigned long addr,
67252 + unsigned long *symbolsize,
67253 + unsigned long *offset,
67254 + char **modname, char *namebuf);
67255 +#endif
67256
67257 /* This macro allows us to keep printk typechecking */
67258 static void __check_printsym_format(const char *fmt, ...)
67259 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
67260 index 6adcc29..13369e8 100644
67261 --- a/include/linux/kgdb.h
67262 +++ b/include/linux/kgdb.h
67263 @@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
67264
67265 extern int kgdb_connected;
67266
67267 -extern atomic_t kgdb_setting_breakpoint;
67268 -extern atomic_t kgdb_cpu_doing_single_step;
67269 +extern atomic_unchecked_t kgdb_setting_breakpoint;
67270 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
67271
67272 extern struct task_struct *kgdb_usethread;
67273 extern struct task_struct *kgdb_contthread;
67274 @@ -235,7 +235,7 @@ struct kgdb_arch {
67275 int (*remove_hw_breakpoint)(unsigned long, int, enum kgdb_bptype);
67276 void (*remove_all_hw_break)(void);
67277 void (*correct_hw_break)(void);
67278 -};
67279 +} __do_const;
67280
67281 /**
67282 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
67283 @@ -257,14 +257,14 @@ struct kgdb_io {
67284 int (*init) (void);
67285 void (*pre_exception) (void);
67286 void (*post_exception) (void);
67287 -};
67288 +} __do_const;
67289
67290 -extern struct kgdb_arch arch_kgdb_ops;
67291 +extern const struct kgdb_arch arch_kgdb_ops;
67292
67293 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
67294
67295 -extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
67296 -extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
67297 +extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
67298 +extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
67299
67300 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
67301 extern int kgdb_mem2hex(char *mem, char *buf, int count);
67302 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
67303 index 384ca8b..83dd97d 100644
67304 --- a/include/linux/kmod.h
67305 +++ b/include/linux/kmod.h
67306 @@ -31,6 +31,8 @@
67307 * usually useless though. */
67308 extern int __request_module(bool wait, const char *name, ...) \
67309 __attribute__((format(printf, 2, 3)));
67310 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
67311 + __attribute__((format(printf, 3, 4)));
67312 #define request_module(mod...) __request_module(true, mod)
67313 #define request_module_nowait(mod...) __request_module(false, mod)
67314 #define try_then_request_module(x, mod...) \
67315 diff --git a/include/linux/kobject.h b/include/linux/kobject.h
67316 index 58ae8e0..3950d3c 100644
67317 --- a/include/linux/kobject.h
67318 +++ b/include/linux/kobject.h
67319 @@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
67320
67321 struct kobj_type {
67322 void (*release)(struct kobject *kobj);
67323 - struct sysfs_ops *sysfs_ops;
67324 + const struct sysfs_ops *sysfs_ops;
67325 struct attribute **default_attrs;
67326 };
67327
67328 @@ -118,9 +118,9 @@ struct kobj_uevent_env {
67329 };
67330
67331 struct kset_uevent_ops {
67332 - int (*filter)(struct kset *kset, struct kobject *kobj);
67333 - const char *(*name)(struct kset *kset, struct kobject *kobj);
67334 - int (*uevent)(struct kset *kset, struct kobject *kobj,
67335 + int (* const filter)(struct kset *kset, struct kobject *kobj);
67336 + const char *(* const name)(struct kset *kset, struct kobject *kobj);
67337 + int (* const uevent)(struct kset *kset, struct kobject *kobj,
67338 struct kobj_uevent_env *env);
67339 };
67340
67341 @@ -132,7 +132,7 @@ struct kobj_attribute {
67342 const char *buf, size_t count);
67343 };
67344
67345 -extern struct sysfs_ops kobj_sysfs_ops;
67346 +extern const struct sysfs_ops kobj_sysfs_ops;
67347
67348 /**
67349 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
67350 @@ -155,14 +155,14 @@ struct kset {
67351 struct list_head list;
67352 spinlock_t list_lock;
67353 struct kobject kobj;
67354 - struct kset_uevent_ops *uevent_ops;
67355 + const struct kset_uevent_ops *uevent_ops;
67356 };
67357
67358 extern void kset_init(struct kset *kset);
67359 extern int __must_check kset_register(struct kset *kset);
67360 extern void kset_unregister(struct kset *kset);
67361 extern struct kset * __must_check kset_create_and_add(const char *name,
67362 - struct kset_uevent_ops *u,
67363 + const struct kset_uevent_ops *u,
67364 struct kobject *parent_kobj);
67365
67366 static inline struct kset *to_kset(struct kobject *kobj)
67367 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
67368 index c728a50..752d821 100644
67369 --- a/include/linux/kvm_host.h
67370 +++ b/include/linux/kvm_host.h
67371 @@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
67372 void vcpu_load(struct kvm_vcpu *vcpu);
67373 void vcpu_put(struct kvm_vcpu *vcpu);
67374
67375 -int kvm_init(void *opaque, unsigned int vcpu_size,
67376 +int kvm_init(const void *opaque, unsigned int vcpu_size,
67377 struct module *module);
67378 void kvm_exit(void);
67379
67380 @@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
67381 struct kvm_guest_debug *dbg);
67382 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
67383
67384 -int kvm_arch_init(void *opaque);
67385 +int kvm_arch_init(const void *opaque);
67386 void kvm_arch_exit(void);
67387
67388 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
67389 diff --git a/include/linux/libata.h b/include/linux/libata.h
67390 index a069916..223edde 100644
67391 --- a/include/linux/libata.h
67392 +++ b/include/linux/libata.h
67393 @@ -525,11 +525,11 @@ struct ata_ioports {
67394
67395 struct ata_host {
67396 spinlock_t lock;
67397 - struct device *dev;
67398 + struct device *dev;
67399 void __iomem * const *iomap;
67400 unsigned int n_ports;
67401 void *private_data;
67402 - struct ata_port_operations *ops;
67403 + const struct ata_port_operations *ops;
67404 unsigned long flags;
67405 #ifdef CONFIG_ATA_ACPI
67406 acpi_handle acpi_handle;
67407 @@ -710,7 +710,7 @@ struct ata_link {
67408
67409 struct ata_port {
67410 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
67411 - struct ata_port_operations *ops;
67412 + const struct ata_port_operations *ops;
67413 spinlock_t *lock;
67414 /* Flags owned by the EH context. Only EH should touch these once the
67415 port is active */
67416 @@ -884,7 +884,7 @@ struct ata_port_operations {
67417 * fields must be pointers.
67418 */
67419 const struct ata_port_operations *inherits;
67420 -};
67421 +} __do_const;
67422
67423 struct ata_port_info {
67424 unsigned long flags;
67425 @@ -892,7 +892,7 @@ struct ata_port_info {
67426 unsigned long pio_mask;
67427 unsigned long mwdma_mask;
67428 unsigned long udma_mask;
67429 - struct ata_port_operations *port_ops;
67430 + const struct ata_port_operations *port_ops;
67431 void *private_data;
67432 };
67433
67434 @@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timing_normal[];
67435 extern const unsigned long sata_deb_timing_hotplug[];
67436 extern const unsigned long sata_deb_timing_long[];
67437
67438 -extern struct ata_port_operations ata_dummy_port_ops;
67439 +extern const struct ata_port_operations ata_dummy_port_ops;
67440 extern const struct ata_port_info ata_dummy_port_info;
67441
67442 static inline const unsigned long *
67443 @@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_host *host, int irq,
67444 struct scsi_host_template *sht);
67445 extern void ata_host_detach(struct ata_host *host);
67446 extern void ata_host_init(struct ata_host *, struct device *,
67447 - unsigned long, struct ata_port_operations *);
67448 + unsigned long, const struct ata_port_operations *);
67449 extern int ata_scsi_detect(struct scsi_host_template *sht);
67450 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
67451 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
67452 diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
67453 index fbc48f8..0886e57 100644
67454 --- a/include/linux/lockd/bind.h
67455 +++ b/include/linux/lockd/bind.h
67456 @@ -23,13 +23,13 @@ struct svc_rqst;
67457 * This is the set of functions for lockd->nfsd communication
67458 */
67459 struct nlmsvc_binding {
67460 - __be32 (*fopen)(struct svc_rqst *,
67461 + __be32 (* const fopen)(struct svc_rqst *,
67462 struct nfs_fh *,
67463 struct file **);
67464 - void (*fclose)(struct file *);
67465 + void (* const fclose)(struct file *);
67466 };
67467
67468 -extern struct nlmsvc_binding * nlmsvc_ops;
67469 +extern const struct nlmsvc_binding * nlmsvc_ops;
67470
67471 /*
67472 * Similar to nfs_client_initdata, but without the NFS-specific
67473 diff --git a/include/linux/mca.h b/include/linux/mca.h
67474 index 3797270..7765ede 100644
67475 --- a/include/linux/mca.h
67476 +++ b/include/linux/mca.h
67477 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
67478 int region);
67479 void * (*mca_transform_memory)(struct mca_device *,
67480 void *memory);
67481 -};
67482 +} __no_const;
67483
67484 struct mca_bus {
67485 u64 default_dma_mask;
67486 diff --git a/include/linux/memory.h b/include/linux/memory.h
67487 index 37fa19b..b597c85 100644
67488 --- a/include/linux/memory.h
67489 +++ b/include/linux/memory.h
67490 @@ -108,7 +108,7 @@ struct memory_accessor {
67491 size_t count);
67492 ssize_t (*write)(struct memory_accessor *, const char *buf,
67493 off_t offset, size_t count);
67494 -};
67495 +} __no_const;
67496
67497 /*
67498 * Kernel text modification mutex, used for code patching. Users of this lock
67499 diff --git a/include/linux/mm.h b/include/linux/mm.h
67500 index 11e5be6..1ff2423 100644
67501 --- a/include/linux/mm.h
67502 +++ b/include/linux/mm.h
67503 @@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void *objp);
67504
67505 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
67506 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
67507 +
67508 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
67509 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
67510 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
67511 +#else
67512 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
67513 +#endif
67514 +
67515 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
67516 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
67517
67518 @@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
67519 int set_page_dirty_lock(struct page *page);
67520 int clear_page_dirty_for_io(struct page *page);
67521
67522 -/* Is the vma a continuation of the stack vma above it? */
67523 -static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
67524 -{
67525 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
67526 -}
67527 -
67528 extern unsigned long move_page_tables(struct vm_area_struct *vma,
67529 unsigned long old_addr, struct vm_area_struct *new_vma,
67530 unsigned long new_addr, unsigned long len);
67531 @@ -890,6 +891,8 @@ struct shrinker {
67532 extern void register_shrinker(struct shrinker *);
67533 extern void unregister_shrinker(struct shrinker *);
67534
67535 +pgprot_t vm_get_page_prot(unsigned long vm_flags);
67536 +
67537 int vma_wants_writenotify(struct vm_area_struct *vma);
67538
67539 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
67540 @@ -1162,6 +1165,7 @@ out:
67541 }
67542
67543 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
67544 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
67545
67546 extern unsigned long do_brk(unsigned long, unsigned long);
67547
67548 @@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
67549 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
67550 struct vm_area_struct **pprev);
67551
67552 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
67553 +extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
67554 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
67555 +
67556 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
67557 NULL if none. Assume start_addr < end_addr. */
67558 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
67559 @@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
67560 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
67561 }
67562
67563 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
67564 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
67565 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
67566 unsigned long pfn, unsigned long size, pgprot_t);
67567 @@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long pfn, int trapno);
67568 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
67569 extern int sysctl_memory_failure_early_kill;
67570 extern int sysctl_memory_failure_recovery;
67571 -extern atomic_long_t mce_bad_pages;
67572 +extern atomic_long_unchecked_t mce_bad_pages;
67573 +
67574 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
67575 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
67576 +#else
67577 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
67578 +#endif
67579
67580 #endif /* __KERNEL__ */
67581 #endif /* _LINUX_MM_H */
67582 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
67583 index 9d12ed5..6d9707a 100644
67584 --- a/include/linux/mm_types.h
67585 +++ b/include/linux/mm_types.h
67586 @@ -186,6 +186,8 @@ struct vm_area_struct {
67587 #ifdef CONFIG_NUMA
67588 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
67589 #endif
67590 +
67591 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
67592 };
67593
67594 struct core_thread {
67595 @@ -287,6 +289,24 @@ struct mm_struct {
67596 #ifdef CONFIG_MMU_NOTIFIER
67597 struct mmu_notifier_mm *mmu_notifier_mm;
67598 #endif
67599 +
67600 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
67601 + unsigned long pax_flags;
67602 +#endif
67603 +
67604 +#ifdef CONFIG_PAX_DLRESOLVE
67605 + unsigned long call_dl_resolve;
67606 +#endif
67607 +
67608 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
67609 + unsigned long call_syscall;
67610 +#endif
67611 +
67612 +#ifdef CONFIG_PAX_ASLR
67613 + unsigned long delta_mmap; /* randomized offset */
67614 + unsigned long delta_stack; /* randomized offset */
67615 +#endif
67616 +
67617 };
67618
67619 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
67620 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
67621 index 4e02ee2..afb159e 100644
67622 --- a/include/linux/mmu_notifier.h
67623 +++ b/include/linux/mmu_notifier.h
67624 @@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
67625 */
67626 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
67627 ({ \
67628 - pte_t __pte; \
67629 + pte_t ___pte; \
67630 struct vm_area_struct *___vma = __vma; \
67631 unsigned long ___address = __address; \
67632 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
67633 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
67634 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
67635 - __pte; \
67636 + ___pte; \
67637 })
67638
67639 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
67640 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
67641 index 6c31a2a..4b0e930 100644
67642 --- a/include/linux/mmzone.h
67643 +++ b/include/linux/mmzone.h
67644 @@ -350,7 +350,7 @@ struct zone {
67645 unsigned long flags; /* zone flags, see below */
67646
67647 /* Zone statistics */
67648 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
67649 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
67650
67651 /*
67652 * prev_priority holds the scanning priority for this zone. It is
67653 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
67654 index f58e9d8..3503935 100644
67655 --- a/include/linux/mod_devicetable.h
67656 +++ b/include/linux/mod_devicetable.h
67657 @@ -12,7 +12,7 @@
67658 typedef unsigned long kernel_ulong_t;
67659 #endif
67660
67661 -#define PCI_ANY_ID (~0)
67662 +#define PCI_ANY_ID ((__u16)~0)
67663
67664 struct pci_device_id {
67665 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
67666 @@ -131,7 +131,7 @@ struct usb_device_id {
67667 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
67668 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
67669
67670 -#define HID_ANY_ID (~0)
67671 +#define HID_ANY_ID (~0U)
67672
67673 struct hid_device_id {
67674 __u16 bus;
67675 diff --git a/include/linux/module.h b/include/linux/module.h
67676 index 482efc8..642032b 100644
67677 --- a/include/linux/module.h
67678 +++ b/include/linux/module.h
67679 @@ -16,6 +16,7 @@
67680 #include <linux/kobject.h>
67681 #include <linux/moduleparam.h>
67682 #include <linux/tracepoint.h>
67683 +#include <linux/fs.h>
67684
67685 #include <asm/local.h>
67686 #include <asm/module.h>
67687 @@ -287,16 +288,16 @@ struct module
67688 int (*init)(void);
67689
67690 /* If this is non-NULL, vfree after init() returns */
67691 - void *module_init;
67692 + void *module_init_rx, *module_init_rw;
67693
67694 /* Here is the actual code + data, vfree'd on unload. */
67695 - void *module_core;
67696 + void *module_core_rx, *module_core_rw;
67697
67698 /* Here are the sizes of the init and core sections */
67699 - unsigned int init_size, core_size;
67700 + unsigned int init_size_rw, core_size_rw;
67701
67702 /* The size of the executable code in each section. */
67703 - unsigned int init_text_size, core_text_size;
67704 + unsigned int init_size_rx, core_size_rx;
67705
67706 /* Arch-specific module values */
67707 struct mod_arch_specific arch;
67708 @@ -345,6 +346,10 @@ struct module
67709 #ifdef CONFIG_EVENT_TRACING
67710 struct ftrace_event_call *trace_events;
67711 unsigned int num_trace_events;
67712 + struct file_operations trace_id;
67713 + struct file_operations trace_enable;
67714 + struct file_operations trace_format;
67715 + struct file_operations trace_filter;
67716 #endif
67717 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
67718 unsigned long *ftrace_callsites;
67719 @@ -393,16 +398,46 @@ struct module *__module_address(unsigned long addr);
67720 bool is_module_address(unsigned long addr);
67721 bool is_module_text_address(unsigned long addr);
67722
67723 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
67724 +{
67725 +
67726 +#ifdef CONFIG_PAX_KERNEXEC
67727 + if (ktla_ktva(addr) >= (unsigned long)start &&
67728 + ktla_ktva(addr) < (unsigned long)start + size)
67729 + return 1;
67730 +#endif
67731 +
67732 + return ((void *)addr >= start && (void *)addr < start + size);
67733 +}
67734 +
67735 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
67736 +{
67737 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
67738 +}
67739 +
67740 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
67741 +{
67742 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
67743 +}
67744 +
67745 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
67746 +{
67747 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
67748 +}
67749 +
67750 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
67751 +{
67752 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
67753 +}
67754 +
67755 static inline int within_module_core(unsigned long addr, struct module *mod)
67756 {
67757 - return (unsigned long)mod->module_core <= addr &&
67758 - addr < (unsigned long)mod->module_core + mod->core_size;
67759 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
67760 }
67761
67762 static inline int within_module_init(unsigned long addr, struct module *mod)
67763 {
67764 - return (unsigned long)mod->module_init <= addr &&
67765 - addr < (unsigned long)mod->module_init + mod->init_size;
67766 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
67767 }
67768
67769 /* Search for module by name: must hold module_mutex. */
67770 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
67771 index c1f40c2..682ca53 100644
67772 --- a/include/linux/moduleloader.h
67773 +++ b/include/linux/moduleloader.h
67774 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
67775 sections. Returns NULL on failure. */
67776 void *module_alloc(unsigned long size);
67777
67778 +#ifdef CONFIG_PAX_KERNEXEC
67779 +void *module_alloc_exec(unsigned long size);
67780 +#else
67781 +#define module_alloc_exec(x) module_alloc(x)
67782 +#endif
67783 +
67784 /* Free memory returned from module_alloc. */
67785 void module_free(struct module *mod, void *module_region);
67786
67787 +#ifdef CONFIG_PAX_KERNEXEC
67788 +void module_free_exec(struct module *mod, void *module_region);
67789 +#else
67790 +#define module_free_exec(x, y) module_free((x), (y))
67791 +#endif
67792 +
67793 /* Apply the given relocation to the (simplified) ELF. Return -error
67794 or 0. */
67795 int apply_relocate(Elf_Shdr *sechdrs,
67796 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
67797 index 82a9124..8a5f622 100644
67798 --- a/include/linux/moduleparam.h
67799 +++ b/include/linux/moduleparam.h
67800 @@ -132,7 +132,7 @@ struct kparam_array
67801
67802 /* Actually copy string: maxlen param is usually sizeof(string). */
67803 #define module_param_string(name, string, len, perm) \
67804 - static const struct kparam_string __param_string_##name \
67805 + static const struct kparam_string __param_string_##name __used \
67806 = { len, string }; \
67807 __module_param_call(MODULE_PARAM_PREFIX, name, \
67808 param_set_copystring, param_get_string, \
67809 @@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffer, struct kernel_param *kp);
67810
67811 /* Comma-separated array: *nump is set to number they actually specified. */
67812 #define module_param_array_named(name, array, type, nump, perm) \
67813 - static const struct kparam_array __param_arr_##name \
67814 + static const struct kparam_array __param_arr_##name __used \
67815 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
67816 sizeof(array[0]), array }; \
67817 __module_param_call(MODULE_PARAM_PREFIX, name, \
67818 diff --git a/include/linux/mutex.h b/include/linux/mutex.h
67819 index 878cab4..c92cb3e 100644
67820 --- a/include/linux/mutex.h
67821 +++ b/include/linux/mutex.h
67822 @@ -51,7 +51,7 @@ struct mutex {
67823 spinlock_t wait_lock;
67824 struct list_head wait_list;
67825 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
67826 - struct thread_info *owner;
67827 + struct task_struct *owner;
67828 #endif
67829 #ifdef CONFIG_DEBUG_MUTEXES
67830 const char *name;
67831 diff --git a/include/linux/namei.h b/include/linux/namei.h
67832 index ec0f607..d19e675 100644
67833 --- a/include/linux/namei.h
67834 +++ b/include/linux/namei.h
67835 @@ -22,7 +22,7 @@ struct nameidata {
67836 unsigned int flags;
67837 int last_type;
67838 unsigned depth;
67839 - char *saved_names[MAX_NESTED_LINKS + 1];
67840 + const char *saved_names[MAX_NESTED_LINKS + 1];
67841
67842 /* Intent data */
67843 union {
67844 @@ -84,12 +84,12 @@ extern int follow_up(struct path *);
67845 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
67846 extern void unlock_rename(struct dentry *, struct dentry *);
67847
67848 -static inline void nd_set_link(struct nameidata *nd, char *path)
67849 +static inline void nd_set_link(struct nameidata *nd, const char *path)
67850 {
67851 nd->saved_names[nd->depth] = path;
67852 }
67853
67854 -static inline char *nd_get_link(struct nameidata *nd)
67855 +static inline const char *nd_get_link(const struct nameidata *nd)
67856 {
67857 return nd->saved_names[nd->depth];
67858 }
67859 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
67860 index 9d7e8f7..04428c5 100644
67861 --- a/include/linux/netdevice.h
67862 +++ b/include/linux/netdevice.h
67863 @@ -637,6 +637,7 @@ struct net_device_ops {
67864 u16 xid);
67865 #endif
67866 };
67867 +typedef struct net_device_ops __no_const net_device_ops_no_const;
67868
67869 /*
67870 * The DEVICE structure.
67871 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
67872 new file mode 100644
67873 index 0000000..33f4af8
67874 --- /dev/null
67875 +++ b/include/linux/netfilter/xt_gradm.h
67876 @@ -0,0 +1,9 @@
67877 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
67878 +#define _LINUX_NETFILTER_XT_GRADM_H 1
67879 +
67880 +struct xt_gradm_mtinfo {
67881 + __u16 flags;
67882 + __u16 invflags;
67883 +};
67884 +
67885 +#endif
67886 diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
67887 index b359c4a..c08b334 100644
67888 --- a/include/linux/nodemask.h
67889 +++ b/include/linux/nodemask.h
67890 @@ -464,11 +464,11 @@ static inline int num_node_state(enum node_states state)
67891
67892 #define any_online_node(mask) \
67893 ({ \
67894 - int node; \
67895 - for_each_node_mask(node, (mask)) \
67896 - if (node_online(node)) \
67897 + int __node; \
67898 + for_each_node_mask(__node, (mask)) \
67899 + if (node_online(__node)) \
67900 break; \
67901 - node; \
67902 + __node; \
67903 })
67904
67905 #define num_online_nodes() num_node_state(N_ONLINE)
67906 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
67907 index 5171639..7cf4235 100644
67908 --- a/include/linux/oprofile.h
67909 +++ b/include/linux/oprofile.h
67910 @@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
67911 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
67912 char const * name, ulong * val);
67913
67914 -/** Create a file for read-only access to an atomic_t. */
67915 +/** Create a file for read-only access to an atomic_unchecked_t. */
67916 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
67917 - char const * name, atomic_t * val);
67918 + char const * name, atomic_unchecked_t * val);
67919
67920 /** create a directory */
67921 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
67922 diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
67923 index 3c62ed4..8924c7c 100644
67924 --- a/include/linux/pagemap.h
67925 +++ b/include/linux/pagemap.h
67926 @@ -425,7 +425,9 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
67927 if (((unsigned long)uaddr & PAGE_MASK) !=
67928 ((unsigned long)end & PAGE_MASK))
67929 ret = __get_user(c, end);
67930 + (void)c;
67931 }
67932 + (void)c;
67933 return ret;
67934 }
67935
67936 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
67937 index 81c9689..a567a55 100644
67938 --- a/include/linux/perf_event.h
67939 +++ b/include/linux/perf_event.h
67940 @@ -476,7 +476,7 @@ struct hw_perf_event {
67941 struct hrtimer hrtimer;
67942 };
67943 };
67944 - atomic64_t prev_count;
67945 + atomic64_unchecked_t prev_count;
67946 u64 sample_period;
67947 u64 last_period;
67948 atomic64_t period_left;
67949 @@ -557,7 +557,7 @@ struct perf_event {
67950 const struct pmu *pmu;
67951
67952 enum perf_event_active_state state;
67953 - atomic64_t count;
67954 + atomic64_unchecked_t count;
67955
67956 /*
67957 * These are the total time in nanoseconds that the event
67958 @@ -595,8 +595,8 @@ struct perf_event {
67959 * These accumulate total time (in nanoseconds) that children
67960 * events have been enabled and running, respectively.
67961 */
67962 - atomic64_t child_total_time_enabled;
67963 - atomic64_t child_total_time_running;
67964 + atomic64_unchecked_t child_total_time_enabled;
67965 + atomic64_unchecked_t child_total_time_running;
67966
67967 /*
67968 * Protect attach/detach and child_list:
67969 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
67970 index b43a9e0..b77d869 100644
67971 --- a/include/linux/pipe_fs_i.h
67972 +++ b/include/linux/pipe_fs_i.h
67973 @@ -46,9 +46,9 @@ struct pipe_inode_info {
67974 wait_queue_head_t wait;
67975 unsigned int nrbufs, curbuf;
67976 struct page *tmp_page;
67977 - unsigned int readers;
67978 - unsigned int writers;
67979 - unsigned int waiting_writers;
67980 + atomic_t readers;
67981 + atomic_t writers;
67982 + atomic_t waiting_writers;
67983 unsigned int r_counter;
67984 unsigned int w_counter;
67985 struct fasync_struct *fasync_readers;
67986 diff --git a/include/linux/poison.h b/include/linux/poison.h
67987 index 34066ff..e95d744 100644
67988 --- a/include/linux/poison.h
67989 +++ b/include/linux/poison.h
67990 @@ -19,8 +19,8 @@
67991 * under normal circumstances, used to verify that nobody uses
67992 * non-initialized list entries.
67993 */
67994 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
67995 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
67996 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
67997 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
67998
67999 /********** include/linux/timer.h **********/
68000 /*
68001 diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
68002 index 4f71bf4..77ffa64 100644
68003 --- a/include/linux/posix-timers.h
68004 +++ b/include/linux/posix-timers.h
68005 @@ -67,7 +67,7 @@ struct k_itimer {
68006 };
68007
68008 struct k_clock {
68009 - int res; /* in nanoseconds */
68010 + const int res; /* in nanoseconds */
68011 int (*clock_getres) (const clockid_t which_clock, struct timespec *tp);
68012 int (*clock_set) (const clockid_t which_clock, struct timespec * tp);
68013 int (*clock_get) (const clockid_t which_clock, struct timespec * tp);
68014 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
68015 index 72b1a10..13303a9 100644
68016 --- a/include/linux/preempt.h
68017 +++ b/include/linux/preempt.h
68018 @@ -110,7 +110,7 @@ struct preempt_ops {
68019 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
68020 void (*sched_out)(struct preempt_notifier *notifier,
68021 struct task_struct *next);
68022 -};
68023 +} __no_const;
68024
68025 /**
68026 * preempt_notifier - key for installing preemption notifiers
68027 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
68028 index 379eaed..1bf73e3 100644
68029 --- a/include/linux/proc_fs.h
68030 +++ b/include/linux/proc_fs.h
68031 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
68032 return proc_create_data(name, mode, parent, proc_fops, NULL);
68033 }
68034
68035 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
68036 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
68037 +{
68038 +#ifdef CONFIG_GRKERNSEC_PROC_USER
68039 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
68040 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68041 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
68042 +#else
68043 + return proc_create_data(name, mode, parent, proc_fops, NULL);
68044 +#endif
68045 +}
68046 +
68047 +
68048 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
68049 mode_t mode, struct proc_dir_entry *base,
68050 read_proc_t *read_proc, void * data)
68051 @@ -256,7 +269,7 @@ union proc_op {
68052 int (*proc_show)(struct seq_file *m,
68053 struct pid_namespace *ns, struct pid *pid,
68054 struct task_struct *task);
68055 -};
68056 +} __no_const;
68057
68058 struct ctl_table_header;
68059 struct ctl_table;
68060 diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
68061 index 7456d7d..6c1cfc9 100644
68062 --- a/include/linux/ptrace.h
68063 +++ b/include/linux/ptrace.h
68064 @@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_struct *child);
68065 extern void exit_ptrace(struct task_struct *tracer);
68066 #define PTRACE_MODE_READ 1
68067 #define PTRACE_MODE_ATTACH 2
68068 -/* Returns 0 on success, -errno on denial. */
68069 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
68070 /* Returns true on success, false on denial. */
68071 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
68072 +/* Returns true on success, false on denial. */
68073 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
68074
68075 static inline int ptrace_reparented(struct task_struct *child)
68076 {
68077 diff --git a/include/linux/random.h b/include/linux/random.h
68078 index 2948046..3262567 100644
68079 --- a/include/linux/random.h
68080 +++ b/include/linux/random.h
68081 @@ -63,6 +63,11 @@ unsigned long randomize_range(unsigned long start, unsigned long end, unsigned l
68082 u32 random32(void);
68083 void srandom32(u32 seed);
68084
68085 +static inline unsigned long pax_get_random_long(void)
68086 +{
68087 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
68088 +}
68089 +
68090 #endif /* __KERNEL___ */
68091
68092 #endif /* _LINUX_RANDOM_H */
68093 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
68094 index 988e55f..17cb4ef 100644
68095 --- a/include/linux/reboot.h
68096 +++ b/include/linux/reboot.h
68097 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
68098 * Architecture-specific implementations of sys_reboot commands.
68099 */
68100
68101 -extern void machine_restart(char *cmd);
68102 -extern void machine_halt(void);
68103 -extern void machine_power_off(void);
68104 +extern void machine_restart(char *cmd) __noreturn;
68105 +extern void machine_halt(void) __noreturn;
68106 +extern void machine_power_off(void) __noreturn;
68107
68108 extern void machine_shutdown(void);
68109 struct pt_regs;
68110 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
68111 */
68112
68113 extern void kernel_restart_prepare(char *cmd);
68114 -extern void kernel_restart(char *cmd);
68115 -extern void kernel_halt(void);
68116 -extern void kernel_power_off(void);
68117 +extern void kernel_restart(char *cmd) __noreturn;
68118 +extern void kernel_halt(void) __noreturn;
68119 +extern void kernel_power_off(void) __noreturn;
68120
68121 void ctrl_alt_del(void);
68122
68123 @@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
68124 * Emergency restart, callable from an interrupt handler.
68125 */
68126
68127 -extern void emergency_restart(void);
68128 +extern void emergency_restart(void) __noreturn;
68129 #include <asm/emergency-restart.h>
68130
68131 #endif
68132 diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
68133 index dd31e7b..5b03c5c 100644
68134 --- a/include/linux/reiserfs_fs.h
68135 +++ b/include/linux/reiserfs_fs.h
68136 @@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
68137 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
68138
68139 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
68140 -#define get_generation(s) atomic_read (&fs_generation(s))
68141 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
68142 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
68143 #define __fs_changed(gen,s) (gen != get_generation (s))
68144 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
68145 @@ -1534,24 +1534,24 @@ static inline struct super_block *sb_from_bi(struct buffer_info *bi)
68146 */
68147
68148 struct item_operations {
68149 - int (*bytes_number) (struct item_head * ih, int block_size);
68150 - void (*decrement_key) (struct cpu_key *);
68151 - int (*is_left_mergeable) (struct reiserfs_key * ih,
68152 + int (* const bytes_number) (struct item_head * ih, int block_size);
68153 + void (* const decrement_key) (struct cpu_key *);
68154 + int (* const is_left_mergeable) (struct reiserfs_key * ih,
68155 unsigned long bsize);
68156 - void (*print_item) (struct item_head *, char *item);
68157 - void (*check_item) (struct item_head *, char *item);
68158 + void (* const print_item) (struct item_head *, char *item);
68159 + void (* const check_item) (struct item_head *, char *item);
68160
68161 - int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
68162 + int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
68163 int is_affected, int insert_size);
68164 - int (*check_left) (struct virtual_item * vi, int free,
68165 + int (* const check_left) (struct virtual_item * vi, int free,
68166 int start_skip, int end_skip);
68167 - int (*check_right) (struct virtual_item * vi, int free);
68168 - int (*part_size) (struct virtual_item * vi, int from, int to);
68169 - int (*unit_num) (struct virtual_item * vi);
68170 - void (*print_vi) (struct virtual_item * vi);
68171 + int (* const check_right) (struct virtual_item * vi, int free);
68172 + int (* const part_size) (struct virtual_item * vi, int from, int to);
68173 + int (* const unit_num) (struct virtual_item * vi);
68174 + void (* const print_vi) (struct virtual_item * vi);
68175 };
68176
68177 -extern struct item_operations *item_ops[TYPE_ANY + 1];
68178 +extern const struct item_operations * const item_ops[TYPE_ANY + 1];
68179
68180 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
68181 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
68182 diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
68183 index dab68bb..0688727 100644
68184 --- a/include/linux/reiserfs_fs_sb.h
68185 +++ b/include/linux/reiserfs_fs_sb.h
68186 @@ -377,7 +377,7 @@ struct reiserfs_sb_info {
68187 /* Comment? -Hans */
68188 wait_queue_head_t s_wait;
68189 /* To be obsoleted soon by per buffer seals.. -Hans */
68190 - atomic_t s_generation_counter; // increased by one every time the
68191 + atomic_unchecked_t s_generation_counter; // increased by one every time the
68192 // tree gets re-balanced
68193 unsigned long s_properties; /* File system properties. Currently holds
68194 on-disk FS format */
68195 diff --git a/include/linux/relay.h b/include/linux/relay.h
68196 index 14a86bc..17d0700 100644
68197 --- a/include/linux/relay.h
68198 +++ b/include/linux/relay.h
68199 @@ -159,7 +159,7 @@ struct rchan_callbacks
68200 * The callback should return 0 if successful, negative if not.
68201 */
68202 int (*remove_buf_file)(struct dentry *dentry);
68203 -};
68204 +} __no_const;
68205
68206 /*
68207 * CONFIG_RELAY kernel API, kernel/relay.c
68208 diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
68209 index 3392c59..a746428 100644
68210 --- a/include/linux/rfkill.h
68211 +++ b/include/linux/rfkill.h
68212 @@ -144,6 +144,7 @@ struct rfkill_ops {
68213 void (*query)(struct rfkill *rfkill, void *data);
68214 int (*set_block)(void *data, bool blocked);
68215 };
68216 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
68217
68218 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
68219 /**
68220 diff --git a/include/linux/sched.h b/include/linux/sched.h
68221 index 71849bf..40217dc 100644
68222 --- a/include/linux/sched.h
68223 +++ b/include/linux/sched.h
68224 @@ -101,6 +101,7 @@ struct bio;
68225 struct fs_struct;
68226 struct bts_context;
68227 struct perf_event_context;
68228 +struct linux_binprm;
68229
68230 /*
68231 * List of flags we want to share for kernel threads,
68232 @@ -350,7 +351,7 @@ extern signed long schedule_timeout_killable(signed long timeout);
68233 extern signed long schedule_timeout_uninterruptible(signed long timeout);
68234 asmlinkage void __schedule(void);
68235 asmlinkage void schedule(void);
68236 -extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
68237 +extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
68238
68239 struct nsproxy;
68240 struct user_namespace;
68241 @@ -371,9 +372,12 @@ struct user_namespace;
68242 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
68243
68244 extern int sysctl_max_map_count;
68245 +extern unsigned long sysctl_heap_stack_gap;
68246
68247 #include <linux/aio.h>
68248
68249 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
68250 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
68251 extern unsigned long
68252 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
68253 unsigned long, unsigned long);
68254 @@ -666,6 +670,16 @@ struct signal_struct {
68255 struct tty_audit_buf *tty_audit_buf;
68256 #endif
68257
68258 +#ifdef CONFIG_GRKERNSEC
68259 + u32 curr_ip;
68260 + u32 saved_ip;
68261 + u32 gr_saddr;
68262 + u32 gr_daddr;
68263 + u16 gr_sport;
68264 + u16 gr_dport;
68265 + u8 used_accept:1;
68266 +#endif
68267 +
68268 int oom_adj; /* OOM kill score adjustment (bit shift) */
68269 };
68270
68271 @@ -723,6 +737,11 @@ struct user_struct {
68272 struct key *session_keyring; /* UID's default session keyring */
68273 #endif
68274
68275 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
68276 + unsigned int banned;
68277 + unsigned long ban_expires;
68278 +#endif
68279 +
68280 /* Hash table maintenance information */
68281 struct hlist_node uidhash_node;
68282 uid_t uid;
68283 @@ -1328,8 +1347,8 @@ struct task_struct {
68284 struct list_head thread_group;
68285
68286 struct completion *vfork_done; /* for vfork() */
68287 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
68288 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
68289 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
68290 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
68291
68292 cputime_t utime, stime, utimescaled, stimescaled;
68293 cputime_t gtime;
68294 @@ -1343,16 +1362,6 @@ struct task_struct {
68295 struct task_cputime cputime_expires;
68296 struct list_head cpu_timers[3];
68297
68298 -/* process credentials */
68299 - const struct cred *real_cred; /* objective and real subjective task
68300 - * credentials (COW) */
68301 - const struct cred *cred; /* effective (overridable) subjective task
68302 - * credentials (COW) */
68303 - struct mutex cred_guard_mutex; /* guard against foreign influences on
68304 - * credential calculations
68305 - * (notably. ptrace) */
68306 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
68307 -
68308 char comm[TASK_COMM_LEN]; /* executable name excluding path
68309 - access with [gs]et_task_comm (which lock
68310 it with task_lock())
68311 @@ -1369,6 +1378,10 @@ struct task_struct {
68312 #endif
68313 /* CPU-specific state of this task */
68314 struct thread_struct thread;
68315 +/* thread_info moved to task_struct */
68316 +#ifdef CONFIG_X86
68317 + struct thread_info tinfo;
68318 +#endif
68319 /* filesystem information */
68320 struct fs_struct *fs;
68321 /* open file information */
68322 @@ -1436,6 +1449,15 @@ struct task_struct {
68323 int hardirq_context;
68324 int softirq_context;
68325 #endif
68326 +
68327 +/* process credentials */
68328 + const struct cred *real_cred; /* objective and real subjective task
68329 + * credentials (COW) */
68330 + struct mutex cred_guard_mutex; /* guard against foreign influences on
68331 + * credential calculations
68332 + * (notably. ptrace) */
68333 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
68334 +
68335 #ifdef CONFIG_LOCKDEP
68336 # define MAX_LOCK_DEPTH 48UL
68337 u64 curr_chain_key;
68338 @@ -1456,6 +1478,9 @@ struct task_struct {
68339
68340 struct backing_dev_info *backing_dev_info;
68341
68342 + const struct cred *cred; /* effective (overridable) subjective task
68343 + * credentials (COW) */
68344 +
68345 struct io_context *io_context;
68346
68347 unsigned long ptrace_message;
68348 @@ -1519,6 +1544,21 @@ struct task_struct {
68349 unsigned long default_timer_slack_ns;
68350
68351 struct list_head *scm_work_list;
68352 +
68353 +#ifdef CONFIG_GRKERNSEC
68354 + /* grsecurity */
68355 + struct dentry *gr_chroot_dentry;
68356 + struct acl_subject_label *acl;
68357 + struct acl_role_label *role;
68358 + struct file *exec_file;
68359 + u16 acl_role_id;
68360 + /* is this the task that authenticated to the special role */
68361 + u8 acl_sp_role;
68362 + u8 is_writable;
68363 + u8 brute;
68364 + u8 gr_is_chrooted;
68365 +#endif
68366 +
68367 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
68368 /* Index of current stored adress in ret_stack */
68369 int curr_ret_stack;
68370 @@ -1542,6 +1582,57 @@ struct task_struct {
68371 #endif /* CONFIG_TRACING */
68372 };
68373
68374 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
68375 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
68376 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
68377 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
68378 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
68379 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
68380 +
68381 +#ifdef CONFIG_PAX_SOFTMODE
68382 +extern int pax_softmode;
68383 +#endif
68384 +
68385 +extern int pax_check_flags(unsigned long *);
68386 +
68387 +/* if tsk != current then task_lock must be held on it */
68388 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
68389 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
68390 +{
68391 + if (likely(tsk->mm))
68392 + return tsk->mm->pax_flags;
68393 + else
68394 + return 0UL;
68395 +}
68396 +
68397 +/* if tsk != current then task_lock must be held on it */
68398 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
68399 +{
68400 + if (likely(tsk->mm)) {
68401 + tsk->mm->pax_flags = flags;
68402 + return 0;
68403 + }
68404 + return -EINVAL;
68405 +}
68406 +#endif
68407 +
68408 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
68409 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
68410 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
68411 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
68412 +#endif
68413 +
68414 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
68415 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
68416 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
68417 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
68418 +
68419 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
68420 +extern void pax_track_stack(void);
68421 +#else
68422 +static inline void pax_track_stack(void) {}
68423 +#endif
68424 +
68425 /* Future-safe accessor for struct task_struct's cpus_allowed. */
68426 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
68427
68428 @@ -1740,7 +1831,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
68429 #define PF_DUMPCORE 0x00000200 /* dumped core */
68430 #define PF_SIGNALED 0x00000400 /* killed by a signal */
68431 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
68432 -#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
68433 +#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
68434 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
68435 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
68436 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
68437 @@ -1978,7 +2069,9 @@ void yield(void);
68438 extern struct exec_domain default_exec_domain;
68439
68440 union thread_union {
68441 +#ifndef CONFIG_X86
68442 struct thread_info thread_info;
68443 +#endif
68444 unsigned long stack[THREAD_SIZE/sizeof(long)];
68445 };
68446
68447 @@ -2011,6 +2104,7 @@ extern struct pid_namespace init_pid_ns;
68448 */
68449
68450 extern struct task_struct *find_task_by_vpid(pid_t nr);
68451 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
68452 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
68453 struct pid_namespace *ns);
68454
68455 @@ -2155,7 +2249,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
68456 extern void exit_itimers(struct signal_struct *);
68457 extern void flush_itimer_signals(void);
68458
68459 -extern NORET_TYPE void do_group_exit(int);
68460 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
68461
68462 extern void daemonize(const char *, ...);
68463 extern int allow_signal(int);
68464 @@ -2284,13 +2378,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
68465
68466 #endif
68467
68468 -static inline int object_is_on_stack(void *obj)
68469 +static inline int object_starts_on_stack(void *obj)
68470 {
68471 - void *stack = task_stack_page(current);
68472 + const void *stack = task_stack_page(current);
68473
68474 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
68475 }
68476
68477 +#ifdef CONFIG_PAX_USERCOPY
68478 +extern int object_is_on_stack(const void *obj, unsigned long len);
68479 +#endif
68480 +
68481 extern void thread_info_cache_init(void);
68482
68483 #ifdef CONFIG_DEBUG_STACK_USAGE
68484 diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
68485 index 1ee2c05..81b7ec4 100644
68486 --- a/include/linux/screen_info.h
68487 +++ b/include/linux/screen_info.h
68488 @@ -42,7 +42,8 @@ struct screen_info {
68489 __u16 pages; /* 0x32 */
68490 __u16 vesa_attributes; /* 0x34 */
68491 __u32 capabilities; /* 0x36 */
68492 - __u8 _reserved[6]; /* 0x3a */
68493 + __u16 vesapm_size; /* 0x3a */
68494 + __u8 _reserved[4]; /* 0x3c */
68495 } __attribute__((packed));
68496
68497 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
68498 diff --git a/include/linux/security.h b/include/linux/security.h
68499 index d40d23f..d739b08 100644
68500 --- a/include/linux/security.h
68501 +++ b/include/linux/security.h
68502 @@ -34,6 +34,7 @@
68503 #include <linux/key.h>
68504 #include <linux/xfrm.h>
68505 #include <linux/gfp.h>
68506 +#include <linux/grsecurity.h>
68507 #include <net/flow.h>
68508
68509 /* Maximum number of letters for an LSM name string */
68510 @@ -76,7 +77,7 @@ extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
68511 extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp);
68512 extern int cap_task_setioprio(struct task_struct *p, int ioprio);
68513 extern int cap_task_setnice(struct task_struct *p, int nice);
68514 -extern int cap_syslog(int type);
68515 +extern int cap_syslog(int type, bool from_file);
68516 extern int cap_vm_enough_memory(struct mm_struct *mm, long pages);
68517
68518 struct msghdr;
68519 @@ -1331,6 +1332,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
68520 * logging to the console.
68521 * See the syslog(2) manual page for an explanation of the @type values.
68522 * @type contains the type of action.
68523 + * @from_file indicates the context of action (if it came from /proc).
68524 * Return 0 if permission is granted.
68525 * @settime:
68526 * Check permission to change the system time.
68527 @@ -1445,7 +1447,7 @@ struct security_operations {
68528 int (*sysctl) (struct ctl_table *table, int op);
68529 int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
68530 int (*quota_on) (struct dentry *dentry);
68531 - int (*syslog) (int type);
68532 + int (*syslog) (int type, bool from_file);
68533 int (*settime) (struct timespec *ts, struct timezone *tz);
68534 int (*vm_enough_memory) (struct mm_struct *mm, long pages);
68535
68536 @@ -1740,7 +1742,7 @@ int security_acct(struct file *file);
68537 int security_sysctl(struct ctl_table *table, int op);
68538 int security_quotactl(int cmds, int type, int id, struct super_block *sb);
68539 int security_quota_on(struct dentry *dentry);
68540 -int security_syslog(int type);
68541 +int security_syslog(int type, bool from_file);
68542 int security_settime(struct timespec *ts, struct timezone *tz);
68543 int security_vm_enough_memory(long pages);
68544 int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
68545 @@ -1986,9 +1988,9 @@ static inline int security_quota_on(struct dentry *dentry)
68546 return 0;
68547 }
68548
68549 -static inline int security_syslog(int type)
68550 +static inline int security_syslog(int type, bool from_file)
68551 {
68552 - return cap_syslog(type);
68553 + return cap_syslog(type, from_file);
68554 }
68555
68556 static inline int security_settime(struct timespec *ts, struct timezone *tz)
68557 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
68558 index 8366d8f..2307490 100644
68559 --- a/include/linux/seq_file.h
68560 +++ b/include/linux/seq_file.h
68561 @@ -32,6 +32,7 @@ struct seq_operations {
68562 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
68563 int (*show) (struct seq_file *m, void *v);
68564 };
68565 +typedef struct seq_operations __no_const seq_operations_no_const;
68566
68567 #define SEQ_SKIP 1
68568
68569 diff --git a/include/linux/shm.h b/include/linux/shm.h
68570 index eca6235..c7417ed 100644
68571 --- a/include/linux/shm.h
68572 +++ b/include/linux/shm.h
68573 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the kernel */
68574 pid_t shm_cprid;
68575 pid_t shm_lprid;
68576 struct user_struct *mlock_user;
68577 +#ifdef CONFIG_GRKERNSEC
68578 + time_t shm_createtime;
68579 + pid_t shm_lapid;
68580 +#endif
68581 };
68582
68583 /* shm_mode upper byte flags */
68584 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
68585 index bcdd660..6e12e11 100644
68586 --- a/include/linux/skbuff.h
68587 +++ b/include/linux/skbuff.h
68588 @@ -14,6 +14,7 @@
68589 #ifndef _LINUX_SKBUFF_H
68590 #define _LINUX_SKBUFF_H
68591
68592 +#include <linux/const.h>
68593 #include <linux/kernel.h>
68594 #include <linux/kmemcheck.h>
68595 #include <linux/compiler.h>
68596 @@ -544,7 +545,7 @@ static inline union skb_shared_tx *skb_tx(struct sk_buff *skb)
68597 */
68598 static inline int skb_queue_empty(const struct sk_buff_head *list)
68599 {
68600 - return list->next == (struct sk_buff *)list;
68601 + return list->next == (const struct sk_buff *)list;
68602 }
68603
68604 /**
68605 @@ -557,7 +558,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
68606 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
68607 const struct sk_buff *skb)
68608 {
68609 - return (skb->next == (struct sk_buff *) list);
68610 + return (skb->next == (const struct sk_buff *) list);
68611 }
68612
68613 /**
68614 @@ -570,7 +571,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
68615 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
68616 const struct sk_buff *skb)
68617 {
68618 - return (skb->prev == (struct sk_buff *) list);
68619 + return (skb->prev == (const struct sk_buff *) list);
68620 }
68621
68622 /**
68623 @@ -1367,7 +1368,7 @@ static inline int skb_network_offset(const struct sk_buff *skb)
68624 * headroom, you should not reduce this.
68625 */
68626 #ifndef NET_SKB_PAD
68627 -#define NET_SKB_PAD 32
68628 +#define NET_SKB_PAD (_AC(32,UL))
68629 #endif
68630
68631 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
68632 diff --git a/include/linux/slab.h b/include/linux/slab.h
68633 index 2da8372..a3be824 100644
68634 --- a/include/linux/slab.h
68635 +++ b/include/linux/slab.h
68636 @@ -11,12 +11,20 @@
68637
68638 #include <linux/gfp.h>
68639 #include <linux/types.h>
68640 +#include <linux/err.h>
68641
68642 /*
68643 * Flags to pass to kmem_cache_create().
68644 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
68645 */
68646 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
68647 +
68648 +#ifdef CONFIG_PAX_USERCOPY
68649 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
68650 +#else
68651 +#define SLAB_USERCOPY 0x00000000UL
68652 +#endif
68653 +
68654 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
68655 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
68656 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
68657 @@ -82,10 +90,13 @@
68658 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
68659 * Both make kfree a no-op.
68660 */
68661 -#define ZERO_SIZE_PTR ((void *)16)
68662 +#define ZERO_SIZE_PTR \
68663 +({ \
68664 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
68665 + (void *)(-MAX_ERRNO-1L); \
68666 +})
68667
68668 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
68669 - (unsigned long)ZERO_SIZE_PTR)
68670 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
68671
68672 /*
68673 * struct kmem_cache related prototypes
68674 @@ -138,6 +149,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
68675 void kfree(const void *);
68676 void kzfree(const void *);
68677 size_t ksize(const void *);
68678 +void check_object_size(const void *ptr, unsigned long n, bool to);
68679
68680 /*
68681 * Allocator specific definitions. These are mainly used to establish optimized
68682 @@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
68683
68684 void __init kmem_cache_init_late(void);
68685
68686 +#define kmalloc(x, y) \
68687 +({ \
68688 + void *___retval; \
68689 + intoverflow_t ___x = (intoverflow_t)x; \
68690 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
68691 + ___retval = NULL; \
68692 + else \
68693 + ___retval = kmalloc((size_t)___x, (y)); \
68694 + ___retval; \
68695 +})
68696 +
68697 +#define kmalloc_node(x, y, z) \
68698 +({ \
68699 + void *___retval; \
68700 + intoverflow_t ___x = (intoverflow_t)x; \
68701 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
68702 + ___retval = NULL; \
68703 + else \
68704 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
68705 + ___retval; \
68706 +})
68707 +
68708 +#define kzalloc(x, y) \
68709 +({ \
68710 + void *___retval; \
68711 + intoverflow_t ___x = (intoverflow_t)x; \
68712 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
68713 + ___retval = NULL; \
68714 + else \
68715 + ___retval = kzalloc((size_t)___x, (y)); \
68716 + ___retval; \
68717 +})
68718 +
68719 #endif /* _LINUX_SLAB_H */
68720 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
68721 index 850d057..d9dfe3c 100644
68722 --- a/include/linux/slab_def.h
68723 +++ b/include/linux/slab_def.h
68724 @@ -69,10 +69,10 @@ struct kmem_cache {
68725 unsigned long node_allocs;
68726 unsigned long node_frees;
68727 unsigned long node_overflow;
68728 - atomic_t allochit;
68729 - atomic_t allocmiss;
68730 - atomic_t freehit;
68731 - atomic_t freemiss;
68732 + atomic_unchecked_t allochit;
68733 + atomic_unchecked_t allocmiss;
68734 + atomic_unchecked_t freehit;
68735 + atomic_unchecked_t freemiss;
68736
68737 /*
68738 * If debugging is enabled, then the allocator can add additional
68739 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
68740 index 5ad70a6..57f9f65 100644
68741 --- a/include/linux/slub_def.h
68742 +++ b/include/linux/slub_def.h
68743 @@ -86,7 +86,7 @@ struct kmem_cache {
68744 struct kmem_cache_order_objects max;
68745 struct kmem_cache_order_objects min;
68746 gfp_t allocflags; /* gfp flags to use on each alloc */
68747 - int refcount; /* Refcount for slab cache destroy */
68748 + atomic_t refcount; /* Refcount for slab cache destroy */
68749 void (*ctor)(void *);
68750 int inuse; /* Offset to metadata */
68751 int align; /* Alignment */
68752 @@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
68753 #endif
68754
68755 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
68756 -void *__kmalloc(size_t size, gfp_t flags);
68757 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
68758
68759 #ifdef CONFIG_KMEMTRACE
68760 extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
68761 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
68762 index 67ad11f..0bbd8af 100644
68763 --- a/include/linux/sonet.h
68764 +++ b/include/linux/sonet.h
68765 @@ -61,7 +61,7 @@ struct sonet_stats {
68766 #include <asm/atomic.h>
68767
68768 struct k_sonet_stats {
68769 -#define __HANDLE_ITEM(i) atomic_t i
68770 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
68771 __SONET_ITEMS
68772 #undef __HANDLE_ITEM
68773 };
68774 diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
68775 index 6f52b4d..5500323 100644
68776 --- a/include/linux/sunrpc/cache.h
68777 +++ b/include/linux/sunrpc/cache.h
68778 @@ -125,7 +125,7 @@ struct cache_detail {
68779 */
68780 struct cache_req {
68781 struct cache_deferred_req *(*defer)(struct cache_req *req);
68782 -};
68783 +} __no_const;
68784 /* this must be embedded in a deferred_request that is being
68785 * delayed awaiting cache-fill
68786 */
68787 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
68788 index 8ed9642..101ceab 100644
68789 --- a/include/linux/sunrpc/clnt.h
68790 +++ b/include/linux/sunrpc/clnt.h
68791 @@ -167,9 +167,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
68792 {
68793 switch (sap->sa_family) {
68794 case AF_INET:
68795 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
68796 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
68797 case AF_INET6:
68798 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
68799 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
68800 }
68801 return 0;
68802 }
68803 @@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
68804 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
68805 const struct sockaddr *src)
68806 {
68807 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
68808 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
68809 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
68810
68811 dsin->sin_family = ssin->sin_family;
68812 @@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
68813 if (sa->sa_family != AF_INET6)
68814 return 0;
68815
68816 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
68817 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
68818 }
68819
68820 #endif /* __KERNEL__ */
68821 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
68822 index c14fe86..393245e 100644
68823 --- a/include/linux/sunrpc/svc_rdma.h
68824 +++ b/include/linux/sunrpc/svc_rdma.h
68825 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
68826 extern unsigned int svcrdma_max_requests;
68827 extern unsigned int svcrdma_max_req_size;
68828
68829 -extern atomic_t rdma_stat_recv;
68830 -extern atomic_t rdma_stat_read;
68831 -extern atomic_t rdma_stat_write;
68832 -extern atomic_t rdma_stat_sq_starve;
68833 -extern atomic_t rdma_stat_rq_starve;
68834 -extern atomic_t rdma_stat_rq_poll;
68835 -extern atomic_t rdma_stat_rq_prod;
68836 -extern atomic_t rdma_stat_sq_poll;
68837 -extern atomic_t rdma_stat_sq_prod;
68838 +extern atomic_unchecked_t rdma_stat_recv;
68839 +extern atomic_unchecked_t rdma_stat_read;
68840 +extern atomic_unchecked_t rdma_stat_write;
68841 +extern atomic_unchecked_t rdma_stat_sq_starve;
68842 +extern atomic_unchecked_t rdma_stat_rq_starve;
68843 +extern atomic_unchecked_t rdma_stat_rq_poll;
68844 +extern atomic_unchecked_t rdma_stat_rq_prod;
68845 +extern atomic_unchecked_t rdma_stat_sq_poll;
68846 +extern atomic_unchecked_t rdma_stat_sq_prod;
68847
68848 #define RPCRDMA_VERSION 1
68849
68850 diff --git a/include/linux/suspend.h b/include/linux/suspend.h
68851 index 5e781d8..1e62818 100644
68852 --- a/include/linux/suspend.h
68853 +++ b/include/linux/suspend.h
68854 @@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
68855 * which require special recovery actions in that situation.
68856 */
68857 struct platform_suspend_ops {
68858 - int (*valid)(suspend_state_t state);
68859 - int (*begin)(suspend_state_t state);
68860 - int (*prepare)(void);
68861 - int (*prepare_late)(void);
68862 - int (*enter)(suspend_state_t state);
68863 - void (*wake)(void);
68864 - void (*finish)(void);
68865 - void (*end)(void);
68866 - void (*recover)(void);
68867 + int (* const valid)(suspend_state_t state);
68868 + int (* const begin)(suspend_state_t state);
68869 + int (* const prepare)(void);
68870 + int (* const prepare_late)(void);
68871 + int (* const enter)(suspend_state_t state);
68872 + void (* const wake)(void);
68873 + void (* const finish)(void);
68874 + void (* const end)(void);
68875 + void (* const recover)(void);
68876 };
68877
68878 #ifdef CONFIG_SUSPEND
68879 @@ -120,7 +120,7 @@ struct platform_suspend_ops {
68880 * suspend_set_ops - set platform dependent suspend operations
68881 * @ops: The new suspend operations to set.
68882 */
68883 -extern void suspend_set_ops(struct platform_suspend_ops *ops);
68884 +extern void suspend_set_ops(const struct platform_suspend_ops *ops);
68885 extern int suspend_valid_only_mem(suspend_state_t state);
68886
68887 /**
68888 @@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t state);
68889 #else /* !CONFIG_SUSPEND */
68890 #define suspend_valid_only_mem NULL
68891
68892 -static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
68893 +static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
68894 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
68895 #endif /* !CONFIG_SUSPEND */
68896
68897 @@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone *zone);
68898 * platforms which require special recovery actions in that situation.
68899 */
68900 struct platform_hibernation_ops {
68901 - int (*begin)(void);
68902 - void (*end)(void);
68903 - int (*pre_snapshot)(void);
68904 - void (*finish)(void);
68905 - int (*prepare)(void);
68906 - int (*enter)(void);
68907 - void (*leave)(void);
68908 - int (*pre_restore)(void);
68909 - void (*restore_cleanup)(void);
68910 - void (*recover)(void);
68911 + int (* const begin)(void);
68912 + void (* const end)(void);
68913 + int (* const pre_snapshot)(void);
68914 + void (* const finish)(void);
68915 + int (* const prepare)(void);
68916 + int (* const enter)(void);
68917 + void (* const leave)(void);
68918 + int (* const pre_restore)(void);
68919 + void (* const restore_cleanup)(void);
68920 + void (* const recover)(void);
68921 };
68922
68923 #ifdef CONFIG_HIBERNATION
68924 @@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct page *);
68925 extern void swsusp_unset_page_free(struct page *);
68926 extern unsigned long get_safe_page(gfp_t gfp_mask);
68927
68928 -extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
68929 +extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
68930 extern int hibernate(void);
68931 extern bool system_entering_hibernation(void);
68932 #else /* CONFIG_HIBERNATION */
68933 @@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
68934 static inline void swsusp_set_page_free(struct page *p) {}
68935 static inline void swsusp_unset_page_free(struct page *p) {}
68936
68937 -static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
68938 +static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
68939 static inline int hibernate(void) { return -ENOSYS; }
68940 static inline bool system_entering_hibernation(void) { return false; }
68941 #endif /* CONFIG_HIBERNATION */
68942 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
68943 index 0eb6942..a805cb6 100644
68944 --- a/include/linux/sysctl.h
68945 +++ b/include/linux/sysctl.h
68946 @@ -164,7 +164,11 @@ enum
68947 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
68948 };
68949
68950 -
68951 +#ifdef CONFIG_PAX_SOFTMODE
68952 +enum {
68953 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
68954 +};
68955 +#endif
68956
68957 /* CTL_VM names: */
68958 enum
68959 @@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
68960
68961 extern int proc_dostring(struct ctl_table *, int,
68962 void __user *, size_t *, loff_t *);
68963 +extern int proc_dostring_modpriv(struct ctl_table *, int,
68964 + void __user *, size_t *, loff_t *);
68965 extern int proc_dointvec(struct ctl_table *, int,
68966 void __user *, size_t *, loff_t *);
68967 extern int proc_dointvec_minmax(struct ctl_table *, int,
68968 @@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name, int nlen,
68969
68970 extern ctl_handler sysctl_data;
68971 extern ctl_handler sysctl_string;
68972 +extern ctl_handler sysctl_string_modpriv;
68973 extern ctl_handler sysctl_intvec;
68974 extern ctl_handler sysctl_jiffies;
68975 extern ctl_handler sysctl_ms_jiffies;
68976 diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
68977 index 9d68fed..71f02cc 100644
68978 --- a/include/linux/sysfs.h
68979 +++ b/include/linux/sysfs.h
68980 @@ -75,8 +75,8 @@ struct bin_attribute {
68981 };
68982
68983 struct sysfs_ops {
68984 - ssize_t (*show)(struct kobject *, struct attribute *,char *);
68985 - ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
68986 + ssize_t (* const show)(struct kobject *, struct attribute *,char *);
68987 + ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
68988 };
68989
68990 struct sysfs_dirent;
68991 diff --git a/include/linux/syslog.h b/include/linux/syslog.h
68992 new file mode 100644
68993 index 0000000..3891139
68994 --- /dev/null
68995 +++ b/include/linux/syslog.h
68996 @@ -0,0 +1,52 @@
68997 +/* Syslog internals
68998 + *
68999 + * Copyright 2010 Canonical, Ltd.
69000 + * Author: Kees Cook <kees.cook@canonical.com>
69001 + *
69002 + * This program is free software; you can redistribute it and/or modify
69003 + * it under the terms of the GNU General Public License as published by
69004 + * the Free Software Foundation; either version 2, or (at your option)
69005 + * any later version.
69006 + *
69007 + * This program is distributed in the hope that it will be useful,
69008 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
69009 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
69010 + * GNU General Public License for more details.
69011 + *
69012 + * You should have received a copy of the GNU General Public License
69013 + * along with this program; see the file COPYING. If not, write to
69014 + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
69015 + */
69016 +
69017 +#ifndef _LINUX_SYSLOG_H
69018 +#define _LINUX_SYSLOG_H
69019 +
69020 +/* Close the log. Currently a NOP. */
69021 +#define SYSLOG_ACTION_CLOSE 0
69022 +/* Open the log. Currently a NOP. */
69023 +#define SYSLOG_ACTION_OPEN 1
69024 +/* Read from the log. */
69025 +#define SYSLOG_ACTION_READ 2
69026 +/* Read all messages remaining in the ring buffer. */
69027 +#define SYSLOG_ACTION_READ_ALL 3
69028 +/* Read and clear all messages remaining in the ring buffer */
69029 +#define SYSLOG_ACTION_READ_CLEAR 4
69030 +/* Clear ring buffer. */
69031 +#define SYSLOG_ACTION_CLEAR 5
69032 +/* Disable printk's to console */
69033 +#define SYSLOG_ACTION_CONSOLE_OFF 6
69034 +/* Enable printk's to console */
69035 +#define SYSLOG_ACTION_CONSOLE_ON 7
69036 +/* Set level of messages printed to console */
69037 +#define SYSLOG_ACTION_CONSOLE_LEVEL 8
69038 +/* Return number of unread characters in the log buffer */
69039 +#define SYSLOG_ACTION_SIZE_UNREAD 9
69040 +/* Return size of the log buffer */
69041 +#define SYSLOG_ACTION_SIZE_BUFFER 10
69042 +
69043 +#define SYSLOG_FROM_CALL 0
69044 +#define SYSLOG_FROM_FILE 1
69045 +
69046 +int do_syslog(int type, char __user *buf, int count, bool from_file);
69047 +
69048 +#endif /* _LINUX_SYSLOG_H */
69049 diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
69050 index a8cc4e1..98d3b85 100644
69051 --- a/include/linux/thread_info.h
69052 +++ b/include/linux/thread_info.h
69053 @@ -23,7 +23,7 @@ struct restart_block {
69054 };
69055 /* For futex_wait and futex_wait_requeue_pi */
69056 struct {
69057 - u32 *uaddr;
69058 + u32 __user *uaddr;
69059 u32 val;
69060 u32 flags;
69061 u32 bitset;
69062 diff --git a/include/linux/tty.h b/include/linux/tty.h
69063 index e9c57e9..ee6d489 100644
69064 --- a/include/linux/tty.h
69065 +++ b/include/linux/tty.h
69066 @@ -493,7 +493,6 @@ extern void tty_ldisc_begin(void);
69067 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
69068 extern void tty_ldisc_enable(struct tty_struct *tty);
69069
69070 -
69071 /* n_tty.c */
69072 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
69073
69074 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
69075 index 0c4ee9b..9f7c426 100644
69076 --- a/include/linux/tty_ldisc.h
69077 +++ b/include/linux/tty_ldisc.h
69078 @@ -139,7 +139,7 @@ struct tty_ldisc_ops {
69079
69080 struct module *owner;
69081
69082 - int refcount;
69083 + atomic_t refcount;
69084 };
69085
69086 struct tty_ldisc {
69087 diff --git a/include/linux/types.h b/include/linux/types.h
69088 index c42724f..d190eee 100644
69089 --- a/include/linux/types.h
69090 +++ b/include/linux/types.h
69091 @@ -191,10 +191,26 @@ typedef struct {
69092 volatile int counter;
69093 } atomic_t;
69094
69095 +#ifdef CONFIG_PAX_REFCOUNT
69096 +typedef struct {
69097 + volatile int counter;
69098 +} atomic_unchecked_t;
69099 +#else
69100 +typedef atomic_t atomic_unchecked_t;
69101 +#endif
69102 +
69103 #ifdef CONFIG_64BIT
69104 typedef struct {
69105 volatile long counter;
69106 } atomic64_t;
69107 +
69108 +#ifdef CONFIG_PAX_REFCOUNT
69109 +typedef struct {
69110 + volatile long counter;
69111 +} atomic64_unchecked_t;
69112 +#else
69113 +typedef atomic64_t atomic64_unchecked_t;
69114 +#endif
69115 #endif
69116
69117 struct ustat {
69118 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
69119 index 6b58367..53a3e8e 100644
69120 --- a/include/linux/uaccess.h
69121 +++ b/include/linux/uaccess.h
69122 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
69123 long ret; \
69124 mm_segment_t old_fs = get_fs(); \
69125 \
69126 - set_fs(KERNEL_DS); \
69127 pagefault_disable(); \
69128 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
69129 - pagefault_enable(); \
69130 + set_fs(KERNEL_DS); \
69131 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
69132 set_fs(old_fs); \
69133 + pagefault_enable(); \
69134 ret; \
69135 })
69136
69137 @@ -93,7 +93,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
69138 * Safely read from address @src to the buffer at @dst. If a kernel fault
69139 * happens, handle that and return -EFAULT.
69140 */
69141 -extern long probe_kernel_read(void *dst, void *src, size_t size);
69142 +extern long probe_kernel_read(void *dst, const void *src, size_t size);
69143
69144 /*
69145 * probe_kernel_write(): safely attempt to write to a location
69146 @@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst, void *src, size_t size);
69147 * Safely write to address @dst from the buffer at @src. If a kernel fault
69148 * happens, handle that and return -EFAULT.
69149 */
69150 -extern long probe_kernel_write(void *dst, void *src, size_t size);
69151 +extern long probe_kernel_write(void *dst, const void *src, size_t size);
69152
69153 #endif /* __LINUX_UACCESS_H__ */
69154 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
69155 index 99c1b4d..bb94261 100644
69156 --- a/include/linux/unaligned/access_ok.h
69157 +++ b/include/linux/unaligned/access_ok.h
69158 @@ -6,32 +6,32 @@
69159
69160 static inline u16 get_unaligned_le16(const void *p)
69161 {
69162 - return le16_to_cpup((__le16 *)p);
69163 + return le16_to_cpup((const __le16 *)p);
69164 }
69165
69166 static inline u32 get_unaligned_le32(const void *p)
69167 {
69168 - return le32_to_cpup((__le32 *)p);
69169 + return le32_to_cpup((const __le32 *)p);
69170 }
69171
69172 static inline u64 get_unaligned_le64(const void *p)
69173 {
69174 - return le64_to_cpup((__le64 *)p);
69175 + return le64_to_cpup((const __le64 *)p);
69176 }
69177
69178 static inline u16 get_unaligned_be16(const void *p)
69179 {
69180 - return be16_to_cpup((__be16 *)p);
69181 + return be16_to_cpup((const __be16 *)p);
69182 }
69183
69184 static inline u32 get_unaligned_be32(const void *p)
69185 {
69186 - return be32_to_cpup((__be32 *)p);
69187 + return be32_to_cpup((const __be32 *)p);
69188 }
69189
69190 static inline u64 get_unaligned_be64(const void *p)
69191 {
69192 - return be64_to_cpup((__be64 *)p);
69193 + return be64_to_cpup((const __be64 *)p);
69194 }
69195
69196 static inline void put_unaligned_le16(u16 val, void *p)
69197 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
69198 index 79b9837..b5a56f9 100644
69199 --- a/include/linux/vermagic.h
69200 +++ b/include/linux/vermagic.h
69201 @@ -26,9 +26,35 @@
69202 #define MODULE_ARCH_VERMAGIC ""
69203 #endif
69204
69205 +#ifdef CONFIG_PAX_REFCOUNT
69206 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
69207 +#else
69208 +#define MODULE_PAX_REFCOUNT ""
69209 +#endif
69210 +
69211 +#ifdef CONSTIFY_PLUGIN
69212 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
69213 +#else
69214 +#define MODULE_CONSTIFY_PLUGIN ""
69215 +#endif
69216 +
69217 +#ifdef STACKLEAK_PLUGIN
69218 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
69219 +#else
69220 +#define MODULE_STACKLEAK_PLUGIN ""
69221 +#endif
69222 +
69223 +#ifdef CONFIG_GRKERNSEC
69224 +#define MODULE_GRSEC "GRSEC "
69225 +#else
69226 +#define MODULE_GRSEC ""
69227 +#endif
69228 +
69229 #define VERMAGIC_STRING \
69230 UTS_RELEASE " " \
69231 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
69232 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
69233 - MODULE_ARCH_VERMAGIC
69234 + MODULE_ARCH_VERMAGIC \
69235 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
69236 + MODULE_GRSEC
69237
69238 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
69239 index 819a634..462ac12 100644
69240 --- a/include/linux/vmalloc.h
69241 +++ b/include/linux/vmalloc.h
69242 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
69243 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
69244 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
69245 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
69246 +
69247 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
69248 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
69249 +#endif
69250 +
69251 /* bits [20..32] reserved for arch specific ioremap internals */
69252
69253 /*
69254 @@ -124,4 +129,81 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
69255
69256 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
69257
69258 +#define vmalloc(x) \
69259 +({ \
69260 + void *___retval; \
69261 + intoverflow_t ___x = (intoverflow_t)x; \
69262 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
69263 + ___retval = NULL; \
69264 + else \
69265 + ___retval = vmalloc((unsigned long)___x); \
69266 + ___retval; \
69267 +})
69268 +
69269 +#define __vmalloc(x, y, z) \
69270 +({ \
69271 + void *___retval; \
69272 + intoverflow_t ___x = (intoverflow_t)x; \
69273 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
69274 + ___retval = NULL; \
69275 + else \
69276 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
69277 + ___retval; \
69278 +})
69279 +
69280 +#define vmalloc_user(x) \
69281 +({ \
69282 + void *___retval; \
69283 + intoverflow_t ___x = (intoverflow_t)x; \
69284 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
69285 + ___retval = NULL; \
69286 + else \
69287 + ___retval = vmalloc_user((unsigned long)___x); \
69288 + ___retval; \
69289 +})
69290 +
69291 +#define vmalloc_exec(x) \
69292 +({ \
69293 + void *___retval; \
69294 + intoverflow_t ___x = (intoverflow_t)x; \
69295 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
69296 + ___retval = NULL; \
69297 + else \
69298 + ___retval = vmalloc_exec((unsigned long)___x); \
69299 + ___retval; \
69300 +})
69301 +
69302 +#define vmalloc_node(x, y) \
69303 +({ \
69304 + void *___retval; \
69305 + intoverflow_t ___x = (intoverflow_t)x; \
69306 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
69307 + ___retval = NULL; \
69308 + else \
69309 + ___retval = vmalloc_node((unsigned long)___x, (y));\
69310 + ___retval; \
69311 +})
69312 +
69313 +#define vmalloc_32(x) \
69314 +({ \
69315 + void *___retval; \
69316 + intoverflow_t ___x = (intoverflow_t)x; \
69317 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
69318 + ___retval = NULL; \
69319 + else \
69320 + ___retval = vmalloc_32((unsigned long)___x); \
69321 + ___retval; \
69322 +})
69323 +
69324 +#define vmalloc_32_user(x) \
69325 +({ \
69326 + void *___retval; \
69327 + intoverflow_t ___x = (intoverflow_t)x; \
69328 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
69329 + ___retval = NULL; \
69330 + else \
69331 + ___retval = vmalloc_32_user((unsigned long)___x);\
69332 + ___retval; \
69333 +})
69334 +
69335 #endif /* _LINUX_VMALLOC_H */
69336 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
69337 index 13070d6..aa4159a 100644
69338 --- a/include/linux/vmstat.h
69339 +++ b/include/linux/vmstat.h
69340 @@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(int cpu)
69341 /*
69342 * Zone based page accounting with per cpu differentials.
69343 */
69344 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
69345 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
69346
69347 static inline void zone_page_state_add(long x, struct zone *zone,
69348 enum zone_stat_item item)
69349 {
69350 - atomic_long_add(x, &zone->vm_stat[item]);
69351 - atomic_long_add(x, &vm_stat[item]);
69352 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
69353 + atomic_long_add_unchecked(x, &vm_stat[item]);
69354 }
69355
69356 static inline unsigned long global_page_state(enum zone_stat_item item)
69357 {
69358 - long x = atomic_long_read(&vm_stat[item]);
69359 + long x = atomic_long_read_unchecked(&vm_stat[item]);
69360 #ifdef CONFIG_SMP
69361 if (x < 0)
69362 x = 0;
69363 @@ -158,7 +158,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
69364 static inline unsigned long zone_page_state(struct zone *zone,
69365 enum zone_stat_item item)
69366 {
69367 - long x = atomic_long_read(&zone->vm_stat[item]);
69368 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
69369 #ifdef CONFIG_SMP
69370 if (x < 0)
69371 x = 0;
69372 @@ -175,7 +175,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
69373 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
69374 enum zone_stat_item item)
69375 {
69376 - long x = atomic_long_read(&zone->vm_stat[item]);
69377 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
69378
69379 #ifdef CONFIG_SMP
69380 int cpu;
69381 @@ -264,8 +264,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
69382
69383 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
69384 {
69385 - atomic_long_inc(&zone->vm_stat[item]);
69386 - atomic_long_inc(&vm_stat[item]);
69387 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
69388 + atomic_long_inc_unchecked(&vm_stat[item]);
69389 }
69390
69391 static inline void __inc_zone_page_state(struct page *page,
69392 @@ -276,8 +276,8 @@ static inline void __inc_zone_page_state(struct page *page,
69393
69394 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
69395 {
69396 - atomic_long_dec(&zone->vm_stat[item]);
69397 - atomic_long_dec(&vm_stat[item]);
69398 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
69399 + atomic_long_dec_unchecked(&vm_stat[item]);
69400 }
69401
69402 static inline void __dec_zone_page_state(struct page *page,
69403 diff --git a/include/linux/xattr.h b/include/linux/xattr.h
69404 index 5c84af8..1a3b6e2 100644
69405 --- a/include/linux/xattr.h
69406 +++ b/include/linux/xattr.h
69407 @@ -33,6 +33,11 @@
69408 #define XATTR_USER_PREFIX "user."
69409 #define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1)
69410
69411 +/* User namespace */
69412 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
69413 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
69414 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
69415 +
69416 struct inode;
69417 struct dentry;
69418
69419 diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
69420 index eed5fcc..5080d24 100644
69421 --- a/include/media/saa7146_vv.h
69422 +++ b/include/media/saa7146_vv.h
69423 @@ -167,7 +167,7 @@ struct saa7146_ext_vv
69424 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
69425
69426 /* the extension can override this */
69427 - struct v4l2_ioctl_ops ops;
69428 + v4l2_ioctl_ops_no_const ops;
69429 /* pointer to the saa7146 core ops */
69430 const struct v4l2_ioctl_ops *core_ops;
69431
69432 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
69433 index 73c9867..2da8837 100644
69434 --- a/include/media/v4l2-dev.h
69435 +++ b/include/media/v4l2-dev.h
69436 @@ -34,7 +34,7 @@ struct v4l2_device;
69437 #define V4L2_FL_UNREGISTERED (0)
69438
69439 struct v4l2_file_operations {
69440 - struct module *owner;
69441 + struct module * const owner;
69442 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
69443 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
69444 unsigned int (*poll) (struct file *, struct poll_table_struct *);
69445 @@ -46,6 +46,7 @@ struct v4l2_file_operations {
69446 int (*open) (struct file *);
69447 int (*release) (struct file *);
69448 };
69449 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
69450
69451 /*
69452 * Newer version of video_device, handled by videodev2.c
69453 diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
69454 index 5d5d550..f559ef1 100644
69455 --- a/include/media/v4l2-device.h
69456 +++ b/include/media/v4l2-device.h
69457 @@ -71,7 +71,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
69458 this function returns 0. If the name ends with a digit (e.g. cx18),
69459 then the name will be set to cx18-0 since cx180 looks really odd. */
69460 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
69461 - atomic_t *instance);
69462 + atomic_unchecked_t *instance);
69463
69464 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
69465 Since the parent disappears this ensures that v4l2_dev doesn't have an
69466 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
69467 index 7a4529d..7244290 100644
69468 --- a/include/media/v4l2-ioctl.h
69469 +++ b/include/media/v4l2-ioctl.h
69470 @@ -243,6 +243,7 @@ struct v4l2_ioctl_ops {
69471 long (*vidioc_default) (struct file *file, void *fh,
69472 int cmd, void *arg);
69473 };
69474 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
69475
69476
69477 /* v4l debugging and diagnostics */
69478 diff --git a/include/net/flow.h b/include/net/flow.h
69479 index 809970b..c3df4f3 100644
69480 --- a/include/net/flow.h
69481 +++ b/include/net/flow.h
69482 @@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net *net, struct flowi *key, u16 family,
69483 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
69484 u8 dir, flow_resolve_t resolver);
69485 extern void flow_cache_flush(void);
69486 -extern atomic_t flow_cache_genid;
69487 +extern atomic_unchecked_t flow_cache_genid;
69488
69489 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
69490 {
69491 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
69492 index 15e1f8fe..668837c 100644
69493 --- a/include/net/inetpeer.h
69494 +++ b/include/net/inetpeer.h
69495 @@ -24,7 +24,7 @@ struct inet_peer
69496 __u32 dtime; /* the time of last use of not
69497 * referenced entries */
69498 atomic_t refcnt;
69499 - atomic_t rid; /* Frag reception counter */
69500 + atomic_unchecked_t rid; /* Frag reception counter */
69501 __u32 tcp_ts;
69502 unsigned long tcp_ts_stamp;
69503 };
69504 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
69505 index 98978e7..2243a3d 100644
69506 --- a/include/net/ip_vs.h
69507 +++ b/include/net/ip_vs.h
69508 @@ -365,7 +365,7 @@ struct ip_vs_conn {
69509 struct ip_vs_conn *control; /* Master control connection */
69510 atomic_t n_control; /* Number of controlled ones */
69511 struct ip_vs_dest *dest; /* real server */
69512 - atomic_t in_pkts; /* incoming packet counter */
69513 + atomic_unchecked_t in_pkts; /* incoming packet counter */
69514
69515 /* packet transmitter for different forwarding methods. If it
69516 mangles the packet, it must return NF_DROP or better NF_STOLEN,
69517 @@ -466,7 +466,7 @@ struct ip_vs_dest {
69518 union nf_inet_addr addr; /* IP address of the server */
69519 __be16 port; /* port number of the server */
69520 volatile unsigned flags; /* dest status flags */
69521 - atomic_t conn_flags; /* flags to copy to conn */
69522 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
69523 atomic_t weight; /* server weight */
69524
69525 atomic_t refcnt; /* reference counter */
69526 diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
69527 index 69b610a..fe3962c 100644
69528 --- a/include/net/irda/ircomm_core.h
69529 +++ b/include/net/irda/ircomm_core.h
69530 @@ -51,7 +51,7 @@ typedef struct {
69531 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
69532 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
69533 struct ircomm_info *);
69534 -} call_t;
69535 +} __no_const call_t;
69536
69537 struct ircomm_cb {
69538 irda_queue_t queue;
69539 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
69540 index eea2e61..08c692d 100644
69541 --- a/include/net/irda/ircomm_tty.h
69542 +++ b/include/net/irda/ircomm_tty.h
69543 @@ -35,6 +35,7 @@
69544 #include <linux/termios.h>
69545 #include <linux/timer.h>
69546 #include <linux/tty.h> /* struct tty_struct */
69547 +#include <asm/local.h>
69548
69549 #include <net/irda/irias_object.h>
69550 #include <net/irda/ircomm_core.h>
69551 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
69552 unsigned short close_delay;
69553 unsigned short closing_wait; /* time to wait before closing */
69554
69555 - int open_count;
69556 - int blocked_open; /* # of blocked opens */
69557 + local_t open_count;
69558 + local_t blocked_open; /* # of blocked opens */
69559
69560 /* Protect concurent access to :
69561 * o self->open_count
69562 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
69563 index f82a1e8..82d81e8 100644
69564 --- a/include/net/iucv/af_iucv.h
69565 +++ b/include/net/iucv/af_iucv.h
69566 @@ -87,7 +87,7 @@ struct iucv_sock {
69567 struct iucv_sock_list {
69568 struct hlist_head head;
69569 rwlock_t lock;
69570 - atomic_t autobind_name;
69571 + atomic_unchecked_t autobind_name;
69572 };
69573
69574 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
69575 diff --git a/include/net/lapb.h b/include/net/lapb.h
69576 index 96cb5dd..25e8d4f 100644
69577 --- a/include/net/lapb.h
69578 +++ b/include/net/lapb.h
69579 @@ -95,7 +95,7 @@ struct lapb_cb {
69580 struct sk_buff_head write_queue;
69581 struct sk_buff_head ack_queue;
69582 unsigned char window;
69583 - struct lapb_register_struct callbacks;
69584 + struct lapb_register_struct *callbacks;
69585
69586 /* FRMR control information */
69587 struct lapb_frame frmr_data;
69588 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
69589 index 3817fda..cdb2343 100644
69590 --- a/include/net/neighbour.h
69591 +++ b/include/net/neighbour.h
69592 @@ -131,7 +131,7 @@ struct neigh_ops
69593 int (*connected_output)(struct sk_buff*);
69594 int (*hh_output)(struct sk_buff*);
69595 int (*queue_xmit)(struct sk_buff*);
69596 -};
69597 +} __do_const;
69598
69599 struct pneigh_entry
69600 {
69601 diff --git a/include/net/netlink.h b/include/net/netlink.h
69602 index c344646..4778c71 100644
69603 --- a/include/net/netlink.h
69604 +++ b/include/net/netlink.h
69605 @@ -335,7 +335,7 @@ static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining)
69606 {
69607 return (remaining >= (int) sizeof(struct nlmsghdr) &&
69608 nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
69609 - nlh->nlmsg_len <= remaining);
69610 + nlh->nlmsg_len <= (unsigned int)remaining);
69611 }
69612
69613 /**
69614 @@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
69615 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
69616 {
69617 if (mark)
69618 - skb_trim(skb, (unsigned char *) mark - skb->data);
69619 + skb_trim(skb, (const unsigned char *) mark - skb->data);
69620 }
69621
69622 /**
69623 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
69624 index 9a4b8b7..e49e077 100644
69625 --- a/include/net/netns/ipv4.h
69626 +++ b/include/net/netns/ipv4.h
69627 @@ -54,7 +54,7 @@ struct netns_ipv4 {
69628 int current_rt_cache_rebuild_count;
69629
69630 struct timer_list rt_secret_timer;
69631 - atomic_t rt_genid;
69632 + atomic_unchecked_t rt_genid;
69633
69634 #ifdef CONFIG_IP_MROUTE
69635 struct sock *mroute_sk;
69636 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
69637 index 8a6d529..171f401 100644
69638 --- a/include/net/sctp/sctp.h
69639 +++ b/include/net/sctp/sctp.h
69640 @@ -305,8 +305,8 @@ extern int sctp_debug_flag;
69641
69642 #else /* SCTP_DEBUG */
69643
69644 -#define SCTP_DEBUG_PRINTK(whatever...)
69645 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
69646 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
69647 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
69648 #define SCTP_ENABLE_DEBUG
69649 #define SCTP_DISABLE_DEBUG
69650 #define SCTP_ASSERT(expr, str, func)
69651 diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
69652 index d97f689..f3b90ab 100644
69653 --- a/include/net/secure_seq.h
69654 +++ b/include/net/secure_seq.h
69655 @@ -7,14 +7,14 @@ extern __u32 secure_ip_id(__be32 daddr);
69656 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
69657 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
69658 extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
69659 - __be16 dport);
69660 + __be16 dport);
69661 extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
69662 __be16 sport, __be16 dport);
69663 extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
69664 - __be16 sport, __be16 dport);
69665 + __be16 sport, __be16 dport);
69666 extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
69667 - __be16 sport, __be16 dport);
69668 + __be16 sport, __be16 dport);
69669 extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
69670 - __be16 sport, __be16 dport);
69671 + __be16 sport, __be16 dport);
69672
69673 #endif /* _NET_SECURE_SEQ */
69674 diff --git a/include/net/sock.h b/include/net/sock.h
69675 index 9f96394..76fc9c7 100644
69676 --- a/include/net/sock.h
69677 +++ b/include/net/sock.h
69678 @@ -272,7 +272,7 @@ struct sock {
69679 rwlock_t sk_callback_lock;
69680 int sk_err,
69681 sk_err_soft;
69682 - atomic_t sk_drops;
69683 + atomic_unchecked_t sk_drops;
69684 unsigned short sk_ack_backlog;
69685 unsigned short sk_max_ack_backlog;
69686 __u32 sk_priority;
69687 @@ -737,7 +737,7 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
69688 extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
69689 extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
69690 #else
69691 -static void inline sock_prot_inuse_add(struct net *net, struct proto *prot,
69692 +static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
69693 int inc)
69694 {
69695 }
69696 diff --git a/include/net/tcp.h b/include/net/tcp.h
69697 index 6cfe18b..dd21acb 100644
69698 --- a/include/net/tcp.h
69699 +++ b/include/net/tcp.h
69700 @@ -1444,8 +1444,8 @@ enum tcp_seq_states {
69701 struct tcp_seq_afinfo {
69702 char *name;
69703 sa_family_t family;
69704 - struct file_operations seq_fops;
69705 - struct seq_operations seq_ops;
69706 + file_operations_no_const seq_fops;
69707 + seq_operations_no_const seq_ops;
69708 };
69709
69710 struct tcp_iter_state {
69711 diff --git a/include/net/udp.h b/include/net/udp.h
69712 index f98abd2..b4b042f 100644
69713 --- a/include/net/udp.h
69714 +++ b/include/net/udp.h
69715 @@ -187,8 +187,8 @@ struct udp_seq_afinfo {
69716 char *name;
69717 sa_family_t family;
69718 struct udp_table *udp_table;
69719 - struct file_operations seq_fops;
69720 - struct seq_operations seq_ops;
69721 + file_operations_no_const seq_fops;
69722 + seq_operations_no_const seq_ops;
69723 };
69724
69725 struct udp_iter_state {
69726 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
69727 index cbb822e..e9c1cbe 100644
69728 --- a/include/rdma/iw_cm.h
69729 +++ b/include/rdma/iw_cm.h
69730 @@ -129,7 +129,7 @@ struct iw_cm_verbs {
69731 int backlog);
69732
69733 int (*destroy_listen)(struct iw_cm_id *cm_id);
69734 -};
69735 +} __no_const;
69736
69737 /**
69738 * iw_create_cm_id - Create an IW CM identifier.
69739 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
69740 index 09a124b..caa8ca8 100644
69741 --- a/include/scsi/libfc.h
69742 +++ b/include/scsi/libfc.h
69743 @@ -675,6 +675,7 @@ struct libfc_function_template {
69744 */
69745 void (*disc_stop_final) (struct fc_lport *);
69746 };
69747 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
69748
69749 /* information used by the discovery layer */
69750 struct fc_disc {
69751 @@ -707,7 +708,7 @@ struct fc_lport {
69752 struct fc_disc disc;
69753
69754 /* Operational Information */
69755 - struct libfc_function_template tt;
69756 + libfc_function_template_no_const tt;
69757 u8 link_up;
69758 u8 qfull;
69759 enum fc_lport_state state;
69760 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
69761 index de8e180..f15e0d7 100644
69762 --- a/include/scsi/scsi_device.h
69763 +++ b/include/scsi/scsi_device.h
69764 @@ -156,9 +156,9 @@ struct scsi_device {
69765 unsigned int max_device_blocked; /* what device_blocked counts down from */
69766 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
69767
69768 - atomic_t iorequest_cnt;
69769 - atomic_t iodone_cnt;
69770 - atomic_t ioerr_cnt;
69771 + atomic_unchecked_t iorequest_cnt;
69772 + atomic_unchecked_t iodone_cnt;
69773 + atomic_unchecked_t ioerr_cnt;
69774
69775 struct device sdev_gendev,
69776 sdev_dev;
69777 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
69778 index fc50bd6..81ba9cb 100644
69779 --- a/include/scsi/scsi_transport_fc.h
69780 +++ b/include/scsi/scsi_transport_fc.h
69781 @@ -708,7 +708,7 @@ struct fc_function_template {
69782 unsigned long show_host_system_hostname:1;
69783
69784 unsigned long disable_target_scan:1;
69785 -};
69786 +} __do_const;
69787
69788
69789 /**
69790 diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
69791 index 3dae3f7..8440d6f 100644
69792 --- a/include/sound/ac97_codec.h
69793 +++ b/include/sound/ac97_codec.h
69794 @@ -419,15 +419,15 @@
69795 struct snd_ac97;
69796
69797 struct snd_ac97_build_ops {
69798 - int (*build_3d) (struct snd_ac97 *ac97);
69799 - int (*build_specific) (struct snd_ac97 *ac97);
69800 - int (*build_spdif) (struct snd_ac97 *ac97);
69801 - int (*build_post_spdif) (struct snd_ac97 *ac97);
69802 + int (* const build_3d) (struct snd_ac97 *ac97);
69803 + int (* const build_specific) (struct snd_ac97 *ac97);
69804 + int (* const build_spdif) (struct snd_ac97 *ac97);
69805 + int (* const build_post_spdif) (struct snd_ac97 *ac97);
69806 #ifdef CONFIG_PM
69807 - void (*suspend) (struct snd_ac97 *ac97);
69808 - void (*resume) (struct snd_ac97 *ac97);
69809 + void (* const suspend) (struct snd_ac97 *ac97);
69810 + void (* const resume) (struct snd_ac97 *ac97);
69811 #endif
69812 - void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
69813 + void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
69814 };
69815
69816 struct snd_ac97_bus_ops {
69817 @@ -477,7 +477,7 @@ struct snd_ac97_template {
69818
69819 struct snd_ac97 {
69820 /* -- lowlevel (hardware) driver specific -- */
69821 - struct snd_ac97_build_ops * build_ops;
69822 + const struct snd_ac97_build_ops * build_ops;
69823 void *private_data;
69824 void (*private_free) (struct snd_ac97 *ac97);
69825 /* --- */
69826 diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
69827 index 891cf1a..a94ba2b 100644
69828 --- a/include/sound/ak4xxx-adda.h
69829 +++ b/include/sound/ak4xxx-adda.h
69830 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
69831 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
69832 unsigned char val);
69833 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
69834 -};
69835 +} __no_const;
69836
69837 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
69838
69839 diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
69840 index 8c05e47..2b5df97 100644
69841 --- a/include/sound/hwdep.h
69842 +++ b/include/sound/hwdep.h
69843 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
69844 struct snd_hwdep_dsp_status *status);
69845 int (*dsp_load)(struct snd_hwdep *hw,
69846 struct snd_hwdep_dsp_image *image);
69847 -};
69848 +} __no_const;
69849
69850 struct snd_hwdep {
69851 struct snd_card *card;
69852 diff --git a/include/sound/info.h b/include/sound/info.h
69853 index 112e894..6fda5b5 100644
69854 --- a/include/sound/info.h
69855 +++ b/include/sound/info.h
69856 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
69857 struct snd_info_buffer *buffer);
69858 void (*write)(struct snd_info_entry *entry,
69859 struct snd_info_buffer *buffer);
69860 -};
69861 +} __no_const;
69862
69863 struct snd_info_entry_ops {
69864 int (*open)(struct snd_info_entry *entry,
69865 diff --git a/include/sound/pcm.h b/include/sound/pcm.h
69866 index de6d981..590a550 100644
69867 --- a/include/sound/pcm.h
69868 +++ b/include/sound/pcm.h
69869 @@ -80,6 +80,7 @@ struct snd_pcm_ops {
69870 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
69871 int (*ack)(struct snd_pcm_substream *substream);
69872 };
69873 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
69874
69875 /*
69876 *
69877 diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
69878 index 736eac7..fe8a80f 100644
69879 --- a/include/sound/sb16_csp.h
69880 +++ b/include/sound/sb16_csp.h
69881 @@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
69882 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
69883 int (*csp_stop) (struct snd_sb_csp * p);
69884 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
69885 -};
69886 +} __no_const;
69887
69888 /*
69889 * CSP private data
69890 diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
69891 index 444cd6b..3327cc5 100644
69892 --- a/include/sound/ymfpci.h
69893 +++ b/include/sound/ymfpci.h
69894 @@ -358,7 +358,7 @@ struct snd_ymfpci {
69895 spinlock_t reg_lock;
69896 spinlock_t voice_lock;
69897 wait_queue_head_t interrupt_sleep;
69898 - atomic_t interrupt_sleep_count;
69899 + atomic_unchecked_t interrupt_sleep_count;
69900 struct snd_info_entry *proc_entry;
69901 const struct firmware *dsp_microcode;
69902 const struct firmware *controller_microcode;
69903 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
69904 index b89f9db..f097b38 100644
69905 --- a/include/trace/events/irq.h
69906 +++ b/include/trace/events/irq.h
69907 @@ -34,7 +34,7 @@
69908 */
69909 TRACE_EVENT(irq_handler_entry,
69910
69911 - TP_PROTO(int irq, struct irqaction *action),
69912 + TP_PROTO(int irq, const struct irqaction *action),
69913
69914 TP_ARGS(irq, action),
69915
69916 @@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
69917 */
69918 TRACE_EVENT(irq_handler_exit,
69919
69920 - TP_PROTO(int irq, struct irqaction *action, int ret),
69921 + TP_PROTO(int irq, const struct irqaction *action, int ret),
69922
69923 TP_ARGS(irq, action, ret),
69924
69925 @@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
69926 */
69927 TRACE_EVENT(softirq_entry,
69928
69929 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
69930 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
69931
69932 TP_ARGS(h, vec),
69933
69934 @@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
69935 */
69936 TRACE_EVENT(softirq_exit,
69937
69938 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
69939 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
69940
69941 TP_ARGS(h, vec),
69942
69943 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
69944 index 0993a22..32ba2fe 100644
69945 --- a/include/video/uvesafb.h
69946 +++ b/include/video/uvesafb.h
69947 @@ -177,6 +177,7 @@ struct uvesafb_par {
69948 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
69949 u8 pmi_setpal; /* PMI for palette changes */
69950 u16 *pmi_base; /* protected mode interface location */
69951 + u8 *pmi_code; /* protected mode code location */
69952 void *pmi_start;
69953 void *pmi_pal;
69954 u8 *vbe_state_orig; /*
69955 diff --git a/init/Kconfig b/init/Kconfig
69956 index d72691b..3996e54 100644
69957 --- a/init/Kconfig
69958 +++ b/init/Kconfig
69959 @@ -1004,7 +1004,7 @@ config SLUB_DEBUG
69960
69961 config COMPAT_BRK
69962 bool "Disable heap randomization"
69963 - default y
69964 + default n
69965 help
69966 Randomizing heap placement makes heap exploits harder, but it
69967 also breaks ancient binaries (including anything libc5 based).
69968 diff --git a/init/do_mounts.c b/init/do_mounts.c
69969 index bb008d0..4fa3933 100644
69970 --- a/init/do_mounts.c
69971 +++ b/init/do_mounts.c
69972 @@ -216,11 +216,11 @@ static void __init get_fs_names(char *page)
69973
69974 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
69975 {
69976 - int err = sys_mount(name, "/root", fs, flags, data);
69977 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
69978 if (err)
69979 return err;
69980
69981 - sys_chdir("/root");
69982 + sys_chdir((__force const char __user *)"/root");
69983 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
69984 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
69985 current->fs->pwd.mnt->mnt_sb->s_type->name,
69986 @@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...)
69987 va_start(args, fmt);
69988 vsprintf(buf, fmt, args);
69989 va_end(args);
69990 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
69991 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
69992 if (fd >= 0) {
69993 sys_ioctl(fd, FDEJECT, 0);
69994 sys_close(fd);
69995 }
69996 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
69997 - fd = sys_open("/dev/console", O_RDWR, 0);
69998 + fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
69999 if (fd >= 0) {
70000 sys_ioctl(fd, TCGETS, (long)&termios);
70001 termios.c_lflag &= ~ICANON;
70002 sys_ioctl(fd, TCSETSF, (long)&termios);
70003 - sys_read(fd, &c, 1);
70004 + sys_read(fd, (char __user *)&c, 1);
70005 termios.c_lflag |= ICANON;
70006 sys_ioctl(fd, TCSETSF, (long)&termios);
70007 sys_close(fd);
70008 @@ -416,6 +416,6 @@ void __init prepare_namespace(void)
70009 mount_root();
70010 out:
70011 devtmpfs_mount("dev");
70012 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
70013 - sys_chroot(".");
70014 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
70015 + sys_chroot((__force char __user *)".");
70016 }
70017 diff --git a/init/do_mounts.h b/init/do_mounts.h
70018 index f5b978a..69dbfe8 100644
70019 --- a/init/do_mounts.h
70020 +++ b/init/do_mounts.h
70021 @@ -15,15 +15,15 @@ extern int root_mountflags;
70022
70023 static inline int create_dev(char *name, dev_t dev)
70024 {
70025 - sys_unlink(name);
70026 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
70027 + sys_unlink((char __force_user *)name);
70028 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
70029 }
70030
70031 #if BITS_PER_LONG == 32
70032 static inline u32 bstat(char *name)
70033 {
70034 struct stat64 stat;
70035 - if (sys_stat64(name, &stat) != 0)
70036 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
70037 return 0;
70038 if (!S_ISBLK(stat.st_mode))
70039 return 0;
70040 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
70041 static inline u32 bstat(char *name)
70042 {
70043 struct stat stat;
70044 - if (sys_newstat(name, &stat) != 0)
70045 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
70046 return 0;
70047 if (!S_ISBLK(stat.st_mode))
70048 return 0;
70049 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
70050 index 614241b..4da046b 100644
70051 --- a/init/do_mounts_initrd.c
70052 +++ b/init/do_mounts_initrd.c
70053 @@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shell)
70054 sys_close(old_fd);sys_close(root_fd);
70055 sys_close(0);sys_close(1);sys_close(2);
70056 sys_setsid();
70057 - (void) sys_open("/dev/console",O_RDWR,0);
70058 + (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
70059 (void) sys_dup(0);
70060 (void) sys_dup(0);
70061 return kernel_execve(shell, argv, envp_init);
70062 @@ -47,13 +47,13 @@ static void __init handle_initrd(void)
70063 create_dev("/dev/root.old", Root_RAM0);
70064 /* mount initrd on rootfs' /root */
70065 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
70066 - sys_mkdir("/old", 0700);
70067 - root_fd = sys_open("/", 0, 0);
70068 - old_fd = sys_open("/old", 0, 0);
70069 + sys_mkdir((const char __force_user *)"/old", 0700);
70070 + root_fd = sys_open((const char __force_user *)"/", 0, 0);
70071 + old_fd = sys_open((const char __force_user *)"/old", 0, 0);
70072 /* move initrd over / and chdir/chroot in initrd root */
70073 - sys_chdir("/root");
70074 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
70075 - sys_chroot(".");
70076 + sys_chdir((const char __force_user *)"/root");
70077 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
70078 + sys_chroot((const char __force_user *)".");
70079
70080 /*
70081 * In case that a resume from disk is carried out by linuxrc or one of
70082 @@ -70,15 +70,15 @@ static void __init handle_initrd(void)
70083
70084 /* move initrd to rootfs' /old */
70085 sys_fchdir(old_fd);
70086 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
70087 + sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
70088 /* switch root and cwd back to / of rootfs */
70089 sys_fchdir(root_fd);
70090 - sys_chroot(".");
70091 + sys_chroot((const char __force_user *)".");
70092 sys_close(old_fd);
70093 sys_close(root_fd);
70094
70095 if (new_decode_dev(real_root_dev) == Root_RAM0) {
70096 - sys_chdir("/old");
70097 + sys_chdir((const char __force_user *)"/old");
70098 return;
70099 }
70100
70101 @@ -86,17 +86,17 @@ static void __init handle_initrd(void)
70102 mount_root();
70103
70104 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
70105 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
70106 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
70107 if (!error)
70108 printk("okay\n");
70109 else {
70110 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
70111 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
70112 if (error == -ENOENT)
70113 printk("/initrd does not exist. Ignored.\n");
70114 else
70115 printk("failed\n");
70116 printk(KERN_NOTICE "Unmounting old root\n");
70117 - sys_umount("/old", MNT_DETACH);
70118 + sys_umount((char __force_user *)"/old", MNT_DETACH);
70119 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
70120 if (fd < 0) {
70121 error = fd;
70122 @@ -119,11 +119,11 @@ int __init initrd_load(void)
70123 * mounted in the normal path.
70124 */
70125 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
70126 - sys_unlink("/initrd.image");
70127 + sys_unlink((const char __force_user *)"/initrd.image");
70128 handle_initrd();
70129 return 1;
70130 }
70131 }
70132 - sys_unlink("/initrd.image");
70133 + sys_unlink((const char __force_user *)"/initrd.image");
70134 return 0;
70135 }
70136 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
70137 index 69aebbf..c0bf6a7 100644
70138 --- a/init/do_mounts_md.c
70139 +++ b/init/do_mounts_md.c
70140 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
70141 partitioned ? "_d" : "", minor,
70142 md_setup_args[ent].device_names);
70143
70144 - fd = sys_open(name, 0, 0);
70145 + fd = sys_open((char __force_user *)name, 0, 0);
70146 if (fd < 0) {
70147 printk(KERN_ERR "md: open failed - cannot start "
70148 "array %s\n", name);
70149 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
70150 * array without it
70151 */
70152 sys_close(fd);
70153 - fd = sys_open(name, 0, 0);
70154 + fd = sys_open((char __force_user *)name, 0, 0);
70155 sys_ioctl(fd, BLKRRPART, 0);
70156 }
70157 sys_close(fd);
70158 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
70159
70160 wait_for_device_probe();
70161
70162 - fd = sys_open("/dev/md0", 0, 0);
70163 + fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
70164 if (fd >= 0) {
70165 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
70166 sys_close(fd);
70167 diff --git a/init/initramfs.c b/init/initramfs.c
70168 index 1fd59b8..a01b079 100644
70169 --- a/init/initramfs.c
70170 +++ b/init/initramfs.c
70171 @@ -74,7 +74,7 @@ static void __init free_hash(void)
70172 }
70173 }
70174
70175 -static long __init do_utime(char __user *filename, time_t mtime)
70176 +static long __init do_utime(__force char __user *filename, time_t mtime)
70177 {
70178 struct timespec t[2];
70179
70180 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
70181 struct dir_entry *de, *tmp;
70182 list_for_each_entry_safe(de, tmp, &dir_list, list) {
70183 list_del(&de->list);
70184 - do_utime(de->name, de->mtime);
70185 + do_utime((char __force_user *)de->name, de->mtime);
70186 kfree(de->name);
70187 kfree(de);
70188 }
70189 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
70190 if (nlink >= 2) {
70191 char *old = find_link(major, minor, ino, mode, collected);
70192 if (old)
70193 - return (sys_link(old, collected) < 0) ? -1 : 1;
70194 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
70195 }
70196 return 0;
70197 }
70198 @@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
70199 {
70200 struct stat st;
70201
70202 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
70203 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
70204 if (S_ISDIR(st.st_mode))
70205 - sys_rmdir(path);
70206 + sys_rmdir((char __force_user *)path);
70207 else
70208 - sys_unlink(path);
70209 + sys_unlink((char __force_user *)path);
70210 }
70211 }
70212
70213 @@ -305,7 +305,7 @@ static int __init do_name(void)
70214 int openflags = O_WRONLY|O_CREAT;
70215 if (ml != 1)
70216 openflags |= O_TRUNC;
70217 - wfd = sys_open(collected, openflags, mode);
70218 + wfd = sys_open((char __force_user *)collected, openflags, mode);
70219
70220 if (wfd >= 0) {
70221 sys_fchown(wfd, uid, gid);
70222 @@ -317,17 +317,17 @@ static int __init do_name(void)
70223 }
70224 }
70225 } else if (S_ISDIR(mode)) {
70226 - sys_mkdir(collected, mode);
70227 - sys_chown(collected, uid, gid);
70228 - sys_chmod(collected, mode);
70229 + sys_mkdir((char __force_user *)collected, mode);
70230 + sys_chown((char __force_user *)collected, uid, gid);
70231 + sys_chmod((char __force_user *)collected, mode);
70232 dir_add(collected, mtime);
70233 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
70234 S_ISFIFO(mode) || S_ISSOCK(mode)) {
70235 if (maybe_link() == 0) {
70236 - sys_mknod(collected, mode, rdev);
70237 - sys_chown(collected, uid, gid);
70238 - sys_chmod(collected, mode);
70239 - do_utime(collected, mtime);
70240 + sys_mknod((char __force_user *)collected, mode, rdev);
70241 + sys_chown((char __force_user *)collected, uid, gid);
70242 + sys_chmod((char __force_user *)collected, mode);
70243 + do_utime((char __force_user *)collected, mtime);
70244 }
70245 }
70246 return 0;
70247 @@ -336,15 +336,15 @@ static int __init do_name(void)
70248 static int __init do_copy(void)
70249 {
70250 if (count >= body_len) {
70251 - sys_write(wfd, victim, body_len);
70252 + sys_write(wfd, (char __force_user *)victim, body_len);
70253 sys_close(wfd);
70254 - do_utime(vcollected, mtime);
70255 + do_utime((char __force_user *)vcollected, mtime);
70256 kfree(vcollected);
70257 eat(body_len);
70258 state = SkipIt;
70259 return 0;
70260 } else {
70261 - sys_write(wfd, victim, count);
70262 + sys_write(wfd, (char __force_user *)victim, count);
70263 body_len -= count;
70264 eat(count);
70265 return 1;
70266 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
70267 {
70268 collected[N_ALIGN(name_len) + body_len] = '\0';
70269 clean_path(collected, 0);
70270 - sys_symlink(collected + N_ALIGN(name_len), collected);
70271 - sys_lchown(collected, uid, gid);
70272 - do_utime(collected, mtime);
70273 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
70274 + sys_lchown((char __force_user *)collected, uid, gid);
70275 + do_utime((char __force_user *)collected, mtime);
70276 state = SkipIt;
70277 next_state = Reset;
70278 return 0;
70279 diff --git a/init/main.c b/init/main.c
70280 index 1eb4bd5..da8c6f5 100644
70281 --- a/init/main.c
70282 +++ b/init/main.c
70283 @@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void) { }
70284 #ifdef CONFIG_TC
70285 extern void tc_init(void);
70286 #endif
70287 +extern void grsecurity_init(void);
70288
70289 enum system_states system_state __read_mostly;
70290 EXPORT_SYMBOL(system_state);
70291 @@ -183,6 +184,49 @@ static int __init set_reset_devices(char *str)
70292
70293 __setup("reset_devices", set_reset_devices);
70294
70295 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
70296 +extern char pax_enter_kernel_user[];
70297 +extern char pax_exit_kernel_user[];
70298 +extern pgdval_t clone_pgd_mask;
70299 +#endif
70300 +
70301 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
70302 +static int __init setup_pax_nouderef(char *str)
70303 +{
70304 +#ifdef CONFIG_X86_32
70305 + unsigned int cpu;
70306 + struct desc_struct *gdt;
70307 +
70308 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
70309 + gdt = get_cpu_gdt_table(cpu);
70310 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
70311 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
70312 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
70313 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
70314 + }
70315 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
70316 +#else
70317 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
70318 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
70319 + clone_pgd_mask = ~(pgdval_t)0UL;
70320 +#endif
70321 +
70322 + return 0;
70323 +}
70324 +early_param("pax_nouderef", setup_pax_nouderef);
70325 +#endif
70326 +
70327 +#ifdef CONFIG_PAX_SOFTMODE
70328 +int pax_softmode;
70329 +
70330 +static int __init setup_pax_softmode(char *str)
70331 +{
70332 + get_option(&str, &pax_softmode);
70333 + return 1;
70334 +}
70335 +__setup("pax_softmode=", setup_pax_softmode);
70336 +#endif
70337 +
70338 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
70339 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
70340 static const char *panic_later, *panic_param;
70341 @@ -705,52 +749,53 @@ int initcall_debug;
70342 core_param(initcall_debug, initcall_debug, bool, 0644);
70343
70344 static char msgbuf[64];
70345 -static struct boot_trace_call call;
70346 -static struct boot_trace_ret ret;
70347 +static struct boot_trace_call trace_call;
70348 +static struct boot_trace_ret trace_ret;
70349
70350 int do_one_initcall(initcall_t fn)
70351 {
70352 int count = preempt_count();
70353 ktime_t calltime, delta, rettime;
70354 + const char *msg1 = "", *msg2 = "";
70355
70356 if (initcall_debug) {
70357 - call.caller = task_pid_nr(current);
70358 - printk("calling %pF @ %i\n", fn, call.caller);
70359 + trace_call.caller = task_pid_nr(current);
70360 + printk("calling %pF @ %i\n", fn, trace_call.caller);
70361 calltime = ktime_get();
70362 - trace_boot_call(&call, fn);
70363 + trace_boot_call(&trace_call, fn);
70364 enable_boot_trace();
70365 }
70366
70367 - ret.result = fn();
70368 + trace_ret.result = fn();
70369
70370 if (initcall_debug) {
70371 disable_boot_trace();
70372 rettime = ktime_get();
70373 delta = ktime_sub(rettime, calltime);
70374 - ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
70375 - trace_boot_ret(&ret, fn);
70376 + trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
70377 + trace_boot_ret(&trace_ret, fn);
70378 printk("initcall %pF returned %d after %Ld usecs\n", fn,
70379 - ret.result, ret.duration);
70380 + trace_ret.result, trace_ret.duration);
70381 }
70382
70383 msgbuf[0] = 0;
70384
70385 - if (ret.result && ret.result != -ENODEV && initcall_debug)
70386 - sprintf(msgbuf, "error code %d ", ret.result);
70387 + if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
70388 + sprintf(msgbuf, "error code %d ", trace_ret.result);
70389
70390 if (preempt_count() != count) {
70391 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
70392 + msg1 = " preemption imbalance";
70393 preempt_count() = count;
70394 }
70395 if (irqs_disabled()) {
70396 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
70397 + msg2 = " disabled interrupts";
70398 local_irq_enable();
70399 }
70400 - if (msgbuf[0]) {
70401 - printk("initcall %pF returned with %s\n", fn, msgbuf);
70402 + if (msgbuf[0] || *msg1 || *msg2) {
70403 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
70404 }
70405
70406 - return ret.result;
70407 + return trace_ret.result;
70408 }
70409
70410
70411 @@ -893,11 +938,13 @@ static int __init kernel_init(void * unused)
70412 if (!ramdisk_execute_command)
70413 ramdisk_execute_command = "/init";
70414
70415 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
70416 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
70417 ramdisk_execute_command = NULL;
70418 prepare_namespace();
70419 }
70420
70421 + grsecurity_init();
70422 +
70423 /*
70424 * Ok, we have completed the initial bootup, and
70425 * we're essentially up and running. Get rid of the
70426 diff --git a/init/noinitramfs.c b/init/noinitramfs.c
70427 index f4c1a3a..96c19bd 100644
70428 --- a/init/noinitramfs.c
70429 +++ b/init/noinitramfs.c
70430 @@ -29,7 +29,7 @@ static int __init default_rootfs(void)
70431 {
70432 int err;
70433
70434 - err = sys_mkdir("/dev", 0755);
70435 + err = sys_mkdir((const char __user *)"/dev", 0755);
70436 if (err < 0)
70437 goto out;
70438
70439 @@ -39,7 +39,7 @@ static int __init default_rootfs(void)
70440 if (err < 0)
70441 goto out;
70442
70443 - err = sys_mkdir("/root", 0700);
70444 + err = sys_mkdir((const char __user *)"/root", 0700);
70445 if (err < 0)
70446 goto out;
70447
70448 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
70449 index d01bc14..8df81db 100644
70450 --- a/ipc/mqueue.c
70451 +++ b/ipc/mqueue.c
70452 @@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
70453 mq_bytes = (mq_msg_tblsz +
70454 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
70455
70456 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
70457 spin_lock(&mq_lock);
70458 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
70459 u->mq_bytes + mq_bytes >
70460 diff --git a/ipc/msg.c b/ipc/msg.c
70461 index 779f762..4af9e36 100644
70462 --- a/ipc/msg.c
70463 +++ b/ipc/msg.c
70464 @@ -310,18 +310,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
70465 return security_msg_queue_associate(msq, msgflg);
70466 }
70467
70468 +static struct ipc_ops msg_ops = {
70469 + .getnew = newque,
70470 + .associate = msg_security,
70471 + .more_checks = NULL
70472 +};
70473 +
70474 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
70475 {
70476 struct ipc_namespace *ns;
70477 - struct ipc_ops msg_ops;
70478 struct ipc_params msg_params;
70479
70480 ns = current->nsproxy->ipc_ns;
70481
70482 - msg_ops.getnew = newque;
70483 - msg_ops.associate = msg_security;
70484 - msg_ops.more_checks = NULL;
70485 -
70486 msg_params.key = key;
70487 msg_params.flg = msgflg;
70488
70489 diff --git a/ipc/sem.c b/ipc/sem.c
70490 index b781007..f738b04 100644
70491 --- a/ipc/sem.c
70492 +++ b/ipc/sem.c
70493 @@ -309,10 +309,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
70494 return 0;
70495 }
70496
70497 +static struct ipc_ops sem_ops = {
70498 + .getnew = newary,
70499 + .associate = sem_security,
70500 + .more_checks = sem_more_checks
70501 +};
70502 +
70503 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
70504 {
70505 struct ipc_namespace *ns;
70506 - struct ipc_ops sem_ops;
70507 struct ipc_params sem_params;
70508
70509 ns = current->nsproxy->ipc_ns;
70510 @@ -320,10 +325,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
70511 if (nsems < 0 || nsems > ns->sc_semmsl)
70512 return -EINVAL;
70513
70514 - sem_ops.getnew = newary;
70515 - sem_ops.associate = sem_security;
70516 - sem_ops.more_checks = sem_more_checks;
70517 -
70518 sem_params.key = key;
70519 sem_params.flg = semflg;
70520 sem_params.u.nsems = nsems;
70521 @@ -671,6 +672,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
70522 ushort* sem_io = fast_sem_io;
70523 int nsems;
70524
70525 + pax_track_stack();
70526 +
70527 sma = sem_lock_check(ns, semid);
70528 if (IS_ERR(sma))
70529 return PTR_ERR(sma);
70530 @@ -1071,6 +1074,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
70531 unsigned long jiffies_left = 0;
70532 struct ipc_namespace *ns;
70533
70534 + pax_track_stack();
70535 +
70536 ns = current->nsproxy->ipc_ns;
70537
70538 if (nsops < 1 || semid < 0)
70539 diff --git a/ipc/shm.c b/ipc/shm.c
70540 index d30732c..7379456 100644
70541 --- a/ipc/shm.c
70542 +++ b/ipc/shm.c
70543 @@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
70544 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
70545 #endif
70546
70547 +#ifdef CONFIG_GRKERNSEC
70548 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
70549 + const time_t shm_createtime, const uid_t cuid,
70550 + const int shmid);
70551 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
70552 + const time_t shm_createtime);
70553 +#endif
70554 +
70555 void shm_init_ns(struct ipc_namespace *ns)
70556 {
70557 ns->shm_ctlmax = SHMMAX;
70558 @@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
70559 shp->shm_lprid = 0;
70560 shp->shm_atim = shp->shm_dtim = 0;
70561 shp->shm_ctim = get_seconds();
70562 +#ifdef CONFIG_GRKERNSEC
70563 + {
70564 + struct timespec timeval;
70565 + do_posix_clock_monotonic_gettime(&timeval);
70566 +
70567 + shp->shm_createtime = timeval.tv_sec;
70568 + }
70569 +#endif
70570 shp->shm_segsz = size;
70571 shp->shm_nattch = 0;
70572 shp->shm_file = file;
70573 @@ -446,18 +462,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
70574 return 0;
70575 }
70576
70577 +static struct ipc_ops shm_ops = {
70578 + .getnew = newseg,
70579 + .associate = shm_security,
70580 + .more_checks = shm_more_checks
70581 +};
70582 +
70583 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
70584 {
70585 struct ipc_namespace *ns;
70586 - struct ipc_ops shm_ops;
70587 struct ipc_params shm_params;
70588
70589 ns = current->nsproxy->ipc_ns;
70590
70591 - shm_ops.getnew = newseg;
70592 - shm_ops.associate = shm_security;
70593 - shm_ops.more_checks = shm_more_checks;
70594 -
70595 shm_params.key = key;
70596 shm_params.flg = shmflg;
70597 shm_params.u.size = size;
70598 @@ -880,9 +897,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
70599 if (err)
70600 goto out_unlock;
70601
70602 +#ifdef CONFIG_GRKERNSEC
70603 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
70604 + shp->shm_perm.cuid, shmid) ||
70605 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
70606 + err = -EACCES;
70607 + goto out_unlock;
70608 + }
70609 +#endif
70610 +
70611 path.dentry = dget(shp->shm_file->f_path.dentry);
70612 path.mnt = shp->shm_file->f_path.mnt;
70613 shp->shm_nattch++;
70614 +#ifdef CONFIG_GRKERNSEC
70615 + shp->shm_lapid = current->pid;
70616 +#endif
70617 size = i_size_read(path.dentry->d_inode);
70618 shm_unlock(shp);
70619
70620 diff --git a/kernel/acct.c b/kernel/acct.c
70621 index a6605ca..ca91111 100644
70622 --- a/kernel/acct.c
70623 +++ b/kernel/acct.c
70624 @@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
70625 */
70626 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
70627 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
70628 - file->f_op->write(file, (char *)&ac,
70629 + file->f_op->write(file, (char __force_user *)&ac,
70630 sizeof(acct_t), &file->f_pos);
70631 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
70632 set_fs(fs);
70633 diff --git a/kernel/audit.c b/kernel/audit.c
70634 index 5feed23..513b02c 100644
70635 --- a/kernel/audit.c
70636 +++ b/kernel/audit.c
70637 @@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
70638 3) suppressed due to audit_rate_limit
70639 4) suppressed due to audit_backlog_limit
70640 */
70641 -static atomic_t audit_lost = ATOMIC_INIT(0);
70642 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
70643
70644 /* The netlink socket. */
70645 static struct sock *audit_sock;
70646 @@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
70647 unsigned long now;
70648 int print;
70649
70650 - atomic_inc(&audit_lost);
70651 + atomic_inc_unchecked(&audit_lost);
70652
70653 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
70654
70655 @@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
70656 printk(KERN_WARNING
70657 "audit: audit_lost=%d audit_rate_limit=%d "
70658 "audit_backlog_limit=%d\n",
70659 - atomic_read(&audit_lost),
70660 + atomic_read_unchecked(&audit_lost),
70661 audit_rate_limit,
70662 audit_backlog_limit);
70663 audit_panic(message);
70664 @@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
70665 status_set.pid = audit_pid;
70666 status_set.rate_limit = audit_rate_limit;
70667 status_set.backlog_limit = audit_backlog_limit;
70668 - status_set.lost = atomic_read(&audit_lost);
70669 + status_set.lost = atomic_read_unchecked(&audit_lost);
70670 status_set.backlog = skb_queue_len(&audit_skb_queue);
70671 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
70672 &status_set, sizeof(status_set));
70673 @@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
70674 spin_unlock_irq(&tsk->sighand->siglock);
70675 }
70676 read_unlock(&tasklist_lock);
70677 - audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
70678 - &s, sizeof(s));
70679 +
70680 + if (!err)
70681 + audit_send_reply(NETLINK_CB(skb).pid, seq,
70682 + AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
70683 break;
70684 }
70685 case AUDIT_TTY_SET: {
70686 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
70687 index 267e484..f8e295a 100644
70688 --- a/kernel/auditsc.c
70689 +++ b/kernel/auditsc.c
70690 @@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
70691 }
70692
70693 /* global counter which is incremented every time something logs in */
70694 -static atomic_t session_id = ATOMIC_INIT(0);
70695 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
70696
70697 /**
70698 * audit_set_loginuid - set a task's audit_context loginuid
70699 @@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
70700 */
70701 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
70702 {
70703 - unsigned int sessionid = atomic_inc_return(&session_id);
70704 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
70705 struct audit_context *context = task->audit_context;
70706
70707 if (context && context->in_syscall) {
70708 diff --git a/kernel/capability.c b/kernel/capability.c
70709 index 8a944f5..db5001e 100644
70710 --- a/kernel/capability.c
70711 +++ b/kernel/capability.c
70712 @@ -305,10 +305,26 @@ int capable(int cap)
70713 BUG();
70714 }
70715
70716 - if (security_capable(cap) == 0) {
70717 + if (security_capable(cap) == 0 && gr_is_capable(cap)) {
70718 current->flags |= PF_SUPERPRIV;
70719 return 1;
70720 }
70721 return 0;
70722 }
70723 +
70724 +int capable_nolog(int cap)
70725 +{
70726 + if (unlikely(!cap_valid(cap))) {
70727 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
70728 + BUG();
70729 + }
70730 +
70731 + if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
70732 + current->flags |= PF_SUPERPRIV;
70733 + return 1;
70734 + }
70735 + return 0;
70736 +}
70737 +
70738 EXPORT_SYMBOL(capable);
70739 +EXPORT_SYMBOL(capable_nolog);
70740 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
70741 index 1fbcc74..7000012 100644
70742 --- a/kernel/cgroup.c
70743 +++ b/kernel/cgroup.c
70744 @@ -536,6 +536,8 @@ static struct css_set *find_css_set(
70745 struct hlist_head *hhead;
70746 struct cg_cgroup_link *link;
70747
70748 + pax_track_stack();
70749 +
70750 /* First see if we already have a cgroup group that matches
70751 * the desired set */
70752 read_lock(&css_set_lock);
70753 diff --git a/kernel/compat.c b/kernel/compat.c
70754 index 8bc5578..186e44a 100644
70755 --- a/kernel/compat.c
70756 +++ b/kernel/compat.c
70757 @@ -108,7 +108,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
70758 mm_segment_t oldfs;
70759 long ret;
70760
70761 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
70762 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
70763 oldfs = get_fs();
70764 set_fs(KERNEL_DS);
70765 ret = hrtimer_nanosleep_restart(restart);
70766 @@ -140,7 +140,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
70767 oldfs = get_fs();
70768 set_fs(KERNEL_DS);
70769 ret = hrtimer_nanosleep(&tu,
70770 - rmtp ? (struct timespec __user *)&rmt : NULL,
70771 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
70772 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
70773 set_fs(oldfs);
70774
70775 @@ -247,7 +247,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
70776 mm_segment_t old_fs = get_fs();
70777
70778 set_fs(KERNEL_DS);
70779 - ret = sys_sigpending((old_sigset_t __user *) &s);
70780 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
70781 set_fs(old_fs);
70782 if (ret == 0)
70783 ret = put_user(s, set);
70784 @@ -266,8 +266,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
70785 old_fs = get_fs();
70786 set_fs(KERNEL_DS);
70787 ret = sys_sigprocmask(how,
70788 - set ? (old_sigset_t __user *) &s : NULL,
70789 - oset ? (old_sigset_t __user *) &s : NULL);
70790 + set ? (old_sigset_t __force_user *) &s : NULL,
70791 + oset ? (old_sigset_t __force_user *) &s : NULL);
70792 set_fs(old_fs);
70793 if (ret == 0)
70794 if (oset)
70795 @@ -310,7 +310,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
70796 mm_segment_t old_fs = get_fs();
70797
70798 set_fs(KERNEL_DS);
70799 - ret = sys_old_getrlimit(resource, &r);
70800 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
70801 set_fs(old_fs);
70802
70803 if (!ret) {
70804 @@ -385,7 +385,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
70805 mm_segment_t old_fs = get_fs();
70806
70807 set_fs(KERNEL_DS);
70808 - ret = sys_getrusage(who, (struct rusage __user *) &r);
70809 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
70810 set_fs(old_fs);
70811
70812 if (ret)
70813 @@ -412,8 +412,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
70814 set_fs (KERNEL_DS);
70815 ret = sys_wait4(pid,
70816 (stat_addr ?
70817 - (unsigned int __user *) &status : NULL),
70818 - options, (struct rusage __user *) &r);
70819 + (unsigned int __force_user *) &status : NULL),
70820 + options, (struct rusage __force_user *) &r);
70821 set_fs (old_fs);
70822
70823 if (ret > 0) {
70824 @@ -438,8 +438,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
70825 memset(&info, 0, sizeof(info));
70826
70827 set_fs(KERNEL_DS);
70828 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
70829 - uru ? (struct rusage __user *)&ru : NULL);
70830 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
70831 + uru ? (struct rusage __force_user *)&ru : NULL);
70832 set_fs(old_fs);
70833
70834 if ((ret < 0) || (info.si_signo == 0))
70835 @@ -569,8 +569,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
70836 oldfs = get_fs();
70837 set_fs(KERNEL_DS);
70838 err = sys_timer_settime(timer_id, flags,
70839 - (struct itimerspec __user *) &newts,
70840 - (struct itimerspec __user *) &oldts);
70841 + (struct itimerspec __force_user *) &newts,
70842 + (struct itimerspec __force_user *) &oldts);
70843 set_fs(oldfs);
70844 if (!err && old && put_compat_itimerspec(old, &oldts))
70845 return -EFAULT;
70846 @@ -587,7 +587,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
70847 oldfs = get_fs();
70848 set_fs(KERNEL_DS);
70849 err = sys_timer_gettime(timer_id,
70850 - (struct itimerspec __user *) &ts);
70851 + (struct itimerspec __force_user *) &ts);
70852 set_fs(oldfs);
70853 if (!err && put_compat_itimerspec(setting, &ts))
70854 return -EFAULT;
70855 @@ -606,7 +606,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
70856 oldfs = get_fs();
70857 set_fs(KERNEL_DS);
70858 err = sys_clock_settime(which_clock,
70859 - (struct timespec __user *) &ts);
70860 + (struct timespec __force_user *) &ts);
70861 set_fs(oldfs);
70862 return err;
70863 }
70864 @@ -621,7 +621,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
70865 oldfs = get_fs();
70866 set_fs(KERNEL_DS);
70867 err = sys_clock_gettime(which_clock,
70868 - (struct timespec __user *) &ts);
70869 + (struct timespec __force_user *) &ts);
70870 set_fs(oldfs);
70871 if (!err && put_compat_timespec(&ts, tp))
70872 return -EFAULT;
70873 @@ -638,7 +638,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
70874 oldfs = get_fs();
70875 set_fs(KERNEL_DS);
70876 err = sys_clock_getres(which_clock,
70877 - (struct timespec __user *) &ts);
70878 + (struct timespec __force_user *) &ts);
70879 set_fs(oldfs);
70880 if (!err && tp && put_compat_timespec(&ts, tp))
70881 return -EFAULT;
70882 @@ -650,9 +650,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
70883 long err;
70884 mm_segment_t oldfs;
70885 struct timespec tu;
70886 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
70887 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
70888
70889 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
70890 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
70891 oldfs = get_fs();
70892 set_fs(KERNEL_DS);
70893 err = clock_nanosleep_restart(restart);
70894 @@ -684,8 +684,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
70895 oldfs = get_fs();
70896 set_fs(KERNEL_DS);
70897 err = sys_clock_nanosleep(which_clock, flags,
70898 - (struct timespec __user *) &in,
70899 - (struct timespec __user *) &out);
70900 + (struct timespec __force_user *) &in,
70901 + (struct timespec __force_user *) &out);
70902 set_fs(oldfs);
70903
70904 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
70905 diff --git a/kernel/configs.c b/kernel/configs.c
70906 index abaee68..047facd 100644
70907 --- a/kernel/configs.c
70908 +++ b/kernel/configs.c
70909 @@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
70910 struct proc_dir_entry *entry;
70911
70912 /* create the current config file */
70913 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
70914 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
70915 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
70916 + &ikconfig_file_ops);
70917 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
70918 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
70919 + &ikconfig_file_ops);
70920 +#endif
70921 +#else
70922 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
70923 &ikconfig_file_ops);
70924 +#endif
70925 +
70926 if (!entry)
70927 return -ENOMEM;
70928
70929 diff --git a/kernel/cpu.c b/kernel/cpu.c
70930 index 7e8b6ac..8921388 100644
70931 --- a/kernel/cpu.c
70932 +++ b/kernel/cpu.c
70933 @@ -19,7 +19,7 @@
70934 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
70935 static DEFINE_MUTEX(cpu_add_remove_lock);
70936
70937 -static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
70938 +static RAW_NOTIFIER_HEAD(cpu_chain);
70939
70940 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
70941 * Should always be manipulated under cpu_add_remove_lock
70942 diff --git a/kernel/cred.c b/kernel/cred.c
70943 index 0b5b5fc..419b86a 100644
70944 --- a/kernel/cred.c
70945 +++ b/kernel/cred.c
70946 @@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head *rcu)
70947 */
70948 void __put_cred(struct cred *cred)
70949 {
70950 + pax_track_stack();
70951 +
70952 kdebug("__put_cred(%p{%d,%d})", cred,
70953 atomic_read(&cred->usage),
70954 read_cred_subscribers(cred));
70955 @@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
70956 {
70957 struct cred *cred;
70958
70959 + pax_track_stack();
70960 +
70961 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
70962 atomic_read(&tsk->cred->usage),
70963 read_cred_subscribers(tsk->cred));
70964 @@ -222,6 +226,8 @@ const struct cred *get_task_cred(struct task_struct *task)
70965 {
70966 const struct cred *cred;
70967
70968 + pax_track_stack();
70969 +
70970 rcu_read_lock();
70971
70972 do {
70973 @@ -241,6 +247,8 @@ struct cred *cred_alloc_blank(void)
70974 {
70975 struct cred *new;
70976
70977 + pax_track_stack();
70978 +
70979 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
70980 if (!new)
70981 return NULL;
70982 @@ -289,6 +297,8 @@ struct cred *prepare_creds(void)
70983 const struct cred *old;
70984 struct cred *new;
70985
70986 + pax_track_stack();
70987 +
70988 validate_process_creds();
70989
70990 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
70991 @@ -335,6 +345,8 @@ struct cred *prepare_exec_creds(void)
70992 struct thread_group_cred *tgcred = NULL;
70993 struct cred *new;
70994
70995 + pax_track_stack();
70996 +
70997 #ifdef CONFIG_KEYS
70998 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
70999 if (!tgcred)
71000 @@ -441,6 +453,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
71001 struct cred *new;
71002 int ret;
71003
71004 + pax_track_stack();
71005 +
71006 mutex_init(&p->cred_guard_mutex);
71007
71008 if (
71009 @@ -528,6 +542,8 @@ int commit_creds(struct cred *new)
71010 struct task_struct *task = current;
71011 const struct cred *old = task->real_cred;
71012
71013 + pax_track_stack();
71014 +
71015 kdebug("commit_creds(%p{%d,%d})", new,
71016 atomic_read(&new->usage),
71017 read_cred_subscribers(new));
71018 @@ -544,6 +560,8 @@ int commit_creds(struct cred *new)
71019
71020 get_cred(new); /* we will require a ref for the subj creds too */
71021
71022 + gr_set_role_label(task, new->uid, new->gid);
71023 +
71024 /* dumpability changes */
71025 if (old->euid != new->euid ||
71026 old->egid != new->egid ||
71027 @@ -563,10 +581,8 @@ int commit_creds(struct cred *new)
71028 key_fsgid_changed(task);
71029
71030 /* do it
71031 - * - What if a process setreuid()'s and this brings the
71032 - * new uid over his NPROC rlimit? We can check this now
71033 - * cheaply with the new uid cache, so if it matters
71034 - * we should be checking for it. -DaveM
71035 + * RLIMIT_NPROC limits on user->processes have already been checked
71036 + * in set_user().
71037 */
71038 alter_cred_subscribers(new, 2);
71039 if (new->user != old->user)
71040 @@ -606,6 +622,8 @@ EXPORT_SYMBOL(commit_creds);
71041 */
71042 void abort_creds(struct cred *new)
71043 {
71044 + pax_track_stack();
71045 +
71046 kdebug("abort_creds(%p{%d,%d})", new,
71047 atomic_read(&new->usage),
71048 read_cred_subscribers(new));
71049 @@ -629,6 +647,8 @@ const struct cred *override_creds(const struct cred *new)
71050 {
71051 const struct cred *old = current->cred;
71052
71053 + pax_track_stack();
71054 +
71055 kdebug("override_creds(%p{%d,%d})", new,
71056 atomic_read(&new->usage),
71057 read_cred_subscribers(new));
71058 @@ -658,6 +678,8 @@ void revert_creds(const struct cred *old)
71059 {
71060 const struct cred *override = current->cred;
71061
71062 + pax_track_stack();
71063 +
71064 kdebug("revert_creds(%p{%d,%d})", old,
71065 atomic_read(&old->usage),
71066 read_cred_subscribers(old));
71067 @@ -704,6 +726,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
71068 const struct cred *old;
71069 struct cred *new;
71070
71071 + pax_track_stack();
71072 +
71073 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
71074 if (!new)
71075 return NULL;
71076 @@ -758,6 +782,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
71077 */
71078 int set_security_override(struct cred *new, u32 secid)
71079 {
71080 + pax_track_stack();
71081 +
71082 return security_kernel_act_as(new, secid);
71083 }
71084 EXPORT_SYMBOL(set_security_override);
71085 @@ -777,6 +803,8 @@ int set_security_override_from_ctx(struct cred *new, const char *secctx)
71086 u32 secid;
71087 int ret;
71088
71089 + pax_track_stack();
71090 +
71091 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
71092 if (ret < 0)
71093 return ret;
71094 diff --git a/kernel/exit.c b/kernel/exit.c
71095 index 0f8fae3..9344a56 100644
71096 --- a/kernel/exit.c
71097 +++ b/kernel/exit.c
71098 @@ -55,6 +55,10 @@
71099 #include <asm/pgtable.h>
71100 #include <asm/mmu_context.h>
71101
71102 +#ifdef CONFIG_GRKERNSEC
71103 +extern rwlock_t grsec_exec_file_lock;
71104 +#endif
71105 +
71106 static void exit_mm(struct task_struct * tsk);
71107
71108 static void __unhash_process(struct task_struct *p)
71109 @@ -174,6 +178,10 @@ void release_task(struct task_struct * p)
71110 struct task_struct *leader;
71111 int zap_leader;
71112 repeat:
71113 +#ifdef CONFIG_NET
71114 + gr_del_task_from_ip_table(p);
71115 +#endif
71116 +
71117 tracehook_prepare_release_task(p);
71118 /* don't need to get the RCU readlock here - the process is dead and
71119 * can't be modifying its own credentials */
71120 @@ -397,7 +405,7 @@ int allow_signal(int sig)
71121 * know it'll be handled, so that they don't get converted to
71122 * SIGKILL or just silently dropped.
71123 */
71124 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
71125 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
71126 recalc_sigpending();
71127 spin_unlock_irq(&current->sighand->siglock);
71128 return 0;
71129 @@ -433,6 +441,17 @@ void daemonize(const char *name, ...)
71130 vsnprintf(current->comm, sizeof(current->comm), name, args);
71131 va_end(args);
71132
71133 +#ifdef CONFIG_GRKERNSEC
71134 + write_lock(&grsec_exec_file_lock);
71135 + if (current->exec_file) {
71136 + fput(current->exec_file);
71137 + current->exec_file = NULL;
71138 + }
71139 + write_unlock(&grsec_exec_file_lock);
71140 +#endif
71141 +
71142 + gr_set_kernel_label(current);
71143 +
71144 /*
71145 * If we were started as result of loading a module, close all of the
71146 * user space pages. We don't need them, and if we didn't close them
71147 @@ -897,17 +916,17 @@ NORET_TYPE void do_exit(long code)
71148 struct task_struct *tsk = current;
71149 int group_dead;
71150
71151 - profile_task_exit(tsk);
71152 -
71153 - WARN_ON(atomic_read(&tsk->fs_excl));
71154 -
71155 + /*
71156 + * Check this first since set_fs() below depends on
71157 + * current_thread_info(), which we better not access when we're in
71158 + * interrupt context. Other than that, we want to do the set_fs()
71159 + * as early as possible.
71160 + */
71161 if (unlikely(in_interrupt()))
71162 panic("Aiee, killing interrupt handler!");
71163 - if (unlikely(!tsk->pid))
71164 - panic("Attempted to kill the idle task!");
71165
71166 /*
71167 - * If do_exit is called because this processes oopsed, it's possible
71168 + * If do_exit is called because this processes Oops'ed, it's possible
71169 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
71170 * continuing. Amongst other possible reasons, this is to prevent
71171 * mm_release()->clear_child_tid() from writing to a user-controlled
71172 @@ -915,6 +934,13 @@ NORET_TYPE void do_exit(long code)
71173 */
71174 set_fs(USER_DS);
71175
71176 + profile_task_exit(tsk);
71177 +
71178 + WARN_ON(atomic_read(&tsk->fs_excl));
71179 +
71180 + if (unlikely(!tsk->pid))
71181 + panic("Attempted to kill the idle task!");
71182 +
71183 tracehook_report_exit(&code);
71184
71185 validate_creds_for_do_exit(tsk);
71186 @@ -973,6 +999,9 @@ NORET_TYPE void do_exit(long code)
71187 tsk->exit_code = code;
71188 taskstats_exit(tsk, group_dead);
71189
71190 + gr_acl_handle_psacct(tsk, code);
71191 + gr_acl_handle_exit();
71192 +
71193 exit_mm(tsk);
71194
71195 if (group_dead)
71196 @@ -1188,7 +1217,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
71197
71198 if (unlikely(wo->wo_flags & WNOWAIT)) {
71199 int exit_code = p->exit_code;
71200 - int why, status;
71201 + int why;
71202
71203 get_task_struct(p);
71204 read_unlock(&tasklist_lock);
71205 diff --git a/kernel/fork.c b/kernel/fork.c
71206 index 4bde56f..29a9bab 100644
71207 --- a/kernel/fork.c
71208 +++ b/kernel/fork.c
71209 @@ -253,7 +253,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
71210 *stackend = STACK_END_MAGIC; /* for overflow detection */
71211
71212 #ifdef CONFIG_CC_STACKPROTECTOR
71213 - tsk->stack_canary = get_random_int();
71214 + tsk->stack_canary = pax_get_random_long();
71215 #endif
71216
71217 /* One for us, one for whoever does the "release_task()" (usually parent) */
71218 @@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
71219 mm->locked_vm = 0;
71220 mm->mmap = NULL;
71221 mm->mmap_cache = NULL;
71222 - mm->free_area_cache = oldmm->mmap_base;
71223 - mm->cached_hole_size = ~0UL;
71224 + mm->free_area_cache = oldmm->free_area_cache;
71225 + mm->cached_hole_size = oldmm->cached_hole_size;
71226 mm->map_count = 0;
71227 cpumask_clear(mm_cpumask(mm));
71228 mm->mm_rb = RB_ROOT;
71229 @@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
71230 tmp->vm_flags &= ~VM_LOCKED;
71231 tmp->vm_mm = mm;
71232 tmp->vm_next = tmp->vm_prev = NULL;
71233 + tmp->vm_mirror = NULL;
71234 anon_vma_link(tmp);
71235 file = tmp->vm_file;
71236 if (file) {
71237 @@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
71238 if (retval)
71239 goto out;
71240 }
71241 +
71242 +#ifdef CONFIG_PAX_SEGMEXEC
71243 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
71244 + struct vm_area_struct *mpnt_m;
71245 +
71246 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
71247 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
71248 +
71249 + if (!mpnt->vm_mirror)
71250 + continue;
71251 +
71252 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
71253 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
71254 + mpnt->vm_mirror = mpnt_m;
71255 + } else {
71256 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
71257 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
71258 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
71259 + mpnt->vm_mirror->vm_mirror = mpnt;
71260 + }
71261 + }
71262 + BUG_ON(mpnt_m);
71263 + }
71264 +#endif
71265 +
71266 /* a new mm has just been created */
71267 arch_dup_mmap(oldmm, mm);
71268 retval = 0;
71269 @@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
71270 write_unlock(&fs->lock);
71271 return -EAGAIN;
71272 }
71273 - fs->users++;
71274 + atomic_inc(&fs->users);
71275 write_unlock(&fs->lock);
71276 return 0;
71277 }
71278 tsk->fs = copy_fs_struct(fs);
71279 if (!tsk->fs)
71280 return -ENOMEM;
71281 + gr_set_chroot_entries(tsk, &tsk->fs->root);
71282 return 0;
71283 }
71284
71285 @@ -1033,12 +1060,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
71286 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
71287 #endif
71288 retval = -EAGAIN;
71289 +
71290 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
71291 +
71292 if (atomic_read(&p->real_cred->user->processes) >=
71293 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
71294 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
71295 - p->real_cred->user != INIT_USER)
71296 + if (p->real_cred->user != INIT_USER &&
71297 + !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
71298 goto bad_fork_free;
71299 }
71300 + current->flags &= ~PF_NPROC_EXCEEDED;
71301
71302 retval = copy_creds(p, clone_flags);
71303 if (retval < 0)
71304 @@ -1183,6 +1214,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
71305 goto bad_fork_free_pid;
71306 }
71307
71308 + gr_copy_label(p);
71309 +
71310 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
71311 /*
71312 * Clear TID on mm_release()?
71313 @@ -1333,6 +1366,8 @@ bad_fork_cleanup_count:
71314 bad_fork_free:
71315 free_task(p);
71316 fork_out:
71317 + gr_log_forkfail(retval);
71318 +
71319 return ERR_PTR(retval);
71320 }
71321
71322 @@ -1426,6 +1461,8 @@ long do_fork(unsigned long clone_flags,
71323 if (clone_flags & CLONE_PARENT_SETTID)
71324 put_user(nr, parent_tidptr);
71325
71326 + gr_handle_brute_check();
71327 +
71328 if (clone_flags & CLONE_VFORK) {
71329 p->vfork_done = &vfork;
71330 init_completion(&vfork);
71331 @@ -1558,7 +1595,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
71332 return 0;
71333
71334 /* don't need lock here; in the worst case we'll do useless copy */
71335 - if (fs->users == 1)
71336 + if (atomic_read(&fs->users) == 1)
71337 return 0;
71338
71339 *new_fsp = copy_fs_struct(fs);
71340 @@ -1681,7 +1718,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
71341 fs = current->fs;
71342 write_lock(&fs->lock);
71343 current->fs = new_fs;
71344 - if (--fs->users)
71345 + gr_set_chroot_entries(current, &current->fs->root);
71346 + if (atomic_dec_return(&fs->users))
71347 new_fs = NULL;
71348 else
71349 new_fs = fs;
71350 diff --git a/kernel/futex.c b/kernel/futex.c
71351 index fb98c9f..333faec 100644
71352 --- a/kernel/futex.c
71353 +++ b/kernel/futex.c
71354 @@ -54,6 +54,7 @@
71355 #include <linux/mount.h>
71356 #include <linux/pagemap.h>
71357 #include <linux/syscalls.h>
71358 +#include <linux/ptrace.h>
71359 #include <linux/signal.h>
71360 #include <linux/module.h>
71361 #include <linux/magic.h>
71362 @@ -223,6 +224,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
71363 struct page *page;
71364 int err, ro = 0;
71365
71366 +#ifdef CONFIG_PAX_SEGMEXEC
71367 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
71368 + return -EFAULT;
71369 +#endif
71370 +
71371 /*
71372 * The futex address must be "naturally" aligned.
71373 */
71374 @@ -1819,6 +1825,8 @@ static int futex_wait(u32 __user *uaddr, int fshared,
71375 struct futex_q q;
71376 int ret;
71377
71378 + pax_track_stack();
71379 +
71380 if (!bitset)
71381 return -EINVAL;
71382
71383 @@ -1871,7 +1879,7 @@ retry:
71384
71385 restart = &current_thread_info()->restart_block;
71386 restart->fn = futex_wait_restart;
71387 - restart->futex.uaddr = (u32 *)uaddr;
71388 + restart->futex.uaddr = uaddr;
71389 restart->futex.val = val;
71390 restart->futex.time = abs_time->tv64;
71391 restart->futex.bitset = bitset;
71392 @@ -2233,6 +2241,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
71393 struct futex_q q;
71394 int res, ret;
71395
71396 + pax_track_stack();
71397 +
71398 if (!bitset)
71399 return -EINVAL;
71400
71401 @@ -2423,6 +2433,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
71402 if (!p)
71403 goto err_unlock;
71404 ret = -EPERM;
71405 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71406 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
71407 + goto err_unlock;
71408 +#endif
71409 pcred = __task_cred(p);
71410 if (cred->euid != pcred->euid &&
71411 cred->euid != pcred->uid &&
71412 @@ -2489,7 +2503,7 @@ retry:
71413 */
71414 static inline int fetch_robust_entry(struct robust_list __user **entry,
71415 struct robust_list __user * __user *head,
71416 - int *pi)
71417 + unsigned int *pi)
71418 {
71419 unsigned long uentry;
71420
71421 @@ -2670,6 +2684,7 @@ static int __init futex_init(void)
71422 {
71423 u32 curval;
71424 int i;
71425 + mm_segment_t oldfs;
71426
71427 /*
71428 * This will fail and we want it. Some arch implementations do
71429 @@ -2681,7 +2696,10 @@ static int __init futex_init(void)
71430 * implementation, the non functional ones will return
71431 * -ENOSYS.
71432 */
71433 + oldfs = get_fs();
71434 + set_fs(USER_DS);
71435 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
71436 + set_fs(oldfs);
71437 if (curval == -EFAULT)
71438 futex_cmpxchg_enabled = 1;
71439
71440 diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
71441 index 2357165..eb25501 100644
71442 --- a/kernel/futex_compat.c
71443 +++ b/kernel/futex_compat.c
71444 @@ -10,6 +10,7 @@
71445 #include <linux/compat.h>
71446 #include <linux/nsproxy.h>
71447 #include <linux/futex.h>
71448 +#include <linux/ptrace.h>
71449
71450 #include <asm/uaccess.h>
71451
71452 @@ -135,7 +136,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
71453 {
71454 struct compat_robust_list_head __user *head;
71455 unsigned long ret;
71456 - const struct cred *cred = current_cred(), *pcred;
71457 + const struct cred *cred = current_cred();
71458 + const struct cred *pcred;
71459
71460 if (!futex_cmpxchg_enabled)
71461 return -ENOSYS;
71462 @@ -151,6 +153,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
71463 if (!p)
71464 goto err_unlock;
71465 ret = -EPERM;
71466 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71467 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
71468 + goto err_unlock;
71469 +#endif
71470 pcred = __task_cred(p);
71471 if (cred->euid != pcred->euid &&
71472 cred->euid != pcred->uid &&
71473 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
71474 index 9b22d03..6295b62 100644
71475 --- a/kernel/gcov/base.c
71476 +++ b/kernel/gcov/base.c
71477 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
71478 }
71479
71480 #ifdef CONFIG_MODULES
71481 -static inline int within(void *addr, void *start, unsigned long size)
71482 -{
71483 - return ((addr >= start) && (addr < start + size));
71484 -}
71485 -
71486 /* Update list and generate events when modules are unloaded. */
71487 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
71488 void *data)
71489 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
71490 prev = NULL;
71491 /* Remove entries located in module from linked list. */
71492 for (info = gcov_info_head; info; info = info->next) {
71493 - if (within(info, mod->module_core, mod->core_size)) {
71494 + if (within_module_core_rw((unsigned long)info, mod)) {
71495 if (prev)
71496 prev->next = info->next;
71497 else
71498 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
71499 index a6e9d00..a0da4f9 100644
71500 --- a/kernel/hrtimer.c
71501 +++ b/kernel/hrtimer.c
71502 @@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
71503 local_irq_restore(flags);
71504 }
71505
71506 -static void run_hrtimer_softirq(struct softirq_action *h)
71507 +static void run_hrtimer_softirq(void)
71508 {
71509 hrtimer_peek_ahead_timers();
71510 }
71511 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
71512 index 8b6b8b6..6bc87df 100644
71513 --- a/kernel/kallsyms.c
71514 +++ b/kernel/kallsyms.c
71515 @@ -11,6 +11,9 @@
71516 * Changed the compression method from stem compression to "table lookup"
71517 * compression (see scripts/kallsyms.c for a more complete description)
71518 */
71519 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71520 +#define __INCLUDED_BY_HIDESYM 1
71521 +#endif
71522 #include <linux/kallsyms.h>
71523 #include <linux/module.h>
71524 #include <linux/init.h>
71525 @@ -51,12 +54,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
71526
71527 static inline int is_kernel_inittext(unsigned long addr)
71528 {
71529 + if (system_state != SYSTEM_BOOTING)
71530 + return 0;
71531 +
71532 if (addr >= (unsigned long)_sinittext
71533 && addr <= (unsigned long)_einittext)
71534 return 1;
71535 return 0;
71536 }
71537
71538 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71539 +#ifdef CONFIG_MODULES
71540 +static inline int is_module_text(unsigned long addr)
71541 +{
71542 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
71543 + return 1;
71544 +
71545 + addr = ktla_ktva(addr);
71546 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
71547 +}
71548 +#else
71549 +static inline int is_module_text(unsigned long addr)
71550 +{
71551 + return 0;
71552 +}
71553 +#endif
71554 +#endif
71555 +
71556 static inline int is_kernel_text(unsigned long addr)
71557 {
71558 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
71559 @@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigned long addr)
71560
71561 static inline int is_kernel(unsigned long addr)
71562 {
71563 +
71564 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71565 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
71566 + return 1;
71567 +
71568 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
71569 +#else
71570 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
71571 +#endif
71572 +
71573 return 1;
71574 return in_gate_area_no_task(addr);
71575 }
71576
71577 static int is_ksym_addr(unsigned long addr)
71578 {
71579 +
71580 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71581 + if (is_module_text(addr))
71582 + return 0;
71583 +#endif
71584 +
71585 if (all_var)
71586 return is_kernel(addr);
71587
71588 @@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
71589
71590 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
71591 {
71592 - iter->name[0] = '\0';
71593 iter->nameoff = get_symbol_offset(new_pos);
71594 iter->pos = new_pos;
71595 }
71596 @@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, void *p)
71597 {
71598 struct kallsym_iter *iter = m->private;
71599
71600 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71601 + if (current_uid())
71602 + return 0;
71603 +#endif
71604 +
71605 /* Some debugging symbols have no name. Ignore them. */
71606 if (!iter->name[0])
71607 return 0;
71608 @@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
71609 struct kallsym_iter *iter;
71610 int ret;
71611
71612 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
71613 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
71614 if (!iter)
71615 return -ENOMEM;
71616 reset_iter(iter, 0);
71617 diff --git a/kernel/kexec.c b/kernel/kexec.c
71618 index f336e21..9c1c20b 100644
71619 --- a/kernel/kexec.c
71620 +++ b/kernel/kexec.c
71621 @@ -1028,7 +1028,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
71622 unsigned long flags)
71623 {
71624 struct compat_kexec_segment in;
71625 - struct kexec_segment out, __user *ksegments;
71626 + struct kexec_segment out;
71627 + struct kexec_segment __user *ksegments;
71628 unsigned long i, result;
71629
71630 /* Don't allow clients that don't understand the native
71631 diff --git a/kernel/kgdb.c b/kernel/kgdb.c
71632 index 53dae4b..9ba3743 100644
71633 --- a/kernel/kgdb.c
71634 +++ b/kernel/kgdb.c
71635 @@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
71636 /* Guard for recursive entry */
71637 static int exception_level;
71638
71639 -static struct kgdb_io *kgdb_io_ops;
71640 +static const struct kgdb_io *kgdb_io_ops;
71641 static DEFINE_SPINLOCK(kgdb_registration_lock);
71642
71643 /* kgdb console driver is loaded */
71644 @@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1);
71645 */
71646 static atomic_t passive_cpu_wait[NR_CPUS];
71647 static atomic_t cpu_in_kgdb[NR_CPUS];
71648 -atomic_t kgdb_setting_breakpoint;
71649 +atomic_unchecked_t kgdb_setting_breakpoint;
71650
71651 struct task_struct *kgdb_usethread;
71652 struct task_struct *kgdb_contthread;
71653 @@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBYTES +
71654 sizeof(unsigned long)];
71655
71656 /* to keep track of the CPU which is doing the single stepping*/
71657 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
71658 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
71659
71660 /*
71661 * If you are debugging a problem where roundup (the collection of
71662 @@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
71663 return 0;
71664 if (kgdb_connected)
71665 return 1;
71666 - if (atomic_read(&kgdb_setting_breakpoint))
71667 + if (atomic_read_unchecked(&kgdb_setting_breakpoint))
71668 return 1;
71669 if (print_wait)
71670 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
71671 @@ -1426,8 +1426,8 @@ acquirelock:
71672 * instance of the exception handler wanted to come into the
71673 * debugger on a different CPU via a single step
71674 */
71675 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
71676 - atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
71677 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
71678 + atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
71679
71680 atomic_set(&kgdb_active, -1);
71681 touch_softlockup_watchdog();
71682 @@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void)
71683 *
71684 * Register it with the KGDB core.
71685 */
71686 -int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
71687 +int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
71688 {
71689 int err;
71690
71691 @@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_module);
71692 *
71693 * Unregister it with the KGDB core.
71694 */
71695 -void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
71696 +void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
71697 {
71698 BUG_ON(kgdb_connected);
71699
71700 @@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
71701 */
71702 void kgdb_breakpoint(void)
71703 {
71704 - atomic_set(&kgdb_setting_breakpoint, 1);
71705 + atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
71706 wmb(); /* Sync point before breakpoint */
71707 arch_kgdb_breakpoint();
71708 wmb(); /* Sync point after breakpoint */
71709 - atomic_set(&kgdb_setting_breakpoint, 0);
71710 + atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
71711 }
71712 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
71713
71714 diff --git a/kernel/kmod.c b/kernel/kmod.c
71715 index d206078..e27ba6a 100644
71716 --- a/kernel/kmod.c
71717 +++ b/kernel/kmod.c
71718 @@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
71719 * If module auto-loading support is disabled then this function
71720 * becomes a no-operation.
71721 */
71722 -int __request_module(bool wait, const char *fmt, ...)
71723 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
71724 {
71725 - va_list args;
71726 char module_name[MODULE_NAME_LEN];
71727 unsigned int max_modprobes;
71728 int ret;
71729 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
71730 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
71731 static char *envp[] = { "HOME=/",
71732 "TERM=linux",
71733 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
71734 @@ -84,12 +83,24 @@ int __request_module(bool wait, const char *fmt, ...)
71735 if (ret)
71736 return ret;
71737
71738 - va_start(args, fmt);
71739 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
71740 - va_end(args);
71741 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
71742 if (ret >= MODULE_NAME_LEN)
71743 return -ENAMETOOLONG;
71744
71745 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
71746 + if (!current_uid()) {
71747 + /* hack to workaround consolekit/udisks stupidity */
71748 + read_lock(&tasklist_lock);
71749 + if (!strcmp(current->comm, "mount") &&
71750 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
71751 + read_unlock(&tasklist_lock);
71752 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
71753 + return -EPERM;
71754 + }
71755 + read_unlock(&tasklist_lock);
71756 + }
71757 +#endif
71758 +
71759 /* If modprobe needs a service that is in a module, we get a recursive
71760 * loop. Limit the number of running kmod threads to max_threads/2 or
71761 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
71762 @@ -123,6 +134,48 @@ int __request_module(bool wait, const char *fmt, ...)
71763 atomic_dec(&kmod_concurrent);
71764 return ret;
71765 }
71766 +
71767 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
71768 +{
71769 + va_list args;
71770 + int ret;
71771 +
71772 + va_start(args, fmt);
71773 + ret = ____request_module(wait, module_param, fmt, args);
71774 + va_end(args);
71775 +
71776 + return ret;
71777 +}
71778 +
71779 +int __request_module(bool wait, const char *fmt, ...)
71780 +{
71781 + va_list args;
71782 + int ret;
71783 +
71784 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
71785 + if (current_uid()) {
71786 + char module_param[MODULE_NAME_LEN];
71787 +
71788 + memset(module_param, 0, sizeof(module_param));
71789 +
71790 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
71791 +
71792 + va_start(args, fmt);
71793 + ret = ____request_module(wait, module_param, fmt, args);
71794 + va_end(args);
71795 +
71796 + return ret;
71797 + }
71798 +#endif
71799 +
71800 + va_start(args, fmt);
71801 + ret = ____request_module(wait, NULL, fmt, args);
71802 + va_end(args);
71803 +
71804 + return ret;
71805 +}
71806 +
71807 +
71808 EXPORT_SYMBOL(__request_module);
71809 #endif /* CONFIG_MODULES */
71810
71811 @@ -228,7 +281,7 @@ static int wait_for_helper(void *data)
71812 *
71813 * Thus the __user pointer cast is valid here.
71814 */
71815 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
71816 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
71817
71818 /*
71819 * If ret is 0, either ____call_usermodehelper failed and the
71820 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
71821 index 5240d75..5a6fb33 100644
71822 --- a/kernel/kprobes.c
71823 +++ b/kernel/kprobes.c
71824 @@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void)
71825 * kernel image and loaded module images reside. This is required
71826 * so x86_64 can correctly handle the %rip-relative fixups.
71827 */
71828 - kip->insns = module_alloc(PAGE_SIZE);
71829 + kip->insns = module_alloc_exec(PAGE_SIZE);
71830 if (!kip->insns) {
71831 kfree(kip);
71832 return NULL;
71833 @@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
71834 */
71835 if (!list_is_singular(&kprobe_insn_pages)) {
71836 list_del(&kip->list);
71837 - module_free(NULL, kip->insns);
71838 + module_free_exec(NULL, kip->insns);
71839 kfree(kip);
71840 }
71841 return 1;
71842 @@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
71843 {
71844 int i, err = 0;
71845 unsigned long offset = 0, size = 0;
71846 - char *modname, namebuf[128];
71847 + char *modname, namebuf[KSYM_NAME_LEN];
71848 const char *symbol_name;
71849 void *addr;
71850 struct kprobe_blackpoint *kb;
71851 @@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
71852 const char *sym = NULL;
71853 unsigned int i = *(loff_t *) v;
71854 unsigned long offset = 0;
71855 - char *modname, namebuf[128];
71856 + char *modname, namebuf[KSYM_NAME_LEN];
71857
71858 head = &kprobe_table[i];
71859 preempt_disable();
71860 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
71861 index d86fe89..d12fc66 100644
71862 --- a/kernel/lockdep.c
71863 +++ b/kernel/lockdep.c
71864 @@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_trace = {
71865 /*
71866 * Various lockdep statistics:
71867 */
71868 -atomic_t chain_lookup_hits;
71869 -atomic_t chain_lookup_misses;
71870 -atomic_t hardirqs_on_events;
71871 -atomic_t hardirqs_off_events;
71872 -atomic_t redundant_hardirqs_on;
71873 -atomic_t redundant_hardirqs_off;
71874 -atomic_t softirqs_on_events;
71875 -atomic_t softirqs_off_events;
71876 -atomic_t redundant_softirqs_on;
71877 -atomic_t redundant_softirqs_off;
71878 -atomic_t nr_unused_locks;
71879 -atomic_t nr_cyclic_checks;
71880 -atomic_t nr_find_usage_forwards_checks;
71881 -atomic_t nr_find_usage_backwards_checks;
71882 +atomic_unchecked_t chain_lookup_hits;
71883 +atomic_unchecked_t chain_lookup_misses;
71884 +atomic_unchecked_t hardirqs_on_events;
71885 +atomic_unchecked_t hardirqs_off_events;
71886 +atomic_unchecked_t redundant_hardirqs_on;
71887 +atomic_unchecked_t redundant_hardirqs_off;
71888 +atomic_unchecked_t softirqs_on_events;
71889 +atomic_unchecked_t softirqs_off_events;
71890 +atomic_unchecked_t redundant_softirqs_on;
71891 +atomic_unchecked_t redundant_softirqs_off;
71892 +atomic_unchecked_t nr_unused_locks;
71893 +atomic_unchecked_t nr_cyclic_checks;
71894 +atomic_unchecked_t nr_find_usage_forwards_checks;
71895 +atomic_unchecked_t nr_find_usage_backwards_checks;
71896 #endif
71897
71898 /*
71899 @@ -577,6 +577,10 @@ static int static_obj(void *obj)
71900 int i;
71901 #endif
71902
71903 +#ifdef CONFIG_PAX_KERNEXEC
71904 + start = ktla_ktva(start);
71905 +#endif
71906 +
71907 /*
71908 * static variable?
71909 */
71910 @@ -592,8 +596,7 @@ static int static_obj(void *obj)
71911 */
71912 for_each_possible_cpu(i) {
71913 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
71914 - end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
71915 - + per_cpu_offset(i);
71916 + end = start + PERCPU_ENOUGH_ROOM;
71917
71918 if ((addr >= start) && (addr < end))
71919 return 1;
71920 @@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
71921 if (!static_obj(lock->key)) {
71922 debug_locks_off();
71923 printk("INFO: trying to register non-static key.\n");
71924 + printk("lock:%pS key:%pS.\n", lock, lock->key);
71925 printk("the code is fine but needs lockdep annotation.\n");
71926 printk("turning off the locking correctness validator.\n");
71927 dump_stack();
71928 @@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
71929 if (!class)
71930 return 0;
71931 }
71932 - debug_atomic_inc((atomic_t *)&class->ops);
71933 + debug_atomic_inc((atomic_unchecked_t *)&class->ops);
71934 if (very_verbose(class)) {
71935 printk("\nacquire class [%p] %s", class->key, class->name);
71936 if (class->name_version > 1)
71937 diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
71938 index a2ee95a..092f0f2 100644
71939 --- a/kernel/lockdep_internals.h
71940 +++ b/kernel/lockdep_internals.h
71941 @@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_class *class)
71942 /*
71943 * Various lockdep statistics:
71944 */
71945 -extern atomic_t chain_lookup_hits;
71946 -extern atomic_t chain_lookup_misses;
71947 -extern atomic_t hardirqs_on_events;
71948 -extern atomic_t hardirqs_off_events;
71949 -extern atomic_t redundant_hardirqs_on;
71950 -extern atomic_t redundant_hardirqs_off;
71951 -extern atomic_t softirqs_on_events;
71952 -extern atomic_t softirqs_off_events;
71953 -extern atomic_t redundant_softirqs_on;
71954 -extern atomic_t redundant_softirqs_off;
71955 -extern atomic_t nr_unused_locks;
71956 -extern atomic_t nr_cyclic_checks;
71957 -extern atomic_t nr_cyclic_check_recursions;
71958 -extern atomic_t nr_find_usage_forwards_checks;
71959 -extern atomic_t nr_find_usage_forwards_recursions;
71960 -extern atomic_t nr_find_usage_backwards_checks;
71961 -extern atomic_t nr_find_usage_backwards_recursions;
71962 -# define debug_atomic_inc(ptr) atomic_inc(ptr)
71963 -# define debug_atomic_dec(ptr) atomic_dec(ptr)
71964 -# define debug_atomic_read(ptr) atomic_read(ptr)
71965 +extern atomic_unchecked_t chain_lookup_hits;
71966 +extern atomic_unchecked_t chain_lookup_misses;
71967 +extern atomic_unchecked_t hardirqs_on_events;
71968 +extern atomic_unchecked_t hardirqs_off_events;
71969 +extern atomic_unchecked_t redundant_hardirqs_on;
71970 +extern atomic_unchecked_t redundant_hardirqs_off;
71971 +extern atomic_unchecked_t softirqs_on_events;
71972 +extern atomic_unchecked_t softirqs_off_events;
71973 +extern atomic_unchecked_t redundant_softirqs_on;
71974 +extern atomic_unchecked_t redundant_softirqs_off;
71975 +extern atomic_unchecked_t nr_unused_locks;
71976 +extern atomic_unchecked_t nr_cyclic_checks;
71977 +extern atomic_unchecked_t nr_cyclic_check_recursions;
71978 +extern atomic_unchecked_t nr_find_usage_forwards_checks;
71979 +extern atomic_unchecked_t nr_find_usage_forwards_recursions;
71980 +extern atomic_unchecked_t nr_find_usage_backwards_checks;
71981 +extern atomic_unchecked_t nr_find_usage_backwards_recursions;
71982 +# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
71983 +# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
71984 +# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
71985 #else
71986 # define debug_atomic_inc(ptr) do { } while (0)
71987 # define debug_atomic_dec(ptr) do { } while (0)
71988 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
71989 index d4aba4f..02a353f 100644
71990 --- a/kernel/lockdep_proc.c
71991 +++ b/kernel/lockdep_proc.c
71992 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
71993
71994 static void print_name(struct seq_file *m, struct lock_class *class)
71995 {
71996 - char str[128];
71997 + char str[KSYM_NAME_LEN];
71998 const char *name = class->name;
71999
72000 if (!name) {
72001 diff --git a/kernel/module.c b/kernel/module.c
72002 index 4b270e6..2226274 100644
72003 --- a/kernel/module.c
72004 +++ b/kernel/module.c
72005 @@ -55,6 +55,7 @@
72006 #include <linux/async.h>
72007 #include <linux/percpu.h>
72008 #include <linux/kmemleak.h>
72009 +#include <linux/grsecurity.h>
72010
72011 #define CREATE_TRACE_POINTS
72012 #include <trace/events/module.h>
72013 @@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq);
72014 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
72015
72016 /* Bounds of module allocation, for speeding __module_address */
72017 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
72018 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
72019 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
72020
72021 int register_module_notifier(struct notifier_block * nb)
72022 {
72023 @@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
72024 return true;
72025
72026 list_for_each_entry_rcu(mod, &modules, list) {
72027 - struct symsearch arr[] = {
72028 + struct symsearch modarr[] = {
72029 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
72030 NOT_GPL_ONLY, false },
72031 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
72032 @@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
72033 #endif
72034 };
72035
72036 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
72037 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
72038 return true;
72039 }
72040 return false;
72041 @@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned long size, unsigned long align,
72042 void *ptr;
72043 int cpu;
72044
72045 - if (align > PAGE_SIZE) {
72046 + if (align-1 >= PAGE_SIZE) {
72047 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
72048 name, align, PAGE_SIZE);
72049 align = PAGE_SIZE;
72050 @@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
72051 * /sys/module/foo/sections stuff
72052 * J. Corbet <corbet@lwn.net>
72053 */
72054 -#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
72055 +#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
72056
72057 static inline bool sect_empty(const Elf_Shdr *sect)
72058 {
72059 @@ -1545,7 +1547,8 @@ static void free_module(struct module *mod)
72060 destroy_params(mod->kp, mod->num_kp);
72061
72062 /* This may be NULL, but that's OK */
72063 - module_free(mod, mod->module_init);
72064 + module_free(mod, mod->module_init_rw);
72065 + module_free_exec(mod, mod->module_init_rx);
72066 kfree(mod->args);
72067 if (mod->percpu)
72068 percpu_modfree(mod->percpu);
72069 @@ -1554,10 +1557,12 @@ static void free_module(struct module *mod)
72070 percpu_modfree(mod->refptr);
72071 #endif
72072 /* Free lock-classes: */
72073 - lockdep_free_key_range(mod->module_core, mod->core_size);
72074 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
72075 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
72076
72077 /* Finally, free the core (containing the module structure) */
72078 - module_free(mod, mod->module_core);
72079 + module_free_exec(mod, mod->module_core_rx);
72080 + module_free(mod, mod->module_core_rw);
72081
72082 #ifdef CONFIG_MPU
72083 update_protections(current->mm);
72084 @@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
72085 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
72086 int ret = 0;
72087 const struct kernel_symbol *ksym;
72088 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
72089 + int is_fs_load = 0;
72090 + int register_filesystem_found = 0;
72091 + char *p;
72092 +
72093 + p = strstr(mod->args, "grsec_modharden_fs");
72094 +
72095 + if (p) {
72096 + char *endptr = p + strlen("grsec_modharden_fs");
72097 + /* copy \0 as well */
72098 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
72099 + is_fs_load = 1;
72100 + }
72101 +#endif
72102 +
72103
72104 for (i = 1; i < n; i++) {
72105 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
72106 + const char *name = strtab + sym[i].st_name;
72107 +
72108 + /* it's a real shame this will never get ripped and copied
72109 + upstream! ;(
72110 + */
72111 + if (is_fs_load && !strcmp(name, "register_filesystem"))
72112 + register_filesystem_found = 1;
72113 +#endif
72114 switch (sym[i].st_shndx) {
72115 case SHN_COMMON:
72116 /* We compiled with -fno-common. These are not
72117 @@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
72118 strtab + sym[i].st_name, mod);
72119 /* Ok if resolved. */
72120 if (ksym) {
72121 + pax_open_kernel();
72122 sym[i].st_value = ksym->value;
72123 + pax_close_kernel();
72124 break;
72125 }
72126
72127 @@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
72128 secbase = (unsigned long)mod->percpu;
72129 else
72130 secbase = sechdrs[sym[i].st_shndx].sh_addr;
72131 + pax_open_kernel();
72132 sym[i].st_value += secbase;
72133 + pax_close_kernel();
72134 break;
72135 }
72136 }
72137
72138 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
72139 + if (is_fs_load && !register_filesystem_found) {
72140 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
72141 + ret = -EPERM;
72142 + }
72143 +#endif
72144 +
72145 return ret;
72146 }
72147
72148 @@ -1731,11 +1771,12 @@ static void layout_sections(struct module *mod,
72149 || s->sh_entsize != ~0UL
72150 || strstarts(secstrings + s->sh_name, ".init"))
72151 continue;
72152 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
72153 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
72154 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
72155 + else
72156 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
72157 DEBUGP("\t%s\n", secstrings + s->sh_name);
72158 }
72159 - if (m == 0)
72160 - mod->core_text_size = mod->core_size;
72161 }
72162
72163 DEBUGP("Init section allocation order:\n");
72164 @@ -1748,12 +1789,13 @@ static void layout_sections(struct module *mod,
72165 || s->sh_entsize != ~0UL
72166 || !strstarts(secstrings + s->sh_name, ".init"))
72167 continue;
72168 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
72169 - | INIT_OFFSET_MASK);
72170 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
72171 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
72172 + else
72173 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
72174 + s->sh_entsize |= INIT_OFFSET_MASK;
72175 DEBUGP("\t%s\n", secstrings + s->sh_name);
72176 }
72177 - if (m == 0)
72178 - mod->init_text_size = mod->init_size;
72179 }
72180 }
72181
72182 @@ -1857,9 +1899,8 @@ static int is_exported(const char *name, unsigned long value,
72183
72184 /* As per nm */
72185 static char elf_type(const Elf_Sym *sym,
72186 - Elf_Shdr *sechdrs,
72187 - const char *secstrings,
72188 - struct module *mod)
72189 + const Elf_Shdr *sechdrs,
72190 + const char *secstrings)
72191 {
72192 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
72193 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
72194 @@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struct module *mod,
72195
72196 /* Put symbol section at end of init part of module. */
72197 symsect->sh_flags |= SHF_ALLOC;
72198 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
72199 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
72200 symindex) | INIT_OFFSET_MASK;
72201 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
72202
72203 @@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struct module *mod,
72204 }
72205
72206 /* Append room for core symbols at end of core part. */
72207 - symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
72208 - mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
72209 + symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
72210 + mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
72211
72212 /* Put string table section at end of init part of module. */
72213 strsect->sh_flags |= SHF_ALLOC;
72214 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
72215 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
72216 strindex) | INIT_OFFSET_MASK;
72217 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
72218
72219 /* Append room for core symbols' strings at end of core part. */
72220 - *pstroffs = mod->core_size;
72221 + *pstroffs = mod->core_size_rx;
72222 __set_bit(0, strmap);
72223 - mod->core_size += bitmap_weight(strmap, strsect->sh_size);
72224 + mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
72225
72226 return symoffs;
72227 }
72228 @@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *mod,
72229 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
72230 mod->strtab = (void *)sechdrs[strindex].sh_addr;
72231
72232 + pax_open_kernel();
72233 +
72234 /* Set types up while we still have access to sections. */
72235 for (i = 0; i < mod->num_symtab; i++)
72236 mod->symtab[i].st_info
72237 - = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
72238 + = elf_type(&mod->symtab[i], sechdrs, secstrings);
72239
72240 - mod->core_symtab = dst = mod->module_core + symoffs;
72241 + mod->core_symtab = dst = mod->module_core_rx + symoffs;
72242 src = mod->symtab;
72243 *dst = *src;
72244 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
72245 @@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *mod,
72246 }
72247 mod->core_num_syms = ndst;
72248
72249 - mod->core_strtab = s = mod->module_core + stroffs;
72250 + mod->core_strtab = s = mod->module_core_rx + stroffs;
72251 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
72252 if (test_bit(i, strmap))
72253 *++s = mod->strtab[i];
72254 +
72255 + pax_close_kernel();
72256 }
72257 #else
72258 static inline unsigned long layout_symtab(struct module *mod,
72259 @@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
72260 #endif
72261 }
72262
72263 -static void *module_alloc_update_bounds(unsigned long size)
72264 +static void *module_alloc_update_bounds_rw(unsigned long size)
72265 {
72266 void *ret = module_alloc(size);
72267
72268 if (ret) {
72269 /* Update module bounds. */
72270 - if ((unsigned long)ret < module_addr_min)
72271 - module_addr_min = (unsigned long)ret;
72272 - if ((unsigned long)ret + size > module_addr_max)
72273 - module_addr_max = (unsigned long)ret + size;
72274 + if ((unsigned long)ret < module_addr_min_rw)
72275 + module_addr_min_rw = (unsigned long)ret;
72276 + if ((unsigned long)ret + size > module_addr_max_rw)
72277 + module_addr_max_rw = (unsigned long)ret + size;
72278 + }
72279 + return ret;
72280 +}
72281 +
72282 +static void *module_alloc_update_bounds_rx(unsigned long size)
72283 +{
72284 + void *ret = module_alloc_exec(size);
72285 +
72286 + if (ret) {
72287 + /* Update module bounds. */
72288 + if ((unsigned long)ret < module_addr_min_rx)
72289 + module_addr_min_rx = (unsigned long)ret;
72290 + if ((unsigned long)ret + size > module_addr_max_rx)
72291 + module_addr_max_rx = (unsigned long)ret + size;
72292 }
72293 return ret;
72294 }
72295 @@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
72296 unsigned int i;
72297
72298 /* only scan the sections containing data */
72299 - kmemleak_scan_area(mod->module_core, (unsigned long)mod -
72300 - (unsigned long)mod->module_core,
72301 + kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
72302 + (unsigned long)mod->module_core_rw,
72303 sizeof(struct module), GFP_KERNEL);
72304
72305 for (i = 1; i < hdr->e_shnum; i++) {
72306 @@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
72307 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
72308 continue;
72309
72310 - kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
72311 - (unsigned long)mod->module_core,
72312 + kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
72313 + (unsigned long)mod->module_core_rw,
72314 sechdrs[i].sh_size, GFP_KERNEL);
72315 }
72316 }
72317 @@ -2097,7 +2156,7 @@ static noinline struct module *load_module(void __user *umod,
72318 Elf_Ehdr *hdr;
72319 Elf_Shdr *sechdrs;
72320 char *secstrings, *args, *modmagic, *strtab = NULL;
72321 - char *staging;
72322 + char *staging, *license;
72323 unsigned int i;
72324 unsigned int symindex = 0;
72325 unsigned int strindex = 0;
72326 @@ -2195,6 +2254,14 @@ static noinline struct module *load_module(void __user *umod,
72327 goto free_hdr;
72328 }
72329
72330 + license = get_modinfo(sechdrs, infoindex, "license");
72331 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
72332 + if (!license || !license_is_gpl_compatible(license)) {
72333 + err = -ENOEXEC;
72334 + goto free_hdr;
72335 + }
72336 +#endif
72337 +
72338 modmagic = get_modinfo(sechdrs, infoindex, "vermagic");
72339 /* This is allowed: modprobe --force will invalidate it. */
72340 if (!modmagic) {
72341 @@ -2263,7 +2330,7 @@ static noinline struct module *load_module(void __user *umod,
72342 secstrings, &stroffs, strmap);
72343
72344 /* Do the allocs. */
72345 - ptr = module_alloc_update_bounds(mod->core_size);
72346 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
72347 /*
72348 * The pointer to this block is stored in the module structure
72349 * which is inside the block. Just mark it as not being a
72350 @@ -2274,23 +2341,47 @@ static noinline struct module *load_module(void __user *umod,
72351 err = -ENOMEM;
72352 goto free_percpu;
72353 }
72354 - memset(ptr, 0, mod->core_size);
72355 - mod->module_core = ptr;
72356 + memset(ptr, 0, mod->core_size_rw);
72357 + mod->module_core_rw = ptr;
72358
72359 - ptr = module_alloc_update_bounds(mod->init_size);
72360 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
72361 /*
72362 * The pointer to this block is stored in the module structure
72363 * which is inside the block. This block doesn't need to be
72364 * scanned as it contains data and code that will be freed
72365 * after the module is initialized.
72366 */
72367 - kmemleak_ignore(ptr);
72368 - if (!ptr && mod->init_size) {
72369 + kmemleak_not_leak(ptr);
72370 + if (!ptr && mod->init_size_rw) {
72371 err = -ENOMEM;
72372 - goto free_core;
72373 + goto free_core_rw;
72374 }
72375 - memset(ptr, 0, mod->init_size);
72376 - mod->module_init = ptr;
72377 + memset(ptr, 0, mod->init_size_rw);
72378 + mod->module_init_rw = ptr;
72379 +
72380 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
72381 + kmemleak_not_leak(ptr);
72382 + if (!ptr) {
72383 + err = -ENOMEM;
72384 + goto free_init_rw;
72385 + }
72386 +
72387 + pax_open_kernel();
72388 + memset(ptr, 0, mod->core_size_rx);
72389 + pax_close_kernel();
72390 + mod->module_core_rx = ptr;
72391 +
72392 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
72393 + kmemleak_not_leak(ptr);
72394 + if (!ptr && mod->init_size_rx) {
72395 + err = -ENOMEM;
72396 + goto free_core_rx;
72397 + }
72398 +
72399 + pax_open_kernel();
72400 + memset(ptr, 0, mod->init_size_rx);
72401 + pax_close_kernel();
72402 + mod->module_init_rx = ptr;
72403
72404 /* Transfer each section which specifies SHF_ALLOC */
72405 DEBUGP("final section addresses:\n");
72406 @@ -2300,17 +2391,45 @@ static noinline struct module *load_module(void __user *umod,
72407 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
72408 continue;
72409
72410 - if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
72411 - dest = mod->module_init
72412 - + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
72413 - else
72414 - dest = mod->module_core + sechdrs[i].sh_entsize;
72415 + if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
72416 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
72417 + dest = mod->module_init_rw
72418 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
72419 + else
72420 + dest = mod->module_init_rx
72421 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
72422 + } else {
72423 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
72424 + dest = mod->module_core_rw + sechdrs[i].sh_entsize;
72425 + else
72426 + dest = mod->module_core_rx + sechdrs[i].sh_entsize;
72427 + }
72428
72429 - if (sechdrs[i].sh_type != SHT_NOBITS)
72430 - memcpy(dest, (void *)sechdrs[i].sh_addr,
72431 - sechdrs[i].sh_size);
72432 + if (sechdrs[i].sh_type != SHT_NOBITS) {
72433 +
72434 +#ifdef CONFIG_PAX_KERNEXEC
72435 +#ifdef CONFIG_X86_64
72436 + if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
72437 + set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
72438 +#endif
72439 + if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
72440 + pax_open_kernel();
72441 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
72442 + pax_close_kernel();
72443 + } else
72444 +#endif
72445 +
72446 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
72447 + }
72448 /* Update sh_addr to point to copy in image. */
72449 - sechdrs[i].sh_addr = (unsigned long)dest;
72450 +
72451 +#ifdef CONFIG_PAX_KERNEXEC
72452 + if (sechdrs[i].sh_flags & SHF_EXECINSTR)
72453 + sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
72454 + else
72455 +#endif
72456 +
72457 + sechdrs[i].sh_addr = (unsigned long)dest;
72458 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
72459 }
72460 /* Module has been moved. */
72461 @@ -2322,7 +2441,7 @@ static noinline struct module *load_module(void __user *umod,
72462 mod->name);
72463 if (!mod->refptr) {
72464 err = -ENOMEM;
72465 - goto free_init;
72466 + goto free_init_rx;
72467 }
72468 #endif
72469 /* Now we've moved module, initialize linked lists, etc. */
72470 @@ -2334,7 +2453,7 @@ static noinline struct module *load_module(void __user *umod,
72471 goto free_unload;
72472
72473 /* Set up license info based on the info section */
72474 - set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
72475 + set_license(mod, license);
72476
72477 /*
72478 * ndiswrapper is under GPL by itself, but loads proprietary modules.
72479 @@ -2351,6 +2470,31 @@ static noinline struct module *load_module(void __user *umod,
72480 /* Set up MODINFO_ATTR fields */
72481 setup_modinfo(mod, sechdrs, infoindex);
72482
72483 + mod->args = args;
72484 +
72485 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
72486 + {
72487 + char *p, *p2;
72488 +
72489 + if (strstr(mod->args, "grsec_modharden_netdev")) {
72490 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
72491 + err = -EPERM;
72492 + goto cleanup;
72493 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
72494 + p += strlen("grsec_modharden_normal");
72495 + p2 = strstr(p, "_");
72496 + if (p2) {
72497 + *p2 = '\0';
72498 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
72499 + *p2 = '_';
72500 + }
72501 + err = -EPERM;
72502 + goto cleanup;
72503 + }
72504 + }
72505 +#endif
72506 +
72507 +
72508 /* Fix up syms, so that st_value is a pointer to location. */
72509 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
72510 mod);
72511 @@ -2431,8 +2575,8 @@ static noinline struct module *load_module(void __user *umod,
72512
72513 /* Now do relocations. */
72514 for (i = 1; i < hdr->e_shnum; i++) {
72515 - const char *strtab = (char *)sechdrs[strindex].sh_addr;
72516 unsigned int info = sechdrs[i].sh_info;
72517 + strtab = (char *)sechdrs[strindex].sh_addr;
72518
72519 /* Not a valid relocation section? */
72520 if (info >= hdr->e_shnum)
72521 @@ -2493,16 +2637,15 @@ static noinline struct module *load_module(void __user *umod,
72522 * Do it before processing of module parameters, so the module
72523 * can provide parameter accessor functions of its own.
72524 */
72525 - if (mod->module_init)
72526 - flush_icache_range((unsigned long)mod->module_init,
72527 - (unsigned long)mod->module_init
72528 - + mod->init_size);
72529 - flush_icache_range((unsigned long)mod->module_core,
72530 - (unsigned long)mod->module_core + mod->core_size);
72531 + if (mod->module_init_rx)
72532 + flush_icache_range((unsigned long)mod->module_init_rx,
72533 + (unsigned long)mod->module_init_rx
72534 + + mod->init_size_rx);
72535 + flush_icache_range((unsigned long)mod->module_core_rx,
72536 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
72537
72538 set_fs(old_fs);
72539
72540 - mod->args = args;
72541 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
72542 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
72543 mod->name);
72544 @@ -2546,12 +2689,16 @@ static noinline struct module *load_module(void __user *umod,
72545 free_unload:
72546 module_unload_free(mod);
72547 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
72548 + free_init_rx:
72549 percpu_modfree(mod->refptr);
72550 - free_init:
72551 #endif
72552 - module_free(mod, mod->module_init);
72553 - free_core:
72554 - module_free(mod, mod->module_core);
72555 + module_free_exec(mod, mod->module_init_rx);
72556 + free_core_rx:
72557 + module_free_exec(mod, mod->module_core_rx);
72558 + free_init_rw:
72559 + module_free(mod, mod->module_init_rw);
72560 + free_core_rw:
72561 + module_free(mod, mod->module_core_rw);
72562 /* mod will be freed with core. Don't access it beyond this line! */
72563 free_percpu:
72564 if (percpu)
72565 @@ -2653,10 +2800,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
72566 mod->symtab = mod->core_symtab;
72567 mod->strtab = mod->core_strtab;
72568 #endif
72569 - module_free(mod, mod->module_init);
72570 - mod->module_init = NULL;
72571 - mod->init_size = 0;
72572 - mod->init_text_size = 0;
72573 + module_free(mod, mod->module_init_rw);
72574 + module_free_exec(mod, mod->module_init_rx);
72575 + mod->module_init_rw = NULL;
72576 + mod->module_init_rx = NULL;
72577 + mod->init_size_rw = 0;
72578 + mod->init_size_rx = 0;
72579 mutex_unlock(&module_mutex);
72580
72581 return 0;
72582 @@ -2687,10 +2836,16 @@ static const char *get_ksymbol(struct module *mod,
72583 unsigned long nextval;
72584
72585 /* At worse, next value is at end of module */
72586 - if (within_module_init(addr, mod))
72587 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
72588 + if (within_module_init_rx(addr, mod))
72589 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
72590 + else if (within_module_init_rw(addr, mod))
72591 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
72592 + else if (within_module_core_rx(addr, mod))
72593 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
72594 + else if (within_module_core_rw(addr, mod))
72595 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
72596 else
72597 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
72598 + return NULL;
72599
72600 /* Scan for closest preceeding symbol, and next symbol. (ELF
72601 starts real symbols at 1). */
72602 @@ -2936,7 +3091,7 @@ static int m_show(struct seq_file *m, void *p)
72603 char buf[8];
72604
72605 seq_printf(m, "%s %u",
72606 - mod->name, mod->init_size + mod->core_size);
72607 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
72608 print_unload_info(m, mod);
72609
72610 /* Informative for users. */
72611 @@ -2945,7 +3100,7 @@ static int m_show(struct seq_file *m, void *p)
72612 mod->state == MODULE_STATE_COMING ? "Loading":
72613 "Live");
72614 /* Used by oprofile and other similar tools. */
72615 - seq_printf(m, " 0x%p", mod->module_core);
72616 + seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
72617
72618 /* Taints info */
72619 if (mod->taints)
72620 @@ -2981,7 +3136,17 @@ static const struct file_operations proc_modules_operations = {
72621
72622 static int __init proc_modules_init(void)
72623 {
72624 +#ifndef CONFIG_GRKERNSEC_HIDESYM
72625 +#ifdef CONFIG_GRKERNSEC_PROC_USER
72626 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
72627 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72628 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
72629 +#else
72630 proc_create("modules", 0, NULL, &proc_modules_operations);
72631 +#endif
72632 +#else
72633 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
72634 +#endif
72635 return 0;
72636 }
72637 module_init(proc_modules_init);
72638 @@ -3040,12 +3205,12 @@ struct module *__module_address(unsigned long addr)
72639 {
72640 struct module *mod;
72641
72642 - if (addr < module_addr_min || addr > module_addr_max)
72643 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
72644 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
72645 return NULL;
72646
72647 list_for_each_entry_rcu(mod, &modules, list)
72648 - if (within_module_core(addr, mod)
72649 - || within_module_init(addr, mod))
72650 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
72651 return mod;
72652 return NULL;
72653 }
72654 @@ -3079,11 +3244,20 @@ bool is_module_text_address(unsigned long addr)
72655 */
72656 struct module *__module_text_address(unsigned long addr)
72657 {
72658 - struct module *mod = __module_address(addr);
72659 + struct module *mod;
72660 +
72661 +#ifdef CONFIG_X86_32
72662 + addr = ktla_ktva(addr);
72663 +#endif
72664 +
72665 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
72666 + return NULL;
72667 +
72668 + mod = __module_address(addr);
72669 +
72670 if (mod) {
72671 /* Make sure it's within the text section. */
72672 - if (!within(addr, mod->module_init, mod->init_text_size)
72673 - && !within(addr, mod->module_core, mod->core_text_size))
72674 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
72675 mod = NULL;
72676 }
72677 return mod;
72678 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
72679 index ec815a9..fe46e99 100644
72680 --- a/kernel/mutex-debug.c
72681 +++ b/kernel/mutex-debug.c
72682 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
72683 }
72684
72685 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
72686 - struct thread_info *ti)
72687 + struct task_struct *task)
72688 {
72689 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
72690
72691 /* Mark the current thread as blocked on the lock: */
72692 - ti->task->blocked_on = waiter;
72693 + task->blocked_on = waiter;
72694 }
72695
72696 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
72697 - struct thread_info *ti)
72698 + struct task_struct *task)
72699 {
72700 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
72701 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
72702 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
72703 - ti->task->blocked_on = NULL;
72704 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
72705 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
72706 + task->blocked_on = NULL;
72707
72708 list_del_init(&waiter->list);
72709 waiter->task = NULL;
72710 @@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lock)
72711 return;
72712
72713 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
72714 - DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
72715 + DEBUG_LOCKS_WARN_ON(lock->owner != current);
72716 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
72717 mutex_clear_owner(lock);
72718 }
72719 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
72720 index 6b2d735..372d3c4 100644
72721 --- a/kernel/mutex-debug.h
72722 +++ b/kernel/mutex-debug.h
72723 @@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
72724 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
72725 extern void debug_mutex_add_waiter(struct mutex *lock,
72726 struct mutex_waiter *waiter,
72727 - struct thread_info *ti);
72728 + struct task_struct *task);
72729 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
72730 - struct thread_info *ti);
72731 + struct task_struct *task);
72732 extern void debug_mutex_unlock(struct mutex *lock);
72733 extern void debug_mutex_init(struct mutex *lock, const char *name,
72734 struct lock_class_key *key);
72735
72736 static inline void mutex_set_owner(struct mutex *lock)
72737 {
72738 - lock->owner = current_thread_info();
72739 + lock->owner = current;
72740 }
72741
72742 static inline void mutex_clear_owner(struct mutex *lock)
72743 diff --git a/kernel/mutex.c b/kernel/mutex.c
72744 index f85644c..5ee9f77 100644
72745 --- a/kernel/mutex.c
72746 +++ b/kernel/mutex.c
72747 @@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
72748 */
72749
72750 for (;;) {
72751 - struct thread_info *owner;
72752 + struct task_struct *owner;
72753
72754 /*
72755 * If we own the BKL, then don't spin. The owner of
72756 @@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
72757 spin_lock_mutex(&lock->wait_lock, flags);
72758
72759 debug_mutex_lock_common(lock, &waiter);
72760 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
72761 + debug_mutex_add_waiter(lock, &waiter, task);
72762
72763 /* add waiting tasks to the end of the waitqueue (FIFO): */
72764 list_add_tail(&waiter.list, &lock->wait_list);
72765 @@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
72766 * TASK_UNINTERRUPTIBLE case.)
72767 */
72768 if (unlikely(signal_pending_state(state, task))) {
72769 - mutex_remove_waiter(lock, &waiter,
72770 - task_thread_info(task));
72771 + mutex_remove_waiter(lock, &waiter, task);
72772 mutex_release(&lock->dep_map, 1, ip);
72773 spin_unlock_mutex(&lock->wait_lock, flags);
72774
72775 @@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
72776 done:
72777 lock_acquired(&lock->dep_map, ip);
72778 /* got the lock - rejoice! */
72779 - mutex_remove_waiter(lock, &waiter, current_thread_info());
72780 + mutex_remove_waiter(lock, &waiter, task);
72781 mutex_set_owner(lock);
72782
72783 /* set it to 0 if there are no waiters left: */
72784 diff --git a/kernel/mutex.h b/kernel/mutex.h
72785 index 67578ca..4115fbf 100644
72786 --- a/kernel/mutex.h
72787 +++ b/kernel/mutex.h
72788 @@ -19,7 +19,7 @@
72789 #ifdef CONFIG_SMP
72790 static inline void mutex_set_owner(struct mutex *lock)
72791 {
72792 - lock->owner = current_thread_info();
72793 + lock->owner = current;
72794 }
72795
72796 static inline void mutex_clear_owner(struct mutex *lock)
72797 diff --git a/kernel/panic.c b/kernel/panic.c
72798 index 96b45d0..45c447a 100644
72799 --- a/kernel/panic.c
72800 +++ b/kernel/panic.c
72801 @@ -352,7 +352,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller, struc
72802 const char *board;
72803
72804 printk(KERN_WARNING "------------[ cut here ]------------\n");
72805 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
72806 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
72807 board = dmi_get_system_info(DMI_PRODUCT_NAME);
72808 if (board)
72809 printk(KERN_WARNING "Hardware name: %s\n", board);
72810 @@ -392,7 +392,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
72811 */
72812 void __stack_chk_fail(void)
72813 {
72814 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
72815 + dump_stack();
72816 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
72817 __builtin_return_address(0));
72818 }
72819 EXPORT_SYMBOL(__stack_chk_fail);
72820 diff --git a/kernel/params.c b/kernel/params.c
72821 index d656c27..21e452c 100644
72822 --- a/kernel/params.c
72823 +++ b/kernel/params.c
72824 @@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct kobject *kobj,
72825 return ret;
72826 }
72827
72828 -static struct sysfs_ops module_sysfs_ops = {
72829 +static const struct sysfs_ops module_sysfs_ops = {
72830 .show = module_attr_show,
72831 .store = module_attr_store,
72832 };
72833 @@ -739,7 +739,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
72834 return 0;
72835 }
72836
72837 -static struct kset_uevent_ops module_uevent_ops = {
72838 +static const struct kset_uevent_ops module_uevent_ops = {
72839 .filter = uevent_filter,
72840 };
72841
72842 diff --git a/kernel/perf_event.c b/kernel/perf_event.c
72843 index 37ebc14..9c121d9 100644
72844 --- a/kernel/perf_event.c
72845 +++ b/kernel/perf_event.c
72846 @@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostly = 516; /* 'free' kb per user */
72847 */
72848 int sysctl_perf_event_sample_rate __read_mostly = 100000;
72849
72850 -static atomic64_t perf_event_id;
72851 +static atomic64_unchecked_t perf_event_id;
72852
72853 /*
72854 * Lock for (sysadmin-configurable) event reservations:
72855 @@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struct perf_event *event,
72856 * In order to keep per-task stats reliable we need to flip the event
72857 * values when we flip the contexts.
72858 */
72859 - value = atomic64_read(&next_event->count);
72860 - value = atomic64_xchg(&event->count, value);
72861 - atomic64_set(&next_event->count, value);
72862 + value = atomic64_read_unchecked(&next_event->count);
72863 + value = atomic64_xchg_unchecked(&event->count, value);
72864 + atomic64_set_unchecked(&next_event->count, value);
72865
72866 swap(event->total_time_enabled, next_event->total_time_enabled);
72867 swap(event->total_time_running, next_event->total_time_running);
72868 @@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_event *event)
72869 update_event_times(event);
72870 }
72871
72872 - return atomic64_read(&event->count);
72873 + return atomic64_read_unchecked(&event->count);
72874 }
72875
72876 /*
72877 @@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct perf_event *event,
72878 values[n++] = 1 + leader->nr_siblings;
72879 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
72880 values[n++] = leader->total_time_enabled +
72881 - atomic64_read(&leader->child_total_time_enabled);
72882 + atomic64_read_unchecked(&leader->child_total_time_enabled);
72883 }
72884 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
72885 values[n++] = leader->total_time_running +
72886 - atomic64_read(&leader->child_total_time_running);
72887 + atomic64_read_unchecked(&leader->child_total_time_running);
72888 }
72889
72890 size = n * sizeof(u64);
72891 @@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct perf_event *event,
72892 values[n++] = perf_event_read_value(event);
72893 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
72894 values[n++] = event->total_time_enabled +
72895 - atomic64_read(&event->child_total_time_enabled);
72896 + atomic64_read_unchecked(&event->child_total_time_enabled);
72897 }
72898 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
72899 values[n++] = event->total_time_running +
72900 - atomic64_read(&event->child_total_time_running);
72901 + atomic64_read_unchecked(&event->child_total_time_running);
72902 }
72903 if (read_format & PERF_FORMAT_ID)
72904 values[n++] = primary_event_id(event);
72905 @@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
72906 static void perf_event_reset(struct perf_event *event)
72907 {
72908 (void)perf_event_read(event);
72909 - atomic64_set(&event->count, 0);
72910 + atomic64_set_unchecked(&event->count, 0);
72911 perf_event_update_userpage(event);
72912 }
72913
72914 @@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct perf_event *event)
72915 ++userpg->lock;
72916 barrier();
72917 userpg->index = perf_event_index(event);
72918 - userpg->offset = atomic64_read(&event->count);
72919 + userpg->offset = atomic64_read_unchecked(&event->count);
72920 if (event->state == PERF_EVENT_STATE_ACTIVE)
72921 - userpg->offset -= atomic64_read(&event->hw.prev_count);
72922 + userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
72923
72924 userpg->time_enabled = event->total_time_enabled +
72925 - atomic64_read(&event->child_total_time_enabled);
72926 + atomic64_read_unchecked(&event->child_total_time_enabled);
72927
72928 userpg->time_running = event->total_time_running +
72929 - atomic64_read(&event->child_total_time_running);
72930 + atomic64_read_unchecked(&event->child_total_time_running);
72931
72932 barrier();
72933 ++userpg->lock;
72934 @@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct perf_output_handle *handle,
72935 u64 values[4];
72936 int n = 0;
72937
72938 - values[n++] = atomic64_read(&event->count);
72939 + values[n++] = atomic64_read_unchecked(&event->count);
72940 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
72941 values[n++] = event->total_time_enabled +
72942 - atomic64_read(&event->child_total_time_enabled);
72943 + atomic64_read_unchecked(&event->child_total_time_enabled);
72944 }
72945 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
72946 values[n++] = event->total_time_running +
72947 - atomic64_read(&event->child_total_time_running);
72948 + atomic64_read_unchecked(&event->child_total_time_running);
72949 }
72950 if (read_format & PERF_FORMAT_ID)
72951 values[n++] = primary_event_id(event);
72952 @@ -2940,7 +2940,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
72953 if (leader != event)
72954 leader->pmu->read(leader);
72955
72956 - values[n++] = atomic64_read(&leader->count);
72957 + values[n++] = atomic64_read_unchecked(&leader->count);
72958 if (read_format & PERF_FORMAT_ID)
72959 values[n++] = primary_event_id(leader);
72960
72961 @@ -2952,7 +2952,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
72962 if (sub != event)
72963 sub->pmu->read(sub);
72964
72965 - values[n++] = atomic64_read(&sub->count);
72966 + values[n++] = atomic64_read_unchecked(&sub->count);
72967 if (read_format & PERF_FORMAT_ID)
72968 values[n++] = primary_event_id(sub);
72969
72970 @@ -3525,12 +3525,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
72971 * need to add enough zero bytes after the string to handle
72972 * the 64bit alignment we do later.
72973 */
72974 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
72975 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
72976 if (!buf) {
72977 name = strncpy(tmp, "//enomem", sizeof(tmp));
72978 goto got_name;
72979 }
72980 - name = d_path(&file->f_path, buf, PATH_MAX);
72981 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
72982 if (IS_ERR(name)) {
72983 name = strncpy(tmp, "//toolong", sizeof(tmp));
72984 goto got_name;
72985 @@ -3783,7 +3783,7 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
72986 {
72987 struct hw_perf_event *hwc = &event->hw;
72988
72989 - atomic64_add(nr, &event->count);
72990 + atomic64_add_unchecked(nr, &event->count);
72991
72992 if (!hwc->sample_period)
72993 return;
72994 @@ -4040,9 +4040,9 @@ static void cpu_clock_perf_event_update(struct perf_event *event)
72995 u64 now;
72996
72997 now = cpu_clock(cpu);
72998 - prev = atomic64_read(&event->hw.prev_count);
72999 - atomic64_set(&event->hw.prev_count, now);
73000 - atomic64_add(now - prev, &event->count);
73001 + prev = atomic64_read_unchecked(&event->hw.prev_count);
73002 + atomic64_set_unchecked(&event->hw.prev_count, now);
73003 + atomic64_add_unchecked(now - prev, &event->count);
73004 }
73005
73006 static int cpu_clock_perf_event_enable(struct perf_event *event)
73007 @@ -4050,7 +4050,7 @@ static int cpu_clock_perf_event_enable(struct perf_event *event)
73008 struct hw_perf_event *hwc = &event->hw;
73009 int cpu = raw_smp_processor_id();
73010
73011 - atomic64_set(&hwc->prev_count, cpu_clock(cpu));
73012 + atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
73013 perf_swevent_start_hrtimer(event);
73014
73015 return 0;
73016 @@ -4082,9 +4082,9 @@ static void task_clock_perf_event_update(struct perf_event *event, u64 now)
73017 u64 prev;
73018 s64 delta;
73019
73020 - prev = atomic64_xchg(&event->hw.prev_count, now);
73021 + prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
73022 delta = now - prev;
73023 - atomic64_add(delta, &event->count);
73024 + atomic64_add_unchecked(delta, &event->count);
73025 }
73026
73027 static int task_clock_perf_event_enable(struct perf_event *event)
73028 @@ -4094,7 +4094,7 @@ static int task_clock_perf_event_enable(struct perf_event *event)
73029
73030 now = event->ctx->time;
73031
73032 - atomic64_set(&hwc->prev_count, now);
73033 + atomic64_set_unchecked(&hwc->prev_count, now);
73034
73035 perf_swevent_start_hrtimer(event);
73036
73037 @@ -4289,7 +4289,7 @@ perf_event_alloc(struct perf_event_attr *attr,
73038 event->parent = parent_event;
73039
73040 event->ns = get_pid_ns(current->nsproxy->pid_ns);
73041 - event->id = atomic64_inc_return(&perf_event_id);
73042 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
73043
73044 event->state = PERF_EVENT_STATE_INACTIVE;
73045
73046 @@ -4720,15 +4720,15 @@ static void sync_child_event(struct perf_event *child_event,
73047 if (child_event->attr.inherit_stat)
73048 perf_event_read_event(child_event, child);
73049
73050 - child_val = atomic64_read(&child_event->count);
73051 + child_val = atomic64_read_unchecked(&child_event->count);
73052
73053 /*
73054 * Add back the child's count to the parent's count:
73055 */
73056 - atomic64_add(child_val, &parent_event->count);
73057 - atomic64_add(child_event->total_time_enabled,
73058 + atomic64_add_unchecked(child_val, &parent_event->count);
73059 + atomic64_add_unchecked(child_event->total_time_enabled,
73060 &parent_event->child_total_time_enabled);
73061 - atomic64_add(child_event->total_time_running,
73062 + atomic64_add_unchecked(child_event->total_time_running,
73063 &parent_event->child_total_time_running);
73064
73065 /*
73066 diff --git a/kernel/pid.c b/kernel/pid.c
73067 index fce7198..4f23a7e 100644
73068 --- a/kernel/pid.c
73069 +++ b/kernel/pid.c
73070 @@ -33,6 +33,7 @@
73071 #include <linux/rculist.h>
73072 #include <linux/bootmem.h>
73073 #include <linux/hash.h>
73074 +#include <linux/security.h>
73075 #include <linux/pid_namespace.h>
73076 #include <linux/init_task.h>
73077 #include <linux/syscalls.h>
73078 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
73079
73080 int pid_max = PID_MAX_DEFAULT;
73081
73082 -#define RESERVED_PIDS 300
73083 +#define RESERVED_PIDS 500
73084
73085 int pid_max_min = RESERVED_PIDS + 1;
73086 int pid_max_max = PID_MAX_LIMIT;
73087 @@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
73088 */
73089 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
73090 {
73091 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
73092 + struct task_struct *task;
73093 +
73094 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
73095 +
73096 + if (gr_pid_is_chrooted(task))
73097 + return NULL;
73098 +
73099 + return task;
73100 }
73101
73102 struct task_struct *find_task_by_vpid(pid_t vnr)
73103 @@ -391,6 +399,11 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
73104 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
73105 }
73106
73107 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
73108 +{
73109 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
73110 +}
73111 +
73112 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
73113 {
73114 struct pid *pid;
73115 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
73116 index 5c9dc22..d271117 100644
73117 --- a/kernel/posix-cpu-timers.c
73118 +++ b/kernel/posix-cpu-timers.c
73119 @@ -6,6 +6,7 @@
73120 #include <linux/posix-timers.h>
73121 #include <linux/errno.h>
73122 #include <linux/math64.h>
73123 +#include <linux/security.h>
73124 #include <asm/uaccess.h>
73125 #include <linux/kernel_stat.h>
73126 #include <trace/events/timer.h>
73127 @@ -1697,7 +1698,7 @@ static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
73128
73129 static __init int init_posix_cpu_timers(void)
73130 {
73131 - struct k_clock process = {
73132 + static struct k_clock process = {
73133 .clock_getres = process_cpu_clock_getres,
73134 .clock_get = process_cpu_clock_get,
73135 .clock_set = do_posix_clock_nosettime,
73136 @@ -1705,7 +1706,7 @@ static __init int init_posix_cpu_timers(void)
73137 .nsleep = process_cpu_nsleep,
73138 .nsleep_restart = process_cpu_nsleep_restart,
73139 };
73140 - struct k_clock thread = {
73141 + static struct k_clock thread = {
73142 .clock_getres = thread_cpu_clock_getres,
73143 .clock_get = thread_cpu_clock_get,
73144 .clock_set = do_posix_clock_nosettime,
73145 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
73146 index 5e76d22..cf1baeb 100644
73147 --- a/kernel/posix-timers.c
73148 +++ b/kernel/posix-timers.c
73149 @@ -42,6 +42,7 @@
73150 #include <linux/compiler.h>
73151 #include <linux/idr.h>
73152 #include <linux/posix-timers.h>
73153 +#include <linux/grsecurity.h>
73154 #include <linux/syscalls.h>
73155 #include <linux/wait.h>
73156 #include <linux/workqueue.h>
73157 @@ -131,7 +132,7 @@ static DEFINE_SPINLOCK(idr_lock);
73158 * which we beg off on and pass to do_sys_settimeofday().
73159 */
73160
73161 -static struct k_clock posix_clocks[MAX_CLOCKS];
73162 +static struct k_clock *posix_clocks[MAX_CLOCKS];
73163
73164 /*
73165 * These ones are defined below.
73166 @@ -157,8 +158,8 @@ static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
73167 */
73168 #define CLOCK_DISPATCH(clock, call, arglist) \
73169 ((clock) < 0 ? posix_cpu_##call arglist : \
73170 - (posix_clocks[clock].call != NULL \
73171 - ? (*posix_clocks[clock].call) arglist : common_##call arglist))
73172 + (posix_clocks[clock]->call != NULL \
73173 + ? (*posix_clocks[clock]->call) arglist : common_##call arglist))
73174
73175 /*
73176 * Default clock hook functions when the struct k_clock passed
73177 @@ -172,7 +173,7 @@ static inline int common_clock_getres(const clockid_t which_clock,
73178 struct timespec *tp)
73179 {
73180 tp->tv_sec = 0;
73181 - tp->tv_nsec = posix_clocks[which_clock].res;
73182 + tp->tv_nsec = posix_clocks[which_clock]->res;
73183 return 0;
73184 }
73185
73186 @@ -217,9 +218,11 @@ static inline int invalid_clockid(const clockid_t which_clock)
73187 return 0;
73188 if ((unsigned) which_clock >= MAX_CLOCKS)
73189 return 1;
73190 - if (posix_clocks[which_clock].clock_getres != NULL)
73191 + if (posix_clocks[which_clock] == NULL)
73192 return 0;
73193 - if (posix_clocks[which_clock].res != 0)
73194 + if (posix_clocks[which_clock]->clock_getres != NULL)
73195 + return 0;
73196 + if (posix_clocks[which_clock]->res != 0)
73197 return 0;
73198 return 1;
73199 }
73200 @@ -266,29 +269,29 @@ int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp)
73201 */
73202 static __init int init_posix_timers(void)
73203 {
73204 - struct k_clock clock_realtime = {
73205 + static struct k_clock clock_realtime = {
73206 .clock_getres = hrtimer_get_res,
73207 };
73208 - struct k_clock clock_monotonic = {
73209 + static struct k_clock clock_monotonic = {
73210 .clock_getres = hrtimer_get_res,
73211 .clock_get = posix_ktime_get_ts,
73212 .clock_set = do_posix_clock_nosettime,
73213 };
73214 - struct k_clock clock_monotonic_raw = {
73215 + static struct k_clock clock_monotonic_raw = {
73216 .clock_getres = hrtimer_get_res,
73217 .clock_get = posix_get_monotonic_raw,
73218 .clock_set = do_posix_clock_nosettime,
73219 .timer_create = no_timer_create,
73220 .nsleep = no_nsleep,
73221 };
73222 - struct k_clock clock_realtime_coarse = {
73223 + static struct k_clock clock_realtime_coarse = {
73224 .clock_getres = posix_get_coarse_res,
73225 .clock_get = posix_get_realtime_coarse,
73226 .clock_set = do_posix_clock_nosettime,
73227 .timer_create = no_timer_create,
73228 .nsleep = no_nsleep,
73229 };
73230 - struct k_clock clock_monotonic_coarse = {
73231 + static struct k_clock clock_monotonic_coarse = {
73232 .clock_getres = posix_get_coarse_res,
73233 .clock_get = posix_get_monotonic_coarse,
73234 .clock_set = do_posix_clock_nosettime,
73235 @@ -296,6 +299,8 @@ static __init int init_posix_timers(void)
73236 .nsleep = no_nsleep,
73237 };
73238
73239 + pax_track_stack();
73240 +
73241 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
73242 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
73243 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
73244 @@ -484,7 +489,7 @@ void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
73245 return;
73246 }
73247
73248 - posix_clocks[clock_id] = *new_clock;
73249 + posix_clocks[clock_id] = new_clock;
73250 }
73251 EXPORT_SYMBOL_GPL(register_posix_clock);
73252
73253 @@ -948,6 +953,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
73254 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
73255 return -EFAULT;
73256
73257 + /* only the CLOCK_REALTIME clock can be set, all other clocks
73258 + have their clock_set fptr set to a nosettime dummy function
73259 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
73260 + call common_clock_set, which calls do_sys_settimeofday, which
73261 + we hook
73262 + */
73263 +
73264 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
73265 }
73266
73267 diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
73268 index 04a9e90..bc355aa 100644
73269 --- a/kernel/power/hibernate.c
73270 +++ b/kernel/power/hibernate.c
73271 @@ -48,14 +48,14 @@ enum {
73272
73273 static int hibernation_mode = HIBERNATION_SHUTDOWN;
73274
73275 -static struct platform_hibernation_ops *hibernation_ops;
73276 +static const struct platform_hibernation_ops *hibernation_ops;
73277
73278 /**
73279 * hibernation_set_ops - set the global hibernate operations
73280 * @ops: the hibernation operations to use in subsequent hibernation transitions
73281 */
73282
73283 -void hibernation_set_ops(struct platform_hibernation_ops *ops)
73284 +void hibernation_set_ops(const struct platform_hibernation_ops *ops)
73285 {
73286 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
73287 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
73288 diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
73289 index e8b3370..484c2e4 100644
73290 --- a/kernel/power/poweroff.c
73291 +++ b/kernel/power/poweroff.c
73292 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
73293 .enable_mask = SYSRQ_ENABLE_BOOT,
73294 };
73295
73296 -static int pm_sysrq_init(void)
73297 +static int __init pm_sysrq_init(void)
73298 {
73299 register_sysrq_key('o', &sysrq_poweroff_op);
73300 return 0;
73301 diff --git a/kernel/power/process.c b/kernel/power/process.c
73302 index e7cd671..56d5f459 100644
73303 --- a/kernel/power/process.c
73304 +++ b/kernel/power/process.c
73305 @@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_only)
73306 struct timeval start, end;
73307 u64 elapsed_csecs64;
73308 unsigned int elapsed_csecs;
73309 + bool timedout = false;
73310
73311 do_gettimeofday(&start);
73312
73313 end_time = jiffies + TIMEOUT;
73314 do {
73315 todo = 0;
73316 + if (time_after(jiffies, end_time))
73317 + timedout = true;
73318 read_lock(&tasklist_lock);
73319 do_each_thread(g, p) {
73320 if (frozen(p) || !freezeable(p))
73321 @@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_only)
73322 * It is "frozen enough". If the task does wake
73323 * up, it will immediately call try_to_freeze.
73324 */
73325 - if (!task_is_stopped_or_traced(p) &&
73326 - !freezer_should_skip(p))
73327 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
73328 todo++;
73329 + if (timedout) {
73330 + printk(KERN_ERR "Task refusing to freeze:\n");
73331 + sched_show_task(p);
73332 + }
73333 + }
73334 } while_each_thread(g, p);
73335 read_unlock(&tasklist_lock);
73336 yield(); /* Yield is okay here */
73337 - if (time_after(jiffies, end_time))
73338 - break;
73339 - } while (todo);
73340 + } while (todo && !timedout);
73341
73342 do_gettimeofday(&end);
73343 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
73344 diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
73345 index 40dd021..fb30ceb 100644
73346 --- a/kernel/power/suspend.c
73347 +++ b/kernel/power/suspend.c
73348 @@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_MAX] = {
73349 [PM_SUSPEND_MEM] = "mem",
73350 };
73351
73352 -static struct platform_suspend_ops *suspend_ops;
73353 +static const struct platform_suspend_ops *suspend_ops;
73354
73355 /**
73356 * suspend_set_ops - Set the global suspend method table.
73357 * @ops: Pointer to ops structure.
73358 */
73359 -void suspend_set_ops(struct platform_suspend_ops *ops)
73360 +void suspend_set_ops(const struct platform_suspend_ops *ops)
73361 {
73362 mutex_lock(&pm_mutex);
73363 suspend_ops = ops;
73364 diff --git a/kernel/printk.c b/kernel/printk.c
73365 index 4cade47..4d17900 100644
73366 --- a/kernel/printk.c
73367 +++ b/kernel/printk.c
73368 @@ -33,6 +33,7 @@
73369 #include <linux/bootmem.h>
73370 #include <linux/syscalls.h>
73371 #include <linux/kexec.h>
73372 +#include <linux/syslog.h>
73373
73374 #include <asm/uaccess.h>
73375
73376 @@ -256,38 +257,30 @@ static inline void boot_delay_msec(void)
73377 }
73378 #endif
73379
73380 -/*
73381 - * Commands to do_syslog:
73382 - *
73383 - * 0 -- Close the log. Currently a NOP.
73384 - * 1 -- Open the log. Currently a NOP.
73385 - * 2 -- Read from the log.
73386 - * 3 -- Read all messages remaining in the ring buffer.
73387 - * 4 -- Read and clear all messages remaining in the ring buffer
73388 - * 5 -- Clear ring buffer.
73389 - * 6 -- Disable printk's to console
73390 - * 7 -- Enable printk's to console
73391 - * 8 -- Set level of messages printed to console
73392 - * 9 -- Return number of unread characters in the log buffer
73393 - * 10 -- Return size of the log buffer
73394 - */
73395 -int do_syslog(int type, char __user *buf, int len)
73396 +int do_syslog(int type, char __user *buf, int len, bool from_file)
73397 {
73398 unsigned i, j, limit, count;
73399 int do_clear = 0;
73400 char c;
73401 int error = 0;
73402
73403 - error = security_syslog(type);
73404 +#ifdef CONFIG_GRKERNSEC_DMESG
73405 + if (grsec_enable_dmesg &&
73406 + (!from_file || (from_file && type == SYSLOG_ACTION_OPEN)) &&
73407 + !capable(CAP_SYS_ADMIN))
73408 + return -EPERM;
73409 +#endif
73410 +
73411 + error = security_syslog(type, from_file);
73412 if (error)
73413 return error;
73414
73415 switch (type) {
73416 - case 0: /* Close log */
73417 + case SYSLOG_ACTION_CLOSE: /* Close log */
73418 break;
73419 - case 1: /* Open log */
73420 + case SYSLOG_ACTION_OPEN: /* Open log */
73421 break;
73422 - case 2: /* Read from log */
73423 + case SYSLOG_ACTION_READ: /* Read from log */
73424 error = -EINVAL;
73425 if (!buf || len < 0)
73426 goto out;
73427 @@ -318,10 +311,12 @@ int do_syslog(int type, char __user *buf, int len)
73428 if (!error)
73429 error = i;
73430 break;
73431 - case 4: /* Read/clear last kernel messages */
73432 + /* Read/clear last kernel messages */
73433 + case SYSLOG_ACTION_READ_CLEAR:
73434 do_clear = 1;
73435 /* FALL THRU */
73436 - case 3: /* Read last kernel messages */
73437 + /* Read last kernel messages */
73438 + case SYSLOG_ACTION_READ_ALL:
73439 error = -EINVAL;
73440 if (!buf || len < 0)
73441 goto out;
73442 @@ -374,21 +369,25 @@ int do_syslog(int type, char __user *buf, int len)
73443 }
73444 }
73445 break;
73446 - case 5: /* Clear ring buffer */
73447 + /* Clear ring buffer */
73448 + case SYSLOG_ACTION_CLEAR:
73449 logged_chars = 0;
73450 break;
73451 - case 6: /* Disable logging to console */
73452 + /* Disable logging to console */
73453 + case SYSLOG_ACTION_CONSOLE_OFF:
73454 if (saved_console_loglevel == -1)
73455 saved_console_loglevel = console_loglevel;
73456 console_loglevel = minimum_console_loglevel;
73457 break;
73458 - case 7: /* Enable logging to console */
73459 + /* Enable logging to console */
73460 + case SYSLOG_ACTION_CONSOLE_ON:
73461 if (saved_console_loglevel != -1) {
73462 console_loglevel = saved_console_loglevel;
73463 saved_console_loglevel = -1;
73464 }
73465 break;
73466 - case 8: /* Set level of messages printed to console */
73467 + /* Set level of messages printed to console */
73468 + case SYSLOG_ACTION_CONSOLE_LEVEL:
73469 error = -EINVAL;
73470 if (len < 1 || len > 8)
73471 goto out;
73472 @@ -399,10 +398,12 @@ int do_syslog(int type, char __user *buf, int len)
73473 saved_console_loglevel = -1;
73474 error = 0;
73475 break;
73476 - case 9: /* Number of chars in the log buffer */
73477 + /* Number of chars in the log buffer */
73478 + case SYSLOG_ACTION_SIZE_UNREAD:
73479 error = log_end - log_start;
73480 break;
73481 - case 10: /* Size of the log buffer */
73482 + /* Size of the log buffer */
73483 + case SYSLOG_ACTION_SIZE_BUFFER:
73484 error = log_buf_len;
73485 break;
73486 default:
73487 @@ -415,7 +416,7 @@ out:
73488
73489 SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
73490 {
73491 - return do_syslog(type, buf, len);
73492 + return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
73493 }
73494
73495 /*
73496 diff --git a/kernel/profile.c b/kernel/profile.c
73497 index dfadc5b..7f59404 100644
73498 --- a/kernel/profile.c
73499 +++ b/kernel/profile.c
73500 @@ -39,7 +39,7 @@ struct profile_hit {
73501 /* Oprofile timer tick hook */
73502 static int (*timer_hook)(struct pt_regs *) __read_mostly;
73503
73504 -static atomic_t *prof_buffer;
73505 +static atomic_unchecked_t *prof_buffer;
73506 static unsigned long prof_len, prof_shift;
73507
73508 int prof_on __read_mostly;
73509 @@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
73510 hits[i].pc = 0;
73511 continue;
73512 }
73513 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
73514 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
73515 hits[i].hits = hits[i].pc = 0;
73516 }
73517 }
73518 @@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
73519 * Add the current hit(s) and flush the write-queue out
73520 * to the global buffer:
73521 */
73522 - atomic_add(nr_hits, &prof_buffer[pc]);
73523 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
73524 for (i = 0; i < NR_PROFILE_HIT; ++i) {
73525 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
73526 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
73527 hits[i].pc = hits[i].hits = 0;
73528 }
73529 out:
73530 @@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
73531 if (prof_on != type || !prof_buffer)
73532 return;
73533 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
73534 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
73535 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
73536 }
73537 #endif /* !CONFIG_SMP */
73538 EXPORT_SYMBOL_GPL(profile_hits);
73539 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
73540 return -EFAULT;
73541 buf++; p++; count--; read++;
73542 }
73543 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
73544 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
73545 if (copy_to_user(buf, (void *)pnt, count))
73546 return -EFAULT;
73547 read += count;
73548 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
73549 }
73550 #endif
73551 profile_discard_flip_buffers();
73552 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
73553 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
73554 return count;
73555 }
73556
73557 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
73558 index 05625f6..733bf70 100644
73559 --- a/kernel/ptrace.c
73560 +++ b/kernel/ptrace.c
73561 @@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_struct *child, int kill)
73562 return ret;
73563 }
73564
73565 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
73566 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
73567 + unsigned int log)
73568 {
73569 const struct cred *cred = current_cred(), *tcred;
73570
73571 @@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
73572 cred->gid != tcred->egid ||
73573 cred->gid != tcred->sgid ||
73574 cred->gid != tcred->gid) &&
73575 - !capable(CAP_SYS_PTRACE)) {
73576 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
73577 + (log && !capable(CAP_SYS_PTRACE)))
73578 + ) {
73579 rcu_read_unlock();
73580 return -EPERM;
73581 }
73582 @@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
73583 smp_rmb();
73584 if (task->mm)
73585 dumpable = get_dumpable(task->mm);
73586 - if (!dumpable && !capable(CAP_SYS_PTRACE))
73587 + if (!dumpable &&
73588 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
73589 + (log && !capable(CAP_SYS_PTRACE))))
73590 return -EPERM;
73591
73592 return security_ptrace_access_check(task, mode);
73593 @@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
73594 {
73595 int err;
73596 task_lock(task);
73597 - err = __ptrace_may_access(task, mode);
73598 + err = __ptrace_may_access(task, mode, 0);
73599 + task_unlock(task);
73600 + return !err;
73601 +}
73602 +
73603 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
73604 +{
73605 + int err;
73606 + task_lock(task);
73607 + err = __ptrace_may_access(task, mode, 1);
73608 task_unlock(task);
73609 return !err;
73610 }
73611 @@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *task)
73612 goto out;
73613
73614 task_lock(task);
73615 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
73616 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
73617 task_unlock(task);
73618 if (retval)
73619 goto unlock_creds;
73620 @@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *task)
73621 goto unlock_tasklist;
73622
73623 task->ptrace = PT_PTRACED;
73624 - if (capable(CAP_SYS_PTRACE))
73625 + if (capable_nolog(CAP_SYS_PTRACE))
73626 task->ptrace |= PT_PTRACE_CAP;
73627
73628 __ptrace_link(task, current);
73629 @@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
73630 {
73631 int copied = 0;
73632
73633 + pax_track_stack();
73634 +
73635 while (len > 0) {
73636 char buf[128];
73637 int this_len, retval;
73638 @@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
73639 {
73640 int copied = 0;
73641
73642 + pax_track_stack();
73643 +
73644 while (len > 0) {
73645 char buf[128];
73646 int this_len, retval;
73647 @@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *child, long request,
73648 int ret = -EIO;
73649 siginfo_t siginfo;
73650
73651 + pax_track_stack();
73652 +
73653 switch (request) {
73654 case PTRACE_PEEKTEXT:
73655 case PTRACE_PEEKDATA:
73656 @@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *child, long request,
73657 ret = ptrace_setoptions(child, data);
73658 break;
73659 case PTRACE_GETEVENTMSG:
73660 - ret = put_user(child->ptrace_message, (unsigned long __user *) data);
73661 + ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
73662 break;
73663
73664 case PTRACE_GETSIGINFO:
73665 ret = ptrace_getsiginfo(child, &siginfo);
73666 if (!ret)
73667 - ret = copy_siginfo_to_user((siginfo_t __user *) data,
73668 + ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
73669 &siginfo);
73670 break;
73671
73672 case PTRACE_SETSIGINFO:
73673 - if (copy_from_user(&siginfo, (siginfo_t __user *) data,
73674 + if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
73675 sizeof siginfo))
73676 ret = -EFAULT;
73677 else
73678 @@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
73679 goto out;
73680 }
73681
73682 + if (gr_handle_ptrace(child, request)) {
73683 + ret = -EPERM;
73684 + goto out_put_task_struct;
73685 + }
73686 +
73687 if (request == PTRACE_ATTACH) {
73688 ret = ptrace_attach(child);
73689 /*
73690 * Some architectures need to do book-keeping after
73691 * a ptrace attach.
73692 */
73693 - if (!ret)
73694 + if (!ret) {
73695 arch_ptrace_attach(child);
73696 + gr_audit_ptrace(child);
73697 + }
73698 goto out_put_task_struct;
73699 }
73700
73701 @@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
73702 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
73703 if (copied != sizeof(tmp))
73704 return -EIO;
73705 - return put_user(tmp, (unsigned long __user *)data);
73706 + return put_user(tmp, (__force unsigned long __user *)data);
73707 }
73708
73709 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
73710 @@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
73711 siginfo_t siginfo;
73712 int ret;
73713
73714 + pax_track_stack();
73715 +
73716 switch (request) {
73717 case PTRACE_PEEKTEXT:
73718 case PTRACE_PEEKDATA:
73719 @@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
73720 goto out;
73721 }
73722
73723 + if (gr_handle_ptrace(child, request)) {
73724 + ret = -EPERM;
73725 + goto out_put_task_struct;
73726 + }
73727 +
73728 if (request == PTRACE_ATTACH) {
73729 ret = ptrace_attach(child);
73730 /*
73731 * Some architectures need to do book-keeping after
73732 * a ptrace attach.
73733 */
73734 - if (!ret)
73735 + if (!ret) {
73736 arch_ptrace_attach(child);
73737 + gr_audit_ptrace(child);
73738 + }
73739 goto out_put_task_struct;
73740 }
73741
73742 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
73743 index 697c0a0..2402696 100644
73744 --- a/kernel/rcutorture.c
73745 +++ b/kernel/rcutorture.c
73746 @@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
73747 { 0 };
73748 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
73749 { 0 };
73750 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
73751 -static atomic_t n_rcu_torture_alloc;
73752 -static atomic_t n_rcu_torture_alloc_fail;
73753 -static atomic_t n_rcu_torture_free;
73754 -static atomic_t n_rcu_torture_mberror;
73755 -static atomic_t n_rcu_torture_error;
73756 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
73757 +static atomic_unchecked_t n_rcu_torture_alloc;
73758 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
73759 +static atomic_unchecked_t n_rcu_torture_free;
73760 +static atomic_unchecked_t n_rcu_torture_mberror;
73761 +static atomic_unchecked_t n_rcu_torture_error;
73762 static long n_rcu_torture_timers;
73763 static struct list_head rcu_torture_removed;
73764 static cpumask_var_t shuffle_tmp_mask;
73765 @@ -187,11 +187,11 @@ rcu_torture_alloc(void)
73766
73767 spin_lock_bh(&rcu_torture_lock);
73768 if (list_empty(&rcu_torture_freelist)) {
73769 - atomic_inc(&n_rcu_torture_alloc_fail);
73770 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
73771 spin_unlock_bh(&rcu_torture_lock);
73772 return NULL;
73773 }
73774 - atomic_inc(&n_rcu_torture_alloc);
73775 + atomic_inc_unchecked(&n_rcu_torture_alloc);
73776 p = rcu_torture_freelist.next;
73777 list_del_init(p);
73778 spin_unlock_bh(&rcu_torture_lock);
73779 @@ -204,7 +204,7 @@ rcu_torture_alloc(void)
73780 static void
73781 rcu_torture_free(struct rcu_torture *p)
73782 {
73783 - atomic_inc(&n_rcu_torture_free);
73784 + atomic_inc_unchecked(&n_rcu_torture_free);
73785 spin_lock_bh(&rcu_torture_lock);
73786 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
73787 spin_unlock_bh(&rcu_torture_lock);
73788 @@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
73789 i = rp->rtort_pipe_count;
73790 if (i > RCU_TORTURE_PIPE_LEN)
73791 i = RCU_TORTURE_PIPE_LEN;
73792 - atomic_inc(&rcu_torture_wcount[i]);
73793 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
73794 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
73795 rp->rtort_mbtest = 0;
73796 rcu_torture_free(rp);
73797 @@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
73798 i = rp->rtort_pipe_count;
73799 if (i > RCU_TORTURE_PIPE_LEN)
73800 i = RCU_TORTURE_PIPE_LEN;
73801 - atomic_inc(&rcu_torture_wcount[i]);
73802 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
73803 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
73804 rp->rtort_mbtest = 0;
73805 list_del(&rp->rtort_free);
73806 @@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
73807 i = old_rp->rtort_pipe_count;
73808 if (i > RCU_TORTURE_PIPE_LEN)
73809 i = RCU_TORTURE_PIPE_LEN;
73810 - atomic_inc(&rcu_torture_wcount[i]);
73811 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
73812 old_rp->rtort_pipe_count++;
73813 cur_ops->deferred_free(old_rp);
73814 }
73815 @@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned long unused)
73816 return;
73817 }
73818 if (p->rtort_mbtest == 0)
73819 - atomic_inc(&n_rcu_torture_mberror);
73820 + atomic_inc_unchecked(&n_rcu_torture_mberror);
73821 spin_lock(&rand_lock);
73822 cur_ops->read_delay(&rand);
73823 n_rcu_torture_timers++;
73824 @@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
73825 continue;
73826 }
73827 if (p->rtort_mbtest == 0)
73828 - atomic_inc(&n_rcu_torture_mberror);
73829 + atomic_inc_unchecked(&n_rcu_torture_mberror);
73830 cur_ops->read_delay(&rand);
73831 preempt_disable();
73832 pipe_count = p->rtort_pipe_count;
73833 @@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
73834 rcu_torture_current,
73835 rcu_torture_current_version,
73836 list_empty(&rcu_torture_freelist),
73837 - atomic_read(&n_rcu_torture_alloc),
73838 - atomic_read(&n_rcu_torture_alloc_fail),
73839 - atomic_read(&n_rcu_torture_free),
73840 - atomic_read(&n_rcu_torture_mberror),
73841 + atomic_read_unchecked(&n_rcu_torture_alloc),
73842 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
73843 + atomic_read_unchecked(&n_rcu_torture_free),
73844 + atomic_read_unchecked(&n_rcu_torture_mberror),
73845 n_rcu_torture_timers);
73846 - if (atomic_read(&n_rcu_torture_mberror) != 0)
73847 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
73848 cnt += sprintf(&page[cnt], " !!!");
73849 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
73850 if (i > 1) {
73851 cnt += sprintf(&page[cnt], "!!! ");
73852 - atomic_inc(&n_rcu_torture_error);
73853 + atomic_inc_unchecked(&n_rcu_torture_error);
73854 WARN_ON_ONCE(1);
73855 }
73856 cnt += sprintf(&page[cnt], "Reader Pipe: ");
73857 @@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
73858 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
73859 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
73860 cnt += sprintf(&page[cnt], " %d",
73861 - atomic_read(&rcu_torture_wcount[i]));
73862 + atomic_read_unchecked(&rcu_torture_wcount[i]));
73863 }
73864 cnt += sprintf(&page[cnt], "\n");
73865 if (cur_ops->stats)
73866 @@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
73867
73868 if (cur_ops->cleanup)
73869 cur_ops->cleanup();
73870 - if (atomic_read(&n_rcu_torture_error))
73871 + if (atomic_read_unchecked(&n_rcu_torture_error))
73872 rcu_torture_print_module_parms("End of test: FAILURE");
73873 else
73874 rcu_torture_print_module_parms("End of test: SUCCESS");
73875 @@ -1138,13 +1138,13 @@ rcu_torture_init(void)
73876
73877 rcu_torture_current = NULL;
73878 rcu_torture_current_version = 0;
73879 - atomic_set(&n_rcu_torture_alloc, 0);
73880 - atomic_set(&n_rcu_torture_alloc_fail, 0);
73881 - atomic_set(&n_rcu_torture_free, 0);
73882 - atomic_set(&n_rcu_torture_mberror, 0);
73883 - atomic_set(&n_rcu_torture_error, 0);
73884 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
73885 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
73886 + atomic_set_unchecked(&n_rcu_torture_free, 0);
73887 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
73888 + atomic_set_unchecked(&n_rcu_torture_error, 0);
73889 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
73890 - atomic_set(&rcu_torture_wcount[i], 0);
73891 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
73892 for_each_possible_cpu(cpu) {
73893 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
73894 per_cpu(rcu_torture_count, cpu)[i] = 0;
73895 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
73896 index 683c4f3..97f54c6 100644
73897 --- a/kernel/rcutree.c
73898 +++ b/kernel/rcutree.c
73899 @@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
73900 /*
73901 * Do softirq processing for the current CPU.
73902 */
73903 -static void rcu_process_callbacks(struct softirq_action *unused)
73904 +static void rcu_process_callbacks(void)
73905 {
73906 /*
73907 * Memory references from any prior RCU read-side critical sections
73908 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
73909 index c03edf7..ac1b341 100644
73910 --- a/kernel/rcutree_plugin.h
73911 +++ b/kernel/rcutree_plugin.h
73912 @@ -145,7 +145,7 @@ static void rcu_preempt_note_context_switch(int cpu)
73913 */
73914 void __rcu_read_lock(void)
73915 {
73916 - ACCESS_ONCE(current->rcu_read_lock_nesting)++;
73917 + ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
73918 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
73919 }
73920 EXPORT_SYMBOL_GPL(__rcu_read_lock);
73921 @@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
73922 struct task_struct *t = current;
73923
73924 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
73925 - if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
73926 + if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
73927 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
73928 rcu_read_unlock_special(t);
73929 }
73930 diff --git a/kernel/relay.c b/kernel/relay.c
73931 index 760c262..a9fd241 100644
73932 --- a/kernel/relay.c
73933 +++ b/kernel/relay.c
73934 @@ -1222,7 +1222,7 @@ static int subbuf_splice_actor(struct file *in,
73935 unsigned int flags,
73936 int *nonpad_ret)
73937 {
73938 - unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
73939 + unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
73940 struct rchan_buf *rbuf = in->private_data;
73941 unsigned int subbuf_size = rbuf->chan->subbuf_size;
73942 uint64_t pos = (uint64_t) *ppos;
73943 @@ -1241,6 +1241,9 @@ static int subbuf_splice_actor(struct file *in,
73944 .ops = &relay_pipe_buf_ops,
73945 .spd_release = relay_page_release,
73946 };
73947 + ssize_t ret;
73948 +
73949 + pax_track_stack();
73950
73951 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
73952 return 0;
73953 diff --git a/kernel/resource.c b/kernel/resource.c
73954 index fb11a58..4e61ae1 100644
73955 --- a/kernel/resource.c
73956 +++ b/kernel/resource.c
73957 @@ -132,8 +132,18 @@ static const struct file_operations proc_iomem_operations = {
73958
73959 static int __init ioresources_init(void)
73960 {
73961 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
73962 +#ifdef CONFIG_GRKERNSEC_PROC_USER
73963 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
73964 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
73965 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73966 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
73967 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
73968 +#endif
73969 +#else
73970 proc_create("ioports", 0, NULL, &proc_ioports_operations);
73971 proc_create("iomem", 0, NULL, &proc_iomem_operations);
73972 +#endif
73973 return 0;
73974 }
73975 __initcall(ioresources_init);
73976 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
73977 index a56f629..1fc4989 100644
73978 --- a/kernel/rtmutex-tester.c
73979 +++ b/kernel/rtmutex-tester.c
73980 @@ -21,7 +21,7 @@
73981 #define MAX_RT_TEST_MUTEXES 8
73982
73983 static spinlock_t rttest_lock;
73984 -static atomic_t rttest_event;
73985 +static atomic_unchecked_t rttest_event;
73986
73987 struct test_thread_data {
73988 int opcode;
73989 @@ -64,7 +64,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
73990
73991 case RTTEST_LOCKCONT:
73992 td->mutexes[td->opdata] = 1;
73993 - td->event = atomic_add_return(1, &rttest_event);
73994 + td->event = atomic_add_return_unchecked(1, &rttest_event);
73995 return 0;
73996
73997 case RTTEST_RESET:
73998 @@ -82,7 +82,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
73999 return 0;
74000
74001 case RTTEST_RESETEVENT:
74002 - atomic_set(&rttest_event, 0);
74003 + atomic_set_unchecked(&rttest_event, 0);
74004 return 0;
74005
74006 default:
74007 @@ -99,9 +99,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
74008 return ret;
74009
74010 td->mutexes[id] = 1;
74011 - td->event = atomic_add_return(1, &rttest_event);
74012 + td->event = atomic_add_return_unchecked(1, &rttest_event);
74013 rt_mutex_lock(&mutexes[id]);
74014 - td->event = atomic_add_return(1, &rttest_event);
74015 + td->event = atomic_add_return_unchecked(1, &rttest_event);
74016 td->mutexes[id] = 4;
74017 return 0;
74018
74019 @@ -112,9 +112,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
74020 return ret;
74021
74022 td->mutexes[id] = 1;
74023 - td->event = atomic_add_return(1, &rttest_event);
74024 + td->event = atomic_add_return_unchecked(1, &rttest_event);
74025 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
74026 - td->event = atomic_add_return(1, &rttest_event);
74027 + td->event = atomic_add_return_unchecked(1, &rttest_event);
74028 td->mutexes[id] = ret ? 0 : 4;
74029 return ret ? -EINTR : 0;
74030
74031 @@ -123,9 +123,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
74032 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
74033 return ret;
74034
74035 - td->event = atomic_add_return(1, &rttest_event);
74036 + td->event = atomic_add_return_unchecked(1, &rttest_event);
74037 rt_mutex_unlock(&mutexes[id]);
74038 - td->event = atomic_add_return(1, &rttest_event);
74039 + td->event = atomic_add_return_unchecked(1, &rttest_event);
74040 td->mutexes[id] = 0;
74041 return 0;
74042
74043 @@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
74044 break;
74045
74046 td->mutexes[dat] = 2;
74047 - td->event = atomic_add_return(1, &rttest_event);
74048 + td->event = atomic_add_return_unchecked(1, &rttest_event);
74049 break;
74050
74051 case RTTEST_LOCKBKL:
74052 @@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
74053 return;
74054
74055 td->mutexes[dat] = 3;
74056 - td->event = atomic_add_return(1, &rttest_event);
74057 + td->event = atomic_add_return_unchecked(1, &rttest_event);
74058 break;
74059
74060 case RTTEST_LOCKNOWAIT:
74061 @@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
74062 return;
74063
74064 td->mutexes[dat] = 1;
74065 - td->event = atomic_add_return(1, &rttest_event);
74066 + td->event = atomic_add_return_unchecked(1, &rttest_event);
74067 return;
74068
74069 case RTTEST_LOCKBKL:
74070 diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
74071 index 29bd4ba..8c5de90 100644
74072 --- a/kernel/rtmutex.c
74073 +++ b/kernel/rtmutex.c
74074 @@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
74075 */
74076 spin_lock_irqsave(&pendowner->pi_lock, flags);
74077
74078 - WARN_ON(!pendowner->pi_blocked_on);
74079 + BUG_ON(!pendowner->pi_blocked_on);
74080 WARN_ON(pendowner->pi_blocked_on != waiter);
74081 WARN_ON(pendowner->pi_blocked_on->lock != lock);
74082
74083 diff --git a/kernel/sched.c b/kernel/sched.c
74084 index 0591df8..6e343c3 100644
74085 --- a/kernel/sched.c
74086 +++ b/kernel/sched.c
74087 @@ -2764,9 +2764,10 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
74088 {
74089 unsigned long flags;
74090 struct rq *rq;
74091 - int cpu = get_cpu();
74092
74093 #ifdef CONFIG_SMP
74094 + int cpu = get_cpu();
74095 +
74096 rq = task_rq_lock(p, &flags);
74097 p->state = TASK_WAKING;
74098
74099 @@ -5043,7 +5044,7 @@ out:
74100 * In CONFIG_NO_HZ case, the idle load balance owner will do the
74101 * rebalancing for all the cpus for whom scheduler ticks are stopped.
74102 */
74103 -static void run_rebalance_domains(struct softirq_action *h)
74104 +static void run_rebalance_domains(void)
74105 {
74106 int this_cpu = smp_processor_id();
74107 struct rq *this_rq = cpu_rq(this_cpu);
74108 @@ -5700,6 +5701,8 @@ asmlinkage void __sched schedule(void)
74109 struct rq *rq;
74110 int cpu;
74111
74112 + pax_track_stack();
74113 +
74114 need_resched:
74115 preempt_disable();
74116 cpu = smp_processor_id();
74117 @@ -5770,7 +5773,7 @@ EXPORT_SYMBOL(schedule);
74118 * Look out! "owner" is an entirely speculative pointer
74119 * access and not reliable.
74120 */
74121 -int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
74122 +int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
74123 {
74124 unsigned int cpu;
74125 struct rq *rq;
74126 @@ -5784,10 +5787,10 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
74127 * DEBUG_PAGEALLOC could have unmapped it if
74128 * the mutex owner just released it and exited.
74129 */
74130 - if (probe_kernel_address(&owner->cpu, cpu))
74131 + if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
74132 return 0;
74133 #else
74134 - cpu = owner->cpu;
74135 + cpu = task_thread_info(owner)->cpu;
74136 #endif
74137
74138 /*
74139 @@ -5816,7 +5819,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
74140 /*
74141 * Is that owner really running on that cpu?
74142 */
74143 - if (task_thread_info(rq->curr) != owner || need_resched())
74144 + if (rq->curr != owner || need_resched())
74145 return 0;
74146
74147 cpu_relax();
74148 @@ -6359,6 +6362,8 @@ int can_nice(const struct task_struct *p, const int nice)
74149 /* convert nice value [19,-20] to rlimit style value [1,40] */
74150 int nice_rlim = 20 - nice;
74151
74152 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
74153 +
74154 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
74155 capable(CAP_SYS_NICE));
74156 }
74157 @@ -6392,7 +6397,8 @@ SYSCALL_DEFINE1(nice, int, increment)
74158 if (nice > 19)
74159 nice = 19;
74160
74161 - if (increment < 0 && !can_nice(current, nice))
74162 + if (increment < 0 && (!can_nice(current, nice) ||
74163 + gr_handle_chroot_nice()))
74164 return -EPERM;
74165
74166 retval = security_task_setnice(current, nice);
74167 @@ -8774,7 +8780,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
74168 long power;
74169 int weight;
74170
74171 - WARN_ON(!sd || !sd->groups);
74172 + BUG_ON(!sd || !sd->groups);
74173
74174 if (cpu != group_first_cpu(sd->groups))
74175 return;
74176 diff --git a/kernel/signal.c b/kernel/signal.c
74177 index 2494827..cda80a0 100644
74178 --- a/kernel/signal.c
74179 +++ b/kernel/signal.c
74180 @@ -41,12 +41,12 @@
74181
74182 static struct kmem_cache *sigqueue_cachep;
74183
74184 -static void __user *sig_handler(struct task_struct *t, int sig)
74185 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
74186 {
74187 return t->sighand->action[sig - 1].sa.sa_handler;
74188 }
74189
74190 -static int sig_handler_ignored(void __user *handler, int sig)
74191 +static int sig_handler_ignored(__sighandler_t handler, int sig)
74192 {
74193 /* Is it explicitly or implicitly ignored? */
74194 return handler == SIG_IGN ||
74195 @@ -56,7 +56,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
74196 static int sig_task_ignored(struct task_struct *t, int sig,
74197 int from_ancestor_ns)
74198 {
74199 - void __user *handler;
74200 + __sighandler_t handler;
74201
74202 handler = sig_handler(t, sig);
74203
74204 @@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
74205 */
74206 user = get_uid(__task_cred(t)->user);
74207 atomic_inc(&user->sigpending);
74208 +
74209 + if (!override_rlimit)
74210 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
74211 if (override_rlimit ||
74212 atomic_read(&user->sigpending) <=
74213 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
74214 @@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
74215
74216 int unhandled_signal(struct task_struct *tsk, int sig)
74217 {
74218 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
74219 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
74220 if (is_global_init(tsk))
74221 return 1;
74222 if (handler != SIG_IGN && handler != SIG_DFL)
74223 @@ -627,6 +630,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
74224 }
74225 }
74226
74227 + /* allow glibc communication via tgkill to other threads in our
74228 + thread group */
74229 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
74230 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
74231 + && gr_handle_signal(t, sig))
74232 + return -EPERM;
74233 +
74234 return security_task_kill(t, info, sig, 0);
74235 }
74236
74237 @@ -968,7 +978,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
74238 return send_signal(sig, info, p, 1);
74239 }
74240
74241 -static int
74242 +int
74243 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
74244 {
74245 return send_signal(sig, info, t, 0);
74246 @@ -1005,6 +1015,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
74247 unsigned long int flags;
74248 int ret, blocked, ignored;
74249 struct k_sigaction *action;
74250 + int is_unhandled = 0;
74251
74252 spin_lock_irqsave(&t->sighand->siglock, flags);
74253 action = &t->sighand->action[sig-1];
74254 @@ -1019,9 +1030,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
74255 }
74256 if (action->sa.sa_handler == SIG_DFL)
74257 t->signal->flags &= ~SIGNAL_UNKILLABLE;
74258 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
74259 + is_unhandled = 1;
74260 ret = specific_send_sig_info(sig, info, t);
74261 spin_unlock_irqrestore(&t->sighand->siglock, flags);
74262
74263 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
74264 + normal operation */
74265 + if (is_unhandled) {
74266 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
74267 + gr_handle_crash(t, sig);
74268 + }
74269 +
74270 return ret;
74271 }
74272
74273 @@ -1081,8 +1101,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
74274 {
74275 int ret = check_kill_permission(sig, info, p);
74276
74277 - if (!ret && sig)
74278 + if (!ret && sig) {
74279 ret = do_send_sig_info(sig, info, p, true);
74280 + if (!ret)
74281 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
74282 + }
74283
74284 return ret;
74285 }
74286 @@ -1644,6 +1667,8 @@ void ptrace_notify(int exit_code)
74287 {
74288 siginfo_t info;
74289
74290 + pax_track_stack();
74291 +
74292 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
74293
74294 memset(&info, 0, sizeof info);
74295 @@ -2275,7 +2300,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
74296 int error = -ESRCH;
74297
74298 rcu_read_lock();
74299 - p = find_task_by_vpid(pid);
74300 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
74301 + /* allow glibc communication via tgkill to other threads in our
74302 + thread group */
74303 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
74304 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
74305 + p = find_task_by_vpid_unrestricted(pid);
74306 + else
74307 +#endif
74308 + p = find_task_by_vpid(pid);
74309 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
74310 error = check_kill_permission(sig, info, p);
74311 /*
74312 diff --git a/kernel/smp.c b/kernel/smp.c
74313 index aa9cff3..631a0de 100644
74314 --- a/kernel/smp.c
74315 +++ b/kernel/smp.c
74316 @@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void *), void *info, int wait)
74317 }
74318 EXPORT_SYMBOL(smp_call_function);
74319
74320 -void ipi_call_lock(void)
74321 +void ipi_call_lock(void) __acquires(call_function.lock)
74322 {
74323 spin_lock(&call_function.lock);
74324 }
74325
74326 -void ipi_call_unlock(void)
74327 +void ipi_call_unlock(void) __releases(call_function.lock)
74328 {
74329 spin_unlock(&call_function.lock);
74330 }
74331
74332 -void ipi_call_lock_irq(void)
74333 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
74334 {
74335 spin_lock_irq(&call_function.lock);
74336 }
74337
74338 -void ipi_call_unlock_irq(void)
74339 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
74340 {
74341 spin_unlock_irq(&call_function.lock);
74342 }
74343 diff --git a/kernel/softirq.c b/kernel/softirq.c
74344 index 04a0252..580c512 100644
74345 --- a/kernel/softirq.c
74346 +++ b/kernel/softirq.c
74347 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
74348
74349 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
74350
74351 -char *softirq_to_name[NR_SOFTIRQS] = {
74352 +const char * const softirq_to_name[NR_SOFTIRQS] = {
74353 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
74354 "TASKLET", "SCHED", "HRTIMER", "RCU"
74355 };
74356 @@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
74357
74358 asmlinkage void __do_softirq(void)
74359 {
74360 - struct softirq_action *h;
74361 + const struct softirq_action *h;
74362 __u32 pending;
74363 int max_restart = MAX_SOFTIRQ_RESTART;
74364 int cpu;
74365 @@ -233,7 +233,7 @@ restart:
74366 kstat_incr_softirqs_this_cpu(h - softirq_vec);
74367
74368 trace_softirq_entry(h, softirq_vec);
74369 - h->action(h);
74370 + h->action();
74371 trace_softirq_exit(h, softirq_vec);
74372 if (unlikely(prev_count != preempt_count())) {
74373 printk(KERN_ERR "huh, entered softirq %td %s %p"
74374 @@ -363,9 +363,11 @@ void raise_softirq(unsigned int nr)
74375 local_irq_restore(flags);
74376 }
74377
74378 -void open_softirq(int nr, void (*action)(struct softirq_action *))
74379 +void open_softirq(int nr, void (*action)(void))
74380 {
74381 - softirq_vec[nr].action = action;
74382 + pax_open_kernel();
74383 + *(void **)&softirq_vec[nr].action = action;
74384 + pax_close_kernel();
74385 }
74386
74387 /*
74388 @@ -419,7 +421,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
74389
74390 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
74391
74392 -static void tasklet_action(struct softirq_action *a)
74393 +static void tasklet_action(void)
74394 {
74395 struct tasklet_struct *list;
74396
74397 @@ -454,7 +456,7 @@ static void tasklet_action(struct softirq_action *a)
74398 }
74399 }
74400
74401 -static void tasklet_hi_action(struct softirq_action *a)
74402 +static void tasklet_hi_action(void)
74403 {
74404 struct tasklet_struct *list;
74405
74406 diff --git a/kernel/sys.c b/kernel/sys.c
74407 index e9512b1..3c265de 100644
74408 --- a/kernel/sys.c
74409 +++ b/kernel/sys.c
74410 @@ -133,6 +133,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
74411 error = -EACCES;
74412 goto out;
74413 }
74414 +
74415 + if (gr_handle_chroot_setpriority(p, niceval)) {
74416 + error = -EACCES;
74417 + goto out;
74418 + }
74419 +
74420 no_nice = security_task_setnice(p, niceval);
74421 if (no_nice) {
74422 error = no_nice;
74423 @@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
74424 !(user = find_user(who)))
74425 goto out_unlock; /* No processes for this user */
74426
74427 - do_each_thread(g, p)
74428 + do_each_thread(g, p) {
74429 if (__task_cred(p)->uid == who)
74430 error = set_one_prio(p, niceval, error);
74431 - while_each_thread(g, p);
74432 + } while_each_thread(g, p);
74433 if (who != cred->uid)
74434 free_uid(user); /* For find_user() */
74435 break;
74436 @@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
74437 !(user = find_user(who)))
74438 goto out_unlock; /* No processes for this user */
74439
74440 - do_each_thread(g, p)
74441 + do_each_thread(g, p) {
74442 if (__task_cred(p)->uid == who) {
74443 niceval = 20 - task_nice(p);
74444 if (niceval > retval)
74445 retval = niceval;
74446 }
74447 - while_each_thread(g, p);
74448 + } while_each_thread(g, p);
74449 if (who != cred->uid)
74450 free_uid(user); /* for find_user() */
74451 break;
74452 @@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
74453 goto error;
74454 }
74455
74456 + if (gr_check_group_change(new->gid, new->egid, -1))
74457 + goto error;
74458 +
74459 if (rgid != (gid_t) -1 ||
74460 (egid != (gid_t) -1 && egid != old->gid))
74461 new->sgid = new->egid;
74462 @@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
74463 goto error;
74464
74465 retval = -EPERM;
74466 +
74467 + if (gr_check_group_change(gid, gid, gid))
74468 + goto error;
74469 +
74470 if (capable(CAP_SETGID))
74471 new->gid = new->egid = new->sgid = new->fsgid = gid;
74472 else if (gid == old->gid || gid == old->sgid)
74473 @@ -567,12 +580,19 @@ static int set_user(struct cred *new)
74474 if (!new_user)
74475 return -EAGAIN;
74476
74477 + /*
74478 + * We don't fail in case of NPROC limit excess here because too many
74479 + * poorly written programs don't check set*uid() return code, assuming
74480 + * it never fails if called by root. We may still enforce NPROC limit
74481 + * for programs doing set*uid()+execve() by harmlessly deferring the
74482 + * failure to the execve() stage.
74483 + */
74484 if (atomic_read(&new_user->processes) >=
74485 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
74486 - new_user != INIT_USER) {
74487 - free_uid(new_user);
74488 - return -EAGAIN;
74489 - }
74490 + new_user != INIT_USER)
74491 + current->flags |= PF_NPROC_EXCEEDED;
74492 + else
74493 + current->flags &= ~PF_NPROC_EXCEEDED;
74494
74495 free_uid(new->user);
74496 new->user = new_user;
74497 @@ -627,6 +647,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
74498 goto error;
74499 }
74500
74501 + if (gr_check_user_change(new->uid, new->euid, -1))
74502 + goto error;
74503 +
74504 if (new->uid != old->uid) {
74505 retval = set_user(new);
74506 if (retval < 0)
74507 @@ -675,6 +698,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
74508 goto error;
74509
74510 retval = -EPERM;
74511 +
74512 + if (gr_check_crash_uid(uid))
74513 + goto error;
74514 + if (gr_check_user_change(uid, uid, uid))
74515 + goto error;
74516 +
74517 if (capable(CAP_SETUID)) {
74518 new->suid = new->uid = uid;
74519 if (uid != old->uid) {
74520 @@ -732,6 +761,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
74521 goto error;
74522 }
74523
74524 + if (gr_check_user_change(ruid, euid, -1))
74525 + goto error;
74526 +
74527 if (ruid != (uid_t) -1) {
74528 new->uid = ruid;
74529 if (ruid != old->uid) {
74530 @@ -800,6 +832,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
74531 goto error;
74532 }
74533
74534 + if (gr_check_group_change(rgid, egid, -1))
74535 + goto error;
74536 +
74537 if (rgid != (gid_t) -1)
74538 new->gid = rgid;
74539 if (egid != (gid_t) -1)
74540 @@ -849,6 +884,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
74541 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
74542 goto error;
74543
74544 + if (gr_check_user_change(-1, -1, uid))
74545 + goto error;
74546 +
74547 if (uid == old->uid || uid == old->euid ||
74548 uid == old->suid || uid == old->fsuid ||
74549 capable(CAP_SETUID)) {
74550 @@ -889,6 +927,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
74551 if (gid == old->gid || gid == old->egid ||
74552 gid == old->sgid || gid == old->fsgid ||
74553 capable(CAP_SETGID)) {
74554 + if (gr_check_group_change(-1, -1, gid))
74555 + goto error;
74556 +
74557 if (gid != old_fsgid) {
74558 new->fsgid = gid;
74559 goto change_okay;
74560 @@ -1454,7 +1495,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
74561 error = get_dumpable(me->mm);
74562 break;
74563 case PR_SET_DUMPABLE:
74564 - if (arg2 < 0 || arg2 > 1) {
74565 + if (arg2 > 1) {
74566 error = -EINVAL;
74567 break;
74568 }
74569 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
74570 index b8bd058..ab6a76be 100644
74571 --- a/kernel/sysctl.c
74572 +++ b/kernel/sysctl.c
74573 @@ -63,6 +63,13 @@
74574 static int deprecated_sysctl_warning(struct __sysctl_args *args);
74575
74576 #if defined(CONFIG_SYSCTL)
74577 +#include <linux/grsecurity.h>
74578 +#include <linux/grinternal.h>
74579 +
74580 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
74581 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
74582 + const int op);
74583 +extern int gr_handle_chroot_sysctl(const int op);
74584
74585 /* External variables not in a header file. */
74586 extern int C_A_D;
74587 @@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_table *table, int write,
74588 static int proc_taint(struct ctl_table *table, int write,
74589 void __user *buffer, size_t *lenp, loff_t *ppos);
74590 #endif
74591 +extern ctl_table grsecurity_table[];
74592
74593 static struct ctl_table root_table[];
74594 static struct ctl_table_root sysctl_table_root;
74595 @@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
74596 int sysctl_legacy_va_layout;
74597 #endif
74598
74599 +#ifdef CONFIG_PAX_SOFTMODE
74600 +static ctl_table pax_table[] = {
74601 + {
74602 + .ctl_name = CTL_UNNUMBERED,
74603 + .procname = "softmode",
74604 + .data = &pax_softmode,
74605 + .maxlen = sizeof(unsigned int),
74606 + .mode = 0600,
74607 + .proc_handler = &proc_dointvec,
74608 + },
74609 +
74610 + { .ctl_name = 0 }
74611 +};
74612 +#endif
74613 +
74614 extern int prove_locking;
74615 extern int lock_stat;
74616
74617 @@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
74618 #endif
74619
74620 static struct ctl_table kern_table[] = {
74621 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
74622 + {
74623 + .ctl_name = CTL_UNNUMBERED,
74624 + .procname = "grsecurity",
74625 + .mode = 0500,
74626 + .child = grsecurity_table,
74627 + },
74628 +#endif
74629 +
74630 +#ifdef CONFIG_PAX_SOFTMODE
74631 + {
74632 + .ctl_name = CTL_UNNUMBERED,
74633 + .procname = "pax",
74634 + .mode = 0500,
74635 + .child = pax_table,
74636 + },
74637 +#endif
74638 +
74639 {
74640 .ctl_name = CTL_UNNUMBERED,
74641 .procname = "sched_child_runs_first",
74642 @@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
74643 .data = &modprobe_path,
74644 .maxlen = KMOD_PATH_LEN,
74645 .mode = 0644,
74646 - .proc_handler = &proc_dostring,
74647 - .strategy = &sysctl_string,
74648 + .proc_handler = &proc_dostring_modpriv,
74649 + .strategy = &sysctl_string_modpriv,
74650 },
74651 {
74652 .ctl_name = CTL_UNNUMBERED,
74653 @@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
74654 .mode = 0644,
74655 .proc_handler = &proc_dointvec
74656 },
74657 + {
74658 + .procname = "heap_stack_gap",
74659 + .data = &sysctl_heap_stack_gap,
74660 + .maxlen = sizeof(sysctl_heap_stack_gap),
74661 + .mode = 0644,
74662 + .proc_handler = proc_doulongvec_minmax,
74663 + },
74664 #else
74665 {
74666 .ctl_name = CTL_UNNUMBERED,
74667 @@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl_table_root *root,
74668 return 0;
74669 }
74670
74671 +static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
74672 +
74673 static int parse_table(int __user *name, int nlen,
74674 void __user *oldval, size_t __user *oldlenp,
74675 void __user *newval, size_t newlen,
74676 @@ -1821,7 +1871,7 @@ repeat:
74677 if (n == table->ctl_name) {
74678 int error;
74679 if (table->child) {
74680 - if (sysctl_perm(root, table, MAY_EXEC))
74681 + if (sysctl_perm_nochk(root, table, MAY_EXEC))
74682 return -EPERM;
74683 name++;
74684 nlen--;
74685 @@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
74686 int error;
74687 int mode;
74688
74689 + if (table->parent != NULL && table->parent->procname != NULL &&
74690 + table->procname != NULL &&
74691 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
74692 + return -EACCES;
74693 + if (gr_handle_chroot_sysctl(op))
74694 + return -EACCES;
74695 + error = gr_handle_sysctl(table, op);
74696 + if (error)
74697 + return error;
74698 +
74699 + error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
74700 + if (error)
74701 + return error;
74702 +
74703 + if (root->permissions)
74704 + mode = root->permissions(root, current->nsproxy, table);
74705 + else
74706 + mode = table->mode;
74707 +
74708 + return test_perm(mode, op);
74709 +}
74710 +
74711 +int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
74712 +{
74713 + int error;
74714 + int mode;
74715 +
74716 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
74717 if (error)
74718 return error;
74719 @@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *table, int write,
74720 buffer, lenp, ppos);
74721 }
74722
74723 +int proc_dostring_modpriv(struct ctl_table *table, int write,
74724 + void __user *buffer, size_t *lenp, loff_t *ppos)
74725 +{
74726 + if (write && !capable(CAP_SYS_MODULE))
74727 + return -EPERM;
74728 +
74729 + return _proc_do_string(table->data, table->maxlen, write,
74730 + buffer, lenp, ppos);
74731 +}
74732 +
74733
74734 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
74735 int *valp,
74736 @@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
74737 vleft = table->maxlen / sizeof(unsigned long);
74738 left = *lenp;
74739
74740 - for (; left && vleft--; i++, min++, max++, first=0) {
74741 + for (; left && vleft--; i++, first=0) {
74742 if (write) {
74743 while (left) {
74744 char c;
74745 @@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *table, int write,
74746 return -ENOSYS;
74747 }
74748
74749 +int proc_dostring_modpriv(struct ctl_table *table, int write,
74750 + void __user *buffer, size_t *lenp, loff_t *ppos)
74751 +{
74752 + return -ENOSYS;
74753 +}
74754 +
74755 int proc_dointvec(struct ctl_table *table, int write,
74756 void __user *buffer, size_t *lenp, loff_t *ppos)
74757 {
74758 @@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *table,
74759 return 1;
74760 }
74761
74762 +int sysctl_string_modpriv(struct ctl_table *table,
74763 + void __user *oldval, size_t __user *oldlenp,
74764 + void __user *newval, size_t newlen)
74765 +{
74766 + if (newval && newlen && !capable(CAP_SYS_MODULE))
74767 + return -EPERM;
74768 +
74769 + return sysctl_string(table, oldval, oldlenp, newval, newlen);
74770 +}
74771 +
74772 /*
74773 * This function makes sure that all of the integers in the vector
74774 * are between the minimum and maximum values given in the arrays
74775 @@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *table,
74776 return -ENOSYS;
74777 }
74778
74779 +int sysctl_string_modpriv(struct ctl_table *table,
74780 + void __user *oldval, size_t __user *oldlenp,
74781 + void __user *newval, size_t newlen)
74782 +{
74783 + return -ENOSYS;
74784 +}
74785 +
74786 int sysctl_intvec(struct ctl_table *table,
74787 void __user *oldval, size_t __user *oldlenp,
74788 void __user *newval, size_t newlen)
74789 @@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
74790 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
74791 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
74792 EXPORT_SYMBOL(proc_dostring);
74793 +EXPORT_SYMBOL(proc_dostring_modpriv);
74794 EXPORT_SYMBOL(proc_doulongvec_minmax);
74795 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
74796 EXPORT_SYMBOL(register_sysctl_table);
74797 @@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
74798 EXPORT_SYMBOL(sysctl_jiffies);
74799 EXPORT_SYMBOL(sysctl_ms_jiffies);
74800 EXPORT_SYMBOL(sysctl_string);
74801 +EXPORT_SYMBOL(sysctl_string_modpriv);
74802 EXPORT_SYMBOL(sysctl_data);
74803 EXPORT_SYMBOL(unregister_sysctl_table);
74804 diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
74805 index 469193c..ea3ecb2 100644
74806 --- a/kernel/sysctl_check.c
74807 +++ b/kernel/sysctl_check.c
74808 @@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
74809 } else {
74810 if ((table->strategy == sysctl_data) ||
74811 (table->strategy == sysctl_string) ||
74812 + (table->strategy == sysctl_string_modpriv) ||
74813 (table->strategy == sysctl_intvec) ||
74814 (table->strategy == sysctl_jiffies) ||
74815 (table->strategy == sysctl_ms_jiffies) ||
74816 (table->proc_handler == proc_dostring) ||
74817 + (table->proc_handler == proc_dostring_modpriv) ||
74818 (table->proc_handler == proc_dointvec) ||
74819 (table->proc_handler == proc_dointvec_minmax) ||
74820 (table->proc_handler == proc_dointvec_jiffies) ||
74821 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
74822 index a4ef542..798bcd7 100644
74823 --- a/kernel/taskstats.c
74824 +++ b/kernel/taskstats.c
74825 @@ -26,9 +26,12 @@
74826 #include <linux/cgroup.h>
74827 #include <linux/fs.h>
74828 #include <linux/file.h>
74829 +#include <linux/grsecurity.h>
74830 #include <net/genetlink.h>
74831 #include <asm/atomic.h>
74832
74833 +extern int gr_is_taskstats_denied(int pid);
74834 +
74835 /*
74836 * Maximum length of a cpumask that can be specified in
74837 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
74838 @@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
74839 size_t size;
74840 cpumask_var_t mask;
74841
74842 + if (gr_is_taskstats_denied(current->pid))
74843 + return -EACCES;
74844 +
74845 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
74846 return -ENOMEM;
74847
74848 diff --git a/kernel/time.c b/kernel/time.c
74849 index 33df60e..ca768bd 100644
74850 --- a/kernel/time.c
74851 +++ b/kernel/time.c
74852 @@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
74853 return error;
74854
74855 if (tz) {
74856 + /* we log in do_settimeofday called below, so don't log twice
74857 + */
74858 + if (!tv)
74859 + gr_log_timechange();
74860 +
74861 /* SMP safe, global irq locking makes it work. */
74862 sys_tz = *tz;
74863 update_vsyscall_tz();
74864 @@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
74865 * Avoid unnecessary multiplications/divisions in the
74866 * two most common HZ cases:
74867 */
74868 -unsigned int inline jiffies_to_msecs(const unsigned long j)
74869 +inline unsigned int jiffies_to_msecs(const unsigned long j)
74870 {
74871 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
74872 return (MSEC_PER_SEC / HZ) * j;
74873 @@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(const unsigned long j)
74874 }
74875 EXPORT_SYMBOL(jiffies_to_msecs);
74876
74877 -unsigned int inline jiffies_to_usecs(const unsigned long j)
74878 +inline unsigned int jiffies_to_usecs(const unsigned long j)
74879 {
74880 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
74881 return (USEC_PER_SEC / HZ) * j;
74882 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
74883 index 57b953f..06f149f 100644
74884 --- a/kernel/time/tick-broadcast.c
74885 +++ b/kernel/time/tick-broadcast.c
74886 @@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
74887 * then clear the broadcast bit.
74888 */
74889 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
74890 - int cpu = smp_processor_id();
74891 + cpu = smp_processor_id();
74892
74893 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
74894 tick_broadcast_clear_oneshot(cpu);
74895 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
74896 index 4a71cff..ffb5548 100644
74897 --- a/kernel/time/timekeeping.c
74898 +++ b/kernel/time/timekeeping.c
74899 @@ -14,6 +14,7 @@
74900 #include <linux/init.h>
74901 #include <linux/mm.h>
74902 #include <linux/sched.h>
74903 +#include <linux/grsecurity.h>
74904 #include <linux/sysdev.h>
74905 #include <linux/clocksource.h>
74906 #include <linux/jiffies.h>
74907 @@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
74908 */
74909 struct timespec ts = xtime;
74910 timespec_add_ns(&ts, nsec);
74911 - ACCESS_ONCE(xtime_cache) = ts;
74912 + ACCESS_ONCE_RW(xtime_cache) = ts;
74913 }
74914
74915 /* must hold xtime_lock */
74916 @@ -337,6 +338,8 @@ int do_settimeofday(struct timespec *tv)
74917 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
74918 return -EINVAL;
74919
74920 + gr_log_timechange();
74921 +
74922 write_seqlock_irqsave(&xtime_lock, flags);
74923
74924 timekeeping_forward_now();
74925 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
74926 index 54c0dda..e9095d9 100644
74927 --- a/kernel/time/timer_list.c
74928 +++ b/kernel/time/timer_list.c
74929 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
74930
74931 static void print_name_offset(struct seq_file *m, void *sym)
74932 {
74933 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74934 + SEQ_printf(m, "<%p>", NULL);
74935 +#else
74936 char symname[KSYM_NAME_LEN];
74937
74938 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
74939 SEQ_printf(m, "<%p>", sym);
74940 else
74941 SEQ_printf(m, "%s", symname);
74942 +#endif
74943 }
74944
74945 static void
74946 @@ -112,7 +116,11 @@ next_one:
74947 static void
74948 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
74949 {
74950 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74951 + SEQ_printf(m, " .base: %p\n", NULL);
74952 +#else
74953 SEQ_printf(m, " .base: %p\n", base);
74954 +#endif
74955 SEQ_printf(m, " .index: %d\n",
74956 base->index);
74957 SEQ_printf(m, " .resolution: %Lu nsecs\n",
74958 @@ -289,7 +297,11 @@ static int __init init_timer_list_procfs(void)
74959 {
74960 struct proc_dir_entry *pe;
74961
74962 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
74963 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
74964 +#else
74965 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
74966 +#endif
74967 if (!pe)
74968 return -ENOMEM;
74969 return 0;
74970 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
74971 index ee5681f..634089b 100644
74972 --- a/kernel/time/timer_stats.c
74973 +++ b/kernel/time/timer_stats.c
74974 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
74975 static unsigned long nr_entries;
74976 static struct entry entries[MAX_ENTRIES];
74977
74978 -static atomic_t overflow_count;
74979 +static atomic_unchecked_t overflow_count;
74980
74981 /*
74982 * The entries are in a hash-table, for fast lookup:
74983 @@ -140,7 +140,7 @@ static void reset_entries(void)
74984 nr_entries = 0;
74985 memset(entries, 0, sizeof(entries));
74986 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
74987 - atomic_set(&overflow_count, 0);
74988 + atomic_set_unchecked(&overflow_count, 0);
74989 }
74990
74991 static struct entry *alloc_entry(void)
74992 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
74993 if (likely(entry))
74994 entry->count++;
74995 else
74996 - atomic_inc(&overflow_count);
74997 + atomic_inc_unchecked(&overflow_count);
74998
74999 out_unlock:
75000 spin_unlock_irqrestore(lock, flags);
75001 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
75002
75003 static void print_name_offset(struct seq_file *m, unsigned long addr)
75004 {
75005 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75006 + seq_printf(m, "<%p>", NULL);
75007 +#else
75008 char symname[KSYM_NAME_LEN];
75009
75010 if (lookup_symbol_name(addr, symname) < 0)
75011 seq_printf(m, "<%p>", (void *)addr);
75012 else
75013 seq_printf(m, "%s", symname);
75014 +#endif
75015 }
75016
75017 static int tstats_show(struct seq_file *m, void *v)
75018 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
75019
75020 seq_puts(m, "Timer Stats Version: v0.2\n");
75021 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
75022 - if (atomic_read(&overflow_count))
75023 + if (atomic_read_unchecked(&overflow_count))
75024 seq_printf(m, "Overflow: %d entries\n",
75025 - atomic_read(&overflow_count));
75026 + atomic_read_unchecked(&overflow_count));
75027
75028 for (i = 0; i < nr_entries; i++) {
75029 entry = entries + i;
75030 @@ -415,7 +419,11 @@ static int __init init_tstats_procfs(void)
75031 {
75032 struct proc_dir_entry *pe;
75033
75034 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
75035 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
75036 +#else
75037 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
75038 +#endif
75039 if (!pe)
75040 return -ENOMEM;
75041 return 0;
75042 diff --git a/kernel/timer.c b/kernel/timer.c
75043 index cb3c1f1..8bf5526 100644
75044 --- a/kernel/timer.c
75045 +++ b/kernel/timer.c
75046 @@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
75047 /*
75048 * This function runs timers and the timer-tq in bottom half context.
75049 */
75050 -static void run_timer_softirq(struct softirq_action *h)
75051 +static void run_timer_softirq(void)
75052 {
75053 struct tvec_base *base = __get_cpu_var(tvec_bases);
75054
75055 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
75056 index d9d6206..f19467e 100644
75057 --- a/kernel/trace/blktrace.c
75058 +++ b/kernel/trace/blktrace.c
75059 @@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
75060 struct blk_trace *bt = filp->private_data;
75061 char buf[16];
75062
75063 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
75064 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
75065
75066 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
75067 }
75068 @@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
75069 return 1;
75070
75071 bt = buf->chan->private_data;
75072 - atomic_inc(&bt->dropped);
75073 + atomic_inc_unchecked(&bt->dropped);
75074 return 0;
75075 }
75076
75077 @@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
75078
75079 bt->dir = dir;
75080 bt->dev = dev;
75081 - atomic_set(&bt->dropped, 0);
75082 + atomic_set_unchecked(&bt->dropped, 0);
75083
75084 ret = -EIO;
75085 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
75086 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
75087 index 4872937..c794d40 100644
75088 --- a/kernel/trace/ftrace.c
75089 +++ b/kernel/trace/ftrace.c
75090 @@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
75091
75092 ip = rec->ip;
75093
75094 + ret = ftrace_arch_code_modify_prepare();
75095 + FTRACE_WARN_ON(ret);
75096 + if (ret)
75097 + return 0;
75098 +
75099 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
75100 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
75101 if (ret) {
75102 ftrace_bug(ret, ip);
75103 rec->flags |= FTRACE_FL_FAILED;
75104 - return 0;
75105 }
75106 - return 1;
75107 + return ret ? 0 : 1;
75108 }
75109
75110 /*
75111 diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
75112 index e749a05..19c6e94 100644
75113 --- a/kernel/trace/ring_buffer.c
75114 +++ b/kernel/trace/ring_buffer.c
75115 @@ -606,7 +606,7 @@ static struct list_head *rb_list_head(struct list_head *list)
75116 * the reader page). But if the next page is a header page,
75117 * its flags will be non zero.
75118 */
75119 -static int inline
75120 +static inline int
75121 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
75122 struct buffer_page *page, struct list_head *list)
75123 {
75124 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
75125 index a2a2d1f..7f32b09 100644
75126 --- a/kernel/trace/trace.c
75127 +++ b/kernel/trace/trace.c
75128 @@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
75129 size_t rem;
75130 unsigned int i;
75131
75132 + pax_track_stack();
75133 +
75134 /* copy the tracer to avoid using a global lock all around */
75135 mutex_lock(&trace_types_lock);
75136 if (unlikely(old_tracer != current_trace && current_trace)) {
75137 @@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
75138 int entries, size, i;
75139 size_t ret;
75140
75141 + pax_track_stack();
75142 +
75143 if (*ppos & (PAGE_SIZE - 1)) {
75144 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
75145 return -EINVAL;
75146 @@ -3816,10 +3820,9 @@ static const struct file_operations tracing_dyn_info_fops = {
75147 };
75148 #endif
75149
75150 -static struct dentry *d_tracer;
75151 -
75152 struct dentry *tracing_init_dentry(void)
75153 {
75154 + static struct dentry *d_tracer;
75155 static int once;
75156
75157 if (d_tracer)
75158 @@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
75159 return d_tracer;
75160 }
75161
75162 -static struct dentry *d_percpu;
75163 -
75164 struct dentry *tracing_dentry_percpu(void)
75165 {
75166 + static struct dentry *d_percpu;
75167 static int once;
75168 struct dentry *d_tracer;
75169
75170 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
75171 index d128f65..f37b4af 100644
75172 --- a/kernel/trace/trace_events.c
75173 +++ b/kernel/trace/trace_events.c
75174 @@ -951,13 +951,10 @@ static LIST_HEAD(ftrace_module_file_list);
75175 * Modules must own their file_operations to keep up with
75176 * reference counting.
75177 */
75178 +
75179 struct ftrace_module_file_ops {
75180 struct list_head list;
75181 struct module *mod;
75182 - struct file_operations id;
75183 - struct file_operations enable;
75184 - struct file_operations format;
75185 - struct file_operations filter;
75186 };
75187
75188 static void remove_subsystem_dir(const char *name)
75189 @@ -1004,17 +1001,12 @@ trace_create_file_ops(struct module *mod)
75190
75191 file_ops->mod = mod;
75192
75193 - file_ops->id = ftrace_event_id_fops;
75194 - file_ops->id.owner = mod;
75195 -
75196 - file_ops->enable = ftrace_enable_fops;
75197 - file_ops->enable.owner = mod;
75198 -
75199 - file_ops->filter = ftrace_event_filter_fops;
75200 - file_ops->filter.owner = mod;
75201 -
75202 - file_ops->format = ftrace_event_format_fops;
75203 - file_ops->format.owner = mod;
75204 + pax_open_kernel();
75205 + *(void **)&mod->trace_id.owner = mod;
75206 + *(void **)&mod->trace_enable.owner = mod;
75207 + *(void **)&mod->trace_filter.owner = mod;
75208 + *(void **)&mod->trace_format.owner = mod;
75209 + pax_close_kernel();
75210
75211 list_add(&file_ops->list, &ftrace_module_file_list);
75212
75213 @@ -1063,8 +1055,8 @@ static void trace_module_add_events(struct module *mod)
75214 call->mod = mod;
75215 list_add(&call->list, &ftrace_events);
75216 event_create_dir(call, d_events,
75217 - &file_ops->id, &file_ops->enable,
75218 - &file_ops->filter, &file_ops->format);
75219 + &mod->trace_id, &mod->trace_enable,
75220 + &mod->trace_filter, &mod->trace_format);
75221 }
75222 }
75223
75224 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
75225 index 0acd834..b800b56 100644
75226 --- a/kernel/trace/trace_mmiotrace.c
75227 +++ b/kernel/trace/trace_mmiotrace.c
75228 @@ -23,7 +23,7 @@ struct header_iter {
75229 static struct trace_array *mmio_trace_array;
75230 static bool overrun_detected;
75231 static unsigned long prev_overruns;
75232 -static atomic_t dropped_count;
75233 +static atomic_unchecked_t dropped_count;
75234
75235 static void mmio_reset_data(struct trace_array *tr)
75236 {
75237 @@ -126,7 +126,7 @@ static void mmio_close(struct trace_iterator *iter)
75238
75239 static unsigned long count_overruns(struct trace_iterator *iter)
75240 {
75241 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
75242 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
75243 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
75244
75245 if (over > prev_overruns)
75246 @@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
75247 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
75248 sizeof(*entry), 0, pc);
75249 if (!event) {
75250 - atomic_inc(&dropped_count);
75251 + atomic_inc_unchecked(&dropped_count);
75252 return;
75253 }
75254 entry = ring_buffer_event_data(event);
75255 @@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
75256 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
75257 sizeof(*entry), 0, pc);
75258 if (!event) {
75259 - atomic_inc(&dropped_count);
75260 + atomic_inc_unchecked(&dropped_count);
75261 return;
75262 }
75263 entry = ring_buffer_event_data(event);
75264 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
75265 index b6c12c6..41fdc53 100644
75266 --- a/kernel/trace/trace_output.c
75267 +++ b/kernel/trace/trace_output.c
75268 @@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
75269 return 0;
75270 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
75271 if (!IS_ERR(p)) {
75272 - p = mangle_path(s->buffer + s->len, p, "\n");
75273 + p = mangle_path(s->buffer + s->len, p, "\n\\");
75274 if (p) {
75275 s->len = p - s->buffer;
75276 return 1;
75277 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
75278 index 8504ac7..ecf0adb 100644
75279 --- a/kernel/trace/trace_stack.c
75280 +++ b/kernel/trace/trace_stack.c
75281 @@ -50,7 +50,7 @@ static inline void check_stack(void)
75282 return;
75283
75284 /* we do not handle interrupt stacks yet */
75285 - if (!object_is_on_stack(&this_size))
75286 + if (!object_starts_on_stack(&this_size))
75287 return;
75288
75289 local_irq_save(flags);
75290 diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
75291 index 40cafb0..d5ead43 100644
75292 --- a/kernel/trace/trace_workqueue.c
75293 +++ b/kernel/trace/trace_workqueue.c
75294 @@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
75295 int cpu;
75296 pid_t pid;
75297 /* Can be inserted from interrupt or user context, need to be atomic */
75298 - atomic_t inserted;
75299 + atomic_unchecked_t inserted;
75300 /*
75301 * Don't need to be atomic, works are serialized in a single workqueue thread
75302 * on a single CPU.
75303 @@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
75304 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
75305 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
75306 if (node->pid == wq_thread->pid) {
75307 - atomic_inc(&node->inserted);
75308 + atomic_inc_unchecked(&node->inserted);
75309 goto found;
75310 }
75311 }
75312 @@ -205,7 +205,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
75313 tsk = get_pid_task(pid, PIDTYPE_PID);
75314 if (tsk) {
75315 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
75316 - atomic_read(&cws->inserted), cws->executed,
75317 + atomic_read_unchecked(&cws->inserted), cws->executed,
75318 tsk->comm);
75319 put_task_struct(tsk);
75320 }
75321 diff --git a/kernel/user.c b/kernel/user.c
75322 index 1b91701..8795237 100644
75323 --- a/kernel/user.c
75324 +++ b/kernel/user.c
75325 @@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
75326 spin_lock_irq(&uidhash_lock);
75327 up = uid_hash_find(uid, hashent);
75328 if (up) {
75329 + put_user_ns(ns);
75330 key_put(new->uid_keyring);
75331 key_put(new->session_keyring);
75332 kmem_cache_free(uid_cachep, new);
75333 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
75334 index 234ceb1..ad74049 100644
75335 --- a/lib/Kconfig.debug
75336 +++ b/lib/Kconfig.debug
75337 @@ -905,7 +905,7 @@ config LATENCYTOP
75338 select STACKTRACE
75339 select SCHEDSTATS
75340 select SCHED_DEBUG
75341 - depends on HAVE_LATENCYTOP_SUPPORT
75342 + depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
75343 help
75344 Enable this option if you want to use the LatencyTOP tool
75345 to find out which userspace is blocking on what kernel operations.
75346 diff --git a/lib/bitmap.c b/lib/bitmap.c
75347 index 7025658..8d14cab 100644
75348 --- a/lib/bitmap.c
75349 +++ b/lib/bitmap.c
75350 @@ -341,7 +341,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
75351 {
75352 int c, old_c, totaldigits, ndigits, nchunks, nbits;
75353 u32 chunk;
75354 - const char __user *ubuf = buf;
75355 + const char __user *ubuf = (const char __force_user *)buf;
75356
75357 bitmap_zero(maskp, nmaskbits);
75358
75359 @@ -426,7 +426,7 @@ int bitmap_parse_user(const char __user *ubuf,
75360 {
75361 if (!access_ok(VERIFY_READ, ubuf, ulen))
75362 return -EFAULT;
75363 - return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
75364 + return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
75365 }
75366 EXPORT_SYMBOL(bitmap_parse_user);
75367
75368 diff --git a/lib/bug.c b/lib/bug.c
75369 index 300e41a..2779eb0 100644
75370 --- a/lib/bug.c
75371 +++ b/lib/bug.c
75372 @@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
75373 return BUG_TRAP_TYPE_NONE;
75374
75375 bug = find_bug(bugaddr);
75376 + if (!bug)
75377 + return BUG_TRAP_TYPE_NONE;
75378
75379 printk(KERN_EMERG "------------[ cut here ]------------\n");
75380
75381 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
75382 index 2b413db..e21d207 100644
75383 --- a/lib/debugobjects.c
75384 +++ b/lib/debugobjects.c
75385 @@ -277,7 +277,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
75386 if (limit > 4)
75387 return;
75388
75389 - is_on_stack = object_is_on_stack(addr);
75390 + is_on_stack = object_starts_on_stack(addr);
75391 if (is_on_stack == onstack)
75392 return;
75393
75394 diff --git a/lib/devres.c b/lib/devres.c
75395 index 72c8909..7543868 100644
75396 --- a/lib/devres.c
75397 +++ b/lib/devres.c
75398 @@ -80,7 +80,7 @@ void devm_iounmap(struct device *dev, void __iomem *addr)
75399 {
75400 iounmap(addr);
75401 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
75402 - (void *)addr));
75403 + (void __force *)addr));
75404 }
75405 EXPORT_SYMBOL(devm_iounmap);
75406
75407 @@ -140,7 +140,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
75408 {
75409 ioport_unmap(addr);
75410 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
75411 - devm_ioport_map_match, (void *)addr));
75412 + devm_ioport_map_match, (void __force *)addr));
75413 }
75414 EXPORT_SYMBOL(devm_ioport_unmap);
75415
75416 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
75417 index 084e879..0674448 100644
75418 --- a/lib/dma-debug.c
75419 +++ b/lib/dma-debug.c
75420 @@ -861,7 +861,7 @@ out:
75421
75422 static void check_for_stack(struct device *dev, void *addr)
75423 {
75424 - if (object_is_on_stack(addr))
75425 + if (object_starts_on_stack(addr))
75426 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
75427 "stack [addr=%p]\n", addr);
75428 }
75429 diff --git a/lib/idr.c b/lib/idr.c
75430 index eda7ba3..915dfae 100644
75431 --- a/lib/idr.c
75432 +++ b/lib/idr.c
75433 @@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
75434 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
75435
75436 /* if already at the top layer, we need to grow */
75437 - if (id >= 1 << (idp->layers * IDR_BITS)) {
75438 + if (id >= (1 << (idp->layers * IDR_BITS))) {
75439 *starting_id = id;
75440 return IDR_NEED_TO_GROW;
75441 }
75442 diff --git a/lib/inflate.c b/lib/inflate.c
75443 index d102559..4215f31 100644
75444 --- a/lib/inflate.c
75445 +++ b/lib/inflate.c
75446 @@ -266,7 +266,7 @@ static void free(void *where)
75447 malloc_ptr = free_mem_ptr;
75448 }
75449 #else
75450 -#define malloc(a) kmalloc(a, GFP_KERNEL)
75451 +#define malloc(a) kmalloc((a), GFP_KERNEL)
75452 #define free(a) kfree(a)
75453 #endif
75454
75455 diff --git a/lib/kobject.c b/lib/kobject.c
75456 index b512b74..8115eb1 100644
75457 --- a/lib/kobject.c
75458 +++ b/lib/kobject.c
75459 @@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
75460 return ret;
75461 }
75462
75463 -struct sysfs_ops kobj_sysfs_ops = {
75464 +const struct sysfs_ops kobj_sysfs_ops = {
75465 .show = kobj_attr_show,
75466 .store = kobj_attr_store,
75467 };
75468 @@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
75469 * If the kset was not able to be created, NULL will be returned.
75470 */
75471 static struct kset *kset_create(const char *name,
75472 - struct kset_uevent_ops *uevent_ops,
75473 + const struct kset_uevent_ops *uevent_ops,
75474 struct kobject *parent_kobj)
75475 {
75476 struct kset *kset;
75477 @@ -832,7 +832,7 @@ static struct kset *kset_create(const char *name,
75478 * If the kset was not able to be created, NULL will be returned.
75479 */
75480 struct kset *kset_create_and_add(const char *name,
75481 - struct kset_uevent_ops *uevent_ops,
75482 + const struct kset_uevent_ops *uevent_ops,
75483 struct kobject *parent_kobj)
75484 {
75485 struct kset *kset;
75486 diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
75487 index 507b821..0bf8ed0 100644
75488 --- a/lib/kobject_uevent.c
75489 +++ b/lib/kobject_uevent.c
75490 @@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
75491 const char *subsystem;
75492 struct kobject *top_kobj;
75493 struct kset *kset;
75494 - struct kset_uevent_ops *uevent_ops;
75495 + const struct kset_uevent_ops *uevent_ops;
75496 u64 seq;
75497 int i = 0;
75498 int retval = 0;
75499 diff --git a/lib/kref.c b/lib/kref.c
75500 index 9ecd6e8..12c94c1 100644
75501 --- a/lib/kref.c
75502 +++ b/lib/kref.c
75503 @@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
75504 */
75505 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
75506 {
75507 - WARN_ON(release == NULL);
75508 + BUG_ON(release == NULL);
75509 WARN_ON(release == (void (*)(struct kref *))kfree);
75510
75511 if (atomic_dec_and_test(&kref->refcount)) {
75512 diff --git a/lib/parser.c b/lib/parser.c
75513 index b00d020..1b34325 100644
75514 --- a/lib/parser.c
75515 +++ b/lib/parser.c
75516 @@ -126,7 +126,7 @@ static int match_number(substring_t *s, int *result, int base)
75517 char *buf;
75518 int ret;
75519
75520 - buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
75521 + buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
75522 if (!buf)
75523 return -ENOMEM;
75524 memcpy(buf, s->from, s->to - s->from);
75525 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
75526 index 92cdd99..a8149d7 100644
75527 --- a/lib/radix-tree.c
75528 +++ b/lib/radix-tree.c
75529 @@ -81,7 +81,7 @@ struct radix_tree_preload {
75530 int nr;
75531 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
75532 };
75533 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
75534 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
75535
75536 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
75537 {
75538 diff --git a/lib/random32.c b/lib/random32.c
75539 index 217d5c4..45aba8a 100644
75540 --- a/lib/random32.c
75541 +++ b/lib/random32.c
75542 @@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *state)
75543 */
75544 static inline u32 __seed(u32 x, u32 m)
75545 {
75546 - return (x < m) ? x + m : x;
75547 + return (x <= m) ? x + m + 1 : x;
75548 }
75549
75550 /**
75551 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
75552 index 33bed5e..1477e46 100644
75553 --- a/lib/vsprintf.c
75554 +++ b/lib/vsprintf.c
75555 @@ -16,6 +16,9 @@
75556 * - scnprintf and vscnprintf
75557 */
75558
75559 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75560 +#define __INCLUDED_BY_HIDESYM 1
75561 +#endif
75562 #include <stdarg.h>
75563 #include <linux/module.h>
75564 #include <linux/types.h>
75565 @@ -546,12 +549,12 @@ static char *number(char *buf, char *end, unsigned long long num,
75566 return buf;
75567 }
75568
75569 -static char *string(char *buf, char *end, char *s, struct printf_spec spec)
75570 +static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
75571 {
75572 int len, i;
75573
75574 if ((unsigned long)s < PAGE_SIZE)
75575 - s = "<NULL>";
75576 + s = "(null)";
75577
75578 len = strnlen(s, spec.precision);
75579
75580 @@ -581,7 +584,7 @@ static char *symbol_string(char *buf, char *end, void *ptr,
75581 unsigned long value = (unsigned long) ptr;
75582 #ifdef CONFIG_KALLSYMS
75583 char sym[KSYM_SYMBOL_LEN];
75584 - if (ext != 'f' && ext != 's')
75585 + if (ext != 'f' && ext != 's' && ext != 'a')
75586 sprint_symbol(sym, value);
75587 else
75588 kallsyms_lookup(value, NULL, NULL, NULL, sym);
75589 @@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
75590 * - 'f' For simple symbolic function names without offset
75591 * - 'S' For symbolic direct pointers with offset
75592 * - 's' For symbolic direct pointers without offset
75593 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
75594 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
75595 * - 'R' For a struct resource pointer, it prints the range of
75596 * addresses (not the name nor the flags)
75597 * - 'M' For a 6-byte MAC address, it prints the address in the
75598 @@ -822,7 +827,7 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
75599 struct printf_spec spec)
75600 {
75601 if (!ptr)
75602 - return string(buf, end, "(null)", spec);
75603 + return string(buf, end, "(nil)", spec);
75604
75605 switch (*fmt) {
75606 case 'F':
75607 @@ -831,6 +836,14 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
75608 case 's':
75609 /* Fallthrough */
75610 case 'S':
75611 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75612 + break;
75613 +#else
75614 + return symbol_string(buf, end, ptr, spec, *fmt);
75615 +#endif
75616 + case 'a':
75617 + /* Fallthrough */
75618 + case 'A':
75619 return symbol_string(buf, end, ptr, spec, *fmt);
75620 case 'R':
75621 return resource_string(buf, end, ptr, spec);
75622 @@ -1445,7 +1458,7 @@ do { \
75623 size_t len;
75624 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
75625 || (unsigned long)save_str < PAGE_SIZE)
75626 - save_str = "<NULL>";
75627 + save_str = "(null)";
75628 len = strlen(save_str);
75629 if (str + len + 1 < end)
75630 memcpy(str, save_str, len + 1);
75631 @@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
75632 typeof(type) value; \
75633 if (sizeof(type) == 8) { \
75634 args = PTR_ALIGN(args, sizeof(u32)); \
75635 - *(u32 *)&value = *(u32 *)args; \
75636 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
75637 + *(u32 *)&value = *(const u32 *)args; \
75638 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
75639 } else { \
75640 args = PTR_ALIGN(args, sizeof(type)); \
75641 - value = *(typeof(type) *)args; \
75642 + value = *(const typeof(type) *)args; \
75643 } \
75644 args += sizeof(type); \
75645 value; \
75646 @@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
75647 const char *str_arg = args;
75648 size_t len = strlen(str_arg);
75649 args += len + 1;
75650 - str = string(str, end, (char *)str_arg, spec);
75651 + str = string(str, end, str_arg, spec);
75652 break;
75653 }
75654
75655 diff --git a/localversion-grsec b/localversion-grsec
75656 new file mode 100644
75657 index 0000000..7cd6065
75658 --- /dev/null
75659 +++ b/localversion-grsec
75660 @@ -0,0 +1 @@
75661 +-grsec
75662 diff --git a/mm/Kconfig b/mm/Kconfig
75663 index 2c19c0b..f3c3f83 100644
75664 --- a/mm/Kconfig
75665 +++ b/mm/Kconfig
75666 @@ -228,7 +228,7 @@ config KSM
75667 config DEFAULT_MMAP_MIN_ADDR
75668 int "Low address space to protect from user allocation"
75669 depends on MMU
75670 - default 4096
75671 + default 65536
75672 help
75673 This is the portion of low virtual memory which should be protected
75674 from userspace allocation. Keeping a user from writing to low pages
75675 diff --git a/mm/backing-dev.c b/mm/backing-dev.c
75676 index 67a33a5..094dcf1 100644
75677 --- a/mm/backing-dev.c
75678 +++ b/mm/backing-dev.c
75679 @@ -272,7 +272,7 @@ static void bdi_task_init(struct backing_dev_info *bdi,
75680 list_add_tail_rcu(&wb->list, &bdi->wb_list);
75681 spin_unlock(&bdi->wb_lock);
75682
75683 - tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
75684 + tsk->flags |= PF_SWAPWRITE;
75685 set_freezable();
75686
75687 /*
75688 @@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rcu_head *head)
75689 * Add the default flusher task that gets created for any bdi
75690 * that has dirty data pending writeout
75691 */
75692 -void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
75693 +static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
75694 {
75695 if (!bdi_cap_writeback_dirty(bdi))
75696 return;
75697 diff --git a/mm/filemap.c b/mm/filemap.c
75698 index a1fe378..e26702f 100644
75699 --- a/mm/filemap.c
75700 +++ b/mm/filemap.c
75701 @@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
75702 struct address_space *mapping = file->f_mapping;
75703
75704 if (!mapping->a_ops->readpage)
75705 - return -ENOEXEC;
75706 + return -ENODEV;
75707 file_accessed(file);
75708 vma->vm_ops = &generic_file_vm_ops;
75709 vma->vm_flags |= VM_CAN_NONLINEAR;
75710 @@ -2024,6 +2024,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
75711 *pos = i_size_read(inode);
75712
75713 if (limit != RLIM_INFINITY) {
75714 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
75715 if (*pos >= limit) {
75716 send_sig(SIGXFSZ, current, 0);
75717 return -EFBIG;
75718 diff --git a/mm/fremap.c b/mm/fremap.c
75719 index b6ec85a..a24ac22 100644
75720 --- a/mm/fremap.c
75721 +++ b/mm/fremap.c
75722 @@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
75723 retry:
75724 vma = find_vma(mm, start);
75725
75726 +#ifdef CONFIG_PAX_SEGMEXEC
75727 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
75728 + goto out;
75729 +#endif
75730 +
75731 /*
75732 * Make sure the vma is shared, that it supports prefaulting,
75733 * and that the remapped range is valid and fully within
75734 @@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
75735 /*
75736 * drop PG_Mlocked flag for over-mapped range
75737 */
75738 - unsigned int saved_flags = vma->vm_flags;
75739 + unsigned long saved_flags = vma->vm_flags;
75740 munlock_vma_pages_range(vma, start, start + size);
75741 vma->vm_flags = saved_flags;
75742 }
75743 diff --git a/mm/highmem.c b/mm/highmem.c
75744 index 9c1e627..5ca9447 100644
75745 --- a/mm/highmem.c
75746 +++ b/mm/highmem.c
75747 @@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
75748 * So no dangers, even with speculative execution.
75749 */
75750 page = pte_page(pkmap_page_table[i]);
75751 + pax_open_kernel();
75752 pte_clear(&init_mm, (unsigned long)page_address(page),
75753 &pkmap_page_table[i]);
75754 -
75755 + pax_close_kernel();
75756 set_page_address(page, NULL);
75757 need_flush = 1;
75758 }
75759 @@ -177,9 +178,11 @@ start:
75760 }
75761 }
75762 vaddr = PKMAP_ADDR(last_pkmap_nr);
75763 +
75764 + pax_open_kernel();
75765 set_pte_at(&init_mm, vaddr,
75766 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
75767 -
75768 + pax_close_kernel();
75769 pkmap_count[last_pkmap_nr] = 1;
75770 set_page_address(page, (void *)vaddr);
75771
75772 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
75773 index 5e1e508..ac70275 100644
75774 --- a/mm/hugetlb.c
75775 +++ b/mm/hugetlb.c
75776 @@ -869,6 +869,7 @@ free:
75777 list_del(&page->lru);
75778 enqueue_huge_page(h, page);
75779 }
75780 + spin_unlock(&hugetlb_lock);
75781
75782 /* Free unnecessary surplus pages to the buddy allocator */
75783 if (!list_empty(&surplus_list)) {
75784 @@ -1933,6 +1934,26 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
75785 return 1;
75786 }
75787
75788 +#ifdef CONFIG_PAX_SEGMEXEC
75789 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
75790 +{
75791 + struct mm_struct *mm = vma->vm_mm;
75792 + struct vm_area_struct *vma_m;
75793 + unsigned long address_m;
75794 + pte_t *ptep_m;
75795 +
75796 + vma_m = pax_find_mirror_vma(vma);
75797 + if (!vma_m)
75798 + return;
75799 +
75800 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
75801 + address_m = address + SEGMEXEC_TASK_SIZE;
75802 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
75803 + get_page(page_m);
75804 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
75805 +}
75806 +#endif
75807 +
75808 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
75809 unsigned long address, pte_t *ptep, pte_t pte,
75810 struct page *pagecache_page)
75811 @@ -2004,6 +2025,11 @@ retry_avoidcopy:
75812 huge_ptep_clear_flush(vma, address, ptep);
75813 set_huge_pte_at(mm, address, ptep,
75814 make_huge_pte(vma, new_page, 1));
75815 +
75816 +#ifdef CONFIG_PAX_SEGMEXEC
75817 + pax_mirror_huge_pte(vma, address, new_page);
75818 +#endif
75819 +
75820 /* Make the old page be freed below */
75821 new_page = old_page;
75822 }
75823 @@ -2135,6 +2161,10 @@ retry:
75824 && (vma->vm_flags & VM_SHARED)));
75825 set_huge_pte_at(mm, address, ptep, new_pte);
75826
75827 +#ifdef CONFIG_PAX_SEGMEXEC
75828 + pax_mirror_huge_pte(vma, address, page);
75829 +#endif
75830 +
75831 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
75832 /* Optimization, do the COW without a second fault */
75833 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
75834 @@ -2163,6 +2193,28 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
75835 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
75836 struct hstate *h = hstate_vma(vma);
75837
75838 +#ifdef CONFIG_PAX_SEGMEXEC
75839 + struct vm_area_struct *vma_m;
75840 +
75841 + vma_m = pax_find_mirror_vma(vma);
75842 + if (vma_m) {
75843 + unsigned long address_m;
75844 +
75845 + if (vma->vm_start > vma_m->vm_start) {
75846 + address_m = address;
75847 + address -= SEGMEXEC_TASK_SIZE;
75848 + vma = vma_m;
75849 + h = hstate_vma(vma);
75850 + } else
75851 + address_m = address + SEGMEXEC_TASK_SIZE;
75852 +
75853 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
75854 + return VM_FAULT_OOM;
75855 + address_m &= HPAGE_MASK;
75856 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
75857 + }
75858 +#endif
75859 +
75860 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
75861 if (!ptep)
75862 return VM_FAULT_OOM;
75863 diff --git a/mm/internal.h b/mm/internal.h
75864 index f03e8e2..7354343 100644
75865 --- a/mm/internal.h
75866 +++ b/mm/internal.h
75867 @@ -49,6 +49,7 @@ extern void putback_lru_page(struct page *page);
75868 * in mm/page_alloc.c
75869 */
75870 extern void __free_pages_bootmem(struct page *page, unsigned int order);
75871 +extern void free_compound_page(struct page *page);
75872 extern void prep_compound_page(struct page *page, unsigned long order);
75873
75874
75875 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
75876 index c346660..b47382f 100644
75877 --- a/mm/kmemleak.c
75878 +++ b/mm/kmemleak.c
75879 @@ -358,7 +358,7 @@ static void print_unreferenced(struct seq_file *seq,
75880
75881 for (i = 0; i < object->trace_len; i++) {
75882 void *ptr = (void *)object->trace[i];
75883 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
75884 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
75885 }
75886 }
75887
75888 diff --git a/mm/maccess.c b/mm/maccess.c
75889 index 9073695..1127f348 100644
75890 --- a/mm/maccess.c
75891 +++ b/mm/maccess.c
75892 @@ -14,7 +14,7 @@
75893 * Safely read from address @src to the buffer at @dst. If a kernel fault
75894 * happens, handle that and return -EFAULT.
75895 */
75896 -long probe_kernel_read(void *dst, void *src, size_t size)
75897 +long probe_kernel_read(void *dst, const void *src, size_t size)
75898 {
75899 long ret;
75900 mm_segment_t old_fs = get_fs();
75901 @@ -22,7 +22,7 @@ long probe_kernel_read(void *dst, void *src, size_t size)
75902 set_fs(KERNEL_DS);
75903 pagefault_disable();
75904 ret = __copy_from_user_inatomic(dst,
75905 - (__force const void __user *)src, size);
75906 + (const void __force_user *)src, size);
75907 pagefault_enable();
75908 set_fs(old_fs);
75909
75910 @@ -39,14 +39,14 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
75911 * Safely write to address @dst from the buffer at @src. If a kernel fault
75912 * happens, handle that and return -EFAULT.
75913 */
75914 -long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
75915 +long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
75916 {
75917 long ret;
75918 mm_segment_t old_fs = get_fs();
75919
75920 set_fs(KERNEL_DS);
75921 pagefault_disable();
75922 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
75923 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
75924 pagefault_enable();
75925 set_fs(old_fs);
75926
75927 diff --git a/mm/madvise.c b/mm/madvise.c
75928 index 35b1479..499f7d4 100644
75929 --- a/mm/madvise.c
75930 +++ b/mm/madvise.c
75931 @@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
75932 pgoff_t pgoff;
75933 unsigned long new_flags = vma->vm_flags;
75934
75935 +#ifdef CONFIG_PAX_SEGMEXEC
75936 + struct vm_area_struct *vma_m;
75937 +#endif
75938 +
75939 switch (behavior) {
75940 case MADV_NORMAL:
75941 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
75942 @@ -103,6 +107,13 @@ success:
75943 /*
75944 * vm_flags is protected by the mmap_sem held in write mode.
75945 */
75946 +
75947 +#ifdef CONFIG_PAX_SEGMEXEC
75948 + vma_m = pax_find_mirror_vma(vma);
75949 + if (vma_m)
75950 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
75951 +#endif
75952 +
75953 vma->vm_flags = new_flags;
75954
75955 out:
75956 @@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
75957 struct vm_area_struct ** prev,
75958 unsigned long start, unsigned long end)
75959 {
75960 +
75961 +#ifdef CONFIG_PAX_SEGMEXEC
75962 + struct vm_area_struct *vma_m;
75963 +#endif
75964 +
75965 *prev = vma;
75966 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
75967 return -EINVAL;
75968 @@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
75969 zap_page_range(vma, start, end - start, &details);
75970 } else
75971 zap_page_range(vma, start, end - start, NULL);
75972 +
75973 +#ifdef CONFIG_PAX_SEGMEXEC
75974 + vma_m = pax_find_mirror_vma(vma);
75975 + if (vma_m) {
75976 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
75977 + struct zap_details details = {
75978 + .nonlinear_vma = vma_m,
75979 + .last_index = ULONG_MAX,
75980 + };
75981 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
75982 + } else
75983 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
75984 + }
75985 +#endif
75986 +
75987 return 0;
75988 }
75989
75990 @@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
75991 if (end < start)
75992 goto out;
75993
75994 +#ifdef CONFIG_PAX_SEGMEXEC
75995 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
75996 + if (end > SEGMEXEC_TASK_SIZE)
75997 + goto out;
75998 + } else
75999 +#endif
76000 +
76001 + if (end > TASK_SIZE)
76002 + goto out;
76003 +
76004 error = 0;
76005 if (end == start)
76006 goto out;
76007 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
76008 index 8aeba53..b4a4198 100644
76009 --- a/mm/memory-failure.c
76010 +++ b/mm/memory-failure.c
76011 @@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
76012
76013 int sysctl_memory_failure_recovery __read_mostly = 1;
76014
76015 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
76016 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
76017
76018 /*
76019 * Send all the processes who have the page mapped an ``action optional''
76020 @@ -64,7 +64,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
76021 si.si_signo = SIGBUS;
76022 si.si_errno = 0;
76023 si.si_code = BUS_MCEERR_AO;
76024 - si.si_addr = (void *)addr;
76025 + si.si_addr = (void __user *)addr;
76026 #ifdef __ARCH_SI_TRAPNO
76027 si.si_trapno = trapno;
76028 #endif
76029 @@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn, int trapno, int ref)
76030 return 0;
76031 }
76032
76033 - atomic_long_add(1, &mce_bad_pages);
76034 + atomic_long_add_unchecked(1, &mce_bad_pages);
76035
76036 /*
76037 * We need/can do nothing about count=0 pages.
76038 diff --git a/mm/memory.c b/mm/memory.c
76039 index 6c836d3..48f3264 100644
76040 --- a/mm/memory.c
76041 +++ b/mm/memory.c
76042 @@ -187,8 +187,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
76043 return;
76044
76045 pmd = pmd_offset(pud, start);
76046 +
76047 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
76048 pud_clear(pud);
76049 pmd_free_tlb(tlb, pmd, start);
76050 +#endif
76051 +
76052 }
76053
76054 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
76055 @@ -219,9 +223,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
76056 if (end - 1 > ceiling - 1)
76057 return;
76058
76059 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
76060 pud = pud_offset(pgd, start);
76061 pgd_clear(pgd);
76062 pud_free_tlb(tlb, pud, start);
76063 +#endif
76064 +
76065 }
76066
76067 /*
76068 @@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
76069 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
76070 i = 0;
76071
76072 - do {
76073 + while (nr_pages) {
76074 struct vm_area_struct *vma;
76075
76076 - vma = find_extend_vma(mm, start);
76077 + vma = find_vma(mm, start);
76078 if (!vma && in_gate_area(tsk, start)) {
76079 unsigned long pg = start & PAGE_MASK;
76080 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
76081 @@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
76082 continue;
76083 }
76084
76085 - if (!vma ||
76086 + if (!vma || start < vma->vm_start ||
76087 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
76088 !(vm_flags & vma->vm_flags))
76089 return i ? : -EFAULT;
76090 @@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
76091 start += PAGE_SIZE;
76092 nr_pages--;
76093 } while (nr_pages && start < vma->vm_end);
76094 - } while (nr_pages);
76095 + }
76096 return i;
76097 }
76098
76099 @@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
76100 page_add_file_rmap(page);
76101 set_pte_at(mm, addr, pte, mk_pte(page, prot));
76102
76103 +#ifdef CONFIG_PAX_SEGMEXEC
76104 + pax_mirror_file_pte(vma, addr, page, ptl);
76105 +#endif
76106 +
76107 retval = 0;
76108 pte_unmap_unlock(pte, ptl);
76109 return retval;
76110 @@ -1560,10 +1571,22 @@ out:
76111 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
76112 struct page *page)
76113 {
76114 +
76115 +#ifdef CONFIG_PAX_SEGMEXEC
76116 + struct vm_area_struct *vma_m;
76117 +#endif
76118 +
76119 if (addr < vma->vm_start || addr >= vma->vm_end)
76120 return -EFAULT;
76121 if (!page_count(page))
76122 return -EINVAL;
76123 +
76124 +#ifdef CONFIG_PAX_SEGMEXEC
76125 + vma_m = pax_find_mirror_vma(vma);
76126 + if (vma_m)
76127 + vma_m->vm_flags |= VM_INSERTPAGE;
76128 +#endif
76129 +
76130 vma->vm_flags |= VM_INSERTPAGE;
76131 return insert_page(vma, addr, page, vma->vm_page_prot);
76132 }
76133 @@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
76134 unsigned long pfn)
76135 {
76136 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
76137 + BUG_ON(vma->vm_mirror);
76138
76139 if (addr < vma->vm_start || addr >= vma->vm_end)
76140 return -EFAULT;
76141 @@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
76142 copy_user_highpage(dst, src, va, vma);
76143 }
76144
76145 +#ifdef CONFIG_PAX_SEGMEXEC
76146 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
76147 +{
76148 + struct mm_struct *mm = vma->vm_mm;
76149 + spinlock_t *ptl;
76150 + pte_t *pte, entry;
76151 +
76152 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
76153 + entry = *pte;
76154 + if (!pte_present(entry)) {
76155 + if (!pte_none(entry)) {
76156 + BUG_ON(pte_file(entry));
76157 + free_swap_and_cache(pte_to_swp_entry(entry));
76158 + pte_clear_not_present_full(mm, address, pte, 0);
76159 + }
76160 + } else {
76161 + struct page *page;
76162 +
76163 + flush_cache_page(vma, address, pte_pfn(entry));
76164 + entry = ptep_clear_flush(vma, address, pte);
76165 + BUG_ON(pte_dirty(entry));
76166 + page = vm_normal_page(vma, address, entry);
76167 + if (page) {
76168 + update_hiwater_rss(mm);
76169 + if (PageAnon(page))
76170 + dec_mm_counter(mm, anon_rss);
76171 + else
76172 + dec_mm_counter(mm, file_rss);
76173 + page_remove_rmap(page);
76174 + page_cache_release(page);
76175 + }
76176 + }
76177 + pte_unmap_unlock(pte, ptl);
76178 +}
76179 +
76180 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
76181 + *
76182 + * the ptl of the lower mapped page is held on entry and is not released on exit
76183 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
76184 + */
76185 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
76186 +{
76187 + struct mm_struct *mm = vma->vm_mm;
76188 + unsigned long address_m;
76189 + spinlock_t *ptl_m;
76190 + struct vm_area_struct *vma_m;
76191 + pmd_t *pmd_m;
76192 + pte_t *pte_m, entry_m;
76193 +
76194 + BUG_ON(!page_m || !PageAnon(page_m));
76195 +
76196 + vma_m = pax_find_mirror_vma(vma);
76197 + if (!vma_m)
76198 + return;
76199 +
76200 + BUG_ON(!PageLocked(page_m));
76201 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
76202 + address_m = address + SEGMEXEC_TASK_SIZE;
76203 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
76204 + pte_m = pte_offset_map_nested(pmd_m, address_m);
76205 + ptl_m = pte_lockptr(mm, pmd_m);
76206 + if (ptl != ptl_m) {
76207 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
76208 + if (!pte_none(*pte_m))
76209 + goto out;
76210 + }
76211 +
76212 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
76213 + page_cache_get(page_m);
76214 + page_add_anon_rmap(page_m, vma_m, address_m);
76215 + inc_mm_counter(mm, anon_rss);
76216 + set_pte_at(mm, address_m, pte_m, entry_m);
76217 + update_mmu_cache(vma_m, address_m, entry_m);
76218 +out:
76219 + if (ptl != ptl_m)
76220 + spin_unlock(ptl_m);
76221 + pte_unmap_nested(pte_m);
76222 + unlock_page(page_m);
76223 +}
76224 +
76225 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
76226 +{
76227 + struct mm_struct *mm = vma->vm_mm;
76228 + unsigned long address_m;
76229 + spinlock_t *ptl_m;
76230 + struct vm_area_struct *vma_m;
76231 + pmd_t *pmd_m;
76232 + pte_t *pte_m, entry_m;
76233 +
76234 + BUG_ON(!page_m || PageAnon(page_m));
76235 +
76236 + vma_m = pax_find_mirror_vma(vma);
76237 + if (!vma_m)
76238 + return;
76239 +
76240 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
76241 + address_m = address + SEGMEXEC_TASK_SIZE;
76242 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
76243 + pte_m = pte_offset_map_nested(pmd_m, address_m);
76244 + ptl_m = pte_lockptr(mm, pmd_m);
76245 + if (ptl != ptl_m) {
76246 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
76247 + if (!pte_none(*pte_m))
76248 + goto out;
76249 + }
76250 +
76251 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
76252 + page_cache_get(page_m);
76253 + page_add_file_rmap(page_m);
76254 + inc_mm_counter(mm, file_rss);
76255 + set_pte_at(mm, address_m, pte_m, entry_m);
76256 + update_mmu_cache(vma_m, address_m, entry_m);
76257 +out:
76258 + if (ptl != ptl_m)
76259 + spin_unlock(ptl_m);
76260 + pte_unmap_nested(pte_m);
76261 +}
76262 +
76263 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
76264 +{
76265 + struct mm_struct *mm = vma->vm_mm;
76266 + unsigned long address_m;
76267 + spinlock_t *ptl_m;
76268 + struct vm_area_struct *vma_m;
76269 + pmd_t *pmd_m;
76270 + pte_t *pte_m, entry_m;
76271 +
76272 + vma_m = pax_find_mirror_vma(vma);
76273 + if (!vma_m)
76274 + return;
76275 +
76276 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
76277 + address_m = address + SEGMEXEC_TASK_SIZE;
76278 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
76279 + pte_m = pte_offset_map_nested(pmd_m, address_m);
76280 + ptl_m = pte_lockptr(mm, pmd_m);
76281 + if (ptl != ptl_m) {
76282 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
76283 + if (!pte_none(*pte_m))
76284 + goto out;
76285 + }
76286 +
76287 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
76288 + set_pte_at(mm, address_m, pte_m, entry_m);
76289 +out:
76290 + if (ptl != ptl_m)
76291 + spin_unlock(ptl_m);
76292 + pte_unmap_nested(pte_m);
76293 +}
76294 +
76295 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
76296 +{
76297 + struct page *page_m;
76298 + pte_t entry;
76299 +
76300 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
76301 + goto out;
76302 +
76303 + entry = *pte;
76304 + page_m = vm_normal_page(vma, address, entry);
76305 + if (!page_m)
76306 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
76307 + else if (PageAnon(page_m)) {
76308 + if (pax_find_mirror_vma(vma)) {
76309 + pte_unmap_unlock(pte, ptl);
76310 + lock_page(page_m);
76311 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
76312 + if (pte_same(entry, *pte))
76313 + pax_mirror_anon_pte(vma, address, page_m, ptl);
76314 + else
76315 + unlock_page(page_m);
76316 + }
76317 + } else
76318 + pax_mirror_file_pte(vma, address, page_m, ptl);
76319 +
76320 +out:
76321 + pte_unmap_unlock(pte, ptl);
76322 +}
76323 +#endif
76324 +
76325 /*
76326 * This routine handles present pages, when users try to write
76327 * to a shared page. It is done by copying the page to a new address
76328 @@ -2156,6 +2360,12 @@ gotten:
76329 */
76330 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
76331 if (likely(pte_same(*page_table, orig_pte))) {
76332 +
76333 +#ifdef CONFIG_PAX_SEGMEXEC
76334 + if (pax_find_mirror_vma(vma))
76335 + BUG_ON(!trylock_page(new_page));
76336 +#endif
76337 +
76338 if (old_page) {
76339 if (!PageAnon(old_page)) {
76340 dec_mm_counter(mm, file_rss);
76341 @@ -2207,6 +2417,10 @@ gotten:
76342 page_remove_rmap(old_page);
76343 }
76344
76345 +#ifdef CONFIG_PAX_SEGMEXEC
76346 + pax_mirror_anon_pte(vma, address, new_page, ptl);
76347 +#endif
76348 +
76349 /* Free the old page.. */
76350 new_page = old_page;
76351 ret |= VM_FAULT_WRITE;
76352 @@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
76353 swap_free(entry);
76354 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
76355 try_to_free_swap(page);
76356 +
76357 +#ifdef CONFIG_PAX_SEGMEXEC
76358 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
76359 +#endif
76360 +
76361 unlock_page(page);
76362
76363 if (flags & FAULT_FLAG_WRITE) {
76364 @@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
76365
76366 /* No need to invalidate - it was non-present before */
76367 update_mmu_cache(vma, address, pte);
76368 +
76369 +#ifdef CONFIG_PAX_SEGMEXEC
76370 + pax_mirror_anon_pte(vma, address, page, ptl);
76371 +#endif
76372 +
76373 unlock:
76374 pte_unmap_unlock(page_table, ptl);
76375 out:
76376 @@ -2632,40 +2856,6 @@ out_release:
76377 }
76378
76379 /*
76380 - * This is like a special single-page "expand_{down|up}wards()",
76381 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
76382 - * doesn't hit another vma.
76383 - */
76384 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
76385 -{
76386 - address &= PAGE_MASK;
76387 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
76388 - struct vm_area_struct *prev = vma->vm_prev;
76389 -
76390 - /*
76391 - * Is there a mapping abutting this one below?
76392 - *
76393 - * That's only ok if it's the same stack mapping
76394 - * that has gotten split..
76395 - */
76396 - if (prev && prev->vm_end == address)
76397 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
76398 -
76399 - expand_stack(vma, address - PAGE_SIZE);
76400 - }
76401 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
76402 - struct vm_area_struct *next = vma->vm_next;
76403 -
76404 - /* As VM_GROWSDOWN but s/below/above/ */
76405 - if (next && next->vm_start == address + PAGE_SIZE)
76406 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
76407 -
76408 - expand_upwards(vma, address + PAGE_SIZE);
76409 - }
76410 - return 0;
76411 -}
76412 -
76413 -/*
76414 * We enter with non-exclusive mmap_sem (to exclude vma changes,
76415 * but allow concurrent faults), and pte mapped but not yet locked.
76416 * We return with mmap_sem still held, but pte unmapped and unlocked.
76417 @@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
76418 unsigned long address, pte_t *page_table, pmd_t *pmd,
76419 unsigned int flags)
76420 {
76421 - struct page *page;
76422 + struct page *page = NULL;
76423 spinlock_t *ptl;
76424 pte_t entry;
76425
76426 - pte_unmap(page_table);
76427 -
76428 - /* Check if we need to add a guard page to the stack */
76429 - if (check_stack_guard_page(vma, address) < 0)
76430 - return VM_FAULT_SIGBUS;
76431 -
76432 - /* Use the zero-page for reads */
76433 if (!(flags & FAULT_FLAG_WRITE)) {
76434 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
76435 vma->vm_page_prot));
76436 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
76437 + ptl = pte_lockptr(mm, pmd);
76438 + spin_lock(ptl);
76439 if (!pte_none(*page_table))
76440 goto unlock;
76441 goto setpte;
76442 }
76443
76444 /* Allocate our own private page. */
76445 + pte_unmap(page_table);
76446 +
76447 if (unlikely(anon_vma_prepare(vma)))
76448 goto oom;
76449 page = alloc_zeroed_user_highpage_movable(vma, address);
76450 @@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
76451 if (!pte_none(*page_table))
76452 goto release;
76453
76454 +#ifdef CONFIG_PAX_SEGMEXEC
76455 + if (pax_find_mirror_vma(vma))
76456 + BUG_ON(!trylock_page(page));
76457 +#endif
76458 +
76459 inc_mm_counter(mm, anon_rss);
76460 page_add_new_anon_rmap(page, vma, address);
76461 setpte:
76462 @@ -2720,6 +2911,12 @@ setpte:
76463
76464 /* No need to invalidate - it was non-present before */
76465 update_mmu_cache(vma, address, entry);
76466 +
76467 +#ifdef CONFIG_PAX_SEGMEXEC
76468 + if (page)
76469 + pax_mirror_anon_pte(vma, address, page, ptl);
76470 +#endif
76471 +
76472 unlock:
76473 pte_unmap_unlock(page_table, ptl);
76474 return 0;
76475 @@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
76476 */
76477 /* Only go through if we didn't race with anybody else... */
76478 if (likely(pte_same(*page_table, orig_pte))) {
76479 +
76480 +#ifdef CONFIG_PAX_SEGMEXEC
76481 + if (anon && pax_find_mirror_vma(vma))
76482 + BUG_ON(!trylock_page(page));
76483 +#endif
76484 +
76485 flush_icache_page(vma, page);
76486 entry = mk_pte(page, vma->vm_page_prot);
76487 if (flags & FAULT_FLAG_WRITE)
76488 @@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
76489
76490 /* no need to invalidate: a not-present page won't be cached */
76491 update_mmu_cache(vma, address, entry);
76492 +
76493 +#ifdef CONFIG_PAX_SEGMEXEC
76494 + if (anon)
76495 + pax_mirror_anon_pte(vma, address, page, ptl);
76496 + else
76497 + pax_mirror_file_pte(vma, address, page, ptl);
76498 +#endif
76499 +
76500 } else {
76501 if (charged)
76502 mem_cgroup_uncharge_page(page);
76503 @@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struct mm_struct *mm,
76504 if (flags & FAULT_FLAG_WRITE)
76505 flush_tlb_page(vma, address);
76506 }
76507 +
76508 +#ifdef CONFIG_PAX_SEGMEXEC
76509 + pax_mirror_pte(vma, address, pte, pmd, ptl);
76510 + return 0;
76511 +#endif
76512 +
76513 unlock:
76514 pte_unmap_unlock(pte, ptl);
76515 return 0;
76516 @@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
76517 pmd_t *pmd;
76518 pte_t *pte;
76519
76520 +#ifdef CONFIG_PAX_SEGMEXEC
76521 + struct vm_area_struct *vma_m;
76522 +#endif
76523 +
76524 __set_current_state(TASK_RUNNING);
76525
76526 count_vm_event(PGFAULT);
76527 @@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
76528 if (unlikely(is_vm_hugetlb_page(vma)))
76529 return hugetlb_fault(mm, vma, address, flags);
76530
76531 +#ifdef CONFIG_PAX_SEGMEXEC
76532 + vma_m = pax_find_mirror_vma(vma);
76533 + if (vma_m) {
76534 + unsigned long address_m;
76535 + pgd_t *pgd_m;
76536 + pud_t *pud_m;
76537 + pmd_t *pmd_m;
76538 +
76539 + if (vma->vm_start > vma_m->vm_start) {
76540 + address_m = address;
76541 + address -= SEGMEXEC_TASK_SIZE;
76542 + vma = vma_m;
76543 + } else
76544 + address_m = address + SEGMEXEC_TASK_SIZE;
76545 +
76546 + pgd_m = pgd_offset(mm, address_m);
76547 + pud_m = pud_alloc(mm, pgd_m, address_m);
76548 + if (!pud_m)
76549 + return VM_FAULT_OOM;
76550 + pmd_m = pmd_alloc(mm, pud_m, address_m);
76551 + if (!pmd_m)
76552 + return VM_FAULT_OOM;
76553 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
76554 + return VM_FAULT_OOM;
76555 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
76556 + }
76557 +#endif
76558 +
76559 pgd = pgd_offset(mm, address);
76560 pud = pud_alloc(mm, pgd, address);
76561 if (!pud)
76562 @@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
76563 gate_vma.vm_start = FIXADDR_USER_START;
76564 gate_vma.vm_end = FIXADDR_USER_END;
76565 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
76566 - gate_vma.vm_page_prot = __P101;
76567 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
76568 /*
76569 * Make sure the vDSO gets into every core dump.
76570 * Dumping its contents makes post-mortem fully interpretable later
76571 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
76572 index 3c6e3e2..ad9871c 100644
76573 --- a/mm/mempolicy.c
76574 +++ b/mm/mempolicy.c
76575 @@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
76576 struct vm_area_struct *next;
76577 int err;
76578
76579 +#ifdef CONFIG_PAX_SEGMEXEC
76580 + struct vm_area_struct *vma_m;
76581 +#endif
76582 +
76583 err = 0;
76584 for (; vma && vma->vm_start < end; vma = next) {
76585 next = vma->vm_next;
76586 @@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
76587 err = policy_vma(vma, new);
76588 if (err)
76589 break;
76590 +
76591 +#ifdef CONFIG_PAX_SEGMEXEC
76592 + vma_m = pax_find_mirror_vma(vma);
76593 + if (vma_m) {
76594 + err = policy_vma(vma_m, new);
76595 + if (err)
76596 + break;
76597 + }
76598 +#endif
76599 +
76600 }
76601 return err;
76602 }
76603 @@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start, unsigned long len,
76604
76605 if (end < start)
76606 return -EINVAL;
76607 +
76608 +#ifdef CONFIG_PAX_SEGMEXEC
76609 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
76610 + if (end > SEGMEXEC_TASK_SIZE)
76611 + return -EINVAL;
76612 + } else
76613 +#endif
76614 +
76615 + if (end > TASK_SIZE)
76616 + return -EINVAL;
76617 +
76618 if (end == start)
76619 return 0;
76620
76621 @@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
76622 if (!mm)
76623 return -EINVAL;
76624
76625 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
76626 + if (mm != current->mm &&
76627 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
76628 + err = -EPERM;
76629 + goto out;
76630 + }
76631 +#endif
76632 +
76633 /*
76634 * Check if this process has the right to modify the specified
76635 * process. The right exists if the process has administrative
76636 @@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
76637 rcu_read_lock();
76638 tcred = __task_cred(task);
76639 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
76640 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
76641 - !capable(CAP_SYS_NICE)) {
76642 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
76643 rcu_read_unlock();
76644 err = -EPERM;
76645 goto out;
76646 @@ -2396,7 +2428,7 @@ int show_numa_map(struct seq_file *m, void *v)
76647
76648 if (file) {
76649 seq_printf(m, " file=");
76650 - seq_path(m, &file->f_path, "\n\t= ");
76651 + seq_path(m, &file->f_path, "\n\t\\= ");
76652 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
76653 seq_printf(m, " heap");
76654 } else if (vma->vm_start <= mm->start_stack &&
76655 diff --git a/mm/migrate.c b/mm/migrate.c
76656 index aaca868..2ebecdc 100644
76657 --- a/mm/migrate.c
76658 +++ b/mm/migrate.c
76659 @@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
76660 unsigned long chunk_start;
76661 int err;
76662
76663 + pax_track_stack();
76664 +
76665 task_nodes = cpuset_mems_allowed(task);
76666
76667 err = -ENOMEM;
76668 @@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
76669 if (!mm)
76670 return -EINVAL;
76671
76672 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
76673 + if (mm != current->mm &&
76674 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
76675 + err = -EPERM;
76676 + goto out;
76677 + }
76678 +#endif
76679 +
76680 /*
76681 * Check if this process has the right to modify the specified
76682 * process. The right exists if the process has administrative
76683 @@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
76684 rcu_read_lock();
76685 tcred = __task_cred(task);
76686 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
76687 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
76688 - !capable(CAP_SYS_NICE)) {
76689 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
76690 rcu_read_unlock();
76691 err = -EPERM;
76692 goto out;
76693 diff --git a/mm/mlock.c b/mm/mlock.c
76694 index 2d846cf..98134d2 100644
76695 --- a/mm/mlock.c
76696 +++ b/mm/mlock.c
76697 @@ -13,6 +13,7 @@
76698 #include <linux/pagemap.h>
76699 #include <linux/mempolicy.h>
76700 #include <linux/syscalls.h>
76701 +#include <linux/security.h>
76702 #include <linux/sched.h>
76703 #include <linux/module.h>
76704 #include <linux/rmap.h>
76705 @@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
76706 }
76707 }
76708
76709 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
76710 -{
76711 - return (vma->vm_flags & VM_GROWSDOWN) &&
76712 - (vma->vm_start == addr) &&
76713 - !vma_stack_continue(vma->vm_prev, addr);
76714 -}
76715 -
76716 /**
76717 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
76718 * @vma: target vma
76719 @@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
76720 if (vma->vm_flags & VM_WRITE)
76721 gup_flags |= FOLL_WRITE;
76722
76723 - /* We don't try to access the guard page of a stack vma */
76724 - if (stack_guard_page(vma, start)) {
76725 - addr += PAGE_SIZE;
76726 - nr_pages--;
76727 - }
76728 -
76729 while (nr_pages > 0) {
76730 int i;
76731
76732 @@ -440,7 +428,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
76733 {
76734 unsigned long nstart, end, tmp;
76735 struct vm_area_struct * vma, * prev;
76736 - int error;
76737 + int error = -EINVAL;
76738
76739 len = PAGE_ALIGN(len);
76740 end = start + len;
76741 @@ -448,6 +436,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
76742 return -EINVAL;
76743 if (end == start)
76744 return 0;
76745 + if (end > TASK_SIZE)
76746 + return -EINVAL;
76747 +
76748 vma = find_vma_prev(current->mm, start, &prev);
76749 if (!vma || vma->vm_start > start)
76750 return -ENOMEM;
76751 @@ -458,6 +449,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
76752 for (nstart = start ; ; ) {
76753 unsigned int newflags;
76754
76755 +#ifdef CONFIG_PAX_SEGMEXEC
76756 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
76757 + break;
76758 +#endif
76759 +
76760 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
76761
76762 newflags = vma->vm_flags | VM_LOCKED;
76763 @@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
76764 lock_limit >>= PAGE_SHIFT;
76765
76766 /* check against resource limits */
76767 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
76768 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
76769 error = do_mlock(start, len, 1);
76770 up_write(&current->mm->mmap_sem);
76771 @@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
76772 static int do_mlockall(int flags)
76773 {
76774 struct vm_area_struct * vma, * prev = NULL;
76775 - unsigned int def_flags = 0;
76776
76777 if (flags & MCL_FUTURE)
76778 - def_flags = VM_LOCKED;
76779 - current->mm->def_flags = def_flags;
76780 + current->mm->def_flags |= VM_LOCKED;
76781 + else
76782 + current->mm->def_flags &= ~VM_LOCKED;
76783 if (flags == MCL_FUTURE)
76784 goto out;
76785
76786 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
76787 - unsigned int newflags;
76788 + unsigned long newflags;
76789
76790 +#ifdef CONFIG_PAX_SEGMEXEC
76791 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
76792 + break;
76793 +#endif
76794 +
76795 + BUG_ON(vma->vm_end > TASK_SIZE);
76796 newflags = vma->vm_flags | VM_LOCKED;
76797 if (!(flags & MCL_CURRENT))
76798 newflags &= ~VM_LOCKED;
76799 @@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
76800 lock_limit >>= PAGE_SHIFT;
76801
76802 ret = -ENOMEM;
76803 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
76804 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
76805 capable(CAP_IPC_LOCK))
76806 ret = do_mlockall(flags);
76807 diff --git a/mm/mmap.c b/mm/mmap.c
76808 index 4b80cbf..c5ce1df 100644
76809 --- a/mm/mmap.c
76810 +++ b/mm/mmap.c
76811 @@ -45,6 +45,16 @@
76812 #define arch_rebalance_pgtables(addr, len) (addr)
76813 #endif
76814
76815 +static inline void verify_mm_writelocked(struct mm_struct *mm)
76816 +{
76817 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
76818 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
76819 + up_read(&mm->mmap_sem);
76820 + BUG();
76821 + }
76822 +#endif
76823 +}
76824 +
76825 static void unmap_region(struct mm_struct *mm,
76826 struct vm_area_struct *vma, struct vm_area_struct *prev,
76827 unsigned long start, unsigned long end);
76828 @@ -70,22 +80,32 @@ static void unmap_region(struct mm_struct *mm,
76829 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
76830 *
76831 */
76832 -pgprot_t protection_map[16] = {
76833 +pgprot_t protection_map[16] __read_only = {
76834 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
76835 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
76836 };
76837
76838 pgprot_t vm_get_page_prot(unsigned long vm_flags)
76839 {
76840 - return __pgprot(pgprot_val(protection_map[vm_flags &
76841 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
76842 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
76843 pgprot_val(arch_vm_get_page_prot(vm_flags)));
76844 +
76845 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
76846 + if (!nx_enabled &&
76847 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
76848 + (vm_flags & (VM_READ | VM_WRITE)))
76849 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
76850 +#endif
76851 +
76852 + return prot;
76853 }
76854 EXPORT_SYMBOL(vm_get_page_prot);
76855
76856 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
76857 int sysctl_overcommit_ratio = 50; /* default is 50% */
76858 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
76859 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
76860 struct percpu_counter vm_committed_as;
76861
76862 /*
76863 @@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
76864 struct vm_area_struct *next = vma->vm_next;
76865
76866 might_sleep();
76867 + BUG_ON(vma->vm_mirror);
76868 if (vma->vm_ops && vma->vm_ops->close)
76869 vma->vm_ops->close(vma);
76870 if (vma->vm_file) {
76871 @@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
76872 * not page aligned -Ram Gupta
76873 */
76874 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
76875 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
76876 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
76877 (mm->end_data - mm->start_data) > rlim)
76878 goto out;
76879 @@ -704,6 +726,12 @@ static int
76880 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
76881 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
76882 {
76883 +
76884 +#ifdef CONFIG_PAX_SEGMEXEC
76885 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
76886 + return 0;
76887 +#endif
76888 +
76889 if (is_mergeable_vma(vma, file, vm_flags) &&
76890 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
76891 if (vma->vm_pgoff == vm_pgoff)
76892 @@ -723,6 +751,12 @@ static int
76893 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
76894 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
76895 {
76896 +
76897 +#ifdef CONFIG_PAX_SEGMEXEC
76898 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
76899 + return 0;
76900 +#endif
76901 +
76902 if (is_mergeable_vma(vma, file, vm_flags) &&
76903 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
76904 pgoff_t vm_pglen;
76905 @@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
76906 struct vm_area_struct *vma_merge(struct mm_struct *mm,
76907 struct vm_area_struct *prev, unsigned long addr,
76908 unsigned long end, unsigned long vm_flags,
76909 - struct anon_vma *anon_vma, struct file *file,
76910 + struct anon_vma *anon_vma, struct file *file,
76911 pgoff_t pgoff, struct mempolicy *policy)
76912 {
76913 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
76914 struct vm_area_struct *area, *next;
76915
76916 +#ifdef CONFIG_PAX_SEGMEXEC
76917 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
76918 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
76919 +
76920 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
76921 +#endif
76922 +
76923 /*
76924 * We later require that vma->vm_flags == vm_flags,
76925 * so this tests vma->vm_flags & VM_SPECIAL, too.
76926 @@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
76927 if (next && next->vm_end == end) /* cases 6, 7, 8 */
76928 next = next->vm_next;
76929
76930 +#ifdef CONFIG_PAX_SEGMEXEC
76931 + if (prev)
76932 + prev_m = pax_find_mirror_vma(prev);
76933 + if (area)
76934 + area_m = pax_find_mirror_vma(area);
76935 + if (next)
76936 + next_m = pax_find_mirror_vma(next);
76937 +#endif
76938 +
76939 /*
76940 * Can it merge with the predecessor?
76941 */
76942 @@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
76943 /* cases 1, 6 */
76944 vma_adjust(prev, prev->vm_start,
76945 next->vm_end, prev->vm_pgoff, NULL);
76946 - } else /* cases 2, 5, 7 */
76947 +
76948 +#ifdef CONFIG_PAX_SEGMEXEC
76949 + if (prev_m)
76950 + vma_adjust(prev_m, prev_m->vm_start,
76951 + next_m->vm_end, prev_m->vm_pgoff, NULL);
76952 +#endif
76953 +
76954 + } else { /* cases 2, 5, 7 */
76955 vma_adjust(prev, prev->vm_start,
76956 end, prev->vm_pgoff, NULL);
76957 +
76958 +#ifdef CONFIG_PAX_SEGMEXEC
76959 + if (prev_m)
76960 + vma_adjust(prev_m, prev_m->vm_start,
76961 + end_m, prev_m->vm_pgoff, NULL);
76962 +#endif
76963 +
76964 + }
76965 return prev;
76966 }
76967
76968 @@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
76969 mpol_equal(policy, vma_policy(next)) &&
76970 can_vma_merge_before(next, vm_flags,
76971 anon_vma, file, pgoff+pglen)) {
76972 - if (prev && addr < prev->vm_end) /* case 4 */
76973 + if (prev && addr < prev->vm_end) { /* case 4 */
76974 vma_adjust(prev, prev->vm_start,
76975 addr, prev->vm_pgoff, NULL);
76976 - else /* cases 3, 8 */
76977 +
76978 +#ifdef CONFIG_PAX_SEGMEXEC
76979 + if (prev_m)
76980 + vma_adjust(prev_m, prev_m->vm_start,
76981 + addr_m, prev_m->vm_pgoff, NULL);
76982 +#endif
76983 +
76984 + } else { /* cases 3, 8 */
76985 vma_adjust(area, addr, next->vm_end,
76986 next->vm_pgoff - pglen, NULL);
76987 +
76988 +#ifdef CONFIG_PAX_SEGMEXEC
76989 + if (area_m)
76990 + vma_adjust(area_m, addr_m, next_m->vm_end,
76991 + next_m->vm_pgoff - pglen, NULL);
76992 +#endif
76993 +
76994 + }
76995 return area;
76996 }
76997
76998 @@ -898,14 +978,11 @@ none:
76999 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
77000 struct file *file, long pages)
77001 {
77002 - const unsigned long stack_flags
77003 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
77004 -
77005 if (file) {
77006 mm->shared_vm += pages;
77007 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
77008 mm->exec_vm += pages;
77009 - } else if (flags & stack_flags)
77010 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
77011 mm->stack_vm += pages;
77012 if (flags & (VM_RESERVED|VM_IO))
77013 mm->reserved_vm += pages;
77014 @@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
77015 * (the exception is when the underlying filesystem is noexec
77016 * mounted, in which case we dont add PROT_EXEC.)
77017 */
77018 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
77019 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
77020 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
77021 prot |= PROT_EXEC;
77022
77023 @@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
77024 /* Obtain the address to map to. we verify (or select) it and ensure
77025 * that it represents a valid section of the address space.
77026 */
77027 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
77028 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
77029 if (addr & ~PAGE_MASK)
77030 return addr;
77031
77032 @@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
77033 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
77034 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
77035
77036 +#ifdef CONFIG_PAX_MPROTECT
77037 + if (mm->pax_flags & MF_PAX_MPROTECT) {
77038 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
77039 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
77040 + gr_log_rwxmmap(file);
77041 +
77042 +#ifdef CONFIG_PAX_EMUPLT
77043 + vm_flags &= ~VM_EXEC;
77044 +#else
77045 + return -EPERM;
77046 +#endif
77047 +
77048 + }
77049 +
77050 + if (!(vm_flags & VM_EXEC))
77051 + vm_flags &= ~VM_MAYEXEC;
77052 +#else
77053 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
77054 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
77055 +#endif
77056 + else
77057 + vm_flags &= ~VM_MAYWRITE;
77058 + }
77059 +#endif
77060 +
77061 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
77062 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
77063 + vm_flags &= ~VM_PAGEEXEC;
77064 +#endif
77065 +
77066 if (flags & MAP_LOCKED)
77067 if (!can_do_mlock())
77068 return -EPERM;
77069 @@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
77070 locked += mm->locked_vm;
77071 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
77072 lock_limit >>= PAGE_SHIFT;
77073 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
77074 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
77075 return -EAGAIN;
77076 }
77077 @@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
77078 if (error)
77079 return error;
77080
77081 + if (!gr_acl_handle_mmap(file, prot))
77082 + return -EACCES;
77083 +
77084 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
77085 }
77086 EXPORT_SYMBOL(do_mmap_pgoff);
77087 @@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
77088 */
77089 int vma_wants_writenotify(struct vm_area_struct *vma)
77090 {
77091 - unsigned int vm_flags = vma->vm_flags;
77092 + unsigned long vm_flags = vma->vm_flags;
77093
77094 /* If it was private or non-writable, the write bit is already clear */
77095 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
77096 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
77097 return 0;
77098
77099 /* The backer wishes to know when pages are first written to? */
77100 @@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
77101 unsigned long charged = 0;
77102 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
77103
77104 +#ifdef CONFIG_PAX_SEGMEXEC
77105 + struct vm_area_struct *vma_m = NULL;
77106 +#endif
77107 +
77108 + /*
77109 + * mm->mmap_sem is required to protect against another thread
77110 + * changing the mappings in case we sleep.
77111 + */
77112 + verify_mm_writelocked(mm);
77113 +
77114 /* Clear old maps */
77115 error = -ENOMEM;
77116 -munmap_back:
77117 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
77118 if (vma && vma->vm_start < addr + len) {
77119 if (do_munmap(mm, addr, len))
77120 return -ENOMEM;
77121 - goto munmap_back;
77122 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
77123 + BUG_ON(vma && vma->vm_start < addr + len);
77124 }
77125
77126 /* Check against address space limit. */
77127 @@ -1173,6 +1294,16 @@ munmap_back:
77128 goto unacct_error;
77129 }
77130
77131 +#ifdef CONFIG_PAX_SEGMEXEC
77132 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
77133 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
77134 + if (!vma_m) {
77135 + error = -ENOMEM;
77136 + goto free_vma;
77137 + }
77138 + }
77139 +#endif
77140 +
77141 vma->vm_mm = mm;
77142 vma->vm_start = addr;
77143 vma->vm_end = addr + len;
77144 @@ -1195,6 +1326,19 @@ munmap_back:
77145 error = file->f_op->mmap(file, vma);
77146 if (error)
77147 goto unmap_and_free_vma;
77148 +
77149 +#ifdef CONFIG_PAX_SEGMEXEC
77150 + if (vma_m && (vm_flags & VM_EXECUTABLE))
77151 + added_exe_file_vma(mm);
77152 +#endif
77153 +
77154 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
77155 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
77156 + vma->vm_flags |= VM_PAGEEXEC;
77157 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
77158 + }
77159 +#endif
77160 +
77161 if (vm_flags & VM_EXECUTABLE)
77162 added_exe_file_vma(mm);
77163
77164 @@ -1218,6 +1362,11 @@ munmap_back:
77165 vma_link(mm, vma, prev, rb_link, rb_parent);
77166 file = vma->vm_file;
77167
77168 +#ifdef CONFIG_PAX_SEGMEXEC
77169 + if (vma_m)
77170 + pax_mirror_vma(vma_m, vma);
77171 +#endif
77172 +
77173 /* Once vma denies write, undo our temporary denial count */
77174 if (correct_wcount)
77175 atomic_inc(&inode->i_writecount);
77176 @@ -1226,6 +1375,7 @@ out:
77177
77178 mm->total_vm += len >> PAGE_SHIFT;
77179 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
77180 + track_exec_limit(mm, addr, addr + len, vm_flags);
77181 if (vm_flags & VM_LOCKED) {
77182 /*
77183 * makes pages present; downgrades, drops, reacquires mmap_sem
77184 @@ -1248,6 +1398,12 @@ unmap_and_free_vma:
77185 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
77186 charged = 0;
77187 free_vma:
77188 +
77189 +#ifdef CONFIG_PAX_SEGMEXEC
77190 + if (vma_m)
77191 + kmem_cache_free(vm_area_cachep, vma_m);
77192 +#endif
77193 +
77194 kmem_cache_free(vm_area_cachep, vma);
77195 unacct_error:
77196 if (charged)
77197 @@ -1255,6 +1411,44 @@ unacct_error:
77198 return error;
77199 }
77200
77201 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
77202 +{
77203 + if (!vma) {
77204 +#ifdef CONFIG_STACK_GROWSUP
77205 + if (addr > sysctl_heap_stack_gap)
77206 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
77207 + else
77208 + vma = find_vma(current->mm, 0);
77209 + if (vma && (vma->vm_flags & VM_GROWSUP))
77210 + return false;
77211 +#endif
77212 + return true;
77213 + }
77214 +
77215 + if (addr + len > vma->vm_start)
77216 + return false;
77217 +
77218 + if (vma->vm_flags & VM_GROWSDOWN)
77219 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
77220 +#ifdef CONFIG_STACK_GROWSUP
77221 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
77222 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
77223 +#endif
77224 +
77225 + return true;
77226 +}
77227 +
77228 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
77229 +{
77230 + if (vma->vm_start < len)
77231 + return -ENOMEM;
77232 + if (!(vma->vm_flags & VM_GROWSDOWN))
77233 + return vma->vm_start - len;
77234 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
77235 + return vma->vm_start - len - sysctl_heap_stack_gap;
77236 + return -ENOMEM;
77237 +}
77238 +
77239 /* Get an address range which is currently unmapped.
77240 * For shmat() with addr=0.
77241 *
77242 @@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
77243 if (flags & MAP_FIXED)
77244 return addr;
77245
77246 +#ifdef CONFIG_PAX_RANDMMAP
77247 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
77248 +#endif
77249 +
77250 if (addr) {
77251 addr = PAGE_ALIGN(addr);
77252 - vma = find_vma(mm, addr);
77253 - if (TASK_SIZE - len >= addr &&
77254 - (!vma || addr + len <= vma->vm_start))
77255 - return addr;
77256 + if (TASK_SIZE - len >= addr) {
77257 + vma = find_vma(mm, addr);
77258 + if (check_heap_stack_gap(vma, addr, len))
77259 + return addr;
77260 + }
77261 }
77262 if (len > mm->cached_hole_size) {
77263 - start_addr = addr = mm->free_area_cache;
77264 + start_addr = addr = mm->free_area_cache;
77265 } else {
77266 - start_addr = addr = TASK_UNMAPPED_BASE;
77267 - mm->cached_hole_size = 0;
77268 + start_addr = addr = mm->mmap_base;
77269 + mm->cached_hole_size = 0;
77270 }
77271
77272 full_search:
77273 @@ -1303,34 +1502,40 @@ full_search:
77274 * Start a new search - just in case we missed
77275 * some holes.
77276 */
77277 - if (start_addr != TASK_UNMAPPED_BASE) {
77278 - addr = TASK_UNMAPPED_BASE;
77279 - start_addr = addr;
77280 + if (start_addr != mm->mmap_base) {
77281 + start_addr = addr = mm->mmap_base;
77282 mm->cached_hole_size = 0;
77283 goto full_search;
77284 }
77285 return -ENOMEM;
77286 }
77287 - if (!vma || addr + len <= vma->vm_start) {
77288 - /*
77289 - * Remember the place where we stopped the search:
77290 - */
77291 - mm->free_area_cache = addr + len;
77292 - return addr;
77293 - }
77294 + if (check_heap_stack_gap(vma, addr, len))
77295 + break;
77296 if (addr + mm->cached_hole_size < vma->vm_start)
77297 mm->cached_hole_size = vma->vm_start - addr;
77298 addr = vma->vm_end;
77299 }
77300 +
77301 + /*
77302 + * Remember the place where we stopped the search:
77303 + */
77304 + mm->free_area_cache = addr + len;
77305 + return addr;
77306 }
77307 #endif
77308
77309 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
77310 {
77311 +
77312 +#ifdef CONFIG_PAX_SEGMEXEC
77313 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
77314 + return;
77315 +#endif
77316 +
77317 /*
77318 * Is this a new hole at the lowest possible address?
77319 */
77320 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
77321 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
77322 mm->free_area_cache = addr;
77323 mm->cached_hole_size = ~0UL;
77324 }
77325 @@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
77326 {
77327 struct vm_area_struct *vma;
77328 struct mm_struct *mm = current->mm;
77329 - unsigned long addr = addr0;
77330 + unsigned long base = mm->mmap_base, addr = addr0;
77331
77332 /* requested length too big for entire address space */
77333 if (len > TASK_SIZE)
77334 @@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
77335 if (flags & MAP_FIXED)
77336 return addr;
77337
77338 +#ifdef CONFIG_PAX_RANDMMAP
77339 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
77340 +#endif
77341 +
77342 /* requesting a specific address */
77343 if (addr) {
77344 addr = PAGE_ALIGN(addr);
77345 - vma = find_vma(mm, addr);
77346 - if (TASK_SIZE - len >= addr &&
77347 - (!vma || addr + len <= vma->vm_start))
77348 - return addr;
77349 + if (TASK_SIZE - len >= addr) {
77350 + vma = find_vma(mm, addr);
77351 + if (check_heap_stack_gap(vma, addr, len))
77352 + return addr;
77353 + }
77354 }
77355
77356 /* check if free_area_cache is useful for us */
77357 @@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
77358 /* make sure it can fit in the remaining address space */
77359 if (addr > len) {
77360 vma = find_vma(mm, addr-len);
77361 - if (!vma || addr <= vma->vm_start)
77362 + if (check_heap_stack_gap(vma, addr - len, len))
77363 /* remember the address as a hint for next time */
77364 return (mm->free_area_cache = addr-len);
77365 }
77366 @@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
77367 * return with success:
77368 */
77369 vma = find_vma(mm, addr);
77370 - if (!vma || addr+len <= vma->vm_start)
77371 + if (check_heap_stack_gap(vma, addr, len))
77372 /* remember the address as a hint for next time */
77373 return (mm->free_area_cache = addr);
77374
77375 @@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
77376 mm->cached_hole_size = vma->vm_start - addr;
77377
77378 /* try just below the current vma->vm_start */
77379 - addr = vma->vm_start-len;
77380 - } while (len < vma->vm_start);
77381 + addr = skip_heap_stack_gap(vma, len);
77382 + } while (!IS_ERR_VALUE(addr));
77383
77384 bottomup:
77385 /*
77386 @@ -1414,13 +1624,21 @@ bottomup:
77387 * can happen with large stack limits and large mmap()
77388 * allocations.
77389 */
77390 + mm->mmap_base = TASK_UNMAPPED_BASE;
77391 +
77392 +#ifdef CONFIG_PAX_RANDMMAP
77393 + if (mm->pax_flags & MF_PAX_RANDMMAP)
77394 + mm->mmap_base += mm->delta_mmap;
77395 +#endif
77396 +
77397 + mm->free_area_cache = mm->mmap_base;
77398 mm->cached_hole_size = ~0UL;
77399 - mm->free_area_cache = TASK_UNMAPPED_BASE;
77400 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
77401 /*
77402 * Restore the topdown base:
77403 */
77404 - mm->free_area_cache = mm->mmap_base;
77405 + mm->mmap_base = base;
77406 + mm->free_area_cache = base;
77407 mm->cached_hole_size = ~0UL;
77408
77409 return addr;
77410 @@ -1429,6 +1647,12 @@ bottomup:
77411
77412 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
77413 {
77414 +
77415 +#ifdef CONFIG_PAX_SEGMEXEC
77416 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
77417 + return;
77418 +#endif
77419 +
77420 /*
77421 * Is this a new hole at the highest possible address?
77422 */
77423 @@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
77424 mm->free_area_cache = addr;
77425
77426 /* dont allow allocations above current base */
77427 - if (mm->free_area_cache > mm->mmap_base)
77428 + if (mm->free_area_cache > mm->mmap_base) {
77429 mm->free_area_cache = mm->mmap_base;
77430 + mm->cached_hole_size = ~0UL;
77431 + }
77432 }
77433
77434 unsigned long
77435 @@ -1545,6 +1771,27 @@ out:
77436 return prev ? prev->vm_next : vma;
77437 }
77438
77439 +#ifdef CONFIG_PAX_SEGMEXEC
77440 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
77441 +{
77442 + struct vm_area_struct *vma_m;
77443 +
77444 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
77445 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
77446 + BUG_ON(vma->vm_mirror);
77447 + return NULL;
77448 + }
77449 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
77450 + vma_m = vma->vm_mirror;
77451 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
77452 + BUG_ON(vma->vm_file != vma_m->vm_file);
77453 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
77454 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
77455 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
77456 + return vma_m;
77457 +}
77458 +#endif
77459 +
77460 /*
77461 * Verify that the stack growth is acceptable and
77462 * update accounting. This is shared with both the
77463 @@ -1561,6 +1808,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
77464 return -ENOMEM;
77465
77466 /* Stack limit test */
77467 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
77468 if (size > rlim[RLIMIT_STACK].rlim_cur)
77469 return -ENOMEM;
77470
77471 @@ -1570,6 +1818,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
77472 unsigned long limit;
77473 locked = mm->locked_vm + grow;
77474 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
77475 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
77476 if (locked > limit && !capable(CAP_IPC_LOCK))
77477 return -ENOMEM;
77478 }
77479 @@ -1600,37 +1849,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
77480 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
77481 * vma is the last one with address > vma->vm_end. Have to extend vma.
77482 */
77483 +#ifndef CONFIG_IA64
77484 +static
77485 +#endif
77486 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
77487 {
77488 int error;
77489 + bool locknext;
77490
77491 if (!(vma->vm_flags & VM_GROWSUP))
77492 return -EFAULT;
77493
77494 + /* Also guard against wrapping around to address 0. */
77495 + if (address < PAGE_ALIGN(address+1))
77496 + address = PAGE_ALIGN(address+1);
77497 + else
77498 + return -ENOMEM;
77499 +
77500 /*
77501 * We must make sure the anon_vma is allocated
77502 * so that the anon_vma locking is not a noop.
77503 */
77504 if (unlikely(anon_vma_prepare(vma)))
77505 return -ENOMEM;
77506 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
77507 + if (locknext && anon_vma_prepare(vma->vm_next))
77508 + return -ENOMEM;
77509 anon_vma_lock(vma);
77510 + if (locknext)
77511 + anon_vma_lock(vma->vm_next);
77512
77513 /*
77514 * vma->vm_start/vm_end cannot change under us because the caller
77515 * is required to hold the mmap_sem in read mode. We need the
77516 - * anon_vma lock to serialize against concurrent expand_stacks.
77517 - * Also guard against wrapping around to address 0.
77518 + * anon_vma locks to serialize against concurrent expand_stacks
77519 + * and expand_upwards.
77520 */
77521 - if (address < PAGE_ALIGN(address+4))
77522 - address = PAGE_ALIGN(address+4);
77523 - else {
77524 - anon_vma_unlock(vma);
77525 - return -ENOMEM;
77526 - }
77527 error = 0;
77528
77529 /* Somebody else might have raced and expanded it already */
77530 - if (address > vma->vm_end) {
77531 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
77532 + error = -ENOMEM;
77533 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
77534 unsigned long size, grow;
77535
77536 size = address - vma->vm_start;
77537 @@ -1643,6 +1903,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
77538 vma->vm_end = address;
77539 }
77540 }
77541 + if (locknext)
77542 + anon_vma_unlock(vma->vm_next);
77543 anon_vma_unlock(vma);
77544 return error;
77545 }
77546 @@ -1655,6 +1917,8 @@ static int expand_downwards(struct vm_area_struct *vma,
77547 unsigned long address)
77548 {
77549 int error;
77550 + bool lockprev = false;
77551 + struct vm_area_struct *prev;
77552
77553 /*
77554 * We must make sure the anon_vma is allocated
77555 @@ -1668,6 +1932,15 @@ static int expand_downwards(struct vm_area_struct *vma,
77556 if (error)
77557 return error;
77558
77559 + prev = vma->vm_prev;
77560 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
77561 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
77562 +#endif
77563 + if (lockprev && anon_vma_prepare(prev))
77564 + return -ENOMEM;
77565 + if (lockprev)
77566 + anon_vma_lock(prev);
77567 +
77568 anon_vma_lock(vma);
77569
77570 /*
77571 @@ -1677,9 +1950,17 @@ static int expand_downwards(struct vm_area_struct *vma,
77572 */
77573
77574 /* Somebody else might have raced and expanded it already */
77575 - if (address < vma->vm_start) {
77576 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
77577 + error = -ENOMEM;
77578 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
77579 unsigned long size, grow;
77580
77581 +#ifdef CONFIG_PAX_SEGMEXEC
77582 + struct vm_area_struct *vma_m;
77583 +
77584 + vma_m = pax_find_mirror_vma(vma);
77585 +#endif
77586 +
77587 size = vma->vm_end - address;
77588 grow = (vma->vm_start - address) >> PAGE_SHIFT;
77589
77590 @@ -1689,10 +1970,22 @@ static int expand_downwards(struct vm_area_struct *vma,
77591 if (!error) {
77592 vma->vm_start = address;
77593 vma->vm_pgoff -= grow;
77594 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
77595 +
77596 +#ifdef CONFIG_PAX_SEGMEXEC
77597 + if (vma_m) {
77598 + vma_m->vm_start -= grow << PAGE_SHIFT;
77599 + vma_m->vm_pgoff -= grow;
77600 + }
77601 +#endif
77602 +
77603 +
77604 }
77605 }
77606 }
77607 anon_vma_unlock(vma);
77608 + if (lockprev)
77609 + anon_vma_unlock(prev);
77610 return error;
77611 }
77612
77613 @@ -1768,6 +2061,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
77614 do {
77615 long nrpages = vma_pages(vma);
77616
77617 +#ifdef CONFIG_PAX_SEGMEXEC
77618 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
77619 + vma = remove_vma(vma);
77620 + continue;
77621 + }
77622 +#endif
77623 +
77624 mm->total_vm -= nrpages;
77625 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
77626 vma = remove_vma(vma);
77627 @@ -1813,6 +2113,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
77628 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
77629 vma->vm_prev = NULL;
77630 do {
77631 +
77632 +#ifdef CONFIG_PAX_SEGMEXEC
77633 + if (vma->vm_mirror) {
77634 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
77635 + vma->vm_mirror->vm_mirror = NULL;
77636 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
77637 + vma->vm_mirror = NULL;
77638 + }
77639 +#endif
77640 +
77641 rb_erase(&vma->vm_rb, &mm->mm_rb);
77642 mm->map_count--;
77643 tail_vma = vma;
77644 @@ -1840,10 +2150,25 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
77645 struct mempolicy *pol;
77646 struct vm_area_struct *new;
77647
77648 +#ifdef CONFIG_PAX_SEGMEXEC
77649 + struct vm_area_struct *vma_m, *new_m = NULL;
77650 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
77651 +#endif
77652 +
77653 if (is_vm_hugetlb_page(vma) && (addr &
77654 ~(huge_page_mask(hstate_vma(vma)))))
77655 return -EINVAL;
77656
77657 +#ifdef CONFIG_PAX_SEGMEXEC
77658 + vma_m = pax_find_mirror_vma(vma);
77659 +
77660 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
77661 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
77662 + if (mm->map_count >= sysctl_max_map_count-1)
77663 + return -ENOMEM;
77664 + } else
77665 +#endif
77666 +
77667 if (mm->map_count >= sysctl_max_map_count)
77668 return -ENOMEM;
77669
77670 @@ -1851,6 +2176,16 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
77671 if (!new)
77672 return -ENOMEM;
77673
77674 +#ifdef CONFIG_PAX_SEGMEXEC
77675 + if (vma_m) {
77676 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
77677 + if (!new_m) {
77678 + kmem_cache_free(vm_area_cachep, new);
77679 + return -ENOMEM;
77680 + }
77681 + }
77682 +#endif
77683 +
77684 /* most fields are the same, copy all, and then fixup */
77685 *new = *vma;
77686
77687 @@ -1861,8 +2196,29 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
77688 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
77689 }
77690
77691 +#ifdef CONFIG_PAX_SEGMEXEC
77692 + if (vma_m) {
77693 + *new_m = *vma_m;
77694 + new_m->vm_mirror = new;
77695 + new->vm_mirror = new_m;
77696 +
77697 + if (new_below)
77698 + new_m->vm_end = addr_m;
77699 + else {
77700 + new_m->vm_start = addr_m;
77701 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
77702 + }
77703 + }
77704 +#endif
77705 +
77706 pol = mpol_dup(vma_policy(vma));
77707 if (IS_ERR(pol)) {
77708 +
77709 +#ifdef CONFIG_PAX_SEGMEXEC
77710 + if (new_m)
77711 + kmem_cache_free(vm_area_cachep, new_m);
77712 +#endif
77713 +
77714 kmem_cache_free(vm_area_cachep, new);
77715 return PTR_ERR(pol);
77716 }
77717 @@ -1883,6 +2239,28 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
77718 else
77719 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
77720
77721 +#ifdef CONFIG_PAX_SEGMEXEC
77722 + if (vma_m) {
77723 + mpol_get(pol);
77724 + vma_set_policy(new_m, pol);
77725 +
77726 + if (new_m->vm_file) {
77727 + get_file(new_m->vm_file);
77728 + if (vma_m->vm_flags & VM_EXECUTABLE)
77729 + added_exe_file_vma(mm);
77730 + }
77731 +
77732 + if (new_m->vm_ops && new_m->vm_ops->open)
77733 + new_m->vm_ops->open(new_m);
77734 +
77735 + if (new_below)
77736 + vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
77737 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
77738 + else
77739 + vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
77740 + }
77741 +#endif
77742 +
77743 return 0;
77744 }
77745
77746 @@ -1891,11 +2269,30 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
77747 * work. This now handles partial unmappings.
77748 * Jeremy Fitzhardinge <jeremy@goop.org>
77749 */
77750 +#ifdef CONFIG_PAX_SEGMEXEC
77751 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
77752 {
77753 + int ret = __do_munmap(mm, start, len);
77754 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
77755 + return ret;
77756 +
77757 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
77758 +}
77759 +
77760 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
77761 +#else
77762 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
77763 +#endif
77764 +{
77765 unsigned long end;
77766 struct vm_area_struct *vma, *prev, *last;
77767
77768 + /*
77769 + * mm->mmap_sem is required to protect against another thread
77770 + * changing the mappings in case we sleep.
77771 + */
77772 + verify_mm_writelocked(mm);
77773 +
77774 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
77775 return -EINVAL;
77776
77777 @@ -1959,6 +2356,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
77778 /* Fix up all other VM information */
77779 remove_vma_list(mm, vma);
77780
77781 + track_exec_limit(mm, start, end, 0UL);
77782 +
77783 return 0;
77784 }
77785
77786 @@ -1971,22 +2370,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
77787
77788 profile_munmap(addr);
77789
77790 +#ifdef CONFIG_PAX_SEGMEXEC
77791 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
77792 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
77793 + return -EINVAL;
77794 +#endif
77795 +
77796 down_write(&mm->mmap_sem);
77797 ret = do_munmap(mm, addr, len);
77798 up_write(&mm->mmap_sem);
77799 return ret;
77800 }
77801
77802 -static inline void verify_mm_writelocked(struct mm_struct *mm)
77803 -{
77804 -#ifdef CONFIG_DEBUG_VM
77805 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
77806 - WARN_ON(1);
77807 - up_read(&mm->mmap_sem);
77808 - }
77809 -#endif
77810 -}
77811 -
77812 /*
77813 * this is really a simplified "do_mmap". it only handles
77814 * anonymous maps. eventually we may be able to do some
77815 @@ -2000,6 +2395,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
77816 struct rb_node ** rb_link, * rb_parent;
77817 pgoff_t pgoff = addr >> PAGE_SHIFT;
77818 int error;
77819 + unsigned long charged;
77820
77821 len = PAGE_ALIGN(len);
77822 if (!len)
77823 @@ -2011,16 +2407,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
77824
77825 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
77826
77827 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
77828 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
77829 + flags &= ~VM_EXEC;
77830 +
77831 +#ifdef CONFIG_PAX_MPROTECT
77832 + if (mm->pax_flags & MF_PAX_MPROTECT)
77833 + flags &= ~VM_MAYEXEC;
77834 +#endif
77835 +
77836 + }
77837 +#endif
77838 +
77839 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
77840 if (error & ~PAGE_MASK)
77841 return error;
77842
77843 + charged = len >> PAGE_SHIFT;
77844 +
77845 /*
77846 * mlock MCL_FUTURE?
77847 */
77848 if (mm->def_flags & VM_LOCKED) {
77849 unsigned long locked, lock_limit;
77850 - locked = len >> PAGE_SHIFT;
77851 + locked = charged;
77852 locked += mm->locked_vm;
77853 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
77854 lock_limit >>= PAGE_SHIFT;
77855 @@ -2037,22 +2447,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
77856 /*
77857 * Clear old maps. this also does some error checking for us
77858 */
77859 - munmap_back:
77860 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
77861 if (vma && vma->vm_start < addr + len) {
77862 if (do_munmap(mm, addr, len))
77863 return -ENOMEM;
77864 - goto munmap_back;
77865 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
77866 + BUG_ON(vma && vma->vm_start < addr + len);
77867 }
77868
77869 /* Check against address space limits *after* clearing old maps... */
77870 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
77871 + if (!may_expand_vm(mm, charged))
77872 return -ENOMEM;
77873
77874 if (mm->map_count > sysctl_max_map_count)
77875 return -ENOMEM;
77876
77877 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
77878 + if (security_vm_enough_memory(charged))
77879 return -ENOMEM;
77880
77881 /* Can we just expand an old private anonymous mapping? */
77882 @@ -2066,7 +2476,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
77883 */
77884 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
77885 if (!vma) {
77886 - vm_unacct_memory(len >> PAGE_SHIFT);
77887 + vm_unacct_memory(charged);
77888 return -ENOMEM;
77889 }
77890
77891 @@ -2078,11 +2488,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
77892 vma->vm_page_prot = vm_get_page_prot(flags);
77893 vma_link(mm, vma, prev, rb_link, rb_parent);
77894 out:
77895 - mm->total_vm += len >> PAGE_SHIFT;
77896 + mm->total_vm += charged;
77897 if (flags & VM_LOCKED) {
77898 if (!mlock_vma_pages_range(vma, addr, addr + len))
77899 - mm->locked_vm += (len >> PAGE_SHIFT);
77900 + mm->locked_vm += charged;
77901 }
77902 + track_exec_limit(mm, addr, addr + len, flags);
77903 return addr;
77904 }
77905
77906 @@ -2129,8 +2540,10 @@ void exit_mmap(struct mm_struct *mm)
77907 * Walk the list again, actually closing and freeing it,
77908 * with preemption enabled, without holding any MM locks.
77909 */
77910 - while (vma)
77911 + while (vma) {
77912 + vma->vm_mirror = NULL;
77913 vma = remove_vma(vma);
77914 + }
77915
77916 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
77917 }
77918 @@ -2144,6 +2557,10 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
77919 struct vm_area_struct * __vma, * prev;
77920 struct rb_node ** rb_link, * rb_parent;
77921
77922 +#ifdef CONFIG_PAX_SEGMEXEC
77923 + struct vm_area_struct *vma_m = NULL;
77924 +#endif
77925 +
77926 /*
77927 * The vm_pgoff of a purely anonymous vma should be irrelevant
77928 * until its first write fault, when page's anon_vma and index
77929 @@ -2166,7 +2583,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
77930 if ((vma->vm_flags & VM_ACCOUNT) &&
77931 security_vm_enough_memory_mm(mm, vma_pages(vma)))
77932 return -ENOMEM;
77933 +
77934 +#ifdef CONFIG_PAX_SEGMEXEC
77935 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
77936 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
77937 + if (!vma_m)
77938 + return -ENOMEM;
77939 + }
77940 +#endif
77941 +
77942 vma_link(mm, vma, prev, rb_link, rb_parent);
77943 +
77944 +#ifdef CONFIG_PAX_SEGMEXEC
77945 + if (vma_m)
77946 + pax_mirror_vma(vma_m, vma);
77947 +#endif
77948 +
77949 return 0;
77950 }
77951
77952 @@ -2184,6 +2616,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
77953 struct rb_node **rb_link, *rb_parent;
77954 struct mempolicy *pol;
77955
77956 + BUG_ON(vma->vm_mirror);
77957 +
77958 /*
77959 * If anonymous vma has not yet been faulted, update new pgoff
77960 * to match new location, to increase its chance of merging.
77961 @@ -2227,6 +2661,35 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
77962 return new_vma;
77963 }
77964
77965 +#ifdef CONFIG_PAX_SEGMEXEC
77966 +void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
77967 +{
77968 + struct vm_area_struct *prev_m;
77969 + struct rb_node **rb_link_m, *rb_parent_m;
77970 + struct mempolicy *pol_m;
77971 +
77972 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
77973 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
77974 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
77975 + *vma_m = *vma;
77976 + pol_m = vma_policy(vma_m);
77977 + mpol_get(pol_m);
77978 + vma_set_policy(vma_m, pol_m);
77979 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
77980 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
77981 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
77982 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
77983 + if (vma_m->vm_file)
77984 + get_file(vma_m->vm_file);
77985 + if (vma_m->vm_ops && vma_m->vm_ops->open)
77986 + vma_m->vm_ops->open(vma_m);
77987 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
77988 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
77989 + vma_m->vm_mirror = vma;
77990 + vma->vm_mirror = vma_m;
77991 +}
77992 +#endif
77993 +
77994 /*
77995 * Return true if the calling process may expand its vm space by the passed
77996 * number of pages
77997 @@ -2237,7 +2700,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
77998 unsigned long lim;
77999
78000 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
78001 -
78002 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
78003 if (cur + npages > lim)
78004 return 0;
78005 return 1;
78006 @@ -2307,6 +2770,22 @@ int install_special_mapping(struct mm_struct *mm,
78007 vma->vm_start = addr;
78008 vma->vm_end = addr + len;
78009
78010 +#ifdef CONFIG_PAX_MPROTECT
78011 + if (mm->pax_flags & MF_PAX_MPROTECT) {
78012 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
78013 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
78014 + return -EPERM;
78015 + if (!(vm_flags & VM_EXEC))
78016 + vm_flags &= ~VM_MAYEXEC;
78017 +#else
78018 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
78019 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
78020 +#endif
78021 + else
78022 + vm_flags &= ~VM_MAYWRITE;
78023 + }
78024 +#endif
78025 +
78026 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
78027 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
78028
78029 diff --git a/mm/mprotect.c b/mm/mprotect.c
78030 index 1737c7e..c7faeb4 100644
78031 --- a/mm/mprotect.c
78032 +++ b/mm/mprotect.c
78033 @@ -24,10 +24,16 @@
78034 #include <linux/mmu_notifier.h>
78035 #include <linux/migrate.h>
78036 #include <linux/perf_event.h>
78037 +
78038 +#ifdef CONFIG_PAX_MPROTECT
78039 +#include <linux/elf.h>
78040 +#endif
78041 +
78042 #include <asm/uaccess.h>
78043 #include <asm/pgtable.h>
78044 #include <asm/cacheflush.h>
78045 #include <asm/tlbflush.h>
78046 +#include <asm/mmu_context.h>
78047
78048 #ifndef pgprot_modify
78049 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
78050 @@ -132,6 +138,48 @@ static void change_protection(struct vm_area_struct *vma,
78051 flush_tlb_range(vma, start, end);
78052 }
78053
78054 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
78055 +/* called while holding the mmap semaphor for writing except stack expansion */
78056 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
78057 +{
78058 + unsigned long oldlimit, newlimit = 0UL;
78059 +
78060 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
78061 + return;
78062 +
78063 + spin_lock(&mm->page_table_lock);
78064 + oldlimit = mm->context.user_cs_limit;
78065 + if ((prot & VM_EXEC) && oldlimit < end)
78066 + /* USER_CS limit moved up */
78067 + newlimit = end;
78068 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
78069 + /* USER_CS limit moved down */
78070 + newlimit = start;
78071 +
78072 + if (newlimit) {
78073 + mm->context.user_cs_limit = newlimit;
78074 +
78075 +#ifdef CONFIG_SMP
78076 + wmb();
78077 + cpus_clear(mm->context.cpu_user_cs_mask);
78078 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
78079 +#endif
78080 +
78081 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
78082 + }
78083 + spin_unlock(&mm->page_table_lock);
78084 + if (newlimit == end) {
78085 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
78086 +
78087 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
78088 + if (is_vm_hugetlb_page(vma))
78089 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
78090 + else
78091 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
78092 + }
78093 +}
78094 +#endif
78095 +
78096 int
78097 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
78098 unsigned long start, unsigned long end, unsigned long newflags)
78099 @@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
78100 int error;
78101 int dirty_accountable = 0;
78102
78103 +#ifdef CONFIG_PAX_SEGMEXEC
78104 + struct vm_area_struct *vma_m = NULL;
78105 + unsigned long start_m, end_m;
78106 +
78107 + start_m = start + SEGMEXEC_TASK_SIZE;
78108 + end_m = end + SEGMEXEC_TASK_SIZE;
78109 +#endif
78110 +
78111 if (newflags == oldflags) {
78112 *pprev = vma;
78113 return 0;
78114 }
78115
78116 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
78117 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
78118 +
78119 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
78120 + return -ENOMEM;
78121 +
78122 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
78123 + return -ENOMEM;
78124 + }
78125 +
78126 /*
78127 * If we make a private mapping writable we increase our commit;
78128 * but (without finer accounting) cannot reduce our commit if we
78129 @@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
78130 }
78131 }
78132
78133 +#ifdef CONFIG_PAX_SEGMEXEC
78134 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
78135 + if (start != vma->vm_start) {
78136 + error = split_vma(mm, vma, start, 1);
78137 + if (error)
78138 + goto fail;
78139 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
78140 + *pprev = (*pprev)->vm_next;
78141 + }
78142 +
78143 + if (end != vma->vm_end) {
78144 + error = split_vma(mm, vma, end, 0);
78145 + if (error)
78146 + goto fail;
78147 + }
78148 +
78149 + if (pax_find_mirror_vma(vma)) {
78150 + error = __do_munmap(mm, start_m, end_m - start_m);
78151 + if (error)
78152 + goto fail;
78153 + } else {
78154 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
78155 + if (!vma_m) {
78156 + error = -ENOMEM;
78157 + goto fail;
78158 + }
78159 + vma->vm_flags = newflags;
78160 + pax_mirror_vma(vma_m, vma);
78161 + }
78162 + }
78163 +#endif
78164 +
78165 /*
78166 * First try to merge with previous and/or next vma.
78167 */
78168 @@ -195,9 +293,21 @@ success:
78169 * vm_flags and vm_page_prot are protected by the mmap_sem
78170 * held in write mode.
78171 */
78172 +
78173 +#ifdef CONFIG_PAX_SEGMEXEC
78174 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
78175 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
78176 +#endif
78177 +
78178 vma->vm_flags = newflags;
78179 +
78180 +#ifdef CONFIG_PAX_MPROTECT
78181 + if (mm->binfmt && mm->binfmt->handle_mprotect)
78182 + mm->binfmt->handle_mprotect(vma, newflags);
78183 +#endif
78184 +
78185 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
78186 - vm_get_page_prot(newflags));
78187 + vm_get_page_prot(vma->vm_flags));
78188
78189 if (vma_wants_writenotify(vma)) {
78190 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
78191 @@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
78192 end = start + len;
78193 if (end <= start)
78194 return -ENOMEM;
78195 +
78196 +#ifdef CONFIG_PAX_SEGMEXEC
78197 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
78198 + if (end > SEGMEXEC_TASK_SIZE)
78199 + return -EINVAL;
78200 + } else
78201 +#endif
78202 +
78203 + if (end > TASK_SIZE)
78204 + return -EINVAL;
78205 +
78206 if (!arch_validate_prot(prot))
78207 return -EINVAL;
78208
78209 @@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
78210 /*
78211 * Does the application expect PROT_READ to imply PROT_EXEC:
78212 */
78213 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
78214 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
78215 prot |= PROT_EXEC;
78216
78217 vm_flags = calc_vm_prot_bits(prot);
78218 @@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
78219 if (start > vma->vm_start)
78220 prev = vma;
78221
78222 +#ifdef CONFIG_PAX_MPROTECT
78223 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
78224 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
78225 +#endif
78226 +
78227 for (nstart = start ; ; ) {
78228 unsigned long newflags;
78229
78230 @@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
78231
78232 /* newflags >> 4 shift VM_MAY% in place of VM_% */
78233 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
78234 + if (prot & (PROT_WRITE | PROT_EXEC))
78235 + gr_log_rwxmprotect(vma->vm_file);
78236 +
78237 + error = -EACCES;
78238 + goto out;
78239 + }
78240 +
78241 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
78242 error = -EACCES;
78243 goto out;
78244 }
78245 @@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
78246 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
78247 if (error)
78248 goto out;
78249 +
78250 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
78251 +
78252 nstart = tmp;
78253
78254 if (nstart < prev->vm_end)
78255 diff --git a/mm/mremap.c b/mm/mremap.c
78256 index 3e98d79..1706cec 100644
78257 --- a/mm/mremap.c
78258 +++ b/mm/mremap.c
78259 @@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
78260 continue;
78261 pte = ptep_clear_flush(vma, old_addr, old_pte);
78262 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
78263 +
78264 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
78265 + if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
78266 + pte = pte_exprotect(pte);
78267 +#endif
78268 +
78269 set_pte_at(mm, new_addr, new_pte, pte);
78270 }
78271
78272 @@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
78273 if (is_vm_hugetlb_page(vma))
78274 goto Einval;
78275
78276 +#ifdef CONFIG_PAX_SEGMEXEC
78277 + if (pax_find_mirror_vma(vma))
78278 + goto Einval;
78279 +#endif
78280 +
78281 /* We can't remap across vm area boundaries */
78282 if (old_len > vma->vm_end - addr)
78283 goto Efault;
78284 @@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned long addr,
78285 unsigned long ret = -EINVAL;
78286 unsigned long charged = 0;
78287 unsigned long map_flags;
78288 + unsigned long pax_task_size = TASK_SIZE;
78289
78290 if (new_addr & ~PAGE_MASK)
78291 goto out;
78292
78293 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
78294 +#ifdef CONFIG_PAX_SEGMEXEC
78295 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
78296 + pax_task_size = SEGMEXEC_TASK_SIZE;
78297 +#endif
78298 +
78299 + pax_task_size -= PAGE_SIZE;
78300 +
78301 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
78302 goto out;
78303
78304 /* Check if the location we're moving into overlaps the
78305 * old location at all, and fail if it does.
78306 */
78307 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
78308 - goto out;
78309 -
78310 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
78311 + if (addr + old_len > new_addr && new_addr + new_len > addr)
78312 goto out;
78313
78314 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
78315 @@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long addr,
78316 struct vm_area_struct *vma;
78317 unsigned long ret = -EINVAL;
78318 unsigned long charged = 0;
78319 + unsigned long pax_task_size = TASK_SIZE;
78320
78321 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
78322 goto out;
78323 @@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long addr,
78324 if (!new_len)
78325 goto out;
78326
78327 +#ifdef CONFIG_PAX_SEGMEXEC
78328 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
78329 + pax_task_size = SEGMEXEC_TASK_SIZE;
78330 +#endif
78331 +
78332 + pax_task_size -= PAGE_SIZE;
78333 +
78334 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
78335 + old_len > pax_task_size || addr > pax_task_size-old_len)
78336 + goto out;
78337 +
78338 if (flags & MREMAP_FIXED) {
78339 if (flags & MREMAP_MAYMOVE)
78340 ret = mremap_to(addr, old_len, new_addr, new_len);
78341 @@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long addr,
78342 addr + new_len);
78343 }
78344 ret = addr;
78345 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
78346 goto out;
78347 }
78348 }
78349 @@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long addr,
78350 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
78351 if (ret)
78352 goto out;
78353 +
78354 + map_flags = vma->vm_flags;
78355 ret = move_vma(vma, addr, old_len, new_len, new_addr);
78356 + if (!(ret & ~PAGE_MASK)) {
78357 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
78358 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
78359 + }
78360 }
78361 out:
78362 if (ret & ~PAGE_MASK)
78363 diff --git a/mm/nommu.c b/mm/nommu.c
78364 index 406e8d4..53970d3 100644
78365 --- a/mm/nommu.c
78366 +++ b/mm/nommu.c
78367 @@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
78368 int sysctl_overcommit_ratio = 50; /* default is 50% */
78369 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
78370 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
78371 -int heap_stack_gap = 0;
78372
78373 atomic_long_t mmap_pages_allocated;
78374
78375 @@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
78376 EXPORT_SYMBOL(find_vma);
78377
78378 /*
78379 - * find a VMA
78380 - * - we don't extend stack VMAs under NOMMU conditions
78381 - */
78382 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
78383 -{
78384 - return find_vma(mm, addr);
78385 -}
78386 -
78387 -/*
78388 * expand a stack to a given address
78389 * - not supported under NOMMU conditions
78390 */
78391 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
78392 index 3ecab7e..594a471 100644
78393 --- a/mm/page_alloc.c
78394 +++ b/mm/page_alloc.c
78395 @@ -289,7 +289,7 @@ out:
78396 * This usage means that zero-order pages may not be compound.
78397 */
78398
78399 -static void free_compound_page(struct page *page)
78400 +void free_compound_page(struct page *page)
78401 {
78402 __free_pages_ok(page, compound_order(page));
78403 }
78404 @@ -587,6 +587,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
78405 int bad = 0;
78406 int wasMlocked = __TestClearPageMlocked(page);
78407
78408 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
78409 + unsigned long index = 1UL << order;
78410 +#endif
78411 +
78412 kmemcheck_free_shadow(page, order);
78413
78414 for (i = 0 ; i < (1 << order) ; ++i)
78415 @@ -599,6 +603,12 @@ static void __free_pages_ok(struct page *page, unsigned int order)
78416 debug_check_no_obj_freed(page_address(page),
78417 PAGE_SIZE << order);
78418 }
78419 +
78420 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
78421 + for (; index; --index)
78422 + sanitize_highpage(page + index - 1);
78423 +#endif
78424 +
78425 arch_free_page(page, order);
78426 kernel_map_pages(page, 1 << order, 0);
78427
78428 @@ -702,8 +712,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
78429 arch_alloc_page(page, order);
78430 kernel_map_pages(page, 1 << order, 1);
78431
78432 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
78433 if (gfp_flags & __GFP_ZERO)
78434 prep_zero_page(page, order, gfp_flags);
78435 +#endif
78436
78437 if (order && (gfp_flags & __GFP_COMP))
78438 prep_compound_page(page, order);
78439 @@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct page *page, int cold)
78440 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
78441 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
78442 }
78443 +
78444 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
78445 + sanitize_highpage(page);
78446 +#endif
78447 +
78448 arch_free_page(page, 0);
78449 kernel_map_pages(page, 1, 0);
78450
78451 @@ -2179,6 +2196,8 @@ void show_free_areas(void)
78452 int cpu;
78453 struct zone *zone;
78454
78455 + pax_track_stack();
78456 +
78457 for_each_populated_zone(zone) {
78458 show_node(zone);
78459 printk("%s per-cpu:\n", zone->name);
78460 @@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct pglist_data *pgdat,
78461 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
78462 }
78463 #else
78464 -static void inline setup_usemap(struct pglist_data *pgdat,
78465 +static inline void setup_usemap(struct pglist_data *pgdat,
78466 struct zone *zone, unsigned long zonesize) {}
78467 #endif /* CONFIG_SPARSEMEM */
78468
78469 diff --git a/mm/percpu.c b/mm/percpu.c
78470 index c90614a..5f7b7b8 100644
78471 --- a/mm/percpu.c
78472 +++ b/mm/percpu.c
78473 @@ -115,7 +115,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
78474 static unsigned int pcpu_high_unit_cpu __read_mostly;
78475
78476 /* the address of the first chunk which starts with the kernel static area */
78477 -void *pcpu_base_addr __read_mostly;
78478 +void *pcpu_base_addr __read_only;
78479 EXPORT_SYMBOL_GPL(pcpu_base_addr);
78480
78481 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
78482 diff --git a/mm/rmap.c b/mm/rmap.c
78483 index dd43373..d848cd7 100644
78484 --- a/mm/rmap.c
78485 +++ b/mm/rmap.c
78486 @@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_struct *vma)
78487 /* page_table_lock to protect against threads */
78488 spin_lock(&mm->page_table_lock);
78489 if (likely(!vma->anon_vma)) {
78490 +
78491 +#ifdef CONFIG_PAX_SEGMEXEC
78492 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
78493 +
78494 + if (vma_m) {
78495 + BUG_ON(vma_m->anon_vma);
78496 + vma_m->anon_vma = anon_vma;
78497 + list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
78498 + }
78499 +#endif
78500 +
78501 vma->anon_vma = anon_vma;
78502 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
78503 allocated = NULL;
78504 diff --git a/mm/shmem.c b/mm/shmem.c
78505 index 3e0005b..1d659a8 100644
78506 --- a/mm/shmem.c
78507 +++ b/mm/shmem.c
78508 @@ -31,7 +31,7 @@
78509 #include <linux/swap.h>
78510 #include <linux/ima.h>
78511
78512 -static struct vfsmount *shm_mnt;
78513 +struct vfsmount *shm_mnt;
78514
78515 #ifdef CONFIG_SHMEM
78516 /*
78517 @@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
78518 goto unlock;
78519 }
78520 entry = shmem_swp_entry(info, index, NULL);
78521 + if (!entry)
78522 + goto unlock;
78523 if (entry->val) {
78524 /*
78525 * The more uptodate page coming down from a stacked
78526 @@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
78527 struct vm_area_struct pvma;
78528 struct page *page;
78529
78530 + pax_track_stack();
78531 +
78532 spol = mpol_cond_copy(&mpol,
78533 mpol_shared_policy_lookup(&info->policy, idx));
78534
78535 @@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
78536
78537 info = SHMEM_I(inode);
78538 inode->i_size = len-1;
78539 - if (len <= (char *)inode - (char *)info) {
78540 + if (len <= (char *)inode - (char *)info && len <= 64) {
78541 /* do it inline */
78542 memcpy(info, symname, len);
78543 inode->i_op = &shmem_symlink_inline_operations;
78544 @@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
78545 int err = -ENOMEM;
78546
78547 /* Round up to L1_CACHE_BYTES to resist false sharing */
78548 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
78549 - L1_CACHE_BYTES), GFP_KERNEL);
78550 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
78551 if (!sbinfo)
78552 return -ENOMEM;
78553
78554 diff --git a/mm/slab.c b/mm/slab.c
78555 index c8d466a..909e01e 100644
78556 --- a/mm/slab.c
78557 +++ b/mm/slab.c
78558 @@ -174,7 +174,7 @@
78559
78560 /* Legal flag mask for kmem_cache_create(). */
78561 #if DEBUG
78562 -# define CREATE_MASK (SLAB_RED_ZONE | \
78563 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
78564 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
78565 SLAB_CACHE_DMA | \
78566 SLAB_STORE_USER | \
78567 @@ -182,7 +182,7 @@
78568 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
78569 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
78570 #else
78571 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
78572 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
78573 SLAB_CACHE_DMA | \
78574 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
78575 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
78576 @@ -308,7 +308,7 @@ struct kmem_list3 {
78577 * Need this for bootstrapping a per node allocator.
78578 */
78579 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
78580 -struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
78581 +struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
78582 #define CACHE_CACHE 0
78583 #define SIZE_AC MAX_NUMNODES
78584 #define SIZE_L3 (2 * MAX_NUMNODES)
78585 @@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
78586 if ((x)->max_freeable < i) \
78587 (x)->max_freeable = i; \
78588 } while (0)
78589 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
78590 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
78591 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
78592 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
78593 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
78594 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
78595 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
78596 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
78597 #else
78598 #define STATS_INC_ACTIVE(x) do { } while (0)
78599 #define STATS_DEC_ACTIVE(x) do { } while (0)
78600 @@ -558,7 +558,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
78601 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
78602 */
78603 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
78604 - const struct slab *slab, void *obj)
78605 + const struct slab *slab, const void *obj)
78606 {
78607 u32 offset = (obj - slab->s_mem);
78608 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
78609 @@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
78610 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
78611 sizes[INDEX_AC].cs_size,
78612 ARCH_KMALLOC_MINALIGN,
78613 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
78614 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
78615 NULL);
78616
78617 if (INDEX_AC != INDEX_L3) {
78618 @@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
78619 kmem_cache_create(names[INDEX_L3].name,
78620 sizes[INDEX_L3].cs_size,
78621 ARCH_KMALLOC_MINALIGN,
78622 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
78623 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
78624 NULL);
78625 }
78626
78627 @@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
78628 sizes->cs_cachep = kmem_cache_create(names->name,
78629 sizes->cs_size,
78630 ARCH_KMALLOC_MINALIGN,
78631 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
78632 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
78633 NULL);
78634 }
78635 #ifdef CONFIG_ZONE_DMA
78636 @@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, void *p)
78637 }
78638 /* cpu stats */
78639 {
78640 - unsigned long allochit = atomic_read(&cachep->allochit);
78641 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
78642 - unsigned long freehit = atomic_read(&cachep->freehit);
78643 - unsigned long freemiss = atomic_read(&cachep->freemiss);
78644 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
78645 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
78646 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
78647 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
78648
78649 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
78650 allochit, allocmiss, freehit, freemiss);
78651 @@ -4471,15 +4471,70 @@ static const struct file_operations proc_slabstats_operations = {
78652
78653 static int __init slab_proc_init(void)
78654 {
78655 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
78656 + mode_t gr_mode = S_IRUGO;
78657 +
78658 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
78659 + gr_mode = S_IRUSR;
78660 +#endif
78661 +
78662 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
78663 #ifdef CONFIG_DEBUG_SLAB_LEAK
78664 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
78665 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
78666 #endif
78667 return 0;
78668 }
78669 module_init(slab_proc_init);
78670 #endif
78671
78672 +void check_object_size(const void *ptr, unsigned long n, bool to)
78673 +{
78674 +
78675 +#ifdef CONFIG_PAX_USERCOPY
78676 + struct page *page;
78677 + struct kmem_cache *cachep = NULL;
78678 + struct slab *slabp;
78679 + unsigned int objnr;
78680 + unsigned long offset;
78681 + const char *type;
78682 +
78683 + if (!n)
78684 + return;
78685 +
78686 + type = "<null>";
78687 + if (ZERO_OR_NULL_PTR(ptr))
78688 + goto report;
78689 +
78690 + if (!virt_addr_valid(ptr))
78691 + return;
78692 +
78693 + page = virt_to_head_page(ptr);
78694 +
78695 + type = "<process stack>";
78696 + if (!PageSlab(page)) {
78697 + if (object_is_on_stack(ptr, n) == -1)
78698 + goto report;
78699 + return;
78700 + }
78701 +
78702 + cachep = page_get_cache(page);
78703 + type = cachep->name;
78704 + if (!(cachep->flags & SLAB_USERCOPY))
78705 + goto report;
78706 +
78707 + slabp = page_get_slab(page);
78708 + objnr = obj_to_index(cachep, slabp, ptr);
78709 + BUG_ON(objnr >= cachep->num);
78710 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
78711 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
78712 + return;
78713 +
78714 +report:
78715 + pax_report_usercopy(ptr, n, to, type);
78716 +#endif
78717 +
78718 +}
78719 +EXPORT_SYMBOL(check_object_size);
78720 +
78721 /**
78722 * ksize - get the actual amount of memory allocated for a given object
78723 * @objp: Pointer to the object
78724 diff --git a/mm/slob.c b/mm/slob.c
78725 index 837ebd6..4712174 100644
78726 --- a/mm/slob.c
78727 +++ b/mm/slob.c
78728 @@ -29,7 +29,7 @@
78729 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
78730 * alloc_pages() directly, allocating compound pages so the page order
78731 * does not have to be separately tracked, and also stores the exact
78732 - * allocation size in page->private so that it can be used to accurately
78733 + * allocation size in slob_page->size so that it can be used to accurately
78734 * provide ksize(). These objects are detected in kfree() because slob_page()
78735 * is false for them.
78736 *
78737 @@ -58,6 +58,7 @@
78738 */
78739
78740 #include <linux/kernel.h>
78741 +#include <linux/sched.h>
78742 #include <linux/slab.h>
78743 #include <linux/mm.h>
78744 #include <linux/swap.h> /* struct reclaim_state */
78745 @@ -100,7 +101,8 @@ struct slob_page {
78746 unsigned long flags; /* mandatory */
78747 atomic_t _count; /* mandatory */
78748 slobidx_t units; /* free units left in page */
78749 - unsigned long pad[2];
78750 + unsigned long pad[1];
78751 + unsigned long size; /* size when >=PAGE_SIZE */
78752 slob_t *free; /* first free slob_t in page */
78753 struct list_head list; /* linked list of free pages */
78754 };
78755 @@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
78756 */
78757 static inline int is_slob_page(struct slob_page *sp)
78758 {
78759 - return PageSlab((struct page *)sp);
78760 + return PageSlab((struct page *)sp) && !sp->size;
78761 }
78762
78763 static inline void set_slob_page(struct slob_page *sp)
78764 @@ -148,7 +150,7 @@ static inline void clear_slob_page(struct slob_page *sp)
78765
78766 static inline struct slob_page *slob_page(const void *addr)
78767 {
78768 - return (struct slob_page *)virt_to_page(addr);
78769 + return (struct slob_page *)virt_to_head_page(addr);
78770 }
78771
78772 /*
78773 @@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
78774 /*
78775 * Return the size of a slob block.
78776 */
78777 -static slobidx_t slob_units(slob_t *s)
78778 +static slobidx_t slob_units(const slob_t *s)
78779 {
78780 if (s->units > 0)
78781 return s->units;
78782 @@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
78783 /*
78784 * Return the next free slob block pointer after this one.
78785 */
78786 -static slob_t *slob_next(slob_t *s)
78787 +static slob_t *slob_next(const slob_t *s)
78788 {
78789 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
78790 slobidx_t next;
78791 @@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
78792 /*
78793 * Returns true if s is the last free block in its page.
78794 */
78795 -static int slob_last(slob_t *s)
78796 +static int slob_last(const slob_t *s)
78797 {
78798 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
78799 }
78800 @@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
78801 if (!page)
78802 return NULL;
78803
78804 + set_slob_page(page);
78805 return page_address(page);
78806 }
78807
78808 @@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
78809 if (!b)
78810 return NULL;
78811 sp = slob_page(b);
78812 - set_slob_page(sp);
78813
78814 spin_lock_irqsave(&slob_lock, flags);
78815 sp->units = SLOB_UNITS(PAGE_SIZE);
78816 sp->free = b;
78817 + sp->size = 0;
78818 INIT_LIST_HEAD(&sp->list);
78819 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
78820 set_slob_page_free(sp, slob_list);
78821 @@ -475,10 +478,9 @@ out:
78822 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
78823 #endif
78824
78825 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
78826 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
78827 {
78828 - unsigned int *m;
78829 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
78830 + slob_t *m;
78831 void *ret;
78832
78833 lockdep_trace_alloc(gfp);
78834 @@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
78835
78836 if (!m)
78837 return NULL;
78838 - *m = size;
78839 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
78840 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
78841 + m[0].units = size;
78842 + m[1].units = align;
78843 ret = (void *)m + align;
78844
78845 trace_kmalloc_node(_RET_IP_, ret,
78846 @@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
78847
78848 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
78849 if (ret) {
78850 - struct page *page;
78851 - page = virt_to_page(ret);
78852 - page->private = size;
78853 + struct slob_page *sp;
78854 + sp = slob_page(ret);
78855 + sp->size = size;
78856 }
78857
78858 trace_kmalloc_node(_RET_IP_, ret,
78859 size, PAGE_SIZE << order, gfp, node);
78860 }
78861
78862 - kmemleak_alloc(ret, size, 1, gfp);
78863 + return ret;
78864 +}
78865 +
78866 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
78867 +{
78868 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
78869 + void *ret = __kmalloc_node_align(size, gfp, node, align);
78870 +
78871 + if (!ZERO_OR_NULL_PTR(ret))
78872 + kmemleak_alloc(ret, size, 1, gfp);
78873 return ret;
78874 }
78875 EXPORT_SYMBOL(__kmalloc_node);
78876 @@ -528,13 +542,92 @@ void kfree(const void *block)
78877 sp = slob_page(block);
78878 if (is_slob_page(sp)) {
78879 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
78880 - unsigned int *m = (unsigned int *)(block - align);
78881 - slob_free(m, *m + align);
78882 - } else
78883 + slob_t *m = (slob_t *)(block - align);
78884 + slob_free(m, m[0].units + align);
78885 + } else {
78886 + clear_slob_page(sp);
78887 + free_slob_page(sp);
78888 + sp->size = 0;
78889 put_page(&sp->page);
78890 + }
78891 }
78892 EXPORT_SYMBOL(kfree);
78893
78894 +void check_object_size(const void *ptr, unsigned long n, bool to)
78895 +{
78896 +
78897 +#ifdef CONFIG_PAX_USERCOPY
78898 + struct slob_page *sp;
78899 + const slob_t *free;
78900 + const void *base;
78901 + unsigned long flags;
78902 + const char *type;
78903 +
78904 + if (!n)
78905 + return;
78906 +
78907 + type = "<null>";
78908 + if (ZERO_OR_NULL_PTR(ptr))
78909 + goto report;
78910 +
78911 + if (!virt_addr_valid(ptr))
78912 + return;
78913 +
78914 + type = "<process stack>";
78915 + sp = slob_page(ptr);
78916 + if (!PageSlab((struct page*)sp)) {
78917 + if (object_is_on_stack(ptr, n) == -1)
78918 + goto report;
78919 + return;
78920 + }
78921 +
78922 + type = "<slob>";
78923 + if (sp->size) {
78924 + base = page_address(&sp->page);
78925 + if (base <= ptr && n <= sp->size - (ptr - base))
78926 + return;
78927 + goto report;
78928 + }
78929 +
78930 + /* some tricky double walking to find the chunk */
78931 + spin_lock_irqsave(&slob_lock, flags);
78932 + base = (void *)((unsigned long)ptr & PAGE_MASK);
78933 + free = sp->free;
78934 +
78935 + while (!slob_last(free) && (void *)free <= ptr) {
78936 + base = free + slob_units(free);
78937 + free = slob_next(free);
78938 + }
78939 +
78940 + while (base < (void *)free) {
78941 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
78942 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
78943 + int offset;
78944 +
78945 + if (ptr < base + align)
78946 + break;
78947 +
78948 + offset = ptr - base - align;
78949 + if (offset >= m) {
78950 + base += size;
78951 + continue;
78952 + }
78953 +
78954 + if (n > m - offset)
78955 + break;
78956 +
78957 + spin_unlock_irqrestore(&slob_lock, flags);
78958 + return;
78959 + }
78960 +
78961 + spin_unlock_irqrestore(&slob_lock, flags);
78962 +report:
78963 + pax_report_usercopy(ptr, n, to, type);
78964 +#endif
78965 +
78966 +}
78967 +EXPORT_SYMBOL(check_object_size);
78968 +
78969 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
78970 size_t ksize(const void *block)
78971 {
78972 @@ -547,10 +640,10 @@ size_t ksize(const void *block)
78973 sp = slob_page(block);
78974 if (is_slob_page(sp)) {
78975 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
78976 - unsigned int *m = (unsigned int *)(block - align);
78977 - return SLOB_UNITS(*m) * SLOB_UNIT;
78978 + slob_t *m = (slob_t *)(block - align);
78979 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
78980 } else
78981 - return sp->page.private;
78982 + return sp->size;
78983 }
78984 EXPORT_SYMBOL(ksize);
78985
78986 @@ -566,8 +659,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
78987 {
78988 struct kmem_cache *c;
78989
78990 +#ifdef CONFIG_PAX_USERCOPY
78991 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
78992 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
78993 +#else
78994 c = slob_alloc(sizeof(struct kmem_cache),
78995 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
78996 +#endif
78997
78998 if (c) {
78999 c->name = name;
79000 @@ -605,17 +703,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
79001 {
79002 void *b;
79003
79004 +#ifdef CONFIG_PAX_USERCOPY
79005 + b = __kmalloc_node_align(c->size, flags, node, c->align);
79006 +#else
79007 if (c->size < PAGE_SIZE) {
79008 b = slob_alloc(c->size, flags, c->align, node);
79009 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
79010 SLOB_UNITS(c->size) * SLOB_UNIT,
79011 flags, node);
79012 } else {
79013 + struct slob_page *sp;
79014 +
79015 b = slob_new_pages(flags, get_order(c->size), node);
79016 + sp = slob_page(b);
79017 + sp->size = c->size;
79018 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
79019 PAGE_SIZE << get_order(c->size),
79020 flags, node);
79021 }
79022 +#endif
79023
79024 if (c->ctor)
79025 c->ctor(b);
79026 @@ -627,10 +733,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
79027
79028 static void __kmem_cache_free(void *b, int size)
79029 {
79030 - if (size < PAGE_SIZE)
79031 + struct slob_page *sp = slob_page(b);
79032 +
79033 + if (is_slob_page(sp))
79034 slob_free(b, size);
79035 - else
79036 + else {
79037 + clear_slob_page(sp);
79038 + free_slob_page(sp);
79039 + sp->size = 0;
79040 slob_free_pages(b, get_order(size));
79041 + }
79042 }
79043
79044 static void kmem_rcu_free(struct rcu_head *head)
79045 @@ -643,18 +755,32 @@ static void kmem_rcu_free(struct rcu_head *head)
79046
79047 void kmem_cache_free(struct kmem_cache *c, void *b)
79048 {
79049 + int size = c->size;
79050 +
79051 +#ifdef CONFIG_PAX_USERCOPY
79052 + if (size + c->align < PAGE_SIZE) {
79053 + size += c->align;
79054 + b -= c->align;
79055 + }
79056 +#endif
79057 +
79058 kmemleak_free_recursive(b, c->flags);
79059 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
79060 struct slob_rcu *slob_rcu;
79061 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
79062 + slob_rcu = b + (size - sizeof(struct slob_rcu));
79063 INIT_RCU_HEAD(&slob_rcu->head);
79064 - slob_rcu->size = c->size;
79065 + slob_rcu->size = size;
79066 call_rcu(&slob_rcu->head, kmem_rcu_free);
79067 } else {
79068 - __kmem_cache_free(b, c->size);
79069 + __kmem_cache_free(b, size);
79070 }
79071
79072 +#ifdef CONFIG_PAX_USERCOPY
79073 + trace_kfree(_RET_IP_, b);
79074 +#else
79075 trace_kmem_cache_free(_RET_IP_, b);
79076 +#endif
79077 +
79078 }
79079 EXPORT_SYMBOL(kmem_cache_free);
79080
79081 diff --git a/mm/slub.c b/mm/slub.c
79082 index 4996fc7..87e01d0 100644
79083 --- a/mm/slub.c
79084 +++ b/mm/slub.c
79085 @@ -201,7 +201,7 @@ struct track {
79086
79087 enum track_item { TRACK_ALLOC, TRACK_FREE };
79088
79089 -#ifdef CONFIG_SLUB_DEBUG
79090 +#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
79091 static int sysfs_slab_add(struct kmem_cache *);
79092 static int sysfs_slab_alias(struct kmem_cache *, const char *);
79093 static void sysfs_slab_remove(struct kmem_cache *);
79094 @@ -410,7 +410,7 @@ static void print_track(const char *s, struct track *t)
79095 if (!t->addr)
79096 return;
79097
79098 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
79099 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
79100 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
79101 }
79102
79103 @@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
79104
79105 page = virt_to_head_page(x);
79106
79107 + BUG_ON(!PageSlab(page));
79108 +
79109 slab_free(s, page, x, _RET_IP_);
79110
79111 trace_kmem_cache_free(_RET_IP_, x);
79112 @@ -1937,7 +1939,7 @@ static int slub_min_objects;
79113 * Merge control. If this is set then no merging of slab caches will occur.
79114 * (Could be removed. This was introduced to pacify the merge skeptics.)
79115 */
79116 -static int slub_nomerge;
79117 +static int slub_nomerge = 1;
79118
79119 /*
79120 * Calculate the order of allocation given an slab object size.
79121 @@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
79122 * list to avoid pounding the page allocator excessively.
79123 */
79124 set_min_partial(s, ilog2(s->size));
79125 - s->refcount = 1;
79126 + atomic_set(&s->refcount, 1);
79127 #ifdef CONFIG_NUMA
79128 s->remote_node_defrag_ratio = 1000;
79129 #endif
79130 @@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
79131 void kmem_cache_destroy(struct kmem_cache *s)
79132 {
79133 down_write(&slub_lock);
79134 - s->refcount--;
79135 - if (!s->refcount) {
79136 + if (atomic_dec_and_test(&s->refcount)) {
79137 list_del(&s->list);
79138 up_write(&slub_lock);
79139 if (kmem_cache_close(s)) {
79140 @@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(char *str)
79141 __setup("slub_nomerge", setup_slub_nomerge);
79142
79143 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
79144 - const char *name, int size, gfp_t gfp_flags)
79145 + const char *name, int size, gfp_t gfp_flags, unsigned int flags)
79146 {
79147 - unsigned int flags = 0;
79148 -
79149 if (gfp_flags & SLUB_DMA)
79150 - flags = SLAB_CACHE_DMA;
79151 + flags |= SLAB_CACHE_DMA;
79152
79153 /*
79154 * This function is called with IRQs disabled during early-boot on
79155 @@ -2915,6 +2914,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
79156 EXPORT_SYMBOL(__kmalloc_node);
79157 #endif
79158
79159 +void check_object_size(const void *ptr, unsigned long n, bool to)
79160 +{
79161 +
79162 +#ifdef CONFIG_PAX_USERCOPY
79163 + struct page *page;
79164 + struct kmem_cache *s = NULL;
79165 + unsigned long offset;
79166 + const char *type;
79167 +
79168 + if (!n)
79169 + return;
79170 +
79171 + type = "<null>";
79172 + if (ZERO_OR_NULL_PTR(ptr))
79173 + goto report;
79174 +
79175 + if (!virt_addr_valid(ptr))
79176 + return;
79177 +
79178 + page = get_object_page(ptr);
79179 +
79180 + type = "<process stack>";
79181 + if (!page) {
79182 + if (object_is_on_stack(ptr, n) == -1)
79183 + goto report;
79184 + return;
79185 + }
79186 +
79187 + s = page->slab;
79188 + type = s->name;
79189 + if (!(s->flags & SLAB_USERCOPY))
79190 + goto report;
79191 +
79192 + offset = (ptr - page_address(page)) % s->size;
79193 + if (offset <= s->objsize && n <= s->objsize - offset)
79194 + return;
79195 +
79196 +report:
79197 + pax_report_usercopy(ptr, n, to, type);
79198 +#endif
79199 +
79200 +}
79201 +EXPORT_SYMBOL(check_object_size);
79202 +
79203 size_t ksize(const void *object)
79204 {
79205 struct page *page;
79206 @@ -3185,8 +3228,8 @@ void __init kmem_cache_init(void)
79207 * kmem_cache_open for slab_state == DOWN.
79208 */
79209 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
79210 - sizeof(struct kmem_cache_node), GFP_NOWAIT);
79211 - kmalloc_caches[0].refcount = -1;
79212 + sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
79213 + atomic_set(&kmalloc_caches[0].refcount, -1);
79214 caches++;
79215
79216 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
79217 @@ -3198,18 +3241,18 @@ void __init kmem_cache_init(void)
79218 /* Caches that are not of the two-to-the-power-of size */
79219 if (KMALLOC_MIN_SIZE <= 32) {
79220 create_kmalloc_cache(&kmalloc_caches[1],
79221 - "kmalloc-96", 96, GFP_NOWAIT);
79222 + "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
79223 caches++;
79224 }
79225 if (KMALLOC_MIN_SIZE <= 64) {
79226 create_kmalloc_cache(&kmalloc_caches[2],
79227 - "kmalloc-192", 192, GFP_NOWAIT);
79228 + "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
79229 caches++;
79230 }
79231
79232 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
79233 create_kmalloc_cache(&kmalloc_caches[i],
79234 - "kmalloc", 1 << i, GFP_NOWAIT);
79235 + "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
79236 caches++;
79237 }
79238
79239 @@ -3293,7 +3336,7 @@ static int slab_unmergeable(struct kmem_cache *s)
79240 /*
79241 * We may have set a slab to be unmergeable during bootstrap.
79242 */
79243 - if (s->refcount < 0)
79244 + if (atomic_read(&s->refcount) < 0)
79245 return 1;
79246
79247 return 0;
79248 @@ -3353,7 +3396,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
79249 if (s) {
79250 int cpu;
79251
79252 - s->refcount++;
79253 + atomic_inc(&s->refcount);
79254 /*
79255 * Adjust the object sizes so that we clear
79256 * the complete object on kzalloc.
79257 @@ -3372,7 +3415,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
79258
79259 if (sysfs_slab_alias(s, name)) {
79260 down_write(&slub_lock);
79261 - s->refcount--;
79262 + atomic_dec(&s->refcount);
79263 up_write(&slub_lock);
79264 goto err;
79265 }
79266 @@ -4101,7 +4144,7 @@ SLAB_ATTR_RO(ctor);
79267
79268 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
79269 {
79270 - return sprintf(buf, "%d\n", s->refcount - 1);
79271 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
79272 }
79273 SLAB_ATTR_RO(aliases);
79274
79275 @@ -4503,7 +4546,7 @@ static void kmem_cache_release(struct kobject *kobj)
79276 kfree(s);
79277 }
79278
79279 -static struct sysfs_ops slab_sysfs_ops = {
79280 +static const struct sysfs_ops slab_sysfs_ops = {
79281 .show = slab_attr_show,
79282 .store = slab_attr_store,
79283 };
79284 @@ -4522,7 +4565,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
79285 return 0;
79286 }
79287
79288 -static struct kset_uevent_ops slab_uevent_ops = {
79289 +static const struct kset_uevent_ops slab_uevent_ops = {
79290 .filter = uevent_filter,
79291 };
79292
79293 @@ -4564,6 +4607,7 @@ static char *create_unique_id(struct kmem_cache *s)
79294 return name;
79295 }
79296
79297 +#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
79298 static int sysfs_slab_add(struct kmem_cache *s)
79299 {
79300 int err;
79301 @@ -4619,6 +4663,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
79302 kobject_del(&s->kobj);
79303 kobject_put(&s->kobj);
79304 }
79305 +#endif
79306
79307 /*
79308 * Need to buffer aliases during bootup until sysfs becomes
79309 @@ -4632,6 +4677,7 @@ struct saved_alias {
79310
79311 static struct saved_alias *alias_list;
79312
79313 +#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
79314 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
79315 {
79316 struct saved_alias *al;
79317 @@ -4654,6 +4700,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
79318 alias_list = al;
79319 return 0;
79320 }
79321 +#endif
79322
79323 static int __init slab_sysfs_init(void)
79324 {
79325 @@ -4785,7 +4832,13 @@ static const struct file_operations proc_slabinfo_operations = {
79326
79327 static int __init slab_proc_init(void)
79328 {
79329 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
79330 + mode_t gr_mode = S_IRUGO;
79331 +
79332 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
79333 + gr_mode = S_IRUSR;
79334 +#endif
79335 +
79336 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
79337 return 0;
79338 }
79339 module_init(slab_proc_init);
79340 diff --git a/mm/swap.c b/mm/swap.c
79341 index 308e57d..5de19c0 100644
79342 --- a/mm/swap.c
79343 +++ b/mm/swap.c
79344 @@ -30,6 +30,7 @@
79345 #include <linux/notifier.h>
79346 #include <linux/backing-dev.h>
79347 #include <linux/memcontrol.h>
79348 +#include <linux/hugetlb.h>
79349
79350 #include "internal.h"
79351
79352 @@ -65,6 +66,8 @@ static void put_compound_page(struct page *page)
79353 compound_page_dtor *dtor;
79354
79355 dtor = get_compound_page_dtor(page);
79356 + if (!PageHuge(page))
79357 + BUG_ON(dtor != free_compound_page);
79358 (*dtor)(page);
79359 }
79360 }
79361 diff --git a/mm/util.c b/mm/util.c
79362 index e48b493..24a601d 100644
79363 --- a/mm/util.c
79364 +++ b/mm/util.c
79365 @@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
79366 void arch_pick_mmap_layout(struct mm_struct *mm)
79367 {
79368 mm->mmap_base = TASK_UNMAPPED_BASE;
79369 +
79370 +#ifdef CONFIG_PAX_RANDMMAP
79371 + if (mm->pax_flags & MF_PAX_RANDMMAP)
79372 + mm->mmap_base += mm->delta_mmap;
79373 +#endif
79374 +
79375 mm->get_unmapped_area = arch_get_unmapped_area;
79376 mm->unmap_area = arch_unmap_area;
79377 }
79378 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
79379 index f34ffd0..e60c44f 100644
79380 --- a/mm/vmalloc.c
79381 +++ b/mm/vmalloc.c
79382 @@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
79383
79384 pte = pte_offset_kernel(pmd, addr);
79385 do {
79386 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
79387 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
79388 +
79389 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
79390 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
79391 + BUG_ON(!pte_exec(*pte));
79392 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
79393 + continue;
79394 + }
79395 +#endif
79396 +
79397 + {
79398 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
79399 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
79400 + }
79401 } while (pte++, addr += PAGE_SIZE, addr != end);
79402 }
79403
79404 @@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
79405 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
79406 {
79407 pte_t *pte;
79408 + int ret = -ENOMEM;
79409
79410 /*
79411 * nr is a running index into the array which helps higher level
79412 @@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
79413 pte = pte_alloc_kernel(pmd, addr);
79414 if (!pte)
79415 return -ENOMEM;
79416 +
79417 + pax_open_kernel();
79418 do {
79419 struct page *page = pages[*nr];
79420
79421 - if (WARN_ON(!pte_none(*pte)))
79422 - return -EBUSY;
79423 - if (WARN_ON(!page))
79424 - return -ENOMEM;
79425 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
79426 + if (!(pgprot_val(prot) & _PAGE_NX))
79427 + BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
79428 + else
79429 +#endif
79430 +
79431 + if (WARN_ON(!pte_none(*pte))) {
79432 + ret = -EBUSY;
79433 + goto out;
79434 + }
79435 + if (WARN_ON(!page)) {
79436 + ret = -ENOMEM;
79437 + goto out;
79438 + }
79439 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
79440 (*nr)++;
79441 } while (pte++, addr += PAGE_SIZE, addr != end);
79442 - return 0;
79443 + ret = 0;
79444 +out:
79445 + pax_close_kernel();
79446 + return ret;
79447 }
79448
79449 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
79450 @@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void *x)
79451 * and fall back on vmalloc() if that fails. Others
79452 * just put it in the vmalloc space.
79453 */
79454 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
79455 +#ifdef CONFIG_MODULES
79456 +#ifdef MODULES_VADDR
79457 unsigned long addr = (unsigned long)x;
79458 if (addr >= MODULES_VADDR && addr < MODULES_END)
79459 return 1;
79460 #endif
79461 +
79462 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
79463 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
79464 + return 1;
79465 +#endif
79466 +
79467 +#endif
79468 +
79469 return is_vmalloc_addr(x);
79470 }
79471
79472 @@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
79473
79474 if (!pgd_none(*pgd)) {
79475 pud_t *pud = pud_offset(pgd, addr);
79476 +#ifdef CONFIG_X86
79477 + if (!pud_large(*pud))
79478 +#endif
79479 if (!pud_none(*pud)) {
79480 pmd_t *pmd = pmd_offset(pud, addr);
79481 +#ifdef CONFIG_X86
79482 + if (!pmd_large(*pmd))
79483 +#endif
79484 if (!pmd_none(*pmd)) {
79485 pte_t *ptep, pte;
79486
79487 @@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vmap_area *va)
79488 struct rb_node *tmp;
79489
79490 while (*p) {
79491 - struct vmap_area *tmp;
79492 + struct vmap_area *varea;
79493
79494 parent = *p;
79495 - tmp = rb_entry(parent, struct vmap_area, rb_node);
79496 - if (va->va_start < tmp->va_end)
79497 + varea = rb_entry(parent, struct vmap_area, rb_node);
79498 + if (va->va_start < varea->va_end)
79499 p = &(*p)->rb_left;
79500 - else if (va->va_end > tmp->va_start)
79501 + else if (va->va_end > varea->va_start)
79502 p = &(*p)->rb_right;
79503 else
79504 BUG();
79505 @@ -1245,6 +1287,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
79506 struct vm_struct *area;
79507
79508 BUG_ON(in_interrupt());
79509 +
79510 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
79511 + if (flags & VM_KERNEXEC) {
79512 + if (start != VMALLOC_START || end != VMALLOC_END)
79513 + return NULL;
79514 + start = (unsigned long)MODULES_EXEC_VADDR;
79515 + end = (unsigned long)MODULES_EXEC_END;
79516 + }
79517 +#endif
79518 +
79519 if (flags & VM_IOREMAP) {
79520 int bit = fls(size);
79521
79522 @@ -1484,6 +1536,11 @@ void *vmap(struct page **pages, unsigned int count,
79523 if (count > totalram_pages)
79524 return NULL;
79525
79526 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
79527 + if (!(pgprot_val(prot) & _PAGE_NX))
79528 + flags |= VM_KERNEXEC;
79529 +#endif
79530 +
79531 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
79532 __builtin_return_address(0));
79533 if (!area)
79534 @@ -1594,6 +1651,14 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
79535 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
79536 return NULL;
79537
79538 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
79539 + if (!(pgprot_val(prot) & _PAGE_NX))
79540 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
79541 + VMALLOC_START, VMALLOC_END, node,
79542 + gfp_mask, caller);
79543 + else
79544 +#endif
79545 +
79546 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
79547 VMALLOC_START, VMALLOC_END, node,
79548 gfp_mask, caller);
79549 @@ -1619,6 +1684,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
79550 return addr;
79551 }
79552
79553 +#undef __vmalloc
79554 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
79555 {
79556 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
79557 @@ -1635,6 +1701,7 @@ EXPORT_SYMBOL(__vmalloc);
79558 * For tight control over page level allocator and protection flags
79559 * use __vmalloc() instead.
79560 */
79561 +#undef vmalloc
79562 void *vmalloc(unsigned long size)
79563 {
79564 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
79565 @@ -1649,6 +1716,7 @@ EXPORT_SYMBOL(vmalloc);
79566 * The resulting memory area is zeroed so it can be mapped to userspace
79567 * without leaking data.
79568 */
79569 +#undef vmalloc_user
79570 void *vmalloc_user(unsigned long size)
79571 {
79572 struct vm_struct *area;
79573 @@ -1676,6 +1744,7 @@ EXPORT_SYMBOL(vmalloc_user);
79574 * For tight control over page level allocator and protection flags
79575 * use __vmalloc() instead.
79576 */
79577 +#undef vmalloc_node
79578 void *vmalloc_node(unsigned long size, int node)
79579 {
79580 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
79581 @@ -1698,10 +1767,10 @@ EXPORT_SYMBOL(vmalloc_node);
79582 * For tight control over page level allocator and protection flags
79583 * use __vmalloc() instead.
79584 */
79585 -
79586 +#undef vmalloc_exec
79587 void *vmalloc_exec(unsigned long size)
79588 {
79589 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
79590 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
79591 -1, __builtin_return_address(0));
79592 }
79593
79594 @@ -1720,6 +1789,7 @@ void *vmalloc_exec(unsigned long size)
79595 * Allocate enough 32bit PA addressable pages to cover @size from the
79596 * page level allocator and map them into contiguous kernel virtual space.
79597 */
79598 +#undef vmalloc_32
79599 void *vmalloc_32(unsigned long size)
79600 {
79601 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
79602 @@ -1734,6 +1804,7 @@ EXPORT_SYMBOL(vmalloc_32);
79603 * The resulting memory area is 32bit addressable and zeroed so it can be
79604 * mapped to userspace without leaking data.
79605 */
79606 +#undef vmalloc_32_user
79607 void *vmalloc_32_user(unsigned long size)
79608 {
79609 struct vm_struct *area;
79610 @@ -1998,6 +2069,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
79611 unsigned long uaddr = vma->vm_start;
79612 unsigned long usize = vma->vm_end - vma->vm_start;
79613
79614 + BUG_ON(vma->vm_mirror);
79615 +
79616 if ((PAGE_SIZE-1) & (unsigned long)addr)
79617 return -EINVAL;
79618
79619 diff --git a/mm/vmstat.c b/mm/vmstat.c
79620 index 42d76c6..5643dc4 100644
79621 --- a/mm/vmstat.c
79622 +++ b/mm/vmstat.c
79623 @@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
79624 *
79625 * vm_stat contains the global counters
79626 */
79627 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
79628 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
79629 EXPORT_SYMBOL(vm_stat);
79630
79631 #ifdef CONFIG_SMP
79632 @@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
79633 v = p->vm_stat_diff[i];
79634 p->vm_stat_diff[i] = 0;
79635 local_irq_restore(flags);
79636 - atomic_long_add(v, &zone->vm_stat[i]);
79637 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
79638 global_diff[i] += v;
79639 #ifdef CONFIG_NUMA
79640 /* 3 seconds idle till flush */
79641 @@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
79642
79643 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
79644 if (global_diff[i])
79645 - atomic_long_add(global_diff[i], &vm_stat[i]);
79646 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
79647 }
79648
79649 #endif
79650 @@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
79651 start_cpu_timer(cpu);
79652 #endif
79653 #ifdef CONFIG_PROC_FS
79654 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
79655 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
79656 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
79657 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
79658 + {
79659 + mode_t gr_mode = S_IRUGO;
79660 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
79661 + gr_mode = S_IRUSR;
79662 +#endif
79663 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
79664 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
79665 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
79666 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
79667 +#else
79668 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
79669 +#endif
79670 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
79671 + }
79672 #endif
79673 return 0;
79674 }
79675 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
79676 index a29c5ab..6143f20 100644
79677 --- a/net/8021q/vlan.c
79678 +++ b/net/8021q/vlan.c
79679 @@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
79680 err = -EPERM;
79681 if (!capable(CAP_NET_ADMIN))
79682 break;
79683 - if ((args.u.name_type >= 0) &&
79684 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
79685 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
79686 struct vlan_net *vn;
79687
79688 vn = net_generic(net, vlan_net_id);
79689 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
79690 index a2d2984..f9eb711 100644
79691 --- a/net/9p/trans_fd.c
79692 +++ b/net/9p/trans_fd.c
79693 @@ -419,7 +419,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
79694 oldfs = get_fs();
79695 set_fs(get_ds());
79696 /* The cast to a user pointer is valid due to the set_fs() */
79697 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
79698 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
79699 set_fs(oldfs);
79700
79701 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
79702 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
79703 index 02cc7e7..4514f1b 100644
79704 --- a/net/atm/atm_misc.c
79705 +++ b/net/atm/atm_misc.c
79706 @@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int truesize)
79707 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
79708 return 1;
79709 atm_return(vcc,truesize);
79710 - atomic_inc(&vcc->stats->rx_drop);
79711 + atomic_inc_unchecked(&vcc->stats->rx_drop);
79712 return 0;
79713 }
79714
79715 @@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
79716 }
79717 }
79718 atm_return(vcc,guess);
79719 - atomic_inc(&vcc->stats->rx_drop);
79720 + atomic_inc_unchecked(&vcc->stats->rx_drop);
79721 return NULL;
79722 }
79723
79724 @@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafprm *tp)
79725
79726 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
79727 {
79728 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
79729 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
79730 __SONET_ITEMS
79731 #undef __HANDLE_ITEM
79732 }
79733 @@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
79734
79735 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
79736 {
79737 -#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
79738 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
79739 __SONET_ITEMS
79740 #undef __HANDLE_ITEM
79741 }
79742 diff --git a/net/atm/lec.h b/net/atm/lec.h
79743 index 9d14d19..5c145f3 100644
79744 --- a/net/atm/lec.h
79745 +++ b/net/atm/lec.h
79746 @@ -48,7 +48,7 @@ struct lane2_ops {
79747 const u8 *tlvs, u32 sizeoftlvs);
79748 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
79749 const u8 *tlvs, u32 sizeoftlvs);
79750 -};
79751 +} __no_const;
79752
79753 /*
79754 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
79755 diff --git a/net/atm/mpc.h b/net/atm/mpc.h
79756 index 0919a88..a23d54e 100644
79757 --- a/net/atm/mpc.h
79758 +++ b/net/atm/mpc.h
79759 @@ -33,7 +33,7 @@ struct mpoa_client {
79760 struct mpc_parameters parameters; /* parameters for this client */
79761
79762 const struct net_device_ops *old_ops;
79763 - struct net_device_ops new_ops;
79764 + net_device_ops_no_const new_ops;
79765 };
79766
79767
79768 diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
79769 index 4504a4b..1733f1e 100644
79770 --- a/net/atm/mpoa_caches.c
79771 +++ b/net/atm/mpoa_caches.c
79772 @@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_client *client)
79773 struct timeval now;
79774 struct k_message msg;
79775
79776 + pax_track_stack();
79777 +
79778 do_gettimeofday(&now);
79779
79780 write_lock_irq(&client->egress_lock);
79781 diff --git a/net/atm/proc.c b/net/atm/proc.c
79782 index ab8419a..aa91497 100644
79783 --- a/net/atm/proc.c
79784 +++ b/net/atm/proc.c
79785 @@ -43,9 +43,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
79786 const struct k_atm_aal_stats *stats)
79787 {
79788 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
79789 - atomic_read(&stats->tx),atomic_read(&stats->tx_err),
79790 - atomic_read(&stats->rx),atomic_read(&stats->rx_err),
79791 - atomic_read(&stats->rx_drop));
79792 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
79793 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
79794 + atomic_read_unchecked(&stats->rx_drop));
79795 }
79796
79797 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
79798 @@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
79799 {
79800 struct sock *sk = sk_atm(vcc);
79801
79802 +#ifdef CONFIG_GRKERNSEC_HIDESYM
79803 + seq_printf(seq, "%p ", NULL);
79804 +#else
79805 seq_printf(seq, "%p ", vcc);
79806 +#endif
79807 +
79808 if (!vcc->dev)
79809 seq_printf(seq, "Unassigned ");
79810 else
79811 @@ -214,7 +219,11 @@ static void svc_info(struct seq_file *seq, struct atm_vcc *vcc)
79812 {
79813 if (!vcc->dev)
79814 seq_printf(seq, sizeof(void *) == 4 ?
79815 +#ifdef CONFIG_GRKERNSEC_HIDESYM
79816 + "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
79817 +#else
79818 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
79819 +#endif
79820 else
79821 seq_printf(seq, "%3d %3d %5d ",
79822 vcc->dev->number, vcc->vpi, vcc->vci);
79823 diff --git a/net/atm/resources.c b/net/atm/resources.c
79824 index 56b7322..c48b84e 100644
79825 --- a/net/atm/resources.c
79826 +++ b/net/atm/resources.c
79827 @@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *dev)
79828 static void copy_aal_stats(struct k_atm_aal_stats *from,
79829 struct atm_aal_stats *to)
79830 {
79831 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
79832 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
79833 __AAL_STAT_ITEMS
79834 #undef __HANDLE_ITEM
79835 }
79836 @@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
79837 static void subtract_aal_stats(struct k_atm_aal_stats *from,
79838 struct atm_aal_stats *to)
79839 {
79840 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
79841 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
79842 __AAL_STAT_ITEMS
79843 #undef __HANDLE_ITEM
79844 }
79845 diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
79846 index 8567d47..bba2292 100644
79847 --- a/net/bridge/br_private.h
79848 +++ b/net/bridge/br_private.h
79849 @@ -255,7 +255,7 @@ extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
79850
79851 #ifdef CONFIG_SYSFS
79852 /* br_sysfs_if.c */
79853 -extern struct sysfs_ops brport_sysfs_ops;
79854 +extern const struct sysfs_ops brport_sysfs_ops;
79855 extern int br_sysfs_addif(struct net_bridge_port *p);
79856
79857 /* br_sysfs_br.c */
79858 diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
79859 index 9a52ac5..c97538e 100644
79860 --- a/net/bridge/br_stp_if.c
79861 +++ b/net/bridge/br_stp_if.c
79862 @@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridge *br)
79863 char *envp[] = { NULL };
79864
79865 if (br->stp_enabled == BR_USER_STP) {
79866 - r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
79867 + r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
79868 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
79869 br->dev->name, r);
79870
79871 diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
79872 index 820643a..ce77fb3 100644
79873 --- a/net/bridge/br_sysfs_if.c
79874 +++ b/net/bridge/br_sysfs_if.c
79875 @@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobject * kobj,
79876 return ret;
79877 }
79878
79879 -struct sysfs_ops brport_sysfs_ops = {
79880 +const struct sysfs_ops brport_sysfs_ops = {
79881 .show = brport_show,
79882 .store = brport_store,
79883 };
79884 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
79885 index d73d47f..72df42a 100644
79886 --- a/net/bridge/netfilter/ebtables.c
79887 +++ b/net/bridge/netfilter/ebtables.c
79888 @@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
79889 unsigned int entries_size, nentries;
79890 char *entries;
79891
79892 + pax_track_stack();
79893 +
79894 if (cmd == EBT_SO_GET_ENTRIES) {
79895 entries_size = t->private->entries_size;
79896 nentries = t->private->nentries;
79897 diff --git a/net/can/bcm.c b/net/can/bcm.c
79898 index 2ffd2e0..72a7486 100644
79899 --- a/net/can/bcm.c
79900 +++ b/net/can/bcm.c
79901 @@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file *m, void *v)
79902 struct bcm_sock *bo = bcm_sk(sk);
79903 struct bcm_op *op;
79904
79905 +#ifdef CONFIG_GRKERNSEC_HIDESYM
79906 + seq_printf(m, ">>> socket %p", NULL);
79907 + seq_printf(m, " / sk %p", NULL);
79908 + seq_printf(m, " / bo %p", NULL);
79909 +#else
79910 seq_printf(m, ">>> socket %p", sk->sk_socket);
79911 seq_printf(m, " / sk %p", sk);
79912 seq_printf(m, " / bo %p", bo);
79913 +#endif
79914 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
79915 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
79916 seq_printf(m, " <<<\n");
79917 diff --git a/net/compat.c b/net/compat.c
79918 index 9559afc..ccd74e1 100644
79919 --- a/net/compat.c
79920 +++ b/net/compat.c
79921 @@ -69,9 +69,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
79922 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
79923 __get_user(kmsg->msg_flags, &umsg->msg_flags))
79924 return -EFAULT;
79925 - kmsg->msg_name = compat_ptr(tmp1);
79926 - kmsg->msg_iov = compat_ptr(tmp2);
79927 - kmsg->msg_control = compat_ptr(tmp3);
79928 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
79929 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
79930 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
79931 return 0;
79932 }
79933
79934 @@ -94,7 +94,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
79935 kern_msg->msg_name = NULL;
79936
79937 tot_len = iov_from_user_compat_to_kern(kern_iov,
79938 - (struct compat_iovec __user *)kern_msg->msg_iov,
79939 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
79940 kern_msg->msg_iovlen);
79941 if (tot_len >= 0)
79942 kern_msg->msg_iov = kern_iov;
79943 @@ -114,20 +114,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
79944
79945 #define CMSG_COMPAT_FIRSTHDR(msg) \
79946 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
79947 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
79948 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
79949 (struct compat_cmsghdr __user *)NULL)
79950
79951 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
79952 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
79953 (ucmlen) <= (unsigned long) \
79954 ((mhdr)->msg_controllen - \
79955 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
79956 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
79957
79958 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
79959 struct compat_cmsghdr __user *cmsg, int cmsg_len)
79960 {
79961 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
79962 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
79963 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
79964 msg->msg_controllen)
79965 return NULL;
79966 return (struct compat_cmsghdr __user *)ptr;
79967 @@ -219,7 +219,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
79968 {
79969 struct compat_timeval ctv;
79970 struct compat_timespec cts[3];
79971 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
79972 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
79973 struct compat_cmsghdr cmhdr;
79974 int cmlen;
79975
79976 @@ -271,7 +271,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
79977
79978 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
79979 {
79980 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
79981 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
79982 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
79983 int fdnum = scm->fp->count;
79984 struct file **fp = scm->fp->fp;
79985 @@ -433,7 +433,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
79986 len = sizeof(ktime);
79987 old_fs = get_fs();
79988 set_fs(KERNEL_DS);
79989 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
79990 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
79991 set_fs(old_fs);
79992
79993 if (!err) {
79994 @@ -570,7 +570,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
79995 case MCAST_JOIN_GROUP:
79996 case MCAST_LEAVE_GROUP:
79997 {
79998 - struct compat_group_req __user *gr32 = (void *)optval;
79999 + struct compat_group_req __user *gr32 = (void __user *)optval;
80000 struct group_req __user *kgr =
80001 compat_alloc_user_space(sizeof(struct group_req));
80002 u32 interface;
80003 @@ -591,7 +591,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
80004 case MCAST_BLOCK_SOURCE:
80005 case MCAST_UNBLOCK_SOURCE:
80006 {
80007 - struct compat_group_source_req __user *gsr32 = (void *)optval;
80008 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
80009 struct group_source_req __user *kgsr = compat_alloc_user_space(
80010 sizeof(struct group_source_req));
80011 u32 interface;
80012 @@ -612,7 +612,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
80013 }
80014 case MCAST_MSFILTER:
80015 {
80016 - struct compat_group_filter __user *gf32 = (void *)optval;
80017 + struct compat_group_filter __user *gf32 = (void __user *)optval;
80018 struct group_filter __user *kgf;
80019 u32 interface, fmode, numsrc;
80020
80021 diff --git a/net/core/dev.c b/net/core/dev.c
80022 index 84a0705..575db4c 100644
80023 --- a/net/core/dev.c
80024 +++ b/net/core/dev.c
80025 @@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const char *name)
80026 if (no_module && capable(CAP_NET_ADMIN))
80027 no_module = request_module("netdev-%s", name);
80028 if (no_module && capable(CAP_SYS_MODULE)) {
80029 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
80030 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
80031 +#else
80032 if (!request_module("%s", name))
80033 pr_err("Loading kernel module for a network device "
80034 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
80035 "instead\n", name);
80036 +#endif
80037 }
80038 }
80039 EXPORT_SYMBOL(dev_load);
80040 @@ -1654,7 +1658,7 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
80041
80042 struct dev_gso_cb {
80043 void (*destructor)(struct sk_buff *skb);
80044 -};
80045 +} __no_const;
80046
80047 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
80048
80049 @@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
80050 }
80051 EXPORT_SYMBOL(netif_rx_ni);
80052
80053 -static void net_tx_action(struct softirq_action *h)
80054 +static void net_tx_action(void)
80055 {
80056 struct softnet_data *sd = &__get_cpu_var(softnet_data);
80057
80058 @@ -2827,7 +2831,7 @@ void netif_napi_del(struct napi_struct *napi)
80059 EXPORT_SYMBOL(netif_napi_del);
80060
80061
80062 -static void net_rx_action(struct softirq_action *h)
80063 +static void net_rx_action(void)
80064 {
80065 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
80066 unsigned long time_limit = jiffies + 2;
80067 diff --git a/net/core/flow.c b/net/core/flow.c
80068 index 9601587..8c4824e 100644
80069 --- a/net/core/flow.c
80070 +++ b/net/core/flow.c
80071 @@ -35,11 +35,11 @@ struct flow_cache_entry {
80072 atomic_t *object_ref;
80073 };
80074
80075 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
80076 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
80077
80078 static u32 flow_hash_shift;
80079 #define flow_hash_size (1 << flow_hash_shift)
80080 -static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
80081 +static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
80082
80083 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
80084
80085 @@ -52,7 +52,7 @@ struct flow_percpu_info {
80086 u32 hash_rnd;
80087 int count;
80088 };
80089 -static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
80090 +static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
80091
80092 #define flow_hash_rnd_recalc(cpu) \
80093 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
80094 @@ -69,7 +69,7 @@ struct flow_flush_info {
80095 atomic_t cpuleft;
80096 struct completion completion;
80097 };
80098 -static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
80099 +static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
80100
80101 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
80102
80103 @@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
80104 if (fle->family == family &&
80105 fle->dir == dir &&
80106 flow_key_compare(key, &fle->key) == 0) {
80107 - if (fle->genid == atomic_read(&flow_cache_genid)) {
80108 + if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
80109 void *ret = fle->object;
80110
80111 if (ret)
80112 @@ -228,7 +228,7 @@ nocache:
80113 err = resolver(net, key, family, dir, &obj, &obj_ref);
80114
80115 if (fle && !err) {
80116 - fle->genid = atomic_read(&flow_cache_genid);
80117 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
80118
80119 if (fle->object)
80120 atomic_dec(fle->object_ref);
80121 @@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(unsigned long data)
80122
80123 fle = flow_table(cpu)[i];
80124 for (; fle; fle = fle->next) {
80125 - unsigned genid = atomic_read(&flow_cache_genid);
80126 + unsigned genid = atomic_read_unchecked(&flow_cache_genid);
80127
80128 if (!fle->object || fle->genid == genid)
80129 continue;
80130 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
80131 index d4fd895..ac9b1e6 100644
80132 --- a/net/core/rtnetlink.c
80133 +++ b/net/core/rtnetlink.c
80134 @@ -57,7 +57,7 @@ struct rtnl_link
80135 {
80136 rtnl_doit_func doit;
80137 rtnl_dumpit_func dumpit;
80138 -};
80139 +} __no_const;
80140
80141 static DEFINE_MUTEX(rtnl_mutex);
80142
80143 diff --git a/net/core/scm.c b/net/core/scm.c
80144 index d98eafc..1a190a9 100644
80145 --- a/net/core/scm.c
80146 +++ b/net/core/scm.c
80147 @@ -191,7 +191,7 @@ error:
80148 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
80149 {
80150 struct cmsghdr __user *cm
80151 - = (__force struct cmsghdr __user *)msg->msg_control;
80152 + = (struct cmsghdr __force_user *)msg->msg_control;
80153 struct cmsghdr cmhdr;
80154 int cmlen = CMSG_LEN(len);
80155 int err;
80156 @@ -214,7 +214,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
80157 err = -EFAULT;
80158 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
80159 goto out;
80160 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
80161 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
80162 goto out;
80163 cmlen = CMSG_SPACE(len);
80164 if (msg->msg_controllen < cmlen)
80165 @@ -229,7 +229,7 @@ out:
80166 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
80167 {
80168 struct cmsghdr __user *cm
80169 - = (__force struct cmsghdr __user*)msg->msg_control;
80170 + = (struct cmsghdr __force_user *)msg->msg_control;
80171
80172 int fdmax = 0;
80173 int fdnum = scm->fp->count;
80174 @@ -249,7 +249,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
80175 if (fdnum < fdmax)
80176 fdmax = fdnum;
80177
80178 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
80179 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
80180 i++, cmfptr++)
80181 {
80182 int new_fd;
80183 diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
80184 index 45329d7..626aaa6 100644
80185 --- a/net/core/secure_seq.c
80186 +++ b/net/core/secure_seq.c
80187 @@ -57,7 +57,7 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
80188 EXPORT_SYMBOL(secure_tcpv6_sequence_number);
80189
80190 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
80191 - __be16 dport)
80192 + __be16 dport)
80193 {
80194 u32 secret[MD5_MESSAGE_BYTES / 4];
80195 u32 hash[MD5_DIGEST_WORDS];
80196 @@ -71,7 +71,6 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
80197 secret[i] = net_secret[i];
80198
80199 md5_transform(hash, secret);
80200 -
80201 return hash[0];
80202 }
80203 #endif
80204 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
80205 index a807f8c..65f906f 100644
80206 --- a/net/core/skbuff.c
80207 +++ b/net/core/skbuff.c
80208 @@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
80209 struct sk_buff *frag_iter;
80210 struct sock *sk = skb->sk;
80211
80212 + pax_track_stack();
80213 +
80214 /*
80215 * __skb_splice_bits() only fails if the output has no room left,
80216 * so no point in going over the frag_list for the error case.
80217 diff --git a/net/core/sock.c b/net/core/sock.c
80218 index 6605e75..3acebda 100644
80219 --- a/net/core/sock.c
80220 +++ b/net/core/sock.c
80221 @@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
80222 break;
80223
80224 case SO_PEERCRED:
80225 + {
80226 + struct ucred peercred;
80227 if (len > sizeof(sk->sk_peercred))
80228 len = sizeof(sk->sk_peercred);
80229 - if (copy_to_user(optval, &sk->sk_peercred, len))
80230 + peercred = sk->sk_peercred;
80231 + if (copy_to_user(optval, &peercred, len))
80232 return -EFAULT;
80233 goto lenout;
80234 + }
80235
80236 case SO_PEERNAME:
80237 {
80238 @@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
80239 */
80240 smp_wmb();
80241 atomic_set(&sk->sk_refcnt, 1);
80242 - atomic_set(&sk->sk_drops, 0);
80243 + atomic_set_unchecked(&sk->sk_drops, 0);
80244 }
80245 EXPORT_SYMBOL(sock_init_data);
80246
80247 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
80248 index 2036568..c55883d 100644
80249 --- a/net/decnet/sysctl_net_decnet.c
80250 +++ b/net/decnet/sysctl_net_decnet.c
80251 @@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
80252
80253 if (len > *lenp) len = *lenp;
80254
80255 - if (copy_to_user(buffer, addr, len))
80256 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
80257 return -EFAULT;
80258
80259 *lenp = len;
80260 @@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
80261
80262 if (len > *lenp) len = *lenp;
80263
80264 - if (copy_to_user(buffer, devname, len))
80265 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
80266 return -EFAULT;
80267
80268 *lenp = len;
80269 diff --git a/net/econet/Kconfig b/net/econet/Kconfig
80270 index 39a2d29..f39c0fe 100644
80271 --- a/net/econet/Kconfig
80272 +++ b/net/econet/Kconfig
80273 @@ -4,7 +4,7 @@
80274
80275 config ECONET
80276 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
80277 - depends on EXPERIMENTAL && INET
80278 + depends on EXPERIMENTAL && INET && BROKEN
80279 ---help---
80280 Econet is a fairly old and slow networking protocol mainly used by
80281 Acorn computers to access file and print servers. It uses native
80282 diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
80283 index a413b1b..380849c 100644
80284 --- a/net/ieee802154/dgram.c
80285 +++ b/net/ieee802154/dgram.c
80286 @@ -318,7 +318,7 @@ out:
80287 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
80288 {
80289 if (sock_queue_rcv_skb(sk, skb) < 0) {
80290 - atomic_inc(&sk->sk_drops);
80291 + atomic_inc_unchecked(&sk->sk_drops);
80292 kfree_skb(skb);
80293 return NET_RX_DROP;
80294 }
80295 diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
80296 index 30e74ee..bfc6ee0 100644
80297 --- a/net/ieee802154/raw.c
80298 +++ b/net/ieee802154/raw.c
80299 @@ -206,7 +206,7 @@ out:
80300 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
80301 {
80302 if (sock_queue_rcv_skb(sk, skb) < 0) {
80303 - atomic_inc(&sk->sk_drops);
80304 + atomic_inc_unchecked(&sk->sk_drops);
80305 kfree_skb(skb);
80306 return NET_RX_DROP;
80307 }
80308 diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
80309 index dba56d2..acee5d6 100644
80310 --- a/net/ipv4/inet_diag.c
80311 +++ b/net/ipv4/inet_diag.c
80312 @@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct sock *sk,
80313 r->idiag_retrans = 0;
80314
80315 r->id.idiag_if = sk->sk_bound_dev_if;
80316 +#ifdef CONFIG_GRKERNSEC_HIDESYM
80317 + r->id.idiag_cookie[0] = 0;
80318 + r->id.idiag_cookie[1] = 0;
80319 +#else
80320 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
80321 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
80322 +#endif
80323
80324 r->id.idiag_sport = inet->sport;
80325 r->id.idiag_dport = inet->dport;
80326 @@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
80327 r->idiag_family = tw->tw_family;
80328 r->idiag_retrans = 0;
80329 r->id.idiag_if = tw->tw_bound_dev_if;
80330 +
80331 +#ifdef CONFIG_GRKERNSEC_HIDESYM
80332 + r->id.idiag_cookie[0] = 0;
80333 + r->id.idiag_cookie[1] = 0;
80334 +#else
80335 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
80336 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
80337 +#endif
80338 +
80339 r->id.idiag_sport = tw->tw_sport;
80340 r->id.idiag_dport = tw->tw_dport;
80341 r->id.idiag_src[0] = tw->tw_rcv_saddr;
80342 @@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
80343 if (sk == NULL)
80344 goto unlock;
80345
80346 +#ifndef CONFIG_GRKERNSEC_HIDESYM
80347 err = -ESTALE;
80348 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
80349 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
80350 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
80351 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
80352 goto out;
80353 +#endif
80354
80355 err = -ENOMEM;
80356 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
80357 @@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
80358 r->idiag_retrans = req->retrans;
80359
80360 r->id.idiag_if = sk->sk_bound_dev_if;
80361 +
80362 +#ifdef CONFIG_GRKERNSEC_HIDESYM
80363 + r->id.idiag_cookie[0] = 0;
80364 + r->id.idiag_cookie[1] = 0;
80365 +#else
80366 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
80367 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
80368 +#endif
80369
80370 tmo = req->expires - jiffies;
80371 if (tmo < 0)
80372 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
80373 index d717267..56de7e7 100644
80374 --- a/net/ipv4/inet_hashtables.c
80375 +++ b/net/ipv4/inet_hashtables.c
80376 @@ -18,12 +18,15 @@
80377 #include <linux/sched.h>
80378 #include <linux/slab.h>
80379 #include <linux/wait.h>
80380 +#include <linux/security.h>
80381
80382 #include <net/inet_connection_sock.h>
80383 #include <net/inet_hashtables.h>
80384 #include <net/secure_seq.h>
80385 #include <net/ip.h>
80386
80387 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
80388 +
80389 /*
80390 * Allocate and initialize a new local port bind bucket.
80391 * The bindhash mutex for snum's hash chain must be held here.
80392 @@ -491,6 +494,8 @@ ok:
80393 }
80394 spin_unlock(&head->lock);
80395
80396 + gr_update_task_in_ip_table(current, inet_sk(sk));
80397 +
80398 if (tw) {
80399 inet_twsk_deschedule(tw, death_row);
80400 inet_twsk_put(tw);
80401 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
80402 index 13b229f..6956484 100644
80403 --- a/net/ipv4/inetpeer.c
80404 +++ b/net/ipv4/inetpeer.c
80405 @@ -367,6 +367,8 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
80406 struct inet_peer *p, *n;
80407 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
80408
80409 + pax_track_stack();
80410 +
80411 /* Look up for the address quickly. */
80412 read_lock_bh(&peer_pool_lock);
80413 p = lookup(daddr, NULL);
80414 @@ -390,7 +392,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
80415 return NULL;
80416 n->v4daddr = daddr;
80417 atomic_set(&n->refcnt, 1);
80418 - atomic_set(&n->rid, 0);
80419 + atomic_set_unchecked(&n->rid, 0);
80420 n->ip_id_count = secure_ip_id(daddr);
80421 n->tcp_ts_stamp = 0;
80422
80423 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
80424 index d3fe10b..feeafc9 100644
80425 --- a/net/ipv4/ip_fragment.c
80426 +++ b/net/ipv4/ip_fragment.c
80427 @@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
80428 return 0;
80429
80430 start = qp->rid;
80431 - end = atomic_inc_return(&peer->rid);
80432 + end = atomic_inc_return_unchecked(&peer->rid);
80433 qp->rid = end;
80434
80435 rc = qp->q.fragments && (end - start) > max;
80436 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
80437 index e982b5c..f079d75 100644
80438 --- a/net/ipv4/ip_sockglue.c
80439 +++ b/net/ipv4/ip_sockglue.c
80440 @@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
80441 int val;
80442 int len;
80443
80444 + pax_track_stack();
80445 +
80446 if (level != SOL_IP)
80447 return -EOPNOTSUPP;
80448
80449 @@ -1173,7 +1175,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
80450 if (sk->sk_type != SOCK_STREAM)
80451 return -ENOPROTOOPT;
80452
80453 - msg.msg_control = optval;
80454 + msg.msg_control = (void __force_kernel *)optval;
80455 msg.msg_controllen = len;
80456 msg.msg_flags = 0;
80457
80458 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
80459 index f8d04c2..c1188f2 100644
80460 --- a/net/ipv4/ipconfig.c
80461 +++ b/net/ipv4/ipconfig.c
80462 @@ -295,7 +295,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
80463
80464 mm_segment_t oldfs = get_fs();
80465 set_fs(get_ds());
80466 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
80467 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
80468 set_fs(oldfs);
80469 return res;
80470 }
80471 @@ -306,7 +306,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
80472
80473 mm_segment_t oldfs = get_fs();
80474 set_fs(get_ds());
80475 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
80476 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
80477 set_fs(oldfs);
80478 return res;
80479 }
80480 @@ -317,7 +317,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
80481
80482 mm_segment_t oldfs = get_fs();
80483 set_fs(get_ds());
80484 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
80485 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
80486 set_fs(oldfs);
80487 return res;
80488 }
80489 diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
80490 index c8b0cc3..4da5ae2 100644
80491 --- a/net/ipv4/netfilter/arp_tables.c
80492 +++ b/net/ipv4/netfilter/arp_tables.c
80493 @@ -934,6 +934,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
80494 private = &tmp;
80495 }
80496 #endif
80497 + memset(&info, 0, sizeof(info));
80498 info.valid_hooks = t->valid_hooks;
80499 memcpy(info.hook_entry, private->hook_entry,
80500 sizeof(info.hook_entry));
80501 diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
80502 index c156db2..e772975 100644
80503 --- a/net/ipv4/netfilter/ip_queue.c
80504 +++ b/net/ipv4/netfilter/ip_queue.c
80505 @@ -286,6 +286,9 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
80506
80507 if (v->data_len < sizeof(*user_iph))
80508 return 0;
80509 + if (v->data_len > 65535)
80510 + return -EMSGSIZE;
80511 +
80512 diff = v->data_len - e->skb->len;
80513 if (diff < 0) {
80514 if (pskb_trim(e->skb, v->data_len))
80515 @@ -409,7 +412,8 @@ ipq_dev_drop(int ifindex)
80516 static inline void
80517 __ipq_rcv_skb(struct sk_buff *skb)
80518 {
80519 - int status, type, pid, flags, nlmsglen, skblen;
80520 + int status, type, pid, flags;
80521 + unsigned int nlmsglen, skblen;
80522 struct nlmsghdr *nlh;
80523
80524 skblen = skb->len;
80525 diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
80526 index 0606db1..02e7e4c 100644
80527 --- a/net/ipv4/netfilter/ip_tables.c
80528 +++ b/net/ipv4/netfilter/ip_tables.c
80529 @@ -1141,6 +1141,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
80530 private = &tmp;
80531 }
80532 #endif
80533 + memset(&info, 0, sizeof(info));
80534 info.valid_hooks = t->valid_hooks;
80535 memcpy(info.hook_entry, private->hook_entry,
80536 sizeof(info.hook_entry));
80537 diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
80538 index d9521f6..3c3eb25 100644
80539 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
80540 +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
80541 @@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
80542
80543 *len = 0;
80544
80545 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
80546 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
80547 if (*octets == NULL) {
80548 if (net_ratelimit())
80549 printk("OOM in bsalg (%d)\n", __LINE__);
80550 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
80551 index ab996f9..3da5f96 100644
80552 --- a/net/ipv4/raw.c
80553 +++ b/net/ipv4/raw.c
80554 @@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
80555 /* Charge it to the socket. */
80556
80557 if (sock_queue_rcv_skb(sk, skb) < 0) {
80558 - atomic_inc(&sk->sk_drops);
80559 + atomic_inc_unchecked(&sk->sk_drops);
80560 kfree_skb(skb);
80561 return NET_RX_DROP;
80562 }
80563 @@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
80564 int raw_rcv(struct sock *sk, struct sk_buff *skb)
80565 {
80566 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
80567 - atomic_inc(&sk->sk_drops);
80568 + atomic_inc_unchecked(&sk->sk_drops);
80569 kfree_skb(skb);
80570 return NET_RX_DROP;
80571 }
80572 @@ -724,16 +724,23 @@ static int raw_init(struct sock *sk)
80573
80574 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
80575 {
80576 + struct icmp_filter filter;
80577 +
80578 + if (optlen < 0)
80579 + return -EINVAL;
80580 if (optlen > sizeof(struct icmp_filter))
80581 optlen = sizeof(struct icmp_filter);
80582 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
80583 + if (copy_from_user(&filter, optval, optlen))
80584 return -EFAULT;
80585 + raw_sk(sk)->filter = filter;
80586 +
80587 return 0;
80588 }
80589
80590 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
80591 {
80592 int len, ret = -EFAULT;
80593 + struct icmp_filter filter;
80594
80595 if (get_user(len, optlen))
80596 goto out;
80597 @@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
80598 if (len > sizeof(struct icmp_filter))
80599 len = sizeof(struct icmp_filter);
80600 ret = -EFAULT;
80601 - if (put_user(len, optlen) ||
80602 - copy_to_user(optval, &raw_sk(sk)->filter, len))
80603 + filter = raw_sk(sk)->filter;
80604 + if (put_user(len, optlen) || len > sizeof filter ||
80605 + copy_to_user(optval, &filter, len))
80606 goto out;
80607 ret = 0;
80608 out: return ret;
80609 @@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
80610 sk_wmem_alloc_get(sp),
80611 sk_rmem_alloc_get(sp),
80612 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
80613 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
80614 + atomic_read(&sp->sk_refcnt),
80615 +#ifdef CONFIG_GRKERNSEC_HIDESYM
80616 + NULL,
80617 +#else
80618 + sp,
80619 +#endif
80620 + atomic_read_unchecked(&sp->sk_drops));
80621 }
80622
80623 static int raw_seq_show(struct seq_file *seq, void *v)
80624 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
80625 index 58f141b..b759702 100644
80626 --- a/net/ipv4/route.c
80627 +++ b/net/ipv4/route.c
80628 @@ -269,7 +269,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
80629
80630 static inline int rt_genid(struct net *net)
80631 {
80632 - return atomic_read(&net->ipv4.rt_genid);
80633 + return atomic_read_unchecked(&net->ipv4.rt_genid);
80634 }
80635
80636 #ifdef CONFIG_PROC_FS
80637 @@ -889,7 +889,7 @@ static void rt_cache_invalidate(struct net *net)
80638 unsigned char shuffle;
80639
80640 get_random_bytes(&shuffle, sizeof(shuffle));
80641 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
80642 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
80643 }
80644
80645 /*
80646 @@ -3357,7 +3357,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
80647
80648 static __net_init int rt_secret_timer_init(struct net *net)
80649 {
80650 - atomic_set(&net->ipv4.rt_genid,
80651 + atomic_set_unchecked(&net->ipv4.rt_genid,
80652 (int) ((num_physpages ^ (num_physpages>>8)) ^
80653 (jiffies ^ (jiffies >> 7))));
80654
80655 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
80656 index f095659..adc892a 100644
80657 --- a/net/ipv4/tcp.c
80658 +++ b/net/ipv4/tcp.c
80659 @@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
80660 int val;
80661 int err = 0;
80662
80663 + pax_track_stack();
80664 +
80665 /* This is a string value all the others are int's */
80666 if (optname == TCP_CONGESTION) {
80667 char name[TCP_CA_NAME_MAX];
80668 @@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
80669 struct tcp_sock *tp = tcp_sk(sk);
80670 int val, len;
80671
80672 + pax_track_stack();
80673 +
80674 if (get_user(len, optlen))
80675 return -EFAULT;
80676
80677 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
80678 index 6fc7961..33bad4a 100644
80679 --- a/net/ipv4/tcp_ipv4.c
80680 +++ b/net/ipv4/tcp_ipv4.c
80681 @@ -85,6 +85,9 @@
80682 int sysctl_tcp_tw_reuse __read_mostly;
80683 int sysctl_tcp_low_latency __read_mostly;
80684
80685 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80686 +extern int grsec_enable_blackhole;
80687 +#endif
80688
80689 #ifdef CONFIG_TCP_MD5SIG
80690 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
80691 @@ -1543,6 +1546,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
80692 return 0;
80693
80694 reset:
80695 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80696 + if (!grsec_enable_blackhole)
80697 +#endif
80698 tcp_v4_send_reset(rsk, skb);
80699 discard:
80700 kfree_skb(skb);
80701 @@ -1604,12 +1610,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
80702 TCP_SKB_CB(skb)->sacked = 0;
80703
80704 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
80705 - if (!sk)
80706 + if (!sk) {
80707 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80708 + ret = 1;
80709 +#endif
80710 goto no_tcp_socket;
80711 + }
80712
80713 process:
80714 - if (sk->sk_state == TCP_TIME_WAIT)
80715 + if (sk->sk_state == TCP_TIME_WAIT) {
80716 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80717 + ret = 2;
80718 +#endif
80719 goto do_time_wait;
80720 + }
80721
80722 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
80723 goto discard_and_relse;
80724 @@ -1651,6 +1665,10 @@ no_tcp_socket:
80725 bad_packet:
80726 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
80727 } else {
80728 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80729 + if (!grsec_enable_blackhole || (ret == 1 &&
80730 + (skb->dev->flags & IFF_LOOPBACK)))
80731 +#endif
80732 tcp_v4_send_reset(NULL, skb);
80733 }
80734
80735 @@ -2238,7 +2256,11 @@ static void get_openreq4(struct sock *sk, struct request_sock *req,
80736 0, /* non standard timer */
80737 0, /* open_requests have no inode */
80738 atomic_read(&sk->sk_refcnt),
80739 +#ifdef CONFIG_GRKERNSEC_HIDESYM
80740 + NULL,
80741 +#else
80742 req,
80743 +#endif
80744 len);
80745 }
80746
80747 @@ -2280,7 +2302,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
80748 sock_i_uid(sk),
80749 icsk->icsk_probes_out,
80750 sock_i_ino(sk),
80751 - atomic_read(&sk->sk_refcnt), sk,
80752 + atomic_read(&sk->sk_refcnt),
80753 +#ifdef CONFIG_GRKERNSEC_HIDESYM
80754 + NULL,
80755 +#else
80756 + sk,
80757 +#endif
80758 jiffies_to_clock_t(icsk->icsk_rto),
80759 jiffies_to_clock_t(icsk->icsk_ack.ato),
80760 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
80761 @@ -2308,7 +2335,13 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw,
80762 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
80763 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
80764 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
80765 - atomic_read(&tw->tw_refcnt), tw, len);
80766 + atomic_read(&tw->tw_refcnt),
80767 +#ifdef CONFIG_GRKERNSEC_HIDESYM
80768 + NULL,
80769 +#else
80770 + tw,
80771 +#endif
80772 + len);
80773 }
80774
80775 #define TMPSZ 150
80776 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
80777 index 4c03598..e09a8e8 100644
80778 --- a/net/ipv4/tcp_minisocks.c
80779 +++ b/net/ipv4/tcp_minisocks.c
80780 @@ -26,6 +26,10 @@
80781 #include <net/inet_common.h>
80782 #include <net/xfrm.h>
80783
80784 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80785 +extern int grsec_enable_blackhole;
80786 +#endif
80787 +
80788 #ifdef CONFIG_SYSCTL
80789 #define SYNC_INIT 0 /* let the user enable it */
80790 #else
80791 @@ -672,6 +676,10 @@ listen_overflow:
80792
80793 embryonic_reset:
80794 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
80795 +
80796 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80797 + if (!grsec_enable_blackhole)
80798 +#endif
80799 if (!(flg & TCP_FLAG_RST))
80800 req->rsk_ops->send_reset(sk, skb);
80801
80802 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
80803 index af83bdf..ec91cb2 100644
80804 --- a/net/ipv4/tcp_output.c
80805 +++ b/net/ipv4/tcp_output.c
80806 @@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
80807 __u8 *md5_hash_location;
80808 int mss;
80809
80810 + pax_track_stack();
80811 +
80812 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
80813 if (skb == NULL)
80814 return NULL;
80815 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
80816 index 59f5b5e..193860f 100644
80817 --- a/net/ipv4/tcp_probe.c
80818 +++ b/net/ipv4/tcp_probe.c
80819 @@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
80820 if (cnt + width >= len)
80821 break;
80822
80823 - if (copy_to_user(buf + cnt, tbuf, width))
80824 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
80825 return -EFAULT;
80826 cnt += width;
80827 }
80828 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
80829 index 57d5501..a9ed13a 100644
80830 --- a/net/ipv4/tcp_timer.c
80831 +++ b/net/ipv4/tcp_timer.c
80832 @@ -21,6 +21,10 @@
80833 #include <linux/module.h>
80834 #include <net/tcp.h>
80835
80836 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80837 +extern int grsec_lastack_retries;
80838 +#endif
80839 +
80840 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
80841 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
80842 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
80843 @@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock *sk)
80844 }
80845 }
80846
80847 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80848 + if ((sk->sk_state == TCP_LAST_ACK) &&
80849 + (grsec_lastack_retries > 0) &&
80850 + (grsec_lastack_retries < retry_until))
80851 + retry_until = grsec_lastack_retries;
80852 +#endif
80853 +
80854 if (retransmits_timed_out(sk, retry_until)) {
80855 /* Has it gone just too far? */
80856 tcp_write_err(sk);
80857 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
80858 index 0ac8833..58d8c43 100644
80859 --- a/net/ipv4/udp.c
80860 +++ b/net/ipv4/udp.c
80861 @@ -86,6 +86,7 @@
80862 #include <linux/types.h>
80863 #include <linux/fcntl.h>
80864 #include <linux/module.h>
80865 +#include <linux/security.h>
80866 #include <linux/socket.h>
80867 #include <linux/sockios.h>
80868 #include <linux/igmp.h>
80869 @@ -106,6 +107,10 @@
80870 #include <net/xfrm.h>
80871 #include "udp_impl.h"
80872
80873 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80874 +extern int grsec_enable_blackhole;
80875 +#endif
80876 +
80877 struct udp_table udp_table;
80878 EXPORT_SYMBOL(udp_table);
80879
80880 @@ -371,6 +376,9 @@ found:
80881 return s;
80882 }
80883
80884 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
80885 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
80886 +
80887 /*
80888 * This routine is called by the ICMP module when it gets some
80889 * sort of error condition. If err < 0 then the socket should
80890 @@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
80891 dport = usin->sin_port;
80892 if (dport == 0)
80893 return -EINVAL;
80894 +
80895 + err = gr_search_udp_sendmsg(sk, usin);
80896 + if (err)
80897 + return err;
80898 } else {
80899 if (sk->sk_state != TCP_ESTABLISHED)
80900 return -EDESTADDRREQ;
80901 +
80902 + err = gr_search_udp_sendmsg(sk, NULL);
80903 + if (err)
80904 + return err;
80905 +
80906 daddr = inet->daddr;
80907 dport = inet->dport;
80908 /* Open fast path for connected socket.
80909 @@ -945,6 +962,10 @@ try_again:
80910 if (!skb)
80911 goto out;
80912
80913 + err = gr_search_udp_recvmsg(sk, skb);
80914 + if (err)
80915 + goto out_free;
80916 +
80917 ulen = skb->len - sizeof(struct udphdr);
80918 copied = len;
80919 if (copied > ulen)
80920 @@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
80921 if (rc == -ENOMEM) {
80922 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
80923 is_udplite);
80924 - atomic_inc(&sk->sk_drops);
80925 + atomic_inc_unchecked(&sk->sk_drops);
80926 }
80927 goto drop;
80928 }
80929 @@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
80930 goto csum_error;
80931
80932 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
80933 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
80934 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
80935 +#endif
80936 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
80937
80938 /*
80939 @@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
80940 sk_wmem_alloc_get(sp),
80941 sk_rmem_alloc_get(sp),
80942 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
80943 - atomic_read(&sp->sk_refcnt), sp,
80944 - atomic_read(&sp->sk_drops), len);
80945 + atomic_read(&sp->sk_refcnt),
80946 +#ifdef CONFIG_GRKERNSEC_HIDESYM
80947 + NULL,
80948 +#else
80949 + sp,
80950 +#endif
80951 + atomic_read_unchecked(&sp->sk_drops), len);
80952 }
80953
80954 int udp4_seq_show(struct seq_file *seq, void *v)
80955 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
80956 index 8ac3d09..fc58c5f 100644
80957 --- a/net/ipv6/addrconf.c
80958 +++ b/net/ipv6/addrconf.c
80959 @@ -2053,7 +2053,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
80960 p.iph.ihl = 5;
80961 p.iph.protocol = IPPROTO_IPV6;
80962 p.iph.ttl = 64;
80963 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
80964 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
80965
80966 if (ops->ndo_do_ioctl) {
80967 mm_segment_t oldfs = get_fs();
80968 diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
80969 index cc4797d..7cfdfcc 100644
80970 --- a/net/ipv6/inet6_connection_sock.c
80971 +++ b/net/ipv6/inet6_connection_sock.c
80972 @@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
80973 #ifdef CONFIG_XFRM
80974 {
80975 struct rt6_info *rt = (struct rt6_info *)dst;
80976 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
80977 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
80978 }
80979 #endif
80980 }
80981 @@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
80982 #ifdef CONFIG_XFRM
80983 if (dst) {
80984 struct rt6_info *rt = (struct rt6_info *)dst;
80985 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
80986 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
80987 sk->sk_dst_cache = NULL;
80988 dst_release(dst);
80989 dst = NULL;
80990 diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
80991 index 093e9b2..f72cddb 100644
80992 --- a/net/ipv6/inet6_hashtables.c
80993 +++ b/net/ipv6/inet6_hashtables.c
80994 @@ -119,7 +119,7 @@ out:
80995 }
80996 EXPORT_SYMBOL(__inet6_lookup_established);
80997
80998 -static int inline compute_score(struct sock *sk, struct net *net,
80999 +static inline int compute_score(struct sock *sk, struct net *net,
81000 const unsigned short hnum,
81001 const struct in6_addr *daddr,
81002 const int dif)
81003 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
81004 index 4f7aaf6..f7acf45 100644
81005 --- a/net/ipv6/ipv6_sockglue.c
81006 +++ b/net/ipv6/ipv6_sockglue.c
81007 @@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
81008 int val, valbool;
81009 int retv = -ENOPROTOOPT;
81010
81011 + pax_track_stack();
81012 +
81013 if (optval == NULL)
81014 val=0;
81015 else {
81016 @@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
81017 int len;
81018 int val;
81019
81020 + pax_track_stack();
81021 +
81022 if (ip6_mroute_opt(optname))
81023 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
81024
81025 @@ -922,7 +926,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
81026 if (sk->sk_type != SOCK_STREAM)
81027 return -ENOPROTOOPT;
81028
81029 - msg.msg_control = optval;
81030 + msg.msg_control = (void __force_kernel *)optval;
81031 msg.msg_controllen = len;
81032 msg.msg_flags = 0;
81033
81034 diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
81035 index 1cf3f0c..1d4376f 100644
81036 --- a/net/ipv6/netfilter/ip6_queue.c
81037 +++ b/net/ipv6/netfilter/ip6_queue.c
81038 @@ -287,6 +287,9 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
81039
81040 if (v->data_len < sizeof(*user_iph))
81041 return 0;
81042 + if (v->data_len > 65535)
81043 + return -EMSGSIZE;
81044 +
81045 diff = v->data_len - e->skb->len;
81046 if (diff < 0) {
81047 if (pskb_trim(e->skb, v->data_len))
81048 @@ -411,7 +414,8 @@ ipq_dev_drop(int ifindex)
81049 static inline void
81050 __ipq_rcv_skb(struct sk_buff *skb)
81051 {
81052 - int status, type, pid, flags, nlmsglen, skblen;
81053 + int status, type, pid, flags;
81054 + unsigned int nlmsglen, skblen;
81055 struct nlmsghdr *nlh;
81056
81057 skblen = skb->len;
81058 diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
81059 index 78b5a36..7f37433 100644
81060 --- a/net/ipv6/netfilter/ip6_tables.c
81061 +++ b/net/ipv6/netfilter/ip6_tables.c
81062 @@ -1173,6 +1173,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
81063 private = &tmp;
81064 }
81065 #endif
81066 + memset(&info, 0, sizeof(info));
81067 info.valid_hooks = t->valid_hooks;
81068 memcpy(info.hook_entry, private->hook_entry,
81069 sizeof(info.hook_entry));
81070 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
81071 index 4f24570..b813b34 100644
81072 --- a/net/ipv6/raw.c
81073 +++ b/net/ipv6/raw.c
81074 @@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
81075 {
81076 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
81077 skb_checksum_complete(skb)) {
81078 - atomic_inc(&sk->sk_drops);
81079 + atomic_inc_unchecked(&sk->sk_drops);
81080 kfree_skb(skb);
81081 return NET_RX_DROP;
81082 }
81083
81084 /* Charge it to the socket. */
81085 if (sock_queue_rcv_skb(sk,skb)<0) {
81086 - atomic_inc(&sk->sk_drops);
81087 + atomic_inc_unchecked(&sk->sk_drops);
81088 kfree_skb(skb);
81089 return NET_RX_DROP;
81090 }
81091 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
81092 struct raw6_sock *rp = raw6_sk(sk);
81093
81094 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
81095 - atomic_inc(&sk->sk_drops);
81096 + atomic_inc_unchecked(&sk->sk_drops);
81097 kfree_skb(skb);
81098 return NET_RX_DROP;
81099 }
81100 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
81101
81102 if (inet->hdrincl) {
81103 if (skb_checksum_complete(skb)) {
81104 - atomic_inc(&sk->sk_drops);
81105 + atomic_inc_unchecked(&sk->sk_drops);
81106 kfree_skb(skb);
81107 return NET_RX_DROP;
81108 }
81109 @@ -518,7 +518,7 @@ csum_copy_err:
81110 as some normal condition.
81111 */
81112 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
81113 - atomic_inc(&sk->sk_drops);
81114 + atomic_inc_unchecked(&sk->sk_drops);
81115 goto out;
81116 }
81117
81118 @@ -600,7 +600,7 @@ out:
81119 return err;
81120 }
81121
81122 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
81123 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
81124 struct flowi *fl, struct rt6_info *rt,
81125 unsigned int flags)
81126 {
81127 @@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
81128 u16 proto;
81129 int err;
81130
81131 + pax_track_stack();
81132 +
81133 /* Rough check on arithmetic overflow,
81134 better check is made in ip6_append_data().
81135 */
81136 @@ -916,12 +918,17 @@ do_confirm:
81137 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
81138 char __user *optval, int optlen)
81139 {
81140 + struct icmp6_filter filter;
81141 +
81142 switch (optname) {
81143 case ICMPV6_FILTER:
81144 + if (optlen < 0)
81145 + return -EINVAL;
81146 if (optlen > sizeof(struct icmp6_filter))
81147 optlen = sizeof(struct icmp6_filter);
81148 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
81149 + if (copy_from_user(&filter, optval, optlen))
81150 return -EFAULT;
81151 + raw6_sk(sk)->filter = filter;
81152 return 0;
81153 default:
81154 return -ENOPROTOOPT;
81155 @@ -934,6 +941,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
81156 char __user *optval, int __user *optlen)
81157 {
81158 int len;
81159 + struct icmp6_filter filter;
81160
81161 switch (optname) {
81162 case ICMPV6_FILTER:
81163 @@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
81164 len = sizeof(struct icmp6_filter);
81165 if (put_user(len, optlen))
81166 return -EFAULT;
81167 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
81168 + filter = raw6_sk(sk)->filter;
81169 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
81170 return -EFAULT;
81171 return 0;
81172 default:
81173 @@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
81174 0, 0L, 0,
81175 sock_i_uid(sp), 0,
81176 sock_i_ino(sp),
81177 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
81178 + atomic_read(&sp->sk_refcnt),
81179 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81180 + NULL,
81181 +#else
81182 + sp,
81183 +#endif
81184 + atomic_read_unchecked(&sp->sk_drops));
81185 }
81186
81187 static int raw6_seq_show(struct seq_file *seq, void *v)
81188 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
81189 index faae6df..d4430c1 100644
81190 --- a/net/ipv6/tcp_ipv6.c
81191 +++ b/net/ipv6/tcp_ipv6.c
81192 @@ -89,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
81193 }
81194 #endif
81195
81196 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81197 +extern int grsec_enable_blackhole;
81198 +#endif
81199 +
81200 static void tcp_v6_hash(struct sock *sk)
81201 {
81202 if (sk->sk_state != TCP_CLOSE) {
81203 @@ -1579,6 +1583,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
81204 return 0;
81205
81206 reset:
81207 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81208 + if (!grsec_enable_blackhole)
81209 +#endif
81210 tcp_v6_send_reset(sk, skb);
81211 discard:
81212 if (opt_skb)
81213 @@ -1656,12 +1663,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
81214 TCP_SKB_CB(skb)->sacked = 0;
81215
81216 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
81217 - if (!sk)
81218 + if (!sk) {
81219 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81220 + ret = 1;
81221 +#endif
81222 goto no_tcp_socket;
81223 + }
81224
81225 process:
81226 - if (sk->sk_state == TCP_TIME_WAIT)
81227 + if (sk->sk_state == TCP_TIME_WAIT) {
81228 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81229 + ret = 2;
81230 +#endif
81231 goto do_time_wait;
81232 + }
81233
81234 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
81235 goto discard_and_relse;
81236 @@ -1701,6 +1716,10 @@ no_tcp_socket:
81237 bad_packet:
81238 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
81239 } else {
81240 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81241 + if (!grsec_enable_blackhole || (ret == 1 &&
81242 + (skb->dev->flags & IFF_LOOPBACK)))
81243 +#endif
81244 tcp_v6_send_reset(NULL, skb);
81245 }
81246
81247 @@ -1916,7 +1935,13 @@ static void get_openreq6(struct seq_file *seq,
81248 uid,
81249 0, /* non standard timer */
81250 0, /* open_requests have no inode */
81251 - 0, req);
81252 + 0,
81253 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81254 + NULL
81255 +#else
81256 + req
81257 +#endif
81258 + );
81259 }
81260
81261 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
81262 @@ -1966,7 +1991,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
81263 sock_i_uid(sp),
81264 icsk->icsk_probes_out,
81265 sock_i_ino(sp),
81266 - atomic_read(&sp->sk_refcnt), sp,
81267 + atomic_read(&sp->sk_refcnt),
81268 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81269 + NULL,
81270 +#else
81271 + sp,
81272 +#endif
81273 jiffies_to_clock_t(icsk->icsk_rto),
81274 jiffies_to_clock_t(icsk->icsk_ack.ato),
81275 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
81276 @@ -2001,7 +2031,13 @@ static void get_timewait6_sock(struct seq_file *seq,
81277 dest->s6_addr32[2], dest->s6_addr32[3], destp,
81278 tw->tw_substate, 0, 0,
81279 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
81280 - atomic_read(&tw->tw_refcnt), tw);
81281 + atomic_read(&tw->tw_refcnt),
81282 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81283 + NULL
81284 +#else
81285 + tw
81286 +#endif
81287 + );
81288 }
81289
81290 static int tcp6_seq_show(struct seq_file *seq, void *v)
81291 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
81292 index 9cc6289..052c521 100644
81293 --- a/net/ipv6/udp.c
81294 +++ b/net/ipv6/udp.c
81295 @@ -49,6 +49,10 @@
81296 #include <linux/seq_file.h>
81297 #include "udp_impl.h"
81298
81299 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81300 +extern int grsec_enable_blackhole;
81301 +#endif
81302 +
81303 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
81304 {
81305 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
81306 @@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
81307 if (rc == -ENOMEM) {
81308 UDP6_INC_STATS_BH(sock_net(sk),
81309 UDP_MIB_RCVBUFERRORS, is_udplite);
81310 - atomic_inc(&sk->sk_drops);
81311 + atomic_inc_unchecked(&sk->sk_drops);
81312 }
81313 goto drop;
81314 }
81315 @@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
81316 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
81317 proto == IPPROTO_UDPLITE);
81318
81319 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81320 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
81321 +#endif
81322 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
81323
81324 kfree_skb(skb);
81325 @@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
81326 0, 0L, 0,
81327 sock_i_uid(sp), 0,
81328 sock_i_ino(sp),
81329 - atomic_read(&sp->sk_refcnt), sp,
81330 - atomic_read(&sp->sk_drops));
81331 + atomic_read(&sp->sk_refcnt),
81332 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81333 + NULL,
81334 +#else
81335 + sp,
81336 +#endif
81337 + atomic_read_unchecked(&sp->sk_drops));
81338 }
81339
81340 int udp6_seq_show(struct seq_file *seq, void *v)
81341 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
81342 index 811984d..11f59b7 100644
81343 --- a/net/irda/ircomm/ircomm_tty.c
81344 +++ b/net/irda/ircomm/ircomm_tty.c
81345 @@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
81346 add_wait_queue(&self->open_wait, &wait);
81347
81348 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
81349 - __FILE__,__LINE__, tty->driver->name, self->open_count );
81350 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
81351
81352 /* As far as I can see, we protect open_count - Jean II */
81353 spin_lock_irqsave(&self->spinlock, flags);
81354 if (!tty_hung_up_p(filp)) {
81355 extra_count = 1;
81356 - self->open_count--;
81357 + local_dec(&self->open_count);
81358 }
81359 spin_unlock_irqrestore(&self->spinlock, flags);
81360 - self->blocked_open++;
81361 + local_inc(&self->blocked_open);
81362
81363 while (1) {
81364 if (tty->termios->c_cflag & CBAUD) {
81365 @@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
81366 }
81367
81368 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
81369 - __FILE__,__LINE__, tty->driver->name, self->open_count );
81370 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
81371
81372 schedule();
81373 }
81374 @@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
81375 if (extra_count) {
81376 /* ++ is not atomic, so this should be protected - Jean II */
81377 spin_lock_irqsave(&self->spinlock, flags);
81378 - self->open_count++;
81379 + local_inc(&self->open_count);
81380 spin_unlock_irqrestore(&self->spinlock, flags);
81381 }
81382 - self->blocked_open--;
81383 + local_dec(&self->blocked_open);
81384
81385 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
81386 - __FILE__,__LINE__, tty->driver->name, self->open_count);
81387 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
81388
81389 if (!retval)
81390 self->flags |= ASYNC_NORMAL_ACTIVE;
81391 @@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
81392 }
81393 /* ++ is not atomic, so this should be protected - Jean II */
81394 spin_lock_irqsave(&self->spinlock, flags);
81395 - self->open_count++;
81396 + local_inc(&self->open_count);
81397
81398 tty->driver_data = self;
81399 self->tty = tty;
81400 spin_unlock_irqrestore(&self->spinlock, flags);
81401
81402 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
81403 - self->line, self->open_count);
81404 + self->line, local_read(&self->open_count));
81405
81406 /* Not really used by us, but lets do it anyway */
81407 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
81408 @@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
81409 return;
81410 }
81411
81412 - if ((tty->count == 1) && (self->open_count != 1)) {
81413 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
81414 /*
81415 * Uh, oh. tty->count is 1, which means that the tty
81416 * structure will be freed. state->count should always
81417 @@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
81418 */
81419 IRDA_DEBUG(0, "%s(), bad serial port count; "
81420 "tty->count is 1, state->count is %d\n", __func__ ,
81421 - self->open_count);
81422 - self->open_count = 1;
81423 + local_read(&self->open_count));
81424 + local_set(&self->open_count, 1);
81425 }
81426
81427 - if (--self->open_count < 0) {
81428 + if (local_dec_return(&self->open_count) < 0) {
81429 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
81430 - __func__, self->line, self->open_count);
81431 - self->open_count = 0;
81432 + __func__, self->line, local_read(&self->open_count));
81433 + local_set(&self->open_count, 0);
81434 }
81435 - if (self->open_count) {
81436 + if (local_read(&self->open_count)) {
81437 spin_unlock_irqrestore(&self->spinlock, flags);
81438
81439 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
81440 @@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
81441 tty->closing = 0;
81442 self->tty = NULL;
81443
81444 - if (self->blocked_open) {
81445 + if (local_read(&self->blocked_open)) {
81446 if (self->close_delay)
81447 schedule_timeout_interruptible(self->close_delay);
81448 wake_up_interruptible(&self->open_wait);
81449 @@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
81450 spin_lock_irqsave(&self->spinlock, flags);
81451 self->flags &= ~ASYNC_NORMAL_ACTIVE;
81452 self->tty = NULL;
81453 - self->open_count = 0;
81454 + local_set(&self->open_count, 0);
81455 spin_unlock_irqrestore(&self->spinlock, flags);
81456
81457 wake_up_interruptible(&self->open_wait);
81458 @@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
81459 seq_putc(m, '\n');
81460
81461 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
81462 - seq_printf(m, "Open count: %d\n", self->open_count);
81463 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
81464 seq_printf(m, "Max data size: %d\n", self->max_data_size);
81465 seq_printf(m, "Max header size: %d\n", self->max_header_size);
81466
81467 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
81468 index bada1b9..f325943 100644
81469 --- a/net/iucv/af_iucv.c
81470 +++ b/net/iucv/af_iucv.c
81471 @@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct sock *sk)
81472
81473 write_lock_bh(&iucv_sk_list.lock);
81474
81475 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
81476 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
81477 while (__iucv_get_sock_by_name(name)) {
81478 sprintf(name, "%08x",
81479 - atomic_inc_return(&iucv_sk_list.autobind_name));
81480 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
81481 }
81482
81483 write_unlock_bh(&iucv_sk_list.lock);
81484 diff --git a/net/key/af_key.c b/net/key/af_key.c
81485 index 4e98193..439b449 100644
81486 --- a/net/key/af_key.c
81487 +++ b/net/key/af_key.c
81488 @@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
81489 struct xfrm_migrate m[XFRM_MAX_DEPTH];
81490 struct xfrm_kmaddress k;
81491
81492 + pax_track_stack();
81493 +
81494 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
81495 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
81496 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
81497 @@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
81498 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
81499 else
81500 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
81501 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81502 + NULL,
81503 +#else
81504 s,
81505 +#endif
81506 atomic_read(&s->sk_refcnt),
81507 sk_rmem_alloc_get(s),
81508 sk_wmem_alloc_get(s),
81509 diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
81510 index bda96d1..c038b72 100644
81511 --- a/net/lapb/lapb_iface.c
81512 +++ b/net/lapb/lapb_iface.c
81513 @@ -157,7 +157,7 @@ int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks
81514 goto out;
81515
81516 lapb->dev = dev;
81517 - lapb->callbacks = *callbacks;
81518 + lapb->callbacks = callbacks;
81519
81520 __lapb_insert_cb(lapb);
81521
81522 @@ -379,32 +379,32 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
81523
81524 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
81525 {
81526 - if (lapb->callbacks.connect_confirmation)
81527 - lapb->callbacks.connect_confirmation(lapb->dev, reason);
81528 + if (lapb->callbacks->connect_confirmation)
81529 + lapb->callbacks->connect_confirmation(lapb->dev, reason);
81530 }
81531
81532 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
81533 {
81534 - if (lapb->callbacks.connect_indication)
81535 - lapb->callbacks.connect_indication(lapb->dev, reason);
81536 + if (lapb->callbacks->connect_indication)
81537 + lapb->callbacks->connect_indication(lapb->dev, reason);
81538 }
81539
81540 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
81541 {
81542 - if (lapb->callbacks.disconnect_confirmation)
81543 - lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
81544 + if (lapb->callbacks->disconnect_confirmation)
81545 + lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
81546 }
81547
81548 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
81549 {
81550 - if (lapb->callbacks.disconnect_indication)
81551 - lapb->callbacks.disconnect_indication(lapb->dev, reason);
81552 + if (lapb->callbacks->disconnect_indication)
81553 + lapb->callbacks->disconnect_indication(lapb->dev, reason);
81554 }
81555
81556 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
81557 {
81558 - if (lapb->callbacks.data_indication)
81559 - return lapb->callbacks.data_indication(lapb->dev, skb);
81560 + if (lapb->callbacks->data_indication)
81561 + return lapb->callbacks->data_indication(lapb->dev, skb);
81562
81563 kfree_skb(skb);
81564 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
81565 @@ -414,8 +414,8 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb)
81566 {
81567 int used = 0;
81568
81569 - if (lapb->callbacks.data_transmit) {
81570 - lapb->callbacks.data_transmit(lapb->dev, skb);
81571 + if (lapb->callbacks->data_transmit) {
81572 + lapb->callbacks->data_transmit(lapb->dev, skb);
81573 used = 1;
81574 }
81575
81576 diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
81577 index fe2d3f8..e57f683 100644
81578 --- a/net/mac80211/cfg.c
81579 +++ b/net/mac80211/cfg.c
81580 @@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
81581 return err;
81582 }
81583
81584 -struct cfg80211_ops mac80211_config_ops = {
81585 +const struct cfg80211_ops mac80211_config_ops = {
81586 .add_virtual_intf = ieee80211_add_iface,
81587 .del_virtual_intf = ieee80211_del_iface,
81588 .change_virtual_intf = ieee80211_change_iface,
81589 diff --git a/net/mac80211/cfg.h b/net/mac80211/cfg.h
81590 index 7d7879f..2d51f62 100644
81591 --- a/net/mac80211/cfg.h
81592 +++ b/net/mac80211/cfg.h
81593 @@ -4,6 +4,6 @@
81594 #ifndef __CFG_H
81595 #define __CFG_H
81596
81597 -extern struct cfg80211_ops mac80211_config_ops;
81598 +extern const struct cfg80211_ops mac80211_config_ops;
81599
81600 #endif /* __CFG_H */
81601 diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
81602 index 99c7525..9cb4937 100644
81603 --- a/net/mac80211/debugfs_key.c
81604 +++ b/net/mac80211/debugfs_key.c
81605 @@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file *file, char __user *userbuf,
81606 size_t count, loff_t *ppos)
81607 {
81608 struct ieee80211_key *key = file->private_data;
81609 - int i, res, bufsize = 2 * key->conf.keylen + 2;
81610 + int i, bufsize = 2 * key->conf.keylen + 2;
81611 char *buf = kmalloc(bufsize, GFP_KERNEL);
81612 char *p = buf;
81613 + ssize_t res;
81614 +
81615 + if (buf == NULL)
81616 + return -ENOMEM;
81617
81618 for (i = 0; i < key->conf.keylen; i++)
81619 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
81620 diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
81621 index 33a2e89..08650c8 100644
81622 --- a/net/mac80211/debugfs_sta.c
81623 +++ b/net/mac80211/debugfs_sta.c
81624 @@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
81625 int i;
81626 struct sta_info *sta = file->private_data;
81627
81628 + pax_track_stack();
81629 +
81630 spin_lock_bh(&sta->lock);
81631 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
81632 sta->ampdu_mlme.dialog_token_allocator + 1);
81633 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
81634 index ca62bfe..6657a03 100644
81635 --- a/net/mac80211/ieee80211_i.h
81636 +++ b/net/mac80211/ieee80211_i.h
81637 @@ -25,6 +25,7 @@
81638 #include <linux/etherdevice.h>
81639 #include <net/cfg80211.h>
81640 #include <net/mac80211.h>
81641 +#include <asm/local.h>
81642 #include "key.h"
81643 #include "sta_info.h"
81644
81645 @@ -635,7 +636,7 @@ struct ieee80211_local {
81646 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
81647 spinlock_t queue_stop_reason_lock;
81648
81649 - int open_count;
81650 + local_t open_count;
81651 int monitors, cooked_mntrs;
81652 /* number of interfaces with corresponding FIF_ flags */
81653 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
81654 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
81655 index 079c500..eb3c6d4 100644
81656 --- a/net/mac80211/iface.c
81657 +++ b/net/mac80211/iface.c
81658 @@ -166,7 +166,7 @@ static int ieee80211_open(struct net_device *dev)
81659 break;
81660 }
81661
81662 - if (local->open_count == 0) {
81663 + if (local_read(&local->open_count) == 0) {
81664 res = drv_start(local);
81665 if (res)
81666 goto err_del_bss;
81667 @@ -196,7 +196,7 @@ static int ieee80211_open(struct net_device *dev)
81668 * Validate the MAC address for this device.
81669 */
81670 if (!is_valid_ether_addr(dev->dev_addr)) {
81671 - if (!local->open_count)
81672 + if (!local_read(&local->open_count))
81673 drv_stop(local);
81674 return -EADDRNOTAVAIL;
81675 }
81676 @@ -292,7 +292,7 @@ static int ieee80211_open(struct net_device *dev)
81677
81678 hw_reconf_flags |= __ieee80211_recalc_idle(local);
81679
81680 - local->open_count++;
81681 + local_inc(&local->open_count);
81682 if (hw_reconf_flags) {
81683 ieee80211_hw_config(local, hw_reconf_flags);
81684 /*
81685 @@ -320,7 +320,7 @@ static int ieee80211_open(struct net_device *dev)
81686 err_del_interface:
81687 drv_remove_interface(local, &conf);
81688 err_stop:
81689 - if (!local->open_count)
81690 + if (!local_read(&local->open_count))
81691 drv_stop(local);
81692 err_del_bss:
81693 sdata->bss = NULL;
81694 @@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_device *dev)
81695 WARN_ON(!list_empty(&sdata->u.ap.vlans));
81696 }
81697
81698 - local->open_count--;
81699 + local_dec(&local->open_count);
81700
81701 switch (sdata->vif.type) {
81702 case NL80211_IFTYPE_AP_VLAN:
81703 @@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_device *dev)
81704
81705 ieee80211_recalc_ps(local, -1);
81706
81707 - if (local->open_count == 0) {
81708 + if (local_read(&local->open_count) == 0) {
81709 ieee80211_clear_tx_pending(local);
81710 ieee80211_stop_device(local);
81711
81712 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
81713 index 2dfe176..74e4388 100644
81714 --- a/net/mac80211/main.c
81715 +++ b/net/mac80211/main.c
81716 @@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
81717 local->hw.conf.power_level = power;
81718 }
81719
81720 - if (changed && local->open_count) {
81721 + if (changed && local_read(&local->open_count)) {
81722 ret = drv_config(local, changed);
81723 /*
81724 * Goal:
81725 diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
81726 index e67eea7..fcc227e 100644
81727 --- a/net/mac80211/mlme.c
81728 +++ b/net/mac80211/mlme.c
81729 @@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
81730 bool have_higher_than_11mbit = false, newsta = false;
81731 u16 ap_ht_cap_flags;
81732
81733 + pax_track_stack();
81734 +
81735 /*
81736 * AssocResp and ReassocResp have identical structure, so process both
81737 * of them in this function.
81738 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
81739 index e535f1c..4d733d1 100644
81740 --- a/net/mac80211/pm.c
81741 +++ b/net/mac80211/pm.c
81742 @@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
81743 }
81744
81745 /* stop hardware - this must stop RX */
81746 - if (local->open_count)
81747 + if (local_read(&local->open_count))
81748 ieee80211_stop_device(local);
81749
81750 local->suspended = true;
81751 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
81752 index b33efc4..0a2efb6 100644
81753 --- a/net/mac80211/rate.c
81754 +++ b/net/mac80211/rate.c
81755 @@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
81756 struct rate_control_ref *ref, *old;
81757
81758 ASSERT_RTNL();
81759 - if (local->open_count)
81760 + if (local_read(&local->open_count))
81761 return -EBUSY;
81762
81763 ref = rate_control_alloc(name, local);
81764 diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
81765 index b1d7904..57e4da7 100644
81766 --- a/net/mac80211/tx.c
81767 +++ b/net/mac80211/tx.c
81768 @@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
81769 return cpu_to_le16(dur);
81770 }
81771
81772 -static int inline is_ieee80211_device(struct ieee80211_local *local,
81773 +static inline int is_ieee80211_device(struct ieee80211_local *local,
81774 struct net_device *dev)
81775 {
81776 return local == wdev_priv(dev->ieee80211_ptr);
81777 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
81778 index 31b1085..48fb26d 100644
81779 --- a/net/mac80211/util.c
81780 +++ b/net/mac80211/util.c
81781 @@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
81782 local->resuming = true;
81783
81784 /* restart hardware */
81785 - if (local->open_count) {
81786 + if (local_read(&local->open_count)) {
81787 /*
81788 * Upon resume hardware can sometimes be goofy due to
81789 * various platform / driver / bus issues, so restarting
81790 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
81791 index 634d14a..b35a608 100644
81792 --- a/net/netfilter/Kconfig
81793 +++ b/net/netfilter/Kconfig
81794 @@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
81795
81796 To compile it as a module, choose M here. If unsure, say N.
81797
81798 +config NETFILTER_XT_MATCH_GRADM
81799 + tristate '"gradm" match support'
81800 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
81801 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
81802 + ---help---
81803 + The gradm match allows to match on grsecurity RBAC being enabled.
81804 + It is useful when iptables rules are applied early on bootup to
81805 + prevent connections to the machine (except from a trusted host)
81806 + while the RBAC system is disabled.
81807 +
81808 config NETFILTER_XT_MATCH_HASHLIMIT
81809 tristate '"hashlimit" match support'
81810 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
81811 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
81812 index 49f62ee..a17b2c6 100644
81813 --- a/net/netfilter/Makefile
81814 +++ b/net/netfilter/Makefile
81815 @@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
81816 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
81817 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
81818 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
81819 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
81820 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
81821 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
81822 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
81823 diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
81824 index 3c7e427..724043c 100644
81825 --- a/net/netfilter/ipvs/ip_vs_app.c
81826 +++ b/net/netfilter/ipvs/ip_vs_app.c
81827 @@ -564,7 +564,7 @@ static const struct file_operations ip_vs_app_fops = {
81828 .open = ip_vs_app_open,
81829 .read = seq_read,
81830 .llseek = seq_lseek,
81831 - .release = seq_release,
81832 + .release = seq_release_net,
81833 };
81834 #endif
81835
81836 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
81837 index 95682e5..457dbac 100644
81838 --- a/net/netfilter/ipvs/ip_vs_conn.c
81839 +++ b/net/netfilter/ipvs/ip_vs_conn.c
81840 @@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
81841 /* if the connection is not template and is created
81842 * by sync, preserve the activity flag.
81843 */
81844 - cp->flags |= atomic_read(&dest->conn_flags) &
81845 + cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
81846 (~IP_VS_CONN_F_INACTIVE);
81847 else
81848 - cp->flags |= atomic_read(&dest->conn_flags);
81849 + cp->flags |= atomic_read_unchecked(&dest->conn_flags);
81850 cp->dest = dest;
81851
81852 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
81853 @@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport,
81854 atomic_set(&cp->refcnt, 1);
81855
81856 atomic_set(&cp->n_control, 0);
81857 - atomic_set(&cp->in_pkts, 0);
81858 + atomic_set_unchecked(&cp->in_pkts, 0);
81859
81860 atomic_inc(&ip_vs_conn_count);
81861 if (flags & IP_VS_CONN_F_NO_CPORT)
81862 @@ -871,7 +871,7 @@ static const struct file_operations ip_vs_conn_fops = {
81863 .open = ip_vs_conn_open,
81864 .read = seq_read,
81865 .llseek = seq_lseek,
81866 - .release = seq_release,
81867 + .release = seq_release_net,
81868 };
81869
81870 static const char *ip_vs_origin_name(unsigned flags)
81871 @@ -934,7 +934,7 @@ static const struct file_operations ip_vs_conn_sync_fops = {
81872 .open = ip_vs_conn_sync_open,
81873 .read = seq_read,
81874 .llseek = seq_lseek,
81875 - .release = seq_release,
81876 + .release = seq_release_net,
81877 };
81878
81879 #endif
81880 @@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
81881
81882 /* Don't drop the entry if its number of incoming packets is not
81883 located in [0, 8] */
81884 - i = atomic_read(&cp->in_pkts);
81885 + i = atomic_read_unchecked(&cp->in_pkts);
81886 if (i > 8 || i < 0) return 0;
81887
81888 if (!todrop_rate[i]) return 0;
81889 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
81890 index b95699f..5fee919 100644
81891 --- a/net/netfilter/ipvs/ip_vs_core.c
81892 +++ b/net/netfilter/ipvs/ip_vs_core.c
81893 @@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
81894 ret = cp->packet_xmit(skb, cp, pp);
81895 /* do not touch skb anymore */
81896
81897 - atomic_inc(&cp->in_pkts);
81898 + atomic_inc_unchecked(&cp->in_pkts);
81899 ip_vs_conn_put(cp);
81900 return ret;
81901 }
81902 @@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
81903 * Sync connection if it is about to close to
81904 * encorage the standby servers to update the connections timeout
81905 */
81906 - pkts = atomic_add_return(1, &cp->in_pkts);
81907 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
81908 if (af == AF_INET &&
81909 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
81910 (((cp->protocol != IPPROTO_TCP ||
81911 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
81912 index 02b2610..2d89424 100644
81913 --- a/net/netfilter/ipvs/ip_vs_ctl.c
81914 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
81915 @@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc,
81916 ip_vs_rs_hash(dest);
81917 write_unlock_bh(&__ip_vs_rs_lock);
81918 }
81919 - atomic_set(&dest->conn_flags, conn_flags);
81920 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
81921
81922 /* bind the service */
81923 if (!dest->svc) {
81924 @@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
81925 " %-7s %-6d %-10d %-10d\n",
81926 &dest->addr.in6,
81927 ntohs(dest->port),
81928 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
81929 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
81930 atomic_read(&dest->weight),
81931 atomic_read(&dest->activeconns),
81932 atomic_read(&dest->inactconns));
81933 @@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
81934 "%-7s %-6d %-10d %-10d\n",
81935 ntohl(dest->addr.ip),
81936 ntohs(dest->port),
81937 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
81938 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
81939 atomic_read(&dest->weight),
81940 atomic_read(&dest->activeconns),
81941 atomic_read(&dest->inactconns));
81942 @@ -1927,7 +1927,7 @@ static const struct file_operations ip_vs_info_fops = {
81943 .open = ip_vs_info_open,
81944 .read = seq_read,
81945 .llseek = seq_lseek,
81946 - .release = seq_release_private,
81947 + .release = seq_release_net,
81948 };
81949
81950 #endif
81951 @@ -1976,7 +1976,7 @@ static const struct file_operations ip_vs_stats_fops = {
81952 .open = ip_vs_stats_seq_open,
81953 .read = seq_read,
81954 .llseek = seq_lseek,
81955 - .release = single_release,
81956 + .release = single_release_net,
81957 };
81958
81959 #endif
81960 @@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
81961
81962 entry.addr = dest->addr.ip;
81963 entry.port = dest->port;
81964 - entry.conn_flags = atomic_read(&dest->conn_flags);
81965 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
81966 entry.weight = atomic_read(&dest->weight);
81967 entry.u_threshold = dest->u_threshold;
81968 entry.l_threshold = dest->l_threshold;
81969 @@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
81970 unsigned char arg[128];
81971 int ret = 0;
81972
81973 + pax_track_stack();
81974 +
81975 if (!capable(CAP_NET_ADMIN))
81976 return -EPERM;
81977
81978 @@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
81979 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
81980
81981 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
81982 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
81983 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
81984 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
81985 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
81986 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
81987 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
81988 index e177f0d..55e8581 100644
81989 --- a/net/netfilter/ipvs/ip_vs_sync.c
81990 +++ b/net/netfilter/ipvs/ip_vs_sync.c
81991 @@ -438,7 +438,7 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
81992
81993 if (opt)
81994 memcpy(&cp->in_seq, opt, sizeof(*opt));
81995 - atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
81996 + atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
81997 cp->state = state;
81998 cp->old_state = cp->state;
81999 /*
82000 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
82001 index 30b3189..e2e4b55 100644
82002 --- a/net/netfilter/ipvs/ip_vs_xmit.c
82003 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
82004 @@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
82005 else
82006 rc = NF_ACCEPT;
82007 /* do not touch skb anymore */
82008 - atomic_inc(&cp->in_pkts);
82009 + atomic_inc_unchecked(&cp->in_pkts);
82010 goto out;
82011 }
82012
82013 @@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
82014 else
82015 rc = NF_ACCEPT;
82016 /* do not touch skb anymore */
82017 - atomic_inc(&cp->in_pkts);
82018 + atomic_inc_unchecked(&cp->in_pkts);
82019 goto out;
82020 }
82021
82022 diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
82023 index d521718..d0fd7a1 100644
82024 --- a/net/netfilter/nf_conntrack_netlink.c
82025 +++ b/net/netfilter/nf_conntrack_netlink.c
82026 @@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlattr *attr,
82027 static int
82028 ctnetlink_parse_tuple(const struct nlattr * const cda[],
82029 struct nf_conntrack_tuple *tuple,
82030 - enum ctattr_tuple type, u_int8_t l3num)
82031 + enum ctattr_type type, u_int8_t l3num)
82032 {
82033 struct nlattr *tb[CTA_TUPLE_MAX+1];
82034 int err;
82035 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
82036 index f900dc3..5e45346 100644
82037 --- a/net/netfilter/nfnetlink_log.c
82038 +++ b/net/netfilter/nfnetlink_log.c
82039 @@ -68,7 +68,7 @@ struct nfulnl_instance {
82040 };
82041
82042 static DEFINE_RWLOCK(instances_lock);
82043 -static atomic_t global_seq;
82044 +static atomic_unchecked_t global_seq;
82045
82046 #define INSTANCE_BUCKETS 16
82047 static struct hlist_head instance_table[INSTANCE_BUCKETS];
82048 @@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_instance *inst,
82049 /* global sequence number */
82050 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
82051 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
82052 - htonl(atomic_inc_return(&global_seq)));
82053 + htonl(atomic_inc_return_unchecked(&global_seq)));
82054
82055 if (data_len) {
82056 struct nlattr *nla;
82057 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
82058 new file mode 100644
82059 index 0000000..b1bac76
82060 --- /dev/null
82061 +++ b/net/netfilter/xt_gradm.c
82062 @@ -0,0 +1,51 @@
82063 +/*
82064 + * gradm match for netfilter
82065 + * Copyright © Zbigniew Krzystolik, 2010
82066 + *
82067 + * This program is free software; you can redistribute it and/or modify
82068 + * it under the terms of the GNU General Public License; either version
82069 + * 2 or 3 as published by the Free Software Foundation.
82070 + */
82071 +#include <linux/module.h>
82072 +#include <linux/moduleparam.h>
82073 +#include <linux/skbuff.h>
82074 +#include <linux/netfilter/x_tables.h>
82075 +#include <linux/grsecurity.h>
82076 +#include <linux/netfilter/xt_gradm.h>
82077 +
82078 +static bool
82079 +gradm_mt(const struct sk_buff *skb, const struct xt_match_param *par)
82080 +{
82081 + const struct xt_gradm_mtinfo *info = par->matchinfo;
82082 + bool retval = false;
82083 + if (gr_acl_is_enabled())
82084 + retval = true;
82085 + return retval ^ info->invflags;
82086 +}
82087 +
82088 +static struct xt_match gradm_mt_reg __read_mostly = {
82089 + .name = "gradm",
82090 + .revision = 0,
82091 + .family = NFPROTO_UNSPEC,
82092 + .match = gradm_mt,
82093 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
82094 + .me = THIS_MODULE,
82095 +};
82096 +
82097 +static int __init gradm_mt_init(void)
82098 +{
82099 + return xt_register_match(&gradm_mt_reg);
82100 +}
82101 +
82102 +static void __exit gradm_mt_exit(void)
82103 +{
82104 + xt_unregister_match(&gradm_mt_reg);
82105 +}
82106 +
82107 +module_init(gradm_mt_init);
82108 +module_exit(gradm_mt_exit);
82109 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
82110 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
82111 +MODULE_LICENSE("GPL");
82112 +MODULE_ALIAS("ipt_gradm");
82113 +MODULE_ALIAS("ip6t_gradm");
82114 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
82115 index 5a7dcdf..24a3578 100644
82116 --- a/net/netlink/af_netlink.c
82117 +++ b/net/netlink/af_netlink.c
82118 @@ -733,7 +733,7 @@ static void netlink_overrun(struct sock *sk)
82119 sk->sk_error_report(sk);
82120 }
82121 }
82122 - atomic_inc(&sk->sk_drops);
82123 + atomic_inc_unchecked(&sk->sk_drops);
82124 }
82125
82126 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
82127 @@ -1964,15 +1964,23 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
82128 struct netlink_sock *nlk = nlk_sk(s);
82129
82130 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d\n",
82131 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82132 + NULL,
82133 +#else
82134 s,
82135 +#endif
82136 s->sk_protocol,
82137 nlk->pid,
82138 nlk->groups ? (u32)nlk->groups[0] : 0,
82139 sk_rmem_alloc_get(s),
82140 sk_wmem_alloc_get(s),
82141 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82142 + NULL,
82143 +#else
82144 nlk->cb,
82145 +#endif
82146 atomic_read(&s->sk_refcnt),
82147 - atomic_read(&s->sk_drops)
82148 + atomic_read_unchecked(&s->sk_drops)
82149 );
82150
82151 }
82152 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
82153 index 7a83495..ab0062f 100644
82154 --- a/net/netrom/af_netrom.c
82155 +++ b/net/netrom/af_netrom.c
82156 @@ -838,6 +838,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
82157 struct sock *sk = sock->sk;
82158 struct nr_sock *nr = nr_sk(sk);
82159
82160 + memset(sax, 0, sizeof(*sax));
82161 lock_sock(sk);
82162 if (peer != 0) {
82163 if (sk->sk_state != TCP_ESTABLISHED) {
82164 @@ -852,7 +853,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
82165 *uaddr_len = sizeof(struct full_sockaddr_ax25);
82166 } else {
82167 sax->fsa_ax25.sax25_family = AF_NETROM;
82168 - sax->fsa_ax25.sax25_ndigis = 0;
82169 sax->fsa_ax25.sax25_call = nr->source_addr;
82170 *uaddr_len = sizeof(struct sockaddr_ax25);
82171 }
82172 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
82173 index 35cfa79..4e78ff7 100644
82174 --- a/net/packet/af_packet.c
82175 +++ b/net/packet/af_packet.c
82176 @@ -2429,7 +2429,11 @@ static int packet_seq_show(struct seq_file *seq, void *v)
82177
82178 seq_printf(seq,
82179 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
82180 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82181 + NULL,
82182 +#else
82183 s,
82184 +#endif
82185 atomic_read(&s->sk_refcnt),
82186 s->sk_type,
82187 ntohs(po->num),
82188 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
82189 index 519ff9d..a422a90 100644
82190 --- a/net/phonet/af_phonet.c
82191 +++ b/net/phonet/af_phonet.c
82192 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(int protocol)
82193 {
82194 struct phonet_protocol *pp;
82195
82196 - if (protocol >= PHONET_NPROTO)
82197 + if (protocol < 0 || protocol >= PHONET_NPROTO)
82198 return NULL;
82199
82200 spin_lock(&proto_tab_lock);
82201 @@ -402,7 +402,7 @@ int __init_or_module phonet_proto_register(int protocol,
82202 {
82203 int err = 0;
82204
82205 - if (protocol >= PHONET_NPROTO)
82206 + if (protocol < 0 || protocol >= PHONET_NPROTO)
82207 return -EINVAL;
82208
82209 err = proto_register(pp->prot, 1);
82210 diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
82211 index ef5c75c..2b6c2fa 100644
82212 --- a/net/phonet/datagram.c
82213 +++ b/net/phonet/datagram.c
82214 @@ -162,7 +162,7 @@ static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb)
82215 if (err < 0) {
82216 kfree_skb(skb);
82217 if (err == -ENOMEM)
82218 - atomic_inc(&sk->sk_drops);
82219 + atomic_inc_unchecked(&sk->sk_drops);
82220 }
82221 return err ? NET_RX_DROP : NET_RX_SUCCESS;
82222 }
82223 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
82224 index 9cdd35e..16cd850 100644
82225 --- a/net/phonet/pep.c
82226 +++ b/net/phonet/pep.c
82227 @@ -348,7 +348,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
82228
82229 case PNS_PEP_CTRL_REQ:
82230 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
82231 - atomic_inc(&sk->sk_drops);
82232 + atomic_inc_unchecked(&sk->sk_drops);
82233 break;
82234 }
82235 __skb_pull(skb, 4);
82236 @@ -362,12 +362,12 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
82237 if (!err)
82238 return 0;
82239 if (err == -ENOMEM)
82240 - atomic_inc(&sk->sk_drops);
82241 + atomic_inc_unchecked(&sk->sk_drops);
82242 break;
82243 }
82244
82245 if (pn->rx_credits == 0) {
82246 - atomic_inc(&sk->sk_drops);
82247 + atomic_inc_unchecked(&sk->sk_drops);
82248 err = -ENOBUFS;
82249 break;
82250 }
82251 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
82252 index aa5b5a9..c09b4f8 100644
82253 --- a/net/phonet/socket.c
82254 +++ b/net/phonet/socket.c
82255 @@ -482,8 +482,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
82256 sk->sk_state,
82257 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
82258 sock_i_uid(sk), sock_i_ino(sk),
82259 - atomic_read(&sk->sk_refcnt), sk,
82260 - atomic_read(&sk->sk_drops), &len);
82261 + atomic_read(&sk->sk_refcnt),
82262 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82263 + NULL,
82264 +#else
82265 + sk,
82266 +#endif
82267 + atomic_read_unchecked(&sk->sk_drops), &len);
82268 }
82269 seq_printf(seq, "%*s\n", 127 - len, "");
82270 return 0;
82271 diff --git a/net/rds/Kconfig b/net/rds/Kconfig
82272 index ec753b3..821187c 100644
82273 --- a/net/rds/Kconfig
82274 +++ b/net/rds/Kconfig
82275 @@ -1,7 +1,7 @@
82276
82277 config RDS
82278 tristate "The RDS Protocol (EXPERIMENTAL)"
82279 - depends on INET && EXPERIMENTAL
82280 + depends on INET && EXPERIMENTAL && BROKEN
82281 ---help---
82282 The RDS (Reliable Datagram Sockets) protocol provides reliable,
82283 sequenced delivery of datagrams over Infiniband, iWARP,
82284 diff --git a/net/rds/cong.c b/net/rds/cong.c
82285 index dd2711d..1c7ed12 100644
82286 --- a/net/rds/cong.c
82287 +++ b/net/rds/cong.c
82288 @@ -77,7 +77,7 @@
82289 * finds that the saved generation number is smaller than the global generation
82290 * number, it wakes up the process.
82291 */
82292 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
82293 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
82294
82295 /*
82296 * Congestion monitoring
82297 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
82298 rdsdebug("waking map %p for %pI4\n",
82299 map, &map->m_addr);
82300 rds_stats_inc(s_cong_update_received);
82301 - atomic_inc(&rds_cong_generation);
82302 + atomic_inc_unchecked(&rds_cong_generation);
82303 if (waitqueue_active(&map->m_waitq))
82304 wake_up(&map->m_waitq);
82305 if (waitqueue_active(&rds_poll_waitq))
82306 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
82307
82308 int rds_cong_updated_since(unsigned long *recent)
82309 {
82310 - unsigned long gen = atomic_read(&rds_cong_generation);
82311 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
82312
82313 if (likely(*recent == gen))
82314 return 0;
82315 diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
82316 index de4a1b1..94ec861 100644
82317 --- a/net/rds/iw_rdma.c
82318 +++ b/net/rds/iw_rdma.c
82319 @@ -181,6 +181,8 @@ int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_i
82320 struct rdma_cm_id *pcm_id;
82321 int rc;
82322
82323 + pax_track_stack();
82324 +
82325 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
82326 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
82327
82328 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
82329 index b5198ae..8b9fb90 100644
82330 --- a/net/rds/tcp.c
82331 +++ b/net/rds/tcp.c
82332 @@ -57,7 +57,7 @@ void rds_tcp_nonagle(struct socket *sock)
82333 int val = 1;
82334
82335 set_fs(KERNEL_DS);
82336 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
82337 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
82338 sizeof(val));
82339 set_fs(oldfs);
82340 }
82341 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
82342 index ab545e0..4079b3b 100644
82343 --- a/net/rds/tcp_send.c
82344 +++ b/net/rds/tcp_send.c
82345 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
82346
82347 oldfs = get_fs();
82348 set_fs(KERNEL_DS);
82349 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
82350 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
82351 sizeof(val));
82352 set_fs(oldfs);
82353 }
82354 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
82355 index a86afce..8657bce 100644
82356 --- a/net/rxrpc/af_rxrpc.c
82357 +++ b/net/rxrpc/af_rxrpc.c
82358 @@ -38,7 +38,7 @@ static const struct proto_ops rxrpc_rpc_ops;
82359 __be32 rxrpc_epoch;
82360
82361 /* current debugging ID */
82362 -atomic_t rxrpc_debug_id;
82363 +atomic_unchecked_t rxrpc_debug_id;
82364
82365 /* count of skbs currently in use */
82366 atomic_t rxrpc_n_skbs;
82367 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
82368 index b4a2209..539106c 100644
82369 --- a/net/rxrpc/ar-ack.c
82370 +++ b/net/rxrpc/ar-ack.c
82371 @@ -174,7 +174,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
82372
82373 _enter("{%d,%d,%d,%d},",
82374 call->acks_hard, call->acks_unacked,
82375 - atomic_read(&call->sequence),
82376 + atomic_read_unchecked(&call->sequence),
82377 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
82378
82379 stop = 0;
82380 @@ -198,7 +198,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
82381
82382 /* each Tx packet has a new serial number */
82383 sp->hdr.serial =
82384 - htonl(atomic_inc_return(&call->conn->serial));
82385 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
82386
82387 hdr = (struct rxrpc_header *) txb->head;
82388 hdr->serial = sp->hdr.serial;
82389 @@ -401,7 +401,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
82390 */
82391 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
82392 {
82393 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
82394 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
82395 }
82396
82397 /*
82398 @@ -627,7 +627,7 @@ process_further:
82399
82400 latest = ntohl(sp->hdr.serial);
82401 hard = ntohl(ack.firstPacket);
82402 - tx = atomic_read(&call->sequence);
82403 + tx = atomic_read_unchecked(&call->sequence);
82404
82405 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
82406 latest,
82407 @@ -840,6 +840,8 @@ void rxrpc_process_call(struct work_struct *work)
82408 u32 abort_code = RX_PROTOCOL_ERROR;
82409 u8 *acks = NULL;
82410
82411 + pax_track_stack();
82412 +
82413 //printk("\n--------------------\n");
82414 _enter("{%d,%s,%lx} [%lu]",
82415 call->debug_id, rxrpc_call_states[call->state], call->events,
82416 @@ -1159,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
82417 goto maybe_reschedule;
82418
82419 send_ACK_with_skew:
82420 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
82421 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
82422 ntohl(ack.serial));
82423 send_ACK:
82424 mtu = call->conn->trans->peer->if_mtu;
82425 @@ -1171,7 +1173,7 @@ send_ACK:
82426 ackinfo.rxMTU = htonl(5692);
82427 ackinfo.jumbo_max = htonl(4);
82428
82429 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
82430 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
82431 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
82432 ntohl(hdr.serial),
82433 ntohs(ack.maxSkew),
82434 @@ -1189,7 +1191,7 @@ send_ACK:
82435 send_message:
82436 _debug("send message");
82437
82438 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
82439 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
82440 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
82441 send_message_2:
82442
82443 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
82444 index bc0019f..e1b4b24 100644
82445 --- a/net/rxrpc/ar-call.c
82446 +++ b/net/rxrpc/ar-call.c
82447 @@ -82,7 +82,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
82448 spin_lock_init(&call->lock);
82449 rwlock_init(&call->state_lock);
82450 atomic_set(&call->usage, 1);
82451 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
82452 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
82453 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
82454
82455 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
82456 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
82457 index 9f1ce84..ff8d061 100644
82458 --- a/net/rxrpc/ar-connection.c
82459 +++ b/net/rxrpc/ar-connection.c
82460 @@ -205,7 +205,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
82461 rwlock_init(&conn->lock);
82462 spin_lock_init(&conn->state_lock);
82463 atomic_set(&conn->usage, 1);
82464 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
82465 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
82466 conn->avail_calls = RXRPC_MAXCALLS;
82467 conn->size_align = 4;
82468 conn->header_size = sizeof(struct rxrpc_header);
82469 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
82470 index 0505cdc..f0748ce 100644
82471 --- a/net/rxrpc/ar-connevent.c
82472 +++ b/net/rxrpc/ar-connevent.c
82473 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
82474
82475 len = iov[0].iov_len + iov[1].iov_len;
82476
82477 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
82478 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
82479 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
82480
82481 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
82482 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
82483 index f98c802..9e8488e 100644
82484 --- a/net/rxrpc/ar-input.c
82485 +++ b/net/rxrpc/ar-input.c
82486 @@ -339,9 +339,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
82487 /* track the latest serial number on this connection for ACK packet
82488 * information */
82489 serial = ntohl(sp->hdr.serial);
82490 - hi_serial = atomic_read(&call->conn->hi_serial);
82491 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
82492 while (serial > hi_serial)
82493 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
82494 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
82495 serial);
82496
82497 /* request ACK generation for any ACK or DATA packet that requests
82498 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
82499 index 7043b29..06edcdf 100644
82500 --- a/net/rxrpc/ar-internal.h
82501 +++ b/net/rxrpc/ar-internal.h
82502 @@ -272,8 +272,8 @@ struct rxrpc_connection {
82503 int error; /* error code for local abort */
82504 int debug_id; /* debug ID for printks */
82505 unsigned call_counter; /* call ID counter */
82506 - atomic_t serial; /* packet serial number counter */
82507 - atomic_t hi_serial; /* highest serial number received */
82508 + atomic_unchecked_t serial; /* packet serial number counter */
82509 + atomic_unchecked_t hi_serial; /* highest serial number received */
82510 u8 avail_calls; /* number of calls available */
82511 u8 size_align; /* data size alignment (for security) */
82512 u8 header_size; /* rxrpc + security header size */
82513 @@ -346,7 +346,7 @@ struct rxrpc_call {
82514 spinlock_t lock;
82515 rwlock_t state_lock; /* lock for state transition */
82516 atomic_t usage;
82517 - atomic_t sequence; /* Tx data packet sequence counter */
82518 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
82519 u32 abort_code; /* local/remote abort code */
82520 enum { /* current state of call */
82521 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
82522 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
82523 */
82524 extern atomic_t rxrpc_n_skbs;
82525 extern __be32 rxrpc_epoch;
82526 -extern atomic_t rxrpc_debug_id;
82527 +extern atomic_unchecked_t rxrpc_debug_id;
82528 extern struct workqueue_struct *rxrpc_workqueue;
82529
82530 /*
82531 diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
82532 index 74697b2..10f9b77 100644
82533 --- a/net/rxrpc/ar-key.c
82534 +++ b/net/rxrpc/ar-key.c
82535 @@ -88,11 +88,11 @@ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr,
82536 return ret;
82537
82538 plen -= sizeof(*token);
82539 - token = kmalloc(sizeof(*token), GFP_KERNEL);
82540 + token = kzalloc(sizeof(*token), GFP_KERNEL);
82541 if (!token)
82542 return -ENOMEM;
82543
82544 - token->kad = kmalloc(plen, GFP_KERNEL);
82545 + token->kad = kzalloc(plen, GFP_KERNEL);
82546 if (!token->kad) {
82547 kfree(token);
82548 return -ENOMEM;
82549 @@ -730,10 +730,10 @@ static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen)
82550 goto error;
82551
82552 ret = -ENOMEM;
82553 - token = kmalloc(sizeof(*token), GFP_KERNEL);
82554 + token = kzalloc(sizeof(*token), GFP_KERNEL);
82555 if (!token)
82556 goto error;
82557 - token->kad = kmalloc(plen, GFP_KERNEL);
82558 + token->kad = kzalloc(plen, GFP_KERNEL);
82559 if (!token->kad)
82560 goto error_free;
82561
82562 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
82563 index 807535f..5b7f19e 100644
82564 --- a/net/rxrpc/ar-local.c
82565 +++ b/net/rxrpc/ar-local.c
82566 @@ -44,7 +44,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
82567 spin_lock_init(&local->lock);
82568 rwlock_init(&local->services_lock);
82569 atomic_set(&local->usage, 1);
82570 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
82571 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
82572 memcpy(&local->srx, srx, sizeof(*srx));
82573 }
82574
82575 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
82576 index cc9102c..7d3888e 100644
82577 --- a/net/rxrpc/ar-output.c
82578 +++ b/net/rxrpc/ar-output.c
82579 @@ -680,9 +680,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
82580 sp->hdr.cid = call->cid;
82581 sp->hdr.callNumber = call->call_id;
82582 sp->hdr.seq =
82583 - htonl(atomic_inc_return(&call->sequence));
82584 + htonl(atomic_inc_return_unchecked(&call->sequence));
82585 sp->hdr.serial =
82586 - htonl(atomic_inc_return(&conn->serial));
82587 + htonl(atomic_inc_return_unchecked(&conn->serial));
82588 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
82589 sp->hdr.userStatus = 0;
82590 sp->hdr.securityIndex = conn->security_ix;
82591 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
82592 index edc026c..4bd4e2d 100644
82593 --- a/net/rxrpc/ar-peer.c
82594 +++ b/net/rxrpc/ar-peer.c
82595 @@ -86,7 +86,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
82596 INIT_LIST_HEAD(&peer->error_targets);
82597 spin_lock_init(&peer->lock);
82598 atomic_set(&peer->usage, 1);
82599 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
82600 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
82601 memcpy(&peer->srx, srx, sizeof(*srx));
82602
82603 rxrpc_assess_MTU_size(peer);
82604 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
82605 index 38047f7..9f48511 100644
82606 --- a/net/rxrpc/ar-proc.c
82607 +++ b/net/rxrpc/ar-proc.c
82608 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
82609 atomic_read(&conn->usage),
82610 rxrpc_conn_states[conn->state],
82611 key_serial(conn->key),
82612 - atomic_read(&conn->serial),
82613 - atomic_read(&conn->hi_serial));
82614 + atomic_read_unchecked(&conn->serial),
82615 + atomic_read_unchecked(&conn->hi_serial));
82616
82617 return 0;
82618 }
82619 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
82620 index 0936e1a..437c640 100644
82621 --- a/net/rxrpc/ar-transport.c
82622 +++ b/net/rxrpc/ar-transport.c
82623 @@ -46,7 +46,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
82624 spin_lock_init(&trans->client_lock);
82625 rwlock_init(&trans->conn_lock);
82626 atomic_set(&trans->usage, 1);
82627 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
82628 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
82629
82630 if (peer->srx.transport.family == AF_INET) {
82631 switch (peer->srx.transport_type) {
82632 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
82633 index 713ac59..306f6ae 100644
82634 --- a/net/rxrpc/rxkad.c
82635 +++ b/net/rxrpc/rxkad.c
82636 @@ -210,6 +210,8 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
82637 u16 check;
82638 int nsg;
82639
82640 + pax_track_stack();
82641 +
82642 sp = rxrpc_skb(skb);
82643
82644 _enter("");
82645 @@ -337,6 +339,8 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
82646 u16 check;
82647 int nsg;
82648
82649 + pax_track_stack();
82650 +
82651 _enter("");
82652
82653 sp = rxrpc_skb(skb);
82654 @@ -609,7 +613,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
82655
82656 len = iov[0].iov_len + iov[1].iov_len;
82657
82658 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
82659 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
82660 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
82661
82662 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
82663 @@ -659,7 +663,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
82664
82665 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
82666
82667 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
82668 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
82669 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
82670
82671 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
82672 diff --git a/net/sctp/auth.c b/net/sctp/auth.c
82673 index 914c419..7a16d2c 100644
82674 --- a/net/sctp/auth.c
82675 +++ b/net/sctp/auth.c
82676 @@ -81,7 +81,7 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
82677 struct sctp_auth_bytes *key;
82678
82679 /* Verify that we are not going to overflow INT_MAX */
82680 - if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes))
82681 + if (key_len > (INT_MAX - sizeof(struct sctp_auth_bytes)))
82682 return NULL;
82683
82684 /* Allocate the shared key */
82685 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
82686 index d093cbf..9fc36fc 100644
82687 --- a/net/sctp/proc.c
82688 +++ b/net/sctp/proc.c
82689 @@ -213,7 +213,12 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
82690 sctp_for_each_hentry(epb, node, &head->chain) {
82691 ep = sctp_ep(epb);
82692 sk = epb->sk;
82693 - seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
82694 + seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ",
82695 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82696 + NULL, NULL,
82697 +#else
82698 + ep, sk,
82699 +#endif
82700 sctp_sk(sk)->type, sk->sk_state, hash,
82701 epb->bind_addr.port,
82702 sock_i_uid(sk), sock_i_ino(sk));
82703 @@ -320,7 +325,12 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
82704 seq_printf(seq,
82705 "%8p %8p %-3d %-3d %-2d %-4d "
82706 "%4d %8d %8d %7d %5lu %-5d %5d ",
82707 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
82708 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82709 + NULL, NULL,
82710 +#else
82711 + assoc, sk,
82712 +#endif
82713 + sctp_sk(sk)->type, sk->sk_state,
82714 assoc->state, hash,
82715 assoc->assoc_id,
82716 assoc->sndbuf_used,
82717 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
82718 index 3a95fcb..c40fc1d 100644
82719 --- a/net/sctp/socket.c
82720 +++ b/net/sctp/socket.c
82721 @@ -5802,7 +5802,6 @@ pp_found:
82722 */
82723 int reuse = sk->sk_reuse;
82724 struct sock *sk2;
82725 - struct hlist_node *node;
82726
82727 SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
82728 if (pp->fastreuse && sk->sk_reuse &&
82729 diff --git a/net/socket.c b/net/socket.c
82730 index d449812..4ac08d3c 100644
82731 --- a/net/socket.c
82732 +++ b/net/socket.c
82733 @@ -87,6 +87,7 @@
82734 #include <linux/wireless.h>
82735 #include <linux/nsproxy.h>
82736 #include <linux/magic.h>
82737 +#include <linux/in.h>
82738
82739 #include <asm/uaccess.h>
82740 #include <asm/unistd.h>
82741 @@ -97,6 +98,21 @@
82742 #include <net/sock.h>
82743 #include <linux/netfilter.h>
82744
82745 +extern void gr_attach_curr_ip(const struct sock *sk);
82746 +extern int gr_handle_sock_all(const int family, const int type,
82747 + const int protocol);
82748 +extern int gr_handle_sock_server(const struct sockaddr *sck);
82749 +extern int gr_handle_sock_server_other(const struct sock *sck);
82750 +extern int gr_handle_sock_client(const struct sockaddr *sck);
82751 +extern int gr_search_connect(struct socket * sock,
82752 + struct sockaddr_in * addr);
82753 +extern int gr_search_bind(struct socket * sock,
82754 + struct sockaddr_in * addr);
82755 +extern int gr_search_listen(struct socket * sock);
82756 +extern int gr_search_accept(struct socket * sock);
82757 +extern int gr_search_socket(const int domain, const int type,
82758 + const int protocol);
82759 +
82760 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
82761 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
82762 unsigned long nr_segs, loff_t pos);
82763 @@ -298,7 +314,7 @@ static int sockfs_get_sb(struct file_system_type *fs_type,
82764 mnt);
82765 }
82766
82767 -static struct vfsmount *sock_mnt __read_mostly;
82768 +struct vfsmount *sock_mnt __read_mostly;
82769
82770 static struct file_system_type sock_fs_type = {
82771 .name = "sockfs",
82772 @@ -1154,6 +1170,8 @@ static int __sock_create(struct net *net, int family, int type, int protocol,
82773 return -EAFNOSUPPORT;
82774 if (type < 0 || type >= SOCK_MAX)
82775 return -EINVAL;
82776 + if (protocol < 0)
82777 + return -EINVAL;
82778
82779 /* Compatibility.
82780
82781 @@ -1283,6 +1301,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
82782 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
82783 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
82784
82785 + if(!gr_search_socket(family, type, protocol)) {
82786 + retval = -EACCES;
82787 + goto out;
82788 + }
82789 +
82790 + if (gr_handle_sock_all(family, type, protocol)) {
82791 + retval = -EACCES;
82792 + goto out;
82793 + }
82794 +
82795 retval = sock_create(family, type, protocol, &sock);
82796 if (retval < 0)
82797 goto out;
82798 @@ -1415,6 +1443,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
82799 if (sock) {
82800 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
82801 if (err >= 0) {
82802 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
82803 + err = -EACCES;
82804 + goto error;
82805 + }
82806 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
82807 + if (err)
82808 + goto error;
82809 +
82810 err = security_socket_bind(sock,
82811 (struct sockaddr *)&address,
82812 addrlen);
82813 @@ -1423,6 +1459,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
82814 (struct sockaddr *)
82815 &address, addrlen);
82816 }
82817 +error:
82818 fput_light(sock->file, fput_needed);
82819 }
82820 return err;
82821 @@ -1446,10 +1483,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
82822 if ((unsigned)backlog > somaxconn)
82823 backlog = somaxconn;
82824
82825 + if (gr_handle_sock_server_other(sock->sk)) {
82826 + err = -EPERM;
82827 + goto error;
82828 + }
82829 +
82830 + err = gr_search_listen(sock);
82831 + if (err)
82832 + goto error;
82833 +
82834 err = security_socket_listen(sock, backlog);
82835 if (!err)
82836 err = sock->ops->listen(sock, backlog);
82837
82838 +error:
82839 fput_light(sock->file, fput_needed);
82840 }
82841 return err;
82842 @@ -1492,6 +1539,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
82843 newsock->type = sock->type;
82844 newsock->ops = sock->ops;
82845
82846 + if (gr_handle_sock_server_other(sock->sk)) {
82847 + err = -EPERM;
82848 + sock_release(newsock);
82849 + goto out_put;
82850 + }
82851 +
82852 + err = gr_search_accept(sock);
82853 + if (err) {
82854 + sock_release(newsock);
82855 + goto out_put;
82856 + }
82857 +
82858 /*
82859 * We don't need try_module_get here, as the listening socket (sock)
82860 * has the protocol module (sock->ops->owner) held.
82861 @@ -1534,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
82862 fd_install(newfd, newfile);
82863 err = newfd;
82864
82865 + gr_attach_curr_ip(newsock->sk);
82866 +
82867 out_put:
82868 fput_light(sock->file, fput_needed);
82869 out:
82870 @@ -1571,6 +1632,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
82871 int, addrlen)
82872 {
82873 struct socket *sock;
82874 + struct sockaddr *sck;
82875 struct sockaddr_storage address;
82876 int err, fput_needed;
82877
82878 @@ -1581,6 +1643,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
82879 if (err < 0)
82880 goto out_put;
82881
82882 + sck = (struct sockaddr *)&address;
82883 +
82884 + if (gr_handle_sock_client(sck)) {
82885 + err = -EACCES;
82886 + goto out_put;
82887 + }
82888 +
82889 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
82890 + if (err)
82891 + goto out_put;
82892 +
82893 err =
82894 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
82895 if (err)
82896 @@ -1882,6 +1955,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
82897 int err, ctl_len, iov_size, total_len;
82898 int fput_needed;
82899
82900 + pax_track_stack();
82901 +
82902 err = -EFAULT;
82903 if (MSG_CMSG_COMPAT & flags) {
82904 if (get_compat_msghdr(&msg_sys, msg_compat))
82905 @@ -2022,7 +2097,7 @@ SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
82906 * kernel msghdr to use the kernel address space)
82907 */
82908
82909 - uaddr = (__force void __user *)msg_sys.msg_name;
82910 + uaddr = (void __force_user *)msg_sys.msg_name;
82911 uaddr_len = COMPAT_NAMELEN(msg);
82912 if (MSG_CMSG_COMPAT & flags) {
82913 err = verify_compat_iovec(&msg_sys, iov,
82914 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
82915 index ac94477..8afe5c3 100644
82916 --- a/net/sunrpc/sched.c
82917 +++ b/net/sunrpc/sched.c
82918 @@ -234,10 +234,10 @@ static int rpc_wait_bit_killable(void *word)
82919 #ifdef RPC_DEBUG
82920 static void rpc_task_set_debuginfo(struct rpc_task *task)
82921 {
82922 - static atomic_t rpc_pid;
82923 + static atomic_unchecked_t rpc_pid;
82924
82925 task->tk_magic = RPC_TASK_MAGIC_ID;
82926 - task->tk_pid = atomic_inc_return(&rpc_pid);
82927 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
82928 }
82929 #else
82930 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
82931 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
82932 index 35fb68b..236a8bf 100644
82933 --- a/net/sunrpc/xprtrdma/svc_rdma.c
82934 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
82935 @@ -59,15 +59,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
82936 static unsigned int min_max_inline = 4096;
82937 static unsigned int max_max_inline = 65536;
82938
82939 -atomic_t rdma_stat_recv;
82940 -atomic_t rdma_stat_read;
82941 -atomic_t rdma_stat_write;
82942 -atomic_t rdma_stat_sq_starve;
82943 -atomic_t rdma_stat_rq_starve;
82944 -atomic_t rdma_stat_rq_poll;
82945 -atomic_t rdma_stat_rq_prod;
82946 -atomic_t rdma_stat_sq_poll;
82947 -atomic_t rdma_stat_sq_prod;
82948 +atomic_unchecked_t rdma_stat_recv;
82949 +atomic_unchecked_t rdma_stat_read;
82950 +atomic_unchecked_t rdma_stat_write;
82951 +atomic_unchecked_t rdma_stat_sq_starve;
82952 +atomic_unchecked_t rdma_stat_rq_starve;
82953 +atomic_unchecked_t rdma_stat_rq_poll;
82954 +atomic_unchecked_t rdma_stat_rq_prod;
82955 +atomic_unchecked_t rdma_stat_sq_poll;
82956 +atomic_unchecked_t rdma_stat_sq_prod;
82957
82958 /* Temporary NFS request map and context caches */
82959 struct kmem_cache *svc_rdma_map_cachep;
82960 @@ -105,7 +105,7 @@ static int read_reset_stat(ctl_table *table, int write,
82961 len -= *ppos;
82962 if (len > *lenp)
82963 len = *lenp;
82964 - if (len && copy_to_user(buffer, str_buf, len))
82965 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
82966 return -EFAULT;
82967 *lenp = len;
82968 *ppos += len;
82969 @@ -149,63 +149,63 @@ static ctl_table svcrdma_parm_table[] = {
82970 {
82971 .procname = "rdma_stat_read",
82972 .data = &rdma_stat_read,
82973 - .maxlen = sizeof(atomic_t),
82974 + .maxlen = sizeof(atomic_unchecked_t),
82975 .mode = 0644,
82976 .proc_handler = &read_reset_stat,
82977 },
82978 {
82979 .procname = "rdma_stat_recv",
82980 .data = &rdma_stat_recv,
82981 - .maxlen = sizeof(atomic_t),
82982 + .maxlen = sizeof(atomic_unchecked_t),
82983 .mode = 0644,
82984 .proc_handler = &read_reset_stat,
82985 },
82986 {
82987 .procname = "rdma_stat_write",
82988 .data = &rdma_stat_write,
82989 - .maxlen = sizeof(atomic_t),
82990 + .maxlen = sizeof(atomic_unchecked_t),
82991 .mode = 0644,
82992 .proc_handler = &read_reset_stat,
82993 },
82994 {
82995 .procname = "rdma_stat_sq_starve",
82996 .data = &rdma_stat_sq_starve,
82997 - .maxlen = sizeof(atomic_t),
82998 + .maxlen = sizeof(atomic_unchecked_t),
82999 .mode = 0644,
83000 .proc_handler = &read_reset_stat,
83001 },
83002 {
83003 .procname = "rdma_stat_rq_starve",
83004 .data = &rdma_stat_rq_starve,
83005 - .maxlen = sizeof(atomic_t),
83006 + .maxlen = sizeof(atomic_unchecked_t),
83007 .mode = 0644,
83008 .proc_handler = &read_reset_stat,
83009 },
83010 {
83011 .procname = "rdma_stat_rq_poll",
83012 .data = &rdma_stat_rq_poll,
83013 - .maxlen = sizeof(atomic_t),
83014 + .maxlen = sizeof(atomic_unchecked_t),
83015 .mode = 0644,
83016 .proc_handler = &read_reset_stat,
83017 },
83018 {
83019 .procname = "rdma_stat_rq_prod",
83020 .data = &rdma_stat_rq_prod,
83021 - .maxlen = sizeof(atomic_t),
83022 + .maxlen = sizeof(atomic_unchecked_t),
83023 .mode = 0644,
83024 .proc_handler = &read_reset_stat,
83025 },
83026 {
83027 .procname = "rdma_stat_sq_poll",
83028 .data = &rdma_stat_sq_poll,
83029 - .maxlen = sizeof(atomic_t),
83030 + .maxlen = sizeof(atomic_unchecked_t),
83031 .mode = 0644,
83032 .proc_handler = &read_reset_stat,
83033 },
83034 {
83035 .procname = "rdma_stat_sq_prod",
83036 .data = &rdma_stat_sq_prod,
83037 - .maxlen = sizeof(atomic_t),
83038 + .maxlen = sizeof(atomic_unchecked_t),
83039 .mode = 0644,
83040 .proc_handler = &read_reset_stat,
83041 },
83042 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
83043 index 9e88438..8ed5cf0 100644
83044 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
83045 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
83046 @@ -495,7 +495,7 @@ next_sge:
83047 svc_rdma_put_context(ctxt, 0);
83048 goto out;
83049 }
83050 - atomic_inc(&rdma_stat_read);
83051 + atomic_inc_unchecked(&rdma_stat_read);
83052
83053 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
83054 chl_map->ch[ch_no].count -= read_wr.num_sge;
83055 @@ -606,7 +606,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
83056 dto_q);
83057 list_del_init(&ctxt->dto_q);
83058 } else {
83059 - atomic_inc(&rdma_stat_rq_starve);
83060 + atomic_inc_unchecked(&rdma_stat_rq_starve);
83061 clear_bit(XPT_DATA, &xprt->xpt_flags);
83062 ctxt = NULL;
83063 }
83064 @@ -626,7 +626,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
83065 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
83066 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
83067 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
83068 - atomic_inc(&rdma_stat_recv);
83069 + atomic_inc_unchecked(&rdma_stat_recv);
83070
83071 /* Build up the XDR from the receive buffers. */
83072 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
83073 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
83074 index f11be72..7aad4e8 100644
83075 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
83076 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
83077 @@ -328,7 +328,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
83078 write_wr.wr.rdma.remote_addr = to;
83079
83080 /* Post It */
83081 - atomic_inc(&rdma_stat_write);
83082 + atomic_inc_unchecked(&rdma_stat_write);
83083 if (svc_rdma_send(xprt, &write_wr))
83084 goto err;
83085 return 0;
83086 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
83087 index 3fa5751..030ba89 100644
83088 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
83089 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
83090 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
83091 return;
83092
83093 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
83094 - atomic_inc(&rdma_stat_rq_poll);
83095 + atomic_inc_unchecked(&rdma_stat_rq_poll);
83096
83097 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
83098 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
83099 @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
83100 }
83101
83102 if (ctxt)
83103 - atomic_inc(&rdma_stat_rq_prod);
83104 + atomic_inc_unchecked(&rdma_stat_rq_prod);
83105
83106 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
83107 /*
83108 @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
83109 return;
83110
83111 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
83112 - atomic_inc(&rdma_stat_sq_poll);
83113 + atomic_inc_unchecked(&rdma_stat_sq_poll);
83114 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
83115 if (wc.status != IB_WC_SUCCESS)
83116 /* Close the transport */
83117 @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
83118 }
83119
83120 if (ctxt)
83121 - atomic_inc(&rdma_stat_sq_prod);
83122 + atomic_inc_unchecked(&rdma_stat_sq_prod);
83123 }
83124
83125 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
83126 @@ -1260,7 +1260,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
83127 spin_lock_bh(&xprt->sc_lock);
83128 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
83129 spin_unlock_bh(&xprt->sc_lock);
83130 - atomic_inc(&rdma_stat_sq_starve);
83131 + atomic_inc_unchecked(&rdma_stat_sq_starve);
83132
83133 /* See if we can opportunistically reap SQ WR to make room */
83134 sq_cq_reap(xprt);
83135 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
83136 index 0b15d72..7934fbb 100644
83137 --- a/net/sysctl_net.c
83138 +++ b/net/sysctl_net.c
83139 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
83140 struct ctl_table *table)
83141 {
83142 /* Allow network administrator to have same access as root. */
83143 - if (capable(CAP_NET_ADMIN)) {
83144 + if (capable_nolog(CAP_NET_ADMIN)) {
83145 int mode = (table->mode >> 6) & 7;
83146 return (mode << 6) | (mode << 3) | mode;
83147 }
83148 diff --git a/net/tipc/link.c b/net/tipc/link.c
83149 index dd4c18b..f40d38d 100644
83150 --- a/net/tipc/link.c
83151 +++ b/net/tipc/link.c
83152 @@ -1418,7 +1418,7 @@ again:
83153
83154 if (!sect_rest) {
83155 sect_rest = msg_sect[++curr_sect].iov_len;
83156 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
83157 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
83158 }
83159
83160 if (sect_rest < fragm_rest)
83161 @@ -1437,7 +1437,7 @@ error:
83162 }
83163 } else
83164 skb_copy_to_linear_data_offset(buf, fragm_crs,
83165 - sect_crs, sz);
83166 + (const void __force_kernel *)sect_crs, sz);
83167 sect_crs += sz;
83168 sect_rest -= sz;
83169 fragm_crs += sz;
83170 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
83171 index 0747d8a..e8bf3f3 100644
83172 --- a/net/tipc/subscr.c
83173 +++ b/net/tipc/subscr.c
83174 @@ -104,7 +104,7 @@ static void subscr_send_event(struct subscription *sub,
83175 {
83176 struct iovec msg_sect;
83177
83178 - msg_sect.iov_base = (void *)&sub->evt;
83179 + msg_sect.iov_base = (void __force_user *)&sub->evt;
83180 msg_sect.iov_len = sizeof(struct tipc_event);
83181
83182 sub->evt.event = htohl(event, sub->swap);
83183 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
83184 index db8d51a..608692d 100644
83185 --- a/net/unix/af_unix.c
83186 +++ b/net/unix/af_unix.c
83187 @@ -745,6 +745,12 @@ static struct sock *unix_find_other(struct net *net,
83188 err = -ECONNREFUSED;
83189 if (!S_ISSOCK(inode->i_mode))
83190 goto put_fail;
83191 +
83192 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
83193 + err = -EACCES;
83194 + goto put_fail;
83195 + }
83196 +
83197 u = unix_find_socket_byinode(net, inode);
83198 if (!u)
83199 goto put_fail;
83200 @@ -765,6 +771,13 @@ static struct sock *unix_find_other(struct net *net,
83201 if (u) {
83202 struct dentry *dentry;
83203 dentry = unix_sk(u)->dentry;
83204 +
83205 + if (!gr_handle_chroot_unix(u->sk_peercred.pid)) {
83206 + err = -EPERM;
83207 + sock_put(u);
83208 + goto fail;
83209 + }
83210 +
83211 if (dentry)
83212 touch_atime(unix_sk(u)->mnt, dentry);
83213 } else
83214 @@ -850,11 +863,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
83215 err = security_path_mknod(&nd.path, dentry, mode, 0);
83216 if (err)
83217 goto out_mknod_drop_write;
83218 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
83219 + err = -EACCES;
83220 + goto out_mknod_drop_write;
83221 + }
83222 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
83223 out_mknod_drop_write:
83224 mnt_drop_write(nd.path.mnt);
83225 if (err)
83226 goto out_mknod_dput;
83227 +
83228 + gr_handle_create(dentry, nd.path.mnt);
83229 +
83230 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
83231 dput(nd.path.dentry);
83232 nd.path.dentry = dentry;
83233 @@ -2211,7 +2231,11 @@ static int unix_seq_show(struct seq_file *seq, void *v)
83234 unix_state_lock(s);
83235
83236 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
83237 +#ifdef CONFIG_GRKERNSEC_HIDESYM
83238 + NULL,
83239 +#else
83240 s,
83241 +#endif
83242 atomic_read(&s->sk_refcnt),
83243 0,
83244 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
83245 diff --git a/net/wireless/core.h b/net/wireless/core.h
83246 index 376798f..109a61f 100644
83247 --- a/net/wireless/core.h
83248 +++ b/net/wireless/core.h
83249 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
83250 struct mutex mtx;
83251
83252 /* rfkill support */
83253 - struct rfkill_ops rfkill_ops;
83254 + rfkill_ops_no_const rfkill_ops;
83255 struct rfkill *rfkill;
83256 struct work_struct rfkill_sync;
83257
83258 diff --git a/net/wireless/wext.c b/net/wireless/wext.c
83259 index a2e4c60..0979cbe 100644
83260 --- a/net/wireless/wext.c
83261 +++ b/net/wireless/wext.c
83262 @@ -816,8 +816,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
83263 */
83264
83265 /* Support for very large requests */
83266 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
83267 - (user_length > descr->max_tokens)) {
83268 + if (user_length > descr->max_tokens) {
83269 /* Allow userspace to GET more than max so
83270 * we can support any size GET requests.
83271 * There is still a limit : -ENOMEM.
83272 @@ -854,22 +853,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
83273 }
83274 }
83275
83276 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
83277 - /*
83278 - * If this is a GET, but not NOMAX, it means that the extra
83279 - * data is not bounded by userspace, but by max_tokens. Thus
83280 - * set the length to max_tokens. This matches the extra data
83281 - * allocation.
83282 - * The driver should fill it with the number of tokens it
83283 - * provided, and it may check iwp->length rather than having
83284 - * knowledge of max_tokens. If the driver doesn't change the
83285 - * iwp->length, this ioctl just copies back max_token tokens
83286 - * filled with zeroes. Hopefully the driver isn't claiming
83287 - * them to be valid data.
83288 - */
83289 - iwp->length = descr->max_tokens;
83290 - }
83291 -
83292 err = handler(dev, info, (union iwreq_data *) iwp, extra);
83293
83294 iwp->length += essid_compat;
83295 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
83296 index cb81ca3..e15d49a 100644
83297 --- a/net/xfrm/xfrm_policy.c
83298 +++ b/net/xfrm/xfrm_policy.c
83299 @@ -586,7 +586,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
83300 hlist_add_head(&policy->bydst, chain);
83301 xfrm_pol_hold(policy);
83302 net->xfrm.policy_count[dir]++;
83303 - atomic_inc(&flow_cache_genid);
83304 + atomic_inc_unchecked(&flow_cache_genid);
83305 if (delpol)
83306 __xfrm_policy_unlink(delpol, dir);
83307 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
83308 @@ -669,7 +669,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u8 type, int dir,
83309 write_unlock_bh(&xfrm_policy_lock);
83310
83311 if (ret && delete) {
83312 - atomic_inc(&flow_cache_genid);
83313 + atomic_inc_unchecked(&flow_cache_genid);
83314 xfrm_policy_kill(ret);
83315 }
83316 return ret;
83317 @@ -710,7 +710,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u8 type, int dir, u32 id,
83318 write_unlock_bh(&xfrm_policy_lock);
83319
83320 if (ret && delete) {
83321 - atomic_inc(&flow_cache_genid);
83322 + atomic_inc_unchecked(&flow_cache_genid);
83323 xfrm_policy_kill(ret);
83324 }
83325 return ret;
83326 @@ -824,7 +824,7 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
83327 }
83328
83329 }
83330 - atomic_inc(&flow_cache_genid);
83331 + atomic_inc_unchecked(&flow_cache_genid);
83332 out:
83333 write_unlock_bh(&xfrm_policy_lock);
83334 return err;
83335 @@ -1088,7 +1088,7 @@ int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
83336 write_unlock_bh(&xfrm_policy_lock);
83337 if (pol) {
83338 if (dir < XFRM_POLICY_MAX)
83339 - atomic_inc(&flow_cache_genid);
83340 + atomic_inc_unchecked(&flow_cache_genid);
83341 xfrm_policy_kill(pol);
83342 return 0;
83343 }
83344 @@ -1477,7 +1477,7 @@ free_dst:
83345 goto out;
83346 }
83347
83348 -static int inline
83349 +static inline int
83350 xfrm_dst_alloc_copy(void **target, void *src, int size)
83351 {
83352 if (!*target) {
83353 @@ -1489,7 +1489,7 @@ xfrm_dst_alloc_copy(void **target, void *src, int size)
83354 return 0;
83355 }
83356
83357 -static int inline
83358 +static inline int
83359 xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
83360 {
83361 #ifdef CONFIG_XFRM_SUB_POLICY
83362 @@ -1501,7 +1501,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
83363 #endif
83364 }
83365
83366 -static int inline
83367 +static inline int
83368 xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
83369 {
83370 #ifdef CONFIG_XFRM_SUB_POLICY
83371 @@ -1537,7 +1537,7 @@ int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl,
83372 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
83373
83374 restart:
83375 - genid = atomic_read(&flow_cache_genid);
83376 + genid = atomic_read_unchecked(&flow_cache_genid);
83377 policy = NULL;
83378 for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
83379 pols[pi] = NULL;
83380 @@ -1680,7 +1680,7 @@ restart:
83381 goto error;
83382 }
83383 if (nx == -EAGAIN ||
83384 - genid != atomic_read(&flow_cache_genid)) {
83385 + genid != atomic_read_unchecked(&flow_cache_genid)) {
83386 xfrm_pols_put(pols, npols);
83387 goto restart;
83388 }
83389 diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
83390 index b95a2d6..85c4d78 100644
83391 --- a/net/xfrm/xfrm_user.c
83392 +++ b/net/xfrm/xfrm_user.c
83393 @@ -1169,6 +1169,8 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
83394 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
83395 int i;
83396
83397 + pax_track_stack();
83398 +
83399 if (xp->xfrm_nr == 0)
83400 return 0;
83401
83402 @@ -1784,6 +1786,8 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
83403 int err;
83404 int n = 0;
83405
83406 + pax_track_stack();
83407 +
83408 if (attrs[XFRMA_MIGRATE] == NULL)
83409 return -EINVAL;
83410
83411 diff --git a/samples/kobject/kset-example.c b/samples/kobject/kset-example.c
83412 index 45b7d56..19e828c 100644
83413 --- a/samples/kobject/kset-example.c
83414 +++ b/samples/kobject/kset-example.c
83415 @@ -87,7 +87,7 @@ static ssize_t foo_attr_store(struct kobject *kobj,
83416 }
83417
83418 /* Our custom sysfs_ops that we will associate with our ktype later on */
83419 -static struct sysfs_ops foo_sysfs_ops = {
83420 +static const struct sysfs_ops foo_sysfs_ops = {
83421 .show = foo_attr_show,
83422 .store = foo_attr_store,
83423 };
83424 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
83425 index 341b589..405aed3 100644
83426 --- a/scripts/Makefile.build
83427 +++ b/scripts/Makefile.build
83428 @@ -59,7 +59,7 @@ endif
83429 endif
83430
83431 # Do not include host rules unless needed
83432 -ifneq ($(hostprogs-y)$(hostprogs-m),)
83433 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
83434 include scripts/Makefile.host
83435 endif
83436
83437 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
83438 index 6f89fbb..53adc9c 100644
83439 --- a/scripts/Makefile.clean
83440 +++ b/scripts/Makefile.clean
83441 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
83442 __clean-files := $(extra-y) $(always) \
83443 $(targets) $(clean-files) \
83444 $(host-progs) \
83445 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
83446 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
83447 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
83448
83449 # as clean-files is given relative to the current directory, this adds
83450 # a $(obj) prefix, except for absolute paths
83451 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
83452 index 1ac414f..a1c1451 100644
83453 --- a/scripts/Makefile.host
83454 +++ b/scripts/Makefile.host
83455 @@ -31,6 +31,7 @@
83456 # Note: Shared libraries consisting of C++ files are not supported
83457
83458 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
83459 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
83460
83461 # C code
83462 # Executables compiled from a single .c file
83463 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
83464 # Shared libaries (only .c supported)
83465 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
83466 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
83467 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
83468 # Remove .so files from "xxx-objs"
83469 host-cobjs := $(filter-out %.so,$(host-cobjs))
83470
83471 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
83472 index 6bf21f8..c0546b3 100644
83473 --- a/scripts/basic/fixdep.c
83474 +++ b/scripts/basic/fixdep.c
83475 @@ -162,7 +162,7 @@ static void grow_config(int len)
83476 /*
83477 * Lookup a value in the configuration string.
83478 */
83479 -static int is_defined_config(const char * name, int len)
83480 +static int is_defined_config(const char * name, unsigned int len)
83481 {
83482 const char * pconfig;
83483 const char * plast = str_config + len_config - len;
83484 @@ -199,7 +199,7 @@ static void clear_config(void)
83485 /*
83486 * Record the use of a CONFIG_* word.
83487 */
83488 -static void use_config(char *m, int slen)
83489 +static void use_config(char *m, unsigned int slen)
83490 {
83491 char s[PATH_MAX];
83492 char *p;
83493 @@ -222,9 +222,9 @@ static void use_config(char *m, int slen)
83494
83495 static void parse_config_file(char *map, size_t len)
83496 {
83497 - int *end = (int *) (map + len);
83498 + unsigned int *end = (unsigned int *) (map + len);
83499 /* start at +1, so that p can never be < map */
83500 - int *m = (int *) map + 1;
83501 + unsigned int *m = (unsigned int *) map + 1;
83502 char *p, *q;
83503
83504 for (; m < end; m++) {
83505 @@ -371,7 +371,7 @@ static void print_deps(void)
83506 static void traps(void)
83507 {
83508 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
83509 - int *p = (int *)test;
83510 + unsigned int *p = (unsigned int *)test;
83511
83512 if (*p != INT_CONF) {
83513 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
83514 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
83515 new file mode 100644
83516 index 0000000..8729101
83517 --- /dev/null
83518 +++ b/scripts/gcc-plugin.sh
83519 @@ -0,0 +1,2 @@
83520 +#!/bin/sh
83521 +echo -e "#include \"gcc-plugin.h\"\n#include \"tree.h\"\n#include \"tm.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
83522 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
83523 index 62a9025..65b82ad 100644
83524 --- a/scripts/mod/file2alias.c
83525 +++ b/scripts/mod/file2alias.c
83526 @@ -72,7 +72,7 @@ static void device_id_check(const char *modname, const char *device_id,
83527 unsigned long size, unsigned long id_size,
83528 void *symval)
83529 {
83530 - int i;
83531 + unsigned int i;
83532
83533 if (size % id_size || size < id_size) {
83534 if (cross_build != 0)
83535 @@ -102,7 +102,7 @@ static void device_id_check(const char *modname, const char *device_id,
83536 /* USB is special because the bcdDevice can be matched against a numeric range */
83537 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
83538 static void do_usb_entry(struct usb_device_id *id,
83539 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
83540 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
83541 unsigned char range_lo, unsigned char range_hi,
83542 struct module *mod)
83543 {
83544 @@ -151,7 +151,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
83545 {
83546 unsigned int devlo, devhi;
83547 unsigned char chi, clo;
83548 - int ndigits;
83549 + unsigned int ndigits;
83550
83551 id->match_flags = TO_NATIVE(id->match_flags);
83552 id->idVendor = TO_NATIVE(id->idVendor);
83553 @@ -368,7 +368,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
83554 for (i = 0; i < count; i++) {
83555 const char *id = (char *)devs[i].id;
83556 char acpi_id[sizeof(devs[0].id)];
83557 - int j;
83558 + unsigned int j;
83559
83560 buf_printf(&mod->dev_table_buf,
83561 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
83562 @@ -398,7 +398,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
83563
83564 for (j = 0; j < PNP_MAX_DEVICES; j++) {
83565 const char *id = (char *)card->devs[j].id;
83566 - int i2, j2;
83567 + unsigned int i2, j2;
83568 int dup = 0;
83569
83570 if (!id[0])
83571 @@ -424,7 +424,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
83572 /* add an individual alias for every device entry */
83573 if (!dup) {
83574 char acpi_id[sizeof(card->devs[0].id)];
83575 - int k;
83576 + unsigned int k;
83577
83578 buf_printf(&mod->dev_table_buf,
83579 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
83580 @@ -699,7 +699,7 @@ static void dmi_ascii_filter(char *d, const char *s)
83581 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
83582 char *alias)
83583 {
83584 - int i, j;
83585 + unsigned int i, j;
83586
83587 sprintf(alias, "dmi*");
83588
83589 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
83590 index 03efeab..0888989 100644
83591 --- a/scripts/mod/modpost.c
83592 +++ b/scripts/mod/modpost.c
83593 @@ -835,6 +835,7 @@ enum mismatch {
83594 INIT_TO_EXIT,
83595 EXIT_TO_INIT,
83596 EXPORT_TO_INIT_EXIT,
83597 + DATA_TO_TEXT
83598 };
83599
83600 struct sectioncheck {
83601 @@ -920,6 +921,12 @@ const struct sectioncheck sectioncheck[] = {
83602 .fromsec = { "__ksymtab*", NULL },
83603 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
83604 .mismatch = EXPORT_TO_INIT_EXIT
83605 +},
83606 +/* Do not reference code from writable data */
83607 +{
83608 + .fromsec = { DATA_SECTIONS, NULL },
83609 + .tosec = { TEXT_SECTIONS, NULL },
83610 + .mismatch = DATA_TO_TEXT
83611 }
83612 };
83613
83614 @@ -1024,10 +1031,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
83615 continue;
83616 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
83617 continue;
83618 - if (sym->st_value == addr)
83619 - return sym;
83620 /* Find a symbol nearby - addr are maybe negative */
83621 d = sym->st_value - addr;
83622 + if (d == 0)
83623 + return sym;
83624 if (d < 0)
83625 d = addr - sym->st_value;
83626 if (d < distance) {
83627 @@ -1268,6 +1275,14 @@ static void report_sec_mismatch(const char *modname, enum mismatch mismatch,
83628 "Fix this by removing the %sannotation of %s "
83629 "or drop the export.\n",
83630 tosym, sec2annotation(tosec), sec2annotation(tosec), tosym);
83631 + case DATA_TO_TEXT:
83632 +/*
83633 + fprintf(stderr,
83634 + "The variable %s references\n"
83635 + "the %s %s%s%s\n",
83636 + fromsym, to, sec2annotation(tosec), tosym, to_p);
83637 +*/
83638 + break;
83639 case NO_MISMATCH:
83640 /* To get warnings on missing members */
83641 break;
83642 @@ -1495,7 +1510,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
83643 static void check_sec_ref(struct module *mod, const char *modname,
83644 struct elf_info *elf)
83645 {
83646 - int i;
83647 + unsigned int i;
83648 Elf_Shdr *sechdrs = elf->sechdrs;
83649
83650 /* Walk through all sections */
83651 @@ -1651,7 +1666,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
83652 va_end(ap);
83653 }
83654
83655 -void buf_write(struct buffer *buf, const char *s, int len)
83656 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
83657 {
83658 if (buf->size - buf->pos < len) {
83659 buf->size += len + SZ;
83660 @@ -1863,7 +1878,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
83661 if (fstat(fileno(file), &st) < 0)
83662 goto close_write;
83663
83664 - if (st.st_size != b->pos)
83665 + if (st.st_size != (off_t)b->pos)
83666 goto close_write;
83667
83668 tmp = NOFAIL(malloc(b->pos));
83669 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
83670 index 09f58e3..4b66092 100644
83671 --- a/scripts/mod/modpost.h
83672 +++ b/scripts/mod/modpost.h
83673 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
83674
83675 struct buffer {
83676 char *p;
83677 - int pos;
83678 - int size;
83679 + unsigned int pos;
83680 + unsigned int size;
83681 };
83682
83683 void __attribute__((format(printf, 2, 3)))
83684 buf_printf(struct buffer *buf, const char *fmt, ...);
83685
83686 void
83687 -buf_write(struct buffer *buf, const char *s, int len);
83688 +buf_write(struct buffer *buf, const char *s, unsigned int len);
83689
83690 struct module {
83691 struct module *next;
83692 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
83693 index ecf9c7d..d52b38e 100644
83694 --- a/scripts/mod/sumversion.c
83695 +++ b/scripts/mod/sumversion.c
83696 @@ -455,7 +455,7 @@ static void write_version(const char *filename, const char *sum,
83697 goto out;
83698 }
83699
83700 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
83701 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
83702 warn("writing sum in %s failed: %s\n",
83703 filename, strerror(errno));
83704 goto out;
83705 diff --git a/scripts/package/mkspec b/scripts/package/mkspec
83706 index 47bdd2f..d4d4e93 100755
83707 --- a/scripts/package/mkspec
83708 +++ b/scripts/package/mkspec
83709 @@ -70,7 +70,7 @@ echo 'mkdir -p $RPM_BUILD_ROOT/boot $RPM_BUILD_ROOT/lib/modules'
83710 echo 'mkdir -p $RPM_BUILD_ROOT/lib/firmware'
83711 echo "%endif"
83712
83713 -echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{_smp_mflags} KBUILD_SRC= modules_install'
83714 +echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{?_smp_mflags} KBUILD_SRC= modules_install'
83715 echo "%ifarch ia64"
83716 echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/efi/vmlinuz-$KERNELRELEASE"
83717 echo 'ln -s '"efi/vmlinuz-$KERNELRELEASE" '$RPM_BUILD_ROOT'"/boot/"
83718 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
83719 index 5c11312..72742b5 100644
83720 --- a/scripts/pnmtologo.c
83721 +++ b/scripts/pnmtologo.c
83722 @@ -237,14 +237,14 @@ static void write_header(void)
83723 fprintf(out, " * Linux logo %s\n", logoname);
83724 fputs(" */\n\n", out);
83725 fputs("#include <linux/linux_logo.h>\n\n", out);
83726 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
83727 + fprintf(out, "static unsigned char %s_data[] = {\n",
83728 logoname);
83729 }
83730
83731 static void write_footer(void)
83732 {
83733 fputs("\n};\n\n", out);
83734 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
83735 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
83736 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
83737 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
83738 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
83739 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
83740 fputs("\n};\n\n", out);
83741
83742 /* write logo clut */
83743 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
83744 + fprintf(out, "static unsigned char %s_clut[] = {\n",
83745 logoname);
83746 write_hex_cnt = 0;
83747 for (i = 0; i < logo_clutsize; i++) {
83748 diff --git a/scripts/tags.sh b/scripts/tags.sh
83749 index d52f7a0..269eb1b 100755
83750 --- a/scripts/tags.sh
83751 +++ b/scripts/tags.sh
83752 @@ -93,6 +93,11 @@ docscope()
83753 cscope -b -f cscope.out
83754 }
83755
83756 +dogtags()
83757 +{
83758 + all_sources | gtags -f -
83759 +}
83760 +
83761 exuberant()
83762 {
83763 all_sources | xargs $1 -a \
83764 @@ -164,6 +169,10 @@ case "$1" in
83765 docscope
83766 ;;
83767
83768 + "gtags")
83769 + dogtags
83770 + ;;
83771 +
83772 "tags")
83773 rm -f tags
83774 xtags ctags
83775 diff --git a/security/Kconfig b/security/Kconfig
83776 index fb363cd..4d21be6 100644
83777 --- a/security/Kconfig
83778 +++ b/security/Kconfig
83779 @@ -4,6 +4,627 @@
83780
83781 menu "Security options"
83782
83783 +source grsecurity/Kconfig
83784 +
83785 +menu "PaX"
83786 +
83787 + config ARCH_TRACK_EXEC_LIMIT
83788 + bool
83789 +
83790 + config PAX_KERNEXEC_PLUGIN
83791 + bool
83792 +
83793 + config PAX_PER_CPU_PGD
83794 + bool
83795 +
83796 + config TASK_SIZE_MAX_SHIFT
83797 + int
83798 + depends on X86_64
83799 + default 47 if !PAX_PER_CPU_PGD
83800 + default 42 if PAX_PER_CPU_PGD
83801 +
83802 + config PAX_ENABLE_PAE
83803 + bool
83804 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
83805 +
83806 +config PAX
83807 + bool "Enable various PaX features"
83808 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
83809 + help
83810 + This allows you to enable various PaX features. PaX adds
83811 + intrusion prevention mechanisms to the kernel that reduce
83812 + the risks posed by exploitable memory corruption bugs.
83813 +
83814 +menu "PaX Control"
83815 + depends on PAX
83816 +
83817 +config PAX_SOFTMODE
83818 + bool 'Support soft mode'
83819 + help
83820 + Enabling this option will allow you to run PaX in soft mode, that
83821 + is, PaX features will not be enforced by default, only on executables
83822 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
83823 + support as they are the only way to mark executables for soft mode use.
83824 +
83825 + Soft mode can be activated by using the "pax_softmode=1" kernel command
83826 + line option on boot. Furthermore you can control various PaX features
83827 + at runtime via the entries in /proc/sys/kernel/pax.
83828 +
83829 +config PAX_EI_PAX
83830 + bool 'Use legacy ELF header marking'
83831 + help
83832 + Enabling this option will allow you to control PaX features on
83833 + a per executable basis via the 'chpax' utility available at
83834 + http://pax.grsecurity.net/. The control flags will be read from
83835 + an otherwise reserved part of the ELF header. This marking has
83836 + numerous drawbacks (no support for soft-mode, toolchain does not
83837 + know about the non-standard use of the ELF header) therefore it
83838 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
83839 + support.
83840 +
83841 + If you have applications not marked by the PT_PAX_FLAGS ELF program
83842 + header and you cannot use XATTR_PAX_FLAGS then you MUST enable this
83843 + option otherwise they will not get any protection.
83844 +
83845 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
83846 + support as well, they will override the legacy EI_PAX marks.
83847 +
83848 +config PAX_PT_PAX_FLAGS
83849 + bool 'Use ELF program header marking'
83850 + help
83851 + Enabling this option will allow you to control PaX features on
83852 + a per executable basis via the 'paxctl' utility available at
83853 + http://pax.grsecurity.net/. The control flags will be read from
83854 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
83855 + has the benefits of supporting both soft mode and being fully
83856 + integrated into the toolchain (the binutils patch is available
83857 + from http://pax.grsecurity.net).
83858 +
83859 + If you have applications not marked by the PT_PAX_FLAGS ELF program
83860 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
83861 + support otherwise they will not get any protection.
83862 +
83863 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
83864 + must make sure that the marks are the same if a binary has both marks.
83865 +
83866 + Note that if you enable the legacy EI_PAX marking support as well,
83867 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
83868 +
83869 +config PAX_XATTR_PAX_FLAGS
83870 + bool 'Use filesystem extended attributes marking'
83871 + depends on EXPERT
83872 + select CIFS_XATTR if CIFS
83873 + select EXT2_FS_XATTR if EXT2_FS
83874 + select EXT3_FS_XATTR if EXT3_FS
83875 + select EXT4_FS_XATTR if EXT4_FS
83876 + select JFFS2_FS_XATTR if JFFS2_FS
83877 + select REISERFS_FS_XATTR if REISERFS_FS
83878 + select SQUASHFS_XATTR if SQUASHFS
83879 + select TMPFS_XATTR if TMPFS
83880 + select UBIFS_FS_XATTR if UBIFS_FS
83881 + help
83882 + Enabling this option will allow you to control PaX features on
83883 + a per executable basis via the 'setfattr' utility. The control
83884 + flags will be read from the user.pax.flags extended attribute of
83885 + the file. This marking has the benefit of supporting binary-only
83886 + applications that self-check themselves (e.g., skype) and would
83887 + not tolerate chpax/paxctl changes. The main drawback is that
83888 + extended attributes are not supported by some filesystems (e.g.,
83889 + isofs, udf, vfat) so copying files through such filesystems will
83890 + lose the extended attributes and these PaX markings.
83891 +
83892 + If you have applications not marked by the PT_PAX_FLAGS ELF program
83893 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
83894 + support otherwise they will not get any protection.
83895 +
83896 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
83897 + must make sure that the marks are the same if a binary has both marks.
83898 +
83899 + Note that if you enable the legacy EI_PAX marking support as well,
83900 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
83901 +
83902 +choice
83903 + prompt 'MAC system integration'
83904 + default PAX_HAVE_ACL_FLAGS
83905 + help
83906 + Mandatory Access Control systems have the option of controlling
83907 + PaX flags on a per executable basis, choose the method supported
83908 + by your particular system.
83909 +
83910 + - "none": if your MAC system does not interact with PaX,
83911 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
83912 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
83913 +
83914 + NOTE: this option is for developers/integrators only.
83915 +
83916 + config PAX_NO_ACL_FLAGS
83917 + bool 'none'
83918 +
83919 + config PAX_HAVE_ACL_FLAGS
83920 + bool 'direct'
83921 +
83922 + config PAX_HOOK_ACL_FLAGS
83923 + bool 'hook'
83924 +endchoice
83925 +
83926 +endmenu
83927 +
83928 +menu "Non-executable pages"
83929 + depends on PAX
83930 +
83931 +config PAX_NOEXEC
83932 + bool "Enforce non-executable pages"
83933 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
83934 + help
83935 + By design some architectures do not allow for protecting memory
83936 + pages against execution or even if they do, Linux does not make
83937 + use of this feature. In practice this means that if a page is
83938 + readable (such as the stack or heap) it is also executable.
83939 +
83940 + There is a well known exploit technique that makes use of this
83941 + fact and a common programming mistake where an attacker can
83942 + introduce code of his choice somewhere in the attacked program's
83943 + memory (typically the stack or the heap) and then execute it.
83944 +
83945 + If the attacked program was running with different (typically
83946 + higher) privileges than that of the attacker, then he can elevate
83947 + his own privilege level (e.g. get a root shell, write to files for
83948 + which he does not have write access to, etc).
83949 +
83950 + Enabling this option will let you choose from various features
83951 + that prevent the injection and execution of 'foreign' code in
83952 + a program.
83953 +
83954 + This will also break programs that rely on the old behaviour and
83955 + expect that dynamically allocated memory via the malloc() family
83956 + of functions is executable (which it is not). Notable examples
83957 + are the XFree86 4.x server, the java runtime and wine.
83958 +
83959 +config PAX_PAGEEXEC
83960 + bool "Paging based non-executable pages"
83961 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
83962 + select S390_SWITCH_AMODE if S390
83963 + select S390_EXEC_PROTECT if S390
83964 + select ARCH_TRACK_EXEC_LIMIT if X86_32
83965 + help
83966 + This implementation is based on the paging feature of the CPU.
83967 + On i386 without hardware non-executable bit support there is a
83968 + variable but usually low performance impact, however on Intel's
83969 + P4 core based CPUs it is very high so you should not enable this
83970 + for kernels meant to be used on such CPUs.
83971 +
83972 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
83973 + with hardware non-executable bit support there is no performance
83974 + impact, on ppc the impact is negligible.
83975 +
83976 + Note that several architectures require various emulations due to
83977 + badly designed userland ABIs, this will cause a performance impact
83978 + but will disappear as soon as userland is fixed. For example, ppc
83979 + userland MUST have been built with secure-plt by a recent toolchain.
83980 +
83981 +config PAX_SEGMEXEC
83982 + bool "Segmentation based non-executable pages"
83983 + depends on PAX_NOEXEC && X86_32
83984 + help
83985 + This implementation is based on the segmentation feature of the
83986 + CPU and has a very small performance impact, however applications
83987 + will be limited to a 1.5 GB address space instead of the normal
83988 + 3 GB.
83989 +
83990 +config PAX_EMUTRAMP
83991 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
83992 + default y if PARISC
83993 + help
83994 + There are some programs and libraries that for one reason or
83995 + another attempt to execute special small code snippets from
83996 + non-executable memory pages. Most notable examples are the
83997 + signal handler return code generated by the kernel itself and
83998 + the GCC trampolines.
83999 +
84000 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
84001 + such programs will no longer work under your kernel.
84002 +
84003 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
84004 + utilities to enable trampoline emulation for the affected programs
84005 + yet still have the protection provided by the non-executable pages.
84006 +
84007 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
84008 + your system will not even boot.
84009 +
84010 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
84011 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
84012 + for the affected files.
84013 +
84014 + NOTE: enabling this feature *may* open up a loophole in the
84015 + protection provided by non-executable pages that an attacker
84016 + could abuse. Therefore the best solution is to not have any
84017 + files on your system that would require this option. This can
84018 + be achieved by not using libc5 (which relies on the kernel
84019 + signal handler return code) and not using or rewriting programs
84020 + that make use of the nested function implementation of GCC.
84021 + Skilled users can just fix GCC itself so that it implements
84022 + nested function calls in a way that does not interfere with PaX.
84023 +
84024 +config PAX_EMUSIGRT
84025 + bool "Automatically emulate sigreturn trampolines"
84026 + depends on PAX_EMUTRAMP && PARISC
84027 + default y
84028 + help
84029 + Enabling this option will have the kernel automatically detect
84030 + and emulate signal return trampolines executing on the stack
84031 + that would otherwise lead to task termination.
84032 +
84033 + This solution is intended as a temporary one for users with
84034 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
84035 + Modula-3 runtime, etc) or executables linked to such, basically
84036 + everything that does not specify its own SA_RESTORER function in
84037 + normal executable memory like glibc 2.1+ does.
84038 +
84039 + On parisc you MUST enable this option, otherwise your system will
84040 + not even boot.
84041 +
84042 + NOTE: this feature cannot be disabled on a per executable basis
84043 + and since it *does* open up a loophole in the protection provided
84044 + by non-executable pages, the best solution is to not have any
84045 + files on your system that would require this option.
84046 +
84047 +config PAX_MPROTECT
84048 + bool "Restrict mprotect()"
84049 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
84050 + help
84051 + Enabling this option will prevent programs from
84052 + - changing the executable status of memory pages that were
84053 + not originally created as executable,
84054 + - making read-only executable pages writable again,
84055 + - creating executable pages from anonymous memory,
84056 + - making read-only-after-relocations (RELRO) data pages writable again.
84057 +
84058 + You should say Y here to complete the protection provided by
84059 + the enforcement of non-executable pages.
84060 +
84061 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
84062 + this feature on a per file basis.
84063 +
84064 +config PAX_MPROTECT_COMPAT
84065 + bool "Use legacy/compat protection demoting (read help)"
84066 + depends on PAX_MPROTECT
84067 + default n
84068 + help
84069 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
84070 + by sending the proper error code to the application. For some broken
84071 + userland, this can cause problems with Python or other applications. The
84072 + current implementation however allows for applications like clamav to
84073 + detect if JIT compilation/execution is allowed and to fall back gracefully
84074 + to an interpreter-based mode if it does not. While we encourage everyone
84075 + to use the current implementation as-is and push upstream to fix broken
84076 + userland (note that the RWX logging option can assist with this), in some
84077 + environments this may not be possible. Having to disable MPROTECT
84078 + completely on certain binaries reduces the security benefit of PaX,
84079 + so this option is provided for those environments to revert to the old
84080 + behavior.
84081 +
84082 +config PAX_ELFRELOCS
84083 + bool "Allow ELF text relocations (read help)"
84084 + depends on PAX_MPROTECT
84085 + default n
84086 + help
84087 + Non-executable pages and mprotect() restrictions are effective
84088 + in preventing the introduction of new executable code into an
84089 + attacked task's address space. There remain only two venues
84090 + for this kind of attack: if the attacker can execute already
84091 + existing code in the attacked task then he can either have it
84092 + create and mmap() a file containing his code or have it mmap()
84093 + an already existing ELF library that does not have position
84094 + independent code in it and use mprotect() on it to make it
84095 + writable and copy his code there. While protecting against
84096 + the former approach is beyond PaX, the latter can be prevented
84097 + by having only PIC ELF libraries on one's system (which do not
84098 + need to relocate their code). If you are sure this is your case,
84099 + as is the case with all modern Linux distributions, then leave
84100 + this option disabled. You should say 'n' here.
84101 +
84102 +config PAX_ETEXECRELOCS
84103 + bool "Allow ELF ET_EXEC text relocations"
84104 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
84105 + select PAX_ELFRELOCS
84106 + default y
84107 + help
84108 + On some architectures there are incorrectly created applications
84109 + that require text relocations and would not work without enabling
84110 + this option. If you are an alpha, ia64 or parisc user, you should
84111 + enable this option and disable it once you have made sure that
84112 + none of your applications need it.
84113 +
84114 +config PAX_EMUPLT
84115 + bool "Automatically emulate ELF PLT"
84116 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
84117 + default y
84118 + help
84119 + Enabling this option will have the kernel automatically detect
84120 + and emulate the Procedure Linkage Table entries in ELF files.
84121 + On some architectures such entries are in writable memory, and
84122 + become non-executable leading to task termination. Therefore
84123 + it is mandatory that you enable this option on alpha, parisc,
84124 + sparc and sparc64, otherwise your system would not even boot.
84125 +
84126 + NOTE: this feature *does* open up a loophole in the protection
84127 + provided by the non-executable pages, therefore the proper
84128 + solution is to modify the toolchain to produce a PLT that does
84129 + not need to be writable.
84130 +
84131 +config PAX_DLRESOLVE
84132 + bool 'Emulate old glibc resolver stub'
84133 + depends on PAX_EMUPLT && SPARC
84134 + default n
84135 + help
84136 + This option is needed if userland has an old glibc (before 2.4)
84137 + that puts a 'save' instruction into the runtime generated resolver
84138 + stub that needs special emulation.
84139 +
84140 +config PAX_KERNEXEC
84141 + bool "Enforce non-executable kernel pages"
84142 + depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
84143 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
84144 + select PAX_KERNEXEC_PLUGIN if X86_64
84145 + help
84146 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
84147 + that is, enabling this option will make it harder to inject
84148 + and execute 'foreign' code in kernel memory itself.
84149 +
84150 + Note that on x86_64 kernels there is a known regression when
84151 + this feature and KVM/VMX are both enabled in the host kernel.
84152 +
84153 +choice
84154 + prompt "Return Address Instrumentation Method"
84155 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
84156 + depends on PAX_KERNEXEC_PLUGIN
84157 + help
84158 + Select the method used to instrument function pointer dereferences.
84159 + Note that binary modules cannot be instrumented by this approach.
84160 +
84161 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
84162 + bool "bts"
84163 + help
84164 + This method is compatible with binary only modules but has
84165 + a higher runtime overhead.
84166 +
84167 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
84168 + bool "or"
84169 + depends on !PARAVIRT
84170 + help
84171 + This method is incompatible with binary only modules but has
84172 + a lower runtime overhead.
84173 +endchoice
84174 +
84175 +config PAX_KERNEXEC_PLUGIN_METHOD
84176 + string
84177 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
84178 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
84179 + default ""
84180 +
84181 +config PAX_KERNEXEC_MODULE_TEXT
84182 + int "Minimum amount of memory reserved for module code"
84183 + default "4"
84184 + depends on PAX_KERNEXEC && X86_32 && MODULES
84185 + help
84186 + Due to implementation details the kernel must reserve a fixed
84187 + amount of memory for module code at compile time that cannot be
84188 + changed at runtime. Here you can specify the minimum amount
84189 + in MB that will be reserved. Due to the same implementation
84190 + details this size will always be rounded up to the next 2/4 MB
84191 + boundary (depends on PAE) so the actually available memory for
84192 + module code will usually be more than this minimum.
84193 +
84194 + The default 4 MB should be enough for most users but if you have
84195 + an excessive number of modules (e.g., most distribution configs
84196 + compile many drivers as modules) or use huge modules such as
84197 + nvidia's kernel driver, you will need to adjust this amount.
84198 + A good rule of thumb is to look at your currently loaded kernel
84199 + modules and add up their sizes.
84200 +
84201 +endmenu
84202 +
84203 +menu "Address Space Layout Randomization"
84204 + depends on PAX
84205 +
84206 +config PAX_ASLR
84207 + bool "Address Space Layout Randomization"
84208 + help
84209 + Many if not most exploit techniques rely on the knowledge of
84210 + certain addresses in the attacked program. The following options
84211 + will allow the kernel to apply a certain amount of randomization
84212 + to specific parts of the program thereby forcing an attacker to
84213 + guess them in most cases. Any failed guess will most likely crash
84214 + the attacked program which allows the kernel to detect such attempts
84215 + and react on them. PaX itself provides no reaction mechanisms,
84216 + instead it is strongly encouraged that you make use of Nergal's
84217 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
84218 + (http://www.grsecurity.net/) built-in crash detection features or
84219 + develop one yourself.
84220 +
84221 + By saying Y here you can choose to randomize the following areas:
84222 + - top of the task's kernel stack
84223 + - top of the task's userland stack
84224 + - base address for mmap() requests that do not specify one
84225 + (this includes all libraries)
84226 + - base address of the main executable
84227 +
84228 + It is strongly recommended to say Y here as address space layout
84229 + randomization has negligible impact on performance yet it provides
84230 + a very effective protection.
84231 +
84232 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
84233 + this feature on a per file basis.
84234 +
84235 +config PAX_RANDKSTACK
84236 + bool "Randomize kernel stack base"
84237 + depends on X86_TSC && X86
84238 + help
84239 + By saying Y here the kernel will randomize every task's kernel
84240 + stack on every system call. This will not only force an attacker
84241 + to guess it but also prevent him from making use of possible
84242 + leaked information about it.
84243 +
84244 + Since the kernel stack is a rather scarce resource, randomization
84245 + may cause unexpected stack overflows, therefore you should very
84246 + carefully test your system. Note that once enabled in the kernel
84247 + configuration, this feature cannot be disabled on a per file basis.
84248 +
84249 +config PAX_RANDUSTACK
84250 + bool "Randomize user stack base"
84251 + depends on PAX_ASLR
84252 + help
84253 + By saying Y here the kernel will randomize every task's userland
84254 + stack. The randomization is done in two steps where the second
84255 + one may apply a big amount of shift to the top of the stack and
84256 + cause problems for programs that want to use lots of memory (more
84257 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
84258 + For this reason the second step can be controlled by 'chpax' or
84259 + 'paxctl' on a per file basis.
84260 +
84261 +config PAX_RANDMMAP
84262 + bool "Randomize mmap() base"
84263 + depends on PAX_ASLR
84264 + help
84265 + By saying Y here the kernel will use a randomized base address for
84266 + mmap() requests that do not specify one themselves. As a result
84267 + all dynamically loaded libraries will appear at random addresses
84268 + and therefore be harder to exploit by a technique where an attacker
84269 + attempts to execute library code for his purposes (e.g. spawn a
84270 + shell from an exploited program that is running at an elevated
84271 + privilege level).
84272 +
84273 + Furthermore, if a program is relinked as a dynamic ELF file, its
84274 + base address will be randomized as well, completing the full
84275 + randomization of the address space layout. Attacking such programs
84276 + becomes a guess game. You can find an example of doing this at
84277 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
84278 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
84279 +
84280 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
84281 + feature on a per file basis.
84282 +
84283 +endmenu
84284 +
84285 +menu "Miscellaneous hardening features"
84286 +
84287 +config PAX_MEMORY_SANITIZE
84288 + bool "Sanitize all freed memory"
84289 + help
84290 + By saying Y here the kernel will erase memory pages as soon as they
84291 + are freed. This in turn reduces the lifetime of data stored in the
84292 + pages, making it less likely that sensitive information such as
84293 + passwords, cryptographic secrets, etc stay in memory for too long.
84294 +
84295 + This is especially useful for programs whose runtime is short, long
84296 + lived processes and the kernel itself benefit from this as long as
84297 + they operate on whole memory pages and ensure timely freeing of pages
84298 + that may hold sensitive information.
84299 +
84300 + The tradeoff is performance impact, on a single CPU system kernel
84301 + compilation sees a 3% slowdown, other systems and workloads may vary
84302 + and you are advised to test this feature on your expected workload
84303 + before deploying it.
84304 +
84305 + Note that this feature does not protect data stored in live pages,
84306 + e.g., process memory swapped to disk may stay there for a long time.
84307 +
84308 +config PAX_MEMORY_STACKLEAK
84309 + bool "Sanitize kernel stack"
84310 + depends on X86
84311 + help
84312 + By saying Y here the kernel will erase the kernel stack before it
84313 + returns from a system call. This in turn reduces the information
84314 + that a kernel stack leak bug can reveal.
84315 +
84316 + Note that such a bug can still leak information that was put on
84317 + the stack by the current system call (the one eventually triggering
84318 + the bug) but traces of earlier system calls on the kernel stack
84319 + cannot leak anymore.
84320 +
84321 + The tradeoff is performance impact, on a single CPU system kernel
84322 + compilation sees a 1% slowdown, other systems and workloads may vary
84323 + and you are advised to test this feature on your expected workload
84324 + before deploying it.
84325 +
84326 + Note: full support for this feature requires gcc with plugin support
84327 + so make sure your compiler is at least gcc 4.5.0. Using older gcc
84328 + versions means that functions with large enough stack frames may
84329 + leave uninitialized memory behind that may be exposed to a later
84330 + syscall leaking the stack.
84331 +
84332 +config PAX_MEMORY_UDEREF
84333 + bool "Prevent invalid userland pointer dereference"
84334 + depends on X86 && !UML_X86 && !XEN
84335 + select PAX_PER_CPU_PGD if X86_64
84336 + help
84337 + By saying Y here the kernel will be prevented from dereferencing
84338 + userland pointers in contexts where the kernel expects only kernel
84339 + pointers. This is both a useful runtime debugging feature and a
84340 + security measure that prevents exploiting a class of kernel bugs.
84341 +
84342 + The tradeoff is that some virtualization solutions may experience
84343 + a huge slowdown and therefore you should not enable this feature
84344 + for kernels meant to run in such environments. Whether a given VM
84345 + solution is affected or not is best determined by simply trying it
84346 + out, the performance impact will be obvious right on boot as this
84347 + mechanism engages from very early on. A good rule of thumb is that
84348 + VMs running on CPUs without hardware virtualization support (i.e.,
84349 + the majority of IA-32 CPUs) will likely experience the slowdown.
84350 +
84351 +config PAX_REFCOUNT
84352 + bool "Prevent various kernel object reference counter overflows"
84353 + depends on GRKERNSEC && (X86 || SPARC64)
84354 + help
84355 + By saying Y here the kernel will detect and prevent overflowing
84356 + various (but not all) kinds of object reference counters. Such
84357 + overflows can normally occur due to bugs only and are often, if
84358 + not always, exploitable.
84359 +
84360 + The tradeoff is that data structures protected by an overflowed
84361 + refcount will never be freed and therefore will leak memory. Note
84362 + that this leak also happens even without this protection but in
84363 + that case the overflow can eventually trigger the freeing of the
84364 + data structure while it is still being used elsewhere, resulting
84365 + in the exploitable situation that this feature prevents.
84366 +
84367 + Since this has a negligible performance impact, you should enable
84368 + this feature.
84369 +
84370 +config PAX_USERCOPY
84371 + bool "Harden heap object copies between kernel and userland"
84372 + depends on X86 || PPC || SPARC || ARM
84373 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
84374 + help
84375 + By saying Y here the kernel will enforce the size of heap objects
84376 + when they are copied in either direction between the kernel and
84377 + userland, even if only a part of the heap object is copied.
84378 +
84379 + Specifically, this checking prevents information leaking from the
84380 + kernel heap during kernel to userland copies (if the kernel heap
84381 + object is otherwise fully initialized) and prevents kernel heap
84382 + overflows during userland to kernel copies.
84383 +
84384 + Note that the current implementation provides the strictest bounds
84385 + checks for the SLUB allocator.
84386 +
84387 + Enabling this option also enables per-slab cache protection against
84388 + data in a given cache being copied into/out of via userland
84389 + accessors. Though the whitelist of regions will be reduced over
84390 + time, it notably protects important data structures like task structs.
84391 +
84392 +
84393 + If frame pointers are enabled on x86, this option will also
84394 + restrict copies into and out of the kernel stack to local variables
84395 + within a single frame.
84396 +
84397 + Since this has a negligible performance impact, you should enable
84398 + this feature.
84399 +
84400 +endmenu
84401 +
84402 +endmenu
84403 +
84404 config KEYS
84405 bool "Enable access key retention support"
84406 help
84407 @@ -146,7 +767,7 @@ config INTEL_TXT
84408 config LSM_MMAP_MIN_ADDR
84409 int "Low address space for LSM to protect from user allocation"
84410 depends on SECURITY && SECURITY_SELINUX
84411 - default 65536
84412 + default 32768
84413 help
84414 This is the portion of low virtual memory which should be protected
84415 from userspace allocation. Keeping a user from writing to low pages
84416 diff --git a/security/capability.c b/security/capability.c
84417 index fce07a7..5f12858 100644
84418 --- a/security/capability.c
84419 +++ b/security/capability.c
84420 @@ -890,7 +890,7 @@ static void cap_audit_rule_free(void *lsmrule)
84421 }
84422 #endif /* CONFIG_AUDIT */
84423
84424 -struct security_operations default_security_ops = {
84425 +struct security_operations default_security_ops __read_only = {
84426 .name = "default",
84427 };
84428
84429 diff --git a/security/commoncap.c b/security/commoncap.c
84430 index fe30751..aaba312 100644
84431 --- a/security/commoncap.c
84432 +++ b/security/commoncap.c
84433 @@ -27,6 +27,8 @@
84434 #include <linux/sched.h>
84435 #include <linux/prctl.h>
84436 #include <linux/securebits.h>
84437 +#include <linux/syslog.h>
84438 +#include <net/sock.h>
84439
84440 /*
84441 * If a non-root user executes a setuid-root binary in
84442 @@ -50,9 +52,18 @@ static void warn_setuid_and_fcaps_mixed(char *fname)
84443 }
84444 }
84445
84446 +#ifdef CONFIG_NET
84447 +extern kernel_cap_t gr_cap_rtnetlink(struct sock *sk);
84448 +#endif
84449 +
84450 int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
84451 {
84452 +#ifdef CONFIG_NET
84453 + NETLINK_CB(skb).eff_cap = gr_cap_rtnetlink(sk);
84454 +#else
84455 NETLINK_CB(skb).eff_cap = current_cap();
84456 +#endif
84457 +
84458 return 0;
84459 }
84460
84461 @@ -582,6 +593,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
84462 {
84463 const struct cred *cred = current_cred();
84464
84465 + if (gr_acl_enable_at_secure())
84466 + return 1;
84467 +
84468 if (cred->uid != 0) {
84469 if (bprm->cap_effective)
84470 return 1;
84471 @@ -956,13 +970,18 @@ error:
84472 /**
84473 * cap_syslog - Determine whether syslog function is permitted
84474 * @type: Function requested
84475 + * @from_file: Whether this request came from an open file (i.e. /proc)
84476 *
84477 * Determine whether the current process is permitted to use a particular
84478 * syslog function, returning 0 if permission is granted, -ve if not.
84479 */
84480 -int cap_syslog(int type)
84481 +int cap_syslog(int type, bool from_file)
84482 {
84483 - if ((type != 3 && type != 10) && !capable(CAP_SYS_ADMIN))
84484 + /* /proc/kmsg can open be opened by CAP_SYS_ADMIN */
84485 + if (type != SYSLOG_ACTION_OPEN && from_file)
84486 + return 0;
84487 + if ((type != SYSLOG_ACTION_READ_ALL &&
84488 + type != SYSLOG_ACTION_SIZE_BUFFER) && !capable(CAP_SYS_ADMIN))
84489 return -EPERM;
84490 return 0;
84491 }
84492 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
84493 index 165eb53..b1db4eb 100644
84494 --- a/security/integrity/ima/ima.h
84495 +++ b/security/integrity/ima/ima.h
84496 @@ -84,8 +84,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
84497 extern spinlock_t ima_queue_lock;
84498
84499 struct ima_h_table {
84500 - atomic_long_t len; /* number of stored measurements in the list */
84501 - atomic_long_t violations;
84502 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
84503 + atomic_long_unchecked_t violations;
84504 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
84505 };
84506 extern struct ima_h_table ima_htable;
84507 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
84508 index 3cd58b6..b4c284f 100644
84509 --- a/security/integrity/ima/ima_api.c
84510 +++ b/security/integrity/ima/ima_api.c
84511 @@ -74,7 +74,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
84512 int result;
84513
84514 /* can overflow, only indicator */
84515 - atomic_long_inc(&ima_htable.violations);
84516 + atomic_long_inc_unchecked(&ima_htable.violations);
84517
84518 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
84519 if (!entry) {
84520 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
84521 index 0c72c9c..433e29b 100644
84522 --- a/security/integrity/ima/ima_fs.c
84523 +++ b/security/integrity/ima/ima_fs.c
84524 @@ -27,12 +27,12 @@
84525 static int valid_policy = 1;
84526 #define TMPBUFLEN 12
84527 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
84528 - loff_t *ppos, atomic_long_t *val)
84529 + loff_t *ppos, atomic_long_unchecked_t *val)
84530 {
84531 char tmpbuf[TMPBUFLEN];
84532 ssize_t len;
84533
84534 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
84535 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
84536 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
84537 }
84538
84539 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
84540 index a0880e9..93e4ebb 100644
84541 --- a/security/integrity/ima/ima_queue.c
84542 +++ b/security/integrity/ima/ima_queue.c
84543 @@ -78,7 +78,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
84544 INIT_LIST_HEAD(&qe->later);
84545 list_add_tail_rcu(&qe->later, &ima_measurements);
84546
84547 - atomic_long_inc(&ima_htable.len);
84548 + atomic_long_inc_unchecked(&ima_htable.len);
84549 key = ima_hash_key(entry->digest);
84550 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
84551 return 0;
84552 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
84553 index e031952..c9a535d 100644
84554 --- a/security/keys/keyring.c
84555 +++ b/security/keys/keyring.c
84556 @@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring,
84557 ret = -EFAULT;
84558
84559 for (loop = 0; loop < klist->nkeys; loop++) {
84560 + key_serial_t serial;
84561 key = klist->keys[loop];
84562 + serial = key->serial;
84563
84564 tmp = sizeof(key_serial_t);
84565 if (tmp > buflen)
84566 tmp = buflen;
84567
84568 - if (copy_to_user(buffer,
84569 - &key->serial,
84570 - tmp) != 0)
84571 + if (copy_to_user(buffer, &serial, tmp))
84572 goto error;
84573
84574 buflen -= tmp;
84575 diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
84576 index 931cfda..e71808a 100644
84577 --- a/security/keys/process_keys.c
84578 +++ b/security/keys/process_keys.c
84579 @@ -208,7 +208,7 @@ static int install_process_keyring(void)
84580 ret = install_process_keyring_to_cred(new);
84581 if (ret < 0) {
84582 abort_creds(new);
84583 - return ret != -EEXIST ?: 0;
84584 + return ret != -EEXIST ? ret : 0;
84585 }
84586
84587 return commit_creds(new);
84588 diff --git a/security/min_addr.c b/security/min_addr.c
84589 index d9f9425..c28cef4 100644
84590 --- a/security/min_addr.c
84591 +++ b/security/min_addr.c
84592 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
84593 */
84594 static void update_mmap_min_addr(void)
84595 {
84596 +#ifndef SPARC
84597 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
84598 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
84599 mmap_min_addr = dac_mmap_min_addr;
84600 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
84601 #else
84602 mmap_min_addr = dac_mmap_min_addr;
84603 #endif
84604 +#endif
84605 }
84606
84607 /*
84608 diff --git a/security/root_plug.c b/security/root_plug.c
84609 index 2f7ffa6..0455400 100644
84610 --- a/security/root_plug.c
84611 +++ b/security/root_plug.c
84612 @@ -70,7 +70,7 @@ static int rootplug_bprm_check_security (struct linux_binprm *bprm)
84613 return 0;
84614 }
84615
84616 -static struct security_operations rootplug_security_ops = {
84617 +static struct security_operations rootplug_security_ops __read_only = {
84618 .bprm_check_security = rootplug_bprm_check_security,
84619 };
84620
84621 diff --git a/security/security.c b/security/security.c
84622 index c4c6732..7abf13b 100644
84623 --- a/security/security.c
84624 +++ b/security/security.c
84625 @@ -24,7 +24,7 @@ static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1];
84626 extern struct security_operations default_security_ops;
84627 extern void security_fixup_ops(struct security_operations *ops);
84628
84629 -struct security_operations *security_ops; /* Initialized to NULL */
84630 +struct security_operations *security_ops __read_only; /* Initialized to NULL */
84631
84632 static inline int verify(struct security_operations *ops)
84633 {
84634 @@ -106,7 +106,7 @@ int __init security_module_enable(struct security_operations *ops)
84635 * If there is already a security module registered with the kernel,
84636 * an error will be returned. Otherwise %0 is returned on success.
84637 */
84638 -int register_security(struct security_operations *ops)
84639 +int __init register_security(struct security_operations *ops)
84640 {
84641 if (verify(ops)) {
84642 printk(KERN_DEBUG "%s could not verify "
84643 @@ -199,9 +199,9 @@ int security_quota_on(struct dentry *dentry)
84644 return security_ops->quota_on(dentry);
84645 }
84646
84647 -int security_syslog(int type)
84648 +int security_syslog(int type, bool from_file)
84649 {
84650 - return security_ops->syslog(type);
84651 + return security_ops->syslog(type, from_file);
84652 }
84653
84654 int security_settime(struct timespec *ts, struct timezone *tz)
84655 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
84656 index a106754..ca3a589 100644
84657 --- a/security/selinux/hooks.c
84658 +++ b/security/selinux/hooks.c
84659 @@ -76,6 +76,7 @@
84660 #include <linux/selinux.h>
84661 #include <linux/mutex.h>
84662 #include <linux/posix-timers.h>
84663 +#include <linux/syslog.h>
84664
84665 #include "avc.h"
84666 #include "objsec.h"
84667 @@ -131,7 +132,7 @@ int selinux_enabled = 1;
84668 * Minimal support for a secondary security module,
84669 * just to allow the use of the capability module.
84670 */
84671 -static struct security_operations *secondary_ops;
84672 +static struct security_operations *secondary_ops __read_only;
84673
84674 /* Lists of inode and superblock security structures initialized
84675 before the policy was loaded. */
84676 @@ -2050,29 +2051,30 @@ static int selinux_quota_on(struct dentry *dentry)
84677 return dentry_has_perm(cred, NULL, dentry, FILE__QUOTAON);
84678 }
84679
84680 -static int selinux_syslog(int type)
84681 +static int selinux_syslog(int type, bool from_file)
84682 {
84683 int rc;
84684
84685 - rc = cap_syslog(type);
84686 + rc = cap_syslog(type, from_file);
84687 if (rc)
84688 return rc;
84689
84690 switch (type) {
84691 - case 3: /* Read last kernel messages */
84692 - case 10: /* Return size of the log buffer */
84693 + case SYSLOG_ACTION_READ_ALL: /* Read last kernel messages */
84694 + case SYSLOG_ACTION_SIZE_BUFFER: /* Return size of the log buffer */
84695 rc = task_has_system(current, SYSTEM__SYSLOG_READ);
84696 break;
84697 - case 6: /* Disable logging to console */
84698 - case 7: /* Enable logging to console */
84699 - case 8: /* Set level of messages printed to console */
84700 + case SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging to console */
84701 + case SYSLOG_ACTION_CONSOLE_ON: /* Enable logging to console */
84702 + /* Set level of messages printed to console */
84703 + case SYSLOG_ACTION_CONSOLE_LEVEL:
84704 rc = task_has_system(current, SYSTEM__SYSLOG_CONSOLE);
84705 break;
84706 - case 0: /* Close log */
84707 - case 1: /* Open log */
84708 - case 2: /* Read from log */
84709 - case 4: /* Read/clear last kernel messages */
84710 - case 5: /* Clear ring buffer */
84711 + case SYSLOG_ACTION_CLOSE: /* Close log */
84712 + case SYSLOG_ACTION_OPEN: /* Open log */
84713 + case SYSLOG_ACTION_READ: /* Read from log */
84714 + case SYSLOG_ACTION_READ_CLEAR: /* Read/clear last kernel messages */
84715 + case SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
84716 default:
84717 rc = task_has_system(current, SYSTEM__SYSLOG_MOD);
84718 break;
84719 @@ -5457,7 +5459,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
84720
84721 #endif
84722
84723 -static struct security_operations selinux_ops = {
84724 +static struct security_operations selinux_ops __read_only = {
84725 .name = "selinux",
84726
84727 .ptrace_access_check = selinux_ptrace_access_check,
84728 @@ -5841,7 +5843,9 @@ int selinux_disable(void)
84729 avc_disable();
84730
84731 /* Reset security_ops to the secondary module, dummy or capability. */
84732 + pax_open_kernel();
84733 security_ops = secondary_ops;
84734 + pax_close_kernel();
84735
84736 /* Unregister netfilter hooks. */
84737 selinux_nf_ip_exit();
84738 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
84739 index 13128f9..c23c736 100644
84740 --- a/security/selinux/include/xfrm.h
84741 +++ b/security/selinux/include/xfrm.h
84742 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
84743
84744 static inline void selinux_xfrm_notify_policyload(void)
84745 {
84746 - atomic_inc(&flow_cache_genid);
84747 + atomic_inc_unchecked(&flow_cache_genid);
84748 }
84749 #else
84750 static inline int selinux_xfrm_enabled(void)
84751 diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
84752 index ff17820..d68084c 100644
84753 --- a/security/selinux/ss/services.c
84754 +++ b/security/selinux/ss/services.c
84755 @@ -1715,6 +1715,8 @@ int security_load_policy(void *data, size_t len)
84756 int rc = 0;
84757 struct policy_file file = { data, len }, *fp = &file;
84758
84759 + pax_track_stack();
84760 +
84761 if (!ss_initialized) {
84762 avtab_cache_init();
84763 if (policydb_read(&policydb, fp)) {
84764 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
84765 index c33b6bb..b51f19e 100644
84766 --- a/security/smack/smack_lsm.c
84767 +++ b/security/smack/smack_lsm.c
84768 @@ -157,12 +157,12 @@ static int smack_ptrace_traceme(struct task_struct *ptp)
84769 *
84770 * Returns 0 on success, error code otherwise.
84771 */
84772 -static int smack_syslog(int type)
84773 +static int smack_syslog(int type, bool from_file)
84774 {
84775 int rc;
84776 char *sp = current_security();
84777
84778 - rc = cap_syslog(type);
84779 + rc = cap_syslog(type, from_file);
84780 if (rc != 0)
84781 return rc;
84782
84783 @@ -3073,7 +3073,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
84784 return 0;
84785 }
84786
84787 -struct security_operations smack_ops = {
84788 +struct security_operations smack_ops __read_only = {
84789 .name = "smack",
84790
84791 .ptrace_access_check = smack_ptrace_access_check,
84792 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
84793 index 9548a09..9a5f384 100644
84794 --- a/security/tomoyo/tomoyo.c
84795 +++ b/security/tomoyo/tomoyo.c
84796 @@ -275,7 +275,7 @@ static int tomoyo_dentry_open(struct file *f, const struct cred *cred)
84797 * tomoyo_security_ops is a "struct security_operations" which is used for
84798 * registering TOMOYO.
84799 */
84800 -static struct security_operations tomoyo_security_ops = {
84801 +static struct security_operations tomoyo_security_ops __read_only = {
84802 .name = "tomoyo",
84803 .cred_alloc_blank = tomoyo_cred_alloc_blank,
84804 .cred_prepare = tomoyo_cred_prepare,
84805 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
84806 index 84bb07d..c2ab6b6 100644
84807 --- a/sound/aoa/codecs/onyx.c
84808 +++ b/sound/aoa/codecs/onyx.c
84809 @@ -53,7 +53,7 @@ struct onyx {
84810 spdif_locked:1,
84811 analog_locked:1,
84812 original_mute:2;
84813 - int open_count;
84814 + local_t open_count;
84815 struct codec_info *codec_info;
84816
84817 /* mutex serializes concurrent access to the device
84818 @@ -752,7 +752,7 @@ static int onyx_open(struct codec_info_item *cii,
84819 struct onyx *onyx = cii->codec_data;
84820
84821 mutex_lock(&onyx->mutex);
84822 - onyx->open_count++;
84823 + local_inc(&onyx->open_count);
84824 mutex_unlock(&onyx->mutex);
84825
84826 return 0;
84827 @@ -764,8 +764,7 @@ static int onyx_close(struct codec_info_item *cii,
84828 struct onyx *onyx = cii->codec_data;
84829
84830 mutex_lock(&onyx->mutex);
84831 - onyx->open_count--;
84832 - if (!onyx->open_count)
84833 + if (local_dec_and_test(&onyx->open_count))
84834 onyx->spdif_locked = onyx->analog_locked = 0;
84835 mutex_unlock(&onyx->mutex);
84836
84837 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
84838 index ffd2025..df062c9 100644
84839 --- a/sound/aoa/codecs/onyx.h
84840 +++ b/sound/aoa/codecs/onyx.h
84841 @@ -11,6 +11,7 @@
84842 #include <linux/i2c.h>
84843 #include <asm/pmac_low_i2c.h>
84844 #include <asm/prom.h>
84845 +#include <asm/local.h>
84846
84847 /* PCM3052 register definitions */
84848
84849 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
84850 index d9c9635..bc0a5a2 100644
84851 --- a/sound/core/oss/pcm_oss.c
84852 +++ b/sound/core/oss/pcm_oss.c
84853 @@ -1395,7 +1395,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
84854 }
84855 } else {
84856 tmp = snd_pcm_oss_write2(substream,
84857 - (const char __force *)buf,
84858 + (const char __force_kernel *)buf,
84859 runtime->oss.period_bytes, 0);
84860 if (tmp <= 0)
84861 goto err;
84862 @@ -1483,7 +1483,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
84863 xfer += tmp;
84864 runtime->oss.buffer_used -= tmp;
84865 } else {
84866 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
84867 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
84868 runtime->oss.period_bytes, 0);
84869 if (tmp <= 0)
84870 goto err;
84871 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
84872 index 038232d..7dd9e5c 100644
84873 --- a/sound/core/pcm_compat.c
84874 +++ b/sound/core/pcm_compat.c
84875 @@ -30,7 +30,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
84876 int err;
84877
84878 fs = snd_enter_user();
84879 - err = snd_pcm_delay(substream, &delay);
84880 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
84881 snd_leave_user(fs);
84882 if (err < 0)
84883 return err;
84884 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
84885 index e6d2d97..4843949 100644
84886 --- a/sound/core/pcm_native.c
84887 +++ b/sound/core/pcm_native.c
84888 @@ -2747,11 +2747,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
84889 switch (substream->stream) {
84890 case SNDRV_PCM_STREAM_PLAYBACK:
84891 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
84892 - (void __user *)arg);
84893 + (void __force_user *)arg);
84894 break;
84895 case SNDRV_PCM_STREAM_CAPTURE:
84896 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
84897 - (void __user *)arg);
84898 + (void __force_user *)arg);
84899 break;
84900 default:
84901 result = -EINVAL;
84902 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
84903 index 1f99767..14636533 100644
84904 --- a/sound/core/seq/seq_device.c
84905 +++ b/sound/core/seq/seq_device.c
84906 @@ -63,7 +63,7 @@ struct ops_list {
84907 int argsize; /* argument size */
84908
84909 /* operators */
84910 - struct snd_seq_dev_ops ops;
84911 + struct snd_seq_dev_ops *ops;
84912
84913 /* registred devices */
84914 struct list_head dev_list; /* list of devices */
84915 @@ -332,7 +332,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
84916
84917 mutex_lock(&ops->reg_mutex);
84918 /* copy driver operators */
84919 - ops->ops = *entry;
84920 + ops->ops = entry;
84921 ops->driver |= DRIVER_LOADED;
84922 ops->argsize = argsize;
84923
84924 @@ -462,7 +462,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
84925 dev->name, ops->id, ops->argsize, dev->argsize);
84926 return -EINVAL;
84927 }
84928 - if (ops->ops.init_device(dev) >= 0) {
84929 + if (ops->ops->init_device(dev) >= 0) {
84930 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
84931 ops->num_init_devices++;
84932 } else {
84933 @@ -489,7 +489,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
84934 dev->name, ops->id, ops->argsize, dev->argsize);
84935 return -EINVAL;
84936 }
84937 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
84938 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
84939 dev->status = SNDRV_SEQ_DEVICE_FREE;
84940 dev->driver_data = NULL;
84941 ops->num_init_devices--;
84942 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
84943 index 9284829..ac8e8b2 100644
84944 --- a/sound/drivers/mts64.c
84945 +++ b/sound/drivers/mts64.c
84946 @@ -27,6 +27,7 @@
84947 #include <sound/initval.h>
84948 #include <sound/rawmidi.h>
84949 #include <sound/control.h>
84950 +#include <asm/local.h>
84951
84952 #define CARD_NAME "Miditerminal 4140"
84953 #define DRIVER_NAME "MTS64"
84954 @@ -65,7 +66,7 @@ struct mts64 {
84955 struct pardevice *pardev;
84956 int pardev_claimed;
84957
84958 - int open_count;
84959 + local_t open_count;
84960 int current_midi_output_port;
84961 int current_midi_input_port;
84962 u8 mode[MTS64_NUM_INPUT_PORTS];
84963 @@ -695,7 +696,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
84964 {
84965 struct mts64 *mts = substream->rmidi->private_data;
84966
84967 - if (mts->open_count == 0) {
84968 + if (local_read(&mts->open_count) == 0) {
84969 /* We don't need a spinlock here, because this is just called
84970 if the device has not been opened before.
84971 So there aren't any IRQs from the device */
84972 @@ -703,7 +704,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
84973
84974 msleep(50);
84975 }
84976 - ++(mts->open_count);
84977 + local_inc(&mts->open_count);
84978
84979 return 0;
84980 }
84981 @@ -713,8 +714,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
84982 struct mts64 *mts = substream->rmidi->private_data;
84983 unsigned long flags;
84984
84985 - --(mts->open_count);
84986 - if (mts->open_count == 0) {
84987 + if (local_dec_return(&mts->open_count) == 0) {
84988 /* We need the spinlock_irqsave here because we can still
84989 have IRQs at this point */
84990 spin_lock_irqsave(&mts->lock, flags);
84991 @@ -723,8 +723,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
84992
84993 msleep(500);
84994
84995 - } else if (mts->open_count < 0)
84996 - mts->open_count = 0;
84997 + } else if (local_read(&mts->open_count) < 0)
84998 + local_set(&mts->open_count, 0);
84999
85000 return 0;
85001 }
85002 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
85003 index 01997f2..cbc1195 100644
85004 --- a/sound/drivers/opl4/opl4_lib.c
85005 +++ b/sound/drivers/opl4/opl4_lib.c
85006 @@ -27,7 +27,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
85007 MODULE_DESCRIPTION("OPL4 driver");
85008 MODULE_LICENSE("GPL");
85009
85010 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
85011 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
85012 {
85013 int timeout = 10;
85014 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
85015 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
85016 index 60158e2..0a0cc1a 100644
85017 --- a/sound/drivers/portman2x4.c
85018 +++ b/sound/drivers/portman2x4.c
85019 @@ -46,6 +46,7 @@
85020 #include <sound/initval.h>
85021 #include <sound/rawmidi.h>
85022 #include <sound/control.h>
85023 +#include <asm/local.h>
85024
85025 #define CARD_NAME "Portman 2x4"
85026 #define DRIVER_NAME "portman"
85027 @@ -83,7 +84,7 @@ struct portman {
85028 struct pardevice *pardev;
85029 int pardev_claimed;
85030
85031 - int open_count;
85032 + local_t open_count;
85033 int mode[PORTMAN_NUM_INPUT_PORTS];
85034 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
85035 };
85036 diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
85037 index 02f79d2..8691d43 100644
85038 --- a/sound/isa/cmi8330.c
85039 +++ b/sound/isa/cmi8330.c
85040 @@ -173,7 +173,7 @@ struct snd_cmi8330 {
85041
85042 struct snd_pcm *pcm;
85043 struct snd_cmi8330_stream {
85044 - struct snd_pcm_ops ops;
85045 + snd_pcm_ops_no_const ops;
85046 snd_pcm_open_callback_t open;
85047 void *private_data; /* sb or wss */
85048 } streams[2];
85049 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
85050 index 733b014..56ce96f 100644
85051 --- a/sound/oss/sb_audio.c
85052 +++ b/sound/oss/sb_audio.c
85053 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
85054 buf16 = (signed short *)(localbuf + localoffs);
85055 while (c)
85056 {
85057 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
85058 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
85059 if (copy_from_user(lbuf8,
85060 userbuf+useroffs + p,
85061 locallen))
85062 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
85063 index 3136c88..28ad950 100644
85064 --- a/sound/oss/swarm_cs4297a.c
85065 +++ b/sound/oss/swarm_cs4297a.c
85066 @@ -2577,7 +2577,6 @@ static int __init cs4297a_init(void)
85067 {
85068 struct cs4297a_state *s;
85069 u32 pwr, id;
85070 - mm_segment_t fs;
85071 int rval;
85072 #ifndef CONFIG_BCM_CS4297A_CSWARM
85073 u64 cfg;
85074 @@ -2667,22 +2666,23 @@ static int __init cs4297a_init(void)
85075 if (!rval) {
85076 char *sb1250_duart_present;
85077
85078 +#if 0
85079 + mm_segment_t fs;
85080 fs = get_fs();
85081 set_fs(KERNEL_DS);
85082 -#if 0
85083 val = SOUND_MASK_LINE;
85084 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
85085 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
85086 val = initvol[i].vol;
85087 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
85088 }
85089 + set_fs(fs);
85090 // cs4297a_write_ac97(s, 0x18, 0x0808);
85091 #else
85092 // cs4297a_write_ac97(s, 0x5e, 0x180);
85093 cs4297a_write_ac97(s, 0x02, 0x0808);
85094 cs4297a_write_ac97(s, 0x18, 0x0808);
85095 #endif
85096 - set_fs(fs);
85097
85098 list_add(&s->list, &cs4297a_devs);
85099
85100 diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
85101 index 78288db..0406809 100644
85102 --- a/sound/pci/ac97/ac97_codec.c
85103 +++ b/sound/pci/ac97/ac97_codec.c
85104 @@ -1952,7 +1952,7 @@ static int snd_ac97_dev_disconnect(struct snd_device *device)
85105 }
85106
85107 /* build_ops to do nothing */
85108 -static struct snd_ac97_build_ops null_build_ops;
85109 +static const struct snd_ac97_build_ops null_build_ops;
85110
85111 #ifdef CONFIG_SND_AC97_POWER_SAVE
85112 static void do_update_power(struct work_struct *work)
85113 diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c
85114 index eeb2e23..82bf625 100644
85115 --- a/sound/pci/ac97/ac97_patch.c
85116 +++ b/sound/pci/ac97/ac97_patch.c
85117 @@ -371,7 +371,7 @@ static int patch_yamaha_ymf743_build_spdif(struct snd_ac97 *ac97)
85118 return 0;
85119 }
85120
85121 -static struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
85122 +static const struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
85123 .build_spdif = patch_yamaha_ymf743_build_spdif,
85124 .build_3d = patch_yamaha_ymf7x3_3d,
85125 };
85126 @@ -455,7 +455,7 @@ static int patch_yamaha_ymf753_post_spdif(struct snd_ac97 * ac97)
85127 return 0;
85128 }
85129
85130 -static struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
85131 +static const struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
85132 .build_3d = patch_yamaha_ymf7x3_3d,
85133 .build_post_spdif = patch_yamaha_ymf753_post_spdif
85134 };
85135 @@ -502,7 +502,7 @@ static int patch_wolfson_wm9703_specific(struct snd_ac97 * ac97)
85136 return 0;
85137 }
85138
85139 -static struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
85140 +static const struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
85141 .build_specific = patch_wolfson_wm9703_specific,
85142 };
85143
85144 @@ -533,7 +533,7 @@ static int patch_wolfson_wm9704_specific(struct snd_ac97 * ac97)
85145 return 0;
85146 }
85147
85148 -static struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
85149 +static const struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
85150 .build_specific = patch_wolfson_wm9704_specific,
85151 };
85152
85153 @@ -555,7 +555,7 @@ static int patch_wolfson_wm9705_specific(struct snd_ac97 * ac97)
85154 return 0;
85155 }
85156
85157 -static struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
85158 +static const struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
85159 .build_specific = patch_wolfson_wm9705_specific,
85160 };
85161
85162 @@ -692,7 +692,7 @@ static int patch_wolfson_wm9711_specific(struct snd_ac97 * ac97)
85163 return 0;
85164 }
85165
85166 -static struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
85167 +static const struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
85168 .build_specific = patch_wolfson_wm9711_specific,
85169 };
85170
85171 @@ -886,7 +886,7 @@ static void patch_wolfson_wm9713_resume (struct snd_ac97 * ac97)
85172 }
85173 #endif
85174
85175 -static struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
85176 +static const struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
85177 .build_specific = patch_wolfson_wm9713_specific,
85178 .build_3d = patch_wolfson_wm9713_3d,
85179 #ifdef CONFIG_PM
85180 @@ -991,7 +991,7 @@ static int patch_sigmatel_stac97xx_specific(struct snd_ac97 * ac97)
85181 return 0;
85182 }
85183
85184 -static struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
85185 +static const struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
85186 .build_3d = patch_sigmatel_stac9700_3d,
85187 .build_specific = patch_sigmatel_stac97xx_specific
85188 };
85189 @@ -1038,7 +1038,7 @@ static int patch_sigmatel_stac9708_specific(struct snd_ac97 *ac97)
85190 return patch_sigmatel_stac97xx_specific(ac97);
85191 }
85192
85193 -static struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
85194 +static const struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
85195 .build_3d = patch_sigmatel_stac9708_3d,
85196 .build_specific = patch_sigmatel_stac9708_specific
85197 };
85198 @@ -1267,7 +1267,7 @@ static int patch_sigmatel_stac9758_specific(struct snd_ac97 *ac97)
85199 return 0;
85200 }
85201
85202 -static struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
85203 +static const struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
85204 .build_3d = patch_sigmatel_stac9700_3d,
85205 .build_specific = patch_sigmatel_stac9758_specific
85206 };
85207 @@ -1342,7 +1342,7 @@ static int patch_cirrus_build_spdif(struct snd_ac97 * ac97)
85208 return 0;
85209 }
85210
85211 -static struct snd_ac97_build_ops patch_cirrus_ops = {
85212 +static const struct snd_ac97_build_ops patch_cirrus_ops = {
85213 .build_spdif = patch_cirrus_build_spdif
85214 };
85215
85216 @@ -1399,7 +1399,7 @@ static int patch_conexant_build_spdif(struct snd_ac97 * ac97)
85217 return 0;
85218 }
85219
85220 -static struct snd_ac97_build_ops patch_conexant_ops = {
85221 +static const struct snd_ac97_build_ops patch_conexant_ops = {
85222 .build_spdif = patch_conexant_build_spdif
85223 };
85224
85225 @@ -1575,7 +1575,7 @@ static void patch_ad1881_chained(struct snd_ac97 * ac97, int unchained_idx, int
85226 }
85227 }
85228
85229 -static struct snd_ac97_build_ops patch_ad1881_build_ops = {
85230 +static const struct snd_ac97_build_ops patch_ad1881_build_ops = {
85231 #ifdef CONFIG_PM
85232 .resume = ad18xx_resume
85233 #endif
85234 @@ -1662,7 +1662,7 @@ static int patch_ad1885_specific(struct snd_ac97 * ac97)
85235 return 0;
85236 }
85237
85238 -static struct snd_ac97_build_ops patch_ad1885_build_ops = {
85239 +static const struct snd_ac97_build_ops patch_ad1885_build_ops = {
85240 .build_specific = &patch_ad1885_specific,
85241 #ifdef CONFIG_PM
85242 .resume = ad18xx_resume
85243 @@ -1689,7 +1689,7 @@ static int patch_ad1886_specific(struct snd_ac97 * ac97)
85244 return 0;
85245 }
85246
85247 -static struct snd_ac97_build_ops patch_ad1886_build_ops = {
85248 +static const struct snd_ac97_build_ops patch_ad1886_build_ops = {
85249 .build_specific = &patch_ad1886_specific,
85250 #ifdef CONFIG_PM
85251 .resume = ad18xx_resume
85252 @@ -1896,7 +1896,7 @@ static int patch_ad1981a_specific(struct snd_ac97 * ac97)
85253 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
85254 }
85255
85256 -static struct snd_ac97_build_ops patch_ad1981a_build_ops = {
85257 +static const struct snd_ac97_build_ops patch_ad1981a_build_ops = {
85258 .build_post_spdif = patch_ad198x_post_spdif,
85259 .build_specific = patch_ad1981a_specific,
85260 #ifdef CONFIG_PM
85261 @@ -1952,7 +1952,7 @@ static int patch_ad1981b_specific(struct snd_ac97 *ac97)
85262 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
85263 }
85264
85265 -static struct snd_ac97_build_ops patch_ad1981b_build_ops = {
85266 +static const struct snd_ac97_build_ops patch_ad1981b_build_ops = {
85267 .build_post_spdif = patch_ad198x_post_spdif,
85268 .build_specific = patch_ad1981b_specific,
85269 #ifdef CONFIG_PM
85270 @@ -2091,7 +2091,7 @@ static int patch_ad1888_specific(struct snd_ac97 *ac97)
85271 return patch_build_controls(ac97, snd_ac97_ad1888_controls, ARRAY_SIZE(snd_ac97_ad1888_controls));
85272 }
85273
85274 -static struct snd_ac97_build_ops patch_ad1888_build_ops = {
85275 +static const struct snd_ac97_build_ops patch_ad1888_build_ops = {
85276 .build_post_spdif = patch_ad198x_post_spdif,
85277 .build_specific = patch_ad1888_specific,
85278 #ifdef CONFIG_PM
85279 @@ -2140,7 +2140,7 @@ static int patch_ad1980_specific(struct snd_ac97 *ac97)
85280 return patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1);
85281 }
85282
85283 -static struct snd_ac97_build_ops patch_ad1980_build_ops = {
85284 +static const struct snd_ac97_build_ops patch_ad1980_build_ops = {
85285 .build_post_spdif = patch_ad198x_post_spdif,
85286 .build_specific = patch_ad1980_specific,
85287 #ifdef CONFIG_PM
85288 @@ -2255,7 +2255,7 @@ static int patch_ad1985_specific(struct snd_ac97 *ac97)
85289 ARRAY_SIZE(snd_ac97_ad1985_controls));
85290 }
85291
85292 -static struct snd_ac97_build_ops patch_ad1985_build_ops = {
85293 +static const struct snd_ac97_build_ops patch_ad1985_build_ops = {
85294 .build_post_spdif = patch_ad198x_post_spdif,
85295 .build_specific = patch_ad1985_specific,
85296 #ifdef CONFIG_PM
85297 @@ -2547,7 +2547,7 @@ static int patch_ad1986_specific(struct snd_ac97 *ac97)
85298 ARRAY_SIZE(snd_ac97_ad1985_controls));
85299 }
85300
85301 -static struct snd_ac97_build_ops patch_ad1986_build_ops = {
85302 +static const struct snd_ac97_build_ops patch_ad1986_build_ops = {
85303 .build_post_spdif = patch_ad198x_post_spdif,
85304 .build_specific = patch_ad1986_specific,
85305 #ifdef CONFIG_PM
85306 @@ -2652,7 +2652,7 @@ static int patch_alc650_specific(struct snd_ac97 * ac97)
85307 return 0;
85308 }
85309
85310 -static struct snd_ac97_build_ops patch_alc650_ops = {
85311 +static const struct snd_ac97_build_ops patch_alc650_ops = {
85312 .build_specific = patch_alc650_specific,
85313 .update_jacks = alc650_update_jacks
85314 };
85315 @@ -2804,7 +2804,7 @@ static int patch_alc655_specific(struct snd_ac97 * ac97)
85316 return 0;
85317 }
85318
85319 -static struct snd_ac97_build_ops patch_alc655_ops = {
85320 +static const struct snd_ac97_build_ops patch_alc655_ops = {
85321 .build_specific = patch_alc655_specific,
85322 .update_jacks = alc655_update_jacks
85323 };
85324 @@ -2916,7 +2916,7 @@ static int patch_alc850_specific(struct snd_ac97 *ac97)
85325 return 0;
85326 }
85327
85328 -static struct snd_ac97_build_ops patch_alc850_ops = {
85329 +static const struct snd_ac97_build_ops patch_alc850_ops = {
85330 .build_specific = patch_alc850_specific,
85331 .update_jacks = alc850_update_jacks
85332 };
85333 @@ -2978,7 +2978,7 @@ static int patch_cm9738_specific(struct snd_ac97 * ac97)
85334 return patch_build_controls(ac97, snd_ac97_cm9738_controls, ARRAY_SIZE(snd_ac97_cm9738_controls));
85335 }
85336
85337 -static struct snd_ac97_build_ops patch_cm9738_ops = {
85338 +static const struct snd_ac97_build_ops patch_cm9738_ops = {
85339 .build_specific = patch_cm9738_specific,
85340 .update_jacks = cm9738_update_jacks
85341 };
85342 @@ -3069,7 +3069,7 @@ static int patch_cm9739_post_spdif(struct snd_ac97 * ac97)
85343 return patch_build_controls(ac97, snd_ac97_cm9739_controls_spdif, ARRAY_SIZE(snd_ac97_cm9739_controls_spdif));
85344 }
85345
85346 -static struct snd_ac97_build_ops patch_cm9739_ops = {
85347 +static const struct snd_ac97_build_ops patch_cm9739_ops = {
85348 .build_specific = patch_cm9739_specific,
85349 .build_post_spdif = patch_cm9739_post_spdif,
85350 .update_jacks = cm9739_update_jacks
85351 @@ -3243,7 +3243,7 @@ static int patch_cm9761_specific(struct snd_ac97 * ac97)
85352 return patch_build_controls(ac97, snd_ac97_cm9761_controls, ARRAY_SIZE(snd_ac97_cm9761_controls));
85353 }
85354
85355 -static struct snd_ac97_build_ops patch_cm9761_ops = {
85356 +static const struct snd_ac97_build_ops patch_cm9761_ops = {
85357 .build_specific = patch_cm9761_specific,
85358 .build_post_spdif = patch_cm9761_post_spdif,
85359 .update_jacks = cm9761_update_jacks
85360 @@ -3339,7 +3339,7 @@ static int patch_cm9780_specific(struct snd_ac97 *ac97)
85361 return patch_build_controls(ac97, cm9780_controls, ARRAY_SIZE(cm9780_controls));
85362 }
85363
85364 -static struct snd_ac97_build_ops patch_cm9780_ops = {
85365 +static const struct snd_ac97_build_ops patch_cm9780_ops = {
85366 .build_specific = patch_cm9780_specific,
85367 .build_post_spdif = patch_cm9761_post_spdif /* identical with CM9761 */
85368 };
85369 @@ -3459,7 +3459,7 @@ static int patch_vt1616_specific(struct snd_ac97 * ac97)
85370 return 0;
85371 }
85372
85373 -static struct snd_ac97_build_ops patch_vt1616_ops = {
85374 +static const struct snd_ac97_build_ops patch_vt1616_ops = {
85375 .build_specific = patch_vt1616_specific
85376 };
85377
85378 @@ -3813,7 +3813,7 @@ static int patch_it2646_specific(struct snd_ac97 * ac97)
85379 return 0;
85380 }
85381
85382 -static struct snd_ac97_build_ops patch_it2646_ops = {
85383 +static const struct snd_ac97_build_ops patch_it2646_ops = {
85384 .build_specific = patch_it2646_specific,
85385 .update_jacks = it2646_update_jacks
85386 };
85387 @@ -3847,7 +3847,7 @@ static int patch_si3036_specific(struct snd_ac97 * ac97)
85388 return 0;
85389 }
85390
85391 -static struct snd_ac97_build_ops patch_si3036_ops = {
85392 +static const struct snd_ac97_build_ops patch_si3036_ops = {
85393 .build_specific = patch_si3036_specific,
85394 };
85395
85396 @@ -3914,7 +3914,7 @@ static int patch_ucb1400_specific(struct snd_ac97 * ac97)
85397 return 0;
85398 }
85399
85400 -static struct snd_ac97_build_ops patch_ucb1400_ops = {
85401 +static const struct snd_ac97_build_ops patch_ucb1400_ops = {
85402 .build_specific = patch_ucb1400_specific,
85403 };
85404
85405 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
85406 index 99552fb..4dcc2c5 100644
85407 --- a/sound/pci/hda/hda_codec.h
85408 +++ b/sound/pci/hda/hda_codec.h
85409 @@ -580,7 +580,7 @@ struct hda_bus_ops {
85410 /* notify power-up/down from codec to controller */
85411 void (*pm_notify)(struct hda_bus *bus);
85412 #endif
85413 -};
85414 +} __no_const;
85415
85416 /* template to pass to the bus constructor */
85417 struct hda_bus_template {
85418 @@ -675,6 +675,7 @@ struct hda_codec_ops {
85419 int (*check_power_status)(struct hda_codec *codec, hda_nid_t nid);
85420 #endif
85421 };
85422 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
85423
85424 /* record for amp information cache */
85425 struct hda_cache_head {
85426 @@ -705,7 +706,7 @@ struct hda_pcm_ops {
85427 struct snd_pcm_substream *substream);
85428 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
85429 struct snd_pcm_substream *substream);
85430 -};
85431 +} __no_const;
85432
85433 /* PCM information for each substream */
85434 struct hda_pcm_stream {
85435 @@ -760,7 +761,7 @@ struct hda_codec {
85436 const char *modelname; /* model name for preset */
85437
85438 /* set by patch */
85439 - struct hda_codec_ops patch_ops;
85440 + hda_codec_ops_no_const patch_ops;
85441
85442 /* PCM to create, set by patch_ops.build_pcms callback */
85443 unsigned int num_pcms;
85444 diff --git a/sound/pci/hda/patch_atihdmi.c b/sound/pci/hda/patch_atihdmi.c
85445 index fb684f0..2b11cea 100644
85446 --- a/sound/pci/hda/patch_atihdmi.c
85447 +++ b/sound/pci/hda/patch_atihdmi.c
85448 @@ -177,7 +177,7 @@ static int patch_atihdmi(struct hda_codec *codec)
85449 */
85450 spec->multiout.dig_out_nid = CVT_NID;
85451
85452 - codec->patch_ops = atihdmi_patch_ops;
85453 + memcpy((void *)&codec->patch_ops, &atihdmi_patch_ops, sizeof(atihdmi_patch_ops));
85454
85455 return 0;
85456 }
85457 diff --git a/sound/pci/hda/patch_intelhdmi.c b/sound/pci/hda/patch_intelhdmi.c
85458 index 7c23016..c5bfdd7 100644
85459 --- a/sound/pci/hda/patch_intelhdmi.c
85460 +++ b/sound/pci/hda/patch_intelhdmi.c
85461 @@ -511,10 +511,10 @@ static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res)
85462 cp_ready);
85463
85464 /* TODO */
85465 - if (cp_state)
85466 - ;
85467 - if (cp_ready)
85468 - ;
85469 + if (cp_state) {
85470 + }
85471 + if (cp_ready) {
85472 + }
85473 }
85474
85475
85476 @@ -656,7 +656,7 @@ static int do_patch_intel_hdmi(struct hda_codec *codec)
85477 spec->multiout.dig_out_nid = cvt_nid;
85478
85479 codec->spec = spec;
85480 - codec->patch_ops = intel_hdmi_patch_ops;
85481 + memcpy((void *)&codec->patch_ops, &intel_hdmi_patch_ops, sizeof(intel_hdmi_patch_ops));
85482
85483 snd_hda_eld_proc_new(codec, &spec->sink_eld);
85484
85485 diff --git a/sound/pci/hda/patch_nvhdmi.c b/sound/pci/hda/patch_nvhdmi.c
85486 index 6afdab0..68ed352 100644
85487 --- a/sound/pci/hda/patch_nvhdmi.c
85488 +++ b/sound/pci/hda/patch_nvhdmi.c
85489 @@ -367,7 +367,7 @@ static int patch_nvhdmi_8ch(struct hda_codec *codec)
85490 spec->multiout.max_channels = 8;
85491 spec->multiout.dig_out_nid = Nv_Master_Convert_nid;
85492
85493 - codec->patch_ops = nvhdmi_patch_ops_8ch;
85494 + memcpy((void *)&codec->patch_ops, &nvhdmi_patch_ops_8ch, sizeof(nvhdmi_patch_ops_8ch));
85495
85496 return 0;
85497 }
85498 @@ -386,7 +386,7 @@ static int patch_nvhdmi_2ch(struct hda_codec *codec)
85499 spec->multiout.max_channels = 2;
85500 spec->multiout.dig_out_nid = Nv_Master_Convert_nid;
85501
85502 - codec->patch_ops = nvhdmi_patch_ops_2ch;
85503 + memcpy((void *)&codec->patch_ops, &nvhdmi_patch_ops_2ch, sizeof(nvhdmi_patch_ops_2ch));
85504
85505 return 0;
85506 }
85507 diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
85508 index 01da10b..01bd71f 100644
85509 --- a/sound/pci/hda/patch_sigmatel.c
85510 +++ b/sound/pci/hda/patch_sigmatel.c
85511 @@ -5220,7 +5220,7 @@ again:
85512 snd_hda_codec_write_cache(codec, nid, 0,
85513 AC_VERB_SET_CONNECT_SEL, num_dacs);
85514
85515 - codec->patch_ops = stac92xx_patch_ops;
85516 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
85517
85518 codec->proc_widget_hook = stac92hd_proc_hook;
85519
85520 @@ -5294,7 +5294,7 @@ static int patch_stac92hd71bxx(struct hda_codec *codec)
85521 return -ENOMEM;
85522
85523 codec->spec = spec;
85524 - codec->patch_ops = stac92xx_patch_ops;
85525 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
85526 spec->num_pins = STAC92HD71BXX_NUM_PINS;
85527 switch (codec->vendor_id) {
85528 case 0x111d76b6:
85529 diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
85530 index d063149..01599a4 100644
85531 --- a/sound/pci/ice1712/ice1712.h
85532 +++ b/sound/pci/ice1712/ice1712.h
85533 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
85534 unsigned int mask_flags; /* total mask bits */
85535 struct snd_akm4xxx_ops {
85536 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
85537 - } ops;
85538 + } __no_const ops;
85539 };
85540
85541 struct snd_ice1712_spdif {
85542 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
85543 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
85544 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
85545 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
85546 - } ops;
85547 + } __no_const ops;
85548 };
85549
85550
85551 diff --git a/sound/pci/intel8x0m.c b/sound/pci/intel8x0m.c
85552 index 9e7d12e..3e3bc64 100644
85553 --- a/sound/pci/intel8x0m.c
85554 +++ b/sound/pci/intel8x0m.c
85555 @@ -1264,7 +1264,7 @@ static struct shortname_table {
85556 { 0x5455, "ALi M5455" },
85557 { 0x746d, "AMD AMD8111" },
85558 #endif
85559 - { 0 },
85560 + { 0, },
85561 };
85562
85563 static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
85564 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
85565 index 5518371..45cf7ac 100644
85566 --- a/sound/pci/ymfpci/ymfpci_main.c
85567 +++ b/sound/pci/ymfpci/ymfpci_main.c
85568 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
85569 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
85570 break;
85571 }
85572 - if (atomic_read(&chip->interrupt_sleep_count)) {
85573 - atomic_set(&chip->interrupt_sleep_count, 0);
85574 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
85575 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
85576 wake_up(&chip->interrupt_sleep);
85577 }
85578 __end:
85579 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
85580 continue;
85581 init_waitqueue_entry(&wait, current);
85582 add_wait_queue(&chip->interrupt_sleep, &wait);
85583 - atomic_inc(&chip->interrupt_sleep_count);
85584 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
85585 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
85586 remove_wait_queue(&chip->interrupt_sleep, &wait);
85587 }
85588 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
85589 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
85590 spin_unlock(&chip->reg_lock);
85591
85592 - if (atomic_read(&chip->interrupt_sleep_count)) {
85593 - atomic_set(&chip->interrupt_sleep_count, 0);
85594 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
85595 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
85596 wake_up(&chip->interrupt_sleep);
85597 }
85598 }
85599 @@ -2369,7 +2369,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
85600 spin_lock_init(&chip->reg_lock);
85601 spin_lock_init(&chip->voice_lock);
85602 init_waitqueue_head(&chip->interrupt_sleep);
85603 - atomic_set(&chip->interrupt_sleep_count, 0);
85604 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
85605 chip->card = card;
85606 chip->pci = pci;
85607 chip->irq = -1;
85608 diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
85609 index 0a1b2f6..776bb19 100644
85610 --- a/sound/soc/soc-core.c
85611 +++ b/sound/soc/soc-core.c
85612 @@ -609,7 +609,7 @@ static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
85613 }
85614
85615 /* ASoC PCM operations */
85616 -static struct snd_pcm_ops soc_pcm_ops = {
85617 +static snd_pcm_ops_no_const soc_pcm_ops = {
85618 .open = soc_pcm_open,
85619 .close = soc_codec_close,
85620 .hw_params = soc_pcm_hw_params,
85621 diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c
85622 index 79633ea..9732e90 100644
85623 --- a/sound/usb/usbaudio.c
85624 +++ b/sound/usb/usbaudio.c
85625 @@ -963,12 +963,12 @@ static int snd_usb_pcm_playback_trigger(struct snd_pcm_substream *substream,
85626 switch (cmd) {
85627 case SNDRV_PCM_TRIGGER_START:
85628 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
85629 - subs->ops.prepare = prepare_playback_urb;
85630 + *(void **)&subs->ops.prepare = prepare_playback_urb;
85631 return 0;
85632 case SNDRV_PCM_TRIGGER_STOP:
85633 return deactivate_urbs(subs, 0, 0);
85634 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
85635 - subs->ops.prepare = prepare_nodata_playback_urb;
85636 + *(void **)&subs->ops.prepare = prepare_nodata_playback_urb;
85637 return 0;
85638 default:
85639 return -EINVAL;
85640 @@ -985,15 +985,15 @@ static int snd_usb_pcm_capture_trigger(struct snd_pcm_substream *substream,
85641
85642 switch (cmd) {
85643 case SNDRV_PCM_TRIGGER_START:
85644 - subs->ops.retire = retire_capture_urb;
85645 + *(void **)&subs->ops.retire = retire_capture_urb;
85646 return start_urbs(subs, substream->runtime);
85647 case SNDRV_PCM_TRIGGER_STOP:
85648 return deactivate_urbs(subs, 0, 0);
85649 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
85650 - subs->ops.retire = retire_paused_capture_urb;
85651 + *(void **)&subs->ops.retire = retire_paused_capture_urb;
85652 return 0;
85653 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
85654 - subs->ops.retire = retire_capture_urb;
85655 + *(void **)&subs->ops.retire = retire_capture_urb;
85656 return 0;
85657 default:
85658 return -EINVAL;
85659 @@ -1542,7 +1542,7 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
85660 /* for playback, submit the URBs now; otherwise, the first hwptr_done
85661 * updates for all URBs would happen at the same time when starting */
85662 if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) {
85663 - subs->ops.prepare = prepare_nodata_playback_urb;
85664 + *(void **)&subs->ops.prepare = prepare_nodata_playback_urb;
85665 return start_urbs(subs, runtime);
85666 } else
85667 return 0;
85668 @@ -2228,14 +2228,14 @@ static void init_substream(struct snd_usb_stream *as, int stream, struct audiofo
85669 subs->direction = stream;
85670 subs->dev = as->chip->dev;
85671 if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL) {
85672 - subs->ops = audio_urb_ops[stream];
85673 + memcpy((void *)&subs->ops, &audio_urb_ops[stream], sizeof(subs->ops));
85674 } else {
85675 - subs->ops = audio_urb_ops_high_speed[stream];
85676 + memcpy((void *)&subs->ops, &audio_urb_ops_high_speed[stream], sizeof(subs->ops));
85677 switch (as->chip->usb_id) {
85678 case USB_ID(0x041e, 0x3f02): /* E-Mu 0202 USB */
85679 case USB_ID(0x041e, 0x3f04): /* E-Mu 0404 USB */
85680 case USB_ID(0x041e, 0x3f0a): /* E-Mu Tracker Pre */
85681 - subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
85682 + *(void **)&subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
85683 break;
85684 }
85685 }
85686 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
85687 new file mode 100644
85688 index 0000000..b044b80
85689 --- /dev/null
85690 +++ b/tools/gcc/Makefile
85691 @@ -0,0 +1,21 @@
85692 +#CC := gcc
85693 +#PLUGIN_SOURCE_FILES := pax_plugin.c
85694 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
85695 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
85696 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
85697 +
85698 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
85699 +
85700 +hostlibs-y := constify_plugin.so
85701 +hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
85702 +hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
85703 +hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
85704 +hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
85705 +
85706 +always := $(hostlibs-y)
85707 +
85708 +constify_plugin-objs := constify_plugin.o
85709 +stackleak_plugin-objs := stackleak_plugin.o
85710 +kallocstat_plugin-objs := kallocstat_plugin.o
85711 +kernexec_plugin-objs := kernexec_plugin.o
85712 +checker_plugin-objs := checker_plugin.o
85713 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
85714 new file mode 100644
85715 index 0000000..d41b5af
85716 --- /dev/null
85717 +++ b/tools/gcc/checker_plugin.c
85718 @@ -0,0 +1,171 @@
85719 +/*
85720 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
85721 + * Licensed under the GPL v2
85722 + *
85723 + * Note: the choice of the license means that the compilation process is
85724 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
85725 + * but for the kernel it doesn't matter since it doesn't link against
85726 + * any of the gcc libraries
85727 + *
85728 + * gcc plugin to implement various sparse (source code checker) features
85729 + *
85730 + * TODO:
85731 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
85732 + *
85733 + * BUGS:
85734 + * - none known
85735 + */
85736 +#include "gcc-plugin.h"
85737 +#include "config.h"
85738 +#include "system.h"
85739 +#include "coretypes.h"
85740 +#include "tree.h"
85741 +#include "tree-pass.h"
85742 +#include "flags.h"
85743 +#include "intl.h"
85744 +#include "toplev.h"
85745 +#include "plugin.h"
85746 +//#include "expr.h" where are you...
85747 +#include "diagnostic.h"
85748 +#include "plugin-version.h"
85749 +#include "tm.h"
85750 +#include "function.h"
85751 +#include "basic-block.h"
85752 +#include "gimple.h"
85753 +#include "rtl.h"
85754 +#include "emit-rtl.h"
85755 +#include "tree-flow.h"
85756 +#include "target.h"
85757 +
85758 +extern void c_register_addr_space (const char *str, addr_space_t as);
85759 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
85760 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
85761 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
85762 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
85763 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
85764 +
85765 +extern void print_gimple_stmt(FILE *, gimple, int, int);
85766 +extern rtx emit_move_insn(rtx x, rtx y);
85767 +
85768 +int plugin_is_GPL_compatible;
85769 +
85770 +static struct plugin_info checker_plugin_info = {
85771 + .version = "201111150100",
85772 +};
85773 +
85774 +#define ADDR_SPACE_KERNEL 0
85775 +#define ADDR_SPACE_FORCE_KERNEL 1
85776 +#define ADDR_SPACE_USER 2
85777 +#define ADDR_SPACE_FORCE_USER 3
85778 +#define ADDR_SPACE_IOMEM 0
85779 +#define ADDR_SPACE_FORCE_IOMEM 0
85780 +#define ADDR_SPACE_PERCPU 0
85781 +#define ADDR_SPACE_FORCE_PERCPU 0
85782 +#define ADDR_SPACE_RCU 0
85783 +#define ADDR_SPACE_FORCE_RCU 0
85784 +
85785 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
85786 +{
85787 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
85788 +}
85789 +
85790 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
85791 +{
85792 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
85793 +}
85794 +
85795 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
85796 +{
85797 + return default_addr_space_valid_pointer_mode(mode, as);
85798 +}
85799 +
85800 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
85801 +{
85802 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
85803 +}
85804 +
85805 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
85806 +{
85807 + return default_addr_space_legitimize_address(x, oldx, mode, as);
85808 +}
85809 +
85810 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
85811 +{
85812 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
85813 + return true;
85814 +
85815 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
85816 + return true;
85817 +
85818 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
85819 + return true;
85820 +
85821 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
85822 + return true;
85823 +
85824 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
85825 + return true;
85826 +
85827 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
85828 + return true;
85829 +
85830 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
85831 + return true;
85832 +
85833 + return subset == superset;
85834 +}
85835 +
85836 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
85837 +{
85838 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
85839 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
85840 +
85841 + return op;
85842 +}
85843 +
85844 +static void register_checker_address_spaces(void *event_data, void *data)
85845 +{
85846 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
85847 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
85848 + c_register_addr_space("__user", ADDR_SPACE_USER);
85849 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
85850 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
85851 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
85852 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
85853 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
85854 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
85855 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
85856 +
85857 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
85858 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
85859 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
85860 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
85861 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
85862 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
85863 + targetm.addr_space.convert = checker_addr_space_convert;
85864 +}
85865 +
85866 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
85867 +{
85868 + const char * const plugin_name = plugin_info->base_name;
85869 + const int argc = plugin_info->argc;
85870 + const struct plugin_argument * const argv = plugin_info->argv;
85871 + int i;
85872 +
85873 + if (!plugin_default_version_check(version, &gcc_version)) {
85874 + error(G_("incompatible gcc/plugin versions"));
85875 + return 1;
85876 + }
85877 +
85878 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
85879 +
85880 + for (i = 0; i < argc; ++i)
85881 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
85882 +
85883 + if (TARGET_64BIT == 0)
85884 + return 0;
85885 +
85886 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
85887 +
85888 + return 0;
85889 +}
85890 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
85891 new file mode 100644
85892 index 0000000..704a564
85893 --- /dev/null
85894 +++ b/tools/gcc/constify_plugin.c
85895 @@ -0,0 +1,303 @@
85896 +/*
85897 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
85898 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
85899 + * Licensed under the GPL v2, or (at your option) v3
85900 + *
85901 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
85902 + *
85903 + * Homepage:
85904 + * http://www.grsecurity.net/~ephox/const_plugin/
85905 + *
85906 + * Usage:
85907 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
85908 + * $ gcc -fplugin=constify_plugin.so test.c -O2
85909 + */
85910 +
85911 +#include "gcc-plugin.h"
85912 +#include "config.h"
85913 +#include "system.h"
85914 +#include "coretypes.h"
85915 +#include "tree.h"
85916 +#include "tree-pass.h"
85917 +#include "flags.h"
85918 +#include "intl.h"
85919 +#include "toplev.h"
85920 +#include "plugin.h"
85921 +#include "diagnostic.h"
85922 +#include "plugin-version.h"
85923 +#include "tm.h"
85924 +#include "function.h"
85925 +#include "basic-block.h"
85926 +#include "gimple.h"
85927 +#include "rtl.h"
85928 +#include "emit-rtl.h"
85929 +#include "tree-flow.h"
85930 +
85931 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
85932 +
85933 +int plugin_is_GPL_compatible;
85934 +
85935 +static struct plugin_info const_plugin_info = {
85936 + .version = "201111150100",
85937 + .help = "no-constify\tturn off constification\n",
85938 +};
85939 +
85940 +static void constify_type(tree type);
85941 +static bool walk_struct(tree node);
85942 +
85943 +static tree deconstify_type(tree old_type)
85944 +{
85945 + tree new_type, field;
85946 +
85947 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
85948 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
85949 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
85950 + DECL_FIELD_CONTEXT(field) = new_type;
85951 + TYPE_READONLY(new_type) = 0;
85952 + C_TYPE_FIELDS_READONLY(new_type) = 0;
85953 + return new_type;
85954 +}
85955 +
85956 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
85957 +{
85958 + tree type;
85959 +
85960 + *no_add_attrs = true;
85961 + if (TREE_CODE(*node) == FUNCTION_DECL) {
85962 + error("%qE attribute does not apply to functions", name);
85963 + return NULL_TREE;
85964 + }
85965 +
85966 + if (TREE_CODE(*node) == VAR_DECL) {
85967 + error("%qE attribute does not apply to variables", name);
85968 + return NULL_TREE;
85969 + }
85970 +
85971 + if (TYPE_P(*node)) {
85972 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
85973 + *no_add_attrs = false;
85974 + else
85975 + error("%qE attribute applies to struct and union types only", name);
85976 + return NULL_TREE;
85977 + }
85978 +
85979 + type = TREE_TYPE(*node);
85980 +
85981 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
85982 + error("%qE attribute applies to struct and union types only", name);
85983 + return NULL_TREE;
85984 + }
85985 +
85986 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
85987 + error("%qE attribute is already applied to the type", name);
85988 + return NULL_TREE;
85989 + }
85990 +
85991 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
85992 + error("%qE attribute used on type that is not constified", name);
85993 + return NULL_TREE;
85994 + }
85995 +
85996 + if (TREE_CODE(*node) == TYPE_DECL) {
85997 + TREE_TYPE(*node) = deconstify_type(type);
85998 + TREE_READONLY(*node) = 0;
85999 + return NULL_TREE;
86000 + }
86001 +
86002 + return NULL_TREE;
86003 +}
86004 +
86005 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
86006 +{
86007 + *no_add_attrs = true;
86008 + if (!TYPE_P(*node)) {
86009 + error("%qE attribute applies to types only", name);
86010 + return NULL_TREE;
86011 + }
86012 +
86013 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
86014 + error("%qE attribute applies to struct and union types only", name);
86015 + return NULL_TREE;
86016 + }
86017 +
86018 + *no_add_attrs = false;
86019 + constify_type(*node);
86020 + return NULL_TREE;
86021 +}
86022 +
86023 +static struct attribute_spec no_const_attr = {
86024 + .name = "no_const",
86025 + .min_length = 0,
86026 + .max_length = 0,
86027 + .decl_required = false,
86028 + .type_required = false,
86029 + .function_type_required = false,
86030 + .handler = handle_no_const_attribute,
86031 +#if BUILDING_GCC_VERSION >= 4007
86032 + .affects_type_identity = true
86033 +#endif
86034 +};
86035 +
86036 +static struct attribute_spec do_const_attr = {
86037 + .name = "do_const",
86038 + .min_length = 0,
86039 + .max_length = 0,
86040 + .decl_required = false,
86041 + .type_required = false,
86042 + .function_type_required = false,
86043 + .handler = handle_do_const_attribute,
86044 +#if BUILDING_GCC_VERSION >= 4007
86045 + .affects_type_identity = true
86046 +#endif
86047 +};
86048 +
86049 +static void register_attributes(void *event_data, void *data)
86050 +{
86051 + register_attribute(&no_const_attr);
86052 + register_attribute(&do_const_attr);
86053 +}
86054 +
86055 +static void constify_type(tree type)
86056 +{
86057 + TYPE_READONLY(type) = 1;
86058 + C_TYPE_FIELDS_READONLY(type) = 1;
86059 +}
86060 +
86061 +static bool is_fptr(tree field)
86062 +{
86063 + tree ptr = TREE_TYPE(field);
86064 +
86065 + if (TREE_CODE(ptr) != POINTER_TYPE)
86066 + return false;
86067 +
86068 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
86069 +}
86070 +
86071 +static bool walk_struct(tree node)
86072 +{
86073 + tree field;
86074 +
86075 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
86076 + return false;
86077 +
86078 + if (TYPE_FIELDS(node) == NULL_TREE)
86079 + return false;
86080 +
86081 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
86082 + tree type = TREE_TYPE(field);
86083 + enum tree_code code = TREE_CODE(type);
86084 + if (code == RECORD_TYPE || code == UNION_TYPE) {
86085 + if (!(walk_struct(type)))
86086 + return false;
86087 + } else if (!is_fptr(field) && !TREE_READONLY(field))
86088 + return false;
86089 + }
86090 + return true;
86091 +}
86092 +
86093 +static void finish_type(void *event_data, void *data)
86094 +{
86095 + tree type = (tree)event_data;
86096 +
86097 + if (type == NULL_TREE)
86098 + return;
86099 +
86100 + if (TYPE_READONLY(type))
86101 + return;
86102 +
86103 + if (walk_struct(type))
86104 + constify_type(type);
86105 +}
86106 +
86107 +static unsigned int check_local_variables(void);
86108 +
86109 +struct gimple_opt_pass pass_local_variable = {
86110 + {
86111 + .type = GIMPLE_PASS,
86112 + .name = "check_local_variables",
86113 + .gate = NULL,
86114 + .execute = check_local_variables,
86115 + .sub = NULL,
86116 + .next = NULL,
86117 + .static_pass_number = 0,
86118 + .tv_id = TV_NONE,
86119 + .properties_required = 0,
86120 + .properties_provided = 0,
86121 + .properties_destroyed = 0,
86122 + .todo_flags_start = 0,
86123 + .todo_flags_finish = 0
86124 + }
86125 +};
86126 +
86127 +static unsigned int check_local_variables(void)
86128 +{
86129 + tree var;
86130 + referenced_var_iterator rvi;
86131 +
86132 +#if BUILDING_GCC_VERSION == 4005
86133 + FOR_EACH_REFERENCED_VAR(var, rvi) {
86134 +#else
86135 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
86136 +#endif
86137 + tree type = TREE_TYPE(var);
86138 +
86139 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
86140 + continue;
86141 +
86142 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
86143 + continue;
86144 +
86145 + if (!TYPE_READONLY(type))
86146 + continue;
86147 +
86148 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
86149 +// continue;
86150 +
86151 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
86152 +// continue;
86153 +
86154 + if (walk_struct(type)) {
86155 + error("constified variable %qE cannot be local", var);
86156 + return 1;
86157 + }
86158 + }
86159 + return 0;
86160 +}
86161 +
86162 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
86163 +{
86164 + const char * const plugin_name = plugin_info->base_name;
86165 + const int argc = plugin_info->argc;
86166 + const struct plugin_argument * const argv = plugin_info->argv;
86167 + int i;
86168 + bool constify = true;
86169 +
86170 + struct register_pass_info local_variable_pass_info = {
86171 + .pass = &pass_local_variable.pass,
86172 + .reference_pass_name = "*referenced_vars",
86173 + .ref_pass_instance_number = 0,
86174 + .pos_op = PASS_POS_INSERT_AFTER
86175 + };
86176 +
86177 + if (!plugin_default_version_check(version, &gcc_version)) {
86178 + error(G_("incompatible gcc/plugin versions"));
86179 + return 1;
86180 + }
86181 +
86182 + for (i = 0; i < argc; ++i) {
86183 + if (!(strcmp(argv[i].key, "no-constify"))) {
86184 + constify = false;
86185 + continue;
86186 + }
86187 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
86188 + }
86189 +
86190 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
86191 + if (constify) {
86192 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
86193 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
86194 + }
86195 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
86196 +
86197 + return 0;
86198 +}
86199 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
86200 new file mode 100644
86201 index 0000000..a5eabce
86202 --- /dev/null
86203 +++ b/tools/gcc/kallocstat_plugin.c
86204 @@ -0,0 +1,167 @@
86205 +/*
86206 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
86207 + * Licensed under the GPL v2
86208 + *
86209 + * Note: the choice of the license means that the compilation process is
86210 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
86211 + * but for the kernel it doesn't matter since it doesn't link against
86212 + * any of the gcc libraries
86213 + *
86214 + * gcc plugin to find the distribution of k*alloc sizes
86215 + *
86216 + * TODO:
86217 + *
86218 + * BUGS:
86219 + * - none known
86220 + */
86221 +#include "gcc-plugin.h"
86222 +#include "config.h"
86223 +#include "system.h"
86224 +#include "coretypes.h"
86225 +#include "tree.h"
86226 +#include "tree-pass.h"
86227 +#include "flags.h"
86228 +#include "intl.h"
86229 +#include "toplev.h"
86230 +#include "plugin.h"
86231 +//#include "expr.h" where are you...
86232 +#include "diagnostic.h"
86233 +#include "plugin-version.h"
86234 +#include "tm.h"
86235 +#include "function.h"
86236 +#include "basic-block.h"
86237 +#include "gimple.h"
86238 +#include "rtl.h"
86239 +#include "emit-rtl.h"
86240 +
86241 +extern void print_gimple_stmt(FILE *, gimple, int, int);
86242 +
86243 +int plugin_is_GPL_compatible;
86244 +
86245 +static const char * const kalloc_functions[] = {
86246 + "__kmalloc",
86247 + "kmalloc",
86248 + "kmalloc_large",
86249 + "kmalloc_node",
86250 + "kmalloc_order",
86251 + "kmalloc_order_trace",
86252 + "kmalloc_slab",
86253 + "kzalloc",
86254 + "kzalloc_node",
86255 +};
86256 +
86257 +static struct plugin_info kallocstat_plugin_info = {
86258 + .version = "201111150100",
86259 +};
86260 +
86261 +static unsigned int execute_kallocstat(void);
86262 +
86263 +static struct gimple_opt_pass kallocstat_pass = {
86264 + .pass = {
86265 + .type = GIMPLE_PASS,
86266 + .name = "kallocstat",
86267 + .gate = NULL,
86268 + .execute = execute_kallocstat,
86269 + .sub = NULL,
86270 + .next = NULL,
86271 + .static_pass_number = 0,
86272 + .tv_id = TV_NONE,
86273 + .properties_required = 0,
86274 + .properties_provided = 0,
86275 + .properties_destroyed = 0,
86276 + .todo_flags_start = 0,
86277 + .todo_flags_finish = 0
86278 + }
86279 +};
86280 +
86281 +static bool is_kalloc(const char *fnname)
86282 +{
86283 + size_t i;
86284 +
86285 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
86286 + if (!strcmp(fnname, kalloc_functions[i]))
86287 + return true;
86288 + return false;
86289 +}
86290 +
86291 +static unsigned int execute_kallocstat(void)
86292 +{
86293 + basic_block bb;
86294 +
86295 + // 1. loop through BBs and GIMPLE statements
86296 + FOR_EACH_BB(bb) {
86297 + gimple_stmt_iterator gsi;
86298 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
86299 + // gimple match:
86300 + tree fndecl, size;
86301 + gimple call_stmt;
86302 + const char *fnname;
86303 +
86304 + // is it a call
86305 + call_stmt = gsi_stmt(gsi);
86306 + if (!is_gimple_call(call_stmt))
86307 + continue;
86308 + fndecl = gimple_call_fndecl(call_stmt);
86309 + if (fndecl == NULL_TREE)
86310 + continue;
86311 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
86312 + continue;
86313 +
86314 + // is it a call to k*alloc
86315 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
86316 + if (!is_kalloc(fnname))
86317 + continue;
86318 +
86319 + // is the size arg the result of a simple const assignment
86320 + size = gimple_call_arg(call_stmt, 0);
86321 + while (true) {
86322 + gimple def_stmt;
86323 + expanded_location xloc;
86324 + size_t size_val;
86325 +
86326 + if (TREE_CODE(size) != SSA_NAME)
86327 + break;
86328 + def_stmt = SSA_NAME_DEF_STMT(size);
86329 + if (!def_stmt || !is_gimple_assign(def_stmt))
86330 + break;
86331 + if (gimple_num_ops(def_stmt) != 2)
86332 + break;
86333 + size = gimple_assign_rhs1(def_stmt);
86334 + if (!TREE_CONSTANT(size))
86335 + continue;
86336 + xloc = expand_location(gimple_location(def_stmt));
86337 + if (!xloc.file)
86338 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
86339 + size_val = TREE_INT_CST_LOW(size);
86340 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
86341 + break;
86342 + }
86343 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
86344 +//debug_tree(gimple_call_fn(call_stmt));
86345 +//print_node(stderr, "pax", fndecl, 4);
86346 + }
86347 + }
86348 +
86349 + return 0;
86350 +}
86351 +
86352 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
86353 +{
86354 + const char * const plugin_name = plugin_info->base_name;
86355 + struct register_pass_info kallocstat_pass_info = {
86356 + .pass = &kallocstat_pass.pass,
86357 + .reference_pass_name = "ssa",
86358 + .ref_pass_instance_number = 0,
86359 + .pos_op = PASS_POS_INSERT_AFTER
86360 + };
86361 +
86362 + if (!plugin_default_version_check(version, &gcc_version)) {
86363 + error(G_("incompatible gcc/plugin versions"));
86364 + return 1;
86365 + }
86366 +
86367 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
86368 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
86369 +
86370 + return 0;
86371 +}
86372 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
86373 new file mode 100644
86374 index 0000000..51f747e
86375 --- /dev/null
86376 +++ b/tools/gcc/kernexec_plugin.c
86377 @@ -0,0 +1,348 @@
86378 +/*
86379 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
86380 + * Licensed under the GPL v2
86381 + *
86382 + * Note: the choice of the license means that the compilation process is
86383 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
86384 + * but for the kernel it doesn't matter since it doesn't link against
86385 + * any of the gcc libraries
86386 + *
86387 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
86388 + *
86389 + * TODO:
86390 + *
86391 + * BUGS:
86392 + * - none known
86393 + */
86394 +#include "gcc-plugin.h"
86395 +#include "config.h"
86396 +#include "system.h"
86397 +#include "coretypes.h"
86398 +#include "tree.h"
86399 +#include "tree-pass.h"
86400 +#include "flags.h"
86401 +#include "intl.h"
86402 +#include "toplev.h"
86403 +#include "plugin.h"
86404 +//#include "expr.h" where are you...
86405 +#include "diagnostic.h"
86406 +#include "plugin-version.h"
86407 +#include "tm.h"
86408 +#include "function.h"
86409 +#include "basic-block.h"
86410 +#include "gimple.h"
86411 +#include "rtl.h"
86412 +#include "emit-rtl.h"
86413 +#include "tree-flow.h"
86414 +
86415 +extern void print_gimple_stmt(FILE *, gimple, int, int);
86416 +extern rtx emit_move_insn(rtx x, rtx y);
86417 +
86418 +int plugin_is_GPL_compatible;
86419 +
86420 +static struct plugin_info kernexec_plugin_info = {
86421 + .version = "201111291120",
86422 + .help = "method=[bts|or]\tinstrumentation method\n"
86423 +};
86424 +
86425 +static unsigned int execute_kernexec_fptr(void);
86426 +static unsigned int execute_kernexec_retaddr(void);
86427 +static bool kernexec_cmodel_check(void);
86428 +
86429 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator);
86430 +static void (*kernexec_instrument_retaddr)(rtx);
86431 +
86432 +static struct gimple_opt_pass kernexec_fptr_pass = {
86433 + .pass = {
86434 + .type = GIMPLE_PASS,
86435 + .name = "kernexec_fptr",
86436 + .gate = kernexec_cmodel_check,
86437 + .execute = execute_kernexec_fptr,
86438 + .sub = NULL,
86439 + .next = NULL,
86440 + .static_pass_number = 0,
86441 + .tv_id = TV_NONE,
86442 + .properties_required = 0,
86443 + .properties_provided = 0,
86444 + .properties_destroyed = 0,
86445 + .todo_flags_start = 0,
86446 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
86447 + }
86448 +};
86449 +
86450 +static struct rtl_opt_pass kernexec_retaddr_pass = {
86451 + .pass = {
86452 + .type = RTL_PASS,
86453 + .name = "kernexec_retaddr",
86454 + .gate = kernexec_cmodel_check,
86455 + .execute = execute_kernexec_retaddr,
86456 + .sub = NULL,
86457 + .next = NULL,
86458 + .static_pass_number = 0,
86459 + .tv_id = TV_NONE,
86460 + .properties_required = 0,
86461 + .properties_provided = 0,
86462 + .properties_destroyed = 0,
86463 + .todo_flags_start = 0,
86464 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
86465 + }
86466 +};
86467 +
86468 +static bool kernexec_cmodel_check(void)
86469 +{
86470 + tree section;
86471 +
86472 + if (ix86_cmodel != CM_KERNEL)
86473 + return false;
86474 +
86475 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
86476 + if (!section || !TREE_VALUE(section))
86477 + return true;
86478 +
86479 + section = TREE_VALUE(TREE_VALUE(section));
86480 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
86481 + return true;
86482 +
86483 + return false;
86484 +}
86485 +
86486 +/*
86487 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
86488 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
86489 + */
86490 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator gsi)
86491 +{
86492 + gimple assign_intptr, assign_new_fptr, call_stmt;
86493 + tree intptr, old_fptr, new_fptr, kernexec_mask;
86494 +
86495 + call_stmt = gsi_stmt(gsi);
86496 + old_fptr = gimple_call_fn(call_stmt);
86497 +
86498 + // create temporary unsigned long variable used for bitops and cast fptr to it
86499 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
86500 + add_referenced_var(intptr);
86501 + mark_sym_for_renaming(intptr);
86502 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
86503 + gsi_insert_before(&gsi, assign_intptr, GSI_SAME_STMT);
86504 + update_stmt(assign_intptr);
86505 +
86506 + // apply logical or to temporary unsigned long and bitmask
86507 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
86508 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
86509 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
86510 + gsi_insert_before(&gsi, assign_intptr, GSI_SAME_STMT);
86511 + update_stmt(assign_intptr);
86512 +
86513 + // cast temporary unsigned long back to a temporary fptr variable
86514 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec");
86515 + add_referenced_var(new_fptr);
86516 + mark_sym_for_renaming(new_fptr);
86517 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
86518 + gsi_insert_before(&gsi, assign_new_fptr, GSI_SAME_STMT);
86519 + update_stmt(assign_new_fptr);
86520 +
86521 + // replace call stmt fn with the new fptr
86522 + gimple_call_set_fn(call_stmt, new_fptr);
86523 + update_stmt(call_stmt);
86524 +}
86525 +
86526 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator gsi)
86527 +{
86528 + gimple asm_or_stmt, call_stmt;
86529 + tree old_fptr, new_fptr, input, output;
86530 + VEC(tree, gc) *inputs = NULL;
86531 + VEC(tree, gc) *outputs = NULL;
86532 +
86533 + call_stmt = gsi_stmt(gsi);
86534 + old_fptr = gimple_call_fn(call_stmt);
86535 +
86536 + // create temporary fptr variable
86537 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
86538 + add_referenced_var(new_fptr);
86539 + mark_sym_for_renaming(new_fptr);
86540 +
86541 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
86542 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
86543 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
86544 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
86545 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
86546 + VEC_safe_push(tree, gc, inputs, input);
86547 + VEC_safe_push(tree, gc, outputs, output);
86548 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
86549 + gimple_asm_set_volatile(asm_or_stmt, true);
86550 + gsi_insert_before(&gsi, asm_or_stmt, GSI_SAME_STMT);
86551 + update_stmt(asm_or_stmt);
86552 +
86553 + // replace call stmt fn with the new fptr
86554 + gimple_call_set_fn(call_stmt, new_fptr);
86555 + update_stmt(call_stmt);
86556 +}
86557 +
86558 +/*
86559 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
86560 + */
86561 +static unsigned int execute_kernexec_fptr(void)
86562 +{
86563 + basic_block bb;
86564 + gimple_stmt_iterator gsi;
86565 +
86566 + // 1. loop through BBs and GIMPLE statements
86567 + FOR_EACH_BB(bb) {
86568 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
86569 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
86570 + tree fn;
86571 + gimple call_stmt;
86572 +
86573 + // is it a call ...
86574 + call_stmt = gsi_stmt(gsi);
86575 + if (!is_gimple_call(call_stmt))
86576 + continue;
86577 + fn = gimple_call_fn(call_stmt);
86578 + if (TREE_CODE(fn) == ADDR_EXPR)
86579 + continue;
86580 + if (TREE_CODE(fn) != SSA_NAME)
86581 + gcc_unreachable();
86582 +
86583 + // ... through a function pointer
86584 + fn = SSA_NAME_VAR(fn);
86585 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
86586 + continue;
86587 + fn = TREE_TYPE(fn);
86588 + if (TREE_CODE(fn) != POINTER_TYPE)
86589 + continue;
86590 + fn = TREE_TYPE(fn);
86591 + if (TREE_CODE(fn) != FUNCTION_TYPE)
86592 + continue;
86593 +
86594 + kernexec_instrument_fptr(gsi);
86595 +
86596 +//debug_tree(gimple_call_fn(call_stmt));
86597 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
86598 + }
86599 + }
86600 +
86601 + return 0;
86602 +}
86603 +
86604 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
86605 +static void kernexec_instrument_retaddr_bts(rtx insn)
86606 +{
86607 + rtx btsq;
86608 + rtvec argvec, constraintvec, labelvec;
86609 + int line;
86610 +
86611 + // create asm volatile("btsq $63,(%%rsp)":::)
86612 + argvec = rtvec_alloc(0);
86613 + constraintvec = rtvec_alloc(0);
86614 + labelvec = rtvec_alloc(0);
86615 + line = expand_location(RTL_LOCATION(insn)).line;
86616 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
86617 + MEM_VOLATILE_P(btsq) = 1;
86618 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
86619 + emit_insn_before(btsq, insn);
86620 +}
86621 +
86622 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
86623 +static void kernexec_instrument_retaddr_or(rtx insn)
86624 +{
86625 + rtx orq;
86626 + rtvec argvec, constraintvec, labelvec;
86627 + int line;
86628 +
86629 + // create asm volatile("orq %%r10,(%%rsp)":::)
86630 + argvec = rtvec_alloc(0);
86631 + constraintvec = rtvec_alloc(0);
86632 + labelvec = rtvec_alloc(0);
86633 + line = expand_location(RTL_LOCATION(insn)).line;
86634 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
86635 + MEM_VOLATILE_P(orq) = 1;
86636 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
86637 + emit_insn_before(orq, insn);
86638 +}
86639 +
86640 +/*
86641 + * find all asm level function returns and forcibly set the highest bit of the return address
86642 + */
86643 +static unsigned int execute_kernexec_retaddr(void)
86644 +{
86645 + rtx insn;
86646 +
86647 + // 1. find function returns
86648 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
86649 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
86650 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
86651 + rtx body;
86652 +
86653 + // is it a retn
86654 + if (!JUMP_P(insn))
86655 + continue;
86656 + body = PATTERN(insn);
86657 + if (GET_CODE(body) == PARALLEL)
86658 + body = XVECEXP(body, 0, 0);
86659 + if (GET_CODE(body) != RETURN)
86660 + continue;
86661 + kernexec_instrument_retaddr(insn);
86662 + }
86663 +
86664 +// print_simple_rtl(stderr, get_insns());
86665 +// print_rtl(stderr, get_insns());
86666 +
86667 + return 0;
86668 +}
86669 +
86670 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
86671 +{
86672 + const char * const plugin_name = plugin_info->base_name;
86673 + const int argc = plugin_info->argc;
86674 + const struct plugin_argument * const argv = plugin_info->argv;
86675 + int i;
86676 + struct register_pass_info kernexec_fptr_pass_info = {
86677 + .pass = &kernexec_fptr_pass.pass,
86678 + .reference_pass_name = "ssa",
86679 + .ref_pass_instance_number = 0,
86680 + .pos_op = PASS_POS_INSERT_AFTER
86681 + };
86682 + struct register_pass_info kernexec_retaddr_pass_info = {
86683 + .pass = &kernexec_retaddr_pass.pass,
86684 + .reference_pass_name = "pro_and_epilogue",
86685 + .ref_pass_instance_number = 0,
86686 + .pos_op = PASS_POS_INSERT_AFTER
86687 + };
86688 +
86689 + if (!plugin_default_version_check(version, &gcc_version)) {
86690 + error(G_("incompatible gcc/plugin versions"));
86691 + return 1;
86692 + }
86693 +
86694 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
86695 +
86696 + if (TARGET_64BIT == 0)
86697 + return 0;
86698 +
86699 + for (i = 0; i < argc; ++i) {
86700 + if (!strcmp(argv[i].key, "method")) {
86701 + if (!argv[i].value) {
86702 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
86703 + continue;
86704 + }
86705 + if (!strcmp(argv[i].value, "bts")) {
86706 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
86707 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
86708 + } else if (!strcmp(argv[i].value, "or")) {
86709 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
86710 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
86711 + fix_register("r10", 1, 1);
86712 + } else
86713 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
86714 + continue;
86715 + }
86716 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
86717 + }
86718 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
86719 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
86720 +
86721 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
86722 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
86723 +
86724 + return 0;
86725 +}
86726 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
86727 new file mode 100644
86728 index 0000000..d44f37c
86729 --- /dev/null
86730 +++ b/tools/gcc/stackleak_plugin.c
86731 @@ -0,0 +1,291 @@
86732 +/*
86733 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
86734 + * Licensed under the GPL v2
86735 + *
86736 + * Note: the choice of the license means that the compilation process is
86737 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
86738 + * but for the kernel it doesn't matter since it doesn't link against
86739 + * any of the gcc libraries
86740 + *
86741 + * gcc plugin to help implement various PaX features
86742 + *
86743 + * - track lowest stack pointer
86744 + *
86745 + * TODO:
86746 + * - initialize all local variables
86747 + *
86748 + * BUGS:
86749 + * - none known
86750 + */
86751 +#include "gcc-plugin.h"
86752 +#include "config.h"
86753 +#include "system.h"
86754 +#include "coretypes.h"
86755 +#include "tree.h"
86756 +#include "tree-pass.h"
86757 +#include "flags.h"
86758 +#include "intl.h"
86759 +#include "toplev.h"
86760 +#include "plugin.h"
86761 +//#include "expr.h" where are you...
86762 +#include "diagnostic.h"
86763 +#include "plugin-version.h"
86764 +#include "tm.h"
86765 +#include "function.h"
86766 +#include "basic-block.h"
86767 +#include "gimple.h"
86768 +#include "rtl.h"
86769 +#include "emit-rtl.h"
86770 +
86771 +extern void print_gimple_stmt(FILE *, gimple, int, int);
86772 +
86773 +int plugin_is_GPL_compatible;
86774 +
86775 +static int track_frame_size = -1;
86776 +static const char track_function[] = "pax_track_stack";
86777 +static const char check_function[] = "pax_check_alloca";
86778 +static bool init_locals;
86779 +
86780 +static struct plugin_info stackleak_plugin_info = {
86781 + .version = "201111150100",
86782 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
86783 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
86784 +};
86785 +
86786 +static bool gate_stackleak_track_stack(void);
86787 +static unsigned int execute_stackleak_tree_instrument(void);
86788 +static unsigned int execute_stackleak_final(void);
86789 +
86790 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
86791 + .pass = {
86792 + .type = GIMPLE_PASS,
86793 + .name = "stackleak_tree_instrument",
86794 + .gate = gate_stackleak_track_stack,
86795 + .execute = execute_stackleak_tree_instrument,
86796 + .sub = NULL,
86797 + .next = NULL,
86798 + .static_pass_number = 0,
86799 + .tv_id = TV_NONE,
86800 + .properties_required = PROP_gimple_leh | PROP_cfg,
86801 + .properties_provided = 0,
86802 + .properties_destroyed = 0,
86803 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
86804 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
86805 + }
86806 +};
86807 +
86808 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
86809 + .pass = {
86810 + .type = RTL_PASS,
86811 + .name = "stackleak_final",
86812 + .gate = gate_stackleak_track_stack,
86813 + .execute = execute_stackleak_final,
86814 + .sub = NULL,
86815 + .next = NULL,
86816 + .static_pass_number = 0,
86817 + .tv_id = TV_NONE,
86818 + .properties_required = 0,
86819 + .properties_provided = 0,
86820 + .properties_destroyed = 0,
86821 + .todo_flags_start = 0,
86822 + .todo_flags_finish = TODO_dump_func
86823 + }
86824 +};
86825 +
86826 +static bool gate_stackleak_track_stack(void)
86827 +{
86828 + return track_frame_size >= 0;
86829 +}
86830 +
86831 +static void stackleak_check_alloca(gimple_stmt_iterator gsi)
86832 +{
86833 + gimple check_alloca;
86834 + tree fndecl, fntype, alloca_size;
86835 +
86836 + // insert call to void pax_check_alloca(unsigned long size)
86837 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
86838 + fndecl = build_fn_decl(check_function, fntype);
86839 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
86840 + alloca_size = gimple_call_arg(gsi_stmt(gsi), 0);
86841 + check_alloca = gimple_build_call(fndecl, 1, alloca_size);
86842 + gsi_insert_before(&gsi, check_alloca, GSI_CONTINUE_LINKING);
86843 +}
86844 +
86845 +static void stackleak_add_instrumentation(gimple_stmt_iterator gsi)
86846 +{
86847 + gimple track_stack;
86848 + tree fndecl, fntype;
86849 +
86850 + // insert call to void pax_track_stack(void)
86851 + fntype = build_function_type_list(void_type_node, NULL_TREE);
86852 + fndecl = build_fn_decl(track_function, fntype);
86853 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
86854 + track_stack = gimple_build_call(fndecl, 0);
86855 + gsi_insert_after(&gsi, track_stack, GSI_CONTINUE_LINKING);
86856 +}
86857 +
86858 +#if BUILDING_GCC_VERSION == 4005
86859 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
86860 +{
86861 + tree fndecl;
86862 +
86863 + if (!is_gimple_call(stmt))
86864 + return false;
86865 + fndecl = gimple_call_fndecl(stmt);
86866 + if (!fndecl)
86867 + return false;
86868 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
86869 + return false;
86870 +// print_node(stderr, "pax", fndecl, 4);
86871 + return DECL_FUNCTION_CODE(fndecl) == code;
86872 +}
86873 +#endif
86874 +
86875 +static bool is_alloca(gimple stmt)
86876 +{
86877 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
86878 + return true;
86879 +
86880 +#if BUILDING_GCC_VERSION >= 4007
86881 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
86882 + return true;
86883 +#endif
86884 +
86885 + return false;
86886 +}
86887 +
86888 +static unsigned int execute_stackleak_tree_instrument(void)
86889 +{
86890 + basic_block bb, entry_bb;
86891 + bool prologue_instrumented = false;
86892 +
86893 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
86894 +
86895 + // 1. loop through BBs and GIMPLE statements
86896 + FOR_EACH_BB(bb) {
86897 + gimple_stmt_iterator gsi;
86898 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
86899 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
86900 + if (!is_alloca(gsi_stmt(gsi)))
86901 + continue;
86902 +
86903 + // 2. insert stack overflow check before each __builtin_alloca call
86904 + stackleak_check_alloca(gsi);
86905 +
86906 + // 3. insert track call after each __builtin_alloca call
86907 + stackleak_add_instrumentation(gsi);
86908 + if (bb == entry_bb)
86909 + prologue_instrumented = true;
86910 + }
86911 + }
86912 +
86913 + // 4. insert track call at the beginning
86914 + if (!prologue_instrumented) {
86915 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
86916 + if (dom_info_available_p(CDI_DOMINATORS))
86917 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
86918 + stackleak_add_instrumentation(gsi_start_bb(bb));
86919 + }
86920 +
86921 + return 0;
86922 +}
86923 +
86924 +static unsigned int execute_stackleak_final(void)
86925 +{
86926 + rtx insn;
86927 +
86928 + if (cfun->calls_alloca)
86929 + return 0;
86930 +
86931 + // keep calls only if function frame is big enough
86932 + if (get_frame_size() >= track_frame_size)
86933 + return 0;
86934 +
86935 + // 1. find pax_track_stack calls
86936 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
86937 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
86938 + rtx body;
86939 +
86940 + if (!CALL_P(insn))
86941 + continue;
86942 + body = PATTERN(insn);
86943 + if (GET_CODE(body) != CALL)
86944 + continue;
86945 + body = XEXP(body, 0);
86946 + if (GET_CODE(body) != MEM)
86947 + continue;
86948 + body = XEXP(body, 0);
86949 + if (GET_CODE(body) != SYMBOL_REF)
86950 + continue;
86951 + if (strcmp(XSTR(body, 0), track_function))
86952 + continue;
86953 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
86954 + // 2. delete call
86955 + insn = delete_insn_and_edges(insn);
86956 +#if BUILDING_GCC_VERSION >= 4007
86957 + if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
86958 + insn = delete_insn_and_edges(insn);
86959 +#endif
86960 + }
86961 +
86962 +// print_simple_rtl(stderr, get_insns());
86963 +// print_rtl(stderr, get_insns());
86964 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
86965 +
86966 + return 0;
86967 +}
86968 +
86969 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
86970 +{
86971 + const char * const plugin_name = plugin_info->base_name;
86972 + const int argc = plugin_info->argc;
86973 + const struct plugin_argument * const argv = plugin_info->argv;
86974 + int i;
86975 + struct register_pass_info stackleak_tree_instrument_pass_info = {
86976 + .pass = &stackleak_tree_instrument_pass.pass,
86977 +// .reference_pass_name = "tree_profile",
86978 + .reference_pass_name = "optimized",
86979 + .ref_pass_instance_number = 0,
86980 + .pos_op = PASS_POS_INSERT_AFTER
86981 + };
86982 + struct register_pass_info stackleak_final_pass_info = {
86983 + .pass = &stackleak_final_rtl_opt_pass.pass,
86984 + .reference_pass_name = "final",
86985 + .ref_pass_instance_number = 0,
86986 + .pos_op = PASS_POS_INSERT_BEFORE
86987 + };
86988 +
86989 + if (!plugin_default_version_check(version, &gcc_version)) {
86990 + error(G_("incompatible gcc/plugin versions"));
86991 + return 1;
86992 + }
86993 +
86994 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
86995 +
86996 + for (i = 0; i < argc; ++i) {
86997 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
86998 + if (!argv[i].value) {
86999 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
87000 + continue;
87001 + }
87002 + track_frame_size = atoi(argv[i].value);
87003 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
87004 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
87005 + continue;
87006 + }
87007 + if (!strcmp(argv[i].key, "initialize-locals")) {
87008 + if (argv[i].value) {
87009 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
87010 + continue;
87011 + }
87012 + init_locals = true;
87013 + continue;
87014 + }
87015 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
87016 + }
87017 +
87018 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
87019 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
87020 +
87021 + return 0;
87022 +}
87023 diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
87024 index 83b3dde..835bee7 100644
87025 --- a/usr/gen_init_cpio.c
87026 +++ b/usr/gen_init_cpio.c
87027 @@ -299,7 +299,7 @@ static int cpio_mkfile(const char *name, const char *location,
87028 int retval;
87029 int rc = -1;
87030 int namesize;
87031 - int i;
87032 + unsigned int i;
87033
87034 mode |= S_IFREG;
87035
87036 @@ -383,9 +383,10 @@ static char *cpio_replace_env(char *new_location)
87037 *env_var = *expanded = '\0';
87038 strncat(env_var, start + 2, end - start - 2);
87039 strncat(expanded, new_location, start - new_location);
87040 - strncat(expanded, getenv(env_var), PATH_MAX);
87041 - strncat(expanded, end + 1, PATH_MAX);
87042 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
87043 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
87044 strncpy(new_location, expanded, PATH_MAX);
87045 + new_location[PATH_MAX] = 0;
87046 } else
87047 break;
87048 }
87049 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
87050 index 4f3434f..159bc3e 100644
87051 --- a/virt/kvm/kvm_main.c
87052 +++ b/virt/kvm/kvm_main.c
87053 @@ -2494,7 +2494,7 @@ asmlinkage void kvm_handle_fault_on_reboot(void)
87054 if (kvm_rebooting)
87055 /* spin while reset goes on */
87056 while (true)
87057 - ;
87058 + cpu_relax();
87059 /* Fault while not rebooting. We want the trace. */
87060 BUG();
87061 }
87062 @@ -2714,7 +2714,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
87063 kvm_arch_vcpu_put(vcpu);
87064 }
87065
87066 -int kvm_init(void *opaque, unsigned int vcpu_size,
87067 +int kvm_init(const void *opaque, unsigned int vcpu_size,
87068 struct module *module)
87069 {
87070 int r;
87071 @@ -2767,15 +2767,17 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
87072 /* A kmem cache lets us meet the alignment requirements of fx_save. */
87073 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
87074 __alignof__(struct kvm_vcpu),
87075 - 0, NULL);
87076 + SLAB_USERCOPY, NULL);
87077 if (!kvm_vcpu_cache) {
87078 r = -ENOMEM;
87079 goto out_free_5;
87080 }
87081
87082 - kvm_chardev_ops.owner = module;
87083 - kvm_vm_fops.owner = module;
87084 - kvm_vcpu_fops.owner = module;
87085 + pax_open_kernel();
87086 + *(void **)&kvm_chardev_ops.owner = module;
87087 + *(void **)&kvm_vm_fops.owner = module;
87088 + *(void **)&kvm_vcpu_fops.owner = module;
87089 + pax_close_kernel();
87090
87091 r = misc_register(&kvm_dev);
87092 if (r) {