]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.2.2-2.6.32.54-201201182131.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-2.6.32.54-201201182131.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index e1efc40..47f0daf 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -1,15 +1,19 @@
6 *.a
7 *.aux
8 *.bin
9 +*.cis
10 *.cpio
11 *.csp
12 +*.dbg
13 *.dsp
14 *.dvi
15 *.elf
16 *.eps
17 *.fw
18 +*.gcno
19 *.gen.S
20 *.gif
21 +*.gmo
22 *.grep
23 *.grp
24 *.gz
25 @@ -38,8 +42,10 @@
26 *.tab.h
27 *.tex
28 *.ver
29 +*.vim
30 *.xml
31 *_MODULES
32 +*_reg_safe.h
33 *_vga16.c
34 *~
35 *.9
36 @@ -49,11 +55,16 @@
37 53c700_d.h
38 CVS
39 ChangeSet
40 +GPATH
41 +GRTAGS
42 +GSYMS
43 +GTAGS
44 Image
45 Kerntypes
46 Module.markers
47 Module.symvers
48 PENDING
49 +PERF*
50 SCCS
51 System.map*
52 TAGS
53 @@ -76,7 +87,11 @@ btfixupprep
54 build
55 bvmlinux
56 bzImage*
57 +capability_names.h
58 +capflags.c
59 classlist.h*
60 +clut_vga16.c
61 +common-cmds.h
62 comp*.log
63 compile.h*
64 conf
65 @@ -84,6 +99,8 @@ config
66 config-*
67 config_data.h*
68 config_data.gz*
69 +config.c
70 +config.tmp
71 conmakehash
72 consolemap_deftbl.c*
73 cpustr.h
74 @@ -97,19 +114,23 @@ elfconfig.h*
75 fixdep
76 fore200e_mkfirm
77 fore200e_pca_fw.c*
78 +gate.lds
79 gconf
80 gen-devlist
81 gen_crc32table
82 gen_init_cpio
83 genksyms
84 *_gray256.c
85 +hash
86 +hid-example
87 ihex2fw
88 ikconfig.h*
89 initramfs_data.cpio
90 +initramfs_data.cpio.bz2
91 initramfs_data.cpio.gz
92 initramfs_list
93 kallsyms
94 -kconfig
95 +kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99 @@ -127,13 +148,16 @@ machtypes.h
100 map
101 maui_boot.h
102 mconf
103 +mdp
104 miboot*
105 mk_elfconfig
106 mkboot
107 mkbugboot
108 mkcpustr
109 mkdep
110 +mkpiggy
111 mkprep
112 +mkregtable
113 mktables
114 mktree
115 modpost
116 @@ -149,6 +173,7 @@ patches*
117 pca200e.bin
118 pca200e_ecd.bin2
119 piggy.gz
120 +piggy.S
121 piggyback
122 pnmtologo
123 ppc_defs.h*
124 @@ -157,12 +182,15 @@ qconf
125 raid6altivec*.c
126 raid6int*.c
127 raid6tables.c
128 +regdb.c
129 relocs
130 +rlim_names.h
131 series
132 setup
133 setup.bin
134 setup.elf
135 sImage
136 +slabinfo
137 sm_tbl*
138 split-include
139 syscalltab.h
140 @@ -171,6 +199,7 @@ tftpboot.img
141 timeconst.h
142 times.h*
143 trix_boot.h
144 +user_constants.h
145 utsrelease.h*
146 vdso-syms.lds
147 vdso.lds
148 @@ -186,14 +215,20 @@ version.h*
149 vmlinux
150 vmlinux-*
151 vmlinux.aout
152 +vmlinux.bin.all
153 +vmlinux.bin.bz2
154 vmlinux.lds
155 +vmlinux.relocs
156 +voffset.h
157 vsyscall.lds
158 vsyscall_32.lds
159 wanxlfw.inc
160 uImage
161 unifdef
162 +utsrelease.h
163 wakeup.bin
164 wakeup.elf
165 wakeup.lds
166 zImage*
167 zconf.hash.c
168 +zoffset.h
169 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
170 index c840e7d..f4c451c 100644
171 --- a/Documentation/kernel-parameters.txt
172 +++ b/Documentation/kernel-parameters.txt
173 @@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters. It is defined in the file
174 the specified number of seconds. This is to be used if
175 your oopses keep scrolling off the screen.
176
177 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
178 + virtualization environments that don't cope well with the
179 + expand down segment used by UDEREF on X86-32 or the frequent
180 + page table updates on X86-64.
181 +
182 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
183 +
184 pcbit= [HW,ISDN]
185
186 pcd. [PARIDE]
187 diff --git a/Makefile b/Makefile
188 index e480d8c..c7b2c86 100644
189 --- a/Makefile
190 +++ b/Makefile
191 @@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
192
193 HOSTCC = gcc
194 HOSTCXX = g++
195 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
196 -HOSTCXXFLAGS = -O2
197 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
198 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
199 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
200
201 # Decide whether to build built-in, modular, or both.
202 # Normally, just do built-in.
203 @@ -376,8 +377,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
204 # Rules shared between *config targets and build targets
205
206 # Basic helpers built in scripts/
207 -PHONY += scripts_basic
208 -scripts_basic:
209 +PHONY += scripts_basic gcc-plugins
210 +scripts_basic: gcc-plugins
211 $(Q)$(MAKE) $(build)=scripts/basic
212
213 # To avoid any implicit rule to kick in, define an empty command.
214 @@ -403,7 +404,7 @@ endif
215 # of make so .config is not included in this case either (for *config).
216
217 no-dot-config-targets := clean mrproper distclean \
218 - cscope TAGS tags help %docs check% \
219 + cscope gtags TAGS tags help %docs check% \
220 include/linux/version.h headers_% \
221 kernelrelease kernelversion
222
223 @@ -526,6 +527,46 @@ else
224 KBUILD_CFLAGS += -O2
225 endif
226
227 +ifndef DISABLE_PAX_PLUGINS
228 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
229 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
230 +CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
231 +endif
232 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
233 +STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
234 +STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
235 +endif
236 +ifdef CONFIG_KALLOCSTAT_PLUGIN
237 +KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
238 +endif
239 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
240 +KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
241 +KERNEXEC_PLUGIN += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD)
242 +endif
243 +ifdef CONFIG_CHECKER_PLUGIN
244 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
245 +CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
246 +endif
247 +endif
248 +GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
249 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
250 +ifeq ($(KBUILD_EXTMOD),)
251 +gcc-plugins:
252 + $(Q)$(MAKE) $(build)=tools/gcc
253 +else
254 +gcc-plugins: ;
255 +endif
256 +else
257 +gcc-plugins:
258 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
259 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
260 +else
261 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
262 +endif
263 + $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
264 +endif
265 +endif
266 +
267 include $(srctree)/arch/$(SRCARCH)/Makefile
268
269 ifneq ($(CONFIG_FRAME_WARN),0)
270 @@ -647,7 +688,7 @@ export mod_strip_cmd
271
272
273 ifeq ($(KBUILD_EXTMOD),)
274 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
275 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
276
277 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
278 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
279 @@ -868,6 +909,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
280
281 # The actual objects are generated when descending,
282 # make sure no implicit rule kicks in
283 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
284 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
285
286 # Handle descending into subdirectories listed in $(vmlinux-dirs)
287 @@ -877,7 +919,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
288 # Error messages still appears in the original language
289
290 PHONY += $(vmlinux-dirs)
291 -$(vmlinux-dirs): prepare scripts
292 +$(vmlinux-dirs): gcc-plugins prepare scripts
293 $(Q)$(MAKE) $(build)=$@
294
295 # Build the kernel release string
296 @@ -986,6 +1028,7 @@ prepare0: archprepare FORCE
297 $(Q)$(MAKE) $(build)=. missing-syscalls
298
299 # All the preparing..
300 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
301 prepare: prepare0
302
303 # The asm symlink changes when $(ARCH) changes.
304 @@ -1127,6 +1170,7 @@ all: modules
305 # using awk while concatenating to the final file.
306
307 PHONY += modules
308 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
309 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
310 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
311 @$(kecho) ' Building modules, stage 2.';
312 @@ -1136,7 +1180,7 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
313
314 # Target to prepare building external modules
315 PHONY += modules_prepare
316 -modules_prepare: prepare scripts
317 +modules_prepare: gcc-plugins prepare scripts
318
319 # Target to install modules
320 PHONY += modules_install
321 @@ -1201,7 +1245,7 @@ MRPROPER_FILES += .config .config.old include/asm .version .old_version \
322 include/linux/autoconf.h include/linux/version.h \
323 include/linux/utsrelease.h \
324 include/linux/bounds.h include/asm*/asm-offsets.h \
325 - Module.symvers Module.markers tags TAGS cscope*
326 + Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
327
328 # clean - Delete most, but leave enough to build external modules
329 #
330 @@ -1245,7 +1289,7 @@ distclean: mrproper
331 @find $(srctree) $(RCS_FIND_IGNORE) \
332 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
333 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
334 - -o -name '.*.rej' -o -size 0 \
335 + -o -name '.*.rej' -o -name '*.so' -o -size 0 \
336 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
337 -type f -print | xargs rm -f
338
339 @@ -1292,6 +1336,7 @@ help:
340 @echo ' modules_prepare - Set up for building external modules'
341 @echo ' tags/TAGS - Generate tags file for editors'
342 @echo ' cscope - Generate cscope index'
343 + @echo ' gtags - Generate GNU GLOBAL index'
344 @echo ' kernelrelease - Output the release version string'
345 @echo ' kernelversion - Output the version stored in Makefile'
346 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
347 @@ -1393,6 +1438,7 @@ PHONY += $(module-dirs) modules
348 $(module-dirs): crmodverdir $(objtree)/Module.symvers
349 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
350
351 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
352 modules: $(module-dirs)
353 @$(kecho) ' Building modules, stage 2.';
354 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
355 @@ -1448,7 +1494,7 @@ endif # KBUILD_EXTMOD
356 quiet_cmd_tags = GEN $@
357 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
358
359 -tags TAGS cscope: FORCE
360 +tags TAGS cscope gtags: FORCE
361 $(call cmd,tags)
362
363 # Scripts to check various things for consistency
364 @@ -1513,17 +1559,19 @@ else
365 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
366 endif
367
368 -%.s: %.c prepare scripts FORCE
369 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
370 +%.s: %.c gcc-plugins prepare scripts FORCE
371 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
372 %.i: %.c prepare scripts FORCE
373 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
374 -%.o: %.c prepare scripts FORCE
375 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
376 +%.o: %.c gcc-plugins prepare scripts FORCE
377 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
378 %.lst: %.c prepare scripts FORCE
379 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
380 -%.s: %.S prepare scripts FORCE
381 +%.s: %.S gcc-plugins prepare scripts FORCE
382 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
383 -%.o: %.S prepare scripts FORCE
384 +%.o: %.S gcc-plugins prepare scripts FORCE
385 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
386 %.symtypes: %.c prepare scripts FORCE
387 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
388 @@ -1533,11 +1581,13 @@ endif
389 $(cmd_crmodverdir)
390 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
391 $(build)=$(build-dir)
392 -%/: prepare scripts FORCE
393 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
394 +%/: gcc-plugins prepare scripts FORCE
395 $(cmd_crmodverdir)
396 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
397 $(build)=$(build-dir)
398 -%.ko: prepare scripts FORCE
399 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
400 +%.ko: gcc-plugins prepare scripts FORCE
401 $(cmd_crmodverdir)
402 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
403 $(build)=$(build-dir) $(@:.ko=.o)
404 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
405 index 5c75c1b..c82f878 100644
406 --- a/arch/alpha/include/asm/elf.h
407 +++ b/arch/alpha/include/asm/elf.h
408 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
409
410 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
411
412 +#ifdef CONFIG_PAX_ASLR
413 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
414 +
415 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
416 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
417 +#endif
418 +
419 /* $0 is set by ld.so to a pointer to a function which might be
420 registered using atexit. This provides a mean for the dynamic
421 linker to call DT_FINI functions for shared libraries that have
422 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
423 index 3f0c59f..cf1e100 100644
424 --- a/arch/alpha/include/asm/pgtable.h
425 +++ b/arch/alpha/include/asm/pgtable.h
426 @@ -101,6 +101,17 @@ struct vm_area_struct;
427 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
428 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
429 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
430 +
431 +#ifdef CONFIG_PAX_PAGEEXEC
432 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
433 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
434 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
435 +#else
436 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
437 +# define PAGE_COPY_NOEXEC PAGE_COPY
438 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
439 +#endif
440 +
441 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
442
443 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
444 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
445 index ebc3c89..20cfa63 100644
446 --- a/arch/alpha/kernel/module.c
447 +++ b/arch/alpha/kernel/module.c
448 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
449
450 /* The small sections were sorted to the end of the segment.
451 The following should definitely cover them. */
452 - gp = (u64)me->module_core + me->core_size - 0x8000;
453 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
454 got = sechdrs[me->arch.gotsecindex].sh_addr;
455
456 for (i = 0; i < n; i++) {
457 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
458 index a94e49c..d71dd44 100644
459 --- a/arch/alpha/kernel/osf_sys.c
460 +++ b/arch/alpha/kernel/osf_sys.c
461 @@ -1172,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
462 /* At this point: (!vma || addr < vma->vm_end). */
463 if (limit - len < addr)
464 return -ENOMEM;
465 - if (!vma || addr + len <= vma->vm_start)
466 + if (check_heap_stack_gap(vma, addr, len))
467 return addr;
468 addr = vma->vm_end;
469 vma = vma->vm_next;
470 @@ -1208,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
471 merely specific addresses, but regions of memory -- perhaps
472 this feature should be incorporated into all ports? */
473
474 +#ifdef CONFIG_PAX_RANDMMAP
475 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
476 +#endif
477 +
478 if (addr) {
479 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
480 if (addr != (unsigned long) -ENOMEM)
481 @@ -1215,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
482 }
483
484 /* Next, try allocating at TASK_UNMAPPED_BASE. */
485 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
486 - len, limit);
487 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
488 +
489 if (addr != (unsigned long) -ENOMEM)
490 return addr;
491
492 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
493 index 00a31de..2ded0f2 100644
494 --- a/arch/alpha/mm/fault.c
495 +++ b/arch/alpha/mm/fault.c
496 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
497 __reload_thread(pcb);
498 }
499
500 +#ifdef CONFIG_PAX_PAGEEXEC
501 +/*
502 + * PaX: decide what to do with offenders (regs->pc = fault address)
503 + *
504 + * returns 1 when task should be killed
505 + * 2 when patched PLT trampoline was detected
506 + * 3 when unpatched PLT trampoline was detected
507 + */
508 +static int pax_handle_fetch_fault(struct pt_regs *regs)
509 +{
510 +
511 +#ifdef CONFIG_PAX_EMUPLT
512 + int err;
513 +
514 + do { /* PaX: patched PLT emulation #1 */
515 + unsigned int ldah, ldq, jmp;
516 +
517 + err = get_user(ldah, (unsigned int *)regs->pc);
518 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
519 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
520 +
521 + if (err)
522 + break;
523 +
524 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
525 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
526 + jmp == 0x6BFB0000U)
527 + {
528 + unsigned long r27, addr;
529 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
530 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
531 +
532 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
533 + err = get_user(r27, (unsigned long *)addr);
534 + if (err)
535 + break;
536 +
537 + regs->r27 = r27;
538 + regs->pc = r27;
539 + return 2;
540 + }
541 + } while (0);
542 +
543 + do { /* PaX: patched PLT emulation #2 */
544 + unsigned int ldah, lda, br;
545 +
546 + err = get_user(ldah, (unsigned int *)regs->pc);
547 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
548 + err |= get_user(br, (unsigned int *)(regs->pc+8));
549 +
550 + if (err)
551 + break;
552 +
553 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
554 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
555 + (br & 0xFFE00000U) == 0xC3E00000U)
556 + {
557 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
558 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
559 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
560 +
561 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
562 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
563 + return 2;
564 + }
565 + } while (0);
566 +
567 + do { /* PaX: unpatched PLT emulation */
568 + unsigned int br;
569 +
570 + err = get_user(br, (unsigned int *)regs->pc);
571 +
572 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
573 + unsigned int br2, ldq, nop, jmp;
574 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
575 +
576 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
577 + err = get_user(br2, (unsigned int *)addr);
578 + err |= get_user(ldq, (unsigned int *)(addr+4));
579 + err |= get_user(nop, (unsigned int *)(addr+8));
580 + err |= get_user(jmp, (unsigned int *)(addr+12));
581 + err |= get_user(resolver, (unsigned long *)(addr+16));
582 +
583 + if (err)
584 + break;
585 +
586 + if (br2 == 0xC3600000U &&
587 + ldq == 0xA77B000CU &&
588 + nop == 0x47FF041FU &&
589 + jmp == 0x6B7B0000U)
590 + {
591 + regs->r28 = regs->pc+4;
592 + regs->r27 = addr+16;
593 + regs->pc = resolver;
594 + return 3;
595 + }
596 + }
597 + } while (0);
598 +#endif
599 +
600 + return 1;
601 +}
602 +
603 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
604 +{
605 + unsigned long i;
606 +
607 + printk(KERN_ERR "PAX: bytes at PC: ");
608 + for (i = 0; i < 5; i++) {
609 + unsigned int c;
610 + if (get_user(c, (unsigned int *)pc+i))
611 + printk(KERN_CONT "???????? ");
612 + else
613 + printk(KERN_CONT "%08x ", c);
614 + }
615 + printk("\n");
616 +}
617 +#endif
618
619 /*
620 * This routine handles page faults. It determines the address,
621 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
622 good_area:
623 si_code = SEGV_ACCERR;
624 if (cause < 0) {
625 - if (!(vma->vm_flags & VM_EXEC))
626 + if (!(vma->vm_flags & VM_EXEC)) {
627 +
628 +#ifdef CONFIG_PAX_PAGEEXEC
629 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
630 + goto bad_area;
631 +
632 + up_read(&mm->mmap_sem);
633 + switch (pax_handle_fetch_fault(regs)) {
634 +
635 +#ifdef CONFIG_PAX_EMUPLT
636 + case 2:
637 + case 3:
638 + return;
639 +#endif
640 +
641 + }
642 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
643 + do_group_exit(SIGKILL);
644 +#else
645 goto bad_area;
646 +#endif
647 +
648 + }
649 } else if (!cause) {
650 /* Allow reads even for write-only mappings */
651 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
652 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
653 index 6aac3f5..265536b 100644
654 --- a/arch/arm/include/asm/elf.h
655 +++ b/arch/arm/include/asm/elf.h
656 @@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
657 the loader. We need to make sure that it is out of the way of the program
658 that it will "exec", and that there is sufficient room for the brk. */
659
660 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
661 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
662 +
663 +#ifdef CONFIG_PAX_ASLR
664 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
665 +
666 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
667 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
668 +#endif
669
670 /* When the program starts, a1 contains a pointer to a function to be
671 registered with atexit, as per the SVR4 ABI. A value of 0 means we
672 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
673 index c019949..388fdd1 100644
674 --- a/arch/arm/include/asm/kmap_types.h
675 +++ b/arch/arm/include/asm/kmap_types.h
676 @@ -19,6 +19,7 @@ enum km_type {
677 KM_SOFTIRQ0,
678 KM_SOFTIRQ1,
679 KM_L2_CACHE,
680 + KM_CLEARPAGE,
681 KM_TYPE_NR
682 };
683
684 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
685 index 1d6bd40..fba0cb9 100644
686 --- a/arch/arm/include/asm/uaccess.h
687 +++ b/arch/arm/include/asm/uaccess.h
688 @@ -22,6 +22,8 @@
689 #define VERIFY_READ 0
690 #define VERIFY_WRITE 1
691
692 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
693 +
694 /*
695 * The exception table consists of pairs of addresses: the first is the
696 * address of an instruction that is allowed to fault, and the second is
697 @@ -387,8 +389,23 @@ do { \
698
699
700 #ifdef CONFIG_MMU
701 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
702 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
703 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
704 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
705 +
706 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
707 +{
708 + if (!__builtin_constant_p(n))
709 + check_object_size(to, n, false);
710 + return ___copy_from_user(to, from, n);
711 +}
712 +
713 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
714 +{
715 + if (!__builtin_constant_p(n))
716 + check_object_size(from, n, true);
717 + return ___copy_to_user(to, from, n);
718 +}
719 +
720 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
721 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
722 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
723 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
724
725 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
726 {
727 + if ((long)n < 0)
728 + return n;
729 +
730 if (access_ok(VERIFY_READ, from, n))
731 n = __copy_from_user(to, from, n);
732 else /* security hole - plug it */
733 @@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
734
735 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
736 {
737 + if ((long)n < 0)
738 + return n;
739 +
740 if (access_ok(VERIFY_WRITE, to, n))
741 n = __copy_to_user(to, from, n);
742 return n;
743 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
744 index 0e62770..e2c2cd6 100644
745 --- a/arch/arm/kernel/armksyms.c
746 +++ b/arch/arm/kernel/armksyms.c
747 @@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
748 #ifdef CONFIG_MMU
749 EXPORT_SYMBOL(copy_page);
750
751 -EXPORT_SYMBOL(__copy_from_user);
752 -EXPORT_SYMBOL(__copy_to_user);
753 +EXPORT_SYMBOL(___copy_from_user);
754 +EXPORT_SYMBOL(___copy_to_user);
755 EXPORT_SYMBOL(__clear_user);
756
757 EXPORT_SYMBOL(__get_user_1);
758 diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
759 index ba8ccfe..2dc34dc 100644
760 --- a/arch/arm/kernel/kgdb.c
761 +++ b/arch/arm/kernel/kgdb.c
762 @@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
763 * and we handle the normal undef case within the do_undefinstr
764 * handler.
765 */
766 -struct kgdb_arch arch_kgdb_ops = {
767 +const struct kgdb_arch arch_kgdb_ops = {
768 #ifndef __ARMEB__
769 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
770 #else /* ! __ARMEB__ */
771 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
772 index 3f361a7..6e806e1 100644
773 --- a/arch/arm/kernel/traps.c
774 +++ b/arch/arm/kernel/traps.c
775 @@ -247,6 +247,8 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p
776
777 DEFINE_SPINLOCK(die_lock);
778
779 +extern void gr_handle_kernel_exploit(void);
780 +
781 /*
782 * This function is protected against re-entrancy.
783 */
784 @@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
785 if (panic_on_oops)
786 panic("Fatal exception");
787
788 + gr_handle_kernel_exploit();
789 +
790 do_exit(SIGSEGV);
791 }
792
793 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
794 index e4fe124..0fc246b 100644
795 --- a/arch/arm/lib/copy_from_user.S
796 +++ b/arch/arm/lib/copy_from_user.S
797 @@ -16,7 +16,7 @@
798 /*
799 * Prototype:
800 *
801 - * size_t __copy_from_user(void *to, const void *from, size_t n)
802 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
803 *
804 * Purpose:
805 *
806 @@ -84,11 +84,11 @@
807
808 .text
809
810 -ENTRY(__copy_from_user)
811 +ENTRY(___copy_from_user)
812
813 #include "copy_template.S"
814
815 -ENDPROC(__copy_from_user)
816 +ENDPROC(___copy_from_user)
817
818 .section .fixup,"ax"
819 .align 0
820 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
821 index 1a71e15..ac7b258 100644
822 --- a/arch/arm/lib/copy_to_user.S
823 +++ b/arch/arm/lib/copy_to_user.S
824 @@ -16,7 +16,7 @@
825 /*
826 * Prototype:
827 *
828 - * size_t __copy_to_user(void *to, const void *from, size_t n)
829 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
830 *
831 * Purpose:
832 *
833 @@ -88,11 +88,11 @@
834 .text
835
836 ENTRY(__copy_to_user_std)
837 -WEAK(__copy_to_user)
838 +WEAK(___copy_to_user)
839
840 #include "copy_template.S"
841
842 -ENDPROC(__copy_to_user)
843 +ENDPROC(___copy_to_user)
844
845 .section .fixup,"ax"
846 .align 0
847 diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
848 index ffdd274..91017b6 100644
849 --- a/arch/arm/lib/uaccess.S
850 +++ b/arch/arm/lib/uaccess.S
851 @@ -19,7 +19,7 @@
852
853 #define PAGE_SHIFT 12
854
855 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
856 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
857 * Purpose : copy a block to user memory from kernel memory
858 * Params : to - user memory
859 * : from - kernel memory
860 @@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
861 sub r2, r2, ip
862 b .Lc2u_dest_aligned
863
864 -ENTRY(__copy_to_user)
865 +ENTRY(___copy_to_user)
866 stmfd sp!, {r2, r4 - r7, lr}
867 cmp r2, #4
868 blt .Lc2u_not_enough
869 @@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fault
870 ldrgtb r3, [r1], #0
871 USER( strgtbt r3, [r0], #1) @ May fault
872 b .Lc2u_finished
873 -ENDPROC(__copy_to_user)
874 +ENDPROC(___copy_to_user)
875
876 .section .fixup,"ax"
877 .align 0
878 9001: ldmfd sp!, {r0, r4 - r7, pc}
879 .previous
880
881 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
882 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
883 * Purpose : copy a block from user memory to kernel memory
884 * Params : to - kernel memory
885 * : from - user memory
886 @@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fault
887 sub r2, r2, ip
888 b .Lcfu_dest_aligned
889
890 -ENTRY(__copy_from_user)
891 +ENTRY(___copy_from_user)
892 stmfd sp!, {r0, r2, r4 - r7, lr}
893 cmp r2, #4
894 blt .Lcfu_not_enough
895 @@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fault
896 USER( ldrgtbt r3, [r1], #1) @ May fault
897 strgtb r3, [r0], #1
898 b .Lcfu_finished
899 -ENDPROC(__copy_from_user)
900 +ENDPROC(___copy_from_user)
901
902 .section .fixup,"ax"
903 .align 0
904 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
905 index 6b967ff..67d5b2b 100644
906 --- a/arch/arm/lib/uaccess_with_memcpy.c
907 +++ b/arch/arm/lib/uaccess_with_memcpy.c
908 @@ -97,7 +97,7 @@ out:
909 }
910
911 unsigned long
912 -__copy_to_user(void __user *to, const void *from, unsigned long n)
913 +___copy_to_user(void __user *to, const void *from, unsigned long n)
914 {
915 /*
916 * This test is stubbed out of the main function above to keep
917 diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
918 index 4028724..beec230 100644
919 --- a/arch/arm/mach-at91/pm.c
920 +++ b/arch/arm/mach-at91/pm.c
921 @@ -348,7 +348,7 @@ static void at91_pm_end(void)
922 }
923
924
925 -static struct platform_suspend_ops at91_pm_ops ={
926 +static const struct platform_suspend_ops at91_pm_ops ={
927 .valid = at91_pm_valid_state,
928 .begin = at91_pm_begin,
929 .enter = at91_pm_enter,
930 diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
931 index 5218943..0a34552 100644
932 --- a/arch/arm/mach-omap1/pm.c
933 +++ b/arch/arm/mach-omap1/pm.c
934 @@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq = {
935
936
937
938 -static struct platform_suspend_ops omap_pm_ops ={
939 +static const struct platform_suspend_ops omap_pm_ops ={
940 .prepare = omap_pm_prepare,
941 .enter = omap_pm_enter,
942 .finish = omap_pm_finish,
943 diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
944 index bff5c4e..d4c649b 100644
945 --- a/arch/arm/mach-omap2/pm24xx.c
946 +++ b/arch/arm/mach-omap2/pm24xx.c
947 @@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
948 enable_hlt();
949 }
950
951 -static struct platform_suspend_ops omap_pm_ops = {
952 +static const struct platform_suspend_ops omap_pm_ops = {
953 .prepare = omap2_pm_prepare,
954 .enter = omap2_pm_enter,
955 .finish = omap2_pm_finish,
956 diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
957 index 8946319..7d3e661 100644
958 --- a/arch/arm/mach-omap2/pm34xx.c
959 +++ b/arch/arm/mach-omap2/pm34xx.c
960 @@ -401,7 +401,7 @@ static void omap3_pm_end(void)
961 return;
962 }
963
964 -static struct platform_suspend_ops omap_pm_ops = {
965 +static const struct platform_suspend_ops omap_pm_ops = {
966 .begin = omap3_pm_begin,
967 .end = omap3_pm_end,
968 .prepare = omap3_pm_prepare,
969 diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c
970 index b3d8d53..6e68ebc 100644
971 --- a/arch/arm/mach-pnx4008/pm.c
972 +++ b/arch/arm/mach-pnx4008/pm.c
973 @@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_state_t state)
974 (state == PM_SUSPEND_MEM);
975 }
976
977 -static struct platform_suspend_ops pnx4008_pm_ops = {
978 +static const struct platform_suspend_ops pnx4008_pm_ops = {
979 .enter = pnx4008_pm_enter,
980 .valid = pnx4008_pm_valid,
981 };
982 diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
983 index 7693355..9beb00a 100644
984 --- a/arch/arm/mach-pxa/pm.c
985 +++ b/arch/arm/mach-pxa/pm.c
986 @@ -95,7 +95,7 @@ void pxa_pm_finish(void)
987 pxa_cpu_pm_fns->finish();
988 }
989
990 -static struct platform_suspend_ops pxa_pm_ops = {
991 +static const struct platform_suspend_ops pxa_pm_ops = {
992 .valid = pxa_pm_valid,
993 .enter = pxa_pm_enter,
994 .prepare = pxa_pm_prepare,
995 diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
996 index 629e05d..06be589 100644
997 --- a/arch/arm/mach-pxa/sharpsl_pm.c
998 +++ b/arch/arm/mach-pxa/sharpsl_pm.c
999 @@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status(struct apm_power_info *info)
1000 }
1001
1002 #ifdef CONFIG_PM
1003 -static struct platform_suspend_ops sharpsl_pm_ops = {
1004 +static const struct platform_suspend_ops sharpsl_pm_ops = {
1005 .prepare = pxa_pm_prepare,
1006 .finish = pxa_pm_finish,
1007 .enter = corgi_pxa_pm_enter,
1008 diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
1009 index c83fdc8..ab9fc44 100644
1010 --- a/arch/arm/mach-sa1100/pm.c
1011 +++ b/arch/arm/mach-sa1100/pm.c
1012 @@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
1013 return virt_to_phys(sp);
1014 }
1015
1016 -static struct platform_suspend_ops sa11x0_pm_ops = {
1017 +static const struct platform_suspend_ops sa11x0_pm_ops = {
1018 .enter = sa11x0_pm_enter,
1019 .valid = suspend_valid_only_mem,
1020 };
1021 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1022 index 3191cd6..c0739db 100644
1023 --- a/arch/arm/mm/fault.c
1024 +++ b/arch/arm/mm/fault.c
1025 @@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1026 }
1027 #endif
1028
1029 +#ifdef CONFIG_PAX_PAGEEXEC
1030 + if (fsr & FSR_LNX_PF) {
1031 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1032 + do_group_exit(SIGKILL);
1033 + }
1034 +#endif
1035 +
1036 tsk->thread.address = addr;
1037 tsk->thread.error_code = fsr;
1038 tsk->thread.trap_no = 14;
1039 @@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1040 }
1041 #endif /* CONFIG_MMU */
1042
1043 +#ifdef CONFIG_PAX_PAGEEXEC
1044 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1045 +{
1046 + long i;
1047 +
1048 + printk(KERN_ERR "PAX: bytes at PC: ");
1049 + for (i = 0; i < 20; i++) {
1050 + unsigned char c;
1051 + if (get_user(c, (__force unsigned char __user *)pc+i))
1052 + printk(KERN_CONT "?? ");
1053 + else
1054 + printk(KERN_CONT "%02x ", c);
1055 + }
1056 + printk("\n");
1057 +
1058 + printk(KERN_ERR "PAX: bytes at SP-4: ");
1059 + for (i = -1; i < 20; i++) {
1060 + unsigned long c;
1061 + if (get_user(c, (__force unsigned long __user *)sp+i))
1062 + printk(KERN_CONT "???????? ");
1063 + else
1064 + printk(KERN_CONT "%08lx ", c);
1065 + }
1066 + printk("\n");
1067 +}
1068 +#endif
1069 +
1070 /*
1071 * First Level Translation Fault Handler
1072 *
1073 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1074 index f5abc51..7ec524c 100644
1075 --- a/arch/arm/mm/mmap.c
1076 +++ b/arch/arm/mm/mmap.c
1077 @@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1078 if (len > TASK_SIZE)
1079 return -ENOMEM;
1080
1081 +#ifdef CONFIG_PAX_RANDMMAP
1082 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1083 +#endif
1084 +
1085 if (addr) {
1086 if (do_align)
1087 addr = COLOUR_ALIGN(addr, pgoff);
1088 @@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1089 addr = PAGE_ALIGN(addr);
1090
1091 vma = find_vma(mm, addr);
1092 - if (TASK_SIZE - len >= addr &&
1093 - (!vma || addr + len <= vma->vm_start))
1094 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1095 return addr;
1096 }
1097 if (len > mm->cached_hole_size) {
1098 - start_addr = addr = mm->free_area_cache;
1099 + start_addr = addr = mm->free_area_cache;
1100 } else {
1101 - start_addr = addr = TASK_UNMAPPED_BASE;
1102 - mm->cached_hole_size = 0;
1103 + start_addr = addr = mm->mmap_base;
1104 + mm->cached_hole_size = 0;
1105 }
1106
1107 full_search:
1108 @@ -94,14 +97,14 @@ full_search:
1109 * Start a new search - just in case we missed
1110 * some holes.
1111 */
1112 - if (start_addr != TASK_UNMAPPED_BASE) {
1113 - start_addr = addr = TASK_UNMAPPED_BASE;
1114 + if (start_addr != mm->mmap_base) {
1115 + start_addr = addr = mm->mmap_base;
1116 mm->cached_hole_size = 0;
1117 goto full_search;
1118 }
1119 return -ENOMEM;
1120 }
1121 - if (!vma || addr + len <= vma->vm_start) {
1122 + if (check_heap_stack_gap(vma, addr, len)) {
1123 /*
1124 * Remember the place where we stopped the search:
1125 */
1126 diff --git a/arch/arm/plat-s3c/pm.c b/arch/arm/plat-s3c/pm.c
1127 index 8d97db2..b66cfa5 100644
1128 --- a/arch/arm/plat-s3c/pm.c
1129 +++ b/arch/arm/plat-s3c/pm.c
1130 @@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
1131 s3c_pm_check_cleanup();
1132 }
1133
1134 -static struct platform_suspend_ops s3c_pm_ops = {
1135 +static const struct platform_suspend_ops s3c_pm_ops = {
1136 .enter = s3c_pm_enter,
1137 .prepare = s3c_pm_prepare,
1138 .finish = s3c_pm_finish,
1139 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1140 index d5d1d41..856e2ed 100644
1141 --- a/arch/avr32/include/asm/elf.h
1142 +++ b/arch/avr32/include/asm/elf.h
1143 @@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1144 the loader. We need to make sure that it is out of the way of the program
1145 that it will "exec", and that there is sufficient room for the brk. */
1146
1147 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1148 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1149
1150 +#ifdef CONFIG_PAX_ASLR
1151 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1152 +
1153 +#define PAX_DELTA_MMAP_LEN 15
1154 +#define PAX_DELTA_STACK_LEN 15
1155 +#endif
1156
1157 /* This yields a mask that user programs can use to figure out what
1158 instruction set this CPU supports. This could be done in user space,
1159 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1160 index b7f5c68..556135c 100644
1161 --- a/arch/avr32/include/asm/kmap_types.h
1162 +++ b/arch/avr32/include/asm/kmap_types.h
1163 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1164 D(11) KM_IRQ1,
1165 D(12) KM_SOFTIRQ0,
1166 D(13) KM_SOFTIRQ1,
1167 -D(14) KM_TYPE_NR
1168 +D(14) KM_CLEARPAGE,
1169 +D(15) KM_TYPE_NR
1170 };
1171
1172 #undef D
1173 diff --git a/arch/avr32/mach-at32ap/pm.c b/arch/avr32/mach-at32ap/pm.c
1174 index f021edf..32d680e 100644
1175 --- a/arch/avr32/mach-at32ap/pm.c
1176 +++ b/arch/avr32/mach-at32ap/pm.c
1177 @@ -176,7 +176,7 @@ out:
1178 return 0;
1179 }
1180
1181 -static struct platform_suspend_ops avr32_pm_ops = {
1182 +static const struct platform_suspend_ops avr32_pm_ops = {
1183 .valid = avr32_pm_valid_state,
1184 .enter = avr32_pm_enter,
1185 };
1186 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1187 index b61d86d..e292c7f 100644
1188 --- a/arch/avr32/mm/fault.c
1189 +++ b/arch/avr32/mm/fault.c
1190 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1191
1192 int exception_trace = 1;
1193
1194 +#ifdef CONFIG_PAX_PAGEEXEC
1195 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1196 +{
1197 + unsigned long i;
1198 +
1199 + printk(KERN_ERR "PAX: bytes at PC: ");
1200 + for (i = 0; i < 20; i++) {
1201 + unsigned char c;
1202 + if (get_user(c, (unsigned char *)pc+i))
1203 + printk(KERN_CONT "???????? ");
1204 + else
1205 + printk(KERN_CONT "%02x ", c);
1206 + }
1207 + printk("\n");
1208 +}
1209 +#endif
1210 +
1211 /*
1212 * This routine handles page faults. It determines the address and the
1213 * problem, and then passes it off to one of the appropriate routines.
1214 @@ -157,6 +174,16 @@ bad_area:
1215 up_read(&mm->mmap_sem);
1216
1217 if (user_mode(regs)) {
1218 +
1219 +#ifdef CONFIG_PAX_PAGEEXEC
1220 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1221 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1222 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1223 + do_group_exit(SIGKILL);
1224 + }
1225 + }
1226 +#endif
1227 +
1228 if (exception_trace && printk_ratelimit())
1229 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1230 "sp %08lx ecr %lu\n",
1231 diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
1232 index cce79d0..c406c85 100644
1233 --- a/arch/blackfin/kernel/kgdb.c
1234 +++ b/arch/blackfin/kernel/kgdb.c
1235 @@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vector, int signo,
1236 return -1; /* this means that we do not want to exit from the handler */
1237 }
1238
1239 -struct kgdb_arch arch_kgdb_ops = {
1240 +const struct kgdb_arch arch_kgdb_ops = {
1241 .gdb_bpt_instr = {0xa1},
1242 #ifdef CONFIG_SMP
1243 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
1244 diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
1245 index 8837be4..b2fb413 100644
1246 --- a/arch/blackfin/mach-common/pm.c
1247 +++ b/arch/blackfin/mach-common/pm.c
1248 @@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t state)
1249 return 0;
1250 }
1251
1252 -struct platform_suspend_ops bfin_pm_ops = {
1253 +const struct platform_suspend_ops bfin_pm_ops = {
1254 .enter = bfin_pm_enter,
1255 .valid = bfin_pm_valid,
1256 };
1257 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1258 index f8e16b2..c73ff79 100644
1259 --- a/arch/frv/include/asm/kmap_types.h
1260 +++ b/arch/frv/include/asm/kmap_types.h
1261 @@ -23,6 +23,7 @@ enum km_type {
1262 KM_IRQ1,
1263 KM_SOFTIRQ0,
1264 KM_SOFTIRQ1,
1265 + KM_CLEARPAGE,
1266 KM_TYPE_NR
1267 };
1268
1269 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1270 index 385fd30..6c3d97e 100644
1271 --- a/arch/frv/mm/elf-fdpic.c
1272 +++ b/arch/frv/mm/elf-fdpic.c
1273 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1274 if (addr) {
1275 addr = PAGE_ALIGN(addr);
1276 vma = find_vma(current->mm, addr);
1277 - if (TASK_SIZE - len >= addr &&
1278 - (!vma || addr + len <= vma->vm_start))
1279 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1280 goto success;
1281 }
1282
1283 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1284 for (; vma; vma = vma->vm_next) {
1285 if (addr > limit)
1286 break;
1287 - if (addr + len <= vma->vm_start)
1288 + if (check_heap_stack_gap(vma, addr, len))
1289 goto success;
1290 addr = vma->vm_end;
1291 }
1292 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1293 for (; vma; vma = vma->vm_next) {
1294 if (addr > limit)
1295 break;
1296 - if (addr + len <= vma->vm_start)
1297 + if (check_heap_stack_gap(vma, addr, len))
1298 goto success;
1299 addr = vma->vm_end;
1300 }
1301 diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
1302 index e4a80d8..11a7ea1 100644
1303 --- a/arch/ia64/hp/common/hwsw_iommu.c
1304 +++ b/arch/ia64/hp/common/hwsw_iommu.c
1305 @@ -17,7 +17,7 @@
1306 #include <linux/swiotlb.h>
1307 #include <asm/machvec.h>
1308
1309 -extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1310 +extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1311
1312 /* swiotlb declarations & definitions: */
1313 extern int swiotlb_late_init_with_default_size (size_t size);
1314 @@ -33,7 +33,7 @@ static inline int use_swiotlb(struct device *dev)
1315 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
1316 }
1317
1318 -struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1319 +const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1320 {
1321 if (use_swiotlb(dev))
1322 return &swiotlb_dma_ops;
1323 diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
1324 index 01ae69b..35752fd 100644
1325 --- a/arch/ia64/hp/common/sba_iommu.c
1326 +++ b/arch/ia64/hp/common/sba_iommu.c
1327 @@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_driver = {
1328 },
1329 };
1330
1331 -extern struct dma_map_ops swiotlb_dma_ops;
1332 +extern const struct dma_map_ops swiotlb_dma_ops;
1333
1334 static int __init
1335 sba_init(void)
1336 @@ -2211,7 +2211,7 @@ sba_page_override(char *str)
1337
1338 __setup("sbapagesize=",sba_page_override);
1339
1340 -struct dma_map_ops sba_dma_ops = {
1341 +const struct dma_map_ops sba_dma_ops = {
1342 .alloc_coherent = sba_alloc_coherent,
1343 .free_coherent = sba_free_coherent,
1344 .map_page = sba_map_page,
1345 diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
1346 index c69552b..c7122f4 100644
1347 --- a/arch/ia64/ia32/binfmt_elf32.c
1348 +++ b/arch/ia64/ia32/binfmt_elf32.c
1349 @@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_top);
1350
1351 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
1352
1353 +#ifdef CONFIG_PAX_ASLR
1354 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1355 +
1356 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1357 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1358 +#endif
1359 +
1360 /* Ugly but avoids duplication */
1361 #include "../../../fs/binfmt_elf.c"
1362
1363 diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
1364 index 0f15349..26b3429 100644
1365 --- a/arch/ia64/ia32/ia32priv.h
1366 +++ b/arch/ia64/ia32/ia32priv.h
1367 @@ -296,7 +296,14 @@ typedef struct compat_siginfo {
1368 #define ELF_DATA ELFDATA2LSB
1369 #define ELF_ARCH EM_386
1370
1371 -#define IA32_STACK_TOP IA32_PAGE_OFFSET
1372 +#ifdef CONFIG_PAX_RANDUSTACK
1373 +#define __IA32_DELTA_STACK (current->mm->delta_stack)
1374 +#else
1375 +#define __IA32_DELTA_STACK 0UL
1376 +#endif
1377 +
1378 +#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
1379 +
1380 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
1381 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
1382
1383 diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
1384 index 8d3c79c..71b3af6 100644
1385 --- a/arch/ia64/include/asm/dma-mapping.h
1386 +++ b/arch/ia64/include/asm/dma-mapping.h
1387 @@ -12,7 +12,7 @@
1388
1389 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
1390
1391 -extern struct dma_map_ops *dma_ops;
1392 +extern const struct dma_map_ops *dma_ops;
1393 extern struct ia64_machine_vector ia64_mv;
1394 extern void set_iommu_machvec(void);
1395
1396 @@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
1397 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1398 dma_addr_t *daddr, gfp_t gfp)
1399 {
1400 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1401 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1402 void *caddr;
1403
1404 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
1405 @@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1406 static inline void dma_free_coherent(struct device *dev, size_t size,
1407 void *caddr, dma_addr_t daddr)
1408 {
1409 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1410 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1411 debug_dma_free_coherent(dev, size, caddr, daddr);
1412 ops->free_coherent(dev, size, caddr, daddr);
1413 }
1414 @@ -49,13 +49,13 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
1415
1416 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
1417 {
1418 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1419 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1420 return ops->mapping_error(dev, daddr);
1421 }
1422
1423 static inline int dma_supported(struct device *dev, u64 mask)
1424 {
1425 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1426 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1427 return ops->dma_supported(dev, mask);
1428 }
1429
1430 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
1431 index 86eddee..b116bb4 100644
1432 --- a/arch/ia64/include/asm/elf.h
1433 +++ b/arch/ia64/include/asm/elf.h
1434 @@ -43,6 +43,13 @@
1435 */
1436 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1437
1438 +#ifdef CONFIG_PAX_ASLR
1439 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1440 +
1441 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1442 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1443 +#endif
1444 +
1445 #define PT_IA_64_UNWIND 0x70000001
1446
1447 /* IA-64 relocations: */
1448 diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
1449 index 367d299..9ad4279 100644
1450 --- a/arch/ia64/include/asm/machvec.h
1451 +++ b/arch/ia64/include/asm/machvec.h
1452 @@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event_t(void);
1453 /* DMA-mapping interface: */
1454 typedef void ia64_mv_dma_init (void);
1455 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
1456 -typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1457 +typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1458
1459 /*
1460 * WARNING: The legacy I/O space is _architected_. Platforms are
1461 @@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(const char *cmdline);
1462 # endif /* CONFIG_IA64_GENERIC */
1463
1464 extern void swiotlb_dma_init(void);
1465 -extern struct dma_map_ops *dma_get_ops(struct device *);
1466 +extern const struct dma_map_ops *dma_get_ops(struct device *);
1467
1468 /*
1469 * Define default versions so we can extend machvec for new platforms without having
1470 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
1471 index 8840a69..cdb63d9 100644
1472 --- a/arch/ia64/include/asm/pgtable.h
1473 +++ b/arch/ia64/include/asm/pgtable.h
1474 @@ -12,7 +12,7 @@
1475 * David Mosberger-Tang <davidm@hpl.hp.com>
1476 */
1477
1478 -
1479 +#include <linux/const.h>
1480 #include <asm/mman.h>
1481 #include <asm/page.h>
1482 #include <asm/processor.h>
1483 @@ -143,6 +143,17 @@
1484 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1485 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1486 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1487 +
1488 +#ifdef CONFIG_PAX_PAGEEXEC
1489 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1490 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1491 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1492 +#else
1493 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1494 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1495 +# define PAGE_COPY_NOEXEC PAGE_COPY
1496 +#endif
1497 +
1498 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1499 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1500 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1501 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
1502 index 239ecdc..f94170e 100644
1503 --- a/arch/ia64/include/asm/spinlock.h
1504 +++ b/arch/ia64/include/asm/spinlock.h
1505 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
1506 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1507
1508 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1509 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1510 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1511 }
1512
1513 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
1514 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
1515 index 449c8c0..432a3d2 100644
1516 --- a/arch/ia64/include/asm/uaccess.h
1517 +++ b/arch/ia64/include/asm/uaccess.h
1518 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1519 const void *__cu_from = (from); \
1520 long __cu_len = (n); \
1521 \
1522 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
1523 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1524 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1525 __cu_len; \
1526 })
1527 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1528 long __cu_len = (n); \
1529 \
1530 __chk_user_ptr(__cu_from); \
1531 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
1532 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1533 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1534 __cu_len; \
1535 })
1536 diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
1537 index f2c1600..969398a 100644
1538 --- a/arch/ia64/kernel/dma-mapping.c
1539 +++ b/arch/ia64/kernel/dma-mapping.c
1540 @@ -3,7 +3,7 @@
1541 /* Set this to 1 if there is a HW IOMMU in the system */
1542 int iommu_detected __read_mostly;
1543
1544 -struct dma_map_ops *dma_ops;
1545 +const struct dma_map_ops *dma_ops;
1546 EXPORT_SYMBOL(dma_ops);
1547
1548 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
1549 @@ -16,7 +16,7 @@ static int __init dma_init(void)
1550 }
1551 fs_initcall(dma_init);
1552
1553 -struct dma_map_ops *dma_get_ops(struct device *dev)
1554 +const struct dma_map_ops *dma_get_ops(struct device *dev)
1555 {
1556 return dma_ops;
1557 }
1558 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
1559 index 1481b0a..e7d38ff 100644
1560 --- a/arch/ia64/kernel/module.c
1561 +++ b/arch/ia64/kernel/module.c
1562 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1563 void
1564 module_free (struct module *mod, void *module_region)
1565 {
1566 - if (mod && mod->arch.init_unw_table &&
1567 - module_region == mod->module_init) {
1568 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1569 unw_remove_unwind_table(mod->arch.init_unw_table);
1570 mod->arch.init_unw_table = NULL;
1571 }
1572 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
1573 }
1574
1575 static inline int
1576 +in_init_rx (const struct module *mod, uint64_t addr)
1577 +{
1578 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1579 +}
1580 +
1581 +static inline int
1582 +in_init_rw (const struct module *mod, uint64_t addr)
1583 +{
1584 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1585 +}
1586 +
1587 +static inline int
1588 in_init (const struct module *mod, uint64_t addr)
1589 {
1590 - return addr - (uint64_t) mod->module_init < mod->init_size;
1591 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1592 +}
1593 +
1594 +static inline int
1595 +in_core_rx (const struct module *mod, uint64_t addr)
1596 +{
1597 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1598 +}
1599 +
1600 +static inline int
1601 +in_core_rw (const struct module *mod, uint64_t addr)
1602 +{
1603 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1604 }
1605
1606 static inline int
1607 in_core (const struct module *mod, uint64_t addr)
1608 {
1609 - return addr - (uint64_t) mod->module_core < mod->core_size;
1610 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1611 }
1612
1613 static inline int
1614 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
1615 break;
1616
1617 case RV_BDREL:
1618 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1619 + if (in_init_rx(mod, val))
1620 + val -= (uint64_t) mod->module_init_rx;
1621 + else if (in_init_rw(mod, val))
1622 + val -= (uint64_t) mod->module_init_rw;
1623 + else if (in_core_rx(mod, val))
1624 + val -= (uint64_t) mod->module_core_rx;
1625 + else if (in_core_rw(mod, val))
1626 + val -= (uint64_t) mod->module_core_rw;
1627 break;
1628
1629 case RV_LTV:
1630 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
1631 * addresses have been selected...
1632 */
1633 uint64_t gp;
1634 - if (mod->core_size > MAX_LTOFF)
1635 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1636 /*
1637 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1638 * at the end of the module.
1639 */
1640 - gp = mod->core_size - MAX_LTOFF / 2;
1641 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1642 else
1643 - gp = mod->core_size / 2;
1644 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1645 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1646 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1647 mod->arch.gp = gp;
1648 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1649 }
1650 diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
1651 index f6b1ff0..de773fb 100644
1652 --- a/arch/ia64/kernel/pci-dma.c
1653 +++ b/arch/ia64/kernel/pci-dma.c
1654 @@ -43,7 +43,7 @@ struct device fallback_dev = {
1655 .dma_mask = &fallback_dev.coherent_dma_mask,
1656 };
1657
1658 -extern struct dma_map_ops intel_dma_ops;
1659 +extern const struct dma_map_ops intel_dma_ops;
1660
1661 static int __init pci_iommu_init(void)
1662 {
1663 @@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *dev, u64 mask)
1664 }
1665 EXPORT_SYMBOL(iommu_dma_supported);
1666
1667 +extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1668 +extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1669 +extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1670 +extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1671 +extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1672 +extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1673 +extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1674 +
1675 +static const struct dma_map_ops intel_iommu_dma_ops = {
1676 + /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1677 + .alloc_coherent = intel_alloc_coherent,
1678 + .free_coherent = intel_free_coherent,
1679 + .map_sg = intel_map_sg,
1680 + .unmap_sg = intel_unmap_sg,
1681 + .map_page = intel_map_page,
1682 + .unmap_page = intel_unmap_page,
1683 + .mapping_error = intel_mapping_error,
1684 +
1685 + .sync_single_for_cpu = machvec_dma_sync_single,
1686 + .sync_sg_for_cpu = machvec_dma_sync_sg,
1687 + .sync_single_for_device = machvec_dma_sync_single,
1688 + .sync_sg_for_device = machvec_dma_sync_sg,
1689 + .dma_supported = iommu_dma_supported,
1690 +};
1691 +
1692 void __init pci_iommu_alloc(void)
1693 {
1694 - dma_ops = &intel_dma_ops;
1695 -
1696 - dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1697 - dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1698 - dma_ops->sync_single_for_device = machvec_dma_sync_single;
1699 - dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1700 - dma_ops->dma_supported = iommu_dma_supported;
1701 + dma_ops = &intel_iommu_dma_ops;
1702
1703 /*
1704 * The order of these functions is important for
1705 diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
1706 index 285aae8..61dbab6 100644
1707 --- a/arch/ia64/kernel/pci-swiotlb.c
1708 +++ b/arch/ia64/kernel/pci-swiotlb.c
1709 @@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
1710 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1711 }
1712
1713 -struct dma_map_ops swiotlb_dma_ops = {
1714 +const struct dma_map_ops swiotlb_dma_ops = {
1715 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1716 .free_coherent = swiotlb_free_coherent,
1717 .map_page = swiotlb_map_page,
1718 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
1719 index 609d500..7dde2a8 100644
1720 --- a/arch/ia64/kernel/sys_ia64.c
1721 +++ b/arch/ia64/kernel/sys_ia64.c
1722 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1723 if (REGION_NUMBER(addr) == RGN_HPAGE)
1724 addr = 0;
1725 #endif
1726 +
1727 +#ifdef CONFIG_PAX_RANDMMAP
1728 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1729 + addr = mm->free_area_cache;
1730 + else
1731 +#endif
1732 +
1733 if (!addr)
1734 addr = mm->free_area_cache;
1735
1736 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1737 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1738 /* At this point: (!vma || addr < vma->vm_end). */
1739 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1740 - if (start_addr != TASK_UNMAPPED_BASE) {
1741 + if (start_addr != mm->mmap_base) {
1742 /* Start a new search --- just in case we missed some holes. */
1743 - addr = TASK_UNMAPPED_BASE;
1744 + addr = mm->mmap_base;
1745 goto full_search;
1746 }
1747 return -ENOMEM;
1748 }
1749 - if (!vma || addr + len <= vma->vm_start) {
1750 + if (check_heap_stack_gap(vma, addr, len)) {
1751 /* Remember the address where we stopped this search: */
1752 mm->free_area_cache = addr + len;
1753 return addr;
1754 diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
1755 index 8f06035..b3a5818 100644
1756 --- a/arch/ia64/kernel/topology.c
1757 +++ b/arch/ia64/kernel/topology.c
1758 @@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char *
1759 return ret;
1760 }
1761
1762 -static struct sysfs_ops cache_sysfs_ops = {
1763 +static const struct sysfs_ops cache_sysfs_ops = {
1764 .show = cache_show
1765 };
1766
1767 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
1768 index 0a0c77b..8e55a81 100644
1769 --- a/arch/ia64/kernel/vmlinux.lds.S
1770 +++ b/arch/ia64/kernel/vmlinux.lds.S
1771 @@ -190,7 +190,7 @@ SECTIONS
1772 /* Per-cpu data: */
1773 . = ALIGN(PERCPU_PAGE_SIZE);
1774 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1775 - __phys_per_cpu_start = __per_cpu_load;
1776 + __phys_per_cpu_start = per_cpu_load;
1777 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1778 * into percpu page size
1779 */
1780 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
1781 index 19261a9..1611b7a 100644
1782 --- a/arch/ia64/mm/fault.c
1783 +++ b/arch/ia64/mm/fault.c
1784 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
1785 return pte_present(pte);
1786 }
1787
1788 +#ifdef CONFIG_PAX_PAGEEXEC
1789 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1790 +{
1791 + unsigned long i;
1792 +
1793 + printk(KERN_ERR "PAX: bytes at PC: ");
1794 + for (i = 0; i < 8; i++) {
1795 + unsigned int c;
1796 + if (get_user(c, (unsigned int *)pc+i))
1797 + printk(KERN_CONT "???????? ");
1798 + else
1799 + printk(KERN_CONT "%08x ", c);
1800 + }
1801 + printk("\n");
1802 +}
1803 +#endif
1804 +
1805 void __kprobes
1806 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1807 {
1808 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
1809 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1810 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1811
1812 - if ((vma->vm_flags & mask) != mask)
1813 + if ((vma->vm_flags & mask) != mask) {
1814 +
1815 +#ifdef CONFIG_PAX_PAGEEXEC
1816 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1817 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1818 + goto bad_area;
1819 +
1820 + up_read(&mm->mmap_sem);
1821 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1822 + do_group_exit(SIGKILL);
1823 + }
1824 +#endif
1825 +
1826 goto bad_area;
1827
1828 + }
1829 +
1830 survive:
1831 /*
1832 * If for any reason at all we couldn't handle the fault, make
1833 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
1834 index b0f6157..a082bbc 100644
1835 --- a/arch/ia64/mm/hugetlbpage.c
1836 +++ b/arch/ia64/mm/hugetlbpage.c
1837 @@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
1838 /* At this point: (!vmm || addr < vmm->vm_end). */
1839 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1840 return -ENOMEM;
1841 - if (!vmm || (addr + len) <= vmm->vm_start)
1842 + if (check_heap_stack_gap(vmm, addr, len))
1843 return addr;
1844 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1845 }
1846 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
1847 index 1857766..05cc6a3 100644
1848 --- a/arch/ia64/mm/init.c
1849 +++ b/arch/ia64/mm/init.c
1850 @@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1851 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1852 vma->vm_end = vma->vm_start + PAGE_SIZE;
1853 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1854 +
1855 +#ifdef CONFIG_PAX_PAGEEXEC
1856 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1857 + vma->vm_flags &= ~VM_EXEC;
1858 +
1859 +#ifdef CONFIG_PAX_MPROTECT
1860 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1861 + vma->vm_flags &= ~VM_MAYEXEC;
1862 +#endif
1863 +
1864 + }
1865 +#endif
1866 +
1867 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1868 down_write(&current->mm->mmap_sem);
1869 if (insert_vm_struct(current->mm, vma)) {
1870 diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
1871 index 98b6849..8046766 100644
1872 --- a/arch/ia64/sn/pci/pci_dma.c
1873 +++ b/arch/ia64/sn/pci/pci_dma.c
1874 @@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
1875 return ret;
1876 }
1877
1878 -static struct dma_map_ops sn_dma_ops = {
1879 +static const struct dma_map_ops sn_dma_ops = {
1880 .alloc_coherent = sn_dma_alloc_coherent,
1881 .free_coherent = sn_dma_free_coherent,
1882 .map_page = sn_dma_map_page,
1883 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
1884 index 82abd15..d95ae5d 100644
1885 --- a/arch/m32r/lib/usercopy.c
1886 +++ b/arch/m32r/lib/usercopy.c
1887 @@ -14,6 +14,9 @@
1888 unsigned long
1889 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1890 {
1891 + if ((long)n < 0)
1892 + return n;
1893 +
1894 prefetch(from);
1895 if (access_ok(VERIFY_WRITE, to, n))
1896 __copy_user(to,from,n);
1897 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1898 unsigned long
1899 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1900 {
1901 + if ((long)n < 0)
1902 + return n;
1903 +
1904 prefetchw(to);
1905 if (access_ok(VERIFY_READ, from, n))
1906 __copy_user_zeroing(to,from,n);
1907 diff --git a/arch/mips/Makefile b/arch/mips/Makefile
1908 index 77f5021..2b1db8a 100644
1909 --- a/arch/mips/Makefile
1910 +++ b/arch/mips/Makefile
1911 @@ -51,6 +51,8 @@ endif
1912 cflags-y := -ffunction-sections
1913 cflags-y += $(call cc-option, -mno-check-zero-division)
1914
1915 +cflags-y += -Wno-sign-compare -Wno-extra
1916 +
1917 ifdef CONFIG_32BIT
1918 ld-emul = $(32bit-emul)
1919 vmlinux-32 = vmlinux
1920 diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
1921 index 632f986..fd0378d 100644
1922 --- a/arch/mips/alchemy/devboards/pm.c
1923 +++ b/arch/mips/alchemy/devboards/pm.c
1924 @@ -78,7 +78,7 @@ static void db1x_pm_end(void)
1925
1926 }
1927
1928 -static struct platform_suspend_ops db1x_pm_ops = {
1929 +static const struct platform_suspend_ops db1x_pm_ops = {
1930 .valid = suspend_valid_only_mem,
1931 .begin = db1x_pm_begin,
1932 .enter = db1x_pm_enter,
1933 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
1934 index 7990694..4e93acf 100644
1935 --- a/arch/mips/include/asm/elf.h
1936 +++ b/arch/mips/include/asm/elf.h
1937 @@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
1938 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1939 #endif
1940
1941 +#ifdef CONFIG_PAX_ASLR
1942 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1943 +
1944 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1945 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1946 +#endif
1947 +
1948 #endif /* _ASM_ELF_H */
1949 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
1950 index f266295..627cfff 100644
1951 --- a/arch/mips/include/asm/page.h
1952 +++ b/arch/mips/include/asm/page.h
1953 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
1954 #ifdef CONFIG_CPU_MIPS32
1955 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1956 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1957 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1958 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1959 #else
1960 typedef struct { unsigned long long pte; } pte_t;
1961 #define pte_val(x) ((x).pte)
1962 diff --git a/arch/mips/include/asm/reboot.h b/arch/mips/include/asm/reboot.h
1963 index e48c0bf..f3acf65 100644
1964 --- a/arch/mips/include/asm/reboot.h
1965 +++ b/arch/mips/include/asm/reboot.h
1966 @@ -9,7 +9,7 @@
1967 #ifndef _ASM_REBOOT_H
1968 #define _ASM_REBOOT_H
1969
1970 -extern void (*_machine_restart)(char *command);
1971 -extern void (*_machine_halt)(void);
1972 +extern void (*__noreturn _machine_restart)(char *command);
1973 +extern void (*__noreturn _machine_halt)(void);
1974
1975 #endif /* _ASM_REBOOT_H */
1976 diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
1977 index 83b5509..9fa24a23 100644
1978 --- a/arch/mips/include/asm/system.h
1979 +++ b/arch/mips/include/asm/system.h
1980 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1981 */
1982 #define __ARCH_WANT_UNLOCKED_CTXSW
1983
1984 -extern unsigned long arch_align_stack(unsigned long sp);
1985 +#define arch_align_stack(x) ((x) & ~0xfUL)
1986
1987 #endif /* _ASM_SYSTEM_H */
1988 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
1989 index 9fdd8bc..fcf9d68 100644
1990 --- a/arch/mips/kernel/binfmt_elfn32.c
1991 +++ b/arch/mips/kernel/binfmt_elfn32.c
1992 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1993 #undef ELF_ET_DYN_BASE
1994 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1995
1996 +#ifdef CONFIG_PAX_ASLR
1997 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1998 +
1999 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2000 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2001 +#endif
2002 +
2003 #include <asm/processor.h>
2004 #include <linux/module.h>
2005 #include <linux/elfcore.h>
2006 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2007 index ff44823..cf0b48a 100644
2008 --- a/arch/mips/kernel/binfmt_elfo32.c
2009 +++ b/arch/mips/kernel/binfmt_elfo32.c
2010 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2011 #undef ELF_ET_DYN_BASE
2012 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2013
2014 +#ifdef CONFIG_PAX_ASLR
2015 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2016 +
2017 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2018 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2019 +#endif
2020 +
2021 #include <asm/processor.h>
2022
2023 /*
2024 diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
2025 index 50c9bb8..efdd5f8 100644
2026 --- a/arch/mips/kernel/kgdb.c
2027 +++ b/arch/mips/kernel/kgdb.c
2028 @@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
2029 return -1;
2030 }
2031
2032 +/* cannot be const */
2033 struct kgdb_arch arch_kgdb_ops;
2034
2035 /*
2036 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2037 index f3d73e1..bb3f57a 100644
2038 --- a/arch/mips/kernel/process.c
2039 +++ b/arch/mips/kernel/process.c
2040 @@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_struct *task)
2041 out:
2042 return pc;
2043 }
2044 -
2045 -/*
2046 - * Don't forget that the stack pointer must be aligned on a 8 bytes
2047 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2048 - */
2049 -unsigned long arch_align_stack(unsigned long sp)
2050 -{
2051 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2052 - sp -= get_random_int() & ~PAGE_MASK;
2053 -
2054 - return sp & ALMASK;
2055 -}
2056 diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
2057 index 060563a..7fbf310 100644
2058 --- a/arch/mips/kernel/reset.c
2059 +++ b/arch/mips/kernel/reset.c
2060 @@ -19,8 +19,8 @@
2061 * So handle all using function pointers to machine specific
2062 * functions.
2063 */
2064 -void (*_machine_restart)(char *command);
2065 -void (*_machine_halt)(void);
2066 +void (*__noreturn _machine_restart)(char *command);
2067 +void (*__noreturn _machine_halt)(void);
2068 void (*pm_power_off)(void);
2069
2070 EXPORT_SYMBOL(pm_power_off);
2071 @@ -29,16 +29,19 @@ void machine_restart(char *command)
2072 {
2073 if (_machine_restart)
2074 _machine_restart(command);
2075 + BUG();
2076 }
2077
2078 void machine_halt(void)
2079 {
2080 if (_machine_halt)
2081 _machine_halt();
2082 + BUG();
2083 }
2084
2085 void machine_power_off(void)
2086 {
2087 if (pm_power_off)
2088 pm_power_off();
2089 + BUG();
2090 }
2091 diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
2092 index 3f7f466..3abe0b5 100644
2093 --- a/arch/mips/kernel/syscall.c
2094 +++ b/arch/mips/kernel/syscall.c
2095 @@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2096 do_color_align = 0;
2097 if (filp || (flags & MAP_SHARED))
2098 do_color_align = 1;
2099 +
2100 +#ifdef CONFIG_PAX_RANDMMAP
2101 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
2102 +#endif
2103 +
2104 if (addr) {
2105 if (do_color_align)
2106 addr = COLOUR_ALIGN(addr, pgoff);
2107 else
2108 addr = PAGE_ALIGN(addr);
2109 vmm = find_vma(current->mm, addr);
2110 - if (task_size - len >= addr &&
2111 - (!vmm || addr + len <= vmm->vm_start))
2112 + if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
2113 return addr;
2114 }
2115 - addr = TASK_UNMAPPED_BASE;
2116 + addr = current->mm->mmap_base;
2117 if (do_color_align)
2118 addr = COLOUR_ALIGN(addr, pgoff);
2119 else
2120 @@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2121 /* At this point: (!vmm || addr < vmm->vm_end). */
2122 if (task_size - len < addr)
2123 return -ENOMEM;
2124 - if (!vmm || addr + len <= vmm->vm_start)
2125 + if (check_heap_stack_gap(vmm, addr, len))
2126 return addr;
2127 addr = vmm->vm_end;
2128 if (do_color_align)
2129 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
2130 index e97a7a2..f18f5b0 100644
2131 --- a/arch/mips/mm/fault.c
2132 +++ b/arch/mips/mm/fault.c
2133 @@ -26,6 +26,23 @@
2134 #include <asm/ptrace.h>
2135 #include <asm/highmem.h> /* For VMALLOC_END */
2136
2137 +#ifdef CONFIG_PAX_PAGEEXEC
2138 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2139 +{
2140 + unsigned long i;
2141 +
2142 + printk(KERN_ERR "PAX: bytes at PC: ");
2143 + for (i = 0; i < 5; i++) {
2144 + unsigned int c;
2145 + if (get_user(c, (unsigned int *)pc+i))
2146 + printk(KERN_CONT "???????? ");
2147 + else
2148 + printk(KERN_CONT "%08x ", c);
2149 + }
2150 + printk("\n");
2151 +}
2152 +#endif
2153 +
2154 /*
2155 * This routine handles page faults. It determines the address,
2156 * and the problem, and then passes it off to one of the appropriate
2157 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
2158 index 9c802eb..0592e41 100644
2159 --- a/arch/parisc/include/asm/elf.h
2160 +++ b/arch/parisc/include/asm/elf.h
2161 @@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration... */
2162
2163 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
2164
2165 +#ifdef CONFIG_PAX_ASLR
2166 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
2167 +
2168 +#define PAX_DELTA_MMAP_LEN 16
2169 +#define PAX_DELTA_STACK_LEN 16
2170 +#endif
2171 +
2172 /* This yields a mask that user programs can use to figure out what
2173 instruction set this CPU supports. This could be done in user space,
2174 but it's not easy, and we've already done it here. */
2175 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
2176 index a27d2e2..18fd845 100644
2177 --- a/arch/parisc/include/asm/pgtable.h
2178 +++ b/arch/parisc/include/asm/pgtable.h
2179 @@ -207,6 +207,17 @@
2180 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
2181 #define PAGE_COPY PAGE_EXECREAD
2182 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
2183 +
2184 +#ifdef CONFIG_PAX_PAGEEXEC
2185 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
2186 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2187 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2188 +#else
2189 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
2190 +# define PAGE_COPY_NOEXEC PAGE_COPY
2191 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
2192 +#endif
2193 +
2194 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
2195 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
2196 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
2197 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
2198 index 2120746..8d70a5e 100644
2199 --- a/arch/parisc/kernel/module.c
2200 +++ b/arch/parisc/kernel/module.c
2201 @@ -95,16 +95,38 @@
2202
2203 /* three functions to determine where in the module core
2204 * or init pieces the location is */
2205 +static inline int in_init_rx(struct module *me, void *loc)
2206 +{
2207 + return (loc >= me->module_init_rx &&
2208 + loc < (me->module_init_rx + me->init_size_rx));
2209 +}
2210 +
2211 +static inline int in_init_rw(struct module *me, void *loc)
2212 +{
2213 + return (loc >= me->module_init_rw &&
2214 + loc < (me->module_init_rw + me->init_size_rw));
2215 +}
2216 +
2217 static inline int in_init(struct module *me, void *loc)
2218 {
2219 - return (loc >= me->module_init &&
2220 - loc <= (me->module_init + me->init_size));
2221 + return in_init_rx(me, loc) || in_init_rw(me, loc);
2222 +}
2223 +
2224 +static inline int in_core_rx(struct module *me, void *loc)
2225 +{
2226 + return (loc >= me->module_core_rx &&
2227 + loc < (me->module_core_rx + me->core_size_rx));
2228 +}
2229 +
2230 +static inline int in_core_rw(struct module *me, void *loc)
2231 +{
2232 + return (loc >= me->module_core_rw &&
2233 + loc < (me->module_core_rw + me->core_size_rw));
2234 }
2235
2236 static inline int in_core(struct module *me, void *loc)
2237 {
2238 - return (loc >= me->module_core &&
2239 - loc <= (me->module_core + me->core_size));
2240 + return in_core_rx(me, loc) || in_core_rw(me, loc);
2241 }
2242
2243 static inline int in_local(struct module *me, void *loc)
2244 @@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
2245 }
2246
2247 /* align things a bit */
2248 - me->core_size = ALIGN(me->core_size, 16);
2249 - me->arch.got_offset = me->core_size;
2250 - me->core_size += gots * sizeof(struct got_entry);
2251 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
2252 + me->arch.got_offset = me->core_size_rw;
2253 + me->core_size_rw += gots * sizeof(struct got_entry);
2254
2255 - me->core_size = ALIGN(me->core_size, 16);
2256 - me->arch.fdesc_offset = me->core_size;
2257 - me->core_size += fdescs * sizeof(Elf_Fdesc);
2258 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
2259 + me->arch.fdesc_offset = me->core_size_rw;
2260 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
2261
2262 me->arch.got_max = gots;
2263 me->arch.fdesc_max = fdescs;
2264 @@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
2265
2266 BUG_ON(value == 0);
2267
2268 - got = me->module_core + me->arch.got_offset;
2269 + got = me->module_core_rw + me->arch.got_offset;
2270 for (i = 0; got[i].addr; i++)
2271 if (got[i].addr == value)
2272 goto out;
2273 @@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
2274 #ifdef CONFIG_64BIT
2275 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2276 {
2277 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
2278 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
2279
2280 if (!value) {
2281 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
2282 @@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2283
2284 /* Create new one */
2285 fdesc->addr = value;
2286 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2287 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2288 return (Elf_Addr)fdesc;
2289 }
2290 #endif /* CONFIG_64BIT */
2291 @@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
2292
2293 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
2294 end = table + sechdrs[me->arch.unwind_section].sh_size;
2295 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2296 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2297
2298 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
2299 me->arch.unwind_section, table, end, gp);
2300 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
2301 index 9147391..f3d949a 100644
2302 --- a/arch/parisc/kernel/sys_parisc.c
2303 +++ b/arch/parisc/kernel/sys_parisc.c
2304 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
2305 /* At this point: (!vma || addr < vma->vm_end). */
2306 if (TASK_SIZE - len < addr)
2307 return -ENOMEM;
2308 - if (!vma || addr + len <= vma->vm_start)
2309 + if (check_heap_stack_gap(vma, addr, len))
2310 return addr;
2311 addr = vma->vm_end;
2312 }
2313 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
2314 /* At this point: (!vma || addr < vma->vm_end). */
2315 if (TASK_SIZE - len < addr)
2316 return -ENOMEM;
2317 - if (!vma || addr + len <= vma->vm_start)
2318 + if (check_heap_stack_gap(vma, addr, len))
2319 return addr;
2320 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
2321 if (addr < vma->vm_end) /* handle wraparound */
2322 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2323 if (flags & MAP_FIXED)
2324 return addr;
2325 if (!addr)
2326 - addr = TASK_UNMAPPED_BASE;
2327 + addr = current->mm->mmap_base;
2328
2329 if (filp) {
2330 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
2331 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
2332 index 8b58bf0..7afff03 100644
2333 --- a/arch/parisc/kernel/traps.c
2334 +++ b/arch/parisc/kernel/traps.c
2335 @@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
2336
2337 down_read(&current->mm->mmap_sem);
2338 vma = find_vma(current->mm,regs->iaoq[0]);
2339 - if (vma && (regs->iaoq[0] >= vma->vm_start)
2340 - && (vma->vm_flags & VM_EXEC)) {
2341 -
2342 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
2343 fault_address = regs->iaoq[0];
2344 fault_space = regs->iasq[0];
2345
2346 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
2347 index c6afbfc..c5839f6 100644
2348 --- a/arch/parisc/mm/fault.c
2349 +++ b/arch/parisc/mm/fault.c
2350 @@ -15,6 +15,7 @@
2351 #include <linux/sched.h>
2352 #include <linux/interrupt.h>
2353 #include <linux/module.h>
2354 +#include <linux/unistd.h>
2355
2356 #include <asm/uaccess.h>
2357 #include <asm/traps.h>
2358 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
2359 static unsigned long
2360 parisc_acctyp(unsigned long code, unsigned int inst)
2361 {
2362 - if (code == 6 || code == 16)
2363 + if (code == 6 || code == 7 || code == 16)
2364 return VM_EXEC;
2365
2366 switch (inst & 0xf0000000) {
2367 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
2368 }
2369 #endif
2370
2371 +#ifdef CONFIG_PAX_PAGEEXEC
2372 +/*
2373 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
2374 + *
2375 + * returns 1 when task should be killed
2376 + * 2 when rt_sigreturn trampoline was detected
2377 + * 3 when unpatched PLT trampoline was detected
2378 + */
2379 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2380 +{
2381 +
2382 +#ifdef CONFIG_PAX_EMUPLT
2383 + int err;
2384 +
2385 + do { /* PaX: unpatched PLT emulation */
2386 + unsigned int bl, depwi;
2387 +
2388 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
2389 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
2390 +
2391 + if (err)
2392 + break;
2393 +
2394 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
2395 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
2396 +
2397 + err = get_user(ldw, (unsigned int *)addr);
2398 + err |= get_user(bv, (unsigned int *)(addr+4));
2399 + err |= get_user(ldw2, (unsigned int *)(addr+8));
2400 +
2401 + if (err)
2402 + break;
2403 +
2404 + if (ldw == 0x0E801096U &&
2405 + bv == 0xEAC0C000U &&
2406 + ldw2 == 0x0E881095U)
2407 + {
2408 + unsigned int resolver, map;
2409 +
2410 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
2411 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
2412 + if (err)
2413 + break;
2414 +
2415 + regs->gr[20] = instruction_pointer(regs)+8;
2416 + regs->gr[21] = map;
2417 + regs->gr[22] = resolver;
2418 + regs->iaoq[0] = resolver | 3UL;
2419 + regs->iaoq[1] = regs->iaoq[0] + 4;
2420 + return 3;
2421 + }
2422 + }
2423 + } while (0);
2424 +#endif
2425 +
2426 +#ifdef CONFIG_PAX_EMUTRAMP
2427 +
2428 +#ifndef CONFIG_PAX_EMUSIGRT
2429 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
2430 + return 1;
2431 +#endif
2432 +
2433 + do { /* PaX: rt_sigreturn emulation */
2434 + unsigned int ldi1, ldi2, bel, nop;
2435 +
2436 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
2437 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
2438 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
2439 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
2440 +
2441 + if (err)
2442 + break;
2443 +
2444 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
2445 + ldi2 == 0x3414015AU &&
2446 + bel == 0xE4008200U &&
2447 + nop == 0x08000240U)
2448 + {
2449 + regs->gr[25] = (ldi1 & 2) >> 1;
2450 + regs->gr[20] = __NR_rt_sigreturn;
2451 + regs->gr[31] = regs->iaoq[1] + 16;
2452 + regs->sr[0] = regs->iasq[1];
2453 + regs->iaoq[0] = 0x100UL;
2454 + regs->iaoq[1] = regs->iaoq[0] + 4;
2455 + regs->iasq[0] = regs->sr[2];
2456 + regs->iasq[1] = regs->sr[2];
2457 + return 2;
2458 + }
2459 + } while (0);
2460 +#endif
2461 +
2462 + return 1;
2463 +}
2464 +
2465 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2466 +{
2467 + unsigned long i;
2468 +
2469 + printk(KERN_ERR "PAX: bytes at PC: ");
2470 + for (i = 0; i < 5; i++) {
2471 + unsigned int c;
2472 + if (get_user(c, (unsigned int *)pc+i))
2473 + printk(KERN_CONT "???????? ");
2474 + else
2475 + printk(KERN_CONT "%08x ", c);
2476 + }
2477 + printk("\n");
2478 +}
2479 +#endif
2480 +
2481 int fixup_exception(struct pt_regs *regs)
2482 {
2483 const struct exception_table_entry *fix;
2484 @@ -192,8 +303,33 @@ good_area:
2485
2486 acc_type = parisc_acctyp(code,regs->iir);
2487
2488 - if ((vma->vm_flags & acc_type) != acc_type)
2489 + if ((vma->vm_flags & acc_type) != acc_type) {
2490 +
2491 +#ifdef CONFIG_PAX_PAGEEXEC
2492 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2493 + (address & ~3UL) == instruction_pointer(regs))
2494 + {
2495 + up_read(&mm->mmap_sem);
2496 + switch (pax_handle_fetch_fault(regs)) {
2497 +
2498 +#ifdef CONFIG_PAX_EMUPLT
2499 + case 3:
2500 + return;
2501 +#endif
2502 +
2503 +#ifdef CONFIG_PAX_EMUTRAMP
2504 + case 2:
2505 + return;
2506 +#endif
2507 +
2508 + }
2509 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2510 + do_group_exit(SIGKILL);
2511 + }
2512 +#endif
2513 +
2514 goto bad_area;
2515 + }
2516
2517 /*
2518 * If for any reason at all we couldn't handle the fault, make
2519 diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
2520 index c107b74..409dc0f 100644
2521 --- a/arch/powerpc/Makefile
2522 +++ b/arch/powerpc/Makefile
2523 @@ -74,6 +74,8 @@ KBUILD_AFLAGS += -Iarch/$(ARCH)
2524 KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
2525 CPP = $(CC) -E $(KBUILD_CFLAGS)
2526
2527 +cflags-y += -Wno-sign-compare -Wno-extra
2528 +
2529 CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
2530
2531 ifeq ($(CONFIG_PPC64),y)
2532 diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
2533 index 6d94d27..50d4cad 100644
2534 --- a/arch/powerpc/include/asm/device.h
2535 +++ b/arch/powerpc/include/asm/device.h
2536 @@ -14,7 +14,7 @@ struct dev_archdata {
2537 struct device_node *of_node;
2538
2539 /* DMA operations on that device */
2540 - struct dma_map_ops *dma_ops;
2541 + const struct dma_map_ops *dma_ops;
2542
2543 /*
2544 * When an iommu is in use, dma_data is used as a ptr to the base of the
2545 diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
2546 index e281dae..2b8a784 100644
2547 --- a/arch/powerpc/include/asm/dma-mapping.h
2548 +++ b/arch/powerpc/include/asm/dma-mapping.h
2549 @@ -69,9 +69,9 @@ static inline unsigned long device_to_mask(struct device *dev)
2550 #ifdef CONFIG_PPC64
2551 extern struct dma_map_ops dma_iommu_ops;
2552 #endif
2553 -extern struct dma_map_ops dma_direct_ops;
2554 +extern const struct dma_map_ops dma_direct_ops;
2555
2556 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2557 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
2558 {
2559 /* We don't handle the NULL dev case for ISA for now. We could
2560 * do it via an out of line call but it is not needed for now. The
2561 @@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2562 return dev->archdata.dma_ops;
2563 }
2564
2565 -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
2566 +static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
2567 {
2568 dev->archdata.dma_ops = ops;
2569 }
2570 @@ -118,7 +118,7 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
2571
2572 static inline int dma_supported(struct device *dev, u64 mask)
2573 {
2574 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2575 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2576
2577 if (unlikely(dma_ops == NULL))
2578 return 0;
2579 @@ -132,7 +132,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
2580
2581 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2582 {
2583 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2584 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2585
2586 if (unlikely(dma_ops == NULL))
2587 return -EIO;
2588 @@ -147,7 +147,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2589 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2590 dma_addr_t *dma_handle, gfp_t flag)
2591 {
2592 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2593 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2594 void *cpu_addr;
2595
2596 BUG_ON(!dma_ops);
2597 @@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2598 static inline void dma_free_coherent(struct device *dev, size_t size,
2599 void *cpu_addr, dma_addr_t dma_handle)
2600 {
2601 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2602 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2603
2604 BUG_ON(!dma_ops);
2605
2606 @@ -173,7 +173,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
2607
2608 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2609 {
2610 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2611 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2612
2613 if (dma_ops->mapping_error)
2614 return dma_ops->mapping_error(dev, dma_addr);
2615 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
2616 index 5698502..5db093c 100644
2617 --- a/arch/powerpc/include/asm/elf.h
2618 +++ b/arch/powerpc/include/asm/elf.h
2619 @@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
2620 the loader. We need to make sure that it is out of the way of the program
2621 that it will "exec", and that there is sufficient room for the brk. */
2622
2623 -extern unsigned long randomize_et_dyn(unsigned long base);
2624 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2625 +#define ELF_ET_DYN_BASE (0x20000000)
2626 +
2627 +#ifdef CONFIG_PAX_ASLR
2628 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2629 +
2630 +#ifdef __powerpc64__
2631 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2632 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2633 +#else
2634 +#define PAX_DELTA_MMAP_LEN 15
2635 +#define PAX_DELTA_STACK_LEN 15
2636 +#endif
2637 +#endif
2638
2639 /*
2640 * Our registers are always unsigned longs, whether we're a 32 bit
2641 @@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2642 (0x7ff >> (PAGE_SHIFT - 12)) : \
2643 (0x3ffff >> (PAGE_SHIFT - 12)))
2644
2645 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2646 -#define arch_randomize_brk arch_randomize_brk
2647 -
2648 #endif /* __KERNEL__ */
2649
2650 /*
2651 diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
2652 index edfc980..1766f59 100644
2653 --- a/arch/powerpc/include/asm/iommu.h
2654 +++ b/arch/powerpc/include/asm/iommu.h
2655 @@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(void);
2656 extern void iommu_init_early_dart(void);
2657 extern void iommu_init_early_pasemi(void);
2658
2659 +/* dma-iommu.c */
2660 +extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
2661 +
2662 #ifdef CONFIG_PCI
2663 extern void pci_iommu_init(void);
2664 extern void pci_direct_iommu_init(void);
2665 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
2666 index 9163695..5a00112 100644
2667 --- a/arch/powerpc/include/asm/kmap_types.h
2668 +++ b/arch/powerpc/include/asm/kmap_types.h
2669 @@ -26,6 +26,7 @@ enum km_type {
2670 KM_SOFTIRQ1,
2671 KM_PPC_SYNC_PAGE,
2672 KM_PPC_SYNC_ICACHE,
2673 + KM_CLEARPAGE,
2674 KM_TYPE_NR
2675 };
2676
2677 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
2678 index ff24254..fe45b21 100644
2679 --- a/arch/powerpc/include/asm/page.h
2680 +++ b/arch/powerpc/include/asm/page.h
2681 @@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2682 * and needs to be executable. This means the whole heap ends
2683 * up being executable.
2684 */
2685 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2686 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2687 +#define VM_DATA_DEFAULT_FLAGS32 \
2688 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2689 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2690
2691 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2692 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2693 @@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2694 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2695 #endif
2696
2697 +#define ktla_ktva(addr) (addr)
2698 +#define ktva_ktla(addr) (addr)
2699 +
2700 #ifndef __ASSEMBLY__
2701
2702 #undef STRICT_MM_TYPECHECKS
2703 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
2704 index 3f17b83..1f9e766 100644
2705 --- a/arch/powerpc/include/asm/page_64.h
2706 +++ b/arch/powerpc/include/asm/page_64.h
2707 @@ -180,15 +180,18 @@ do { \
2708 * stack by default, so in the absense of a PT_GNU_STACK program header
2709 * we turn execute permission off.
2710 */
2711 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2712 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2713 +#define VM_STACK_DEFAULT_FLAGS32 \
2714 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2715 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2716
2717 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2718 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2719
2720 +#ifndef CONFIG_PAX_PAGEEXEC
2721 #define VM_STACK_DEFAULT_FLAGS \
2722 (test_thread_flag(TIF_32BIT) ? \
2723 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2724 +#endif
2725
2726 #include <asm-generic/getorder.h>
2727
2728 diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
2729 index b5ea626..4030822 100644
2730 --- a/arch/powerpc/include/asm/pci.h
2731 +++ b/arch/powerpc/include/asm/pci.h
2732 @@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
2733 }
2734
2735 #ifdef CONFIG_PCI
2736 -extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2737 -extern struct dma_map_ops *get_pci_dma_ops(void);
2738 +extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2739 +extern const struct dma_map_ops *get_pci_dma_ops(void);
2740 #else /* CONFIG_PCI */
2741 #define set_pci_dma_ops(d)
2742 #define get_pci_dma_ops() NULL
2743 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
2744 index 2a5da06..d65bea2 100644
2745 --- a/arch/powerpc/include/asm/pgtable.h
2746 +++ b/arch/powerpc/include/asm/pgtable.h
2747 @@ -2,6 +2,7 @@
2748 #define _ASM_POWERPC_PGTABLE_H
2749 #ifdef __KERNEL__
2750
2751 +#include <linux/const.h>
2752 #ifndef __ASSEMBLY__
2753 #include <asm/processor.h> /* For TASK_SIZE */
2754 #include <asm/mmu.h>
2755 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
2756 index 4aad413..85d86bf 100644
2757 --- a/arch/powerpc/include/asm/pte-hash32.h
2758 +++ b/arch/powerpc/include/asm/pte-hash32.h
2759 @@ -21,6 +21,7 @@
2760 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2761 #define _PAGE_USER 0x004 /* usermode access allowed */
2762 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2763 +#define _PAGE_EXEC _PAGE_GUARDED
2764 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2765 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2766 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2767 diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
2768 index 8c34149..78f425a 100644
2769 --- a/arch/powerpc/include/asm/ptrace.h
2770 +++ b/arch/powerpc/include/asm/ptrace.h
2771 @@ -103,7 +103,7 @@ extern unsigned long profile_pc(struct pt_regs *regs);
2772 } while(0)
2773
2774 struct task_struct;
2775 -extern unsigned long ptrace_get_reg(struct task_struct *task, int regno);
2776 +extern unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno);
2777 extern int ptrace_put_reg(struct task_struct *task, int regno,
2778 unsigned long data);
2779
2780 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
2781 index 32a7c30..be3a8bb 100644
2782 --- a/arch/powerpc/include/asm/reg.h
2783 +++ b/arch/powerpc/include/asm/reg.h
2784 @@ -191,6 +191,7 @@
2785 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2786 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2787 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2788 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2789 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2790 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2791 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2792 diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
2793 index 8979d4c..d2fd0d3 100644
2794 --- a/arch/powerpc/include/asm/swiotlb.h
2795 +++ b/arch/powerpc/include/asm/swiotlb.h
2796 @@ -13,7 +13,7 @@
2797
2798 #include <linux/swiotlb.h>
2799
2800 -extern struct dma_map_ops swiotlb_dma_ops;
2801 +extern const struct dma_map_ops swiotlb_dma_ops;
2802
2803 static inline void dma_mark_clean(void *addr, size_t size) {}
2804
2805 diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
2806 index 094a12a..877a60a 100644
2807 --- a/arch/powerpc/include/asm/system.h
2808 +++ b/arch/powerpc/include/asm/system.h
2809 @@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
2810 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2811 #endif
2812
2813 -extern unsigned long arch_align_stack(unsigned long sp);
2814 +#define arch_align_stack(x) ((x) & ~0xfUL)
2815
2816 /* Used in very early kernel initialization. */
2817 extern unsigned long reloc_offset(void);
2818 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
2819 index bd0fb84..a42a14b 100644
2820 --- a/arch/powerpc/include/asm/uaccess.h
2821 +++ b/arch/powerpc/include/asm/uaccess.h
2822 @@ -13,6 +13,8 @@
2823 #define VERIFY_READ 0
2824 #define VERIFY_WRITE 1
2825
2826 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
2827 +
2828 /*
2829 * The fs value determines whether argument validity checking should be
2830 * performed or not. If get_fs() == USER_DS, checking is performed, with
2831 @@ -327,52 +329,6 @@ do { \
2832 extern unsigned long __copy_tofrom_user(void __user *to,
2833 const void __user *from, unsigned long size);
2834
2835 -#ifndef __powerpc64__
2836 -
2837 -static inline unsigned long copy_from_user(void *to,
2838 - const void __user *from, unsigned long n)
2839 -{
2840 - unsigned long over;
2841 -
2842 - if (access_ok(VERIFY_READ, from, n))
2843 - return __copy_tofrom_user((__force void __user *)to, from, n);
2844 - if ((unsigned long)from < TASK_SIZE) {
2845 - over = (unsigned long)from + n - TASK_SIZE;
2846 - return __copy_tofrom_user((__force void __user *)to, from,
2847 - n - over) + over;
2848 - }
2849 - return n;
2850 -}
2851 -
2852 -static inline unsigned long copy_to_user(void __user *to,
2853 - const void *from, unsigned long n)
2854 -{
2855 - unsigned long over;
2856 -
2857 - if (access_ok(VERIFY_WRITE, to, n))
2858 - return __copy_tofrom_user(to, (__force void __user *)from, n);
2859 - if ((unsigned long)to < TASK_SIZE) {
2860 - over = (unsigned long)to + n - TASK_SIZE;
2861 - return __copy_tofrom_user(to, (__force void __user *)from,
2862 - n - over) + over;
2863 - }
2864 - return n;
2865 -}
2866 -
2867 -#else /* __powerpc64__ */
2868 -
2869 -#define __copy_in_user(to, from, size) \
2870 - __copy_tofrom_user((to), (from), (size))
2871 -
2872 -extern unsigned long copy_from_user(void *to, const void __user *from,
2873 - unsigned long n);
2874 -extern unsigned long copy_to_user(void __user *to, const void *from,
2875 - unsigned long n);
2876 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
2877 - unsigned long n);
2878 -
2879 -#endif /* __powerpc64__ */
2880 -
2881 static inline unsigned long __copy_from_user_inatomic(void *to,
2882 const void __user *from, unsigned long n)
2883 {
2884 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
2885 if (ret == 0)
2886 return 0;
2887 }
2888 +
2889 + if (!__builtin_constant_p(n))
2890 + check_object_size(to, n, false);
2891 +
2892 return __copy_tofrom_user((__force void __user *)to, from, n);
2893 }
2894
2895 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
2896 if (ret == 0)
2897 return 0;
2898 }
2899 +
2900 + if (!__builtin_constant_p(n))
2901 + check_object_size(from, n, true);
2902 +
2903 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2904 }
2905
2906 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
2907 return __copy_to_user_inatomic(to, from, size);
2908 }
2909
2910 +#ifndef __powerpc64__
2911 +
2912 +static inline unsigned long __must_check copy_from_user(void *to,
2913 + const void __user *from, unsigned long n)
2914 +{
2915 + unsigned long over;
2916 +
2917 + if ((long)n < 0)
2918 + return n;
2919 +
2920 + if (access_ok(VERIFY_READ, from, n)) {
2921 + if (!__builtin_constant_p(n))
2922 + check_object_size(to, n, false);
2923 + return __copy_tofrom_user((__force void __user *)to, from, n);
2924 + }
2925 + if ((unsigned long)from < TASK_SIZE) {
2926 + over = (unsigned long)from + n - TASK_SIZE;
2927 + if (!__builtin_constant_p(n - over))
2928 + check_object_size(to, n - over, false);
2929 + return __copy_tofrom_user((__force void __user *)to, from,
2930 + n - over) + over;
2931 + }
2932 + return n;
2933 +}
2934 +
2935 +static inline unsigned long __must_check copy_to_user(void __user *to,
2936 + const void *from, unsigned long n)
2937 +{
2938 + unsigned long over;
2939 +
2940 + if ((long)n < 0)
2941 + return n;
2942 +
2943 + if (access_ok(VERIFY_WRITE, to, n)) {
2944 + if (!__builtin_constant_p(n))
2945 + check_object_size(from, n, true);
2946 + return __copy_tofrom_user(to, (__force void __user *)from, n);
2947 + }
2948 + if ((unsigned long)to < TASK_SIZE) {
2949 + over = (unsigned long)to + n - TASK_SIZE;
2950 + if (!__builtin_constant_p(n))
2951 + check_object_size(from, n - over, true);
2952 + return __copy_tofrom_user(to, (__force void __user *)from,
2953 + n - over) + over;
2954 + }
2955 + return n;
2956 +}
2957 +
2958 +#else /* __powerpc64__ */
2959 +
2960 +#define __copy_in_user(to, from, size) \
2961 + __copy_tofrom_user((to), (from), (size))
2962 +
2963 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2964 +{
2965 + if ((long)n < 0 || n > INT_MAX)
2966 + return n;
2967 +
2968 + if (!__builtin_constant_p(n))
2969 + check_object_size(to, n, false);
2970 +
2971 + if (likely(access_ok(VERIFY_READ, from, n)))
2972 + n = __copy_from_user(to, from, n);
2973 + else
2974 + memset(to, 0, n);
2975 + return n;
2976 +}
2977 +
2978 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2979 +{
2980 + if ((long)n < 0 || n > INT_MAX)
2981 + return n;
2982 +
2983 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
2984 + if (!__builtin_constant_p(n))
2985 + check_object_size(from, n, true);
2986 + n = __copy_to_user(to, from, n);
2987 + }
2988 + return n;
2989 +}
2990 +
2991 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
2992 + unsigned long n);
2993 +
2994 +#endif /* __powerpc64__ */
2995 +
2996 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2997
2998 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2999 diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
3000 index bb37b1d..01fe9ce 100644
3001 --- a/arch/powerpc/kernel/cacheinfo.c
3002 +++ b/arch/powerpc/kernel/cacheinfo.c
3003 @@ -642,7 +642,7 @@ static struct kobj_attribute *cache_index_opt_attrs[] = {
3004 &cache_assoc_attr,
3005 };
3006
3007 -static struct sysfs_ops cache_index_ops = {
3008 +static const struct sysfs_ops cache_index_ops = {
3009 .show = cache_index_show,
3010 };
3011
3012 diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
3013 index 37771a5..648530c 100644
3014 --- a/arch/powerpc/kernel/dma-iommu.c
3015 +++ b/arch/powerpc/kernel/dma-iommu.c
3016 @@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
3017 }
3018
3019 /* We support DMA to/from any memory page via the iommu */
3020 -static int dma_iommu_dma_supported(struct device *dev, u64 mask)
3021 +int dma_iommu_dma_supported(struct device *dev, u64 mask)
3022 {
3023 struct iommu_table *tbl = get_iommu_table_base(dev);
3024
3025 diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
3026 index e96cbbd..bdd6d41 100644
3027 --- a/arch/powerpc/kernel/dma-swiotlb.c
3028 +++ b/arch/powerpc/kernel/dma-swiotlb.c
3029 @@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
3030 * map_page, and unmap_page on highmem, use normal dma_ops
3031 * for everything else.
3032 */
3033 -struct dma_map_ops swiotlb_dma_ops = {
3034 +const struct dma_map_ops swiotlb_dma_ops = {
3035 .alloc_coherent = dma_direct_alloc_coherent,
3036 .free_coherent = dma_direct_free_coherent,
3037 .map_sg = swiotlb_map_sg_attrs,
3038 diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
3039 index 6215062..ebea59c 100644
3040 --- a/arch/powerpc/kernel/dma.c
3041 +++ b/arch/powerpc/kernel/dma.c
3042 @@ -134,7 +134,7 @@ static inline void dma_direct_sync_single_range(struct device *dev,
3043 }
3044 #endif
3045
3046 -struct dma_map_ops dma_direct_ops = {
3047 +const struct dma_map_ops dma_direct_ops = {
3048 .alloc_coherent = dma_direct_alloc_coherent,
3049 .free_coherent = dma_direct_free_coherent,
3050 .map_sg = dma_direct_map_sg,
3051 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
3052 index 24dcc0e..a300455 100644
3053 --- a/arch/powerpc/kernel/exceptions-64e.S
3054 +++ b/arch/powerpc/kernel/exceptions-64e.S
3055 @@ -455,6 +455,7 @@ storage_fault_common:
3056 std r14,_DAR(r1)
3057 std r15,_DSISR(r1)
3058 addi r3,r1,STACK_FRAME_OVERHEAD
3059 + bl .save_nvgprs
3060 mr r4,r14
3061 mr r5,r15
3062 ld r14,PACA_EXGEN+EX_R14(r13)
3063 @@ -464,8 +465,7 @@ storage_fault_common:
3064 cmpdi r3,0
3065 bne- 1f
3066 b .ret_from_except_lite
3067 -1: bl .save_nvgprs
3068 - mr r5,r3
3069 +1: mr r5,r3
3070 addi r3,r1,STACK_FRAME_OVERHEAD
3071 ld r4,_DAR(r1)
3072 bl .bad_page_fault
3073 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
3074 index 1808876..9fd206a 100644
3075 --- a/arch/powerpc/kernel/exceptions-64s.S
3076 +++ b/arch/powerpc/kernel/exceptions-64s.S
3077 @@ -818,10 +818,10 @@ handle_page_fault:
3078 11: ld r4,_DAR(r1)
3079 ld r5,_DSISR(r1)
3080 addi r3,r1,STACK_FRAME_OVERHEAD
3081 + bl .save_nvgprs
3082 bl .do_page_fault
3083 cmpdi r3,0
3084 beq+ 13f
3085 - bl .save_nvgprs
3086 mr r5,r3
3087 addi r3,r1,STACK_FRAME_OVERHEAD
3088 lwz r4,_DAR(r1)
3089 diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
3090 index a4c8b38..1b09ad9 100644
3091 --- a/arch/powerpc/kernel/ibmebus.c
3092 +++ b/arch/powerpc/kernel/ibmebus.c
3093 @@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct device *dev, u64 mask)
3094 return 1;
3095 }
3096
3097 -static struct dma_map_ops ibmebus_dma_ops = {
3098 +static const struct dma_map_ops ibmebus_dma_ops = {
3099 .alloc_coherent = ibmebus_alloc_coherent,
3100 .free_coherent = ibmebus_free_coherent,
3101 .map_sg = ibmebus_map_sg,
3102 diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
3103 index 641c74b..8339ad7 100644
3104 --- a/arch/powerpc/kernel/kgdb.c
3105 +++ b/arch/powerpc/kernel/kgdb.c
3106 @@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
3107 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
3108 return 0;
3109
3110 - if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
3111 + if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
3112 regs->nip += 4;
3113
3114 return 1;
3115 @@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
3116 /*
3117 * Global data
3118 */
3119 -struct kgdb_arch arch_kgdb_ops = {
3120 +const struct kgdb_arch arch_kgdb_ops = {
3121 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
3122 };
3123
3124 diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
3125 index 477c663..4f50234 100644
3126 --- a/arch/powerpc/kernel/module.c
3127 +++ b/arch/powerpc/kernel/module.c
3128 @@ -31,11 +31,24 @@
3129
3130 LIST_HEAD(module_bug_list);
3131
3132 +#ifdef CONFIG_PAX_KERNEXEC
3133 void *module_alloc(unsigned long size)
3134 {
3135 if (size == 0)
3136 return NULL;
3137
3138 + return vmalloc(size);
3139 +}
3140 +
3141 +void *module_alloc_exec(unsigned long size)
3142 +#else
3143 +void *module_alloc(unsigned long size)
3144 +#endif
3145 +
3146 +{
3147 + if (size == 0)
3148 + return NULL;
3149 +
3150 return vmalloc_exec(size);
3151 }
3152
3153 @@ -45,6 +58,13 @@ void module_free(struct module *mod, void *module_region)
3154 vfree(module_region);
3155 }
3156
3157 +#ifdef CONFIG_PAX_KERNEXEC
3158 +void module_free_exec(struct module *mod, void *module_region)
3159 +{
3160 + module_free(mod, module_region);
3161 +}
3162 +#endif
3163 +
3164 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
3165 const Elf_Shdr *sechdrs,
3166 const char *name)
3167 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
3168 index f832773..0507238 100644
3169 --- a/arch/powerpc/kernel/module_32.c
3170 +++ b/arch/powerpc/kernel/module_32.c
3171 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
3172 me->arch.core_plt_section = i;
3173 }
3174 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
3175 - printk("Module doesn't contain .plt or .init.plt sections.\n");
3176 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
3177 return -ENOEXEC;
3178 }
3179
3180 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *location,
3181
3182 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
3183 /* Init, or core PLT? */
3184 - if (location >= mod->module_core
3185 - && location < mod->module_core + mod->core_size)
3186 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
3187 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
3188 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
3189 - else
3190 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
3191 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
3192 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
3193 + else {
3194 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
3195 + return ~0UL;
3196 + }
3197
3198 /* Find this entry, or if that fails, the next avail. entry */
3199 while (entry->jump[0]) {
3200 diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
3201 index cadbed6..b9bbb00 100644
3202 --- a/arch/powerpc/kernel/pci-common.c
3203 +++ b/arch/powerpc/kernel/pci-common.c
3204 @@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
3205 unsigned int ppc_pci_flags = 0;
3206
3207
3208 -static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
3209 +static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
3210
3211 -void set_pci_dma_ops(struct dma_map_ops *dma_ops)
3212 +void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
3213 {
3214 pci_dma_ops = dma_ops;
3215 }
3216
3217 -struct dma_map_ops *get_pci_dma_ops(void)
3218 +const struct dma_map_ops *get_pci_dma_ops(void)
3219 {
3220 return pci_dma_ops;
3221 }
3222 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
3223 index 7b816da..8d5c277 100644
3224 --- a/arch/powerpc/kernel/process.c
3225 +++ b/arch/powerpc/kernel/process.c
3226 @@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
3227 * Lookup NIP late so we have the best change of getting the
3228 * above info out without failing
3229 */
3230 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
3231 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
3232 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
3233 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
3234 #endif
3235 show_stack(current, (unsigned long *) regs->gpr[1]);
3236 if (!user_mode(regs))
3237 @@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3238 newsp = stack[0];
3239 ip = stack[STACK_FRAME_LR_SAVE];
3240 if (!firstframe || ip != lr) {
3241 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
3242 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
3243 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3244 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
3245 - printk(" (%pS)",
3246 + printk(" (%pA)",
3247 (void *)current->ret_stack[curr_frame].ret);
3248 curr_frame--;
3249 }
3250 @@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3251 struct pt_regs *regs = (struct pt_regs *)
3252 (sp + STACK_FRAME_OVERHEAD);
3253 lr = regs->link;
3254 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
3255 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
3256 regs->trap, (void *)regs->nip, (void *)lr);
3257 firstframe = 1;
3258 }
3259 @@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
3260 }
3261
3262 #endif /* THREAD_SHIFT < PAGE_SHIFT */
3263 -
3264 -unsigned long arch_align_stack(unsigned long sp)
3265 -{
3266 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3267 - sp -= get_random_int() & ~PAGE_MASK;
3268 - return sp & ~0xf;
3269 -}
3270 -
3271 -static inline unsigned long brk_rnd(void)
3272 -{
3273 - unsigned long rnd = 0;
3274 -
3275 - /* 8MB for 32bit, 1GB for 64bit */
3276 - if (is_32bit_task())
3277 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
3278 - else
3279 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
3280 -
3281 - return rnd << PAGE_SHIFT;
3282 -}
3283 -
3284 -unsigned long arch_randomize_brk(struct mm_struct *mm)
3285 -{
3286 - unsigned long base = mm->brk;
3287 - unsigned long ret;
3288 -
3289 -#ifdef CONFIG_PPC_STD_MMU_64
3290 - /*
3291 - * If we are using 1TB segments and we are allowed to randomise
3292 - * the heap, we can put it above 1TB so it is backed by a 1TB
3293 - * segment. Otherwise the heap will be in the bottom 1TB
3294 - * which always uses 256MB segments and this may result in a
3295 - * performance penalty.
3296 - */
3297 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
3298 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
3299 -#endif
3300 -
3301 - ret = PAGE_ALIGN(base + brk_rnd());
3302 -
3303 - if (ret < mm->brk)
3304 - return mm->brk;
3305 -
3306 - return ret;
3307 -}
3308 -
3309 -unsigned long randomize_et_dyn(unsigned long base)
3310 -{
3311 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3312 -
3313 - if (ret < base)
3314 - return base;
3315 -
3316 - return ret;
3317 -}
3318 diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
3319 index ef14988..856c4bc 100644
3320 --- a/arch/powerpc/kernel/ptrace.c
3321 +++ b/arch/powerpc/kernel/ptrace.c
3322 @@ -86,7 +86,7 @@ static int set_user_trap(struct task_struct *task, unsigned long trap)
3323 /*
3324 * Get contents of register REGNO in task TASK.
3325 */
3326 -unsigned long ptrace_get_reg(struct task_struct *task, int regno)
3327 +unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno)
3328 {
3329 if (task->thread.regs == NULL)
3330 return -EIO;
3331 @@ -894,7 +894,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
3332
3333 CHECK_FULL_REGS(child->thread.regs);
3334 if (index < PT_FPR0) {
3335 - tmp = ptrace_get_reg(child, (int) index);
3336 + tmp = ptrace_get_reg(child, index);
3337 } else {
3338 flush_fp_to_thread(child);
3339 tmp = ((unsigned long *)child->thread.fpr)
3340 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
3341 index d670429..2bc59b2 100644
3342 --- a/arch/powerpc/kernel/signal_32.c
3343 +++ b/arch/powerpc/kernel/signal_32.c
3344 @@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
3345 /* Save user registers on the stack */
3346 frame = &rt_sf->uc.uc_mcontext;
3347 addr = frame;
3348 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
3349 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3350 if (save_user_regs(regs, frame, 0, 1))
3351 goto badframe;
3352 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
3353 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
3354 index 2fe6fc6..ada0d96 100644
3355 --- a/arch/powerpc/kernel/signal_64.c
3356 +++ b/arch/powerpc/kernel/signal_64.c
3357 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
3358 current->thread.fpscr.val = 0;
3359
3360 /* Set up to return from userspace. */
3361 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
3362 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3363 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
3364 } else {
3365 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
3366 diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
3367 index b97c2d6..dd01a6a 100644
3368 --- a/arch/powerpc/kernel/sys_ppc32.c
3369 +++ b/arch/powerpc/kernel/sys_ppc32.c
3370 @@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct __sysctl_args32 __user *args)
3371 if (oldlenp) {
3372 if (!error) {
3373 if (get_user(oldlen, oldlenp) ||
3374 - put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
3375 + put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
3376 + copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
3377 error = -EFAULT;
3378 }
3379 - copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
3380 }
3381 return error;
3382 }
3383 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
3384 index 6f0ae1a..e4b6a56 100644
3385 --- a/arch/powerpc/kernel/traps.c
3386 +++ b/arch/powerpc/kernel/traps.c
3387 @@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
3388 static inline void pmac_backlight_unblank(void) { }
3389 #endif
3390
3391 +extern void gr_handle_kernel_exploit(void);
3392 +
3393 int die(const char *str, struct pt_regs *regs, long err)
3394 {
3395 static struct {
3396 @@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs *regs, long err)
3397 if (panic_on_oops)
3398 panic("Fatal exception");
3399
3400 + gr_handle_kernel_exploit();
3401 +
3402 oops_exit();
3403 do_exit(err);
3404
3405 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
3406 index 137dc22..fe57a79 100644
3407 --- a/arch/powerpc/kernel/vdso.c
3408 +++ b/arch/powerpc/kernel/vdso.c
3409 @@ -36,6 +36,7 @@
3410 #include <asm/firmware.h>
3411 #include <asm/vdso.h>
3412 #include <asm/vdso_datapage.h>
3413 +#include <asm/mman.h>
3414
3415 #include "setup.h"
3416
3417 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3418 vdso_base = VDSO32_MBASE;
3419 #endif
3420
3421 - current->mm->context.vdso_base = 0;
3422 + current->mm->context.vdso_base = ~0UL;
3423
3424 /* vDSO has a problem and was disabled, just don't "enable" it for the
3425 * process
3426 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3427 vdso_base = get_unmapped_area(NULL, vdso_base,
3428 (vdso_pages << PAGE_SHIFT) +
3429 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
3430 - 0, 0);
3431 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
3432 if (IS_ERR_VALUE(vdso_base)) {
3433 rc = vdso_base;
3434 goto fail_mmapsem;
3435 diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
3436 index 77f6421..829564a 100644
3437 --- a/arch/powerpc/kernel/vio.c
3438 +++ b/arch/powerpc/kernel/vio.c
3439 @@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
3440 vio_cmo_dealloc(viodev, alloc_size);
3441 }
3442
3443 -struct dma_map_ops vio_dma_mapping_ops = {
3444 +static const struct dma_map_ops vio_dma_mapping_ops = {
3445 .alloc_coherent = vio_dma_iommu_alloc_coherent,
3446 .free_coherent = vio_dma_iommu_free_coherent,
3447 .map_sg = vio_dma_iommu_map_sg,
3448 .unmap_sg = vio_dma_iommu_unmap_sg,
3449 + .dma_supported = dma_iommu_dma_supported,
3450 .map_page = vio_dma_iommu_map_page,
3451 .unmap_page = vio_dma_iommu_unmap_page,
3452
3453 @@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vio_dev *viodev)
3454
3455 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
3456 {
3457 - vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
3458 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
3459 }
3460
3461 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
3462 index 5eea6f3..5d10396 100644
3463 --- a/arch/powerpc/lib/usercopy_64.c
3464 +++ b/arch/powerpc/lib/usercopy_64.c
3465 @@ -9,22 +9,6 @@
3466 #include <linux/module.h>
3467 #include <asm/uaccess.h>
3468
3469 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3470 -{
3471 - if (likely(access_ok(VERIFY_READ, from, n)))
3472 - n = __copy_from_user(to, from, n);
3473 - else
3474 - memset(to, 0, n);
3475 - return n;
3476 -}
3477 -
3478 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3479 -{
3480 - if (likely(access_ok(VERIFY_WRITE, to, n)))
3481 - n = __copy_to_user(to, from, n);
3482 - return n;
3483 -}
3484 -
3485 unsigned long copy_in_user(void __user *to, const void __user *from,
3486 unsigned long n)
3487 {
3488 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
3489 return n;
3490 }
3491
3492 -EXPORT_SYMBOL(copy_from_user);
3493 -EXPORT_SYMBOL(copy_to_user);
3494 EXPORT_SYMBOL(copy_in_user);
3495
3496 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
3497 index e7dae82..877ce0d 100644
3498 --- a/arch/powerpc/mm/fault.c
3499 +++ b/arch/powerpc/mm/fault.c
3500 @@ -30,6 +30,10 @@
3501 #include <linux/kprobes.h>
3502 #include <linux/kdebug.h>
3503 #include <linux/perf_event.h>
3504 +#include <linux/slab.h>
3505 +#include <linux/pagemap.h>
3506 +#include <linux/compiler.h>
3507 +#include <linux/unistd.h>
3508
3509 #include <asm/firmware.h>
3510 #include <asm/page.h>
3511 @@ -40,6 +44,7 @@
3512 #include <asm/uaccess.h>
3513 #include <asm/tlbflush.h>
3514 #include <asm/siginfo.h>
3515 +#include <asm/ptrace.h>
3516
3517
3518 #ifdef CONFIG_KPROBES
3519 @@ -64,6 +69,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
3520 }
3521 #endif
3522
3523 +#ifdef CONFIG_PAX_PAGEEXEC
3524 +/*
3525 + * PaX: decide what to do with offenders (regs->nip = fault address)
3526 + *
3527 + * returns 1 when task should be killed
3528 + */
3529 +static int pax_handle_fetch_fault(struct pt_regs *regs)
3530 +{
3531 + return 1;
3532 +}
3533 +
3534 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3535 +{
3536 + unsigned long i;
3537 +
3538 + printk(KERN_ERR "PAX: bytes at PC: ");
3539 + for (i = 0; i < 5; i++) {
3540 + unsigned int c;
3541 + if (get_user(c, (unsigned int __user *)pc+i))
3542 + printk(KERN_CONT "???????? ");
3543 + else
3544 + printk(KERN_CONT "%08x ", c);
3545 + }
3546 + printk("\n");
3547 +}
3548 +#endif
3549 +
3550 /*
3551 * Check whether the instruction at regs->nip is a store using
3552 * an update addressing form which will update r1.
3553 @@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
3554 * indicate errors in DSISR but can validly be set in SRR1.
3555 */
3556 if (trap == 0x400)
3557 - error_code &= 0x48200000;
3558 + error_code &= 0x58200000;
3559 else
3560 is_write = error_code & DSISR_ISSTORE;
3561 #else
3562 @@ -250,7 +282,7 @@ good_area:
3563 * "undefined". Of those that can be set, this is the only
3564 * one which seems bad.
3565 */
3566 - if (error_code & 0x10000000)
3567 + if (error_code & DSISR_GUARDED)
3568 /* Guarded storage error. */
3569 goto bad_area;
3570 #endif /* CONFIG_8xx */
3571 @@ -265,7 +297,7 @@ good_area:
3572 * processors use the same I/D cache coherency mechanism
3573 * as embedded.
3574 */
3575 - if (error_code & DSISR_PROTFAULT)
3576 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
3577 goto bad_area;
3578 #endif /* CONFIG_PPC_STD_MMU */
3579
3580 @@ -335,6 +367,23 @@ bad_area:
3581 bad_area_nosemaphore:
3582 /* User mode accesses cause a SIGSEGV */
3583 if (user_mode(regs)) {
3584 +
3585 +#ifdef CONFIG_PAX_PAGEEXEC
3586 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
3587 +#ifdef CONFIG_PPC_STD_MMU
3588 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
3589 +#else
3590 + if (is_exec && regs->nip == address) {
3591 +#endif
3592 + switch (pax_handle_fetch_fault(regs)) {
3593 + }
3594 +
3595 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
3596 + do_group_exit(SIGKILL);
3597 + }
3598 + }
3599 +#endif
3600 +
3601 _exception(SIGSEGV, regs, code, address);
3602 return 0;
3603 }
3604 diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
3605 index 5973631..ad617af 100644
3606 --- a/arch/powerpc/mm/mem.c
3607 +++ b/arch/powerpc/mm/mem.c
3608 @@ -250,7 +250,7 @@ static int __init mark_nonram_nosave(void)
3609 {
3610 unsigned long lmb_next_region_start_pfn,
3611 lmb_region_max_pfn;
3612 - int i;
3613 + unsigned int i;
3614
3615 for (i = 0; i < lmb.memory.cnt - 1; i++) {
3616 lmb_region_max_pfn =
3617 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
3618 index 0d957a4..26d968f 100644
3619 --- a/arch/powerpc/mm/mmap_64.c
3620 +++ b/arch/powerpc/mm/mmap_64.c
3621 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3622 */
3623 if (mmap_is_legacy()) {
3624 mm->mmap_base = TASK_UNMAPPED_BASE;
3625 +
3626 +#ifdef CONFIG_PAX_RANDMMAP
3627 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3628 + mm->mmap_base += mm->delta_mmap;
3629 +#endif
3630 +
3631 mm->get_unmapped_area = arch_get_unmapped_area;
3632 mm->unmap_area = arch_unmap_area;
3633 } else {
3634 mm->mmap_base = mmap_base();
3635 +
3636 +#ifdef CONFIG_PAX_RANDMMAP
3637 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3638 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3639 +#endif
3640 +
3641 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3642 mm->unmap_area = arch_unmap_area_topdown;
3643 }
3644 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
3645 index ba51948..23009d9 100644
3646 --- a/arch/powerpc/mm/slice.c
3647 +++ b/arch/powerpc/mm/slice.c
3648 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
3649 if ((mm->task_size - len) < addr)
3650 return 0;
3651 vma = find_vma(mm, addr);
3652 - return (!vma || (addr + len) <= vma->vm_start);
3653 + return check_heap_stack_gap(vma, addr, len);
3654 }
3655
3656 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3657 @@ -256,7 +256,7 @@ full_search:
3658 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3659 continue;
3660 }
3661 - if (!vma || addr + len <= vma->vm_start) {
3662 + if (check_heap_stack_gap(vma, addr, len)) {
3663 /*
3664 * Remember the place where we stopped the search:
3665 */
3666 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3667 }
3668 }
3669
3670 - addr = mm->mmap_base;
3671 - while (addr > len) {
3672 + if (mm->mmap_base < len)
3673 + addr = -ENOMEM;
3674 + else
3675 + addr = mm->mmap_base - len;
3676 +
3677 + while (!IS_ERR_VALUE(addr)) {
3678 /* Go down by chunk size */
3679 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3680 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
3681
3682 /* Check for hit with different page size */
3683 mask = slice_range_to_mask(addr, len);
3684 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3685 * return with success:
3686 */
3687 vma = find_vma(mm, addr);
3688 - if (!vma || (addr + len) <= vma->vm_start) {
3689 + if (check_heap_stack_gap(vma, addr, len)) {
3690 /* remember the address as a hint for next time */
3691 if (use_cache)
3692 mm->free_area_cache = addr;
3693 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3694 mm->cached_hole_size = vma->vm_start - addr;
3695
3696 /* try just below the current vma->vm_start */
3697 - addr = vma->vm_start;
3698 + addr = skip_heap_stack_gap(vma, len);
3699 }
3700
3701 /*
3702 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
3703 if (fixed && addr > (mm->task_size - len))
3704 return -EINVAL;
3705
3706 +#ifdef CONFIG_PAX_RANDMMAP
3707 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3708 + addr = 0;
3709 +#endif
3710 +
3711 /* If hint, make sure it matches our alignment restrictions */
3712 if (!fixed && addr) {
3713 addr = _ALIGN_UP(addr, 1ul << pshift);
3714 diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
3715 index b5c753d..8f01abe 100644
3716 --- a/arch/powerpc/platforms/52xx/lite5200_pm.c
3717 +++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
3718 @@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
3719 lite5200_pm_target_state = PM_SUSPEND_ON;
3720 }
3721
3722 -static struct platform_suspend_ops lite5200_pm_ops = {
3723 +static const struct platform_suspend_ops lite5200_pm_ops = {
3724 .valid = lite5200_pm_valid,
3725 .begin = lite5200_pm_begin,
3726 .prepare = lite5200_pm_prepare,
3727 diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3728 index a55b0b6..478c18e 100644
3729 --- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3730 +++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3731 @@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
3732 iounmap(mbar);
3733 }
3734
3735 -static struct platform_suspend_ops mpc52xx_pm_ops = {
3736 +static const struct platform_suspend_ops mpc52xx_pm_ops = {
3737 .valid = mpc52xx_pm_valid,
3738 .prepare = mpc52xx_pm_prepare,
3739 .enter = mpc52xx_pm_enter,
3740 diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
3741 index 08e65fc..643d3ac 100644
3742 --- a/arch/powerpc/platforms/83xx/suspend.c
3743 +++ b/arch/powerpc/platforms/83xx/suspend.c
3744 @@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
3745 return ret;
3746 }
3747
3748 -static struct platform_suspend_ops mpc83xx_suspend_ops = {
3749 +static const struct platform_suspend_ops mpc83xx_suspend_ops = {
3750 .valid = mpc83xx_suspend_valid,
3751 .begin = mpc83xx_suspend_begin,
3752 .enter = mpc83xx_suspend_enter,
3753 diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
3754 index ca5bfdf..1602e09 100644
3755 --- a/arch/powerpc/platforms/cell/iommu.c
3756 +++ b/arch/powerpc/platforms/cell/iommu.c
3757 @@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struct device *dev, u64 mask)
3758
3759 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
3760
3761 -struct dma_map_ops dma_iommu_fixed_ops = {
3762 +const struct dma_map_ops dma_iommu_fixed_ops = {
3763 .alloc_coherent = dma_fixed_alloc_coherent,
3764 .free_coherent = dma_fixed_free_coherent,
3765 .map_sg = dma_fixed_map_sg,
3766 diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
3767 index e34b305..20e48ec 100644
3768 --- a/arch/powerpc/platforms/ps3/system-bus.c
3769 +++ b/arch/powerpc/platforms/ps3/system-bus.c
3770 @@ -694,7 +694,7 @@ static int ps3_dma_supported(struct device *_dev, u64 mask)
3771 return mask >= DMA_BIT_MASK(32);
3772 }
3773
3774 -static struct dma_map_ops ps3_sb_dma_ops = {
3775 +static const struct dma_map_ops ps3_sb_dma_ops = {
3776 .alloc_coherent = ps3_alloc_coherent,
3777 .free_coherent = ps3_free_coherent,
3778 .map_sg = ps3_sb_map_sg,
3779 @@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops = {
3780 .unmap_page = ps3_unmap_page,
3781 };
3782
3783 -static struct dma_map_ops ps3_ioc0_dma_ops = {
3784 +static const struct dma_map_ops ps3_ioc0_dma_ops = {
3785 .alloc_coherent = ps3_alloc_coherent,
3786 .free_coherent = ps3_free_coherent,
3787 .map_sg = ps3_ioc0_map_sg,
3788 diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
3789 index f0e6f28..60d53ed 100644
3790 --- a/arch/powerpc/platforms/pseries/Kconfig
3791 +++ b/arch/powerpc/platforms/pseries/Kconfig
3792 @@ -2,6 +2,8 @@ config PPC_PSERIES
3793 depends on PPC64 && PPC_BOOK3S
3794 bool "IBM pSeries & new (POWER5-based) iSeries"
3795 select MPIC
3796 + select PCI_MSI
3797 + select XICS
3798 select PPC_I8259
3799 select PPC_RTAS
3800 select RTAS_ERROR_LOGGING
3801 diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
3802 index 43c0aca..42c045b 100644
3803 --- a/arch/s390/Kconfig
3804 +++ b/arch/s390/Kconfig
3805 @@ -194,28 +194,26 @@ config AUDIT_ARCH
3806
3807 config S390_SWITCH_AMODE
3808 bool "Switch kernel/user addressing modes"
3809 + default y
3810 help
3811 This option allows to switch the addressing modes of kernel and user
3812 - space. The kernel parameter switch_amode=on will enable this feature,
3813 - default is disabled. Enabling this (via kernel parameter) on machines
3814 - earlier than IBM System z9-109 EC/BC will reduce system performance.
3815 + space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3816 + will reduce system performance.
3817
3818 Note that this option will also be selected by selecting the execute
3819 - protection option below. Enabling the execute protection via the
3820 - noexec kernel parameter will also switch the addressing modes,
3821 - independent of the switch_amode kernel parameter.
3822 + protection option below. Enabling the execute protection will also
3823 + switch the addressing modes, independent of this option.
3824
3825
3826 config S390_EXEC_PROTECT
3827 bool "Data execute protection"
3828 + default y
3829 select S390_SWITCH_AMODE
3830 help
3831 This option allows to enable a buffer overflow protection for user
3832 space programs and it also selects the addressing mode option above.
3833 - The kernel parameter noexec=on will enable this feature and also
3834 - switch the addressing modes, default is disabled. Enabling this (via
3835 - kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3836 - will reduce system performance.
3837 + Enabling this on machines earlier than IBM System z9-109 EC/BC will
3838 + reduce system performance.
3839
3840 comment "Code generation options"
3841
3842 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
3843 index e885442..5e6c303 100644
3844 --- a/arch/s390/include/asm/elf.h
3845 +++ b/arch/s390/include/asm/elf.h
3846 @@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
3847 that it will "exec", and that there is sufficient room for the brk. */
3848 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3849
3850 +#ifdef CONFIG_PAX_ASLR
3851 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3852 +
3853 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3854 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3855 +#endif
3856 +
3857 /* This yields a mask that user programs can use to figure out what
3858 instruction set this CPU supports. */
3859
3860 diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
3861 index e37478e..9ce0e9f 100644
3862 --- a/arch/s390/include/asm/setup.h
3863 +++ b/arch/s390/include/asm/setup.h
3864 @@ -50,13 +50,13 @@ extern unsigned long memory_end;
3865 void detect_memory_layout(struct mem_chunk chunk[]);
3866
3867 #ifdef CONFIG_S390_SWITCH_AMODE
3868 -extern unsigned int switch_amode;
3869 +#define switch_amode (1)
3870 #else
3871 #define switch_amode (0)
3872 #endif
3873
3874 #ifdef CONFIG_S390_EXEC_PROTECT
3875 -extern unsigned int s390_noexec;
3876 +#define s390_noexec (1)
3877 #else
3878 #define s390_noexec (0)
3879 #endif
3880 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
3881 index 8377e91..e28e6f1 100644
3882 --- a/arch/s390/include/asm/uaccess.h
3883 +++ b/arch/s390/include/asm/uaccess.h
3884 @@ -232,6 +232,10 @@ static inline unsigned long __must_check
3885 copy_to_user(void __user *to, const void *from, unsigned long n)
3886 {
3887 might_fault();
3888 +
3889 + if ((long)n < 0)
3890 + return n;
3891 +
3892 if (access_ok(VERIFY_WRITE, to, n))
3893 n = __copy_to_user(to, from, n);
3894 return n;
3895 @@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
3896 static inline unsigned long __must_check
3897 __copy_from_user(void *to, const void __user *from, unsigned long n)
3898 {
3899 + if ((long)n < 0)
3900 + return n;
3901 +
3902 if (__builtin_constant_p(n) && (n <= 256))
3903 return uaccess.copy_from_user_small(n, from, to);
3904 else
3905 @@ -283,6 +290,10 @@ static inline unsigned long __must_check
3906 copy_from_user(void *to, const void __user *from, unsigned long n)
3907 {
3908 might_fault();
3909 +
3910 + if ((long)n < 0)
3911 + return n;
3912 +
3913 if (access_ok(VERIFY_READ, from, n))
3914 n = __copy_from_user(to, from, n);
3915 else
3916 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
3917 index 639380a..72e3c02 100644
3918 --- a/arch/s390/kernel/module.c
3919 +++ b/arch/s390/kernel/module.c
3920 @@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
3921
3922 /* Increase core size by size of got & plt and set start
3923 offsets for got and plt. */
3924 - me->core_size = ALIGN(me->core_size, 4);
3925 - me->arch.got_offset = me->core_size;
3926 - me->core_size += me->arch.got_size;
3927 - me->arch.plt_offset = me->core_size;
3928 - me->core_size += me->arch.plt_size;
3929 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
3930 + me->arch.got_offset = me->core_size_rw;
3931 + me->core_size_rw += me->arch.got_size;
3932 + me->arch.plt_offset = me->core_size_rx;
3933 + me->core_size_rx += me->arch.plt_size;
3934 return 0;
3935 }
3936
3937 @@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3938 if (info->got_initialized == 0) {
3939 Elf_Addr *gotent;
3940
3941 - gotent = me->module_core + me->arch.got_offset +
3942 + gotent = me->module_core_rw + me->arch.got_offset +
3943 info->got_offset;
3944 *gotent = val;
3945 info->got_initialized = 1;
3946 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3947 else if (r_type == R_390_GOTENT ||
3948 r_type == R_390_GOTPLTENT)
3949 *(unsigned int *) loc =
3950 - (val + (Elf_Addr) me->module_core - loc) >> 1;
3951 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3952 else if (r_type == R_390_GOT64 ||
3953 r_type == R_390_GOTPLT64)
3954 *(unsigned long *) loc = val;
3955 @@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3956 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3957 if (info->plt_initialized == 0) {
3958 unsigned int *ip;
3959 - ip = me->module_core + me->arch.plt_offset +
3960 + ip = me->module_core_rx + me->arch.plt_offset +
3961 info->plt_offset;
3962 #ifndef CONFIG_64BIT
3963 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3964 @@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3965 val - loc + 0xffffUL < 0x1ffffeUL) ||
3966 (r_type == R_390_PLT32DBL &&
3967 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3968 - val = (Elf_Addr) me->module_core +
3969 + val = (Elf_Addr) me->module_core_rx +
3970 me->arch.plt_offset +
3971 info->plt_offset;
3972 val += rela->r_addend - loc;
3973 @@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3974 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3975 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3976 val = val + rela->r_addend -
3977 - ((Elf_Addr) me->module_core + me->arch.got_offset);
3978 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3979 if (r_type == R_390_GOTOFF16)
3980 *(unsigned short *) loc = val;
3981 else if (r_type == R_390_GOTOFF32)
3982 @@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3983 break;
3984 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3985 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3986 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
3987 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3988 rela->r_addend - loc;
3989 if (r_type == R_390_GOTPC)
3990 *(unsigned int *) loc = val;
3991 diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
3992 index 061479f..dbfb08c 100644
3993 --- a/arch/s390/kernel/setup.c
3994 +++ b/arch/s390/kernel/setup.c
3995 @@ -306,9 +306,6 @@ static int __init early_parse_mem(char *p)
3996 early_param("mem", early_parse_mem);
3997
3998 #ifdef CONFIG_S390_SWITCH_AMODE
3999 -unsigned int switch_amode = 0;
4000 -EXPORT_SYMBOL_GPL(switch_amode);
4001 -
4002 static int set_amode_and_uaccess(unsigned long user_amode,
4003 unsigned long user32_amode)
4004 {
4005 @@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigned long user_amode,
4006 return 0;
4007 }
4008 }
4009 -
4010 -/*
4011 - * Switch kernel/user addressing modes?
4012 - */
4013 -static int __init early_parse_switch_amode(char *p)
4014 -{
4015 - switch_amode = 1;
4016 - return 0;
4017 -}
4018 -early_param("switch_amode", early_parse_switch_amode);
4019 -
4020 #else /* CONFIG_S390_SWITCH_AMODE */
4021 static inline int set_amode_and_uaccess(unsigned long user_amode,
4022 unsigned long user32_amode)
4023 @@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(unsigned long user_amode,
4024 }
4025 #endif /* CONFIG_S390_SWITCH_AMODE */
4026
4027 -#ifdef CONFIG_S390_EXEC_PROTECT
4028 -unsigned int s390_noexec = 0;
4029 -EXPORT_SYMBOL_GPL(s390_noexec);
4030 -
4031 -/*
4032 - * Enable execute protection?
4033 - */
4034 -static int __init early_parse_noexec(char *p)
4035 -{
4036 - if (!strncmp(p, "off", 3))
4037 - return 0;
4038 - switch_amode = 1;
4039 - s390_noexec = 1;
4040 - return 0;
4041 -}
4042 -early_param("noexec", early_parse_noexec);
4043 -#endif /* CONFIG_S390_EXEC_PROTECT */
4044 -
4045 static void setup_addressing_mode(void)
4046 {
4047 if (s390_noexec) {
4048 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4049 index f4558cc..e461f37 100644
4050 --- a/arch/s390/mm/mmap.c
4051 +++ b/arch/s390/mm/mmap.c
4052 @@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4053 */
4054 if (mmap_is_legacy()) {
4055 mm->mmap_base = TASK_UNMAPPED_BASE;
4056 +
4057 +#ifdef CONFIG_PAX_RANDMMAP
4058 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4059 + mm->mmap_base += mm->delta_mmap;
4060 +#endif
4061 +
4062 mm->get_unmapped_area = arch_get_unmapped_area;
4063 mm->unmap_area = arch_unmap_area;
4064 } else {
4065 mm->mmap_base = mmap_base();
4066 +
4067 +#ifdef CONFIG_PAX_RANDMMAP
4068 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4069 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4070 +#endif
4071 +
4072 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4073 mm->unmap_area = arch_unmap_area_topdown;
4074 }
4075 @@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4076 */
4077 if (mmap_is_legacy()) {
4078 mm->mmap_base = TASK_UNMAPPED_BASE;
4079 +
4080 +#ifdef CONFIG_PAX_RANDMMAP
4081 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4082 + mm->mmap_base += mm->delta_mmap;
4083 +#endif
4084 +
4085 mm->get_unmapped_area = s390_get_unmapped_area;
4086 mm->unmap_area = arch_unmap_area;
4087 } else {
4088 mm->mmap_base = mmap_base();
4089 +
4090 +#ifdef CONFIG_PAX_RANDMMAP
4091 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4092 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4093 +#endif
4094 +
4095 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4096 mm->unmap_area = arch_unmap_area_topdown;
4097 }
4098 diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
4099 index 589d5c7..669e274 100644
4100 --- a/arch/score/include/asm/system.h
4101 +++ b/arch/score/include/asm/system.h
4102 @@ -17,7 +17,7 @@ do { \
4103 #define finish_arch_switch(prev) do {} while (0)
4104
4105 typedef void (*vi_handler_t)(void);
4106 -extern unsigned long arch_align_stack(unsigned long sp);
4107 +#define arch_align_stack(x) (x)
4108
4109 #define mb() barrier()
4110 #define rmb() barrier()
4111 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
4112 index 25d0803..d6c8e36 100644
4113 --- a/arch/score/kernel/process.c
4114 +++ b/arch/score/kernel/process.c
4115 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
4116
4117 return task_pt_regs(task)->cp0_epc;
4118 }
4119 -
4120 -unsigned long arch_align_stack(unsigned long sp)
4121 -{
4122 - return sp;
4123 -}
4124 diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c
4125 index d936c1a..304a252 100644
4126 --- a/arch/sh/boards/mach-hp6xx/pm.c
4127 +++ b/arch/sh/boards/mach-hp6xx/pm.c
4128 @@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_t state)
4129 return 0;
4130 }
4131
4132 -static struct platform_suspend_ops hp6x0_pm_ops = {
4133 +static const struct platform_suspend_ops hp6x0_pm_ops = {
4134 .enter = hp6x0_pm_enter,
4135 .valid = suspend_valid_only_mem,
4136 };
4137 diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
4138 index 8a8a993..7b3079b 100644
4139 --- a/arch/sh/kernel/cpu/sh4/sq.c
4140 +++ b/arch/sh/kernel/cpu/sh4/sq.c
4141 @@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[] = {
4142 NULL,
4143 };
4144
4145 -static struct sysfs_ops sq_sysfs_ops = {
4146 +static const struct sysfs_ops sq_sysfs_ops = {
4147 .show = sq_sysfs_show,
4148 .store = sq_sysfs_store,
4149 };
4150 diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
4151 index ee3c2aa..c49cee6 100644
4152 --- a/arch/sh/kernel/cpu/shmobile/pm.c
4153 +++ b/arch/sh/kernel/cpu/shmobile/pm.c
4154 @@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t state)
4155 return 0;
4156 }
4157
4158 -static struct platform_suspend_ops sh_pm_ops = {
4159 +static const struct platform_suspend_ops sh_pm_ops = {
4160 .enter = sh_pm_enter,
4161 .valid = suspend_valid_only_mem,
4162 };
4163 diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
4164 index 3e532d0..9faa306 100644
4165 --- a/arch/sh/kernel/kgdb.c
4166 +++ b/arch/sh/kernel/kgdb.c
4167 @@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
4168 {
4169 }
4170
4171 -struct kgdb_arch arch_kgdb_ops = {
4172 +const struct kgdb_arch arch_kgdb_ops = {
4173 /* Breakpoint instruction: trapa #0x3c */
4174 #ifdef CONFIG_CPU_LITTLE_ENDIAN
4175 .gdb_bpt_instr = { 0x3c, 0xc3 },
4176 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
4177 index afeb710..d1d1289 100644
4178 --- a/arch/sh/mm/mmap.c
4179 +++ b/arch/sh/mm/mmap.c
4180 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4181 addr = PAGE_ALIGN(addr);
4182
4183 vma = find_vma(mm, addr);
4184 - if (TASK_SIZE - len >= addr &&
4185 - (!vma || addr + len <= vma->vm_start))
4186 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4187 return addr;
4188 }
4189
4190 @@ -106,7 +105,7 @@ full_search:
4191 }
4192 return -ENOMEM;
4193 }
4194 - if (likely(!vma || addr + len <= vma->vm_start)) {
4195 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4196 /*
4197 * Remember the place where we stopped the search:
4198 */
4199 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4200 addr = PAGE_ALIGN(addr);
4201
4202 vma = find_vma(mm, addr);
4203 - if (TASK_SIZE - len >= addr &&
4204 - (!vma || addr + len <= vma->vm_start))
4205 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4206 return addr;
4207 }
4208
4209 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4210 /* make sure it can fit in the remaining address space */
4211 if (likely(addr > len)) {
4212 vma = find_vma(mm, addr-len);
4213 - if (!vma || addr <= vma->vm_start) {
4214 + if (check_heap_stack_gap(vma, addr - len, len)) {
4215 /* remember the address as a hint for next time */
4216 return (mm->free_area_cache = addr-len);
4217 }
4218 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4219 if (unlikely(mm->mmap_base < len))
4220 goto bottomup;
4221
4222 - addr = mm->mmap_base-len;
4223 - if (do_colour_align)
4224 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4225 + addr = mm->mmap_base - len;
4226
4227 do {
4228 + if (do_colour_align)
4229 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4230 /*
4231 * Lookup failure means no vma is above this address,
4232 * else if new region fits below vma->vm_start,
4233 * return with success:
4234 */
4235 vma = find_vma(mm, addr);
4236 - if (likely(!vma || addr+len <= vma->vm_start)) {
4237 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4238 /* remember the address as a hint for next time */
4239 return (mm->free_area_cache = addr);
4240 }
4241 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4242 mm->cached_hole_size = vma->vm_start - addr;
4243
4244 /* try just below the current vma->vm_start */
4245 - addr = vma->vm_start-len;
4246 - if (do_colour_align)
4247 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4248 - } while (likely(len < vma->vm_start));
4249 + addr = skip_heap_stack_gap(vma, len);
4250 + } while (!IS_ERR_VALUE(addr));
4251
4252 bottomup:
4253 /*
4254 diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
4255 index 113225b..7fd04e7 100644
4256 --- a/arch/sparc/Makefile
4257 +++ b/arch/sparc/Makefile
4258 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
4259 # Export what is needed by arch/sparc/boot/Makefile
4260 export VMLINUX_INIT VMLINUX_MAIN
4261 VMLINUX_INIT := $(head-y) $(init-y)
4262 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4263 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4264 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4265 VMLINUX_MAIN += $(drivers-y) $(net-y)
4266
4267 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
4268 index f5cc06f..f858d47 100644
4269 --- a/arch/sparc/include/asm/atomic_64.h
4270 +++ b/arch/sparc/include/asm/atomic_64.h
4271 @@ -14,18 +14,40 @@
4272 #define ATOMIC64_INIT(i) { (i) }
4273
4274 #define atomic_read(v) ((v)->counter)
4275 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
4276 +{
4277 + return v->counter;
4278 +}
4279 #define atomic64_read(v) ((v)->counter)
4280 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
4281 +{
4282 + return v->counter;
4283 +}
4284
4285 #define atomic_set(v, i) (((v)->counter) = i)
4286 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
4287 +{
4288 + v->counter = i;
4289 +}
4290 #define atomic64_set(v, i) (((v)->counter) = i)
4291 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
4292 +{
4293 + v->counter = i;
4294 +}
4295
4296 extern void atomic_add(int, atomic_t *);
4297 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
4298 extern void atomic64_add(long, atomic64_t *);
4299 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
4300 extern void atomic_sub(int, atomic_t *);
4301 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
4302 extern void atomic64_sub(long, atomic64_t *);
4303 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
4304
4305 extern int atomic_add_ret(int, atomic_t *);
4306 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
4307 extern long atomic64_add_ret(long, atomic64_t *);
4308 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
4309 extern int atomic_sub_ret(int, atomic_t *);
4310 extern long atomic64_sub_ret(long, atomic64_t *);
4311
4312 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4313 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
4314
4315 #define atomic_inc_return(v) atomic_add_ret(1, v)
4316 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
4317 +{
4318 + return atomic_add_ret_unchecked(1, v);
4319 +}
4320 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
4321 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
4322 +{
4323 + return atomic64_add_ret_unchecked(1, v);
4324 +}
4325
4326 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
4327 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
4328
4329 #define atomic_add_return(i, v) atomic_add_ret(i, v)
4330 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
4331 +{
4332 + return atomic_add_ret_unchecked(i, v);
4333 +}
4334 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
4335 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
4336 +{
4337 + return atomic64_add_ret_unchecked(i, v);
4338 +}
4339
4340 /*
4341 * atomic_inc_and_test - increment and test
4342 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4343 * other cases.
4344 */
4345 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
4346 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
4347 +{
4348 + return atomic_inc_return_unchecked(v) == 0;
4349 +}
4350 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
4351
4352 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
4353 @@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4354 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
4355
4356 #define atomic_inc(v) atomic_add(1, v)
4357 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
4358 +{
4359 + atomic_add_unchecked(1, v);
4360 +}
4361 #define atomic64_inc(v) atomic64_add(1, v)
4362 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
4363 +{
4364 + atomic64_add_unchecked(1, v);
4365 +}
4366
4367 #define atomic_dec(v) atomic_sub(1, v)
4368 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
4369 +{
4370 + atomic_sub_unchecked(1, v);
4371 +}
4372 #define atomic64_dec(v) atomic64_sub(1, v)
4373 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
4374 +{
4375 + atomic64_sub_unchecked(1, v);
4376 +}
4377
4378 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
4379 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
4380
4381 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
4382 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
4383 +{
4384 + return cmpxchg(&v->counter, old, new);
4385 +}
4386 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
4387 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
4388 +{
4389 + return xchg(&v->counter, new);
4390 +}
4391
4392 static inline int atomic_add_unless(atomic_t *v, int a, int u)
4393 {
4394 - int c, old;
4395 + int c, old, new;
4396 c = atomic_read(v);
4397 for (;;) {
4398 - if (unlikely(c == (u)))
4399 + if (unlikely(c == u))
4400 break;
4401 - old = atomic_cmpxchg((v), c, c + (a));
4402 +
4403 + asm volatile("addcc %2, %0, %0\n"
4404 +
4405 +#ifdef CONFIG_PAX_REFCOUNT
4406 + "tvs %%icc, 6\n"
4407 +#endif
4408 +
4409 + : "=r" (new)
4410 + : "0" (c), "ir" (a)
4411 + : "cc");
4412 +
4413 + old = atomic_cmpxchg(v, c, new);
4414 if (likely(old == c))
4415 break;
4416 c = old;
4417 }
4418 - return c != (u);
4419 + return c != u;
4420 }
4421
4422 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
4423 @@ -90,20 +167,35 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
4424 #define atomic64_cmpxchg(v, o, n) \
4425 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
4426 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
4427 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
4428 +{
4429 + return xchg(&v->counter, new);
4430 +}
4431
4432 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
4433 {
4434 - long c, old;
4435 + long c, old, new;
4436 c = atomic64_read(v);
4437 for (;;) {
4438 - if (unlikely(c == (u)))
4439 + if (unlikely(c == u))
4440 break;
4441 - old = atomic64_cmpxchg((v), c, c + (a));
4442 +
4443 + asm volatile("addcc %2, %0, %0\n"
4444 +
4445 +#ifdef CONFIG_PAX_REFCOUNT
4446 + "tvs %%xcc, 6\n"
4447 +#endif
4448 +
4449 + : "=r" (new)
4450 + : "0" (c), "ir" (a)
4451 + : "cc");
4452 +
4453 + old = atomic64_cmpxchg(v, c, new);
4454 if (likely(old == c))
4455 break;
4456 c = old;
4457 }
4458 - return c != (u);
4459 + return c != u;
4460 }
4461
4462 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4463 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
4464 index 41f85ae..fb54d5e 100644
4465 --- a/arch/sparc/include/asm/cache.h
4466 +++ b/arch/sparc/include/asm/cache.h
4467 @@ -8,7 +8,7 @@
4468 #define _SPARC_CACHE_H
4469
4470 #define L1_CACHE_SHIFT 5
4471 -#define L1_CACHE_BYTES 32
4472 +#define L1_CACHE_BYTES 32UL
4473 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
4474
4475 #ifdef CONFIG_SPARC32
4476 diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
4477 index 5a8c308..38def92 100644
4478 --- a/arch/sparc/include/asm/dma-mapping.h
4479 +++ b/arch/sparc/include/asm/dma-mapping.h
4480 @@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask);
4481 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
4482 #define dma_is_consistent(d, h) (1)
4483
4484 -extern struct dma_map_ops *dma_ops, pci32_dma_ops;
4485 +extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
4486 extern struct bus_type pci_bus_type;
4487
4488 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
4489 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
4490 {
4491 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
4492 if (dev->bus == &pci_bus_type)
4493 @@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
4494 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
4495 dma_addr_t *dma_handle, gfp_t flag)
4496 {
4497 - struct dma_map_ops *ops = get_dma_ops(dev);
4498 + const struct dma_map_ops *ops = get_dma_ops(dev);
4499 void *cpu_addr;
4500
4501 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
4502 @@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
4503 static inline void dma_free_coherent(struct device *dev, size_t size,
4504 void *cpu_addr, dma_addr_t dma_handle)
4505 {
4506 - struct dma_map_ops *ops = get_dma_ops(dev);
4507 + const struct dma_map_ops *ops = get_dma_ops(dev);
4508
4509 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
4510 ops->free_coherent(dev, size, cpu_addr, dma_handle);
4511 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
4512 index 381a1b5..b97e3ff 100644
4513 --- a/arch/sparc/include/asm/elf_32.h
4514 +++ b/arch/sparc/include/asm/elf_32.h
4515 @@ -116,6 +116,13 @@ typedef struct {
4516
4517 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
4518
4519 +#ifdef CONFIG_PAX_ASLR
4520 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
4521 +
4522 +#define PAX_DELTA_MMAP_LEN 16
4523 +#define PAX_DELTA_STACK_LEN 16
4524 +#endif
4525 +
4526 /* This yields a mask that user programs can use to figure out what
4527 instruction set this cpu supports. This can NOT be done in userspace
4528 on Sparc. */
4529 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
4530 index 9968085..c2106ef 100644
4531 --- a/arch/sparc/include/asm/elf_64.h
4532 +++ b/arch/sparc/include/asm/elf_64.h
4533 @@ -163,6 +163,12 @@ typedef struct {
4534 #define ELF_ET_DYN_BASE 0x0000010000000000UL
4535 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
4536
4537 +#ifdef CONFIG_PAX_ASLR
4538 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
4539 +
4540 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
4541 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
4542 +#endif
4543
4544 /* This yields a mask that user programs can use to figure out what
4545 instruction set this cpu supports. */
4546 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
4547 index e0cabe7..efd60f1 100644
4548 --- a/arch/sparc/include/asm/pgtable_32.h
4549 +++ b/arch/sparc/include/asm/pgtable_32.h
4550 @@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
4551 BTFIXUPDEF_INT(page_none)
4552 BTFIXUPDEF_INT(page_copy)
4553 BTFIXUPDEF_INT(page_readonly)
4554 +
4555 +#ifdef CONFIG_PAX_PAGEEXEC
4556 +BTFIXUPDEF_INT(page_shared_noexec)
4557 +BTFIXUPDEF_INT(page_copy_noexec)
4558 +BTFIXUPDEF_INT(page_readonly_noexec)
4559 +#endif
4560 +
4561 BTFIXUPDEF_INT(page_kernel)
4562
4563 #define PMD_SHIFT SUN4C_PMD_SHIFT
4564 @@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
4565 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
4566 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
4567
4568 +#ifdef CONFIG_PAX_PAGEEXEC
4569 +extern pgprot_t PAGE_SHARED_NOEXEC;
4570 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
4571 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
4572 +#else
4573 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
4574 +# define PAGE_COPY_NOEXEC PAGE_COPY
4575 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
4576 +#endif
4577 +
4578 extern unsigned long page_kernel;
4579
4580 #ifdef MODULE
4581 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
4582 index 1407c07..7e10231 100644
4583 --- a/arch/sparc/include/asm/pgtsrmmu.h
4584 +++ b/arch/sparc/include/asm/pgtsrmmu.h
4585 @@ -115,6 +115,13 @@
4586 SRMMU_EXEC | SRMMU_REF)
4587 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
4588 SRMMU_EXEC | SRMMU_REF)
4589 +
4590 +#ifdef CONFIG_PAX_PAGEEXEC
4591 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
4592 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4593 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4594 +#endif
4595 +
4596 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
4597 SRMMU_DIRTY | SRMMU_REF)
4598
4599 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
4600 index 43e5147..47622a1 100644
4601 --- a/arch/sparc/include/asm/spinlock_64.h
4602 +++ b/arch/sparc/include/asm/spinlock_64.h
4603 @@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
4604
4605 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
4606
4607 -static void inline arch_read_lock(raw_rwlock_t *lock)
4608 +static inline void arch_read_lock(raw_rwlock_t *lock)
4609 {
4610 unsigned long tmp1, tmp2;
4611
4612 __asm__ __volatile__ (
4613 "1: ldsw [%2], %0\n"
4614 " brlz,pn %0, 2f\n"
4615 -"4: add %0, 1, %1\n"
4616 +"4: addcc %0, 1, %1\n"
4617 +
4618 +#ifdef CONFIG_PAX_REFCOUNT
4619 +" tvs %%icc, 6\n"
4620 +#endif
4621 +
4622 " cas [%2], %0, %1\n"
4623 " cmp %0, %1\n"
4624 " bne,pn %%icc, 1b\n"
4625 @@ -112,10 +117,10 @@ static void inline arch_read_lock(raw_rwlock_t *lock)
4626 " .previous"
4627 : "=&r" (tmp1), "=&r" (tmp2)
4628 : "r" (lock)
4629 - : "memory");
4630 + : "memory", "cc");
4631 }
4632
4633 -static int inline arch_read_trylock(raw_rwlock_t *lock)
4634 +static inline int arch_read_trylock(raw_rwlock_t *lock)
4635 {
4636 int tmp1, tmp2;
4637
4638 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
4639 "1: ldsw [%2], %0\n"
4640 " brlz,a,pn %0, 2f\n"
4641 " mov 0, %0\n"
4642 -" add %0, 1, %1\n"
4643 +" addcc %0, 1, %1\n"
4644 +
4645 +#ifdef CONFIG_PAX_REFCOUNT
4646 +" tvs %%icc, 6\n"
4647 +#endif
4648 +
4649 " cas [%2], %0, %1\n"
4650 " cmp %0, %1\n"
4651 " bne,pn %%icc, 1b\n"
4652 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
4653 return tmp1;
4654 }
4655
4656 -static void inline arch_read_unlock(raw_rwlock_t *lock)
4657 +static inline void arch_read_unlock(raw_rwlock_t *lock)
4658 {
4659 unsigned long tmp1, tmp2;
4660
4661 __asm__ __volatile__(
4662 "1: lduw [%2], %0\n"
4663 -" sub %0, 1, %1\n"
4664 +" subcc %0, 1, %1\n"
4665 +
4666 +#ifdef CONFIG_PAX_REFCOUNT
4667 +" tvs %%icc, 6\n"
4668 +#endif
4669 +
4670 " cas [%2], %0, %1\n"
4671 " cmp %0, %1\n"
4672 " bne,pn %%xcc, 1b\n"
4673 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock)
4674 : "memory");
4675 }
4676
4677 -static void inline arch_write_lock(raw_rwlock_t *lock)
4678 +static inline void arch_write_lock(raw_rwlock_t *lock)
4679 {
4680 unsigned long mask, tmp1, tmp2;
4681
4682 @@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock)
4683 : "memory");
4684 }
4685
4686 -static void inline arch_write_unlock(raw_rwlock_t *lock)
4687 +static inline void arch_write_unlock(raw_rwlock_t *lock)
4688 {
4689 __asm__ __volatile__(
4690 " stw %%g0, [%0]"
4691 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock)
4692 : "memory");
4693 }
4694
4695 -static int inline arch_write_trylock(raw_rwlock_t *lock)
4696 +static inline int arch_write_trylock(raw_rwlock_t *lock)
4697 {
4698 unsigned long mask, tmp1, tmp2, result;
4699
4700 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
4701 index 844d73a..f787fb9 100644
4702 --- a/arch/sparc/include/asm/thread_info_32.h
4703 +++ b/arch/sparc/include/asm/thread_info_32.h
4704 @@ -50,6 +50,8 @@ struct thread_info {
4705 unsigned long w_saved;
4706
4707 struct restart_block restart_block;
4708 +
4709 + unsigned long lowest_stack;
4710 };
4711
4712 /*
4713 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
4714 index f78ad9a..9f55fc7 100644
4715 --- a/arch/sparc/include/asm/thread_info_64.h
4716 +++ b/arch/sparc/include/asm/thread_info_64.h
4717 @@ -68,6 +68,8 @@ struct thread_info {
4718 struct pt_regs *kern_una_regs;
4719 unsigned int kern_una_insn;
4720
4721 + unsigned long lowest_stack;
4722 +
4723 unsigned long fpregs[0] __attribute__ ((aligned(64)));
4724 };
4725
4726 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
4727 index e88fbe5..96b0ce5 100644
4728 --- a/arch/sparc/include/asm/uaccess.h
4729 +++ b/arch/sparc/include/asm/uaccess.h
4730 @@ -1,5 +1,13 @@
4731 #ifndef ___ASM_SPARC_UACCESS_H
4732 #define ___ASM_SPARC_UACCESS_H
4733 +
4734 +#ifdef __KERNEL__
4735 +#ifndef __ASSEMBLY__
4736 +#include <linux/types.h>
4737 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
4738 +#endif
4739 +#endif
4740 +
4741 #if defined(__sparc__) && defined(__arch64__)
4742 #include <asm/uaccess_64.h>
4743 #else
4744 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
4745 index 8303ac4..07f333d 100644
4746 --- a/arch/sparc/include/asm/uaccess_32.h
4747 +++ b/arch/sparc/include/asm/uaccess_32.h
4748 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
4749
4750 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4751 {
4752 - if (n && __access_ok((unsigned long) to, n))
4753 + if ((long)n < 0)
4754 + return n;
4755 +
4756 + if (n && __access_ok((unsigned long) to, n)) {
4757 + if (!__builtin_constant_p(n))
4758 + check_object_size(from, n, true);
4759 return __copy_user(to, (__force void __user *) from, n);
4760 - else
4761 + } else
4762 return n;
4763 }
4764
4765 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
4766 {
4767 + if ((long)n < 0)
4768 + return n;
4769 +
4770 + if (!__builtin_constant_p(n))
4771 + check_object_size(from, n, true);
4772 +
4773 return __copy_user(to, (__force void __user *) from, n);
4774 }
4775
4776 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4777 {
4778 - if (n && __access_ok((unsigned long) from, n))
4779 + if ((long)n < 0)
4780 + return n;
4781 +
4782 + if (n && __access_ok((unsigned long) from, n)) {
4783 + if (!__builtin_constant_p(n))
4784 + check_object_size(to, n, false);
4785 return __copy_user((__force void __user *) to, from, n);
4786 - else
4787 + } else
4788 return n;
4789 }
4790
4791 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
4792 {
4793 + if ((long)n < 0)
4794 + return n;
4795 +
4796 return __copy_user((__force void __user *) to, from, n);
4797 }
4798
4799 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
4800 index 9ea271e..7b8a271 100644
4801 --- a/arch/sparc/include/asm/uaccess_64.h
4802 +++ b/arch/sparc/include/asm/uaccess_64.h
4803 @@ -9,6 +9,7 @@
4804 #include <linux/compiler.h>
4805 #include <linux/string.h>
4806 #include <linux/thread_info.h>
4807 +#include <linux/kernel.h>
4808 #include <asm/asi.h>
4809 #include <asm/system.h>
4810 #include <asm/spitfire.h>
4811 @@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
4812 static inline unsigned long __must_check
4813 copy_from_user(void *to, const void __user *from, unsigned long size)
4814 {
4815 - unsigned long ret = ___copy_from_user(to, from, size);
4816 + unsigned long ret;
4817
4818 + if ((long)size < 0 || size > INT_MAX)
4819 + return size;
4820 +
4821 + if (!__builtin_constant_p(size))
4822 + check_object_size(to, size, false);
4823 +
4824 + ret = ___copy_from_user(to, from, size);
4825 if (unlikely(ret))
4826 ret = copy_from_user_fixup(to, from, size);
4827 return ret;
4828 @@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
4829 static inline unsigned long __must_check
4830 copy_to_user(void __user *to, const void *from, unsigned long size)
4831 {
4832 - unsigned long ret = ___copy_to_user(to, from, size);
4833 + unsigned long ret;
4834
4835 + if ((long)size < 0 || size > INT_MAX)
4836 + return size;
4837 +
4838 + if (!__builtin_constant_p(size))
4839 + check_object_size(from, size, true);
4840 +
4841 + ret = ___copy_to_user(to, from, size);
4842 if (unlikely(ret))
4843 ret = copy_to_user_fixup(to, from, size);
4844 return ret;
4845 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
4846 index 2782681..77ded84 100644
4847 --- a/arch/sparc/kernel/Makefile
4848 +++ b/arch/sparc/kernel/Makefile
4849 @@ -3,7 +3,7 @@
4850 #
4851
4852 asflags-y := -ansi
4853 -ccflags-y := -Werror
4854 +#ccflags-y := -Werror
4855
4856 extra-y := head_$(BITS).o
4857 extra-y += init_task.o
4858 diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
4859 index 7690cc2..ece64c9 100644
4860 --- a/arch/sparc/kernel/iommu.c
4861 +++ b/arch/sparc/kernel/iommu.c
4862 @@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
4863 spin_unlock_irqrestore(&iommu->lock, flags);
4864 }
4865
4866 -static struct dma_map_ops sun4u_dma_ops = {
4867 +static const struct dma_map_ops sun4u_dma_ops = {
4868 .alloc_coherent = dma_4u_alloc_coherent,
4869 .free_coherent = dma_4u_free_coherent,
4870 .map_page = dma_4u_map_page,
4871 @@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops = {
4872 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
4873 };
4874
4875 -struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4876 +const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4877 EXPORT_SYMBOL(dma_ops);
4878
4879 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
4880 diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
4881 index 9f61fd8..bd048db 100644
4882 --- a/arch/sparc/kernel/ioport.c
4883 +++ b/arch/sparc/kernel/ioport.c
4884 @@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
4885 BUG();
4886 }
4887
4888 -struct dma_map_ops sbus_dma_ops = {
4889 +const struct dma_map_ops sbus_dma_ops = {
4890 .alloc_coherent = sbus_alloc_coherent,
4891 .free_coherent = sbus_free_coherent,
4892 .map_page = sbus_map_page,
4893 @@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
4894 .sync_sg_for_device = sbus_sync_sg_for_device,
4895 };
4896
4897 -struct dma_map_ops *dma_ops = &sbus_dma_ops;
4898 +const struct dma_map_ops *dma_ops = &sbus_dma_ops;
4899 EXPORT_SYMBOL(dma_ops);
4900
4901 static int __init sparc_register_ioport(void)
4902 @@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
4903 }
4904 }
4905
4906 -struct dma_map_ops pci32_dma_ops = {
4907 +const struct dma_map_ops pci32_dma_ops = {
4908 .alloc_coherent = pci32_alloc_coherent,
4909 .free_coherent = pci32_free_coherent,
4910 .map_page = pci32_map_page,
4911 diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
4912 index 04df4ed..55c4b6e 100644
4913 --- a/arch/sparc/kernel/kgdb_32.c
4914 +++ b/arch/sparc/kernel/kgdb_32.c
4915 @@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
4916 {
4917 }
4918
4919 -struct kgdb_arch arch_kgdb_ops = {
4920 +const struct kgdb_arch arch_kgdb_ops = {
4921 /* Breakpoint instruction: ta 0x7d */
4922 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
4923 };
4924 diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
4925 index f5a0fd4..d886f71 100644
4926 --- a/arch/sparc/kernel/kgdb_64.c
4927 +++ b/arch/sparc/kernel/kgdb_64.c
4928 @@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
4929 {
4930 }
4931
4932 -struct kgdb_arch arch_kgdb_ops = {
4933 +const struct kgdb_arch arch_kgdb_ops = {
4934 /* Breakpoint instruction: ta 0x72 */
4935 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
4936 };
4937 diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
4938 index 23c33ff..d137fbd 100644
4939 --- a/arch/sparc/kernel/pci_sun4v.c
4940 +++ b/arch/sparc/kernel/pci_sun4v.c
4941 @@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
4942 spin_unlock_irqrestore(&iommu->lock, flags);
4943 }
4944
4945 -static struct dma_map_ops sun4v_dma_ops = {
4946 +static const struct dma_map_ops sun4v_dma_ops = {
4947 .alloc_coherent = dma_4v_alloc_coherent,
4948 .free_coherent = dma_4v_free_coherent,
4949 .map_page = dma_4v_map_page,
4950 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
4951 index c49865b..b41a81b 100644
4952 --- a/arch/sparc/kernel/process_32.c
4953 +++ b/arch/sparc/kernel/process_32.c
4954 @@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
4955 rw->ins[4], rw->ins[5],
4956 rw->ins[6],
4957 rw->ins[7]);
4958 - printk("%pS\n", (void *) rw->ins[7]);
4959 + printk("%pA\n", (void *) rw->ins[7]);
4960 rw = (struct reg_window32 *) rw->ins[6];
4961 }
4962 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4963 @@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
4964
4965 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4966 r->psr, r->pc, r->npc, r->y, print_tainted());
4967 - printk("PC: <%pS>\n", (void *) r->pc);
4968 + printk("PC: <%pA>\n", (void *) r->pc);
4969 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4970 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4971 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4972 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4973 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4974 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4975 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4976 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4977
4978 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4979 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4980 @@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
4981 rw = (struct reg_window32 *) fp;
4982 pc = rw->ins[7];
4983 printk("[%08lx : ", pc);
4984 - printk("%pS ] ", (void *) pc);
4985 + printk("%pA ] ", (void *) pc);
4986 fp = rw->ins[6];
4987 } while (++count < 16);
4988 printk("\n");
4989 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
4990 index cb70476..3d0c191 100644
4991 --- a/arch/sparc/kernel/process_64.c
4992 +++ b/arch/sparc/kernel/process_64.c
4993 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
4994 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4995 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4996 if (regs->tstate & TSTATE_PRIV)
4997 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4998 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4999 }
5000
5001 void show_regs(struct pt_regs *regs)
5002 {
5003 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5004 regs->tpc, regs->tnpc, regs->y, print_tainted());
5005 - printk("TPC: <%pS>\n", (void *) regs->tpc);
5006 + printk("TPC: <%pA>\n", (void *) regs->tpc);
5007 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5008 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5009 regs->u_regs[3]);
5010 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
5011 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5012 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5013 regs->u_regs[15]);
5014 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5015 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5016 show_regwindow(regs);
5017 }
5018
5019 @@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void)
5020 ((tp && tp->task) ? tp->task->pid : -1));
5021
5022 if (gp->tstate & TSTATE_PRIV) {
5023 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5024 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5025 (void *) gp->tpc,
5026 (void *) gp->o7,
5027 (void *) gp->i7,
5028 diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
5029 index 6edc4e5..06a69b4 100644
5030 --- a/arch/sparc/kernel/sigutil_64.c
5031 +++ b/arch/sparc/kernel/sigutil_64.c
5032 @@ -2,6 +2,7 @@
5033 #include <linux/types.h>
5034 #include <linux/thread_info.h>
5035 #include <linux/uaccess.h>
5036 +#include <linux/errno.h>
5037
5038 #include <asm/sigcontext.h>
5039 #include <asm/fpumacro.h>
5040 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5041 index 3a82e65..ce0a53a 100644
5042 --- a/arch/sparc/kernel/sys_sparc_32.c
5043 +++ b/arch/sparc/kernel/sys_sparc_32.c
5044 @@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5045 if (ARCH_SUN4C && len > 0x20000000)
5046 return -ENOMEM;
5047 if (!addr)
5048 - addr = TASK_UNMAPPED_BASE;
5049 + addr = current->mm->mmap_base;
5050
5051 if (flags & MAP_SHARED)
5052 addr = COLOUR_ALIGN(addr);
5053 @@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5054 }
5055 if (TASK_SIZE - PAGE_SIZE - len < addr)
5056 return -ENOMEM;
5057 - if (!vmm || addr + len <= vmm->vm_start)
5058 + if (check_heap_stack_gap(vmm, addr, len))
5059 return addr;
5060 addr = vmm->vm_end;
5061 if (flags & MAP_SHARED)
5062 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5063 index cfa0e19..98972ac 100644
5064 --- a/arch/sparc/kernel/sys_sparc_64.c
5065 +++ b/arch/sparc/kernel/sys_sparc_64.c
5066 @@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5067 /* We do not accept a shared mapping if it would violate
5068 * cache aliasing constraints.
5069 */
5070 - if ((flags & MAP_SHARED) &&
5071 + if ((filp || (flags & MAP_SHARED)) &&
5072 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5073 return -EINVAL;
5074 return addr;
5075 @@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5076 if (filp || (flags & MAP_SHARED))
5077 do_color_align = 1;
5078
5079 +#ifdef CONFIG_PAX_RANDMMAP
5080 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5081 +#endif
5082 +
5083 if (addr) {
5084 if (do_color_align)
5085 addr = COLOUR_ALIGN(addr, pgoff);
5086 @@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5087 addr = PAGE_ALIGN(addr);
5088
5089 vma = find_vma(mm, addr);
5090 - if (task_size - len >= addr &&
5091 - (!vma || addr + len <= vma->vm_start))
5092 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5093 return addr;
5094 }
5095
5096 if (len > mm->cached_hole_size) {
5097 - start_addr = addr = mm->free_area_cache;
5098 + start_addr = addr = mm->free_area_cache;
5099 } else {
5100 - start_addr = addr = TASK_UNMAPPED_BASE;
5101 + start_addr = addr = mm->mmap_base;
5102 mm->cached_hole_size = 0;
5103 }
5104
5105 @@ -175,14 +178,14 @@ full_search:
5106 vma = find_vma(mm, VA_EXCLUDE_END);
5107 }
5108 if (unlikely(task_size < addr)) {
5109 - if (start_addr != TASK_UNMAPPED_BASE) {
5110 - start_addr = addr = TASK_UNMAPPED_BASE;
5111 + if (start_addr != mm->mmap_base) {
5112 + start_addr = addr = mm->mmap_base;
5113 mm->cached_hole_size = 0;
5114 goto full_search;
5115 }
5116 return -ENOMEM;
5117 }
5118 - if (likely(!vma || addr + len <= vma->vm_start)) {
5119 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5120 /*
5121 * Remember the place where we stopped the search:
5122 */
5123 @@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5124 /* We do not accept a shared mapping if it would violate
5125 * cache aliasing constraints.
5126 */
5127 - if ((flags & MAP_SHARED) &&
5128 + if ((filp || (flags & MAP_SHARED)) &&
5129 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5130 return -EINVAL;
5131 return addr;
5132 @@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5133 addr = PAGE_ALIGN(addr);
5134
5135 vma = find_vma(mm, addr);
5136 - if (task_size - len >= addr &&
5137 - (!vma || addr + len <= vma->vm_start))
5138 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5139 return addr;
5140 }
5141
5142 @@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5143 /* make sure it can fit in the remaining address space */
5144 if (likely(addr > len)) {
5145 vma = find_vma(mm, addr-len);
5146 - if (!vma || addr <= vma->vm_start) {
5147 + if (check_heap_stack_gap(vma, addr - len, len)) {
5148 /* remember the address as a hint for next time */
5149 return (mm->free_area_cache = addr-len);
5150 }
5151 @@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5152 if (unlikely(mm->mmap_base < len))
5153 goto bottomup;
5154
5155 - addr = mm->mmap_base-len;
5156 - if (do_color_align)
5157 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5158 + addr = mm->mmap_base - len;
5159
5160 do {
5161 + if (do_color_align)
5162 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5163 /*
5164 * Lookup failure means no vma is above this address,
5165 * else if new region fits below vma->vm_start,
5166 * return with success:
5167 */
5168 vma = find_vma(mm, addr);
5169 - if (likely(!vma || addr+len <= vma->vm_start)) {
5170 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5171 /* remember the address as a hint for next time */
5172 return (mm->free_area_cache = addr);
5173 }
5174 @@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5175 mm->cached_hole_size = vma->vm_start - addr;
5176
5177 /* try just below the current vma->vm_start */
5178 - addr = vma->vm_start-len;
5179 - if (do_color_align)
5180 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5181 - } while (likely(len < vma->vm_start));
5182 + addr = skip_heap_stack_gap(vma, len);
5183 + } while (!IS_ERR_VALUE(addr));
5184
5185 bottomup:
5186 /*
5187 @@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5188 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
5189 sysctl_legacy_va_layout) {
5190 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5191 +
5192 +#ifdef CONFIG_PAX_RANDMMAP
5193 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5194 + mm->mmap_base += mm->delta_mmap;
5195 +#endif
5196 +
5197 mm->get_unmapped_area = arch_get_unmapped_area;
5198 mm->unmap_area = arch_unmap_area;
5199 } else {
5200 @@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5201 gap = (task_size / 6 * 5);
5202
5203 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5204 +
5205 +#ifdef CONFIG_PAX_RANDMMAP
5206 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5207 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5208 +#endif
5209 +
5210 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5211 mm->unmap_area = arch_unmap_area_topdown;
5212 }
5213 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
5214 index c0490c7..84959d1 100644
5215 --- a/arch/sparc/kernel/traps_32.c
5216 +++ b/arch/sparc/kernel/traps_32.c
5217 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
5218 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
5219 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
5220
5221 +extern void gr_handle_kernel_exploit(void);
5222 +
5223 void die_if_kernel(char *str, struct pt_regs *regs)
5224 {
5225 static int die_counter;
5226 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5227 count++ < 30 &&
5228 (((unsigned long) rw) >= PAGE_OFFSET) &&
5229 !(((unsigned long) rw) & 0x7)) {
5230 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
5231 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
5232 (void *) rw->ins[7]);
5233 rw = (struct reg_window32 *)rw->ins[6];
5234 }
5235 }
5236 printk("Instruction DUMP:");
5237 instruction_dump ((unsigned long *) regs->pc);
5238 - if(regs->psr & PSR_PS)
5239 + if(regs->psr & PSR_PS) {
5240 + gr_handle_kernel_exploit();
5241 do_exit(SIGKILL);
5242 + }
5243 do_exit(SIGSEGV);
5244 }
5245
5246 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
5247 index 10f7bb9..cdb6793 100644
5248 --- a/arch/sparc/kernel/traps_64.c
5249 +++ b/arch/sparc/kernel/traps_64.c
5250 @@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
5251 i + 1,
5252 p->trapstack[i].tstate, p->trapstack[i].tpc,
5253 p->trapstack[i].tnpc, p->trapstack[i].tt);
5254 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
5255 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
5256 }
5257 }
5258
5259 @@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
5260
5261 lvl -= 0x100;
5262 if (regs->tstate & TSTATE_PRIV) {
5263 +
5264 +#ifdef CONFIG_PAX_REFCOUNT
5265 + if (lvl == 6)
5266 + pax_report_refcount_overflow(regs);
5267 +#endif
5268 +
5269 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
5270 die_if_kernel(buffer, regs);
5271 }
5272 @@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
5273 void bad_trap_tl1(struct pt_regs *regs, long lvl)
5274 {
5275 char buffer[32];
5276 -
5277 +
5278 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
5279 0, lvl, SIGTRAP) == NOTIFY_STOP)
5280 return;
5281
5282 +#ifdef CONFIG_PAX_REFCOUNT
5283 + if (lvl == 6)
5284 + pax_report_refcount_overflow(regs);
5285 +#endif
5286 +
5287 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
5288
5289 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
5290 @@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
5291 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
5292 printk("%s" "ERROR(%d): ",
5293 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
5294 - printk("TPC<%pS>\n", (void *) regs->tpc);
5295 + printk("TPC<%pA>\n", (void *) regs->tpc);
5296 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
5297 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
5298 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
5299 @@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5300 smp_processor_id(),
5301 (type & 0x1) ? 'I' : 'D',
5302 regs->tpc);
5303 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
5304 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
5305 panic("Irrecoverable Cheetah+ parity error.");
5306 }
5307
5308 @@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5309 smp_processor_id(),
5310 (type & 0x1) ? 'I' : 'D',
5311 regs->tpc);
5312 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
5313 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
5314 }
5315
5316 struct sun4v_error_entry {
5317 @@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
5318
5319 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
5320 regs->tpc, tl);
5321 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
5322 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
5323 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5324 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
5325 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
5326 (void *) regs->u_regs[UREG_I7]);
5327 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
5328 "pte[%lx] error[%lx]\n",
5329 @@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
5330
5331 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
5332 regs->tpc, tl);
5333 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
5334 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
5335 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5336 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
5337 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
5338 (void *) regs->u_regs[UREG_I7]);
5339 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
5340 "pte[%lx] error[%lx]\n",
5341 @@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5342 fp = (unsigned long)sf->fp + STACK_BIAS;
5343 }
5344
5345 - printk(" [%016lx] %pS\n", pc, (void *) pc);
5346 + printk(" [%016lx] %pA\n", pc, (void *) pc);
5347 } while (++count < 16);
5348 }
5349
5350 @@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
5351 return (struct reg_window *) (fp + STACK_BIAS);
5352 }
5353
5354 +extern void gr_handle_kernel_exploit(void);
5355 +
5356 void die_if_kernel(char *str, struct pt_regs *regs)
5357 {
5358 static int die_counter;
5359 @@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5360 while (rw &&
5361 count++ < 30&&
5362 is_kernel_stack(current, rw)) {
5363 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
5364 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
5365 (void *) rw->ins[7]);
5366
5367 rw = kernel_stack_up(rw);
5368 @@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5369 }
5370 user_instruction_dump ((unsigned int __user *) regs->tpc);
5371 }
5372 - if (regs->tstate & TSTATE_PRIV)
5373 + if (regs->tstate & TSTATE_PRIV) {
5374 + gr_handle_kernel_exploit();
5375 do_exit(SIGKILL);
5376 + }
5377 +
5378 do_exit(SIGSEGV);
5379 }
5380 EXPORT_SYMBOL(die_if_kernel);
5381 diff --git a/arch/sparc/kernel/una_asm_64.S b/arch/sparc/kernel/una_asm_64.S
5382 index be183fe..1c8d332 100644
5383 --- a/arch/sparc/kernel/una_asm_64.S
5384 +++ b/arch/sparc/kernel/una_asm_64.S
5385 @@ -127,7 +127,7 @@ do_int_load:
5386 wr %o5, 0x0, %asi
5387 retl
5388 mov 0, %o0
5389 - .size __do_int_load, .-__do_int_load
5390 + .size do_int_load, .-do_int_load
5391
5392 .section __ex_table,"a"
5393 .word 4b, __retl_efault
5394 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
5395 index 3792099..2af17d8 100644
5396 --- a/arch/sparc/kernel/unaligned_64.c
5397 +++ b/arch/sparc/kernel/unaligned_64.c
5398 @@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs *regs)
5399 if (count < 5) {
5400 last_time = jiffies;
5401 count++;
5402 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
5403 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
5404 regs->tpc, (void *) regs->tpc);
5405 }
5406 }
5407 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
5408 index e75faf0..24f12f9 100644
5409 --- a/arch/sparc/lib/Makefile
5410 +++ b/arch/sparc/lib/Makefile
5411 @@ -2,7 +2,7 @@
5412 #
5413
5414 asflags-y := -ansi -DST_DIV0=0x02
5415 -ccflags-y := -Werror
5416 +#ccflags-y := -Werror
5417
5418 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
5419 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
5420 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
5421 index 0268210..f0291ca 100644
5422 --- a/arch/sparc/lib/atomic_64.S
5423 +++ b/arch/sparc/lib/atomic_64.S
5424 @@ -18,7 +18,12 @@
5425 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5426 BACKOFF_SETUP(%o2)
5427 1: lduw [%o1], %g1
5428 - add %g1, %o0, %g7
5429 + addcc %g1, %o0, %g7
5430 +
5431 +#ifdef CONFIG_PAX_REFCOUNT
5432 + tvs %icc, 6
5433 +#endif
5434 +
5435 cas [%o1], %g1, %g7
5436 cmp %g1, %g7
5437 bne,pn %icc, 2f
5438 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5439 2: BACKOFF_SPIN(%o2, %o3, 1b)
5440 .size atomic_add, .-atomic_add
5441
5442 + .globl atomic_add_unchecked
5443 + .type atomic_add_unchecked,#function
5444 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5445 + BACKOFF_SETUP(%o2)
5446 +1: lduw [%o1], %g1
5447 + add %g1, %o0, %g7
5448 + cas [%o1], %g1, %g7
5449 + cmp %g1, %g7
5450 + bne,pn %icc, 2f
5451 + nop
5452 + retl
5453 + nop
5454 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5455 + .size atomic_add_unchecked, .-atomic_add_unchecked
5456 +
5457 .globl atomic_sub
5458 .type atomic_sub,#function
5459 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5460 BACKOFF_SETUP(%o2)
5461 1: lduw [%o1], %g1
5462 - sub %g1, %o0, %g7
5463 + subcc %g1, %o0, %g7
5464 +
5465 +#ifdef CONFIG_PAX_REFCOUNT
5466 + tvs %icc, 6
5467 +#endif
5468 +
5469 cas [%o1], %g1, %g7
5470 cmp %g1, %g7
5471 bne,pn %icc, 2f
5472 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5473 2: BACKOFF_SPIN(%o2, %o3, 1b)
5474 .size atomic_sub, .-atomic_sub
5475
5476 + .globl atomic_sub_unchecked
5477 + .type atomic_sub_unchecked,#function
5478 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5479 + BACKOFF_SETUP(%o2)
5480 +1: lduw [%o1], %g1
5481 + sub %g1, %o0, %g7
5482 + cas [%o1], %g1, %g7
5483 + cmp %g1, %g7
5484 + bne,pn %icc, 2f
5485 + nop
5486 + retl
5487 + nop
5488 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5489 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
5490 +
5491 .globl atomic_add_ret
5492 .type atomic_add_ret,#function
5493 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5494 BACKOFF_SETUP(%o2)
5495 1: lduw [%o1], %g1
5496 - add %g1, %o0, %g7
5497 + addcc %g1, %o0, %g7
5498 +
5499 +#ifdef CONFIG_PAX_REFCOUNT
5500 + tvs %icc, 6
5501 +#endif
5502 +
5503 cas [%o1], %g1, %g7
5504 cmp %g1, %g7
5505 bne,pn %icc, 2f
5506 @@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5507 2: BACKOFF_SPIN(%o2, %o3, 1b)
5508 .size atomic_add_ret, .-atomic_add_ret
5509
5510 + .globl atomic_add_ret_unchecked
5511 + .type atomic_add_ret_unchecked,#function
5512 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5513 + BACKOFF_SETUP(%o2)
5514 +1: lduw [%o1], %g1
5515 + addcc %g1, %o0, %g7
5516 + cas [%o1], %g1, %g7
5517 + cmp %g1, %g7
5518 + bne,pn %icc, 2f
5519 + add %g7, %o0, %g7
5520 + sra %g7, 0, %o0
5521 + retl
5522 + nop
5523 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5524 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
5525 +
5526 .globl atomic_sub_ret
5527 .type atomic_sub_ret,#function
5528 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5529 BACKOFF_SETUP(%o2)
5530 1: lduw [%o1], %g1
5531 - sub %g1, %o0, %g7
5532 + subcc %g1, %o0, %g7
5533 +
5534 +#ifdef CONFIG_PAX_REFCOUNT
5535 + tvs %icc, 6
5536 +#endif
5537 +
5538 cas [%o1], %g1, %g7
5539 cmp %g1, %g7
5540 bne,pn %icc, 2f
5541 @@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5542 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5543 BACKOFF_SETUP(%o2)
5544 1: ldx [%o1], %g1
5545 - add %g1, %o0, %g7
5546 + addcc %g1, %o0, %g7
5547 +
5548 +#ifdef CONFIG_PAX_REFCOUNT
5549 + tvs %xcc, 6
5550 +#endif
5551 +
5552 casx [%o1], %g1, %g7
5553 cmp %g1, %g7
5554 bne,pn %xcc, 2f
5555 @@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5556 2: BACKOFF_SPIN(%o2, %o3, 1b)
5557 .size atomic64_add, .-atomic64_add
5558
5559 + .globl atomic64_add_unchecked
5560 + .type atomic64_add_unchecked,#function
5561 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5562 + BACKOFF_SETUP(%o2)
5563 +1: ldx [%o1], %g1
5564 + addcc %g1, %o0, %g7
5565 + casx [%o1], %g1, %g7
5566 + cmp %g1, %g7
5567 + bne,pn %xcc, 2f
5568 + nop
5569 + retl
5570 + nop
5571 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5572 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
5573 +
5574 .globl atomic64_sub
5575 .type atomic64_sub,#function
5576 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5577 BACKOFF_SETUP(%o2)
5578 1: ldx [%o1], %g1
5579 - sub %g1, %o0, %g7
5580 + subcc %g1, %o0, %g7
5581 +
5582 +#ifdef CONFIG_PAX_REFCOUNT
5583 + tvs %xcc, 6
5584 +#endif
5585 +
5586 casx [%o1], %g1, %g7
5587 cmp %g1, %g7
5588 bne,pn %xcc, 2f
5589 @@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5590 2: BACKOFF_SPIN(%o2, %o3, 1b)
5591 .size atomic64_sub, .-atomic64_sub
5592
5593 + .globl atomic64_sub_unchecked
5594 + .type atomic64_sub_unchecked,#function
5595 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5596 + BACKOFF_SETUP(%o2)
5597 +1: ldx [%o1], %g1
5598 + subcc %g1, %o0, %g7
5599 + casx [%o1], %g1, %g7
5600 + cmp %g1, %g7
5601 + bne,pn %xcc, 2f
5602 + nop
5603 + retl
5604 + nop
5605 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5606 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
5607 +
5608 .globl atomic64_add_ret
5609 .type atomic64_add_ret,#function
5610 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5611 BACKOFF_SETUP(%o2)
5612 1: ldx [%o1], %g1
5613 - add %g1, %o0, %g7
5614 + addcc %g1, %o0, %g7
5615 +
5616 +#ifdef CONFIG_PAX_REFCOUNT
5617 + tvs %xcc, 6
5618 +#endif
5619 +
5620 casx [%o1], %g1, %g7
5621 cmp %g1, %g7
5622 bne,pn %xcc, 2f
5623 @@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5624 2: BACKOFF_SPIN(%o2, %o3, 1b)
5625 .size atomic64_add_ret, .-atomic64_add_ret
5626
5627 + .globl atomic64_add_ret_unchecked
5628 + .type atomic64_add_ret_unchecked,#function
5629 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5630 + BACKOFF_SETUP(%o2)
5631 +1: ldx [%o1], %g1
5632 + addcc %g1, %o0, %g7
5633 + casx [%o1], %g1, %g7
5634 + cmp %g1, %g7
5635 + bne,pn %xcc, 2f
5636 + add %g7, %o0, %g7
5637 + mov %g7, %o0
5638 + retl
5639 + nop
5640 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5641 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
5642 +
5643 .globl atomic64_sub_ret
5644 .type atomic64_sub_ret,#function
5645 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5646 BACKOFF_SETUP(%o2)
5647 1: ldx [%o1], %g1
5648 - sub %g1, %o0, %g7
5649 + subcc %g1, %o0, %g7
5650 +
5651 +#ifdef CONFIG_PAX_REFCOUNT
5652 + tvs %xcc, 6
5653 +#endif
5654 +
5655 casx [%o1], %g1, %g7
5656 cmp %g1, %g7
5657 bne,pn %xcc, 2f
5658 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
5659 index 704b126..2e79d76 100644
5660 --- a/arch/sparc/lib/ksyms.c
5661 +++ b/arch/sparc/lib/ksyms.c
5662 @@ -144,12 +144,18 @@ EXPORT_SYMBOL(__downgrade_write);
5663
5664 /* Atomic counter implementation. */
5665 EXPORT_SYMBOL(atomic_add);
5666 +EXPORT_SYMBOL(atomic_add_unchecked);
5667 EXPORT_SYMBOL(atomic_add_ret);
5668 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
5669 EXPORT_SYMBOL(atomic_sub);
5670 +EXPORT_SYMBOL(atomic_sub_unchecked);
5671 EXPORT_SYMBOL(atomic_sub_ret);
5672 EXPORT_SYMBOL(atomic64_add);
5673 +EXPORT_SYMBOL(atomic64_add_unchecked);
5674 EXPORT_SYMBOL(atomic64_add_ret);
5675 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
5676 EXPORT_SYMBOL(atomic64_sub);
5677 +EXPORT_SYMBOL(atomic64_sub_unchecked);
5678 EXPORT_SYMBOL(atomic64_sub_ret);
5679
5680 /* Atomic bit operations. */
5681 diff --git a/arch/sparc/lib/rwsem_64.S b/arch/sparc/lib/rwsem_64.S
5682 index 91a7d29..ce75c29 100644
5683 --- a/arch/sparc/lib/rwsem_64.S
5684 +++ b/arch/sparc/lib/rwsem_64.S
5685 @@ -11,7 +11,12 @@
5686 .globl __down_read
5687 __down_read:
5688 1: lduw [%o0], %g1
5689 - add %g1, 1, %g7
5690 + addcc %g1, 1, %g7
5691 +
5692 +#ifdef CONFIG_PAX_REFCOUNT
5693 + tvs %icc, 6
5694 +#endif
5695 +
5696 cas [%o0], %g1, %g7
5697 cmp %g1, %g7
5698 bne,pn %icc, 1b
5699 @@ -33,7 +38,12 @@ __down_read:
5700 .globl __down_read_trylock
5701 __down_read_trylock:
5702 1: lduw [%o0], %g1
5703 - add %g1, 1, %g7
5704 + addcc %g1, 1, %g7
5705 +
5706 +#ifdef CONFIG_PAX_REFCOUNT
5707 + tvs %icc, 6
5708 +#endif
5709 +
5710 cmp %g7, 0
5711 bl,pn %icc, 2f
5712 mov 0, %o1
5713 @@ -51,7 +61,12 @@ __down_write:
5714 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5715 1:
5716 lduw [%o0], %g3
5717 - add %g3, %g1, %g7
5718 + addcc %g3, %g1, %g7
5719 +
5720 +#ifdef CONFIG_PAX_REFCOUNT
5721 + tvs %icc, 6
5722 +#endif
5723 +
5724 cas [%o0], %g3, %g7
5725 cmp %g3, %g7
5726 bne,pn %icc, 1b
5727 @@ -77,7 +92,12 @@ __down_write_trylock:
5728 cmp %g3, 0
5729 bne,pn %icc, 2f
5730 mov 0, %o1
5731 - add %g3, %g1, %g7
5732 + addcc %g3, %g1, %g7
5733 +
5734 +#ifdef CONFIG_PAX_REFCOUNT
5735 + tvs %icc, 6
5736 +#endif
5737 +
5738 cas [%o0], %g3, %g7
5739 cmp %g3, %g7
5740 bne,pn %icc, 1b
5741 @@ -90,7 +110,12 @@ __down_write_trylock:
5742 __up_read:
5743 1:
5744 lduw [%o0], %g1
5745 - sub %g1, 1, %g7
5746 + subcc %g1, 1, %g7
5747 +
5748 +#ifdef CONFIG_PAX_REFCOUNT
5749 + tvs %icc, 6
5750 +#endif
5751 +
5752 cas [%o0], %g1, %g7
5753 cmp %g1, %g7
5754 bne,pn %icc, 1b
5755 @@ -118,7 +143,12 @@ __up_write:
5756 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5757 1:
5758 lduw [%o0], %g3
5759 - sub %g3, %g1, %g7
5760 + subcc %g3, %g1, %g7
5761 +
5762 +#ifdef CONFIG_PAX_REFCOUNT
5763 + tvs %icc, 6
5764 +#endif
5765 +
5766 cas [%o0], %g3, %g7
5767 cmp %g3, %g7
5768 bne,pn %icc, 1b
5769 @@ -143,7 +173,12 @@ __downgrade_write:
5770 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
5771 1:
5772 lduw [%o0], %g3
5773 - sub %g3, %g1, %g7
5774 + subcc %g3, %g1, %g7
5775 +
5776 +#ifdef CONFIG_PAX_REFCOUNT
5777 + tvs %icc, 6
5778 +#endif
5779 +
5780 cas [%o0], %g3, %g7
5781 cmp %g3, %g7
5782 bne,pn %icc, 1b
5783 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
5784 index 79836a7..62f47a2 100644
5785 --- a/arch/sparc/mm/Makefile
5786 +++ b/arch/sparc/mm/Makefile
5787 @@ -2,7 +2,7 @@
5788 #
5789
5790 asflags-y := -ansi
5791 -ccflags-y := -Werror
5792 +#ccflags-y := -Werror
5793
5794 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5795 obj-y += fault_$(BITS).o
5796 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
5797 index b99f81c..3453e93 100644
5798 --- a/arch/sparc/mm/fault_32.c
5799 +++ b/arch/sparc/mm/fault_32.c
5800 @@ -21,6 +21,9 @@
5801 #include <linux/interrupt.h>
5802 #include <linux/module.h>
5803 #include <linux/kdebug.h>
5804 +#include <linux/slab.h>
5805 +#include <linux/pagemap.h>
5806 +#include <linux/compiler.h>
5807
5808 #include <asm/system.h>
5809 #include <asm/page.h>
5810 @@ -167,6 +170,267 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
5811 return safe_compute_effective_address(regs, insn);
5812 }
5813
5814 +#ifdef CONFIG_PAX_PAGEEXEC
5815 +#ifdef CONFIG_PAX_DLRESOLVE
5816 +static void pax_emuplt_close(struct vm_area_struct *vma)
5817 +{
5818 + vma->vm_mm->call_dl_resolve = 0UL;
5819 +}
5820 +
5821 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5822 +{
5823 + unsigned int *kaddr;
5824 +
5825 + vmf->page = alloc_page(GFP_HIGHUSER);
5826 + if (!vmf->page)
5827 + return VM_FAULT_OOM;
5828 +
5829 + kaddr = kmap(vmf->page);
5830 + memset(kaddr, 0, PAGE_SIZE);
5831 + kaddr[0] = 0x9DE3BFA8U; /* save */
5832 + flush_dcache_page(vmf->page);
5833 + kunmap(vmf->page);
5834 + return VM_FAULT_MAJOR;
5835 +}
5836 +
5837 +static const struct vm_operations_struct pax_vm_ops = {
5838 + .close = pax_emuplt_close,
5839 + .fault = pax_emuplt_fault
5840 +};
5841 +
5842 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5843 +{
5844 + int ret;
5845 +
5846 + vma->vm_mm = current->mm;
5847 + vma->vm_start = addr;
5848 + vma->vm_end = addr + PAGE_SIZE;
5849 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5850 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5851 + vma->vm_ops = &pax_vm_ops;
5852 +
5853 + ret = insert_vm_struct(current->mm, vma);
5854 + if (ret)
5855 + return ret;
5856 +
5857 + ++current->mm->total_vm;
5858 + return 0;
5859 +}
5860 +#endif
5861 +
5862 +/*
5863 + * PaX: decide what to do with offenders (regs->pc = fault address)
5864 + *
5865 + * returns 1 when task should be killed
5866 + * 2 when patched PLT trampoline was detected
5867 + * 3 when unpatched PLT trampoline was detected
5868 + */
5869 +static int pax_handle_fetch_fault(struct pt_regs *regs)
5870 +{
5871 +
5872 +#ifdef CONFIG_PAX_EMUPLT
5873 + int err;
5874 +
5875 + do { /* PaX: patched PLT emulation #1 */
5876 + unsigned int sethi1, sethi2, jmpl;
5877 +
5878 + err = get_user(sethi1, (unsigned int *)regs->pc);
5879 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
5880 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
5881 +
5882 + if (err)
5883 + break;
5884 +
5885 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5886 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
5887 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
5888 + {
5889 + unsigned int addr;
5890 +
5891 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5892 + addr = regs->u_regs[UREG_G1];
5893 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5894 + regs->pc = addr;
5895 + regs->npc = addr+4;
5896 + return 2;
5897 + }
5898 + } while (0);
5899 +
5900 + { /* PaX: patched PLT emulation #2 */
5901 + unsigned int ba;
5902 +
5903 + err = get_user(ba, (unsigned int *)regs->pc);
5904 +
5905 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5906 + unsigned int addr;
5907 +
5908 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5909 + regs->pc = addr;
5910 + regs->npc = addr+4;
5911 + return 2;
5912 + }
5913 + }
5914 +
5915 + do { /* PaX: patched PLT emulation #3 */
5916 + unsigned int sethi, jmpl, nop;
5917 +
5918 + err = get_user(sethi, (unsigned int *)regs->pc);
5919 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
5920 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
5921 +
5922 + if (err)
5923 + break;
5924 +
5925 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5926 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5927 + nop == 0x01000000U)
5928 + {
5929 + unsigned int addr;
5930 +
5931 + addr = (sethi & 0x003FFFFFU) << 10;
5932 + regs->u_regs[UREG_G1] = addr;
5933 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5934 + regs->pc = addr;
5935 + regs->npc = addr+4;
5936 + return 2;
5937 + }
5938 + } while (0);
5939 +
5940 + do { /* PaX: unpatched PLT emulation step 1 */
5941 + unsigned int sethi, ba, nop;
5942 +
5943 + err = get_user(sethi, (unsigned int *)regs->pc);
5944 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
5945 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
5946 +
5947 + if (err)
5948 + break;
5949 +
5950 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5951 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5952 + nop == 0x01000000U)
5953 + {
5954 + unsigned int addr, save, call;
5955 +
5956 + if ((ba & 0xFFC00000U) == 0x30800000U)
5957 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5958 + else
5959 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
5960 +
5961 + err = get_user(save, (unsigned int *)addr);
5962 + err |= get_user(call, (unsigned int *)(addr+4));
5963 + err |= get_user(nop, (unsigned int *)(addr+8));
5964 + if (err)
5965 + break;
5966 +
5967 +#ifdef CONFIG_PAX_DLRESOLVE
5968 + if (save == 0x9DE3BFA8U &&
5969 + (call & 0xC0000000U) == 0x40000000U &&
5970 + nop == 0x01000000U)
5971 + {
5972 + struct vm_area_struct *vma;
5973 + unsigned long call_dl_resolve;
5974 +
5975 + down_read(&current->mm->mmap_sem);
5976 + call_dl_resolve = current->mm->call_dl_resolve;
5977 + up_read(&current->mm->mmap_sem);
5978 + if (likely(call_dl_resolve))
5979 + goto emulate;
5980 +
5981 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5982 +
5983 + down_write(&current->mm->mmap_sem);
5984 + if (current->mm->call_dl_resolve) {
5985 + call_dl_resolve = current->mm->call_dl_resolve;
5986 + up_write(&current->mm->mmap_sem);
5987 + if (vma)
5988 + kmem_cache_free(vm_area_cachep, vma);
5989 + goto emulate;
5990 + }
5991 +
5992 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5993 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5994 + up_write(&current->mm->mmap_sem);
5995 + if (vma)
5996 + kmem_cache_free(vm_area_cachep, vma);
5997 + return 1;
5998 + }
5999 +
6000 + if (pax_insert_vma(vma, call_dl_resolve)) {
6001 + up_write(&current->mm->mmap_sem);
6002 + kmem_cache_free(vm_area_cachep, vma);
6003 + return 1;
6004 + }
6005 +
6006 + current->mm->call_dl_resolve = call_dl_resolve;
6007 + up_write(&current->mm->mmap_sem);
6008 +
6009 +emulate:
6010 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6011 + regs->pc = call_dl_resolve;
6012 + regs->npc = addr+4;
6013 + return 3;
6014 + }
6015 +#endif
6016 +
6017 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6018 + if ((save & 0xFFC00000U) == 0x05000000U &&
6019 + (call & 0xFFFFE000U) == 0x85C0A000U &&
6020 + nop == 0x01000000U)
6021 + {
6022 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6023 + regs->u_regs[UREG_G2] = addr + 4;
6024 + addr = (save & 0x003FFFFFU) << 10;
6025 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6026 + regs->pc = addr;
6027 + regs->npc = addr+4;
6028 + return 3;
6029 + }
6030 + }
6031 + } while (0);
6032 +
6033 + do { /* PaX: unpatched PLT emulation step 2 */
6034 + unsigned int save, call, nop;
6035 +
6036 + err = get_user(save, (unsigned int *)(regs->pc-4));
6037 + err |= get_user(call, (unsigned int *)regs->pc);
6038 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
6039 + if (err)
6040 + break;
6041 +
6042 + if (save == 0x9DE3BFA8U &&
6043 + (call & 0xC0000000U) == 0x40000000U &&
6044 + nop == 0x01000000U)
6045 + {
6046 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6047 +
6048 + regs->u_regs[UREG_RETPC] = regs->pc;
6049 + regs->pc = dl_resolve;
6050 + regs->npc = dl_resolve+4;
6051 + return 3;
6052 + }
6053 + } while (0);
6054 +#endif
6055 +
6056 + return 1;
6057 +}
6058 +
6059 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6060 +{
6061 + unsigned long i;
6062 +
6063 + printk(KERN_ERR "PAX: bytes at PC: ");
6064 + for (i = 0; i < 8; i++) {
6065 + unsigned int c;
6066 + if (get_user(c, (unsigned int *)pc+i))
6067 + printk(KERN_CONT "???????? ");
6068 + else
6069 + printk(KERN_CONT "%08x ", c);
6070 + }
6071 + printk("\n");
6072 +}
6073 +#endif
6074 +
6075 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
6076 unsigned long address)
6077 {
6078 @@ -231,6 +495,24 @@ good_area:
6079 if(!(vma->vm_flags & VM_WRITE))
6080 goto bad_area;
6081 } else {
6082 +
6083 +#ifdef CONFIG_PAX_PAGEEXEC
6084 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6085 + up_read(&mm->mmap_sem);
6086 + switch (pax_handle_fetch_fault(regs)) {
6087 +
6088 +#ifdef CONFIG_PAX_EMUPLT
6089 + case 2:
6090 + case 3:
6091 + return;
6092 +#endif
6093 +
6094 + }
6095 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6096 + do_group_exit(SIGKILL);
6097 + }
6098 +#endif
6099 +
6100 /* Allow reads even for write-only mappings */
6101 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6102 goto bad_area;
6103 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6104 index 43b0da9..a0b78f9 100644
6105 --- a/arch/sparc/mm/fault_64.c
6106 +++ b/arch/sparc/mm/fault_64.c
6107 @@ -20,6 +20,9 @@
6108 #include <linux/kprobes.h>
6109 #include <linux/kdebug.h>
6110 #include <linux/percpu.h>
6111 +#include <linux/slab.h>
6112 +#include <linux/pagemap.h>
6113 +#include <linux/compiler.h>
6114
6115 #include <asm/page.h>
6116 #include <asm/pgtable.h>
6117 @@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6118 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6119 regs->tpc);
6120 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6121 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6122 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6123 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6124 dump_stack();
6125 unhandled_fault(regs->tpc, current, regs);
6126 @@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_address(struct pt_regs *regs,
6127 show_regs(regs);
6128 }
6129
6130 +#ifdef CONFIG_PAX_PAGEEXEC
6131 +#ifdef CONFIG_PAX_DLRESOLVE
6132 +static void pax_emuplt_close(struct vm_area_struct *vma)
6133 +{
6134 + vma->vm_mm->call_dl_resolve = 0UL;
6135 +}
6136 +
6137 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6138 +{
6139 + unsigned int *kaddr;
6140 +
6141 + vmf->page = alloc_page(GFP_HIGHUSER);
6142 + if (!vmf->page)
6143 + return VM_FAULT_OOM;
6144 +
6145 + kaddr = kmap(vmf->page);
6146 + memset(kaddr, 0, PAGE_SIZE);
6147 + kaddr[0] = 0x9DE3BFA8U; /* save */
6148 + flush_dcache_page(vmf->page);
6149 + kunmap(vmf->page);
6150 + return VM_FAULT_MAJOR;
6151 +}
6152 +
6153 +static const struct vm_operations_struct pax_vm_ops = {
6154 + .close = pax_emuplt_close,
6155 + .fault = pax_emuplt_fault
6156 +};
6157 +
6158 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6159 +{
6160 + int ret;
6161 +
6162 + vma->vm_mm = current->mm;
6163 + vma->vm_start = addr;
6164 + vma->vm_end = addr + PAGE_SIZE;
6165 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6166 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6167 + vma->vm_ops = &pax_vm_ops;
6168 +
6169 + ret = insert_vm_struct(current->mm, vma);
6170 + if (ret)
6171 + return ret;
6172 +
6173 + ++current->mm->total_vm;
6174 + return 0;
6175 +}
6176 +#endif
6177 +
6178 +/*
6179 + * PaX: decide what to do with offenders (regs->tpc = fault address)
6180 + *
6181 + * returns 1 when task should be killed
6182 + * 2 when patched PLT trampoline was detected
6183 + * 3 when unpatched PLT trampoline was detected
6184 + */
6185 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6186 +{
6187 +
6188 +#ifdef CONFIG_PAX_EMUPLT
6189 + int err;
6190 +
6191 + do { /* PaX: patched PLT emulation #1 */
6192 + unsigned int sethi1, sethi2, jmpl;
6193 +
6194 + err = get_user(sethi1, (unsigned int *)regs->tpc);
6195 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6196 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6197 +
6198 + if (err)
6199 + break;
6200 +
6201 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6202 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6203 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6204 + {
6205 + unsigned long addr;
6206 +
6207 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6208 + addr = regs->u_regs[UREG_G1];
6209 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6210 +
6211 + if (test_thread_flag(TIF_32BIT))
6212 + addr &= 0xFFFFFFFFUL;
6213 +
6214 + regs->tpc = addr;
6215 + regs->tnpc = addr+4;
6216 + return 2;
6217 + }
6218 + } while (0);
6219 +
6220 + { /* PaX: patched PLT emulation #2 */
6221 + unsigned int ba;
6222 +
6223 + err = get_user(ba, (unsigned int *)regs->tpc);
6224 +
6225 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6226 + unsigned long addr;
6227 +
6228 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6229 +
6230 + if (test_thread_flag(TIF_32BIT))
6231 + addr &= 0xFFFFFFFFUL;
6232 +
6233 + regs->tpc = addr;
6234 + regs->tnpc = addr+4;
6235 + return 2;
6236 + }
6237 + }
6238 +
6239 + do { /* PaX: patched PLT emulation #3 */
6240 + unsigned int sethi, jmpl, nop;
6241 +
6242 + err = get_user(sethi, (unsigned int *)regs->tpc);
6243 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
6244 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6245 +
6246 + if (err)
6247 + break;
6248 +
6249 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6250 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6251 + nop == 0x01000000U)
6252 + {
6253 + unsigned long addr;
6254 +
6255 + addr = (sethi & 0x003FFFFFU) << 10;
6256 + regs->u_regs[UREG_G1] = addr;
6257 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6258 +
6259 + if (test_thread_flag(TIF_32BIT))
6260 + addr &= 0xFFFFFFFFUL;
6261 +
6262 + regs->tpc = addr;
6263 + regs->tnpc = addr+4;
6264 + return 2;
6265 + }
6266 + } while (0);
6267 +
6268 + do { /* PaX: patched PLT emulation #4 */
6269 + unsigned int sethi, mov1, call, mov2;
6270 +
6271 + err = get_user(sethi, (unsigned int *)regs->tpc);
6272 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
6273 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
6274 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
6275 +
6276 + if (err)
6277 + break;
6278 +
6279 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6280 + mov1 == 0x8210000FU &&
6281 + (call & 0xC0000000U) == 0x40000000U &&
6282 + mov2 == 0x9E100001U)
6283 + {
6284 + unsigned long addr;
6285 +
6286 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
6287 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6288 +
6289 + if (test_thread_flag(TIF_32BIT))
6290 + addr &= 0xFFFFFFFFUL;
6291 +
6292 + regs->tpc = addr;
6293 + regs->tnpc = addr+4;
6294 + return 2;
6295 + }
6296 + } while (0);
6297 +
6298 + do { /* PaX: patched PLT emulation #5 */
6299 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
6300 +
6301 + err = get_user(sethi, (unsigned int *)regs->tpc);
6302 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6303 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6304 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
6305 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
6306 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
6307 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
6308 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
6309 +
6310 + if (err)
6311 + break;
6312 +
6313 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6314 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
6315 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6316 + (or1 & 0xFFFFE000U) == 0x82106000U &&
6317 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
6318 + sllx == 0x83287020U &&
6319 + jmpl == 0x81C04005U &&
6320 + nop == 0x01000000U)
6321 + {
6322 + unsigned long addr;
6323 +
6324 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6325 + regs->u_regs[UREG_G1] <<= 32;
6326 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6327 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6328 + regs->tpc = addr;
6329 + regs->tnpc = addr+4;
6330 + return 2;
6331 + }
6332 + } while (0);
6333 +
6334 + do { /* PaX: patched PLT emulation #6 */
6335 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
6336 +
6337 + err = get_user(sethi, (unsigned int *)regs->tpc);
6338 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6339 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6340 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
6341 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
6342 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
6343 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
6344 +
6345 + if (err)
6346 + break;
6347 +
6348 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6349 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
6350 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6351 + sllx == 0x83287020U &&
6352 + (or & 0xFFFFE000U) == 0x8A116000U &&
6353 + jmpl == 0x81C04005U &&
6354 + nop == 0x01000000U)
6355 + {
6356 + unsigned long addr;
6357 +
6358 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
6359 + regs->u_regs[UREG_G1] <<= 32;
6360 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
6361 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6362 + regs->tpc = addr;
6363 + regs->tnpc = addr+4;
6364 + return 2;
6365 + }
6366 + } while (0);
6367 +
6368 + do { /* PaX: unpatched PLT emulation step 1 */
6369 + unsigned int sethi, ba, nop;
6370 +
6371 + err = get_user(sethi, (unsigned int *)regs->tpc);
6372 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6373 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6374 +
6375 + if (err)
6376 + break;
6377 +
6378 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6379 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6380 + nop == 0x01000000U)
6381 + {
6382 + unsigned long addr;
6383 + unsigned int save, call;
6384 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
6385 +
6386 + if ((ba & 0xFFC00000U) == 0x30800000U)
6387 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6388 + else
6389 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6390 +
6391 + if (test_thread_flag(TIF_32BIT))
6392 + addr &= 0xFFFFFFFFUL;
6393 +
6394 + err = get_user(save, (unsigned int *)addr);
6395 + err |= get_user(call, (unsigned int *)(addr+4));
6396 + err |= get_user(nop, (unsigned int *)(addr+8));
6397 + if (err)
6398 + break;
6399 +
6400 +#ifdef CONFIG_PAX_DLRESOLVE
6401 + if (save == 0x9DE3BFA8U &&
6402 + (call & 0xC0000000U) == 0x40000000U &&
6403 + nop == 0x01000000U)
6404 + {
6405 + struct vm_area_struct *vma;
6406 + unsigned long call_dl_resolve;
6407 +
6408 + down_read(&current->mm->mmap_sem);
6409 + call_dl_resolve = current->mm->call_dl_resolve;
6410 + up_read(&current->mm->mmap_sem);
6411 + if (likely(call_dl_resolve))
6412 + goto emulate;
6413 +
6414 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6415 +
6416 + down_write(&current->mm->mmap_sem);
6417 + if (current->mm->call_dl_resolve) {
6418 + call_dl_resolve = current->mm->call_dl_resolve;
6419 + up_write(&current->mm->mmap_sem);
6420 + if (vma)
6421 + kmem_cache_free(vm_area_cachep, vma);
6422 + goto emulate;
6423 + }
6424 +
6425 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6426 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6427 + up_write(&current->mm->mmap_sem);
6428 + if (vma)
6429 + kmem_cache_free(vm_area_cachep, vma);
6430 + return 1;
6431 + }
6432 +
6433 + if (pax_insert_vma(vma, call_dl_resolve)) {
6434 + up_write(&current->mm->mmap_sem);
6435 + kmem_cache_free(vm_area_cachep, vma);
6436 + return 1;
6437 + }
6438 +
6439 + current->mm->call_dl_resolve = call_dl_resolve;
6440 + up_write(&current->mm->mmap_sem);
6441 +
6442 +emulate:
6443 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6444 + regs->tpc = call_dl_resolve;
6445 + regs->tnpc = addr+4;
6446 + return 3;
6447 + }
6448 +#endif
6449 +
6450 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6451 + if ((save & 0xFFC00000U) == 0x05000000U &&
6452 + (call & 0xFFFFE000U) == 0x85C0A000U &&
6453 + nop == 0x01000000U)
6454 + {
6455 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6456 + regs->u_regs[UREG_G2] = addr + 4;
6457 + addr = (save & 0x003FFFFFU) << 10;
6458 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6459 +
6460 + if (test_thread_flag(TIF_32BIT))
6461 + addr &= 0xFFFFFFFFUL;
6462 +
6463 + regs->tpc = addr;
6464 + regs->tnpc = addr+4;
6465 + return 3;
6466 + }
6467 +
6468 + /* PaX: 64-bit PLT stub */
6469 + err = get_user(sethi1, (unsigned int *)addr);
6470 + err |= get_user(sethi2, (unsigned int *)(addr+4));
6471 + err |= get_user(or1, (unsigned int *)(addr+8));
6472 + err |= get_user(or2, (unsigned int *)(addr+12));
6473 + err |= get_user(sllx, (unsigned int *)(addr+16));
6474 + err |= get_user(add, (unsigned int *)(addr+20));
6475 + err |= get_user(jmpl, (unsigned int *)(addr+24));
6476 + err |= get_user(nop, (unsigned int *)(addr+28));
6477 + if (err)
6478 + break;
6479 +
6480 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
6481 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6482 + (or1 & 0xFFFFE000U) == 0x88112000U &&
6483 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
6484 + sllx == 0x89293020U &&
6485 + add == 0x8A010005U &&
6486 + jmpl == 0x89C14000U &&
6487 + nop == 0x01000000U)
6488 + {
6489 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6490 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6491 + regs->u_regs[UREG_G4] <<= 32;
6492 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6493 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
6494 + regs->u_regs[UREG_G4] = addr + 24;
6495 + addr = regs->u_regs[UREG_G5];
6496 + regs->tpc = addr;
6497 + regs->tnpc = addr+4;
6498 + return 3;
6499 + }
6500 + }
6501 + } while (0);
6502 +
6503 +#ifdef CONFIG_PAX_DLRESOLVE
6504 + do { /* PaX: unpatched PLT emulation step 2 */
6505 + unsigned int save, call, nop;
6506 +
6507 + err = get_user(save, (unsigned int *)(regs->tpc-4));
6508 + err |= get_user(call, (unsigned int *)regs->tpc);
6509 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
6510 + if (err)
6511 + break;
6512 +
6513 + if (save == 0x9DE3BFA8U &&
6514 + (call & 0xC0000000U) == 0x40000000U &&
6515 + nop == 0x01000000U)
6516 + {
6517 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6518 +
6519 + if (test_thread_flag(TIF_32BIT))
6520 + dl_resolve &= 0xFFFFFFFFUL;
6521 +
6522 + regs->u_regs[UREG_RETPC] = regs->tpc;
6523 + regs->tpc = dl_resolve;
6524 + regs->tnpc = dl_resolve+4;
6525 + return 3;
6526 + }
6527 + } while (0);
6528 +#endif
6529 +
6530 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
6531 + unsigned int sethi, ba, nop;
6532 +
6533 + err = get_user(sethi, (unsigned int *)regs->tpc);
6534 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6535 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6536 +
6537 + if (err)
6538 + break;
6539 +
6540 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6541 + (ba & 0xFFF00000U) == 0x30600000U &&
6542 + nop == 0x01000000U)
6543 + {
6544 + unsigned long addr;
6545 +
6546 + addr = (sethi & 0x003FFFFFU) << 10;
6547 + regs->u_regs[UREG_G1] = addr;
6548 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6549 +
6550 + if (test_thread_flag(TIF_32BIT))
6551 + addr &= 0xFFFFFFFFUL;
6552 +
6553 + regs->tpc = addr;
6554 + regs->tnpc = addr+4;
6555 + return 2;
6556 + }
6557 + } while (0);
6558 +
6559 +#endif
6560 +
6561 + return 1;
6562 +}
6563 +
6564 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6565 +{
6566 + unsigned long i;
6567 +
6568 + printk(KERN_ERR "PAX: bytes at PC: ");
6569 + for (i = 0; i < 8; i++) {
6570 + unsigned int c;
6571 + if (get_user(c, (unsigned int *)pc+i))
6572 + printk(KERN_CONT "???????? ");
6573 + else
6574 + printk(KERN_CONT "%08x ", c);
6575 + }
6576 + printk("\n");
6577 +}
6578 +#endif
6579 +
6580 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6581 {
6582 struct mm_struct *mm = current->mm;
6583 @@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6584 if (!vma)
6585 goto bad_area;
6586
6587 +#ifdef CONFIG_PAX_PAGEEXEC
6588 + /* PaX: detect ITLB misses on non-exec pages */
6589 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
6590 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
6591 + {
6592 + if (address != regs->tpc)
6593 + goto good_area;
6594 +
6595 + up_read(&mm->mmap_sem);
6596 + switch (pax_handle_fetch_fault(regs)) {
6597 +
6598 +#ifdef CONFIG_PAX_EMUPLT
6599 + case 2:
6600 + case 3:
6601 + return;
6602 +#endif
6603 +
6604 + }
6605 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
6606 + do_group_exit(SIGKILL);
6607 + }
6608 +#endif
6609 +
6610 /* Pure DTLB misses do not tell us whether the fault causing
6611 * load/store/atomic was a write or not, it only says that there
6612 * was no match. So in such a case we (carefully) read the
6613 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
6614 index f27d103..1b06377 100644
6615 --- a/arch/sparc/mm/hugetlbpage.c
6616 +++ b/arch/sparc/mm/hugetlbpage.c
6617 @@ -69,7 +69,7 @@ full_search:
6618 }
6619 return -ENOMEM;
6620 }
6621 - if (likely(!vma || addr + len <= vma->vm_start)) {
6622 + if (likely(check_heap_stack_gap(vma, addr, len))) {
6623 /*
6624 * Remember the place where we stopped the search:
6625 */
6626 @@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6627 /* make sure it can fit in the remaining address space */
6628 if (likely(addr > len)) {
6629 vma = find_vma(mm, addr-len);
6630 - if (!vma || addr <= vma->vm_start) {
6631 + if (check_heap_stack_gap(vma, addr - len, len)) {
6632 /* remember the address as a hint for next time */
6633 return (mm->free_area_cache = addr-len);
6634 }
6635 @@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6636 if (unlikely(mm->mmap_base < len))
6637 goto bottomup;
6638
6639 - addr = (mm->mmap_base-len) & HPAGE_MASK;
6640 + addr = mm->mmap_base - len;
6641
6642 do {
6643 + addr &= HPAGE_MASK;
6644 /*
6645 * Lookup failure means no vma is above this address,
6646 * else if new region fits below vma->vm_start,
6647 * return with success:
6648 */
6649 vma = find_vma(mm, addr);
6650 - if (likely(!vma || addr+len <= vma->vm_start)) {
6651 + if (likely(check_heap_stack_gap(vma, addr, len))) {
6652 /* remember the address as a hint for next time */
6653 return (mm->free_area_cache = addr);
6654 }
6655 @@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6656 mm->cached_hole_size = vma->vm_start - addr;
6657
6658 /* try just below the current vma->vm_start */
6659 - addr = (vma->vm_start-len) & HPAGE_MASK;
6660 - } while (likely(len < vma->vm_start));
6661 + addr = skip_heap_stack_gap(vma, len);
6662 + } while (!IS_ERR_VALUE(addr));
6663
6664 bottomup:
6665 /*
6666 @@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
6667 if (addr) {
6668 addr = ALIGN(addr, HPAGE_SIZE);
6669 vma = find_vma(mm, addr);
6670 - if (task_size - len >= addr &&
6671 - (!vma || addr + len <= vma->vm_start))
6672 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6673 return addr;
6674 }
6675 if (mm->get_unmapped_area == arch_get_unmapped_area)
6676 diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
6677 index dc7c3b1..34c0070 100644
6678 --- a/arch/sparc/mm/init_32.c
6679 +++ b/arch/sparc/mm/init_32.c
6680 @@ -317,6 +317,9 @@ extern void device_scan(void);
6681 pgprot_t PAGE_SHARED __read_mostly;
6682 EXPORT_SYMBOL(PAGE_SHARED);
6683
6684 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
6685 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
6686 +
6687 void __init paging_init(void)
6688 {
6689 switch(sparc_cpu_model) {
6690 @@ -345,17 +348,17 @@ void __init paging_init(void)
6691
6692 /* Initialize the protection map with non-constant, MMU dependent values. */
6693 protection_map[0] = PAGE_NONE;
6694 - protection_map[1] = PAGE_READONLY;
6695 - protection_map[2] = PAGE_COPY;
6696 - protection_map[3] = PAGE_COPY;
6697 + protection_map[1] = PAGE_READONLY_NOEXEC;
6698 + protection_map[2] = PAGE_COPY_NOEXEC;
6699 + protection_map[3] = PAGE_COPY_NOEXEC;
6700 protection_map[4] = PAGE_READONLY;
6701 protection_map[5] = PAGE_READONLY;
6702 protection_map[6] = PAGE_COPY;
6703 protection_map[7] = PAGE_COPY;
6704 protection_map[8] = PAGE_NONE;
6705 - protection_map[9] = PAGE_READONLY;
6706 - protection_map[10] = PAGE_SHARED;
6707 - protection_map[11] = PAGE_SHARED;
6708 + protection_map[9] = PAGE_READONLY_NOEXEC;
6709 + protection_map[10] = PAGE_SHARED_NOEXEC;
6710 + protection_map[11] = PAGE_SHARED_NOEXEC;
6711 protection_map[12] = PAGE_READONLY;
6712 protection_map[13] = PAGE_READONLY;
6713 protection_map[14] = PAGE_SHARED;
6714 diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
6715 index 509b1ff..bfd7118 100644
6716 --- a/arch/sparc/mm/srmmu.c
6717 +++ b/arch/sparc/mm/srmmu.c
6718 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
6719 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
6720 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6721 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6722 +
6723 +#ifdef CONFIG_PAX_PAGEEXEC
6724 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6725 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6726 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6727 +#endif
6728 +
6729 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6730 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6731
6732 diff --git a/arch/um/Makefile b/arch/um/Makefile
6733 index fc633db..5e1a1c2 100644
6734 --- a/arch/um/Makefile
6735 +++ b/arch/um/Makefile
6736 @@ -49,6 +49,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
6737 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
6738 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64
6739
6740 +ifdef CONSTIFY_PLUGIN
6741 +USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6742 +endif
6743 +
6744 include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH)
6745
6746 #This will adjust *FLAGS accordingly to the platform.
6747 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
6748 index 6c03acd..a5e0215 100644
6749 --- a/arch/um/include/asm/kmap_types.h
6750 +++ b/arch/um/include/asm/kmap_types.h
6751 @@ -23,6 +23,7 @@ enum km_type {
6752 KM_IRQ1,
6753 KM_SOFTIRQ0,
6754 KM_SOFTIRQ1,
6755 + KM_CLEARPAGE,
6756 KM_TYPE_NR
6757 };
6758
6759 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
6760 index 4cc9b6c..02e5029 100644
6761 --- a/arch/um/include/asm/page.h
6762 +++ b/arch/um/include/asm/page.h
6763 @@ -14,6 +14,9 @@
6764 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6765 #define PAGE_MASK (~(PAGE_SIZE-1))
6766
6767 +#define ktla_ktva(addr) (addr)
6768 +#define ktva_ktla(addr) (addr)
6769 +
6770 #ifndef __ASSEMBLY__
6771
6772 struct page;
6773 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
6774 index 4a28a15..654dc2a 100644
6775 --- a/arch/um/kernel/process.c
6776 +++ b/arch/um/kernel/process.c
6777 @@ -393,22 +393,6 @@ int singlestepping(void * t)
6778 return 2;
6779 }
6780
6781 -/*
6782 - * Only x86 and x86_64 have an arch_align_stack().
6783 - * All other arches have "#define arch_align_stack(x) (x)"
6784 - * in their asm/system.h
6785 - * As this is included in UML from asm-um/system-generic.h,
6786 - * we can use it to behave as the subarch does.
6787 - */
6788 -#ifndef arch_align_stack
6789 -unsigned long arch_align_stack(unsigned long sp)
6790 -{
6791 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6792 - sp -= get_random_int() % 8192;
6793 - return sp & ~0xf;
6794 -}
6795 -#endif
6796 -
6797 unsigned long get_wchan(struct task_struct *p)
6798 {
6799 unsigned long stack_page, sp, ip;
6800 diff --git a/arch/um/sys-i386/shared/sysdep/system.h b/arch/um/sys-i386/shared/sysdep/system.h
6801 index d1b93c4..ae1b7fd 100644
6802 --- a/arch/um/sys-i386/shared/sysdep/system.h
6803 +++ b/arch/um/sys-i386/shared/sysdep/system.h
6804 @@ -17,7 +17,7 @@
6805 # define AT_VECTOR_SIZE_ARCH 1
6806 #endif
6807
6808 -extern unsigned long arch_align_stack(unsigned long sp);
6809 +#define arch_align_stack(x) ((x) & ~0xfUL)
6810
6811 void default_idle(void);
6812
6813 diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
6814 index 857ca0b..9a2669d 100644
6815 --- a/arch/um/sys-i386/syscalls.c
6816 +++ b/arch/um/sys-i386/syscalls.c
6817 @@ -11,6 +11,21 @@
6818 #include "asm/uaccess.h"
6819 #include "asm/unistd.h"
6820
6821 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
6822 +{
6823 + unsigned long pax_task_size = TASK_SIZE;
6824 +
6825 +#ifdef CONFIG_PAX_SEGMEXEC
6826 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
6827 + pax_task_size = SEGMEXEC_TASK_SIZE;
6828 +#endif
6829 +
6830 + if (len > pax_task_size || addr > pax_task_size - len)
6831 + return -EINVAL;
6832 +
6833 + return 0;
6834 +}
6835 +
6836 /*
6837 * Perform the select(nd, in, out, ex, tv) and mmap() system
6838 * calls. Linux/i386 didn't use to be able to handle more than
6839 diff --git a/arch/um/sys-x86_64/shared/sysdep/system.h b/arch/um/sys-x86_64/shared/sysdep/system.h
6840 index d1b93c4..ae1b7fd 100644
6841 --- a/arch/um/sys-x86_64/shared/sysdep/system.h
6842 +++ b/arch/um/sys-x86_64/shared/sysdep/system.h
6843 @@ -17,7 +17,7 @@
6844 # define AT_VECTOR_SIZE_ARCH 1
6845 #endif
6846
6847 -extern unsigned long arch_align_stack(unsigned long sp);
6848 +#define arch_align_stack(x) ((x) & ~0xfUL)
6849
6850 void default_idle(void);
6851
6852 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
6853 index 73ae02a..f932de5 100644
6854 --- a/arch/x86/Kconfig
6855 +++ b/arch/x86/Kconfig
6856 @@ -223,7 +223,7 @@ config X86_TRAMPOLINE
6857
6858 config X86_32_LAZY_GS
6859 def_bool y
6860 - depends on X86_32 && !CC_STACKPROTECTOR
6861 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
6862
6863 config KTIME_SCALAR
6864 def_bool X86_32
6865 @@ -1008,7 +1008,7 @@ choice
6866
6867 config NOHIGHMEM
6868 bool "off"
6869 - depends on !X86_NUMAQ
6870 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
6871 ---help---
6872 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
6873 However, the address space of 32-bit x86 processors is only 4
6874 @@ -1045,7 +1045,7 @@ config NOHIGHMEM
6875
6876 config HIGHMEM4G
6877 bool "4GB"
6878 - depends on !X86_NUMAQ
6879 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
6880 ---help---
6881 Select this if you have a 32-bit processor and between 1 and 4
6882 gigabytes of physical RAM.
6883 @@ -1099,7 +1099,7 @@ config PAGE_OFFSET
6884 hex
6885 default 0xB0000000 if VMSPLIT_3G_OPT
6886 default 0x80000000 if VMSPLIT_2G
6887 - default 0x78000000 if VMSPLIT_2G_OPT
6888 + default 0x70000000 if VMSPLIT_2G_OPT
6889 default 0x40000000 if VMSPLIT_1G
6890 default 0xC0000000
6891 depends on X86_32
6892 @@ -1460,6 +1460,7 @@ config SECCOMP
6893
6894 config CC_STACKPROTECTOR
6895 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
6896 + depends on X86_64 || !PAX_MEMORY_UDEREF
6897 ---help---
6898 This option turns on the -fstack-protector GCC feature. This
6899 feature puts, at the beginning of functions, a canary value on
6900 @@ -1517,6 +1518,7 @@ config KEXEC_JUMP
6901 config PHYSICAL_START
6902 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
6903 default "0x1000000"
6904 + range 0x400000 0x40000000
6905 ---help---
6906 This gives the physical address where the kernel is loaded.
6907
6908 @@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
6909 hex
6910 prompt "Alignment value to which kernel should be aligned" if X86_32
6911 default "0x1000000"
6912 + range 0x400000 0x1000000 if PAX_KERNEXEC
6913 range 0x2000 0x1000000
6914 ---help---
6915 This value puts the alignment restrictions on physical address
6916 @@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
6917 Say N if you want to disable CPU hotplug.
6918
6919 config COMPAT_VDSO
6920 - def_bool y
6921 + def_bool n
6922 prompt "Compat VDSO support"
6923 depends on X86_32 || IA32_EMULATION
6924 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
6925 ---help---
6926 Map the 32-bit VDSO to the predictable old-style address too.
6927 ---help---
6928 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
6929 index 0e566103..1a6b57e 100644
6930 --- a/arch/x86/Kconfig.cpu
6931 +++ b/arch/x86/Kconfig.cpu
6932 @@ -340,7 +340,7 @@ config X86_PPRO_FENCE
6933
6934 config X86_F00F_BUG
6935 def_bool y
6936 - depends on M586MMX || M586TSC || M586 || M486 || M386
6937 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
6938
6939 config X86_WP_WORKS_OK
6940 def_bool y
6941 @@ -360,7 +360,7 @@ config X86_POPAD_OK
6942
6943 config X86_ALIGNMENT_16
6944 def_bool y
6945 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
6946 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
6947
6948 config X86_INTEL_USERCOPY
6949 def_bool y
6950 @@ -406,7 +406,7 @@ config X86_CMPXCHG64
6951 # generates cmov.
6952 config X86_CMOV
6953 def_bool y
6954 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
6955 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
6956
6957 config X86_MINIMUM_CPU_FAMILY
6958 int
6959 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
6960 index d105f29..c928727 100644
6961 --- a/arch/x86/Kconfig.debug
6962 +++ b/arch/x86/Kconfig.debug
6963 @@ -99,7 +99,7 @@ config X86_PTDUMP
6964 config DEBUG_RODATA
6965 bool "Write protect kernel read-only data structures"
6966 default y
6967 - depends on DEBUG_KERNEL
6968 + depends on DEBUG_KERNEL && BROKEN
6969 ---help---
6970 Mark the kernel read-only data as write-protected in the pagetables,
6971 in order to catch accidental (and incorrect) writes to such const
6972 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
6973 index d2d24c9..0f21f8d 100644
6974 --- a/arch/x86/Makefile
6975 +++ b/arch/x86/Makefile
6976 @@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
6977 else
6978 BITS := 64
6979 UTS_MACHINE := x86_64
6980 + biarch := $(call cc-option,-m64)
6981 CHECKFLAGS += -D__x86_64__ -m64
6982
6983 KBUILD_AFLAGS += -m64
6984 @@ -189,3 +190,12 @@ define archhelp
6985 echo ' FDARGS="..." arguments for the booted kernel'
6986 echo ' FDINITRD=file initrd for the booted kernel'
6987 endef
6988 +
6989 +define OLD_LD
6990 +
6991 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
6992 +*** Please upgrade your binutils to 2.18 or newer
6993 +endef
6994 +
6995 +archprepare:
6996 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
6997 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
6998 index ec749c2..bbb5319 100644
6999 --- a/arch/x86/boot/Makefile
7000 +++ b/arch/x86/boot/Makefile
7001 @@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7002 $(call cc-option, -fno-stack-protector) \
7003 $(call cc-option, -mpreferred-stack-boundary=2)
7004 KBUILD_CFLAGS += $(call cc-option, -m32)
7005 +ifdef CONSTIFY_PLUGIN
7006 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7007 +endif
7008 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7009 GCOV_PROFILE := n
7010
7011 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7012 index 878e4b9..20537ab 100644
7013 --- a/arch/x86/boot/bitops.h
7014 +++ b/arch/x86/boot/bitops.h
7015 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7016 u8 v;
7017 const u32 *p = (const u32 *)addr;
7018
7019 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7020 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7021 return v;
7022 }
7023
7024 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7025
7026 static inline void set_bit(int nr, void *addr)
7027 {
7028 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7029 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7030 }
7031
7032 #endif /* BOOT_BITOPS_H */
7033 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7034 index 98239d2..f40214c 100644
7035 --- a/arch/x86/boot/boot.h
7036 +++ b/arch/x86/boot/boot.h
7037 @@ -82,7 +82,7 @@ static inline void io_delay(void)
7038 static inline u16 ds(void)
7039 {
7040 u16 seg;
7041 - asm("movw %%ds,%0" : "=rm" (seg));
7042 + asm volatile("movw %%ds,%0" : "=rm" (seg));
7043 return seg;
7044 }
7045
7046 @@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7047 static inline int memcmp(const void *s1, const void *s2, size_t len)
7048 {
7049 u8 diff;
7050 - asm("repe; cmpsb; setnz %0"
7051 + asm volatile("repe; cmpsb; setnz %0"
7052 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7053 return diff;
7054 }
7055 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7056 index f8ed065..5bf5ff3 100644
7057 --- a/arch/x86/boot/compressed/Makefile
7058 +++ b/arch/x86/boot/compressed/Makefile
7059 @@ -13,6 +13,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7060 KBUILD_CFLAGS += $(cflags-y)
7061 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7062 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7063 +ifdef CONSTIFY_PLUGIN
7064 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7065 +endif
7066
7067 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7068 GCOV_PROFILE := n
7069 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7070 index f543b70..b60fba8 100644
7071 --- a/arch/x86/boot/compressed/head_32.S
7072 +++ b/arch/x86/boot/compressed/head_32.S
7073 @@ -76,7 +76,7 @@ ENTRY(startup_32)
7074 notl %eax
7075 andl %eax, %ebx
7076 #else
7077 - movl $LOAD_PHYSICAL_ADDR, %ebx
7078 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7079 #endif
7080
7081 /* Target address to relocate to for decompression */
7082 @@ -149,7 +149,7 @@ relocated:
7083 * and where it was actually loaded.
7084 */
7085 movl %ebp, %ebx
7086 - subl $LOAD_PHYSICAL_ADDR, %ebx
7087 + subl $____LOAD_PHYSICAL_ADDR, %ebx
7088 jz 2f /* Nothing to be done if loaded at compiled addr. */
7089 /*
7090 * Process relocations.
7091 @@ -157,8 +157,7 @@ relocated:
7092
7093 1: subl $4, %edi
7094 movl (%edi), %ecx
7095 - testl %ecx, %ecx
7096 - jz 2f
7097 + jecxz 2f
7098 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7099 jmp 1b
7100 2:
7101 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7102 index 077e1b6..2c6b13b 100644
7103 --- a/arch/x86/boot/compressed/head_64.S
7104 +++ b/arch/x86/boot/compressed/head_64.S
7105 @@ -91,7 +91,7 @@ ENTRY(startup_32)
7106 notl %eax
7107 andl %eax, %ebx
7108 #else
7109 - movl $LOAD_PHYSICAL_ADDR, %ebx
7110 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7111 #endif
7112
7113 /* Target address to relocate to for decompression */
7114 @@ -183,7 +183,7 @@ no_longmode:
7115 hlt
7116 jmp 1b
7117
7118 -#include "../../kernel/verify_cpu_64.S"
7119 +#include "../../kernel/verify_cpu.S"
7120
7121 /*
7122 * Be careful here startup_64 needs to be at a predictable
7123 @@ -234,7 +234,7 @@ ENTRY(startup_64)
7124 notq %rax
7125 andq %rax, %rbp
7126 #else
7127 - movq $LOAD_PHYSICAL_ADDR, %rbp
7128 + movq $____LOAD_PHYSICAL_ADDR, %rbp
7129 #endif
7130
7131 /* Target address to relocate to for decompression */
7132 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7133 index 842b2a3..f00178b 100644
7134 --- a/arch/x86/boot/compressed/misc.c
7135 +++ b/arch/x86/boot/compressed/misc.c
7136 @@ -288,7 +288,7 @@ static void parse_elf(void *output)
7137 case PT_LOAD:
7138 #ifdef CONFIG_RELOCATABLE
7139 dest = output;
7140 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7141 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7142 #else
7143 dest = (void *)(phdr->p_paddr);
7144 #endif
7145 @@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7146 error("Destination address too large");
7147 #endif
7148 #ifndef CONFIG_RELOCATABLE
7149 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7150 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7151 error("Wrong destination address");
7152 #endif
7153
7154 diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
7155 index bcbd36c..b1754af 100644
7156 --- a/arch/x86/boot/compressed/mkpiggy.c
7157 +++ b/arch/x86/boot/compressed/mkpiggy.c
7158 @@ -74,7 +74,7 @@ int main(int argc, char *argv[])
7159
7160 offs = (olen > ilen) ? olen - ilen : 0;
7161 offs += olen >> 12; /* Add 8 bytes for each 32K block */
7162 - offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
7163 + offs += 64*1024; /* Add 64K bytes slack */
7164 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
7165
7166 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
7167 diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
7168 index bbeb0c3..f5167ab 100644
7169 --- a/arch/x86/boot/compressed/relocs.c
7170 +++ b/arch/x86/boot/compressed/relocs.c
7171 @@ -10,8 +10,11 @@
7172 #define USE_BSD
7173 #include <endian.h>
7174
7175 +#include "../../../../include/linux/autoconf.h"
7176 +
7177 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
7178 static Elf32_Ehdr ehdr;
7179 +static Elf32_Phdr *phdr;
7180 static unsigned long reloc_count, reloc_idx;
7181 static unsigned long *relocs;
7182
7183 @@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
7184
7185 static int is_safe_abs_reloc(const char* sym_name)
7186 {
7187 - int i;
7188 + unsigned int i;
7189
7190 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
7191 if (!strcmp(sym_name, safe_abs_relocs[i]))
7192 @@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
7193 }
7194 }
7195
7196 +static void read_phdrs(FILE *fp)
7197 +{
7198 + unsigned int i;
7199 +
7200 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
7201 + if (!phdr) {
7202 + die("Unable to allocate %d program headers\n",
7203 + ehdr.e_phnum);
7204 + }
7205 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
7206 + die("Seek to %d failed: %s\n",
7207 + ehdr.e_phoff, strerror(errno));
7208 + }
7209 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
7210 + die("Cannot read ELF program headers: %s\n",
7211 + strerror(errno));
7212 + }
7213 + for(i = 0; i < ehdr.e_phnum; i++) {
7214 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
7215 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
7216 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
7217 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
7218 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
7219 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
7220 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
7221 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
7222 + }
7223 +
7224 +}
7225 +
7226 static void read_shdrs(FILE *fp)
7227 {
7228 - int i;
7229 + unsigned int i;
7230 Elf32_Shdr shdr;
7231
7232 secs = calloc(ehdr.e_shnum, sizeof(struct section));
7233 @@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
7234
7235 static void read_strtabs(FILE *fp)
7236 {
7237 - int i;
7238 + unsigned int i;
7239 for (i = 0; i < ehdr.e_shnum; i++) {
7240 struct section *sec = &secs[i];
7241 if (sec->shdr.sh_type != SHT_STRTAB) {
7242 @@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
7243
7244 static void read_symtabs(FILE *fp)
7245 {
7246 - int i,j;
7247 + unsigned int i,j;
7248 for (i = 0; i < ehdr.e_shnum; i++) {
7249 struct section *sec = &secs[i];
7250 if (sec->shdr.sh_type != SHT_SYMTAB) {
7251 @@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
7252
7253 static void read_relocs(FILE *fp)
7254 {
7255 - int i,j;
7256 + unsigned int i,j;
7257 + uint32_t base;
7258 +
7259 for (i = 0; i < ehdr.e_shnum; i++) {
7260 struct section *sec = &secs[i];
7261 if (sec->shdr.sh_type != SHT_REL) {
7262 @@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
7263 die("Cannot read symbol table: %s\n",
7264 strerror(errno));
7265 }
7266 + base = 0;
7267 + for (j = 0; j < ehdr.e_phnum; j++) {
7268 + if (phdr[j].p_type != PT_LOAD )
7269 + continue;
7270 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
7271 + continue;
7272 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
7273 + break;
7274 + }
7275 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
7276 Elf32_Rel *rel = &sec->reltab[j];
7277 - rel->r_offset = elf32_to_cpu(rel->r_offset);
7278 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
7279 rel->r_info = elf32_to_cpu(rel->r_info);
7280 }
7281 }
7282 @@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
7283
7284 static void print_absolute_symbols(void)
7285 {
7286 - int i;
7287 + unsigned int i;
7288 printf("Absolute symbols\n");
7289 printf(" Num: Value Size Type Bind Visibility Name\n");
7290 for (i = 0; i < ehdr.e_shnum; i++) {
7291 struct section *sec = &secs[i];
7292 char *sym_strtab;
7293 Elf32_Sym *sh_symtab;
7294 - int j;
7295 + unsigned int j;
7296
7297 if (sec->shdr.sh_type != SHT_SYMTAB) {
7298 continue;
7299 @@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
7300
7301 static void print_absolute_relocs(void)
7302 {
7303 - int i, printed = 0;
7304 + unsigned int i, printed = 0;
7305
7306 for (i = 0; i < ehdr.e_shnum; i++) {
7307 struct section *sec = &secs[i];
7308 struct section *sec_applies, *sec_symtab;
7309 char *sym_strtab;
7310 Elf32_Sym *sh_symtab;
7311 - int j;
7312 + unsigned int j;
7313 if (sec->shdr.sh_type != SHT_REL) {
7314 continue;
7315 }
7316 @@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
7317
7318 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7319 {
7320 - int i;
7321 + unsigned int i;
7322 /* Walk through the relocations */
7323 for (i = 0; i < ehdr.e_shnum; i++) {
7324 char *sym_strtab;
7325 Elf32_Sym *sh_symtab;
7326 struct section *sec_applies, *sec_symtab;
7327 - int j;
7328 + unsigned int j;
7329 struct section *sec = &secs[i];
7330
7331 if (sec->shdr.sh_type != SHT_REL) {
7332 @@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7333 if (sym->st_shndx == SHN_ABS) {
7334 continue;
7335 }
7336 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
7337 + if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
7338 + continue;
7339 +
7340 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
7341 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
7342 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
7343 + continue;
7344 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
7345 + continue;
7346 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
7347 + continue;
7348 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
7349 + continue;
7350 +#endif
7351 if (r_type == R_386_NONE || r_type == R_386_PC32) {
7352 /*
7353 * NONE can be ignored and and PC relative
7354 @@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, const void *vb)
7355
7356 static void emit_relocs(int as_text)
7357 {
7358 - int i;
7359 + unsigned int i;
7360 /* Count how many relocations I have and allocate space for them. */
7361 reloc_count = 0;
7362 walk_relocs(count_reloc);
7363 @@ -634,6 +693,7 @@ int main(int argc, char **argv)
7364 fname, strerror(errno));
7365 }
7366 read_ehdr(fp);
7367 + read_phdrs(fp);
7368 read_shdrs(fp);
7369 read_strtabs(fp);
7370 read_symtabs(fp);
7371 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
7372 index 4d3ff03..e4972ff 100644
7373 --- a/arch/x86/boot/cpucheck.c
7374 +++ b/arch/x86/boot/cpucheck.c
7375 @@ -74,7 +74,7 @@ static int has_fpu(void)
7376 u16 fcw = -1, fsw = -1;
7377 u32 cr0;
7378
7379 - asm("movl %%cr0,%0" : "=r" (cr0));
7380 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
7381 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
7382 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
7383 asm volatile("movl %0,%%cr0" : : "r" (cr0));
7384 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
7385 {
7386 u32 f0, f1;
7387
7388 - asm("pushfl ; "
7389 + asm volatile("pushfl ; "
7390 "pushfl ; "
7391 "popl %0 ; "
7392 "movl %0,%1 ; "
7393 @@ -115,7 +115,7 @@ static void get_flags(void)
7394 set_bit(X86_FEATURE_FPU, cpu.flags);
7395
7396 if (has_eflag(X86_EFLAGS_ID)) {
7397 - asm("cpuid"
7398 + asm volatile("cpuid"
7399 : "=a" (max_intel_level),
7400 "=b" (cpu_vendor[0]),
7401 "=d" (cpu_vendor[1]),
7402 @@ -124,7 +124,7 @@ static void get_flags(void)
7403
7404 if (max_intel_level >= 0x00000001 &&
7405 max_intel_level <= 0x0000ffff) {
7406 - asm("cpuid"
7407 + asm volatile("cpuid"
7408 : "=a" (tfms),
7409 "=c" (cpu.flags[4]),
7410 "=d" (cpu.flags[0])
7411 @@ -136,7 +136,7 @@ static void get_flags(void)
7412 cpu.model += ((tfms >> 16) & 0xf) << 4;
7413 }
7414
7415 - asm("cpuid"
7416 + asm volatile("cpuid"
7417 : "=a" (max_amd_level)
7418 : "a" (0x80000000)
7419 : "ebx", "ecx", "edx");
7420 @@ -144,7 +144,7 @@ static void get_flags(void)
7421 if (max_amd_level >= 0x80000001 &&
7422 max_amd_level <= 0x8000ffff) {
7423 u32 eax = 0x80000001;
7424 - asm("cpuid"
7425 + asm volatile("cpuid"
7426 : "+a" (eax),
7427 "=c" (cpu.flags[6]),
7428 "=d" (cpu.flags[1])
7429 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7430 u32 ecx = MSR_K7_HWCR;
7431 u32 eax, edx;
7432
7433 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7434 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7435 eax &= ~(1 << 15);
7436 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7437 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7438
7439 get_flags(); /* Make sure it really did something */
7440 err = check_flags();
7441 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7442 u32 ecx = MSR_VIA_FCR;
7443 u32 eax, edx;
7444
7445 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7446 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7447 eax |= (1<<1)|(1<<7);
7448 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7449 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7450
7451 set_bit(X86_FEATURE_CX8, cpu.flags);
7452 err = check_flags();
7453 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7454 u32 eax, edx;
7455 u32 level = 1;
7456
7457 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7458 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7459 - asm("cpuid"
7460 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7461 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7462 + asm volatile("cpuid"
7463 : "+a" (level), "=d" (cpu.flags[0])
7464 : : "ecx", "ebx");
7465 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7466 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7467
7468 err = check_flags();
7469 }
7470 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
7471 index b31cc54..8d69237 100644
7472 --- a/arch/x86/boot/header.S
7473 +++ b/arch/x86/boot/header.S
7474 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
7475 # single linked list of
7476 # struct setup_data
7477
7478 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
7479 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
7480
7481 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
7482 #define VO_INIT_SIZE (VO__end - VO__text)
7483 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
7484 index cae3feb..ff8ff2a 100644
7485 --- a/arch/x86/boot/memory.c
7486 +++ b/arch/x86/boot/memory.c
7487 @@ -19,7 +19,7 @@
7488
7489 static int detect_memory_e820(void)
7490 {
7491 - int count = 0;
7492 + unsigned int count = 0;
7493 struct biosregs ireg, oreg;
7494 struct e820entry *desc = boot_params.e820_map;
7495 static struct e820entry buf; /* static so it is zeroed */
7496 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
7497 index 11e8c6e..fdbb1ed 100644
7498 --- a/arch/x86/boot/video-vesa.c
7499 +++ b/arch/x86/boot/video-vesa.c
7500 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
7501
7502 boot_params.screen_info.vesapm_seg = oreg.es;
7503 boot_params.screen_info.vesapm_off = oreg.di;
7504 + boot_params.screen_info.vesapm_size = oreg.cx;
7505 }
7506
7507 /*
7508 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
7509 index d42da38..787cdf3 100644
7510 --- a/arch/x86/boot/video.c
7511 +++ b/arch/x86/boot/video.c
7512 @@ -90,7 +90,7 @@ static void store_mode_params(void)
7513 static unsigned int get_entry(void)
7514 {
7515 char entry_buf[4];
7516 - int i, len = 0;
7517 + unsigned int i, len = 0;
7518 int key;
7519 unsigned int v;
7520
7521 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
7522 index 5b577d5..3c1fed4 100644
7523 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
7524 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
7525 @@ -8,6 +8,8 @@
7526 * including this sentence is retained in full.
7527 */
7528
7529 +#include <asm/alternative-asm.h>
7530 +
7531 .extern crypto_ft_tab
7532 .extern crypto_it_tab
7533 .extern crypto_fl_tab
7534 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
7535 je B192; \
7536 leaq 32(r9),r9;
7537
7538 +#define ret pax_force_retaddr 0, 1; ret
7539 +
7540 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
7541 movq r1,r2; \
7542 movq r3,r4; \
7543 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
7544 index eb0566e..e3ebad8 100644
7545 --- a/arch/x86/crypto/aesni-intel_asm.S
7546 +++ b/arch/x86/crypto/aesni-intel_asm.S
7547 @@ -16,6 +16,7 @@
7548 */
7549
7550 #include <linux/linkage.h>
7551 +#include <asm/alternative-asm.h>
7552
7553 .text
7554
7555 @@ -52,6 +53,7 @@ _key_expansion_256a:
7556 pxor %xmm1, %xmm0
7557 movaps %xmm0, (%rcx)
7558 add $0x10, %rcx
7559 + pax_force_retaddr_bts
7560 ret
7561
7562 _key_expansion_192a:
7563 @@ -75,6 +77,7 @@ _key_expansion_192a:
7564 shufps $0b01001110, %xmm2, %xmm1
7565 movaps %xmm1, 16(%rcx)
7566 add $0x20, %rcx
7567 + pax_force_retaddr_bts
7568 ret
7569
7570 _key_expansion_192b:
7571 @@ -93,6 +96,7 @@ _key_expansion_192b:
7572
7573 movaps %xmm0, (%rcx)
7574 add $0x10, %rcx
7575 + pax_force_retaddr_bts
7576 ret
7577
7578 _key_expansion_256b:
7579 @@ -104,6 +108,7 @@ _key_expansion_256b:
7580 pxor %xmm1, %xmm2
7581 movaps %xmm2, (%rcx)
7582 add $0x10, %rcx
7583 + pax_force_retaddr_bts
7584 ret
7585
7586 /*
7587 @@ -239,7 +244,9 @@ ENTRY(aesni_set_key)
7588 cmp %rcx, %rdi
7589 jb .Ldec_key_loop
7590 xor %rax, %rax
7591 + pax_force_retaddr 0, 1
7592 ret
7593 +ENDPROC(aesni_set_key)
7594
7595 /*
7596 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
7597 @@ -249,7 +256,9 @@ ENTRY(aesni_enc)
7598 movups (INP), STATE # input
7599 call _aesni_enc1
7600 movups STATE, (OUTP) # output
7601 + pax_force_retaddr 0, 1
7602 ret
7603 +ENDPROC(aesni_enc)
7604
7605 /*
7606 * _aesni_enc1: internal ABI
7607 @@ -319,6 +328,7 @@ _aesni_enc1:
7608 movaps 0x70(TKEYP), KEY
7609 # aesenclast KEY, STATE # last round
7610 .byte 0x66, 0x0f, 0x38, 0xdd, 0xc2
7611 + pax_force_retaddr_bts
7612 ret
7613
7614 /*
7615 @@ -482,6 +492,7 @@ _aesni_enc4:
7616 .byte 0x66, 0x0f, 0x38, 0xdd, 0xea
7617 # aesenclast KEY, STATE4
7618 .byte 0x66, 0x0f, 0x38, 0xdd, 0xf2
7619 + pax_force_retaddr_bts
7620 ret
7621
7622 /*
7623 @@ -493,7 +504,9 @@ ENTRY(aesni_dec)
7624 movups (INP), STATE # input
7625 call _aesni_dec1
7626 movups STATE, (OUTP) #output
7627 + pax_force_retaddr 0, 1
7628 ret
7629 +ENDPROC(aesni_dec)
7630
7631 /*
7632 * _aesni_dec1: internal ABI
7633 @@ -563,6 +576,7 @@ _aesni_dec1:
7634 movaps 0x70(TKEYP), KEY
7635 # aesdeclast KEY, STATE # last round
7636 .byte 0x66, 0x0f, 0x38, 0xdf, 0xc2
7637 + pax_force_retaddr_bts
7638 ret
7639
7640 /*
7641 @@ -726,6 +740,7 @@ _aesni_dec4:
7642 .byte 0x66, 0x0f, 0x38, 0xdf, 0xea
7643 # aesdeclast KEY, STATE4
7644 .byte 0x66, 0x0f, 0x38, 0xdf, 0xf2
7645 + pax_force_retaddr_bts
7646 ret
7647
7648 /*
7649 @@ -769,7 +784,9 @@ ENTRY(aesni_ecb_enc)
7650 cmp $16, LEN
7651 jge .Lecb_enc_loop1
7652 .Lecb_enc_ret:
7653 + pax_force_retaddr 0, 1
7654 ret
7655 +ENDPROC(aesni_ecb_enc)
7656
7657 /*
7658 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7659 @@ -813,7 +830,9 @@ ENTRY(aesni_ecb_dec)
7660 cmp $16, LEN
7661 jge .Lecb_dec_loop1
7662 .Lecb_dec_ret:
7663 + pax_force_retaddr 0, 1
7664 ret
7665 +ENDPROC(aesni_ecb_dec)
7666
7667 /*
7668 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7669 @@ -837,7 +856,9 @@ ENTRY(aesni_cbc_enc)
7670 jge .Lcbc_enc_loop
7671 movups STATE, (IVP)
7672 .Lcbc_enc_ret:
7673 + pax_force_retaddr 0, 1
7674 ret
7675 +ENDPROC(aesni_cbc_enc)
7676
7677 /*
7678 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7679 @@ -894,4 +915,6 @@ ENTRY(aesni_cbc_dec)
7680 .Lcbc_dec_ret:
7681 movups IV, (IVP)
7682 .Lcbc_dec_just_ret:
7683 + pax_force_retaddr 0, 1
7684 ret
7685 +ENDPROC(aesni_cbc_dec)
7686 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7687 index 6214a9b..1f4fc9a 100644
7688 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
7689 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7690 @@ -1,3 +1,5 @@
7691 +#include <asm/alternative-asm.h>
7692 +
7693 # enter ECRYPT_encrypt_bytes
7694 .text
7695 .p2align 5
7696 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
7697 add %r11,%rsp
7698 mov %rdi,%rax
7699 mov %rsi,%rdx
7700 + pax_force_retaddr 0, 1
7701 ret
7702 # bytesatleast65:
7703 ._bytesatleast65:
7704 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
7705 add %r11,%rsp
7706 mov %rdi,%rax
7707 mov %rsi,%rdx
7708 + pax_force_retaddr
7709 ret
7710 # enter ECRYPT_ivsetup
7711 .text
7712 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
7713 add %r11,%rsp
7714 mov %rdi,%rax
7715 mov %rsi,%rdx
7716 + pax_force_retaddr
7717 ret
7718 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
7719 index 35974a5..5662ae2 100644
7720 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
7721 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
7722 @@ -21,6 +21,7 @@
7723 .text
7724
7725 #include <asm/asm-offsets.h>
7726 +#include <asm/alternative-asm.h>
7727
7728 #define a_offset 0
7729 #define b_offset 4
7730 @@ -269,6 +270,7 @@ twofish_enc_blk:
7731
7732 popq R1
7733 movq $1,%rax
7734 + pax_force_retaddr 0, 1
7735 ret
7736
7737 twofish_dec_blk:
7738 @@ -321,4 +323,5 @@ twofish_dec_blk:
7739
7740 popq R1
7741 movq $1,%rax
7742 + pax_force_retaddr 0, 1
7743 ret
7744 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
7745 index 14531ab..a89a0c0 100644
7746 --- a/arch/x86/ia32/ia32_aout.c
7747 +++ b/arch/x86/ia32/ia32_aout.c
7748 @@ -169,6 +169,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
7749 unsigned long dump_start, dump_size;
7750 struct user32 dump;
7751
7752 + memset(&dump, 0, sizeof(dump));
7753 +
7754 fs = get_fs();
7755 set_fs(KERNEL_DS);
7756 has_dumped = 1;
7757 @@ -218,12 +220,6 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
7758 dump_size = dump.u_ssize << PAGE_SHIFT;
7759 DUMP_WRITE(dump_start, dump_size);
7760 }
7761 - /*
7762 - * Finally dump the task struct. Not be used by gdb, but
7763 - * could be useful
7764 - */
7765 - set_fs(KERNEL_DS);
7766 - DUMP_WRITE(current, sizeof(*current));
7767 end_coredump:
7768 set_fs(fs);
7769 return has_dumped;
7770 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
7771 index 588a7aa..a3468b0 100644
7772 --- a/arch/x86/ia32/ia32_signal.c
7773 +++ b/arch/x86/ia32/ia32_signal.c
7774 @@ -167,7 +167,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
7775 }
7776 seg = get_fs();
7777 set_fs(KERNEL_DS);
7778 - ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
7779 + ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
7780 set_fs(seg);
7781 if (ret >= 0 && uoss_ptr) {
7782 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
7783 @@ -374,7 +374,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
7784 */
7785 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
7786 size_t frame_size,
7787 - void **fpstate)
7788 + void __user **fpstate)
7789 {
7790 unsigned long sp;
7791
7792 @@ -395,7 +395,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
7793
7794 if (used_math()) {
7795 sp = sp - sig_xstate_ia32_size;
7796 - *fpstate = (struct _fpstate_ia32 *) sp;
7797 + *fpstate = (struct _fpstate_ia32 __user *) sp;
7798 if (save_i387_xstate_ia32(*fpstate) < 0)
7799 return (void __user *) -1L;
7800 }
7801 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
7802 sp -= frame_size;
7803 /* Align the stack pointer according to the i386 ABI,
7804 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
7805 - sp = ((sp + 4) & -16ul) - 4;
7806 + sp = ((sp - 12) & -16ul) - 4;
7807 return (void __user *) sp;
7808 }
7809
7810 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
7811 * These are actually not used anymore, but left because some
7812 * gdb versions depend on them as a marker.
7813 */
7814 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
7815 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
7816 } put_user_catch(err);
7817
7818 if (err)
7819 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
7820 0xb8,
7821 __NR_ia32_rt_sigreturn,
7822 0x80cd,
7823 - 0,
7824 + 0
7825 };
7826
7827 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
7828 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
7829
7830 if (ka->sa.sa_flags & SA_RESTORER)
7831 restorer = ka->sa.sa_restorer;
7832 + else if (current->mm->context.vdso)
7833 + /* Return stub is in 32bit vsyscall page */
7834 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
7835 else
7836 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
7837 - rt_sigreturn);
7838 + restorer = &frame->retcode;
7839 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
7840
7841 /*
7842 * Not actually used anymore, but left because some gdb
7843 * versions need it.
7844 */
7845 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
7846 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
7847 } put_user_catch(err);
7848
7849 if (err)
7850 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
7851 index 4edd8eb..a558697 100644
7852 --- a/arch/x86/ia32/ia32entry.S
7853 +++ b/arch/x86/ia32/ia32entry.S
7854 @@ -13,7 +13,9 @@
7855 #include <asm/thread_info.h>
7856 #include <asm/segment.h>
7857 #include <asm/irqflags.h>
7858 +#include <asm/pgtable.h>
7859 #include <linux/linkage.h>
7860 +#include <asm/alternative-asm.h>
7861
7862 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
7863 #include <linux/elf-em.h>
7864 @@ -93,6 +95,32 @@ ENTRY(native_irq_enable_sysexit)
7865 ENDPROC(native_irq_enable_sysexit)
7866 #endif
7867
7868 + .macro pax_enter_kernel_user
7869 + pax_set_fptr_mask
7870 +#ifdef CONFIG_PAX_MEMORY_UDEREF
7871 + call pax_enter_kernel_user
7872 +#endif
7873 + .endm
7874 +
7875 + .macro pax_exit_kernel_user
7876 +#ifdef CONFIG_PAX_MEMORY_UDEREF
7877 + call pax_exit_kernel_user
7878 +#endif
7879 +#ifdef CONFIG_PAX_RANDKSTACK
7880 + pushq %rax
7881 + pushq %r11
7882 + call pax_randomize_kstack
7883 + popq %r11
7884 + popq %rax
7885 +#endif
7886 + .endm
7887 +
7888 +.macro pax_erase_kstack
7889 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
7890 + call pax_erase_kstack
7891 +#endif
7892 +.endm
7893 +
7894 /*
7895 * 32bit SYSENTER instruction entry.
7896 *
7897 @@ -119,12 +147,6 @@ ENTRY(ia32_sysenter_target)
7898 CFI_REGISTER rsp,rbp
7899 SWAPGS_UNSAFE_STACK
7900 movq PER_CPU_VAR(kernel_stack), %rsp
7901 - addq $(KERNEL_STACK_OFFSET),%rsp
7902 - /*
7903 - * No need to follow this irqs on/off section: the syscall
7904 - * disabled irqs, here we enable it straight after entry:
7905 - */
7906 - ENABLE_INTERRUPTS(CLBR_NONE)
7907 movl %ebp,%ebp /* zero extension */
7908 pushq $__USER32_DS
7909 CFI_ADJUST_CFA_OFFSET 8
7910 @@ -135,28 +157,41 @@ ENTRY(ia32_sysenter_target)
7911 pushfq
7912 CFI_ADJUST_CFA_OFFSET 8
7913 /*CFI_REL_OFFSET rflags,0*/
7914 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
7915 - CFI_REGISTER rip,r10
7916 + GET_THREAD_INFO(%r11)
7917 + movl TI_sysenter_return(%r11), %r11d
7918 + CFI_REGISTER rip,r11
7919 pushq $__USER32_CS
7920 CFI_ADJUST_CFA_OFFSET 8
7921 /*CFI_REL_OFFSET cs,0*/
7922 movl %eax, %eax
7923 - pushq %r10
7924 + pushq %r11
7925 CFI_ADJUST_CFA_OFFSET 8
7926 CFI_REL_OFFSET rip,0
7927 pushq %rax
7928 CFI_ADJUST_CFA_OFFSET 8
7929 cld
7930 SAVE_ARGS 0,0,1
7931 + pax_enter_kernel_user
7932 + /*
7933 + * No need to follow this irqs on/off section: the syscall
7934 + * disabled irqs, here we enable it straight after entry:
7935 + */
7936 + ENABLE_INTERRUPTS(CLBR_NONE)
7937 /* no need to do an access_ok check here because rbp has been
7938 32bit zero extended */
7939 +
7940 +#ifdef CONFIG_PAX_MEMORY_UDEREF
7941 + mov $PAX_USER_SHADOW_BASE,%r11
7942 + add %r11,%rbp
7943 +#endif
7944 +
7945 1: movl (%rbp),%ebp
7946 .section __ex_table,"a"
7947 .quad 1b,ia32_badarg
7948 .previous
7949 - GET_THREAD_INFO(%r10)
7950 - orl $TS_COMPAT,TI_status(%r10)
7951 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
7952 + GET_THREAD_INFO(%r11)
7953 + orl $TS_COMPAT,TI_status(%r11)
7954 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
7955 CFI_REMEMBER_STATE
7956 jnz sysenter_tracesys
7957 cmpq $(IA32_NR_syscalls-1),%rax
7958 @@ -166,13 +201,15 @@ sysenter_do_call:
7959 sysenter_dispatch:
7960 call *ia32_sys_call_table(,%rax,8)
7961 movq %rax,RAX-ARGOFFSET(%rsp)
7962 - GET_THREAD_INFO(%r10)
7963 + GET_THREAD_INFO(%r11)
7964 DISABLE_INTERRUPTS(CLBR_NONE)
7965 TRACE_IRQS_OFF
7966 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
7967 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
7968 jnz sysexit_audit
7969 sysexit_from_sys_call:
7970 - andl $~TS_COMPAT,TI_status(%r10)
7971 + pax_exit_kernel_user
7972 + pax_erase_kstack
7973 + andl $~TS_COMPAT,TI_status(%r11)
7974 /* clear IF, that popfq doesn't enable interrupts early */
7975 andl $~0x200,EFLAGS-R11(%rsp)
7976 movl RIP-R11(%rsp),%edx /* User %eip */
7977 @@ -200,6 +237,9 @@ sysexit_from_sys_call:
7978 movl %eax,%esi /* 2nd arg: syscall number */
7979 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
7980 call audit_syscall_entry
7981 +
7982 + pax_erase_kstack
7983 +
7984 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
7985 cmpq $(IA32_NR_syscalls-1),%rax
7986 ja ia32_badsys
7987 @@ -211,7 +251,7 @@ sysexit_from_sys_call:
7988 .endm
7989
7990 .macro auditsys_exit exit
7991 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
7992 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
7993 jnz ia32_ret_from_sys_call
7994 TRACE_IRQS_ON
7995 sti
7996 @@ -221,12 +261,12 @@ sysexit_from_sys_call:
7997 movzbl %al,%edi /* zero-extend that into %edi */
7998 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
7999 call audit_syscall_exit
8000 - GET_THREAD_INFO(%r10)
8001 + GET_THREAD_INFO(%r11)
8002 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
8003 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8004 cli
8005 TRACE_IRQS_OFF
8006 - testl %edi,TI_flags(%r10)
8007 + testl %edi,TI_flags(%r11)
8008 jz \exit
8009 CLEAR_RREGS -ARGOFFSET
8010 jmp int_with_check
8011 @@ -244,7 +284,7 @@ sysexit_audit:
8012
8013 sysenter_tracesys:
8014 #ifdef CONFIG_AUDITSYSCALL
8015 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8016 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8017 jz sysenter_auditsys
8018 #endif
8019 SAVE_REST
8020 @@ -252,6 +292,9 @@ sysenter_tracesys:
8021 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8022 movq %rsp,%rdi /* &pt_regs -> arg1 */
8023 call syscall_trace_enter
8024 +
8025 + pax_erase_kstack
8026 +
8027 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8028 RESTORE_REST
8029 cmpq $(IA32_NR_syscalls-1),%rax
8030 @@ -283,19 +326,20 @@ ENDPROC(ia32_sysenter_target)
8031 ENTRY(ia32_cstar_target)
8032 CFI_STARTPROC32 simple
8033 CFI_SIGNAL_FRAME
8034 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8035 + CFI_DEF_CFA rsp,0
8036 CFI_REGISTER rip,rcx
8037 /*CFI_REGISTER rflags,r11*/
8038 SWAPGS_UNSAFE_STACK
8039 movl %esp,%r8d
8040 CFI_REGISTER rsp,r8
8041 movq PER_CPU_VAR(kernel_stack),%rsp
8042 + SAVE_ARGS 8*6,1,1
8043 + pax_enter_kernel_user
8044 /*
8045 * No need to follow this irqs on/off section: the syscall
8046 * disabled irqs and here we enable it straight after entry:
8047 */
8048 ENABLE_INTERRUPTS(CLBR_NONE)
8049 - SAVE_ARGS 8,1,1
8050 movl %eax,%eax /* zero extension */
8051 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8052 movq %rcx,RIP-ARGOFFSET(%rsp)
8053 @@ -311,13 +355,19 @@ ENTRY(ia32_cstar_target)
8054 /* no need to do an access_ok check here because r8 has been
8055 32bit zero extended */
8056 /* hardware stack frame is complete now */
8057 +
8058 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8059 + mov $PAX_USER_SHADOW_BASE,%r11
8060 + add %r11,%r8
8061 +#endif
8062 +
8063 1: movl (%r8),%r9d
8064 .section __ex_table,"a"
8065 .quad 1b,ia32_badarg
8066 .previous
8067 - GET_THREAD_INFO(%r10)
8068 - orl $TS_COMPAT,TI_status(%r10)
8069 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8070 + GET_THREAD_INFO(%r11)
8071 + orl $TS_COMPAT,TI_status(%r11)
8072 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8073 CFI_REMEMBER_STATE
8074 jnz cstar_tracesys
8075 cmpq $IA32_NR_syscalls-1,%rax
8076 @@ -327,13 +377,15 @@ cstar_do_call:
8077 cstar_dispatch:
8078 call *ia32_sys_call_table(,%rax,8)
8079 movq %rax,RAX-ARGOFFSET(%rsp)
8080 - GET_THREAD_INFO(%r10)
8081 + GET_THREAD_INFO(%r11)
8082 DISABLE_INTERRUPTS(CLBR_NONE)
8083 TRACE_IRQS_OFF
8084 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
8085 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8086 jnz sysretl_audit
8087 sysretl_from_sys_call:
8088 - andl $~TS_COMPAT,TI_status(%r10)
8089 + pax_exit_kernel_user
8090 + pax_erase_kstack
8091 + andl $~TS_COMPAT,TI_status(%r11)
8092 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
8093 movl RIP-ARGOFFSET(%rsp),%ecx
8094 CFI_REGISTER rip,rcx
8095 @@ -361,7 +413,7 @@ sysretl_audit:
8096
8097 cstar_tracesys:
8098 #ifdef CONFIG_AUDITSYSCALL
8099 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8100 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8101 jz cstar_auditsys
8102 #endif
8103 xchgl %r9d,%ebp
8104 @@ -370,6 +422,9 @@ cstar_tracesys:
8105 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8106 movq %rsp,%rdi /* &pt_regs -> arg1 */
8107 call syscall_trace_enter
8108 +
8109 + pax_erase_kstack
8110 +
8111 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
8112 RESTORE_REST
8113 xchgl %ebp,%r9d
8114 @@ -415,11 +470,6 @@ ENTRY(ia32_syscall)
8115 CFI_REL_OFFSET rip,RIP-RIP
8116 PARAVIRT_ADJUST_EXCEPTION_FRAME
8117 SWAPGS
8118 - /*
8119 - * No need to follow this irqs on/off section: the syscall
8120 - * disabled irqs and here we enable it straight after entry:
8121 - */
8122 - ENABLE_INTERRUPTS(CLBR_NONE)
8123 movl %eax,%eax
8124 pushq %rax
8125 CFI_ADJUST_CFA_OFFSET 8
8126 @@ -427,9 +477,15 @@ ENTRY(ia32_syscall)
8127 /* note the registers are not zero extended to the sf.
8128 this could be a problem. */
8129 SAVE_ARGS 0,0,1
8130 - GET_THREAD_INFO(%r10)
8131 - orl $TS_COMPAT,TI_status(%r10)
8132 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8133 + pax_enter_kernel_user
8134 + /*
8135 + * No need to follow this irqs on/off section: the syscall
8136 + * disabled irqs and here we enable it straight after entry:
8137 + */
8138 + ENABLE_INTERRUPTS(CLBR_NONE)
8139 + GET_THREAD_INFO(%r11)
8140 + orl $TS_COMPAT,TI_status(%r11)
8141 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8142 jnz ia32_tracesys
8143 cmpq $(IA32_NR_syscalls-1),%rax
8144 ja ia32_badsys
8145 @@ -448,6 +504,9 @@ ia32_tracesys:
8146 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8147 movq %rsp,%rdi /* &pt_regs -> arg1 */
8148 call syscall_trace_enter
8149 +
8150 + pax_erase_kstack
8151 +
8152 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8153 RESTORE_REST
8154 cmpq $(IA32_NR_syscalls-1),%rax
8155 @@ -462,6 +521,7 @@ ia32_badsys:
8156
8157 quiet_ni_syscall:
8158 movq $-ENOSYS,%rax
8159 + pax_force_retaddr
8160 ret
8161 CFI_ENDPROC
8162
8163 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
8164 index 016218c..47ccbdd 100644
8165 --- a/arch/x86/ia32/sys_ia32.c
8166 +++ b/arch/x86/ia32/sys_ia32.c
8167 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
8168 */
8169 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
8170 {
8171 - typeof(ubuf->st_uid) uid = 0;
8172 - typeof(ubuf->st_gid) gid = 0;
8173 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
8174 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
8175 SET_UID(uid, stat->uid);
8176 SET_GID(gid, stat->gid);
8177 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
8178 @@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
8179 }
8180 set_fs(KERNEL_DS);
8181 ret = sys_rt_sigprocmask(how,
8182 - set ? (sigset_t __user *)&s : NULL,
8183 - oset ? (sigset_t __user *)&s : NULL,
8184 + set ? (sigset_t __force_user *)&s : NULL,
8185 + oset ? (sigset_t __force_user *)&s : NULL,
8186 sigsetsize);
8187 set_fs(old_fs);
8188 if (ret)
8189 @@ -371,7 +371,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
8190 mm_segment_t old_fs = get_fs();
8191
8192 set_fs(KERNEL_DS);
8193 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
8194 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
8195 set_fs(old_fs);
8196 if (put_compat_timespec(&t, interval))
8197 return -EFAULT;
8198 @@ -387,7 +387,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
8199 mm_segment_t old_fs = get_fs();
8200
8201 set_fs(KERNEL_DS);
8202 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
8203 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
8204 set_fs(old_fs);
8205 if (!ret) {
8206 switch (_NSIG_WORDS) {
8207 @@ -412,7 +412,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
8208 if (copy_siginfo_from_user32(&info, uinfo))
8209 return -EFAULT;
8210 set_fs(KERNEL_DS);
8211 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
8212 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
8213 set_fs(old_fs);
8214 return ret;
8215 }
8216 @@ -513,7 +513,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
8217 return -EFAULT;
8218
8219 set_fs(KERNEL_DS);
8220 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
8221 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
8222 count);
8223 set_fs(old_fs);
8224
8225 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
8226 index e2077d3..b7a8919 100644
8227 --- a/arch/x86/include/asm/alternative-asm.h
8228 +++ b/arch/x86/include/asm/alternative-asm.h
8229 @@ -8,10 +8,10 @@
8230
8231 #ifdef CONFIG_SMP
8232 .macro LOCK_PREFIX
8233 -1: lock
8234 +672: lock
8235 .section .smp_locks,"a"
8236 .align 4
8237 - X86_ALIGN 1b
8238 + X86_ALIGN 672b
8239 .previous
8240 .endm
8241 #else
8242 @@ -19,4 +19,43 @@
8243 .endm
8244 #endif
8245
8246 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
8247 + .macro pax_force_retaddr_bts rip=0
8248 + btsq $63,\rip(%rsp)
8249 + .endm
8250 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
8251 + .macro pax_force_retaddr rip=0, reload=0
8252 + btsq $63,\rip(%rsp)
8253 + .endm
8254 + .macro pax_force_fptr ptr
8255 + btsq $63,\ptr
8256 + .endm
8257 + .macro pax_set_fptr_mask
8258 + .endm
8259 +#endif
8260 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
8261 + .macro pax_force_retaddr rip=0, reload=0
8262 + .if \reload
8263 + pax_set_fptr_mask
8264 + .endif
8265 + orq %r10,\rip(%rsp)
8266 + .endm
8267 + .macro pax_force_fptr ptr
8268 + orq %r10,\ptr
8269 + .endm
8270 + .macro pax_set_fptr_mask
8271 + movabs $0x8000000000000000,%r10
8272 + .endm
8273 +#endif
8274 +#else
8275 + .macro pax_force_retaddr rip=0, reload=0
8276 + .endm
8277 + .macro pax_force_fptr ptr
8278 + .endm
8279 + .macro pax_force_retaddr_bts rip=0
8280 + .endm
8281 + .macro pax_set_fptr_mask
8282 + .endm
8283 +#endif
8284 +
8285 #endif /* __ASSEMBLY__ */
8286 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
8287 index c240efc..fdfadf3 100644
8288 --- a/arch/x86/include/asm/alternative.h
8289 +++ b/arch/x86/include/asm/alternative.h
8290 @@ -85,7 +85,7 @@ static inline void alternatives_smp_switch(int smp) {}
8291 " .byte 662b-661b\n" /* sourcelen */ \
8292 " .byte 664f-663f\n" /* replacementlen */ \
8293 ".previous\n" \
8294 - ".section .altinstr_replacement, \"ax\"\n" \
8295 + ".section .altinstr_replacement, \"a\"\n" \
8296 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
8297 ".previous"
8298
8299 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
8300 index 474d80d..1f97d58 100644
8301 --- a/arch/x86/include/asm/apic.h
8302 +++ b/arch/x86/include/asm/apic.h
8303 @@ -46,7 +46,7 @@ static inline void generic_apic_probe(void)
8304
8305 #ifdef CONFIG_X86_LOCAL_APIC
8306
8307 -extern unsigned int apic_verbosity;
8308 +extern int apic_verbosity;
8309 extern int local_apic_timer_c2_ok;
8310
8311 extern int disable_apic;
8312 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
8313 index 20370c6..a2eb9b0 100644
8314 --- a/arch/x86/include/asm/apm.h
8315 +++ b/arch/x86/include/asm/apm.h
8316 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
8317 __asm__ __volatile__(APM_DO_ZERO_SEGS
8318 "pushl %%edi\n\t"
8319 "pushl %%ebp\n\t"
8320 - "lcall *%%cs:apm_bios_entry\n\t"
8321 + "lcall *%%ss:apm_bios_entry\n\t"
8322 "setc %%al\n\t"
8323 "popl %%ebp\n\t"
8324 "popl %%edi\n\t"
8325 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
8326 __asm__ __volatile__(APM_DO_ZERO_SEGS
8327 "pushl %%edi\n\t"
8328 "pushl %%ebp\n\t"
8329 - "lcall *%%cs:apm_bios_entry\n\t"
8330 + "lcall *%%ss:apm_bios_entry\n\t"
8331 "setc %%bl\n\t"
8332 "popl %%ebp\n\t"
8333 "popl %%edi\n\t"
8334 diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h
8335 index dc5a667..939040c 100644
8336 --- a/arch/x86/include/asm/atomic_32.h
8337 +++ b/arch/x86/include/asm/atomic_32.h
8338 @@ -25,6 +25,17 @@ static inline int atomic_read(const atomic_t *v)
8339 }
8340
8341 /**
8342 + * atomic_read_unchecked - read atomic variable
8343 + * @v: pointer of type atomic_unchecked_t
8344 + *
8345 + * Atomically reads the value of @v.
8346 + */
8347 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8348 +{
8349 + return v->counter;
8350 +}
8351 +
8352 +/**
8353 * atomic_set - set atomic variable
8354 * @v: pointer of type atomic_t
8355 * @i: required value
8356 @@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *v, int i)
8357 }
8358
8359 /**
8360 + * atomic_set_unchecked - set atomic variable
8361 + * @v: pointer of type atomic_unchecked_t
8362 + * @i: required value
8363 + *
8364 + * Atomically sets the value of @v to @i.
8365 + */
8366 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8367 +{
8368 + v->counter = i;
8369 +}
8370 +
8371 +/**
8372 * atomic_add - add integer to atomic variable
8373 * @i: integer value to add
8374 * @v: pointer of type atomic_t
8375 @@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *v, int i)
8376 */
8377 static inline void atomic_add(int i, atomic_t *v)
8378 {
8379 - asm volatile(LOCK_PREFIX "addl %1,%0"
8380 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
8381 +
8382 +#ifdef CONFIG_PAX_REFCOUNT
8383 + "jno 0f\n"
8384 + LOCK_PREFIX "subl %1,%0\n"
8385 + "int $4\n0:\n"
8386 + _ASM_EXTABLE(0b, 0b)
8387 +#endif
8388 +
8389 + : "+m" (v->counter)
8390 + : "ir" (i));
8391 +}
8392 +
8393 +/**
8394 + * atomic_add_unchecked - add integer to atomic variable
8395 + * @i: integer value to add
8396 + * @v: pointer of type atomic_unchecked_t
8397 + *
8398 + * Atomically adds @i to @v.
8399 + */
8400 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
8401 +{
8402 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
8403 : "+m" (v->counter)
8404 : "ir" (i));
8405 }
8406 @@ -59,7 +104,29 @@ static inline void atomic_add(int i, atomic_t *v)
8407 */
8408 static inline void atomic_sub(int i, atomic_t *v)
8409 {
8410 - asm volatile(LOCK_PREFIX "subl %1,%0"
8411 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
8412 +
8413 +#ifdef CONFIG_PAX_REFCOUNT
8414 + "jno 0f\n"
8415 + LOCK_PREFIX "addl %1,%0\n"
8416 + "int $4\n0:\n"
8417 + _ASM_EXTABLE(0b, 0b)
8418 +#endif
8419 +
8420 + : "+m" (v->counter)
8421 + : "ir" (i));
8422 +}
8423 +
8424 +/**
8425 + * atomic_sub_unchecked - subtract integer from atomic variable
8426 + * @i: integer value to subtract
8427 + * @v: pointer of type atomic_unchecked_t
8428 + *
8429 + * Atomically subtracts @i from @v.
8430 + */
8431 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
8432 +{
8433 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
8434 : "+m" (v->counter)
8435 : "ir" (i));
8436 }
8437 @@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8438 {
8439 unsigned char c;
8440
8441 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
8442 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
8443 +
8444 +#ifdef CONFIG_PAX_REFCOUNT
8445 + "jno 0f\n"
8446 + LOCK_PREFIX "addl %2,%0\n"
8447 + "int $4\n0:\n"
8448 + _ASM_EXTABLE(0b, 0b)
8449 +#endif
8450 +
8451 + "sete %1\n"
8452 : "+m" (v->counter), "=qm" (c)
8453 : "ir" (i) : "memory");
8454 return c;
8455 @@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8456 */
8457 static inline void atomic_inc(atomic_t *v)
8458 {
8459 - asm volatile(LOCK_PREFIX "incl %0"
8460 + asm volatile(LOCK_PREFIX "incl %0\n"
8461 +
8462 +#ifdef CONFIG_PAX_REFCOUNT
8463 + "jno 0f\n"
8464 + LOCK_PREFIX "decl %0\n"
8465 + "int $4\n0:\n"
8466 + _ASM_EXTABLE(0b, 0b)
8467 +#endif
8468 +
8469 + : "+m" (v->counter));
8470 +}
8471 +
8472 +/**
8473 + * atomic_inc_unchecked - increment atomic variable
8474 + * @v: pointer of type atomic_unchecked_t
8475 + *
8476 + * Atomically increments @v by 1.
8477 + */
8478 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
8479 +{
8480 + asm volatile(LOCK_PREFIX "incl %0\n"
8481 : "+m" (v->counter));
8482 }
8483
8484 @@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *v)
8485 */
8486 static inline void atomic_dec(atomic_t *v)
8487 {
8488 - asm volatile(LOCK_PREFIX "decl %0"
8489 + asm volatile(LOCK_PREFIX "decl %0\n"
8490 +
8491 +#ifdef CONFIG_PAX_REFCOUNT
8492 + "jno 0f\n"
8493 + LOCK_PREFIX "incl %0\n"
8494 + "int $4\n0:\n"
8495 + _ASM_EXTABLE(0b, 0b)
8496 +#endif
8497 +
8498 + : "+m" (v->counter));
8499 +}
8500 +
8501 +/**
8502 + * atomic_dec_unchecked - decrement atomic variable
8503 + * @v: pointer of type atomic_unchecked_t
8504 + *
8505 + * Atomically decrements @v by 1.
8506 + */
8507 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
8508 +{
8509 + asm volatile(LOCK_PREFIX "decl %0\n"
8510 : "+m" (v->counter));
8511 }
8512
8513 @@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
8514 {
8515 unsigned char c;
8516
8517 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
8518 + asm volatile(LOCK_PREFIX "decl %0\n"
8519 +
8520 +#ifdef CONFIG_PAX_REFCOUNT
8521 + "jno 0f\n"
8522 + LOCK_PREFIX "incl %0\n"
8523 + "int $4\n0:\n"
8524 + _ASM_EXTABLE(0b, 0b)
8525 +#endif
8526 +
8527 + "sete %1\n"
8528 : "+m" (v->counter), "=qm" (c)
8529 : : "memory");
8530 return c != 0;
8531 @@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
8532 {
8533 unsigned char c;
8534
8535 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
8536 + asm volatile(LOCK_PREFIX "incl %0\n"
8537 +
8538 +#ifdef CONFIG_PAX_REFCOUNT
8539 + "jno 0f\n"
8540 + LOCK_PREFIX "decl %0\n"
8541 + "into\n0:\n"
8542 + _ASM_EXTABLE(0b, 0b)
8543 +#endif
8544 +
8545 + "sete %1\n"
8546 + : "+m" (v->counter), "=qm" (c)
8547 + : : "memory");
8548 + return c != 0;
8549 +}
8550 +
8551 +/**
8552 + * atomic_inc_and_test_unchecked - increment and test
8553 + * @v: pointer of type atomic_unchecked_t
8554 + *
8555 + * Atomically increments @v by 1
8556 + * and returns true if the result is zero, or false for all
8557 + * other cases.
8558 + */
8559 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
8560 +{
8561 + unsigned char c;
8562 +
8563 + asm volatile(LOCK_PREFIX "incl %0\n"
8564 + "sete %1\n"
8565 : "+m" (v->counter), "=qm" (c)
8566 : : "memory");
8567 return c != 0;
8568 @@ -156,7 +309,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
8569 {
8570 unsigned char c;
8571
8572 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
8573 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
8574 +
8575 +#ifdef CONFIG_PAX_REFCOUNT
8576 + "jno 0f\n"
8577 + LOCK_PREFIX "subl %2,%0\n"
8578 + "int $4\n0:\n"
8579 + _ASM_EXTABLE(0b, 0b)
8580 +#endif
8581 +
8582 + "sets %1\n"
8583 : "+m" (v->counter), "=qm" (c)
8584 : "ir" (i) : "memory");
8585 return c;
8586 @@ -179,7 +341,15 @@ static inline int atomic_add_return(int i, atomic_t *v)
8587 #endif
8588 /* Modern 486+ processor */
8589 __i = i;
8590 - asm volatile(LOCK_PREFIX "xaddl %0, %1"
8591 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
8592 +
8593 +#ifdef CONFIG_PAX_REFCOUNT
8594 + "jno 0f\n"
8595 + "movl %0, %1\n"
8596 + "int $4\n0:\n"
8597 + _ASM_EXTABLE(0b, 0b)
8598 +#endif
8599 +
8600 : "+r" (i), "+m" (v->counter)
8601 : : "memory");
8602 return i + __i;
8603 @@ -195,6 +365,38 @@ no_xadd: /* Legacy 386 processor */
8604 }
8605
8606 /**
8607 + * atomic_add_return_unchecked - add integer and return
8608 + * @v: pointer of type atomic_unchecked_t
8609 + * @i: integer value to add
8610 + *
8611 + * Atomically adds @i to @v and returns @i + @v
8612 + */
8613 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
8614 +{
8615 + int __i;
8616 +#ifdef CONFIG_M386
8617 + unsigned long flags;
8618 + if (unlikely(boot_cpu_data.x86 <= 3))
8619 + goto no_xadd;
8620 +#endif
8621 + /* Modern 486+ processor */
8622 + __i = i;
8623 + asm volatile(LOCK_PREFIX "xaddl %0, %1"
8624 + : "+r" (i), "+m" (v->counter)
8625 + : : "memory");
8626 + return i + __i;
8627 +
8628 +#ifdef CONFIG_M386
8629 +no_xadd: /* Legacy 386 processor */
8630 + local_irq_save(flags);
8631 + __i = atomic_read_unchecked(v);
8632 + atomic_set_unchecked(v, i + __i);
8633 + local_irq_restore(flags);
8634 + return i + __i;
8635 +#endif
8636 +}
8637 +
8638 +/**
8639 * atomic_sub_return - subtract integer and return
8640 * @v: pointer of type atomic_t
8641 * @i: integer value to subtract
8642 @@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
8643 return cmpxchg(&v->counter, old, new);
8644 }
8645
8646 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8647 +{
8648 + return cmpxchg(&v->counter, old, new);
8649 +}
8650 +
8651 static inline int atomic_xchg(atomic_t *v, int new)
8652 {
8653 return xchg(&v->counter, new);
8654 }
8655
8656 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8657 +{
8658 + return xchg(&v->counter, new);
8659 +}
8660 +
8661 /**
8662 * atomic_add_unless - add unless the number is already a given value
8663 * @v: pointer of type atomic_t
8664 @@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *v, int new)
8665 */
8666 static inline int atomic_add_unless(atomic_t *v, int a, int u)
8667 {
8668 - int c, old;
8669 + int c, old, new;
8670 c = atomic_read(v);
8671 for (;;) {
8672 - if (unlikely(c == (u)))
8673 + if (unlikely(c == u))
8674 break;
8675 - old = atomic_cmpxchg((v), c, c + (a));
8676 +
8677 + asm volatile("addl %2,%0\n"
8678 +
8679 +#ifdef CONFIG_PAX_REFCOUNT
8680 + "jno 0f\n"
8681 + "subl %2,%0\n"
8682 + "int $4\n0:\n"
8683 + _ASM_EXTABLE(0b, 0b)
8684 +#endif
8685 +
8686 + : "=r" (new)
8687 + : "0" (c), "ir" (a));
8688 +
8689 + old = atomic_cmpxchg(v, c, new);
8690 if (likely(old == c))
8691 break;
8692 c = old;
8693 }
8694 - return c != (u);
8695 + return c != u;
8696 }
8697
8698 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
8699
8700 #define atomic_inc_return(v) (atomic_add_return(1, v))
8701 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
8702 +{
8703 + return atomic_add_return_unchecked(1, v);
8704 +}
8705 #define atomic_dec_return(v) (atomic_sub_return(1, v))
8706
8707 /* These are x86-specific, used by some header files */
8708 @@ -266,9 +495,18 @@ typedef struct {
8709 u64 __aligned(8) counter;
8710 } atomic64_t;
8711
8712 +#ifdef CONFIG_PAX_REFCOUNT
8713 +typedef struct {
8714 + u64 __aligned(8) counter;
8715 +} atomic64_unchecked_t;
8716 +#else
8717 +typedef atomic64_t atomic64_unchecked_t;
8718 +#endif
8719 +
8720 #define ATOMIC64_INIT(val) { (val) }
8721
8722 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
8723 +extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
8724
8725 /**
8726 * atomic64_xchg - xchg atomic64 variable
8727 @@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
8728 * the old value.
8729 */
8730 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
8731 +extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
8732
8733 /**
8734 * atomic64_set - set atomic64 variable
8735 @@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
8736 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
8737
8738 /**
8739 + * atomic64_unchecked_set - set atomic64 variable
8740 + * @ptr: pointer to type atomic64_unchecked_t
8741 + * @new_val: value to assign
8742 + *
8743 + * Atomically sets the value of @ptr to @new_val.
8744 + */
8745 +extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
8746 +
8747 +/**
8748 * atomic64_read - read atomic64 variable
8749 * @ptr: pointer to type atomic64_t
8750 *
8751 @@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64_t *ptr)
8752 return res;
8753 }
8754
8755 -extern u64 atomic64_read(atomic64_t *ptr);
8756 +/**
8757 + * atomic64_read_unchecked - read atomic64 variable
8758 + * @ptr: pointer to type atomic64_unchecked_t
8759 + *
8760 + * Atomically reads the value of @ptr and returns it.
8761 + */
8762 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
8763 +{
8764 + u64 res;
8765 +
8766 + /*
8767 + * Note, we inline this atomic64_unchecked_t primitive because
8768 + * it only clobbers EAX/EDX and leaves the others
8769 + * untouched. We also (somewhat subtly) rely on the
8770 + * fact that cmpxchg8b returns the current 64-bit value
8771 + * of the memory location we are touching:
8772 + */
8773 + asm volatile(
8774 + "mov %%ebx, %%eax\n\t"
8775 + "mov %%ecx, %%edx\n\t"
8776 + LOCK_PREFIX "cmpxchg8b %1\n"
8777 + : "=&A" (res)
8778 + : "m" (*ptr)
8779 + );
8780 +
8781 + return res;
8782 +}
8783
8784 /**
8785 * atomic64_add_return - add and return
8786 @@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
8787 * Other variants with different arithmetic operators:
8788 */
8789 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
8790 +extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
8791 extern u64 atomic64_inc_return(atomic64_t *ptr);
8792 +extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
8793 extern u64 atomic64_dec_return(atomic64_t *ptr);
8794 +extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
8795
8796 /**
8797 * atomic64_add - add integer to atomic64 variable
8798 @@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_t *ptr);
8799 extern void atomic64_add(u64 delta, atomic64_t *ptr);
8800
8801 /**
8802 + * atomic64_add_unchecked - add integer to atomic64 variable
8803 + * @delta: integer value to add
8804 + * @ptr: pointer to type atomic64_unchecked_t
8805 + *
8806 + * Atomically adds @delta to @ptr.
8807 + */
8808 +extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
8809 +
8810 +/**
8811 * atomic64_sub - subtract the atomic64 variable
8812 * @delta: integer value to subtract
8813 * @ptr: pointer to type atomic64_t
8814 @@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atomic64_t *ptr);
8815 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
8816
8817 /**
8818 + * atomic64_sub_unchecked - subtract the atomic64 variable
8819 + * @delta: integer value to subtract
8820 + * @ptr: pointer to type atomic64_unchecked_t
8821 + *
8822 + * Atomically subtracts @delta from @ptr.
8823 + */
8824 +extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
8825 +
8826 +/**
8827 * atomic64_sub_and_test - subtract value from variable and test result
8828 * @delta: integer value to subtract
8829 * @ptr: pointer to type atomic64_t
8830 @@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
8831 extern void atomic64_inc(atomic64_t *ptr);
8832
8833 /**
8834 + * atomic64_inc_unchecked - increment atomic64 variable
8835 + * @ptr: pointer to type atomic64_unchecked_t
8836 + *
8837 + * Atomically increments @ptr by 1.
8838 + */
8839 +extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
8840 +
8841 +/**
8842 * atomic64_dec - decrement atomic64 variable
8843 * @ptr: pointer to type atomic64_t
8844 *
8845 @@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr);
8846 extern void atomic64_dec(atomic64_t *ptr);
8847
8848 /**
8849 + * atomic64_dec_unchecked - decrement atomic64 variable
8850 + * @ptr: pointer to type atomic64_unchecked_t
8851 + *
8852 + * Atomically decrements @ptr by 1.
8853 + */
8854 +extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
8855 +
8856 +/**
8857 * atomic64_dec_and_test - decrement and test
8858 * @ptr: pointer to type atomic64_t
8859 *
8860 diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h
8861 index d605dc2..fafd7bd 100644
8862 --- a/arch/x86/include/asm/atomic_64.h
8863 +++ b/arch/x86/include/asm/atomic_64.h
8864 @@ -24,6 +24,17 @@ static inline int atomic_read(const atomic_t *v)
8865 }
8866
8867 /**
8868 + * atomic_read_unchecked - read atomic variable
8869 + * @v: pointer of type atomic_unchecked_t
8870 + *
8871 + * Atomically reads the value of @v.
8872 + */
8873 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8874 +{
8875 + return v->counter;
8876 +}
8877 +
8878 +/**
8879 * atomic_set - set atomic variable
8880 * @v: pointer of type atomic_t
8881 * @i: required value
8882 @@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *v, int i)
8883 }
8884
8885 /**
8886 + * atomic_set_unchecked - set atomic variable
8887 + * @v: pointer of type atomic_unchecked_t
8888 + * @i: required value
8889 + *
8890 + * Atomically sets the value of @v to @i.
8891 + */
8892 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8893 +{
8894 + v->counter = i;
8895 +}
8896 +
8897 +/**
8898 * atomic_add - add integer to atomic variable
8899 * @i: integer value to add
8900 * @v: pointer of type atomic_t
8901 @@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *v, int i)
8902 */
8903 static inline void atomic_add(int i, atomic_t *v)
8904 {
8905 - asm volatile(LOCK_PREFIX "addl %1,%0"
8906 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
8907 +
8908 +#ifdef CONFIG_PAX_REFCOUNT
8909 + "jno 0f\n"
8910 + LOCK_PREFIX "subl %1,%0\n"
8911 + "int $4\n0:\n"
8912 + _ASM_EXTABLE(0b, 0b)
8913 +#endif
8914 +
8915 + : "=m" (v->counter)
8916 + : "ir" (i), "m" (v->counter));
8917 +}
8918 +
8919 +/**
8920 + * atomic_add_unchecked - add integer to atomic variable
8921 + * @i: integer value to add
8922 + * @v: pointer of type atomic_unchecked_t
8923 + *
8924 + * Atomically adds @i to @v.
8925 + */
8926 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
8927 +{
8928 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
8929 : "=m" (v->counter)
8930 : "ir" (i), "m" (v->counter));
8931 }
8932 @@ -58,7 +103,29 @@ static inline void atomic_add(int i, atomic_t *v)
8933 */
8934 static inline void atomic_sub(int i, atomic_t *v)
8935 {
8936 - asm volatile(LOCK_PREFIX "subl %1,%0"
8937 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
8938 +
8939 +#ifdef CONFIG_PAX_REFCOUNT
8940 + "jno 0f\n"
8941 + LOCK_PREFIX "addl %1,%0\n"
8942 + "int $4\n0:\n"
8943 + _ASM_EXTABLE(0b, 0b)
8944 +#endif
8945 +
8946 + : "=m" (v->counter)
8947 + : "ir" (i), "m" (v->counter));
8948 +}
8949 +
8950 +/**
8951 + * atomic_sub_unchecked - subtract the atomic variable
8952 + * @i: integer value to subtract
8953 + * @v: pointer of type atomic_unchecked_t
8954 + *
8955 + * Atomically subtracts @i from @v.
8956 + */
8957 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
8958 +{
8959 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
8960 : "=m" (v->counter)
8961 : "ir" (i), "m" (v->counter));
8962 }
8963 @@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8964 {
8965 unsigned char c;
8966
8967 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
8968 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
8969 +
8970 +#ifdef CONFIG_PAX_REFCOUNT
8971 + "jno 0f\n"
8972 + LOCK_PREFIX "addl %2,%0\n"
8973 + "int $4\n0:\n"
8974 + _ASM_EXTABLE(0b, 0b)
8975 +#endif
8976 +
8977 + "sete %1\n"
8978 : "=m" (v->counter), "=qm" (c)
8979 : "ir" (i), "m" (v->counter) : "memory");
8980 return c;
8981 @@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8982 */
8983 static inline void atomic_inc(atomic_t *v)
8984 {
8985 - asm volatile(LOCK_PREFIX "incl %0"
8986 + asm volatile(LOCK_PREFIX "incl %0\n"
8987 +
8988 +#ifdef CONFIG_PAX_REFCOUNT
8989 + "jno 0f\n"
8990 + LOCK_PREFIX "decl %0\n"
8991 + "int $4\n0:\n"
8992 + _ASM_EXTABLE(0b, 0b)
8993 +#endif
8994 +
8995 + : "=m" (v->counter)
8996 + : "m" (v->counter));
8997 +}
8998 +
8999 +/**
9000 + * atomic_inc_unchecked - increment atomic variable
9001 + * @v: pointer of type atomic_unchecked_t
9002 + *
9003 + * Atomically increments @v by 1.
9004 + */
9005 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9006 +{
9007 + asm volatile(LOCK_PREFIX "incl %0\n"
9008 : "=m" (v->counter)
9009 : "m" (v->counter));
9010 }
9011 @@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *v)
9012 */
9013 static inline void atomic_dec(atomic_t *v)
9014 {
9015 - asm volatile(LOCK_PREFIX "decl %0"
9016 + asm volatile(LOCK_PREFIX "decl %0\n"
9017 +
9018 +#ifdef CONFIG_PAX_REFCOUNT
9019 + "jno 0f\n"
9020 + LOCK_PREFIX "incl %0\n"
9021 + "int $4\n0:\n"
9022 + _ASM_EXTABLE(0b, 0b)
9023 +#endif
9024 +
9025 + : "=m" (v->counter)
9026 + : "m" (v->counter));
9027 +}
9028 +
9029 +/**
9030 + * atomic_dec_unchecked - decrement atomic variable
9031 + * @v: pointer of type atomic_unchecked_t
9032 + *
9033 + * Atomically decrements @v by 1.
9034 + */
9035 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9036 +{
9037 + asm volatile(LOCK_PREFIX "decl %0\n"
9038 : "=m" (v->counter)
9039 : "m" (v->counter));
9040 }
9041 @@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9042 {
9043 unsigned char c;
9044
9045 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
9046 + asm volatile(LOCK_PREFIX "decl %0\n"
9047 +
9048 +#ifdef CONFIG_PAX_REFCOUNT
9049 + "jno 0f\n"
9050 + LOCK_PREFIX "incl %0\n"
9051 + "int $4\n0:\n"
9052 + _ASM_EXTABLE(0b, 0b)
9053 +#endif
9054 +
9055 + "sete %1\n"
9056 : "=m" (v->counter), "=qm" (c)
9057 : "m" (v->counter) : "memory");
9058 return c != 0;
9059 @@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9060 {
9061 unsigned char c;
9062
9063 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
9064 + asm volatile(LOCK_PREFIX "incl %0\n"
9065 +
9066 +#ifdef CONFIG_PAX_REFCOUNT
9067 + "jno 0f\n"
9068 + LOCK_PREFIX "decl %0\n"
9069 + "int $4\n0:\n"
9070 + _ASM_EXTABLE(0b, 0b)
9071 +#endif
9072 +
9073 + "sete %1\n"
9074 + : "=m" (v->counter), "=qm" (c)
9075 + : "m" (v->counter) : "memory");
9076 + return c != 0;
9077 +}
9078 +
9079 +/**
9080 + * atomic_inc_and_test_unchecked - increment and test
9081 + * @v: pointer of type atomic_unchecked_t
9082 + *
9083 + * Atomically increments @v by 1
9084 + * and returns true if the result is zero, or false for all
9085 + * other cases.
9086 + */
9087 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9088 +{
9089 + unsigned char c;
9090 +
9091 + asm volatile(LOCK_PREFIX "incl %0\n"
9092 + "sete %1\n"
9093 : "=m" (v->counter), "=qm" (c)
9094 : "m" (v->counter) : "memory");
9095 return c != 0;
9096 @@ -157,7 +312,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9097 {
9098 unsigned char c;
9099
9100 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9101 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
9102 +
9103 +#ifdef CONFIG_PAX_REFCOUNT
9104 + "jno 0f\n"
9105 + LOCK_PREFIX "subl %2,%0\n"
9106 + "int $4\n0:\n"
9107 + _ASM_EXTABLE(0b, 0b)
9108 +#endif
9109 +
9110 + "sets %1\n"
9111 : "=m" (v->counter), "=qm" (c)
9112 : "ir" (i), "m" (v->counter) : "memory");
9113 return c;
9114 @@ -173,7 +337,31 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9115 static inline int atomic_add_return(int i, atomic_t *v)
9116 {
9117 int __i = i;
9118 - asm volatile(LOCK_PREFIX "xaddl %0, %1"
9119 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9120 +
9121 +#ifdef CONFIG_PAX_REFCOUNT
9122 + "jno 0f\n"
9123 + "movl %0, %1\n"
9124 + "int $4\n0:\n"
9125 + _ASM_EXTABLE(0b, 0b)
9126 +#endif
9127 +
9128 + : "+r" (i), "+m" (v->counter)
9129 + : : "memory");
9130 + return i + __i;
9131 +}
9132 +
9133 +/**
9134 + * atomic_add_return_unchecked - add and return
9135 + * @i: integer value to add
9136 + * @v: pointer of type atomic_unchecked_t
9137 + *
9138 + * Atomically adds @i to @v and returns @i + @v
9139 + */
9140 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9141 +{
9142 + int __i = i;
9143 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9144 : "+r" (i), "+m" (v->counter)
9145 : : "memory");
9146 return i + __i;
9147 @@ -185,6 +373,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
9148 }
9149
9150 #define atomic_inc_return(v) (atomic_add_return(1, v))
9151 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9152 +{
9153 + return atomic_add_return_unchecked(1, v);
9154 +}
9155 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9156
9157 /* The 64-bit atomic type */
9158 @@ -204,6 +396,18 @@ static inline long atomic64_read(const atomic64_t *v)
9159 }
9160
9161 /**
9162 + * atomic64_read_unchecked - read atomic64 variable
9163 + * @v: pointer of type atomic64_unchecked_t
9164 + *
9165 + * Atomically reads the value of @v.
9166 + * Doesn't imply a read memory barrier.
9167 + */
9168 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9169 +{
9170 + return v->counter;
9171 +}
9172 +
9173 +/**
9174 * atomic64_set - set atomic64 variable
9175 * @v: pointer to type atomic64_t
9176 * @i: required value
9177 @@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9178 }
9179
9180 /**
9181 + * atomic64_set_unchecked - set atomic64 variable
9182 + * @v: pointer to type atomic64_unchecked_t
9183 + * @i: required value
9184 + *
9185 + * Atomically sets the value of @v to @i.
9186 + */
9187 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9188 +{
9189 + v->counter = i;
9190 +}
9191 +
9192 +/**
9193 * atomic64_add - add integer to atomic64 variable
9194 * @i: integer value to add
9195 * @v: pointer to type atomic64_t
9196 @@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9197 */
9198 static inline void atomic64_add(long i, atomic64_t *v)
9199 {
9200 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
9201 +
9202 +#ifdef CONFIG_PAX_REFCOUNT
9203 + "jno 0f\n"
9204 + LOCK_PREFIX "subq %1,%0\n"
9205 + "int $4\n0:\n"
9206 + _ASM_EXTABLE(0b, 0b)
9207 +#endif
9208 +
9209 + : "=m" (v->counter)
9210 + : "er" (i), "m" (v->counter));
9211 +}
9212 +
9213 +/**
9214 + * atomic64_add_unchecked - add integer to atomic64 variable
9215 + * @i: integer value to add
9216 + * @v: pointer to type atomic64_unchecked_t
9217 + *
9218 + * Atomically adds @i to @v.
9219 + */
9220 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9221 +{
9222 asm volatile(LOCK_PREFIX "addq %1,%0"
9223 : "=m" (v->counter)
9224 : "er" (i), "m" (v->counter));
9225 @@ -238,7 +476,15 @@ static inline void atomic64_add(long i, atomic64_t *v)
9226 */
9227 static inline void atomic64_sub(long i, atomic64_t *v)
9228 {
9229 - asm volatile(LOCK_PREFIX "subq %1,%0"
9230 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9231 +
9232 +#ifdef CONFIG_PAX_REFCOUNT
9233 + "jno 0f\n"
9234 + LOCK_PREFIX "addq %1,%0\n"
9235 + "int $4\n0:\n"
9236 + _ASM_EXTABLE(0b, 0b)
9237 +#endif
9238 +
9239 : "=m" (v->counter)
9240 : "er" (i), "m" (v->counter));
9241 }
9242 @@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9243 {
9244 unsigned char c;
9245
9246 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9247 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
9248 +
9249 +#ifdef CONFIG_PAX_REFCOUNT
9250 + "jno 0f\n"
9251 + LOCK_PREFIX "addq %2,%0\n"
9252 + "int $4\n0:\n"
9253 + _ASM_EXTABLE(0b, 0b)
9254 +#endif
9255 +
9256 + "sete %1\n"
9257 : "=m" (v->counter), "=qm" (c)
9258 : "er" (i), "m" (v->counter) : "memory");
9259 return c;
9260 @@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9261 */
9262 static inline void atomic64_inc(atomic64_t *v)
9263 {
9264 + asm volatile(LOCK_PREFIX "incq %0\n"
9265 +
9266 +#ifdef CONFIG_PAX_REFCOUNT
9267 + "jno 0f\n"
9268 + LOCK_PREFIX "decq %0\n"
9269 + "int $4\n0:\n"
9270 + _ASM_EXTABLE(0b, 0b)
9271 +#endif
9272 +
9273 + : "=m" (v->counter)
9274 + : "m" (v->counter));
9275 +}
9276 +
9277 +/**
9278 + * atomic64_inc_unchecked - increment atomic64 variable
9279 + * @v: pointer to type atomic64_unchecked_t
9280 + *
9281 + * Atomically increments @v by 1.
9282 + */
9283 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9284 +{
9285 asm volatile(LOCK_PREFIX "incq %0"
9286 : "=m" (v->counter)
9287 : "m" (v->counter));
9288 @@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64_t *v)
9289 */
9290 static inline void atomic64_dec(atomic64_t *v)
9291 {
9292 - asm volatile(LOCK_PREFIX "decq %0"
9293 + asm volatile(LOCK_PREFIX "decq %0\n"
9294 +
9295 +#ifdef CONFIG_PAX_REFCOUNT
9296 + "jno 0f\n"
9297 + LOCK_PREFIX "incq %0\n"
9298 + "int $4\n0:\n"
9299 + _ASM_EXTABLE(0b, 0b)
9300 +#endif
9301 +
9302 + : "=m" (v->counter)
9303 + : "m" (v->counter));
9304 +}
9305 +
9306 +/**
9307 + * atomic64_dec_unchecked - decrement atomic64 variable
9308 + * @v: pointer to type atomic64_t
9309 + *
9310 + * Atomically decrements @v by 1.
9311 + */
9312 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9313 +{
9314 + asm volatile(LOCK_PREFIX "decq %0\n"
9315 : "=m" (v->counter)
9316 : "m" (v->counter));
9317 }
9318 @@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9319 {
9320 unsigned char c;
9321
9322 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
9323 + asm volatile(LOCK_PREFIX "decq %0\n"
9324 +
9325 +#ifdef CONFIG_PAX_REFCOUNT
9326 + "jno 0f\n"
9327 + LOCK_PREFIX "incq %0\n"
9328 + "int $4\n0:\n"
9329 + _ASM_EXTABLE(0b, 0b)
9330 +#endif
9331 +
9332 + "sete %1\n"
9333 : "=m" (v->counter), "=qm" (c)
9334 : "m" (v->counter) : "memory");
9335 return c != 0;
9336 @@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9337 {
9338 unsigned char c;
9339
9340 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
9341 + asm volatile(LOCK_PREFIX "incq %0\n"
9342 +
9343 +#ifdef CONFIG_PAX_REFCOUNT
9344 + "jno 0f\n"
9345 + LOCK_PREFIX "decq %0\n"
9346 + "int $4\n0:\n"
9347 + _ASM_EXTABLE(0b, 0b)
9348 +#endif
9349 +
9350 + "sete %1\n"
9351 : "=m" (v->counter), "=qm" (c)
9352 : "m" (v->counter) : "memory");
9353 return c != 0;
9354 @@ -337,7 +652,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9355 {
9356 unsigned char c;
9357
9358 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9359 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
9360 +
9361 +#ifdef CONFIG_PAX_REFCOUNT
9362 + "jno 0f\n"
9363 + LOCK_PREFIX "subq %2,%0\n"
9364 + "int $4\n0:\n"
9365 + _ASM_EXTABLE(0b, 0b)
9366 +#endif
9367 +
9368 + "sets %1\n"
9369 : "=m" (v->counter), "=qm" (c)
9370 : "er" (i), "m" (v->counter) : "memory");
9371 return c;
9372 @@ -353,7 +677,31 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9373 static inline long atomic64_add_return(long i, atomic64_t *v)
9374 {
9375 long __i = i;
9376 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
9377 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
9378 +
9379 +#ifdef CONFIG_PAX_REFCOUNT
9380 + "jno 0f\n"
9381 + "movq %0, %1\n"
9382 + "int $4\n0:\n"
9383 + _ASM_EXTABLE(0b, 0b)
9384 +#endif
9385 +
9386 + : "+r" (i), "+m" (v->counter)
9387 + : : "memory");
9388 + return i + __i;
9389 +}
9390 +
9391 +/**
9392 + * atomic64_add_return_unchecked - add and return
9393 + * @i: integer value to add
9394 + * @v: pointer to type atomic64_unchecked_t
9395 + *
9396 + * Atomically adds @i to @v and returns @i + @v
9397 + */
9398 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9399 +{
9400 + long __i = i;
9401 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
9402 : "+r" (i), "+m" (v->counter)
9403 : : "memory");
9404 return i + __i;
9405 @@ -365,6 +713,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
9406 }
9407
9408 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
9409 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9410 +{
9411 + return atomic64_add_return_unchecked(1, v);
9412 +}
9413 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
9414
9415 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9416 @@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9417 return cmpxchg(&v->counter, old, new);
9418 }
9419
9420 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
9421 +{
9422 + return cmpxchg(&v->counter, old, new);
9423 +}
9424 +
9425 static inline long atomic64_xchg(atomic64_t *v, long new)
9426 {
9427 return xchg(&v->counter, new);
9428 }
9429
9430 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
9431 +{
9432 + return xchg(&v->counter, new);
9433 +}
9434 +
9435 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
9436 {
9437 return cmpxchg(&v->counter, old, new);
9438 }
9439
9440 +static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9441 +{
9442 + return cmpxchg(&v->counter, old, new);
9443 +}
9444 +
9445 static inline long atomic_xchg(atomic_t *v, int new)
9446 {
9447 return xchg(&v->counter, new);
9448 }
9449
9450 +static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9451 +{
9452 + return xchg(&v->counter, new);
9453 +}
9454 +
9455 /**
9456 * atomic_add_unless - add unless the number is a given value
9457 * @v: pointer of type atomic_t
9458 @@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t *v, int new)
9459 */
9460 static inline int atomic_add_unless(atomic_t *v, int a, int u)
9461 {
9462 - int c, old;
9463 + int c, old, new;
9464 c = atomic_read(v);
9465 for (;;) {
9466 - if (unlikely(c == (u)))
9467 + if (unlikely(c == u))
9468 break;
9469 - old = atomic_cmpxchg((v), c, c + (a));
9470 +
9471 + asm volatile("addl %2,%0\n"
9472 +
9473 +#ifdef CONFIG_PAX_REFCOUNT
9474 + "jno 0f\n"
9475 + "subl %2,%0\n"
9476 + "int $4\n0:\n"
9477 + _ASM_EXTABLE(0b, 0b)
9478 +#endif
9479 +
9480 + : "=r" (new)
9481 + : "0" (c), "ir" (a));
9482 +
9483 + old = atomic_cmpxchg(v, c, new);
9484 if (likely(old == c))
9485 break;
9486 c = old;
9487 }
9488 - return c != (u);
9489 + return c != u;
9490 }
9491
9492 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
9493 @@ -424,17 +809,30 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
9494 */
9495 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
9496 {
9497 - long c, old;
9498 + long c, old, new;
9499 c = atomic64_read(v);
9500 for (;;) {
9501 - if (unlikely(c == (u)))
9502 + if (unlikely(c == u))
9503 break;
9504 - old = atomic64_cmpxchg((v), c, c + (a));
9505 +
9506 + asm volatile("addq %2,%0\n"
9507 +
9508 +#ifdef CONFIG_PAX_REFCOUNT
9509 + "jno 0f\n"
9510 + "subq %2,%0\n"
9511 + "int $4\n0:\n"
9512 + _ASM_EXTABLE(0b, 0b)
9513 +#endif
9514 +
9515 + : "=r" (new)
9516 + : "0" (c), "er" (a));
9517 +
9518 + old = atomic64_cmpxchg(v, c, new);
9519 if (likely(old == c))
9520 break;
9521 c = old;
9522 }
9523 - return c != (u);
9524 + return c != u;
9525 }
9526
9527 /**
9528 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
9529 index 02b47a6..d5c4b15 100644
9530 --- a/arch/x86/include/asm/bitops.h
9531 +++ b/arch/x86/include/asm/bitops.h
9532 @@ -38,7 +38,7 @@
9533 * a mask operation on a byte.
9534 */
9535 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
9536 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
9537 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
9538 #define CONST_MASK(nr) (1 << ((nr) & 7))
9539
9540 /**
9541 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
9542 index 7a10659..8bbf355 100644
9543 --- a/arch/x86/include/asm/boot.h
9544 +++ b/arch/x86/include/asm/boot.h
9545 @@ -11,10 +11,15 @@
9546 #include <asm/pgtable_types.h>
9547
9548 /* Physical address where kernel should be loaded. */
9549 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9550 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9551 + (CONFIG_PHYSICAL_ALIGN - 1)) \
9552 & ~(CONFIG_PHYSICAL_ALIGN - 1))
9553
9554 +#ifndef __ASSEMBLY__
9555 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
9556 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
9557 +#endif
9558 +
9559 /* Minimum kernel alignment, as a power of two */
9560 #ifdef CONFIG_X86_64
9561 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
9562 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
9563 index 549860d..7d45f68 100644
9564 --- a/arch/x86/include/asm/cache.h
9565 +++ b/arch/x86/include/asm/cache.h
9566 @@ -5,9 +5,10 @@
9567
9568 /* L1 cache line size */
9569 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
9570 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9571 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9572
9573 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
9574 +#define __read_only __attribute__((__section__(".data.read_only")))
9575
9576 #ifdef CONFIG_X86_VSMP
9577 /* vSMP Internode cacheline shift */
9578 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
9579 index b54f6af..5b376a6 100644
9580 --- a/arch/x86/include/asm/cacheflush.h
9581 +++ b/arch/x86/include/asm/cacheflush.h
9582 @@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
9583 static inline unsigned long get_page_memtype(struct page *pg)
9584 {
9585 if (!PageUncached(pg) && !PageWC(pg))
9586 - return -1;
9587 + return ~0UL;
9588 else if (!PageUncached(pg) && PageWC(pg))
9589 return _PAGE_CACHE_WC;
9590 else if (PageUncached(pg) && !PageWC(pg))
9591 @@ -85,7 +85,7 @@ static inline void set_page_memtype(struct page *pg, unsigned long memtype)
9592 SetPageWC(pg);
9593 break;
9594 default:
9595 - case -1:
9596 + case ~0UL:
9597 ClearPageUncached(pg);
9598 ClearPageWC(pg);
9599 break;
9600 diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
9601 index 0e63c9a..ab8d972 100644
9602 --- a/arch/x86/include/asm/calling.h
9603 +++ b/arch/x86/include/asm/calling.h
9604 @@ -52,32 +52,32 @@ For 32-bit we have the following conventions - kernel is built with
9605 * for assembly code:
9606 */
9607
9608 -#define R15 0
9609 -#define R14 8
9610 -#define R13 16
9611 -#define R12 24
9612 -#define RBP 32
9613 -#define RBX 40
9614 +#define R15 (0)
9615 +#define R14 (8)
9616 +#define R13 (16)
9617 +#define R12 (24)
9618 +#define RBP (32)
9619 +#define RBX (40)
9620
9621 /* arguments: interrupts/non tracing syscalls only save up to here: */
9622 -#define R11 48
9623 -#define R10 56
9624 -#define R9 64
9625 -#define R8 72
9626 -#define RAX 80
9627 -#define RCX 88
9628 -#define RDX 96
9629 -#define RSI 104
9630 -#define RDI 112
9631 -#define ORIG_RAX 120 /* + error_code */
9632 +#define R11 (48)
9633 +#define R10 (56)
9634 +#define R9 (64)
9635 +#define R8 (72)
9636 +#define RAX (80)
9637 +#define RCX (88)
9638 +#define RDX (96)
9639 +#define RSI (104)
9640 +#define RDI (112)
9641 +#define ORIG_RAX (120) /* + error_code */
9642 /* end of arguments */
9643
9644 /* cpu exception frame or undefined in case of fast syscall: */
9645 -#define RIP 128
9646 -#define CS 136
9647 -#define EFLAGS 144
9648 -#define RSP 152
9649 -#define SS 160
9650 +#define RIP (128)
9651 +#define CS (136)
9652 +#define EFLAGS (144)
9653 +#define RSP (152)
9654 +#define SS (160)
9655
9656 #define ARGOFFSET R11
9657 #define SWFRAME ORIG_RAX
9658 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
9659 index 46fc474..b02b0f9 100644
9660 --- a/arch/x86/include/asm/checksum_32.h
9661 +++ b/arch/x86/include/asm/checksum_32.h
9662 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
9663 int len, __wsum sum,
9664 int *src_err_ptr, int *dst_err_ptr);
9665
9666 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
9667 + int len, __wsum sum,
9668 + int *src_err_ptr, int *dst_err_ptr);
9669 +
9670 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
9671 + int len, __wsum sum,
9672 + int *src_err_ptr, int *dst_err_ptr);
9673 +
9674 /*
9675 * Note: when you get a NULL pointer exception here this means someone
9676 * passed in an incorrect kernel address to one of these functions.
9677 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
9678 int *err_ptr)
9679 {
9680 might_sleep();
9681 - return csum_partial_copy_generic((__force void *)src, dst,
9682 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
9683 len, sum, err_ptr, NULL);
9684 }
9685
9686 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
9687 {
9688 might_sleep();
9689 if (access_ok(VERIFY_WRITE, dst, len))
9690 - return csum_partial_copy_generic(src, (__force void *)dst,
9691 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
9692 len, sum, NULL, err_ptr);
9693
9694 if (len)
9695 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
9696 index 617bd56..7b047a1 100644
9697 --- a/arch/x86/include/asm/desc.h
9698 +++ b/arch/x86/include/asm/desc.h
9699 @@ -4,6 +4,7 @@
9700 #include <asm/desc_defs.h>
9701 #include <asm/ldt.h>
9702 #include <asm/mmu.h>
9703 +#include <asm/pgtable.h>
9704 #include <linux/smp.h>
9705
9706 static inline void fill_ldt(struct desc_struct *desc,
9707 @@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_struct *desc,
9708 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
9709 desc->type = (info->read_exec_only ^ 1) << 1;
9710 desc->type |= info->contents << 2;
9711 + desc->type |= info->seg_not_present ^ 1;
9712 desc->s = 1;
9713 desc->dpl = 0x3;
9714 desc->p = info->seg_not_present ^ 1;
9715 @@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_struct *desc,
9716 }
9717
9718 extern struct desc_ptr idt_descr;
9719 -extern gate_desc idt_table[];
9720 -
9721 -struct gdt_page {
9722 - struct desc_struct gdt[GDT_ENTRIES];
9723 -} __attribute__((aligned(PAGE_SIZE)));
9724 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
9725 +extern gate_desc idt_table[256];
9726
9727 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
9728 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
9729 {
9730 - return per_cpu(gdt_page, cpu).gdt;
9731 + return cpu_gdt_table[cpu];
9732 }
9733
9734 #ifdef CONFIG_X86_64
9735 @@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
9736 unsigned long base, unsigned dpl, unsigned flags,
9737 unsigned short seg)
9738 {
9739 - gate->a = (seg << 16) | (base & 0xffff);
9740 - gate->b = (base & 0xffff0000) |
9741 - (((0x80 | type | (dpl << 5)) & 0xff) << 8);
9742 + gate->gate.offset_low = base;
9743 + gate->gate.seg = seg;
9744 + gate->gate.reserved = 0;
9745 + gate->gate.type = type;
9746 + gate->gate.s = 0;
9747 + gate->gate.dpl = dpl;
9748 + gate->gate.p = 1;
9749 + gate->gate.offset_high = base >> 16;
9750 }
9751
9752 #endif
9753 @@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
9754 static inline void native_write_idt_entry(gate_desc *idt, int entry,
9755 const gate_desc *gate)
9756 {
9757 + pax_open_kernel();
9758 memcpy(&idt[entry], gate, sizeof(*gate));
9759 + pax_close_kernel();
9760 }
9761
9762 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
9763 const void *desc)
9764 {
9765 + pax_open_kernel();
9766 memcpy(&ldt[entry], desc, 8);
9767 + pax_close_kernel();
9768 }
9769
9770 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
9771 @@ -139,7 +146,10 @@ static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
9772 size = sizeof(struct desc_struct);
9773 break;
9774 }
9775 +
9776 + pax_open_kernel();
9777 memcpy(&gdt[entry], desc, size);
9778 + pax_close_kernel();
9779 }
9780
9781 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
9782 @@ -211,7 +221,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
9783
9784 static inline void native_load_tr_desc(void)
9785 {
9786 + pax_open_kernel();
9787 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
9788 + pax_close_kernel();
9789 }
9790
9791 static inline void native_load_gdt(const struct desc_ptr *dtr)
9792 @@ -246,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
9793 unsigned int i;
9794 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
9795
9796 + pax_open_kernel();
9797 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
9798 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
9799 + pax_close_kernel();
9800 }
9801
9802 #define _LDT_empty(info) \
9803 @@ -309,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
9804 desc->limit = (limit >> 16) & 0xf;
9805 }
9806
9807 -static inline void _set_gate(int gate, unsigned type, void *addr,
9808 +static inline void _set_gate(int gate, unsigned type, const void *addr,
9809 unsigned dpl, unsigned ist, unsigned seg)
9810 {
9811 gate_desc s;
9812 @@ -327,7 +341,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
9813 * Pentium F0 0F bugfix can have resulted in the mapped
9814 * IDT being write-protected.
9815 */
9816 -static inline void set_intr_gate(unsigned int n, void *addr)
9817 +static inline void set_intr_gate(unsigned int n, const void *addr)
9818 {
9819 BUG_ON((unsigned)n > 0xFF);
9820 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
9821 @@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
9822 /*
9823 * This routine sets up an interrupt gate at directory privilege level 3.
9824 */
9825 -static inline void set_system_intr_gate(unsigned int n, void *addr)
9826 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
9827 {
9828 BUG_ON((unsigned)n > 0xFF);
9829 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
9830 }
9831
9832 -static inline void set_system_trap_gate(unsigned int n, void *addr)
9833 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
9834 {
9835 BUG_ON((unsigned)n > 0xFF);
9836 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
9837 }
9838
9839 -static inline void set_trap_gate(unsigned int n, void *addr)
9840 +static inline void set_trap_gate(unsigned int n, const void *addr)
9841 {
9842 BUG_ON((unsigned)n > 0xFF);
9843 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
9844 @@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
9845 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
9846 {
9847 BUG_ON((unsigned)n > 0xFF);
9848 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
9849 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
9850 }
9851
9852 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
9853 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
9854 {
9855 BUG_ON((unsigned)n > 0xFF);
9856 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
9857 }
9858
9859 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
9860 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
9861 {
9862 BUG_ON((unsigned)n > 0xFF);
9863 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
9864 }
9865
9866 +#ifdef CONFIG_X86_32
9867 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
9868 +{
9869 + struct desc_struct d;
9870 +
9871 + if (likely(limit))
9872 + limit = (limit - 1UL) >> PAGE_SHIFT;
9873 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
9874 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
9875 +}
9876 +#endif
9877 +
9878 #endif /* _ASM_X86_DESC_H */
9879 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
9880 index 9d66848..6b4a691 100644
9881 --- a/arch/x86/include/asm/desc_defs.h
9882 +++ b/arch/x86/include/asm/desc_defs.h
9883 @@ -31,6 +31,12 @@ struct desc_struct {
9884 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
9885 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
9886 };
9887 + struct {
9888 + u16 offset_low;
9889 + u16 seg;
9890 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
9891 + unsigned offset_high: 16;
9892 + } gate;
9893 };
9894 } __attribute__((packed));
9895
9896 diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
9897 index cee34e9..a7c3fa2 100644
9898 --- a/arch/x86/include/asm/device.h
9899 +++ b/arch/x86/include/asm/device.h
9900 @@ -6,7 +6,7 @@ struct dev_archdata {
9901 void *acpi_handle;
9902 #endif
9903 #ifdef CONFIG_X86_64
9904 -struct dma_map_ops *dma_ops;
9905 + const struct dma_map_ops *dma_ops;
9906 #endif
9907 #ifdef CONFIG_DMAR
9908 void *iommu; /* hook for IOMMU specific extension */
9909 diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
9910 index 6a25d5d..786b202 100644
9911 --- a/arch/x86/include/asm/dma-mapping.h
9912 +++ b/arch/x86/include/asm/dma-mapping.h
9913 @@ -25,9 +25,9 @@ extern int iommu_merge;
9914 extern struct device x86_dma_fallback_dev;
9915 extern int panic_on_overflow;
9916
9917 -extern struct dma_map_ops *dma_ops;
9918 +extern const struct dma_map_ops *dma_ops;
9919
9920 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
9921 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
9922 {
9923 #ifdef CONFIG_X86_32
9924 return dma_ops;
9925 @@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
9926 /* Make sure we keep the same behaviour */
9927 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
9928 {
9929 - struct dma_map_ops *ops = get_dma_ops(dev);
9930 + const struct dma_map_ops *ops = get_dma_ops(dev);
9931 if (ops->mapping_error)
9932 return ops->mapping_error(dev, dma_addr);
9933
9934 @@ -122,7 +122,7 @@ static inline void *
9935 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
9936 gfp_t gfp)
9937 {
9938 - struct dma_map_ops *ops = get_dma_ops(dev);
9939 + const struct dma_map_ops *ops = get_dma_ops(dev);
9940 void *memory;
9941
9942 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
9943 @@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
9944 static inline void dma_free_coherent(struct device *dev, size_t size,
9945 void *vaddr, dma_addr_t bus)
9946 {
9947 - struct dma_map_ops *ops = get_dma_ops(dev);
9948 + const struct dma_map_ops *ops = get_dma_ops(dev);
9949
9950 WARN_ON(irqs_disabled()); /* for portability */
9951
9952 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
9953 index 40b4e61..40d8133 100644
9954 --- a/arch/x86/include/asm/e820.h
9955 +++ b/arch/x86/include/asm/e820.h
9956 @@ -133,7 +133,7 @@ extern char *default_machine_specific_memory_setup(void);
9957 #define ISA_END_ADDRESS 0x100000
9958 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
9959
9960 -#define BIOS_BEGIN 0x000a0000
9961 +#define BIOS_BEGIN 0x000c0000
9962 #define BIOS_END 0x00100000
9963
9964 #ifdef __KERNEL__
9965 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
9966 index 8ac9d9a..0a6c96e 100644
9967 --- a/arch/x86/include/asm/elf.h
9968 +++ b/arch/x86/include/asm/elf.h
9969 @@ -257,7 +257,25 @@ extern int force_personality32;
9970 the loader. We need to make sure that it is out of the way of the program
9971 that it will "exec", and that there is sufficient room for the brk. */
9972
9973 +#ifdef CONFIG_PAX_SEGMEXEC
9974 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
9975 +#else
9976 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
9977 +#endif
9978 +
9979 +#ifdef CONFIG_PAX_ASLR
9980 +#ifdef CONFIG_X86_32
9981 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
9982 +
9983 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
9984 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
9985 +#else
9986 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
9987 +
9988 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
9989 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
9990 +#endif
9991 +#endif
9992
9993 /* This yields a mask that user programs can use to figure out what
9994 instruction set this CPU supports. This could be done in user space,
9995 @@ -310,9 +328,7 @@ do { \
9996
9997 #define ARCH_DLINFO \
9998 do { \
9999 - if (vdso_enabled) \
10000 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10001 - (unsigned long)current->mm->context.vdso); \
10002 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10003 } while (0)
10004
10005 #define AT_SYSINFO 32
10006 @@ -323,7 +339,7 @@ do { \
10007
10008 #endif /* !CONFIG_X86_32 */
10009
10010 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
10011 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
10012
10013 #define VDSO_ENTRY \
10014 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
10015 @@ -337,7 +353,4 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
10016 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
10017 #define compat_arch_setup_additional_pages syscall32_setup_pages
10018
10019 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
10020 -#define arch_randomize_brk arch_randomize_brk
10021 -
10022 #endif /* _ASM_X86_ELF_H */
10023 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
10024 index cc70c1c..d96d011 100644
10025 --- a/arch/x86/include/asm/emergency-restart.h
10026 +++ b/arch/x86/include/asm/emergency-restart.h
10027 @@ -15,6 +15,6 @@ enum reboot_type {
10028
10029 extern enum reboot_type reboot_type;
10030
10031 -extern void machine_emergency_restart(void);
10032 +extern void machine_emergency_restart(void) __noreturn;
10033
10034 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10035 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10036 index 1f11ce4..7caabd1 100644
10037 --- a/arch/x86/include/asm/futex.h
10038 +++ b/arch/x86/include/asm/futex.h
10039 @@ -12,16 +12,18 @@
10040 #include <asm/system.h>
10041
10042 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10043 + typecheck(u32 __user *, uaddr); \
10044 asm volatile("1:\t" insn "\n" \
10045 "2:\t.section .fixup,\"ax\"\n" \
10046 "3:\tmov\t%3, %1\n" \
10047 "\tjmp\t2b\n" \
10048 "\t.previous\n" \
10049 _ASM_EXTABLE(1b, 3b) \
10050 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10051 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10052 : "i" (-EFAULT), "0" (oparg), "1" (0))
10053
10054 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10055 + typecheck(u32 __user *, uaddr); \
10056 asm volatile("1:\tmovl %2, %0\n" \
10057 "\tmovl\t%0, %3\n" \
10058 "\t" insn "\n" \
10059 @@ -34,10 +36,10 @@
10060 _ASM_EXTABLE(1b, 4b) \
10061 _ASM_EXTABLE(2b, 4b) \
10062 : "=&a" (oldval), "=&r" (ret), \
10063 - "+m" (*uaddr), "=&r" (tem) \
10064 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10065 : "r" (oparg), "i" (-EFAULT), "1" (0))
10066
10067 -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10068 +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10069 {
10070 int op = (encoded_op >> 28) & 7;
10071 int cmp = (encoded_op >> 24) & 15;
10072 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10073
10074 switch (op) {
10075 case FUTEX_OP_SET:
10076 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10077 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10078 break;
10079 case FUTEX_OP_ADD:
10080 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10081 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10082 uaddr, oparg);
10083 break;
10084 case FUTEX_OP_OR:
10085 @@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10086 return ret;
10087 }
10088
10089 -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
10090 +static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
10091 int newval)
10092 {
10093
10094 @@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
10095 return -ENOSYS;
10096 #endif
10097
10098 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
10099 + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10100 return -EFAULT;
10101
10102 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
10103 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
10104 "2:\t.section .fixup, \"ax\"\n"
10105 "3:\tmov %2, %0\n"
10106 "\tjmp 2b\n"
10107 "\t.previous\n"
10108 _ASM_EXTABLE(1b, 3b)
10109 - : "=a" (oldval), "+m" (*uaddr)
10110 + : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
10111 : "i" (-EFAULT), "r" (newval), "0" (oldval)
10112 : "memory"
10113 );
10114 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10115 index ba180d9..3bad351 100644
10116 --- a/arch/x86/include/asm/hw_irq.h
10117 +++ b/arch/x86/include/asm/hw_irq.h
10118 @@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
10119 extern void enable_IO_APIC(void);
10120
10121 /* Statistics */
10122 -extern atomic_t irq_err_count;
10123 -extern atomic_t irq_mis_count;
10124 +extern atomic_unchecked_t irq_err_count;
10125 +extern atomic_unchecked_t irq_mis_count;
10126
10127 /* EISA */
10128 extern void eisa_set_level_irq(unsigned int irq);
10129 diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
10130 index 0b20bbb..4cb1396 100644
10131 --- a/arch/x86/include/asm/i387.h
10132 +++ b/arch/x86/include/asm/i387.h
10133 @@ -60,6 +60,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10134 {
10135 int err;
10136
10137 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10138 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10139 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
10140 +#endif
10141 +
10142 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
10143 "2:\n"
10144 ".section .fixup,\"ax\"\n"
10145 @@ -105,6 +110,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10146 {
10147 int err;
10148
10149 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10150 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10151 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10152 +#endif
10153 +
10154 asm volatile("1: rex64/fxsave (%[fx])\n\t"
10155 "2:\n"
10156 ".section .fixup,\"ax\"\n"
10157 @@ -195,13 +205,8 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10158 }
10159
10160 /* We need a safe address that is cheap to find and that is already
10161 - in L1 during context switch. The best choices are unfortunately
10162 - different for UP and SMP */
10163 -#ifdef CONFIG_SMP
10164 -#define safe_address (__per_cpu_offset[0])
10165 -#else
10166 -#define safe_address (kstat_cpu(0).cpustat.user)
10167 -#endif
10168 + in L1 during context switch. */
10169 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
10170
10171 /*
10172 * These must be called with preempt disabled
10173 @@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void)
10174 struct thread_info *me = current_thread_info();
10175 preempt_disable();
10176 if (me->status & TS_USEDFPU)
10177 - __save_init_fpu(me->task);
10178 + __save_init_fpu(current);
10179 else
10180 clts();
10181 }
10182 diff --git a/arch/x86/include/asm/io_32.h b/arch/x86/include/asm/io_32.h
10183 index a299900..15c5410 100644
10184 --- a/arch/x86/include/asm/io_32.h
10185 +++ b/arch/x86/include/asm/io_32.h
10186 @@ -3,6 +3,7 @@
10187
10188 #include <linux/string.h>
10189 #include <linux/compiler.h>
10190 +#include <asm/processor.h>
10191
10192 /*
10193 * This file contains the definitions for the x86 IO instructions
10194 @@ -42,6 +43,17 @@
10195
10196 #ifdef __KERNEL__
10197
10198 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10199 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10200 +{
10201 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10202 +}
10203 +
10204 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10205 +{
10206 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10207 +}
10208 +
10209 #include <asm-generic/iomap.h>
10210
10211 #include <linux/vmalloc.h>
10212 diff --git a/arch/x86/include/asm/io_64.h b/arch/x86/include/asm/io_64.h
10213 index 2440678..c158b88 100644
10214 --- a/arch/x86/include/asm/io_64.h
10215 +++ b/arch/x86/include/asm/io_64.h
10216 @@ -140,6 +140,17 @@ __OUTS(l)
10217
10218 #include <linux/vmalloc.h>
10219
10220 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10221 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10222 +{
10223 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10224 +}
10225 +
10226 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10227 +{
10228 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10229 +}
10230 +
10231 #include <asm-generic/iomap.h>
10232
10233 void __memcpy_fromio(void *, unsigned long, unsigned);
10234 diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
10235 index fd6d21b..8b13915 100644
10236 --- a/arch/x86/include/asm/iommu.h
10237 +++ b/arch/x86/include/asm/iommu.h
10238 @@ -3,7 +3,7 @@
10239
10240 extern void pci_iommu_shutdown(void);
10241 extern void no_iommu_init(void);
10242 -extern struct dma_map_ops nommu_dma_ops;
10243 +extern const struct dma_map_ops nommu_dma_ops;
10244 extern int force_iommu, no_iommu;
10245 extern int iommu_detected;
10246 extern int iommu_pass_through;
10247 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10248 index 9e2b952..557206e 100644
10249 --- a/arch/x86/include/asm/irqflags.h
10250 +++ b/arch/x86/include/asm/irqflags.h
10251 @@ -142,6 +142,11 @@ static inline unsigned long __raw_local_irq_save(void)
10252 sti; \
10253 sysexit
10254
10255 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
10256 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10257 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
10258 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10259 +
10260 #else
10261 #define INTERRUPT_RETURN iret
10262 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10263 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10264 index 4fe681d..bb6d40c 100644
10265 --- a/arch/x86/include/asm/kprobes.h
10266 +++ b/arch/x86/include/asm/kprobes.h
10267 @@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
10268 #define BREAKPOINT_INSTRUCTION 0xcc
10269 #define RELATIVEJUMP_INSTRUCTION 0xe9
10270 #define MAX_INSN_SIZE 16
10271 -#define MAX_STACK_SIZE 64
10272 -#define MIN_STACK_SIZE(ADDR) \
10273 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10274 - THREAD_SIZE - (unsigned long)(ADDR))) \
10275 - ? (MAX_STACK_SIZE) \
10276 - : (((unsigned long)current_thread_info()) + \
10277 - THREAD_SIZE - (unsigned long)(ADDR)))
10278 +#define MAX_STACK_SIZE 64UL
10279 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10280
10281 #define flush_insn_slot(p) do { } while (0)
10282
10283 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10284 index 08bc2ff..2e88d1f 100644
10285 --- a/arch/x86/include/asm/kvm_host.h
10286 +++ b/arch/x86/include/asm/kvm_host.h
10287 @@ -534,9 +534,9 @@ struct kvm_x86_ops {
10288 bool (*gb_page_enable)(void);
10289
10290 const struct trace_print_flags *exit_reasons_str;
10291 -};
10292 +} __do_const;
10293
10294 -extern struct kvm_x86_ops *kvm_x86_ops;
10295 +extern const struct kvm_x86_ops *kvm_x86_ops;
10296
10297 int kvm_mmu_module_init(void);
10298 void kvm_mmu_module_exit(void);
10299 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10300 index 47b9b6f..815aaa1 100644
10301 --- a/arch/x86/include/asm/local.h
10302 +++ b/arch/x86/include/asm/local.h
10303 @@ -18,26 +18,58 @@ typedef struct {
10304
10305 static inline void local_inc(local_t *l)
10306 {
10307 - asm volatile(_ASM_INC "%0"
10308 + asm volatile(_ASM_INC "%0\n"
10309 +
10310 +#ifdef CONFIG_PAX_REFCOUNT
10311 + "jno 0f\n"
10312 + _ASM_DEC "%0\n"
10313 + "int $4\n0:\n"
10314 + _ASM_EXTABLE(0b, 0b)
10315 +#endif
10316 +
10317 : "+m" (l->a.counter));
10318 }
10319
10320 static inline void local_dec(local_t *l)
10321 {
10322 - asm volatile(_ASM_DEC "%0"
10323 + asm volatile(_ASM_DEC "%0\n"
10324 +
10325 +#ifdef CONFIG_PAX_REFCOUNT
10326 + "jno 0f\n"
10327 + _ASM_INC "%0\n"
10328 + "int $4\n0:\n"
10329 + _ASM_EXTABLE(0b, 0b)
10330 +#endif
10331 +
10332 : "+m" (l->a.counter));
10333 }
10334
10335 static inline void local_add(long i, local_t *l)
10336 {
10337 - asm volatile(_ASM_ADD "%1,%0"
10338 + asm volatile(_ASM_ADD "%1,%0\n"
10339 +
10340 +#ifdef CONFIG_PAX_REFCOUNT
10341 + "jno 0f\n"
10342 + _ASM_SUB "%1,%0\n"
10343 + "int $4\n0:\n"
10344 + _ASM_EXTABLE(0b, 0b)
10345 +#endif
10346 +
10347 : "+m" (l->a.counter)
10348 : "ir" (i));
10349 }
10350
10351 static inline void local_sub(long i, local_t *l)
10352 {
10353 - asm volatile(_ASM_SUB "%1,%0"
10354 + asm volatile(_ASM_SUB "%1,%0\n"
10355 +
10356 +#ifdef CONFIG_PAX_REFCOUNT
10357 + "jno 0f\n"
10358 + _ASM_ADD "%1,%0\n"
10359 + "int $4\n0:\n"
10360 + _ASM_EXTABLE(0b, 0b)
10361 +#endif
10362 +
10363 : "+m" (l->a.counter)
10364 : "ir" (i));
10365 }
10366 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10367 {
10368 unsigned char c;
10369
10370 - asm volatile(_ASM_SUB "%2,%0; sete %1"
10371 + asm volatile(_ASM_SUB "%2,%0\n"
10372 +
10373 +#ifdef CONFIG_PAX_REFCOUNT
10374 + "jno 0f\n"
10375 + _ASM_ADD "%2,%0\n"
10376 + "int $4\n0:\n"
10377 + _ASM_EXTABLE(0b, 0b)
10378 +#endif
10379 +
10380 + "sete %1\n"
10381 : "+m" (l->a.counter), "=qm" (c)
10382 : "ir" (i) : "memory");
10383 return c;
10384 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
10385 {
10386 unsigned char c;
10387
10388 - asm volatile(_ASM_DEC "%0; sete %1"
10389 + asm volatile(_ASM_DEC "%0\n"
10390 +
10391 +#ifdef CONFIG_PAX_REFCOUNT
10392 + "jno 0f\n"
10393 + _ASM_INC "%0\n"
10394 + "int $4\n0:\n"
10395 + _ASM_EXTABLE(0b, 0b)
10396 +#endif
10397 +
10398 + "sete %1\n"
10399 : "+m" (l->a.counter), "=qm" (c)
10400 : : "memory");
10401 return c != 0;
10402 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
10403 {
10404 unsigned char c;
10405
10406 - asm volatile(_ASM_INC "%0; sete %1"
10407 + asm volatile(_ASM_INC "%0\n"
10408 +
10409 +#ifdef CONFIG_PAX_REFCOUNT
10410 + "jno 0f\n"
10411 + _ASM_DEC "%0\n"
10412 + "int $4\n0:\n"
10413 + _ASM_EXTABLE(0b, 0b)
10414 +#endif
10415 +
10416 + "sete %1\n"
10417 : "+m" (l->a.counter), "=qm" (c)
10418 : : "memory");
10419 return c != 0;
10420 @@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
10421 {
10422 unsigned char c;
10423
10424 - asm volatile(_ASM_ADD "%2,%0; sets %1"
10425 + asm volatile(_ASM_ADD "%2,%0\n"
10426 +
10427 +#ifdef CONFIG_PAX_REFCOUNT
10428 + "jno 0f\n"
10429 + _ASM_SUB "%2,%0\n"
10430 + "int $4\n0:\n"
10431 + _ASM_EXTABLE(0b, 0b)
10432 +#endif
10433 +
10434 + "sets %1\n"
10435 : "+m" (l->a.counter), "=qm" (c)
10436 : "ir" (i) : "memory");
10437 return c;
10438 @@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
10439 #endif
10440 /* Modern 486+ processor */
10441 __i = i;
10442 - asm volatile(_ASM_XADD "%0, %1;"
10443 + asm volatile(_ASM_XADD "%0, %1\n"
10444 +
10445 +#ifdef CONFIG_PAX_REFCOUNT
10446 + "jno 0f\n"
10447 + _ASM_MOV "%0,%1\n"
10448 + "int $4\n0:\n"
10449 + _ASM_EXTABLE(0b, 0b)
10450 +#endif
10451 +
10452 : "+r" (i), "+m" (l->a.counter)
10453 : : "memory");
10454 return i + __i;
10455 diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
10456 index ef51b50..514ba37 100644
10457 --- a/arch/x86/include/asm/microcode.h
10458 +++ b/arch/x86/include/asm/microcode.h
10459 @@ -12,13 +12,13 @@ struct device;
10460 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
10461
10462 struct microcode_ops {
10463 - enum ucode_state (*request_microcode_user) (int cpu,
10464 + enum ucode_state (* const request_microcode_user) (int cpu,
10465 const void __user *buf, size_t size);
10466
10467 - enum ucode_state (*request_microcode_fw) (int cpu,
10468 + enum ucode_state (* const request_microcode_fw) (int cpu,
10469 struct device *device);
10470
10471 - void (*microcode_fini_cpu) (int cpu);
10472 + void (* const microcode_fini_cpu) (int cpu);
10473
10474 /*
10475 * The generic 'microcode_core' part guarantees that
10476 @@ -38,18 +38,18 @@ struct ucode_cpu_info {
10477 extern struct ucode_cpu_info ucode_cpu_info[];
10478
10479 #ifdef CONFIG_MICROCODE_INTEL
10480 -extern struct microcode_ops * __init init_intel_microcode(void);
10481 +extern const struct microcode_ops * __init init_intel_microcode(void);
10482 #else
10483 -static inline struct microcode_ops * __init init_intel_microcode(void)
10484 +static inline const struct microcode_ops * __init init_intel_microcode(void)
10485 {
10486 return NULL;
10487 }
10488 #endif /* CONFIG_MICROCODE_INTEL */
10489
10490 #ifdef CONFIG_MICROCODE_AMD
10491 -extern struct microcode_ops * __init init_amd_microcode(void);
10492 +extern const struct microcode_ops * __init init_amd_microcode(void);
10493 #else
10494 -static inline struct microcode_ops * __init init_amd_microcode(void)
10495 +static inline const struct microcode_ops * __init init_amd_microcode(void)
10496 {
10497 return NULL;
10498 }
10499 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10500 index 593e51d..fa69c9a 100644
10501 --- a/arch/x86/include/asm/mman.h
10502 +++ b/arch/x86/include/asm/mman.h
10503 @@ -5,4 +5,14 @@
10504
10505 #include <asm-generic/mman.h>
10506
10507 +#ifdef __KERNEL__
10508 +#ifndef __ASSEMBLY__
10509 +#ifdef CONFIG_X86_32
10510 +#define arch_mmap_check i386_mmap_check
10511 +int i386_mmap_check(unsigned long addr, unsigned long len,
10512 + unsigned long flags);
10513 +#endif
10514 +#endif
10515 +#endif
10516 +
10517 #endif /* _ASM_X86_MMAN_H */
10518 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10519 index 80a1dee..239c67d 100644
10520 --- a/arch/x86/include/asm/mmu.h
10521 +++ b/arch/x86/include/asm/mmu.h
10522 @@ -9,10 +9,23 @@
10523 * we put the segment information here.
10524 */
10525 typedef struct {
10526 - void *ldt;
10527 + struct desc_struct *ldt;
10528 int size;
10529 struct mutex lock;
10530 - void *vdso;
10531 + unsigned long vdso;
10532 +
10533 +#ifdef CONFIG_X86_32
10534 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10535 + unsigned long user_cs_base;
10536 + unsigned long user_cs_limit;
10537 +
10538 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10539 + cpumask_t cpu_user_cs_mask;
10540 +#endif
10541 +
10542 +#endif
10543 +#endif
10544 +
10545 } mm_context_t;
10546
10547 #ifdef CONFIG_SMP
10548 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10549 index 8b5393e..8143173 100644
10550 --- a/arch/x86/include/asm/mmu_context.h
10551 +++ b/arch/x86/include/asm/mmu_context.h
10552 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
10553
10554 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10555 {
10556 +
10557 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10558 + unsigned int i;
10559 + pgd_t *pgd;
10560 +
10561 + pax_open_kernel();
10562 + pgd = get_cpu_pgd(smp_processor_id());
10563 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
10564 + set_pgd_batched(pgd+i, native_make_pgd(0));
10565 + pax_close_kernel();
10566 +#endif
10567 +
10568 #ifdef CONFIG_SMP
10569 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
10570 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
10571 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10572 struct task_struct *tsk)
10573 {
10574 unsigned cpu = smp_processor_id();
10575 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) && defined(CONFIG_SMP)
10576 + int tlbstate = TLBSTATE_OK;
10577 +#endif
10578
10579 if (likely(prev != next)) {
10580 #ifdef CONFIG_SMP
10581 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10582 + tlbstate = percpu_read(cpu_tlbstate.state);
10583 +#endif
10584 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10585 percpu_write(cpu_tlbstate.active_mm, next);
10586 #endif
10587 cpumask_set_cpu(cpu, mm_cpumask(next));
10588
10589 /* Re-load page tables */
10590 +#ifdef CONFIG_PAX_PER_CPU_PGD
10591 + pax_open_kernel();
10592 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10593 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10594 + pax_close_kernel();
10595 + load_cr3(get_cpu_pgd(cpu));
10596 +#else
10597 load_cr3(next->pgd);
10598 +#endif
10599
10600 /* stop flush ipis for the previous mm */
10601 cpumask_clear_cpu(cpu, mm_cpumask(prev));
10602 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10603 */
10604 if (unlikely(prev->context.ldt != next->context.ldt))
10605 load_LDT_nolock(&next->context);
10606 - }
10607 +
10608 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10609 + if (!nx_enabled) {
10610 + smp_mb__before_clear_bit();
10611 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
10612 + smp_mb__after_clear_bit();
10613 + cpu_set(cpu, next->context.cpu_user_cs_mask);
10614 + }
10615 +#endif
10616 +
10617 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10618 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
10619 + prev->context.user_cs_limit != next->context.user_cs_limit))
10620 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10621 #ifdef CONFIG_SMP
10622 + else if (unlikely(tlbstate != TLBSTATE_OK))
10623 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10624 +#endif
10625 +#endif
10626 +
10627 + }
10628 else {
10629 +
10630 +#ifdef CONFIG_PAX_PER_CPU_PGD
10631 + pax_open_kernel();
10632 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10633 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10634 + pax_close_kernel();
10635 + load_cr3(get_cpu_pgd(cpu));
10636 +#endif
10637 +
10638 +#ifdef CONFIG_SMP
10639 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10640 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
10641
10642 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10643 * tlb flush IPI delivery. We must reload CR3
10644 * to make sure to use no freed page tables.
10645 */
10646 +
10647 +#ifndef CONFIG_PAX_PER_CPU_PGD
10648 load_cr3(next->pgd);
10649 +#endif
10650 +
10651 load_LDT_nolock(&next->context);
10652 +
10653 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
10654 + if (!nx_enabled)
10655 + cpu_set(cpu, next->context.cpu_user_cs_mask);
10656 +#endif
10657 +
10658 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10659 +#ifdef CONFIG_PAX_PAGEEXEC
10660 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
10661 +#endif
10662 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10663 +#endif
10664 +
10665 }
10666 +#endif
10667 }
10668 -#endif
10669 }
10670
10671 #define activate_mm(prev, next) \
10672 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
10673 index 3e2ce58..caaf478 100644
10674 --- a/arch/x86/include/asm/module.h
10675 +++ b/arch/x86/include/asm/module.h
10676 @@ -5,6 +5,7 @@
10677
10678 #ifdef CONFIG_X86_64
10679 /* X86_64 does not define MODULE_PROC_FAMILY */
10680 +#define MODULE_PROC_FAMILY ""
10681 #elif defined CONFIG_M386
10682 #define MODULE_PROC_FAMILY "386 "
10683 #elif defined CONFIG_M486
10684 @@ -59,13 +60,26 @@
10685 #error unknown processor family
10686 #endif
10687
10688 -#ifdef CONFIG_X86_32
10689 -# ifdef CONFIG_4KSTACKS
10690 -# define MODULE_STACKSIZE "4KSTACKS "
10691 -# else
10692 -# define MODULE_STACKSIZE ""
10693 -# endif
10694 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
10695 +#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
10696 +#define MODULE_STACKSIZE "4KSTACKS "
10697 +#else
10698 +#define MODULE_STACKSIZE ""
10699 #endif
10700
10701 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
10702 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
10703 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
10704 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
10705 +#else
10706 +#define MODULE_PAX_KERNEXEC ""
10707 +#endif
10708 +
10709 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10710 +#define MODULE_PAX_UDEREF "UDEREF "
10711 +#else
10712 +#define MODULE_PAX_UDEREF ""
10713 +#endif
10714 +
10715 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
10716 +
10717 #endif /* _ASM_X86_MODULE_H */
10718 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
10719 index 7639dbf..e08a58c 100644
10720 --- a/arch/x86/include/asm/page_64_types.h
10721 +++ b/arch/x86/include/asm/page_64_types.h
10722 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
10723
10724 /* duplicated to the one in bootmem.h */
10725 extern unsigned long max_pfn;
10726 -extern unsigned long phys_base;
10727 +extern const unsigned long phys_base;
10728
10729 extern unsigned long __phys_addr(unsigned long);
10730 #define __phys_reloc_hide(x) (x)
10731 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
10732 index efb3899..ef30687 100644
10733 --- a/arch/x86/include/asm/paravirt.h
10734 +++ b/arch/x86/include/asm/paravirt.h
10735 @@ -648,6 +648,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
10736 val);
10737 }
10738
10739 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
10740 +{
10741 + pgdval_t val = native_pgd_val(pgd);
10742 +
10743 + if (sizeof(pgdval_t) > sizeof(long))
10744 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
10745 + val, (u64)val >> 32);
10746 + else
10747 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
10748 + val);
10749 +}
10750 +
10751 static inline void pgd_clear(pgd_t *pgdp)
10752 {
10753 set_pgd(pgdp, __pgd(0));
10754 @@ -729,6 +741,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
10755 pv_mmu_ops.set_fixmap(idx, phys, flags);
10756 }
10757
10758 +#ifdef CONFIG_PAX_KERNEXEC
10759 +static inline unsigned long pax_open_kernel(void)
10760 +{
10761 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
10762 +}
10763 +
10764 +static inline unsigned long pax_close_kernel(void)
10765 +{
10766 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
10767 +}
10768 +#else
10769 +static inline unsigned long pax_open_kernel(void) { return 0; }
10770 +static inline unsigned long pax_close_kernel(void) { return 0; }
10771 +#endif
10772 +
10773 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
10774
10775 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
10776 @@ -945,7 +972,7 @@ extern void default_banner(void);
10777
10778 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
10779 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
10780 -#define PARA_INDIRECT(addr) *%cs:addr
10781 +#define PARA_INDIRECT(addr) *%ss:addr
10782 #endif
10783
10784 #define INTERRUPT_RETURN \
10785 @@ -1022,6 +1049,21 @@ extern void default_banner(void);
10786 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
10787 CLBR_NONE, \
10788 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
10789 +
10790 +#define GET_CR0_INTO_RDI \
10791 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
10792 + mov %rax,%rdi
10793 +
10794 +#define SET_RDI_INTO_CR0 \
10795 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
10796 +
10797 +#define GET_CR3_INTO_RDI \
10798 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
10799 + mov %rax,%rdi
10800 +
10801 +#define SET_RDI_INTO_CR3 \
10802 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
10803 +
10804 #endif /* CONFIG_X86_32 */
10805
10806 #endif /* __ASSEMBLY__ */
10807 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
10808 index 9357473..aeb2de5 100644
10809 --- a/arch/x86/include/asm/paravirt_types.h
10810 +++ b/arch/x86/include/asm/paravirt_types.h
10811 @@ -78,19 +78,19 @@ struct pv_init_ops {
10812 */
10813 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
10814 unsigned long addr, unsigned len);
10815 -};
10816 +} __no_const;
10817
10818
10819 struct pv_lazy_ops {
10820 /* Set deferred update mode, used for batching operations. */
10821 void (*enter)(void);
10822 void (*leave)(void);
10823 -};
10824 +} __no_const;
10825
10826 struct pv_time_ops {
10827 unsigned long long (*sched_clock)(void);
10828 unsigned long (*get_tsc_khz)(void);
10829 -};
10830 +} __no_const;
10831
10832 struct pv_cpu_ops {
10833 /* hooks for various privileged instructions */
10834 @@ -186,7 +186,7 @@ struct pv_cpu_ops {
10835
10836 void (*start_context_switch)(struct task_struct *prev);
10837 void (*end_context_switch)(struct task_struct *next);
10838 -};
10839 +} __no_const;
10840
10841 struct pv_irq_ops {
10842 /*
10843 @@ -217,7 +217,7 @@ struct pv_apic_ops {
10844 unsigned long start_eip,
10845 unsigned long start_esp);
10846 #endif
10847 -};
10848 +} __no_const;
10849
10850 struct pv_mmu_ops {
10851 unsigned long (*read_cr2)(void);
10852 @@ -301,6 +301,7 @@ struct pv_mmu_ops {
10853 struct paravirt_callee_save make_pud;
10854
10855 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
10856 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
10857 #endif /* PAGETABLE_LEVELS == 4 */
10858 #endif /* PAGETABLE_LEVELS >= 3 */
10859
10860 @@ -316,6 +317,12 @@ struct pv_mmu_ops {
10861 an mfn. We can tell which is which from the index. */
10862 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
10863 phys_addr_t phys, pgprot_t flags);
10864 +
10865 +#ifdef CONFIG_PAX_KERNEXEC
10866 + unsigned long (*pax_open_kernel)(void);
10867 + unsigned long (*pax_close_kernel)(void);
10868 +#endif
10869 +
10870 };
10871
10872 struct raw_spinlock;
10873 @@ -326,7 +333,7 @@ struct pv_lock_ops {
10874 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
10875 int (*spin_trylock)(struct raw_spinlock *lock);
10876 void (*spin_unlock)(struct raw_spinlock *lock);
10877 -};
10878 +} __no_const;
10879
10880 /* This contains all the paravirt structures: we get a convenient
10881 * number for each function using the offset which we use to indicate
10882 diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
10883 index b399988..3f47c38 100644
10884 --- a/arch/x86/include/asm/pci_x86.h
10885 +++ b/arch/x86/include/asm/pci_x86.h
10886 @@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct pci_dev *dev);
10887 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
10888
10889 struct pci_raw_ops {
10890 - int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
10891 + int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
10892 int reg, int len, u32 *val);
10893 - int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
10894 + int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
10895 int reg, int len, u32 val);
10896 };
10897
10898 -extern struct pci_raw_ops *raw_pci_ops;
10899 -extern struct pci_raw_ops *raw_pci_ext_ops;
10900 +extern const struct pci_raw_ops *raw_pci_ops;
10901 +extern const struct pci_raw_ops *raw_pci_ext_ops;
10902
10903 -extern struct pci_raw_ops pci_direct_conf1;
10904 +extern const struct pci_raw_ops pci_direct_conf1;
10905 extern bool port_cf9_safe;
10906
10907 /* arch_initcall level */
10908 diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
10909 index b65a36d..50345a4 100644
10910 --- a/arch/x86/include/asm/percpu.h
10911 +++ b/arch/x86/include/asm/percpu.h
10912 @@ -78,6 +78,7 @@ do { \
10913 if (0) { \
10914 T__ tmp__; \
10915 tmp__ = (val); \
10916 + (void)tmp__; \
10917 } \
10918 switch (sizeof(var)) { \
10919 case 1: \
10920 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
10921 index 271de94..ef944d6 100644
10922 --- a/arch/x86/include/asm/pgalloc.h
10923 +++ b/arch/x86/include/asm/pgalloc.h
10924 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
10925 pmd_t *pmd, pte_t *pte)
10926 {
10927 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
10928 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
10929 +}
10930 +
10931 +static inline void pmd_populate_user(struct mm_struct *mm,
10932 + pmd_t *pmd, pte_t *pte)
10933 +{
10934 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
10935 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
10936 }
10937
10938 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
10939 index 2334982..70bc412 100644
10940 --- a/arch/x86/include/asm/pgtable-2level.h
10941 +++ b/arch/x86/include/asm/pgtable-2level.h
10942 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
10943
10944 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
10945 {
10946 + pax_open_kernel();
10947 *pmdp = pmd;
10948 + pax_close_kernel();
10949 }
10950
10951 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
10952 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
10953 index 33927d2..ccde329 100644
10954 --- a/arch/x86/include/asm/pgtable-3level.h
10955 +++ b/arch/x86/include/asm/pgtable-3level.h
10956 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
10957
10958 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
10959 {
10960 + pax_open_kernel();
10961 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
10962 + pax_close_kernel();
10963 }
10964
10965 static inline void native_set_pud(pud_t *pudp, pud_t pud)
10966 {
10967 + pax_open_kernel();
10968 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
10969 + pax_close_kernel();
10970 }
10971
10972 /*
10973 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
10974 index af6fd36..867ff74 100644
10975 --- a/arch/x86/include/asm/pgtable.h
10976 +++ b/arch/x86/include/asm/pgtable.h
10977 @@ -39,6 +39,7 @@ extern struct list_head pgd_list;
10978
10979 #ifndef __PAGETABLE_PUD_FOLDED
10980 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
10981 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
10982 #define pgd_clear(pgd) native_pgd_clear(pgd)
10983 #endif
10984
10985 @@ -74,12 +75,51 @@ extern struct list_head pgd_list;
10986
10987 #define arch_end_context_switch(prev) do {} while(0)
10988
10989 +#define pax_open_kernel() native_pax_open_kernel()
10990 +#define pax_close_kernel() native_pax_close_kernel()
10991 #endif /* CONFIG_PARAVIRT */
10992
10993 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
10994 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
10995 +
10996 +#ifdef CONFIG_PAX_KERNEXEC
10997 +static inline unsigned long native_pax_open_kernel(void)
10998 +{
10999 + unsigned long cr0;
11000 +
11001 + preempt_disable();
11002 + barrier();
11003 + cr0 = read_cr0() ^ X86_CR0_WP;
11004 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
11005 + write_cr0(cr0);
11006 + return cr0 ^ X86_CR0_WP;
11007 +}
11008 +
11009 +static inline unsigned long native_pax_close_kernel(void)
11010 +{
11011 + unsigned long cr0;
11012 +
11013 + cr0 = read_cr0() ^ X86_CR0_WP;
11014 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
11015 + write_cr0(cr0);
11016 + barrier();
11017 + preempt_enable_no_resched();
11018 + return cr0 ^ X86_CR0_WP;
11019 +}
11020 +#else
11021 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
11022 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
11023 +#endif
11024 +
11025 /*
11026 * The following only work if pte_present() is true.
11027 * Undefined behaviour if not..
11028 */
11029 +static inline int pte_user(pte_t pte)
11030 +{
11031 + return pte_val(pte) & _PAGE_USER;
11032 +}
11033 +
11034 static inline int pte_dirty(pte_t pte)
11035 {
11036 return pte_flags(pte) & _PAGE_DIRTY;
11037 @@ -167,9 +207,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
11038 return pte_clear_flags(pte, _PAGE_RW);
11039 }
11040
11041 +static inline pte_t pte_mkread(pte_t pte)
11042 +{
11043 + return __pte(pte_val(pte) | _PAGE_USER);
11044 +}
11045 +
11046 static inline pte_t pte_mkexec(pte_t pte)
11047 {
11048 - return pte_clear_flags(pte, _PAGE_NX);
11049 +#ifdef CONFIG_X86_PAE
11050 + if (__supported_pte_mask & _PAGE_NX)
11051 + return pte_clear_flags(pte, _PAGE_NX);
11052 + else
11053 +#endif
11054 + return pte_set_flags(pte, _PAGE_USER);
11055 +}
11056 +
11057 +static inline pte_t pte_exprotect(pte_t pte)
11058 +{
11059 +#ifdef CONFIG_X86_PAE
11060 + if (__supported_pte_mask & _PAGE_NX)
11061 + return pte_set_flags(pte, _PAGE_NX);
11062 + else
11063 +#endif
11064 + return pte_clear_flags(pte, _PAGE_USER);
11065 }
11066
11067 static inline pte_t pte_mkdirty(pte_t pte)
11068 @@ -302,6 +362,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
11069 #endif
11070
11071 #ifndef __ASSEMBLY__
11072 +
11073 +#ifdef CONFIG_PAX_PER_CPU_PGD
11074 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
11075 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
11076 +{
11077 + return cpu_pgd[cpu];
11078 +}
11079 +#endif
11080 +
11081 #include <linux/mm_types.h>
11082
11083 static inline int pte_none(pte_t pte)
11084 @@ -472,7 +541,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
11085
11086 static inline int pgd_bad(pgd_t pgd)
11087 {
11088 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
11089 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
11090 }
11091
11092 static inline int pgd_none(pgd_t pgd)
11093 @@ -495,7 +564,12 @@ static inline int pgd_none(pgd_t pgd)
11094 * pgd_offset() returns a (pgd_t *)
11095 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
11096 */
11097 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
11098 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
11099 +
11100 +#ifdef CONFIG_PAX_PER_CPU_PGD
11101 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
11102 +#endif
11103 +
11104 /*
11105 * a shortcut which implies the use of the kernel's pgd, instead
11106 * of a process's
11107 @@ -506,6 +580,20 @@ static inline int pgd_none(pgd_t pgd)
11108 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
11109 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
11110
11111 +#ifdef CONFIG_X86_32
11112 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
11113 +#else
11114 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
11115 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
11116 +
11117 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11118 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
11119 +#else
11120 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
11121 +#endif
11122 +
11123 +#endif
11124 +
11125 #ifndef __ASSEMBLY__
11126
11127 extern int direct_gbpages;
11128 @@ -611,11 +699,23 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
11129 * dst and src can be on the same page, but the range must not overlap,
11130 * and must not cross a page boundary.
11131 */
11132 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
11133 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
11134 {
11135 - memcpy(dst, src, count * sizeof(pgd_t));
11136 + pax_open_kernel();
11137 + while (count--)
11138 + *dst++ = *src++;
11139 + pax_close_kernel();
11140 }
11141
11142 +#ifdef CONFIG_PAX_PER_CPU_PGD
11143 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11144 +#endif
11145 +
11146 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11147 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11148 +#else
11149 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
11150 +#endif
11151
11152 #include <asm-generic/pgtable.h>
11153 #endif /* __ASSEMBLY__ */
11154 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
11155 index 750f1bf..971e839 100644
11156 --- a/arch/x86/include/asm/pgtable_32.h
11157 +++ b/arch/x86/include/asm/pgtable_32.h
11158 @@ -26,9 +26,6 @@
11159 struct mm_struct;
11160 struct vm_area_struct;
11161
11162 -extern pgd_t swapper_pg_dir[1024];
11163 -extern pgd_t trampoline_pg_dir[1024];
11164 -
11165 static inline void pgtable_cache_init(void) { }
11166 static inline void check_pgt_cache(void) { }
11167 void paging_init(void);
11168 @@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11169 # include <asm/pgtable-2level.h>
11170 #endif
11171
11172 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
11173 +extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
11174 +#ifdef CONFIG_X86_PAE
11175 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
11176 +#endif
11177 +
11178 #if defined(CONFIG_HIGHPTE)
11179 #define __KM_PTE \
11180 (in_nmi() ? KM_NMI_PTE : \
11181 @@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11182 /* Clear a kernel PTE and flush it from the TLB */
11183 #define kpte_clear_flush(ptep, vaddr) \
11184 do { \
11185 + pax_open_kernel(); \
11186 pte_clear(&init_mm, (vaddr), (ptep)); \
11187 + pax_close_kernel(); \
11188 __flush_tlb_one((vaddr)); \
11189 } while (0)
11190
11191 @@ -85,6 +90,9 @@ do { \
11192
11193 #endif /* !__ASSEMBLY__ */
11194
11195 +#define HAVE_ARCH_UNMAPPED_AREA
11196 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11197 +
11198 /*
11199 * kern_addr_valid() is (1) for FLATMEM and (0) for
11200 * SPARSEMEM and DISCONTIGMEM
11201 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11202 index 5e67c15..12d5c47 100644
11203 --- a/arch/x86/include/asm/pgtable_32_types.h
11204 +++ b/arch/x86/include/asm/pgtable_32_types.h
11205 @@ -8,7 +8,7 @@
11206 */
11207 #ifdef CONFIG_X86_PAE
11208 # include <asm/pgtable-3level_types.h>
11209 -# define PMD_SIZE (1UL << PMD_SHIFT)
11210 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11211 # define PMD_MASK (~(PMD_SIZE - 1))
11212 #else
11213 # include <asm/pgtable-2level_types.h>
11214 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11215 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11216 #endif
11217
11218 +#ifdef CONFIG_PAX_KERNEXEC
11219 +#ifndef __ASSEMBLY__
11220 +extern unsigned char MODULES_EXEC_VADDR[];
11221 +extern unsigned char MODULES_EXEC_END[];
11222 +#endif
11223 +#include <asm/boot.h>
11224 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11225 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11226 +#else
11227 +#define ktla_ktva(addr) (addr)
11228 +#define ktva_ktla(addr) (addr)
11229 +#endif
11230 +
11231 #define MODULES_VADDR VMALLOC_START
11232 #define MODULES_END VMALLOC_END
11233 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11234 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11235 index c57a301..6b414ff 100644
11236 --- a/arch/x86/include/asm/pgtable_64.h
11237 +++ b/arch/x86/include/asm/pgtable_64.h
11238 @@ -16,10 +16,14 @@
11239
11240 extern pud_t level3_kernel_pgt[512];
11241 extern pud_t level3_ident_pgt[512];
11242 +extern pud_t level3_vmalloc_start_pgt[512];
11243 +extern pud_t level3_vmalloc_end_pgt[512];
11244 +extern pud_t level3_vmemmap_pgt[512];
11245 +extern pud_t level2_vmemmap_pgt[512];
11246 extern pmd_t level2_kernel_pgt[512];
11247 extern pmd_t level2_fixmap_pgt[512];
11248 -extern pmd_t level2_ident_pgt[512];
11249 -extern pgd_t init_level4_pgt[];
11250 +extern pmd_t level2_ident_pgt[512*2];
11251 +extern pgd_t init_level4_pgt[512];
11252
11253 #define swapper_pg_dir init_level4_pgt
11254
11255 @@ -74,7 +78,9 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
11256
11257 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11258 {
11259 + pax_open_kernel();
11260 *pmdp = pmd;
11261 + pax_close_kernel();
11262 }
11263
11264 static inline void native_pmd_clear(pmd_t *pmd)
11265 @@ -94,6 +100,13 @@ static inline void native_pud_clear(pud_t *pud)
11266
11267 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11268 {
11269 + pax_open_kernel();
11270 + *pgdp = pgd;
11271 + pax_close_kernel();
11272 +}
11273 +
11274 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11275 +{
11276 *pgdp = pgd;
11277 }
11278
11279 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11280 index 766ea16..5b96cb3 100644
11281 --- a/arch/x86/include/asm/pgtable_64_types.h
11282 +++ b/arch/x86/include/asm/pgtable_64_types.h
11283 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11284 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11285 #define MODULES_END _AC(0xffffffffff000000, UL)
11286 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11287 +#define MODULES_EXEC_VADDR MODULES_VADDR
11288 +#define MODULES_EXEC_END MODULES_END
11289 +
11290 +#define ktla_ktva(addr) (addr)
11291 +#define ktva_ktla(addr) (addr)
11292
11293 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11294 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11295 index d1f4a76..2f46ba1 100644
11296 --- a/arch/x86/include/asm/pgtable_types.h
11297 +++ b/arch/x86/include/asm/pgtable_types.h
11298 @@ -16,12 +16,11 @@
11299 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11300 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11301 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11302 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11303 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11304 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11305 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11306 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11307 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11308 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11309 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11310 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11311
11312 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11313 @@ -39,7 +38,6 @@
11314 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11315 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11316 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11317 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11318 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11319 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11320 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11321 @@ -55,8 +53,10 @@
11322
11323 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11324 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11325 -#else
11326 +#elif defined(CONFIG_KMEMCHECK)
11327 #define _PAGE_NX (_AT(pteval_t, 0))
11328 +#else
11329 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11330 #endif
11331
11332 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11333 @@ -93,6 +93,9 @@
11334 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11335 _PAGE_ACCESSED)
11336
11337 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
11338 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
11339 +
11340 #define __PAGE_KERNEL_EXEC \
11341 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11342 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11343 @@ -103,8 +106,8 @@
11344 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11345 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11346 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11347 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11348 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
11349 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11350 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
11351 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11352 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
11353 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
11354 @@ -163,8 +166,8 @@
11355 * bits are combined, this will alow user to access the high address mapped
11356 * VDSO in the presence of CONFIG_COMPAT_VDSO
11357 */
11358 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11359 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11360 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11361 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11362 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11363 #endif
11364
11365 @@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11366 {
11367 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11368 }
11369 +#endif
11370
11371 +#if PAGETABLE_LEVELS == 3
11372 +#include <asm-generic/pgtable-nopud.h>
11373 +#endif
11374 +
11375 +#if PAGETABLE_LEVELS == 2
11376 +#include <asm-generic/pgtable-nopmd.h>
11377 +#endif
11378 +
11379 +#ifndef __ASSEMBLY__
11380 #if PAGETABLE_LEVELS > 3
11381 typedef struct { pudval_t pud; } pud_t;
11382
11383 @@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11384 return pud.pud;
11385 }
11386 #else
11387 -#include <asm-generic/pgtable-nopud.h>
11388 -
11389 static inline pudval_t native_pud_val(pud_t pud)
11390 {
11391 return native_pgd_val(pud.pgd);
11392 @@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11393 return pmd.pmd;
11394 }
11395 #else
11396 -#include <asm-generic/pgtable-nopmd.h>
11397 -
11398 static inline pmdval_t native_pmd_val(pmd_t pmd)
11399 {
11400 return native_pgd_val(pmd.pud.pgd);
11401 @@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
11402
11403 extern pteval_t __supported_pte_mask;
11404 extern void set_nx(void);
11405 +
11406 +#ifdef CONFIG_X86_32
11407 +#ifdef CONFIG_X86_PAE
11408 extern int nx_enabled;
11409 +#else
11410 +#define nx_enabled (0)
11411 +#endif
11412 +#else
11413 +#define nx_enabled (1)
11414 +#endif
11415
11416 #define pgprot_writecombine pgprot_writecombine
11417 extern pgprot_t pgprot_writecombine(pgprot_t prot);
11418 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11419 index fa04dea..5f823fc 100644
11420 --- a/arch/x86/include/asm/processor.h
11421 +++ b/arch/x86/include/asm/processor.h
11422 @@ -272,7 +272,7 @@ struct tss_struct {
11423
11424 } ____cacheline_aligned;
11425
11426 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11427 +extern struct tss_struct init_tss[NR_CPUS];
11428
11429 /*
11430 * Save the original ist values for checking stack pointers during debugging
11431 @@ -911,11 +911,18 @@ static inline void spin_lock_prefetch(const void *x)
11432 */
11433 #define TASK_SIZE PAGE_OFFSET
11434 #define TASK_SIZE_MAX TASK_SIZE
11435 +
11436 +#ifdef CONFIG_PAX_SEGMEXEC
11437 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11438 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11439 +#else
11440 #define STACK_TOP TASK_SIZE
11441 -#define STACK_TOP_MAX STACK_TOP
11442 +#endif
11443 +
11444 +#define STACK_TOP_MAX TASK_SIZE
11445
11446 #define INIT_THREAD { \
11447 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11448 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11449 .vm86_info = NULL, \
11450 .sysenter_cs = __KERNEL_CS, \
11451 .io_bitmap_ptr = NULL, \
11452 @@ -929,7 +936,7 @@ static inline void spin_lock_prefetch(const void *x)
11453 */
11454 #define INIT_TSS { \
11455 .x86_tss = { \
11456 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11457 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11458 .ss0 = __KERNEL_DS, \
11459 .ss1 = __KERNEL_CS, \
11460 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11461 @@ -940,11 +947,7 @@ static inline void spin_lock_prefetch(const void *x)
11462 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11463
11464 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11465 -#define KSTK_TOP(info) \
11466 -({ \
11467 - unsigned long *__ptr = (unsigned long *)(info); \
11468 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11469 -})
11470 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11471
11472 /*
11473 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11474 @@ -959,7 +962,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11475 #define task_pt_regs(task) \
11476 ({ \
11477 struct pt_regs *__regs__; \
11478 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11479 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11480 __regs__ - 1; \
11481 })
11482
11483 @@ -969,13 +972,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11484 /*
11485 * User space process size. 47bits minus one guard page.
11486 */
11487 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11488 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11489
11490 /* This decides where the kernel will search for a free chunk of vm
11491 * space during mmap's.
11492 */
11493 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11494 - 0xc0000000 : 0xFFFFe000)
11495 + 0xc0000000 : 0xFFFFf000)
11496
11497 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
11498 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11499 @@ -986,11 +989,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11500 #define STACK_TOP_MAX TASK_SIZE_MAX
11501
11502 #define INIT_THREAD { \
11503 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11504 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11505 }
11506
11507 #define INIT_TSS { \
11508 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11509 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11510 }
11511
11512 /*
11513 @@ -1012,6 +1015,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11514 */
11515 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11516
11517 +#ifdef CONFIG_PAX_SEGMEXEC
11518 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11519 +#endif
11520 +
11521 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11522
11523 /* Get/set a process' ability to use the timestamp counter instruction */
11524 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11525 index 0f0d908..f2e3da2 100644
11526 --- a/arch/x86/include/asm/ptrace.h
11527 +++ b/arch/x86/include/asm/ptrace.h
11528 @@ -151,28 +151,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
11529 }
11530
11531 /*
11532 - * user_mode_vm(regs) determines whether a register set came from user mode.
11533 + * user_mode(regs) determines whether a register set came from user mode.
11534 * This is true if V8086 mode was enabled OR if the register set was from
11535 * protected mode with RPL-3 CS value. This tricky test checks that with
11536 * one comparison. Many places in the kernel can bypass this full check
11537 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11538 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11539 + * be used.
11540 */
11541 -static inline int user_mode(struct pt_regs *regs)
11542 +static inline int user_mode_novm(struct pt_regs *regs)
11543 {
11544 #ifdef CONFIG_X86_32
11545 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11546 #else
11547 - return !!(regs->cs & 3);
11548 + return !!(regs->cs & SEGMENT_RPL_MASK);
11549 #endif
11550 }
11551
11552 -static inline int user_mode_vm(struct pt_regs *regs)
11553 +static inline int user_mode(struct pt_regs *regs)
11554 {
11555 #ifdef CONFIG_X86_32
11556 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11557 USER_RPL;
11558 #else
11559 - return user_mode(regs);
11560 + return user_mode_novm(regs);
11561 #endif
11562 }
11563
11564 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
11565 index 562d4fd..6e39df1 100644
11566 --- a/arch/x86/include/asm/reboot.h
11567 +++ b/arch/x86/include/asm/reboot.h
11568 @@ -6,19 +6,19 @@
11569 struct pt_regs;
11570
11571 struct machine_ops {
11572 - void (*restart)(char *cmd);
11573 - void (*halt)(void);
11574 - void (*power_off)(void);
11575 + void (* __noreturn restart)(char *cmd);
11576 + void (* __noreturn halt)(void);
11577 + void (* __noreturn power_off)(void);
11578 void (*shutdown)(void);
11579 void (*crash_shutdown)(struct pt_regs *);
11580 - void (*emergency_restart)(void);
11581 -};
11582 + void (* __noreturn emergency_restart)(void);
11583 +} __no_const;
11584
11585 extern struct machine_ops machine_ops;
11586
11587 void native_machine_crash_shutdown(struct pt_regs *regs);
11588 void native_machine_shutdown(void);
11589 -void machine_real_restart(const unsigned char *code, int length);
11590 +void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
11591
11592 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
11593 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
11594 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
11595 index 606ede1..dbfff37 100644
11596 --- a/arch/x86/include/asm/rwsem.h
11597 +++ b/arch/x86/include/asm/rwsem.h
11598 @@ -118,6 +118,14 @@ static inline void __down_read(struct rw_semaphore *sem)
11599 {
11600 asm volatile("# beginning down_read\n\t"
11601 LOCK_PREFIX _ASM_INC "(%1)\n\t"
11602 +
11603 +#ifdef CONFIG_PAX_REFCOUNT
11604 + "jno 0f\n"
11605 + LOCK_PREFIX _ASM_DEC "(%1)\n\t"
11606 + "int $4\n0:\n"
11607 + _ASM_EXTABLE(0b, 0b)
11608 +#endif
11609 +
11610 /* adds 0x00000001, returns the old value */
11611 " jns 1f\n"
11612 " call call_rwsem_down_read_failed\n"
11613 @@ -139,6 +147,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
11614 "1:\n\t"
11615 " mov %1,%2\n\t"
11616 " add %3,%2\n\t"
11617 +
11618 +#ifdef CONFIG_PAX_REFCOUNT
11619 + "jno 0f\n"
11620 + "sub %3,%2\n"
11621 + "int $4\n0:\n"
11622 + _ASM_EXTABLE(0b, 0b)
11623 +#endif
11624 +
11625 " jle 2f\n\t"
11626 LOCK_PREFIX " cmpxchg %2,%0\n\t"
11627 " jnz 1b\n\t"
11628 @@ -160,6 +176,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
11629 tmp = RWSEM_ACTIVE_WRITE_BIAS;
11630 asm volatile("# beginning down_write\n\t"
11631 LOCK_PREFIX " xadd %1,(%2)\n\t"
11632 +
11633 +#ifdef CONFIG_PAX_REFCOUNT
11634 + "jno 0f\n"
11635 + "mov %1,(%2)\n"
11636 + "int $4\n0:\n"
11637 + _ASM_EXTABLE(0b, 0b)
11638 +#endif
11639 +
11640 /* subtract 0x0000ffff, returns the old value */
11641 " test %1,%1\n\t"
11642 /* was the count 0 before? */
11643 @@ -198,6 +222,14 @@ static inline void __up_read(struct rw_semaphore *sem)
11644 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
11645 asm volatile("# beginning __up_read\n\t"
11646 LOCK_PREFIX " xadd %1,(%2)\n\t"
11647 +
11648 +#ifdef CONFIG_PAX_REFCOUNT
11649 + "jno 0f\n"
11650 + "mov %1,(%2)\n"
11651 + "int $4\n0:\n"
11652 + _ASM_EXTABLE(0b, 0b)
11653 +#endif
11654 +
11655 /* subtracts 1, returns the old value */
11656 " jns 1f\n\t"
11657 " call call_rwsem_wake\n"
11658 @@ -216,6 +248,14 @@ static inline void __up_write(struct rw_semaphore *sem)
11659 rwsem_count_t tmp;
11660 asm volatile("# beginning __up_write\n\t"
11661 LOCK_PREFIX " xadd %1,(%2)\n\t"
11662 +
11663 +#ifdef CONFIG_PAX_REFCOUNT
11664 + "jno 0f\n"
11665 + "mov %1,(%2)\n"
11666 + "int $4\n0:\n"
11667 + _ASM_EXTABLE(0b, 0b)
11668 +#endif
11669 +
11670 /* tries to transition
11671 0xffff0001 -> 0x00000000 */
11672 " jz 1f\n"
11673 @@ -234,6 +274,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11674 {
11675 asm volatile("# beginning __downgrade_write\n\t"
11676 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
11677 +
11678 +#ifdef CONFIG_PAX_REFCOUNT
11679 + "jno 0f\n"
11680 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
11681 + "int $4\n0:\n"
11682 + _ASM_EXTABLE(0b, 0b)
11683 +#endif
11684 +
11685 /*
11686 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
11687 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
11688 @@ -253,7 +301,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11689 static inline void rwsem_atomic_add(rwsem_count_t delta,
11690 struct rw_semaphore *sem)
11691 {
11692 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
11693 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
11694 +
11695 +#ifdef CONFIG_PAX_REFCOUNT
11696 + "jno 0f\n"
11697 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
11698 + "int $4\n0:\n"
11699 + _ASM_EXTABLE(0b, 0b)
11700 +#endif
11701 +
11702 : "+m" (sem->count)
11703 : "er" (delta));
11704 }
11705 @@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
11706 {
11707 rwsem_count_t tmp = delta;
11708
11709 - asm volatile(LOCK_PREFIX "xadd %0,%1"
11710 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
11711 +
11712 +#ifdef CONFIG_PAX_REFCOUNT
11713 + "jno 0f\n"
11714 + "mov %0,%1\n"
11715 + "int $4\n0:\n"
11716 + _ASM_EXTABLE(0b, 0b)
11717 +#endif
11718 +
11719 : "+r" (tmp), "+m" (sem->count)
11720 : : "memory");
11721
11722 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
11723 index 14e0ed8..7f7dd5e 100644
11724 --- a/arch/x86/include/asm/segment.h
11725 +++ b/arch/x86/include/asm/segment.h
11726 @@ -62,10 +62,15 @@
11727 * 26 - ESPFIX small SS
11728 * 27 - per-cpu [ offset to per-cpu data area ]
11729 * 28 - stack_canary-20 [ for stack protector ]
11730 - * 29 - unused
11731 - * 30 - unused
11732 + * 29 - PCI BIOS CS
11733 + * 30 - PCI BIOS DS
11734 * 31 - TSS for double fault handler
11735 */
11736 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
11737 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
11738 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
11739 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
11740 +
11741 #define GDT_ENTRY_TLS_MIN 6
11742 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
11743
11744 @@ -77,6 +82,8 @@
11745
11746 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
11747
11748 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
11749 +
11750 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
11751
11752 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
11753 @@ -88,7 +95,7 @@
11754 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
11755 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
11756
11757 -#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
11758 +#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
11759 #ifdef CONFIG_SMP
11760 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
11761 #else
11762 @@ -102,6 +109,12 @@
11763 #define __KERNEL_STACK_CANARY 0
11764 #endif
11765
11766 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
11767 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
11768 +
11769 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
11770 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
11771 +
11772 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
11773
11774 /*
11775 @@ -139,7 +152,7 @@
11776 */
11777
11778 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
11779 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
11780 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
11781
11782
11783 #else
11784 @@ -163,6 +176,8 @@
11785 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
11786 #define __USER32_DS __USER_DS
11787
11788 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
11789 +
11790 #define GDT_ENTRY_TSS 8 /* needs two entries */
11791 #define GDT_ENTRY_LDT 10 /* needs two entries */
11792 #define GDT_ENTRY_TLS_MIN 12
11793 @@ -183,6 +198,7 @@
11794 #endif
11795
11796 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
11797 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
11798 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
11799 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
11800 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
11801 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
11802 index 4c2f63c..5685db2 100644
11803 --- a/arch/x86/include/asm/smp.h
11804 +++ b/arch/x86/include/asm/smp.h
11805 @@ -24,7 +24,7 @@ extern unsigned int num_processors;
11806 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
11807 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
11808 DECLARE_PER_CPU(u16, cpu_llc_id);
11809 -DECLARE_PER_CPU(int, cpu_number);
11810 +DECLARE_PER_CPU(unsigned int, cpu_number);
11811
11812 static inline struct cpumask *cpu_sibling_mask(int cpu)
11813 {
11814 @@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
11815 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
11816
11817 /* Static state in head.S used to set up a CPU */
11818 -extern struct {
11819 - void *sp;
11820 - unsigned short ss;
11821 -} stack_start;
11822 +extern unsigned long stack_start; /* Initial stack pointer address */
11823
11824 struct smp_ops {
11825 void (*smp_prepare_boot_cpu)(void);
11826 @@ -60,7 +57,7 @@ struct smp_ops {
11827
11828 void (*send_call_func_ipi)(const struct cpumask *mask);
11829 void (*send_call_func_single_ipi)(int cpu);
11830 -};
11831 +} __no_const;
11832
11833 /* Globals due to paravirt */
11834 extern void set_cpu_sibling_map(int cpu);
11835 @@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitdata;
11836 extern int safe_smp_processor_id(void);
11837
11838 #elif defined(CONFIG_X86_64_SMP)
11839 -#define raw_smp_processor_id() (percpu_read(cpu_number))
11840 -
11841 -#define stack_smp_processor_id() \
11842 -({ \
11843 - struct thread_info *ti; \
11844 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
11845 - ti->cpu; \
11846 -})
11847 +#define raw_smp_processor_id() (percpu_read(cpu_number))
11848 +#define stack_smp_processor_id() raw_smp_processor_id()
11849 #define safe_smp_processor_id() smp_processor_id()
11850
11851 #endif
11852 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
11853 index 4e77853..4359783 100644
11854 --- a/arch/x86/include/asm/spinlock.h
11855 +++ b/arch/x86/include/asm/spinlock.h
11856 @@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(raw_rwlock_t *lock)
11857 static inline void __raw_read_lock(raw_rwlock_t *rw)
11858 {
11859 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
11860 +
11861 +#ifdef CONFIG_PAX_REFCOUNT
11862 + "jno 0f\n"
11863 + LOCK_PREFIX " addl $1,(%0)\n"
11864 + "int $4\n0:\n"
11865 + _ASM_EXTABLE(0b, 0b)
11866 +#endif
11867 +
11868 "jns 1f\n"
11869 "call __read_lock_failed\n\t"
11870 "1:\n"
11871 @@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
11872 static inline void __raw_write_lock(raw_rwlock_t *rw)
11873 {
11874 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
11875 +
11876 +#ifdef CONFIG_PAX_REFCOUNT
11877 + "jno 0f\n"
11878 + LOCK_PREFIX " addl %1,(%0)\n"
11879 + "int $4\n0:\n"
11880 + _ASM_EXTABLE(0b, 0b)
11881 +#endif
11882 +
11883 "jz 1f\n"
11884 "call __write_lock_failed\n\t"
11885 "1:\n"
11886 @@ -286,12 +302,29 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
11887
11888 static inline void __raw_read_unlock(raw_rwlock_t *rw)
11889 {
11890 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
11891 + asm volatile(LOCK_PREFIX "incl %0\n"
11892 +
11893 +#ifdef CONFIG_PAX_REFCOUNT
11894 + "jno 0f\n"
11895 + LOCK_PREFIX "decl %0\n"
11896 + "int $4\n0:\n"
11897 + _ASM_EXTABLE(0b, 0b)
11898 +#endif
11899 +
11900 + :"+m" (rw->lock) : : "memory");
11901 }
11902
11903 static inline void __raw_write_unlock(raw_rwlock_t *rw)
11904 {
11905 - asm volatile(LOCK_PREFIX "addl %1, %0"
11906 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
11907 +
11908 +#ifdef CONFIG_PAX_REFCOUNT
11909 + "jno 0f\n"
11910 + LOCK_PREFIX "subl %1, %0\n"
11911 + "int $4\n0:\n"
11912 + _ASM_EXTABLE(0b, 0b)
11913 +#endif
11914 +
11915 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
11916 }
11917
11918 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
11919 index 1575177..cb23f52 100644
11920 --- a/arch/x86/include/asm/stackprotector.h
11921 +++ b/arch/x86/include/asm/stackprotector.h
11922 @@ -48,7 +48,7 @@
11923 * head_32 for boot CPU and setup_per_cpu_areas() for others.
11924 */
11925 #define GDT_STACK_CANARY_INIT \
11926 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
11927 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
11928
11929 /*
11930 * Initialize the stackprotector canary value.
11931 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
11932
11933 static inline void load_stack_canary_segment(void)
11934 {
11935 -#ifdef CONFIG_X86_32
11936 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
11937 asm volatile ("mov %0, %%gs" : : "r" (0));
11938 #endif
11939 }
11940 diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
11941 index e0fbf29..858ef4a 100644
11942 --- a/arch/x86/include/asm/system.h
11943 +++ b/arch/x86/include/asm/system.h
11944 @@ -132,7 +132,7 @@ do { \
11945 "thread_return:\n\t" \
11946 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
11947 __switch_canary \
11948 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
11949 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
11950 "movq %%rax,%%rdi\n\t" \
11951 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
11952 "jnz ret_from_fork\n\t" \
11953 @@ -143,7 +143,7 @@ do { \
11954 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
11955 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
11956 [_tif_fork] "i" (_TIF_FORK), \
11957 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
11958 + [thread_info] "m" (per_cpu_var(current_tinfo)), \
11959 [current_task] "m" (per_cpu_var(current_task)) \
11960 __switch_canary_iparam \
11961 : "memory", "cc" __EXTRA_CLOBBER)
11962 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
11963 {
11964 unsigned long __limit;
11965 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
11966 - return __limit + 1;
11967 + return __limit;
11968 }
11969
11970 static inline void native_clts(void)
11971 @@ -340,12 +340,12 @@ void enable_hlt(void);
11972
11973 void cpu_idle_wait(void);
11974
11975 -extern unsigned long arch_align_stack(unsigned long sp);
11976 +#define arch_align_stack(x) ((x) & ~0xfUL)
11977 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
11978
11979 void default_idle(void);
11980
11981 -void stop_this_cpu(void *dummy);
11982 +void stop_this_cpu(void *dummy) __noreturn;
11983
11984 /*
11985 * Force strict CPU ordering.
11986 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
11987 index 19c3ce4..8962535 100644
11988 --- a/arch/x86/include/asm/thread_info.h
11989 +++ b/arch/x86/include/asm/thread_info.h
11990 @@ -10,6 +10,7 @@
11991 #include <linux/compiler.h>
11992 #include <asm/page.h>
11993 #include <asm/types.h>
11994 +#include <asm/percpu.h>
11995
11996 /*
11997 * low level task data that entry.S needs immediate access to
11998 @@ -24,7 +25,6 @@ struct exec_domain;
11999 #include <asm/atomic.h>
12000
12001 struct thread_info {
12002 - struct task_struct *task; /* main task structure */
12003 struct exec_domain *exec_domain; /* execution domain */
12004 __u32 flags; /* low level flags */
12005 __u32 status; /* thread synchronous flags */
12006 @@ -34,18 +34,12 @@ struct thread_info {
12007 mm_segment_t addr_limit;
12008 struct restart_block restart_block;
12009 void __user *sysenter_return;
12010 -#ifdef CONFIG_X86_32
12011 - unsigned long previous_esp; /* ESP of the previous stack in
12012 - case of nested (IRQ) stacks
12013 - */
12014 - __u8 supervisor_stack[0];
12015 -#endif
12016 + unsigned long lowest_stack;
12017 int uaccess_err;
12018 };
12019
12020 -#define INIT_THREAD_INFO(tsk) \
12021 +#define INIT_THREAD_INFO \
12022 { \
12023 - .task = &tsk, \
12024 .exec_domain = &default_exec_domain, \
12025 .flags = 0, \
12026 .cpu = 0, \
12027 @@ -56,7 +50,7 @@ struct thread_info {
12028 }, \
12029 }
12030
12031 -#define init_thread_info (init_thread_union.thread_info)
12032 +#define init_thread_info (init_thread_union.stack)
12033 #define init_stack (init_thread_union.stack)
12034
12035 #else /* !__ASSEMBLY__ */
12036 @@ -163,45 +157,40 @@ struct thread_info {
12037 #define alloc_thread_info(tsk) \
12038 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
12039
12040 -#ifdef CONFIG_X86_32
12041 -
12042 -#define STACK_WARN (THREAD_SIZE/8)
12043 -/*
12044 - * macros/functions for gaining access to the thread information structure
12045 - *
12046 - * preempt_count needs to be 1 initially, until the scheduler is functional.
12047 - */
12048 -#ifndef __ASSEMBLY__
12049 -
12050 -
12051 -/* how to get the current stack pointer from C */
12052 -register unsigned long current_stack_pointer asm("esp") __used;
12053 -
12054 -/* how to get the thread information struct from C */
12055 -static inline struct thread_info *current_thread_info(void)
12056 -{
12057 - return (struct thread_info *)
12058 - (current_stack_pointer & ~(THREAD_SIZE - 1));
12059 -}
12060 -
12061 -#else /* !__ASSEMBLY__ */
12062 -
12063 +#ifdef __ASSEMBLY__
12064 /* how to get the thread information struct from ASM */
12065 #define GET_THREAD_INFO(reg) \
12066 - movl $-THREAD_SIZE, reg; \
12067 - andl %esp, reg
12068 + mov PER_CPU_VAR(current_tinfo), reg
12069
12070 /* use this one if reg already contains %esp */
12071 -#define GET_THREAD_INFO_WITH_ESP(reg) \
12072 - andl $-THREAD_SIZE, reg
12073 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
12074 +#else
12075 +/* how to get the thread information struct from C */
12076 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
12077 +
12078 +static __always_inline struct thread_info *current_thread_info(void)
12079 +{
12080 + return percpu_read_stable(current_tinfo);
12081 +}
12082 +#endif
12083 +
12084 +#ifdef CONFIG_X86_32
12085 +
12086 +#define STACK_WARN (THREAD_SIZE/8)
12087 +/*
12088 + * macros/functions for gaining access to the thread information structure
12089 + *
12090 + * preempt_count needs to be 1 initially, until the scheduler is functional.
12091 + */
12092 +#ifndef __ASSEMBLY__
12093 +
12094 +/* how to get the current stack pointer from C */
12095 +register unsigned long current_stack_pointer asm("esp") __used;
12096
12097 #endif
12098
12099 #else /* X86_32 */
12100
12101 -#include <asm/percpu.h>
12102 -#define KERNEL_STACK_OFFSET (5*8)
12103 -
12104 /*
12105 * macros/functions for gaining access to the thread information structure
12106 * preempt_count needs to be 1 initially, until the scheduler is functional.
12107 @@ -209,21 +198,8 @@ static inline struct thread_info *current_thread_info(void)
12108 #ifndef __ASSEMBLY__
12109 DECLARE_PER_CPU(unsigned long, kernel_stack);
12110
12111 -static inline struct thread_info *current_thread_info(void)
12112 -{
12113 - struct thread_info *ti;
12114 - ti = (void *)(percpu_read_stable(kernel_stack) +
12115 - KERNEL_STACK_OFFSET - THREAD_SIZE);
12116 - return ti;
12117 -}
12118 -
12119 -#else /* !__ASSEMBLY__ */
12120 -
12121 -/* how to get the thread information struct from ASM */
12122 -#define GET_THREAD_INFO(reg) \
12123 - movq PER_CPU_VAR(kernel_stack),reg ; \
12124 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
12125 -
12126 +/* how to get the current stack pointer from C */
12127 +register unsigned long current_stack_pointer asm("rsp") __used;
12128 #endif
12129
12130 #endif /* !X86_32 */
12131 @@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
12132 extern void free_thread_info(struct thread_info *ti);
12133 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
12134 #define arch_task_cache_init arch_task_cache_init
12135 +
12136 +#define __HAVE_THREAD_FUNCTIONS
12137 +#define task_thread_info(task) (&(task)->tinfo)
12138 +#define task_stack_page(task) ((task)->stack)
12139 +#define setup_thread_stack(p, org) do {} while (0)
12140 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12141 +
12142 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
12143 +extern struct task_struct *alloc_task_struct(void);
12144 +extern void free_task_struct(struct task_struct *);
12145 +
12146 #endif
12147 #endif /* _ASM_X86_THREAD_INFO_H */
12148 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12149 index 61c5874..8a046e9 100644
12150 --- a/arch/x86/include/asm/uaccess.h
12151 +++ b/arch/x86/include/asm/uaccess.h
12152 @@ -8,12 +8,15 @@
12153 #include <linux/thread_info.h>
12154 #include <linux/prefetch.h>
12155 #include <linux/string.h>
12156 +#include <linux/sched.h>
12157 #include <asm/asm.h>
12158 #include <asm/page.h>
12159
12160 #define VERIFY_READ 0
12161 #define VERIFY_WRITE 1
12162
12163 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
12164 +
12165 /*
12166 * The fs value determines whether argument validity checking should be
12167 * performed or not. If get_fs() == USER_DS, checking is performed, with
12168 @@ -29,7 +32,12 @@
12169
12170 #define get_ds() (KERNEL_DS)
12171 #define get_fs() (current_thread_info()->addr_limit)
12172 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12173 +void __set_fs(mm_segment_t x);
12174 +void set_fs(mm_segment_t x);
12175 +#else
12176 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12177 +#endif
12178
12179 #define segment_eq(a, b) ((a).seg == (b).seg)
12180
12181 @@ -77,7 +85,33 @@
12182 * checks that the pointer is in the user space range - after calling
12183 * this function, memory access functions may still return -EFAULT.
12184 */
12185 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12186 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12187 +#define access_ok(type, addr, size) \
12188 +({ \
12189 + long __size = size; \
12190 + unsigned long __addr = (unsigned long)addr; \
12191 + unsigned long __addr_ao = __addr & PAGE_MASK; \
12192 + unsigned long __end_ao = __addr + __size - 1; \
12193 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12194 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12195 + while(__addr_ao <= __end_ao) { \
12196 + char __c_ao; \
12197 + __addr_ao += PAGE_SIZE; \
12198 + if (__size > PAGE_SIZE) \
12199 + cond_resched(); \
12200 + if (__get_user(__c_ao, (char __user *)__addr)) \
12201 + break; \
12202 + if (type != VERIFY_WRITE) { \
12203 + __addr = __addr_ao; \
12204 + continue; \
12205 + } \
12206 + if (__put_user(__c_ao, (char __user *)__addr)) \
12207 + break; \
12208 + __addr = __addr_ao; \
12209 + } \
12210 + } \
12211 + __ret_ao; \
12212 +})
12213
12214 /*
12215 * The exception table consists of pairs of addresses: the first is the
12216 @@ -183,12 +217,20 @@ extern int __get_user_bad(void);
12217 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12218 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12219
12220 -
12221 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12222 +#define __copyuser_seg "gs;"
12223 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12224 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12225 +#else
12226 +#define __copyuser_seg
12227 +#define __COPYUSER_SET_ES
12228 +#define __COPYUSER_RESTORE_ES
12229 +#endif
12230
12231 #ifdef CONFIG_X86_32
12232 #define __put_user_asm_u64(x, addr, err, errret) \
12233 - asm volatile("1: movl %%eax,0(%2)\n" \
12234 - "2: movl %%edx,4(%2)\n" \
12235 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12236 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12237 "3:\n" \
12238 ".section .fixup,\"ax\"\n" \
12239 "4: movl %3,%0\n" \
12240 @@ -200,8 +242,8 @@ extern int __get_user_bad(void);
12241 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12242
12243 #define __put_user_asm_ex_u64(x, addr) \
12244 - asm volatile("1: movl %%eax,0(%1)\n" \
12245 - "2: movl %%edx,4(%1)\n" \
12246 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12247 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12248 "3:\n" \
12249 _ASM_EXTABLE(1b, 2b - 1b) \
12250 _ASM_EXTABLE(2b, 3b - 2b) \
12251 @@ -253,7 +295,7 @@ extern void __put_user_8(void);
12252 __typeof__(*(ptr)) __pu_val; \
12253 __chk_user_ptr(ptr); \
12254 might_fault(); \
12255 - __pu_val = x; \
12256 + __pu_val = (x); \
12257 switch (sizeof(*(ptr))) { \
12258 case 1: \
12259 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12260 @@ -374,7 +416,7 @@ do { \
12261 } while (0)
12262
12263 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12264 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12265 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12266 "2:\n" \
12267 ".section .fixup,\"ax\"\n" \
12268 "3: mov %3,%0\n" \
12269 @@ -382,7 +424,7 @@ do { \
12270 " jmp 2b\n" \
12271 ".previous\n" \
12272 _ASM_EXTABLE(1b, 3b) \
12273 - : "=r" (err), ltype(x) \
12274 + : "=r" (err), ltype (x) \
12275 : "m" (__m(addr)), "i" (errret), "0" (err))
12276
12277 #define __get_user_size_ex(x, ptr, size) \
12278 @@ -407,7 +449,7 @@ do { \
12279 } while (0)
12280
12281 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12282 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12283 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12284 "2:\n" \
12285 _ASM_EXTABLE(1b, 2b - 1b) \
12286 : ltype(x) : "m" (__m(addr)))
12287 @@ -424,13 +466,24 @@ do { \
12288 int __gu_err; \
12289 unsigned long __gu_val; \
12290 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12291 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
12292 + (x) = (__typeof__(*(ptr)))__gu_val; \
12293 __gu_err; \
12294 })
12295
12296 /* FIXME: this hack is definitely wrong -AK */
12297 struct __large_struct { unsigned long buf[100]; };
12298 -#define __m(x) (*(struct __large_struct __user *)(x))
12299 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12300 +#define ____m(x) \
12301 +({ \
12302 + unsigned long ____x = (unsigned long)(x); \
12303 + if (____x < PAX_USER_SHADOW_BASE) \
12304 + ____x += PAX_USER_SHADOW_BASE; \
12305 + (void __user *)____x; \
12306 +})
12307 +#else
12308 +#define ____m(x) (x)
12309 +#endif
12310 +#define __m(x) (*(struct __large_struct __user *)____m(x))
12311
12312 /*
12313 * Tell gcc we read from memory instead of writing: this is because
12314 @@ -438,7 +491,7 @@ struct __large_struct { unsigned long buf[100]; };
12315 * aliasing issues.
12316 */
12317 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12318 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12319 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12320 "2:\n" \
12321 ".section .fixup,\"ax\"\n" \
12322 "3: mov %3,%0\n" \
12323 @@ -446,10 +499,10 @@ struct __large_struct { unsigned long buf[100]; };
12324 ".previous\n" \
12325 _ASM_EXTABLE(1b, 3b) \
12326 : "=r"(err) \
12327 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12328 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12329
12330 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12331 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12332 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12333 "2:\n" \
12334 _ASM_EXTABLE(1b, 2b - 1b) \
12335 : : ltype(x), "m" (__m(addr)))
12336 @@ -488,8 +541,12 @@ struct __large_struct { unsigned long buf[100]; };
12337 * On error, the variable @x is set to zero.
12338 */
12339
12340 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12341 +#define __get_user(x, ptr) get_user((x), (ptr))
12342 +#else
12343 #define __get_user(x, ptr) \
12344 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12345 +#endif
12346
12347 /**
12348 * __put_user: - Write a simple value into user space, with less checking.
12349 @@ -511,8 +568,12 @@ struct __large_struct { unsigned long buf[100]; };
12350 * Returns zero on success, or -EFAULT on error.
12351 */
12352
12353 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12354 +#define __put_user(x, ptr) put_user((x), (ptr))
12355 +#else
12356 #define __put_user(x, ptr) \
12357 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12358 +#endif
12359
12360 #define __get_user_unaligned __get_user
12361 #define __put_user_unaligned __put_user
12362 @@ -530,7 +591,7 @@ struct __large_struct { unsigned long buf[100]; };
12363 #define get_user_ex(x, ptr) do { \
12364 unsigned long __gue_val; \
12365 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12366 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
12367 + (x) = (__typeof__(*(ptr)))__gue_val; \
12368 } while (0)
12369
12370 #ifdef CONFIG_X86_WP_WORKS_OK
12371 @@ -567,6 +628,7 @@ extern struct movsl_mask {
12372
12373 #define ARCH_HAS_NOCACHE_UACCESS 1
12374
12375 +#define ARCH_HAS_SORT_EXTABLE
12376 #ifdef CONFIG_X86_32
12377 # include "uaccess_32.h"
12378 #else
12379 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12380 index 632fb44..e30e334 100644
12381 --- a/arch/x86/include/asm/uaccess_32.h
12382 +++ b/arch/x86/include/asm/uaccess_32.h
12383 @@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12384 static __always_inline unsigned long __must_check
12385 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12386 {
12387 + pax_track_stack();
12388 +
12389 + if ((long)n < 0)
12390 + return n;
12391 +
12392 if (__builtin_constant_p(n)) {
12393 unsigned long ret;
12394
12395 @@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12396 return ret;
12397 }
12398 }
12399 + if (!__builtin_constant_p(n))
12400 + check_object_size(from, n, true);
12401 return __copy_to_user_ll(to, from, n);
12402 }
12403
12404 @@ -83,12 +90,16 @@ static __always_inline unsigned long __must_check
12405 __copy_to_user(void __user *to, const void *from, unsigned long n)
12406 {
12407 might_fault();
12408 +
12409 return __copy_to_user_inatomic(to, from, n);
12410 }
12411
12412 static __always_inline unsigned long
12413 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12414 {
12415 + if ((long)n < 0)
12416 + return n;
12417 +
12418 /* Avoid zeroing the tail if the copy fails..
12419 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12420 * but as the zeroing behaviour is only significant when n is not
12421 @@ -138,6 +149,12 @@ static __always_inline unsigned long
12422 __copy_from_user(void *to, const void __user *from, unsigned long n)
12423 {
12424 might_fault();
12425 +
12426 + pax_track_stack();
12427 +
12428 + if ((long)n < 0)
12429 + return n;
12430 +
12431 if (__builtin_constant_p(n)) {
12432 unsigned long ret;
12433
12434 @@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
12435 return ret;
12436 }
12437 }
12438 + if (!__builtin_constant_p(n))
12439 + check_object_size(to, n, false);
12440 return __copy_from_user_ll(to, from, n);
12441 }
12442
12443 @@ -160,6 +179,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
12444 const void __user *from, unsigned long n)
12445 {
12446 might_fault();
12447 +
12448 + if ((long)n < 0)
12449 + return n;
12450 +
12451 if (__builtin_constant_p(n)) {
12452 unsigned long ret;
12453
12454 @@ -182,14 +205,62 @@ static __always_inline unsigned long
12455 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
12456 unsigned long n)
12457 {
12458 - return __copy_from_user_ll_nocache_nozero(to, from, n);
12459 + if ((long)n < 0)
12460 + return n;
12461 +
12462 + return __copy_from_user_ll_nocache_nozero(to, from, n);
12463 +}
12464 +
12465 +/**
12466 + * copy_to_user: - Copy a block of data into user space.
12467 + * @to: Destination address, in user space.
12468 + * @from: Source address, in kernel space.
12469 + * @n: Number of bytes to copy.
12470 + *
12471 + * Context: User context only. This function may sleep.
12472 + *
12473 + * Copy data from kernel space to user space.
12474 + *
12475 + * Returns number of bytes that could not be copied.
12476 + * On success, this will be zero.
12477 + */
12478 +static __always_inline unsigned long __must_check
12479 +copy_to_user(void __user *to, const void *from, unsigned long n)
12480 +{
12481 + if (access_ok(VERIFY_WRITE, to, n))
12482 + n = __copy_to_user(to, from, n);
12483 + return n;
12484 +}
12485 +
12486 +/**
12487 + * copy_from_user: - Copy a block of data from user space.
12488 + * @to: Destination address, in kernel space.
12489 + * @from: Source address, in user space.
12490 + * @n: Number of bytes to copy.
12491 + *
12492 + * Context: User context only. This function may sleep.
12493 + *
12494 + * Copy data from user space to kernel space.
12495 + *
12496 + * Returns number of bytes that could not be copied.
12497 + * On success, this will be zero.
12498 + *
12499 + * If some data could not be copied, this function will pad the copied
12500 + * data to the requested size using zero bytes.
12501 + */
12502 +static __always_inline unsigned long __must_check
12503 +copy_from_user(void *to, const void __user *from, unsigned long n)
12504 +{
12505 + if (access_ok(VERIFY_READ, from, n))
12506 + n = __copy_from_user(to, from, n);
12507 + else if ((long)n > 0) {
12508 + if (!__builtin_constant_p(n))
12509 + check_object_size(to, n, false);
12510 + memset(to, 0, n);
12511 + }
12512 + return n;
12513 }
12514
12515 -unsigned long __must_check copy_to_user(void __user *to,
12516 - const void *from, unsigned long n);
12517 -unsigned long __must_check copy_from_user(void *to,
12518 - const void __user *from,
12519 - unsigned long n);
12520 long __must_check strncpy_from_user(char *dst, const char __user *src,
12521 long count);
12522 long __must_check __strncpy_from_user(char *dst,
12523 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
12524 index db24b21..f595ae7 100644
12525 --- a/arch/x86/include/asm/uaccess_64.h
12526 +++ b/arch/x86/include/asm/uaccess_64.h
12527 @@ -9,6 +9,9 @@
12528 #include <linux/prefetch.h>
12529 #include <linux/lockdep.h>
12530 #include <asm/page.h>
12531 +#include <asm/pgtable.h>
12532 +
12533 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
12534
12535 /*
12536 * Copy To/From Userspace
12537 @@ -16,116 +19,205 @@
12538
12539 /* Handles exceptions in both to and from, but doesn't do access_ok */
12540 __must_check unsigned long
12541 -copy_user_generic(void *to, const void *from, unsigned len);
12542 +copy_user_generic(void *to, const void *from, unsigned long len);
12543
12544 __must_check unsigned long
12545 -copy_to_user(void __user *to, const void *from, unsigned len);
12546 -__must_check unsigned long
12547 -copy_from_user(void *to, const void __user *from, unsigned len);
12548 -__must_check unsigned long
12549 -copy_in_user(void __user *to, const void __user *from, unsigned len);
12550 +copy_in_user(void __user *to, const void __user *from, unsigned long len);
12551
12552 static __always_inline __must_check
12553 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
12554 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
12555 {
12556 - int ret = 0;
12557 + unsigned ret = 0;
12558
12559 might_fault();
12560 - if (!__builtin_constant_p(size))
12561 - return copy_user_generic(dst, (__force void *)src, size);
12562 +
12563 + if (size > INT_MAX)
12564 + return size;
12565 +
12566 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12567 + if (!__access_ok(VERIFY_READ, src, size))
12568 + return size;
12569 +#endif
12570 +
12571 + if (!__builtin_constant_p(size)) {
12572 + check_object_size(dst, size, false);
12573 +
12574 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12575 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12576 + src += PAX_USER_SHADOW_BASE;
12577 +#endif
12578 +
12579 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
12580 + }
12581 switch (size) {
12582 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
12583 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
12584 ret, "b", "b", "=q", 1);
12585 return ret;
12586 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
12587 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
12588 ret, "w", "w", "=r", 2);
12589 return ret;
12590 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
12591 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
12592 ret, "l", "k", "=r", 4);
12593 return ret;
12594 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
12595 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12596 ret, "q", "", "=r", 8);
12597 return ret;
12598 case 10:
12599 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12600 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12601 ret, "q", "", "=r", 10);
12602 if (unlikely(ret))
12603 return ret;
12604 __get_user_asm(*(u16 *)(8 + (char *)dst),
12605 - (u16 __user *)(8 + (char __user *)src),
12606 + (const u16 __user *)(8 + (const char __user *)src),
12607 ret, "w", "w", "=r", 2);
12608 return ret;
12609 case 16:
12610 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12611 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12612 ret, "q", "", "=r", 16);
12613 if (unlikely(ret))
12614 return ret;
12615 __get_user_asm(*(u64 *)(8 + (char *)dst),
12616 - (u64 __user *)(8 + (char __user *)src),
12617 + (const u64 __user *)(8 + (const char __user *)src),
12618 ret, "q", "", "=r", 8);
12619 return ret;
12620 default:
12621 - return copy_user_generic(dst, (__force void *)src, size);
12622 +
12623 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12624 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12625 + src += PAX_USER_SHADOW_BASE;
12626 +#endif
12627 +
12628 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
12629 }
12630 }
12631
12632 static __always_inline __must_check
12633 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
12634 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
12635 {
12636 - int ret = 0;
12637 + unsigned ret = 0;
12638
12639 might_fault();
12640 - if (!__builtin_constant_p(size))
12641 - return copy_user_generic((__force void *)dst, src, size);
12642 +
12643 + pax_track_stack();
12644 +
12645 + if (size > INT_MAX)
12646 + return size;
12647 +
12648 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12649 + if (!__access_ok(VERIFY_WRITE, dst, size))
12650 + return size;
12651 +#endif
12652 +
12653 + if (!__builtin_constant_p(size)) {
12654 + check_object_size(src, size, true);
12655 +
12656 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12657 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12658 + dst += PAX_USER_SHADOW_BASE;
12659 +#endif
12660 +
12661 + return copy_user_generic((__force_kernel void *)dst, src, size);
12662 + }
12663 switch (size) {
12664 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
12665 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
12666 ret, "b", "b", "iq", 1);
12667 return ret;
12668 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
12669 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
12670 ret, "w", "w", "ir", 2);
12671 return ret;
12672 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
12673 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
12674 ret, "l", "k", "ir", 4);
12675 return ret;
12676 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
12677 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12678 ret, "q", "", "er", 8);
12679 return ret;
12680 case 10:
12681 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12682 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12683 ret, "q", "", "er", 10);
12684 if (unlikely(ret))
12685 return ret;
12686 asm("":::"memory");
12687 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
12688 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
12689 ret, "w", "w", "ir", 2);
12690 return ret;
12691 case 16:
12692 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12693 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12694 ret, "q", "", "er", 16);
12695 if (unlikely(ret))
12696 return ret;
12697 asm("":::"memory");
12698 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
12699 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
12700 ret, "q", "", "er", 8);
12701 return ret;
12702 default:
12703 - return copy_user_generic((__force void *)dst, src, size);
12704 +
12705 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12706 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12707 + dst += PAX_USER_SHADOW_BASE;
12708 +#endif
12709 +
12710 + return copy_user_generic((__force_kernel void *)dst, src, size);
12711 + }
12712 +}
12713 +
12714 +static __always_inline __must_check
12715 +unsigned long copy_to_user(void __user *to, const void *from, unsigned long len)
12716 +{
12717 + if (access_ok(VERIFY_WRITE, to, len))
12718 + len = __copy_to_user(to, from, len);
12719 + return len;
12720 +}
12721 +
12722 +static __always_inline __must_check
12723 +unsigned long copy_from_user(void *to, const void __user *from, unsigned long len)
12724 +{
12725 + might_fault();
12726 +
12727 + if (access_ok(VERIFY_READ, from, len))
12728 + len = __copy_from_user(to, from, len);
12729 + else if (len < INT_MAX) {
12730 + if (!__builtin_constant_p(len))
12731 + check_object_size(to, len, false);
12732 + memset(to, 0, len);
12733 }
12734 + return len;
12735 }
12736
12737 static __always_inline __must_check
12738 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12739 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
12740 {
12741 - int ret = 0;
12742 + unsigned ret = 0;
12743
12744 might_fault();
12745 - if (!__builtin_constant_p(size))
12746 - return copy_user_generic((__force void *)dst,
12747 - (__force void *)src, size);
12748 +
12749 + pax_track_stack();
12750 +
12751 + if (size > INT_MAX)
12752 + return size;
12753 +
12754 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12755 + if (!__access_ok(VERIFY_READ, src, size))
12756 + return size;
12757 + if (!__access_ok(VERIFY_WRITE, dst, size))
12758 + return size;
12759 +#endif
12760 +
12761 + if (!__builtin_constant_p(size)) {
12762 +
12763 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12764 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12765 + src += PAX_USER_SHADOW_BASE;
12766 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12767 + dst += PAX_USER_SHADOW_BASE;
12768 +#endif
12769 +
12770 + return copy_user_generic((__force_kernel void *)dst,
12771 + (__force_kernel const void *)src, size);
12772 + }
12773 switch (size) {
12774 case 1: {
12775 u8 tmp;
12776 - __get_user_asm(tmp, (u8 __user *)src,
12777 + __get_user_asm(tmp, (const u8 __user *)src,
12778 ret, "b", "b", "=q", 1);
12779 if (likely(!ret))
12780 __put_user_asm(tmp, (u8 __user *)dst,
12781 @@ -134,7 +226,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12782 }
12783 case 2: {
12784 u16 tmp;
12785 - __get_user_asm(tmp, (u16 __user *)src,
12786 + __get_user_asm(tmp, (const u16 __user *)src,
12787 ret, "w", "w", "=r", 2);
12788 if (likely(!ret))
12789 __put_user_asm(tmp, (u16 __user *)dst,
12790 @@ -144,7 +236,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12791
12792 case 4: {
12793 u32 tmp;
12794 - __get_user_asm(tmp, (u32 __user *)src,
12795 + __get_user_asm(tmp, (const u32 __user *)src,
12796 ret, "l", "k", "=r", 4);
12797 if (likely(!ret))
12798 __put_user_asm(tmp, (u32 __user *)dst,
12799 @@ -153,7 +245,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12800 }
12801 case 8: {
12802 u64 tmp;
12803 - __get_user_asm(tmp, (u64 __user *)src,
12804 + __get_user_asm(tmp, (const u64 __user *)src,
12805 ret, "q", "", "=r", 8);
12806 if (likely(!ret))
12807 __put_user_asm(tmp, (u64 __user *)dst,
12808 @@ -161,8 +253,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12809 return ret;
12810 }
12811 default:
12812 - return copy_user_generic((__force void *)dst,
12813 - (__force void *)src, size);
12814 +
12815 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12816 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12817 + src += PAX_USER_SHADOW_BASE;
12818 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12819 + dst += PAX_USER_SHADOW_BASE;
12820 +#endif
12821 +
12822 + return copy_user_generic((__force_kernel void *)dst,
12823 + (__force_kernel const void *)src, size);
12824 }
12825 }
12826
12827 @@ -176,33 +276,75 @@ __must_check long strlen_user(const char __user *str);
12828 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
12829 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
12830
12831 -__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
12832 - unsigned size);
12833 +static __must_check __always_inline unsigned long
12834 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
12835 +{
12836 + pax_track_stack();
12837 +
12838 + if (size > INT_MAX)
12839 + return size;
12840 +
12841 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12842 + if (!__access_ok(VERIFY_READ, src, size))
12843 + return size;
12844
12845 -static __must_check __always_inline int
12846 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
12847 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12848 + src += PAX_USER_SHADOW_BASE;
12849 +#endif
12850 +
12851 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
12852 +}
12853 +
12854 +static __must_check __always_inline unsigned long
12855 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
12856 {
12857 - return copy_user_generic((__force void *)dst, src, size);
12858 + if (size > INT_MAX)
12859 + return size;
12860 +
12861 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12862 + if (!__access_ok(VERIFY_WRITE, dst, size))
12863 + return size;
12864 +
12865 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12866 + dst += PAX_USER_SHADOW_BASE;
12867 +#endif
12868 +
12869 + return copy_user_generic((__force_kernel void *)dst, src, size);
12870 }
12871
12872 -extern long __copy_user_nocache(void *dst, const void __user *src,
12873 - unsigned size, int zerorest);
12874 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
12875 + unsigned long size, int zerorest);
12876
12877 -static inline int
12878 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
12879 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
12880 {
12881 might_sleep();
12882 +
12883 + if (size > INT_MAX)
12884 + return size;
12885 +
12886 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12887 + if (!__access_ok(VERIFY_READ, src, size))
12888 + return size;
12889 +#endif
12890 +
12891 return __copy_user_nocache(dst, src, size, 1);
12892 }
12893
12894 -static inline int
12895 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
12896 - unsigned size)
12897 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
12898 + unsigned long size)
12899 {
12900 + if (size > INT_MAX)
12901 + return size;
12902 +
12903 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12904 + if (!__access_ok(VERIFY_READ, src, size))
12905 + return size;
12906 +#endif
12907 +
12908 return __copy_user_nocache(dst, src, size, 0);
12909 }
12910
12911 -unsigned long
12912 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
12913 +extern unsigned long
12914 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
12915
12916 #endif /* _ASM_X86_UACCESS_64_H */
12917 diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
12918 index 9064052..786cfbc 100644
12919 --- a/arch/x86/include/asm/vdso.h
12920 +++ b/arch/x86/include/asm/vdso.h
12921 @@ -25,7 +25,7 @@ extern const char VDSO32_PRELINK[];
12922 #define VDSO32_SYMBOL(base, name) \
12923 ({ \
12924 extern const char VDSO32_##name[]; \
12925 - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
12926 + (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
12927 })
12928 #endif
12929
12930 diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
12931 index 3d61e20..9507180 100644
12932 --- a/arch/x86/include/asm/vgtod.h
12933 +++ b/arch/x86/include/asm/vgtod.h
12934 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
12935 int sysctl_enabled;
12936 struct timezone sys_tz;
12937 struct { /* extract of a clocksource struct */
12938 + char name[8];
12939 cycle_t (*vread)(void);
12940 cycle_t cycle_last;
12941 cycle_t mask;
12942 diff --git a/arch/x86/include/asm/vmi.h b/arch/x86/include/asm/vmi.h
12943 index 61e08c0..b0da582 100644
12944 --- a/arch/x86/include/asm/vmi.h
12945 +++ b/arch/x86/include/asm/vmi.h
12946 @@ -191,6 +191,7 @@ struct vrom_header {
12947 u8 reserved[96]; /* Reserved for headers */
12948 char vmi_init[8]; /* VMI_Init jump point */
12949 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
12950 + char rom_data[8048]; /* rest of the option ROM */
12951 } __attribute__((packed));
12952
12953 struct pnp_header {
12954 diff --git a/arch/x86/include/asm/vmi_time.h b/arch/x86/include/asm/vmi_time.h
12955 index c6e0bee..fcb9f74 100644
12956 --- a/arch/x86/include/asm/vmi_time.h
12957 +++ b/arch/x86/include/asm/vmi_time.h
12958 @@ -43,7 +43,7 @@ extern struct vmi_timer_ops {
12959 int (*wallclock_updated)(void);
12960 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
12961 void (*cancel_alarm)(u32 flags);
12962 -} vmi_timer_ops;
12963 +} __no_const vmi_timer_ops;
12964
12965 /* Prototypes */
12966 extern void __init vmi_time_init(void);
12967 diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
12968 index d0983d2..1f7c9e9 100644
12969 --- a/arch/x86/include/asm/vsyscall.h
12970 +++ b/arch/x86/include/asm/vsyscall.h
12971 @@ -15,9 +15,10 @@ enum vsyscall_num {
12972
12973 #ifdef __KERNEL__
12974 #include <linux/seqlock.h>
12975 +#include <linux/getcpu.h>
12976 +#include <linux/time.h>
12977
12978 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
12979 -#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
12980
12981 /* Definitions for CONFIG_GENERIC_TIME definitions */
12982 #define __section_vsyscall_gtod_data __attribute__ \
12983 @@ -31,7 +32,6 @@ enum vsyscall_num {
12984 #define VGETCPU_LSL 2
12985
12986 extern int __vgetcpu_mode;
12987 -extern volatile unsigned long __jiffies;
12988
12989 /* kernel space (writeable) */
12990 extern int vgetcpu_mode;
12991 @@ -39,6 +39,9 @@ extern struct timezone sys_tz;
12992
12993 extern void map_vsyscall(void);
12994
12995 +extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
12996 +extern time_t vtime(time_t *t);
12997 +extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
12998 #endif /* __KERNEL__ */
12999
13000 #endif /* _ASM_X86_VSYSCALL_H */
13001 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
13002 index 2c756fd..3377e37 100644
13003 --- a/arch/x86/include/asm/x86_init.h
13004 +++ b/arch/x86/include/asm/x86_init.h
13005 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
13006 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
13007 void (*find_smp_config)(unsigned int reserve);
13008 void (*get_smp_config)(unsigned int early);
13009 -};
13010 +} __no_const;
13011
13012 /**
13013 * struct x86_init_resources - platform specific resource related ops
13014 @@ -42,7 +42,7 @@ struct x86_init_resources {
13015 void (*probe_roms)(void);
13016 void (*reserve_resources)(void);
13017 char *(*memory_setup)(void);
13018 -};
13019 +} __no_const;
13020
13021 /**
13022 * struct x86_init_irqs - platform specific interrupt setup
13023 @@ -55,7 +55,7 @@ struct x86_init_irqs {
13024 void (*pre_vector_init)(void);
13025 void (*intr_init)(void);
13026 void (*trap_init)(void);
13027 -};
13028 +} __no_const;
13029
13030 /**
13031 * struct x86_init_oem - oem platform specific customizing functions
13032 @@ -65,7 +65,7 @@ struct x86_init_irqs {
13033 struct x86_init_oem {
13034 void (*arch_setup)(void);
13035 void (*banner)(void);
13036 -};
13037 +} __no_const;
13038
13039 /**
13040 * struct x86_init_paging - platform specific paging functions
13041 @@ -75,7 +75,7 @@ struct x86_init_oem {
13042 struct x86_init_paging {
13043 void (*pagetable_setup_start)(pgd_t *base);
13044 void (*pagetable_setup_done)(pgd_t *base);
13045 -};
13046 +} __no_const;
13047
13048 /**
13049 * struct x86_init_timers - platform specific timer setup
13050 @@ -88,7 +88,7 @@ struct x86_init_timers {
13051 void (*setup_percpu_clockev)(void);
13052 void (*tsc_pre_init)(void);
13053 void (*timer_init)(void);
13054 -};
13055 +} __no_const;
13056
13057 /**
13058 * struct x86_init_ops - functions for platform specific setup
13059 @@ -101,7 +101,7 @@ struct x86_init_ops {
13060 struct x86_init_oem oem;
13061 struct x86_init_paging paging;
13062 struct x86_init_timers timers;
13063 -};
13064 +} __no_const;
13065
13066 /**
13067 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
13068 @@ -109,7 +109,7 @@ struct x86_init_ops {
13069 */
13070 struct x86_cpuinit_ops {
13071 void (*setup_percpu_clockev)(void);
13072 -};
13073 +} __no_const;
13074
13075 /**
13076 * struct x86_platform_ops - platform specific runtime functions
13077 @@ -121,7 +121,7 @@ struct x86_platform_ops {
13078 unsigned long (*calibrate_tsc)(void);
13079 unsigned long (*get_wallclock)(void);
13080 int (*set_wallclock)(unsigned long nowtime);
13081 -};
13082 +} __no_const;
13083
13084 extern struct x86_init_ops x86_init;
13085 extern struct x86_cpuinit_ops x86_cpuinit;
13086 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
13087 index 727acc1..554f3eb 100644
13088 --- a/arch/x86/include/asm/xsave.h
13089 +++ b/arch/x86/include/asm/xsave.h
13090 @@ -56,6 +56,12 @@ static inline int xrstor_checking(struct xsave_struct *fx)
13091 static inline int xsave_user(struct xsave_struct __user *buf)
13092 {
13093 int err;
13094 +
13095 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13096 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
13097 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
13098 +#endif
13099 +
13100 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
13101 "2:\n"
13102 ".section .fixup,\"ax\"\n"
13103 @@ -78,10 +84,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13104 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
13105 {
13106 int err;
13107 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
13108 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
13109 u32 lmask = mask;
13110 u32 hmask = mask >> 32;
13111
13112 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13113 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13114 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13115 +#endif
13116 +
13117 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13118 "2:\n"
13119 ".section .fixup,\"ax\"\n"
13120 diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
13121 index 6a564ac..9b1340c 100644
13122 --- a/arch/x86/kernel/acpi/realmode/Makefile
13123 +++ b/arch/x86/kernel/acpi/realmode/Makefile
13124 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
13125 $(call cc-option, -fno-stack-protector) \
13126 $(call cc-option, -mpreferred-stack-boundary=2)
13127 KBUILD_CFLAGS += $(call cc-option, -m32)
13128 +ifdef CONSTIFY_PLUGIN
13129 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13130 +endif
13131 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13132 GCOV_PROFILE := n
13133
13134 diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13135 index 580b4e2..d4129e4 100644
13136 --- a/arch/x86/kernel/acpi/realmode/wakeup.S
13137 +++ b/arch/x86/kernel/acpi/realmode/wakeup.S
13138 @@ -91,6 +91,9 @@ _start:
13139 /* Do any other stuff... */
13140
13141 #ifndef CONFIG_64BIT
13142 + /* Recheck NX bit overrides (64bit path does this in trampoline) */
13143 + call verify_cpu
13144 +
13145 /* This could also be done in C code... */
13146 movl pmode_cr3, %eax
13147 movl %eax, %cr3
13148 @@ -104,7 +107,7 @@ _start:
13149 movl %eax, %ecx
13150 orl %edx, %ecx
13151 jz 1f
13152 - movl $0xc0000080, %ecx
13153 + mov $MSR_EFER, %ecx
13154 wrmsr
13155 1:
13156
13157 @@ -114,6 +117,7 @@ _start:
13158 movl pmode_cr0, %eax
13159 movl %eax, %cr0
13160 jmp pmode_return
13161 +# include "../../verify_cpu.S"
13162 #else
13163 pushw $0
13164 pushw trampoline_segment
13165 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13166 index ca93638..7042f24 100644
13167 --- a/arch/x86/kernel/acpi/sleep.c
13168 +++ b/arch/x86/kernel/acpi/sleep.c
13169 @@ -11,11 +11,12 @@
13170 #include <linux/cpumask.h>
13171 #include <asm/segment.h>
13172 #include <asm/desc.h>
13173 +#include <asm/e820.h>
13174
13175 #include "realmode/wakeup.h"
13176 #include "sleep.h"
13177
13178 -unsigned long acpi_wakeup_address;
13179 +unsigned long acpi_wakeup_address = 0x2000;
13180 unsigned long acpi_realmode_flags;
13181
13182 /* address in low memory of the wakeup routine. */
13183 @@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
13184 #else /* CONFIG_64BIT */
13185 header->trampoline_segment = setup_trampoline() >> 4;
13186 #ifdef CONFIG_SMP
13187 - stack_start.sp = temp_stack + sizeof(temp_stack);
13188 + stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13189 +
13190 + pax_open_kernel();
13191 early_gdt_descr.address =
13192 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13193 + pax_close_kernel();
13194 +
13195 initial_gs = per_cpu_offset(smp_processor_id());
13196 #endif
13197 initial_code = (unsigned long)wakeup_long64;
13198 @@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
13199 return;
13200 }
13201
13202 - acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
13203 -
13204 - if (!acpi_realmode) {
13205 - printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
13206 - return;
13207 - }
13208 -
13209 - acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
13210 + reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
13211 + acpi_realmode = (unsigned long)__va(acpi_wakeup_address);;
13212 }
13213
13214
13215 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13216 index 8ded418..079961e 100644
13217 --- a/arch/x86/kernel/acpi/wakeup_32.S
13218 +++ b/arch/x86/kernel/acpi/wakeup_32.S
13219 @@ -30,13 +30,11 @@ wakeup_pmode_return:
13220 # and restore the stack ... but you need gdt for this to work
13221 movl saved_context_esp, %esp
13222
13223 - movl %cs:saved_magic, %eax
13224 - cmpl $0x12345678, %eax
13225 + cmpl $0x12345678, saved_magic
13226 jne bogus_magic
13227
13228 # jump to place where we left off
13229 - movl saved_eip, %eax
13230 - jmp *%eax
13231 + jmp *(saved_eip)
13232
13233 bogus_magic:
13234 jmp bogus_magic
13235 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13236 index de7353c..075da5f 100644
13237 --- a/arch/x86/kernel/alternative.c
13238 +++ b/arch/x86/kernel/alternative.c
13239 @@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13240
13241 BUG_ON(p->len > MAX_PATCH_LEN);
13242 /* prep the buffer with the original instructions */
13243 - memcpy(insnbuf, p->instr, p->len);
13244 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13245 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13246 (unsigned long)p->instr, p->len);
13247
13248 @@ -475,7 +475,7 @@ void __init alternative_instructions(void)
13249 if (smp_alt_once)
13250 free_init_pages("SMP alternatives",
13251 (unsigned long)__smp_locks,
13252 - (unsigned long)__smp_locks_end);
13253 + PAGE_ALIGN((unsigned long)__smp_locks_end));
13254
13255 restart_nmi();
13256 }
13257 @@ -492,13 +492,17 @@ void __init alternative_instructions(void)
13258 * instructions. And on the local CPU you need to be protected again NMI or MCE
13259 * handlers seeing an inconsistent instruction while you patch.
13260 */
13261 -static void *__init_or_module text_poke_early(void *addr, const void *opcode,
13262 +static void *__kprobes text_poke_early(void *addr, const void *opcode,
13263 size_t len)
13264 {
13265 unsigned long flags;
13266 local_irq_save(flags);
13267 - memcpy(addr, opcode, len);
13268 +
13269 + pax_open_kernel();
13270 + memcpy(ktla_ktva(addr), opcode, len);
13271 sync_core();
13272 + pax_close_kernel();
13273 +
13274 local_irq_restore(flags);
13275 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13276 that causes hangs on some VIA CPUs. */
13277 @@ -520,35 +524,21 @@ static void *__init_or_module text_poke_early(void *addr, const void *opcode,
13278 */
13279 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13280 {
13281 - unsigned long flags;
13282 - char *vaddr;
13283 + unsigned char *vaddr = ktla_ktva(addr);
13284 struct page *pages[2];
13285 - int i;
13286 + size_t i;
13287
13288 if (!core_kernel_text((unsigned long)addr)) {
13289 - pages[0] = vmalloc_to_page(addr);
13290 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13291 + pages[0] = vmalloc_to_page(vaddr);
13292 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13293 } else {
13294 - pages[0] = virt_to_page(addr);
13295 + pages[0] = virt_to_page(vaddr);
13296 WARN_ON(!PageReserved(pages[0]));
13297 - pages[1] = virt_to_page(addr + PAGE_SIZE);
13298 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13299 }
13300 BUG_ON(!pages[0]);
13301 - local_irq_save(flags);
13302 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13303 - if (pages[1])
13304 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13305 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13306 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13307 - clear_fixmap(FIX_TEXT_POKE0);
13308 - if (pages[1])
13309 - clear_fixmap(FIX_TEXT_POKE1);
13310 - local_flush_tlb();
13311 - sync_core();
13312 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
13313 - that causes hangs on some VIA CPUs. */
13314 + text_poke_early(addr, opcode, len);
13315 for (i = 0; i < len; i++)
13316 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13317 - local_irq_restore(flags);
13318 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13319 return addr;
13320 }
13321 diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
13322 index 3a44b75..1601800 100644
13323 --- a/arch/x86/kernel/amd_iommu.c
13324 +++ b/arch/x86/kernel/amd_iommu.c
13325 @@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(void)
13326 }
13327 }
13328
13329 -static struct dma_map_ops amd_iommu_dma_ops = {
13330 +static const struct dma_map_ops amd_iommu_dma_ops = {
13331 .alloc_coherent = alloc_coherent,
13332 .free_coherent = free_coherent,
13333 .map_page = map_page,
13334 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13335 index 1d2d670..8e3f477 100644
13336 --- a/arch/x86/kernel/apic/apic.c
13337 +++ b/arch/x86/kernel/apic/apic.c
13338 @@ -170,7 +170,7 @@ int first_system_vector = 0xfe;
13339 /*
13340 * Debug level, exported for io_apic.c
13341 */
13342 -unsigned int apic_verbosity;
13343 +int apic_verbosity;
13344
13345 int pic_mode;
13346
13347 @@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs *regs)
13348 apic_write(APIC_ESR, 0);
13349 v1 = apic_read(APIC_ESR);
13350 ack_APIC_irq();
13351 - atomic_inc(&irq_err_count);
13352 + atomic_inc_unchecked(&irq_err_count);
13353
13354 /*
13355 * Here is what the APIC error bits mean:
13356 @@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(void)
13357 u16 *bios_cpu_apicid;
13358 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
13359
13360 + pax_track_stack();
13361 +
13362 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
13363 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
13364
13365 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13366 index 8928d97..f799cea 100644
13367 --- a/arch/x86/kernel/apic/io_apic.c
13368 +++ b/arch/x86/kernel/apic/io_apic.c
13369 @@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapic_entries(void)
13370 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
13371 GFP_ATOMIC);
13372 if (!ioapic_entries)
13373 - return 0;
13374 + return NULL;
13375
13376 for (apic = 0; apic < nr_ioapics; apic++) {
13377 ioapic_entries[apic] =
13378 @@ -733,7 +733,7 @@ nomem:
13379 kfree(ioapic_entries[apic]);
13380 kfree(ioapic_entries);
13381
13382 - return 0;
13383 + return NULL;
13384 }
13385
13386 /*
13387 @@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
13388 }
13389 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
13390
13391 -void lock_vector_lock(void)
13392 +void lock_vector_lock(void) __acquires(vector_lock)
13393 {
13394 /* Used to the online set of cpus does not change
13395 * during assign_irq_vector.
13396 @@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
13397 spin_lock(&vector_lock);
13398 }
13399
13400 -void unlock_vector_lock(void)
13401 +void unlock_vector_lock(void) __releases(vector_lock)
13402 {
13403 spin_unlock(&vector_lock);
13404 }
13405 @@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int irq)
13406 ack_APIC_irq();
13407 }
13408
13409 -atomic_t irq_mis_count;
13410 +atomic_unchecked_t irq_mis_count;
13411
13412 static void ack_apic_level(unsigned int irq)
13413 {
13414 @@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int irq)
13415
13416 /* Tail end of version 0x11 I/O APIC bug workaround */
13417 if (!(v & (1 << (i & 0x1f)))) {
13418 - atomic_inc(&irq_mis_count);
13419 + atomic_inc_unchecked(&irq_mis_count);
13420 spin_lock(&ioapic_lock);
13421 __mask_and_edge_IO_APIC_irq(cfg);
13422 __unmask_and_level_IO_APIC_irq(cfg);
13423 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
13424 index 151ace6..f317474 100644
13425 --- a/arch/x86/kernel/apm_32.c
13426 +++ b/arch/x86/kernel/apm_32.c
13427 @@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
13428 * This is for buggy BIOS's that refer to (real mode) segment 0x40
13429 * even though they are called in protected mode.
13430 */
13431 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
13432 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
13433 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
13434
13435 static const char driver_version[] = "1.16ac"; /* no spaces */
13436 @@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
13437 BUG_ON(cpu != 0);
13438 gdt = get_cpu_gdt_table(cpu);
13439 save_desc_40 = gdt[0x40 / 8];
13440 +
13441 + pax_open_kernel();
13442 gdt[0x40 / 8] = bad_bios_desc;
13443 + pax_close_kernel();
13444
13445 apm_irq_save(flags);
13446 APM_DO_SAVE_SEGS;
13447 @@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
13448 &call->esi);
13449 APM_DO_RESTORE_SEGS;
13450 apm_irq_restore(flags);
13451 +
13452 + pax_open_kernel();
13453 gdt[0x40 / 8] = save_desc_40;
13454 + pax_close_kernel();
13455 +
13456 put_cpu();
13457
13458 return call->eax & 0xff;
13459 @@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
13460 BUG_ON(cpu != 0);
13461 gdt = get_cpu_gdt_table(cpu);
13462 save_desc_40 = gdt[0x40 / 8];
13463 +
13464 + pax_open_kernel();
13465 gdt[0x40 / 8] = bad_bios_desc;
13466 + pax_close_kernel();
13467
13468 apm_irq_save(flags);
13469 APM_DO_SAVE_SEGS;
13470 @@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
13471 &call->eax);
13472 APM_DO_RESTORE_SEGS;
13473 apm_irq_restore(flags);
13474 +
13475 + pax_open_kernel();
13476 gdt[0x40 / 8] = save_desc_40;
13477 + pax_close_kernel();
13478 +
13479 put_cpu();
13480 return error;
13481 }
13482 @@ -975,7 +989,7 @@ recalc:
13483
13484 static void apm_power_off(void)
13485 {
13486 - unsigned char po_bios_call[] = {
13487 + const unsigned char po_bios_call[] = {
13488 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
13489 0x8e, 0xd0, /* movw ax,ss */
13490 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
13491 @@ -2357,12 +2371,15 @@ static int __init apm_init(void)
13492 * code to that CPU.
13493 */
13494 gdt = get_cpu_gdt_table(0);
13495 +
13496 + pax_open_kernel();
13497 set_desc_base(&gdt[APM_CS >> 3],
13498 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
13499 set_desc_base(&gdt[APM_CS_16 >> 3],
13500 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
13501 set_desc_base(&gdt[APM_DS >> 3],
13502 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
13503 + pax_close_kernel();
13504
13505 proc_create("apm", 0, NULL, &apm_file_ops);
13506
13507 diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
13508 index dfdbf64..9b2b6ce 100644
13509 --- a/arch/x86/kernel/asm-offsets_32.c
13510 +++ b/arch/x86/kernel/asm-offsets_32.c
13511 @@ -51,7 +51,6 @@ void foo(void)
13512 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
13513 BLANK();
13514
13515 - OFFSET(TI_task, thread_info, task);
13516 OFFSET(TI_exec_domain, thread_info, exec_domain);
13517 OFFSET(TI_flags, thread_info, flags);
13518 OFFSET(TI_status, thread_info, status);
13519 @@ -60,6 +59,8 @@ void foo(void)
13520 OFFSET(TI_restart_block, thread_info, restart_block);
13521 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
13522 OFFSET(TI_cpu, thread_info, cpu);
13523 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
13524 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13525 BLANK();
13526
13527 OFFSET(GDS_size, desc_ptr, size);
13528 @@ -99,6 +100,7 @@ void foo(void)
13529
13530 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13531 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
13532 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13533 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
13534 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
13535 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
13536 @@ -115,6 +117,11 @@ void foo(void)
13537 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
13538 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13539 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13540 +
13541 +#ifdef CONFIG_PAX_KERNEXEC
13542 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13543 +#endif
13544 +
13545 #endif
13546
13547 #ifdef CONFIG_XEN
13548 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
13549 index 4a6aeed..371de20 100644
13550 --- a/arch/x86/kernel/asm-offsets_64.c
13551 +++ b/arch/x86/kernel/asm-offsets_64.c
13552 @@ -44,6 +44,8 @@ int main(void)
13553 ENTRY(addr_limit);
13554 ENTRY(preempt_count);
13555 ENTRY(status);
13556 + ENTRY(lowest_stack);
13557 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13558 #ifdef CONFIG_IA32_EMULATION
13559 ENTRY(sysenter_return);
13560 #endif
13561 @@ -63,6 +65,18 @@ int main(void)
13562 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13563 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
13564 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
13565 +
13566 +#ifdef CONFIG_PAX_KERNEXEC
13567 + OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13568 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13569 +#endif
13570 +
13571 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13572 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
13573 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
13574 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
13575 +#endif
13576 +
13577 #endif
13578
13579
13580 @@ -115,6 +129,7 @@ int main(void)
13581 ENTRY(cr8);
13582 BLANK();
13583 #undef ENTRY
13584 + DEFINE(TSS_size, sizeof(struct tss_struct));
13585 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
13586 BLANK();
13587 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
13588 @@ -130,6 +145,7 @@ int main(void)
13589
13590 BLANK();
13591 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13592 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13593 #ifdef CONFIG_XEN
13594 BLANK();
13595 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
13596 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
13597 index ff502cc..dc5133e 100644
13598 --- a/arch/x86/kernel/cpu/Makefile
13599 +++ b/arch/x86/kernel/cpu/Makefile
13600 @@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
13601 CFLAGS_REMOVE_common.o = -pg
13602 endif
13603
13604 -# Make sure load_percpu_segment has no stackprotector
13605 -nostackp := $(call cc-option, -fno-stack-protector)
13606 -CFLAGS_common.o := $(nostackp)
13607 -
13608 obj-y := intel_cacheinfo.o addon_cpuid_features.o
13609 obj-y += proc.o capflags.o powerflags.o common.o
13610 obj-y += vmware.o hypervisor.o sched.o
13611 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
13612 index 6e082dc..a0b5f36 100644
13613 --- a/arch/x86/kernel/cpu/amd.c
13614 +++ b/arch/x86/kernel/cpu/amd.c
13615 @@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
13616 unsigned int size)
13617 {
13618 /* AMD errata T13 (order #21922) */
13619 - if ((c->x86 == 6)) {
13620 + if (c->x86 == 6) {
13621 /* Duron Rev A0 */
13622 if (c->x86_model == 3 && c->x86_mask == 0)
13623 size = 64;
13624 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
13625 index 4e34d10..ba6bc97 100644
13626 --- a/arch/x86/kernel/cpu/common.c
13627 +++ b/arch/x86/kernel/cpu/common.c
13628 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
13629
13630 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
13631
13632 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
13633 -#ifdef CONFIG_X86_64
13634 - /*
13635 - * We need valid kernel segments for data and code in long mode too
13636 - * IRET will check the segment types kkeil 2000/10/28
13637 - * Also sysret mandates a special GDT layout
13638 - *
13639 - * TLS descriptors are currently at a different place compared to i386.
13640 - * Hopefully nobody expects them at a fixed place (Wine?)
13641 - */
13642 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
13643 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
13644 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
13645 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
13646 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
13647 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
13648 -#else
13649 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
13650 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13651 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
13652 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
13653 - /*
13654 - * Segments used for calling PnP BIOS have byte granularity.
13655 - * They code segments and data segments have fixed 64k limits,
13656 - * the transfer segment sizes are set at run time.
13657 - */
13658 - /* 32-bit code */
13659 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13660 - /* 16-bit code */
13661 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13662 - /* 16-bit data */
13663 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
13664 - /* 16-bit data */
13665 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
13666 - /* 16-bit data */
13667 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
13668 - /*
13669 - * The APM segments have byte granularity and their bases
13670 - * are set at run time. All have 64k limits.
13671 - */
13672 - /* 32-bit code */
13673 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13674 - /* 16-bit code */
13675 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13676 - /* data */
13677 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
13678 -
13679 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13680 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13681 - GDT_STACK_CANARY_INIT
13682 -#endif
13683 -} };
13684 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
13685 -
13686 static int __init x86_xsave_setup(char *s)
13687 {
13688 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
13689 @@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
13690 {
13691 struct desc_ptr gdt_descr;
13692
13693 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
13694 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
13695 gdt_descr.size = GDT_SIZE - 1;
13696 load_gdt(&gdt_descr);
13697 /* Reload the per-cpu base */
13698 @@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
13699 /* Filter out anything that depends on CPUID levels we don't have */
13700 filter_cpuid_features(c, true);
13701
13702 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
13703 + setup_clear_cpu_cap(X86_FEATURE_SEP);
13704 +#endif
13705 +
13706 /* If the model name is still unset, do table lookup. */
13707 if (!c->x86_model_id[0]) {
13708 const char *p;
13709 @@ -980,6 +930,9 @@ static __init int setup_disablecpuid(char *arg)
13710 }
13711 __setup("clearcpuid=", setup_disablecpuid);
13712
13713 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
13714 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
13715 +
13716 #ifdef CONFIG_X86_64
13717 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
13718
13719 @@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
13720 EXPORT_PER_CPU_SYMBOL(current_task);
13721
13722 DEFINE_PER_CPU(unsigned long, kernel_stack) =
13723 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
13724 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
13725 EXPORT_PER_CPU_SYMBOL(kernel_stack);
13726
13727 DEFINE_PER_CPU(char *, irq_stack_ptr) =
13728 @@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
13729 {
13730 memset(regs, 0, sizeof(struct pt_regs));
13731 regs->fs = __KERNEL_PERCPU;
13732 - regs->gs = __KERNEL_STACK_CANARY;
13733 + savesegment(gs, regs->gs);
13734
13735 return regs;
13736 }
13737 @@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
13738 int i;
13739
13740 cpu = stack_smp_processor_id();
13741 - t = &per_cpu(init_tss, cpu);
13742 + t = init_tss + cpu;
13743 orig_ist = &per_cpu(orig_ist, cpu);
13744
13745 #ifdef CONFIG_NUMA
13746 @@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
13747 switch_to_new_gdt(cpu);
13748 loadsegment(fs, 0);
13749
13750 - load_idt((const struct desc_ptr *)&idt_descr);
13751 + load_idt(&idt_descr);
13752
13753 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
13754 syscall_init();
13755 @@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
13756 wrmsrl(MSR_KERNEL_GS_BASE, 0);
13757 barrier();
13758
13759 - check_efer();
13760 if (cpu != 0)
13761 enable_x2apic();
13762
13763 @@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
13764 {
13765 int cpu = smp_processor_id();
13766 struct task_struct *curr = current;
13767 - struct tss_struct *t = &per_cpu(init_tss, cpu);
13768 + struct tss_struct *t = init_tss + cpu;
13769 struct thread_struct *thread = &curr->thread;
13770
13771 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
13772 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
13773 index 6a77cca..4f4fca0 100644
13774 --- a/arch/x86/kernel/cpu/intel.c
13775 +++ b/arch/x86/kernel/cpu/intel.c
13776 @@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug(void)
13777 * Update the IDT descriptor and reload the IDT so that
13778 * it uses the read-only mapped virtual address.
13779 */
13780 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
13781 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
13782 load_idt(&idt_descr);
13783 }
13784 #endif
13785 diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
13786 index 417990f..96dc36b 100644
13787 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c
13788 +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
13789 @@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
13790 return ret;
13791 }
13792
13793 -static struct sysfs_ops sysfs_ops = {
13794 +static const struct sysfs_ops sysfs_ops = {
13795 .show = show,
13796 .store = store,
13797 };
13798 diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
13799 index 472763d..9831e11 100644
13800 --- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
13801 +++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
13802 @@ -211,7 +211,9 @@ static ssize_t mce_write(struct file *filp, const char __user *ubuf,
13803 static int inject_init(void)
13804 {
13805 printk(KERN_INFO "Machine check injector initialized\n");
13806 - mce_chrdev_ops.write = mce_write;
13807 + pax_open_kernel();
13808 + *(void **)&mce_chrdev_ops.write = mce_write;
13809 + pax_close_kernel();
13810 register_die_notifier(&mce_raise_nb);
13811 return 0;
13812 }
13813 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
13814 index 0f16a2b..21740f5 100644
13815 --- a/arch/x86/kernel/cpu/mcheck/mce.c
13816 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
13817 @@ -43,6 +43,7 @@
13818 #include <asm/ipi.h>
13819 #include <asm/mce.h>
13820 #include <asm/msr.h>
13821 +#include <asm/local.h>
13822
13823 #include "mce-internal.h"
13824
13825 @@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
13826 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
13827 m->cs, m->ip);
13828
13829 - if (m->cs == __KERNEL_CS)
13830 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
13831 print_symbol("{%s}", m->ip);
13832 pr_cont("\n");
13833 }
13834 @@ -221,10 +222,10 @@ static void print_mce_tail(void)
13835
13836 #define PANIC_TIMEOUT 5 /* 5 seconds */
13837
13838 -static atomic_t mce_paniced;
13839 +static atomic_unchecked_t mce_paniced;
13840
13841 static int fake_panic;
13842 -static atomic_t mce_fake_paniced;
13843 +static atomic_unchecked_t mce_fake_paniced;
13844
13845 /* Panic in progress. Enable interrupts and wait for final IPI */
13846 static void wait_for_panic(void)
13847 @@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
13848 /*
13849 * Make sure only one CPU runs in machine check panic
13850 */
13851 - if (atomic_inc_return(&mce_paniced) > 1)
13852 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
13853 wait_for_panic();
13854 barrier();
13855
13856 @@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
13857 console_verbose();
13858 } else {
13859 /* Don't log too much for fake panic */
13860 - if (atomic_inc_return(&mce_fake_paniced) > 1)
13861 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
13862 return;
13863 }
13864 print_mce_head();
13865 @@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
13866 * might have been modified by someone else.
13867 */
13868 rmb();
13869 - if (atomic_read(&mce_paniced))
13870 + if (atomic_read_unchecked(&mce_paniced))
13871 wait_for_panic();
13872 if (!monarch_timeout)
13873 goto out;
13874 @@ -1394,7 +1395,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
13875 }
13876
13877 /* Call the installed machine check handler for this CPU setup. */
13878 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
13879 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
13880 unexpected_machine_check;
13881
13882 /*
13883 @@ -1416,7 +1417,9 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
13884 return;
13885 }
13886
13887 + pax_open_kernel();
13888 machine_check_vector = do_machine_check;
13889 + pax_close_kernel();
13890
13891 mce_init();
13892 mce_cpu_features(c);
13893 @@ -1429,14 +1432,14 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
13894 */
13895
13896 static DEFINE_SPINLOCK(mce_state_lock);
13897 -static int open_count; /* #times opened */
13898 +static local_t open_count; /* #times opened */
13899 static int open_exclu; /* already open exclusive? */
13900
13901 static int mce_open(struct inode *inode, struct file *file)
13902 {
13903 spin_lock(&mce_state_lock);
13904
13905 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
13906 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
13907 spin_unlock(&mce_state_lock);
13908
13909 return -EBUSY;
13910 @@ -1444,7 +1447,7 @@ static int mce_open(struct inode *inode, struct file *file)
13911
13912 if (file->f_flags & O_EXCL)
13913 open_exclu = 1;
13914 - open_count++;
13915 + local_inc(&open_count);
13916
13917 spin_unlock(&mce_state_lock);
13918
13919 @@ -1455,7 +1458,7 @@ static int mce_release(struct inode *inode, struct file *file)
13920 {
13921 spin_lock(&mce_state_lock);
13922
13923 - open_count--;
13924 + local_dec(&open_count);
13925 open_exclu = 0;
13926
13927 spin_unlock(&mce_state_lock);
13928 @@ -2082,7 +2085,7 @@ struct dentry *mce_get_debugfs_dir(void)
13929 static void mce_reset(void)
13930 {
13931 cpu_missing = 0;
13932 - atomic_set(&mce_fake_paniced, 0);
13933 + atomic_set_unchecked(&mce_fake_paniced, 0);
13934 atomic_set(&mce_executing, 0);
13935 atomic_set(&mce_callin, 0);
13936 atomic_set(&global_nwo, 0);
13937 diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
13938 index ef3cd31..9d2f6ab 100644
13939 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
13940 +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
13941 @@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
13942 return ret;
13943 }
13944
13945 -static struct sysfs_ops threshold_ops = {
13946 +static const struct sysfs_ops threshold_ops = {
13947 .show = show,
13948 .store = store,
13949 };
13950 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
13951 index 5c0e653..1e82c7c 100644
13952 --- a/arch/x86/kernel/cpu/mcheck/p5.c
13953 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
13954 @@ -50,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
13955 if (!cpu_has(c, X86_FEATURE_MCE))
13956 return;
13957
13958 + pax_open_kernel();
13959 machine_check_vector = pentium_machine_check;
13960 + pax_close_kernel();
13961 /* Make sure the vector pointer is visible before we enable MCEs: */
13962 wmb();
13963
13964 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
13965 index 54060f5..e6ba93d 100644
13966 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
13967 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
13968 @@ -24,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
13969 {
13970 u32 lo, hi;
13971
13972 + pax_open_kernel();
13973 machine_check_vector = winchip_machine_check;
13974 + pax_close_kernel();
13975 /* Make sure the vector pointer is visible before we enable MCEs: */
13976 wmb();
13977
13978 diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c
13979 index 33af141..92ba9cd 100644
13980 --- a/arch/x86/kernel/cpu/mtrr/amd.c
13981 +++ b/arch/x86/kernel/cpu/mtrr/amd.c
13982 @@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
13983 return 0;
13984 }
13985
13986 -static struct mtrr_ops amd_mtrr_ops = {
13987 +static const struct mtrr_ops amd_mtrr_ops = {
13988 .vendor = X86_VENDOR_AMD,
13989 .set = amd_set_mtrr,
13990 .get = amd_get_mtrr,
13991 diff --git a/arch/x86/kernel/cpu/mtrr/centaur.c b/arch/x86/kernel/cpu/mtrr/centaur.c
13992 index de89f14..316fe3e 100644
13993 --- a/arch/x86/kernel/cpu/mtrr/centaur.c
13994 +++ b/arch/x86/kernel/cpu/mtrr/centaur.c
13995 @@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t
13996 return 0;
13997 }
13998
13999 -static struct mtrr_ops centaur_mtrr_ops = {
14000 +static const struct mtrr_ops centaur_mtrr_ops = {
14001 .vendor = X86_VENDOR_CENTAUR,
14002 .set = centaur_set_mcr,
14003 .get = centaur_get_mcr,
14004 diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
14005 index 228d982..68a3343 100644
14006 --- a/arch/x86/kernel/cpu/mtrr/cyrix.c
14007 +++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
14008 @@ -265,7 +265,7 @@ static void cyrix_set_all(void)
14009 post_set();
14010 }
14011
14012 -static struct mtrr_ops cyrix_mtrr_ops = {
14013 +static const struct mtrr_ops cyrix_mtrr_ops = {
14014 .vendor = X86_VENDOR_CYRIX,
14015 .set_all = cyrix_set_all,
14016 .set = cyrix_set_arr,
14017 diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
14018 index 55da0c5..4d75584 100644
14019 --- a/arch/x86/kernel/cpu/mtrr/generic.c
14020 +++ b/arch/x86/kernel/cpu/mtrr/generic.c
14021 @@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
14022 /*
14023 * Generic structure...
14024 */
14025 -struct mtrr_ops generic_mtrr_ops = {
14026 +const struct mtrr_ops generic_mtrr_ops = {
14027 .use_intel_if = 1,
14028 .set_all = generic_set_all,
14029 .get = generic_get_mtrr,
14030 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
14031 index fd60f09..c94ef52 100644
14032 --- a/arch/x86/kernel/cpu/mtrr/main.c
14033 +++ b/arch/x86/kernel/cpu/mtrr/main.c
14034 @@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
14035 u64 size_or_mask, size_and_mask;
14036 static bool mtrr_aps_delayed_init;
14037
14038 -static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
14039 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
14040
14041 -struct mtrr_ops *mtrr_if;
14042 +const struct mtrr_ops *mtrr_if;
14043
14044 static void set_mtrr(unsigned int reg, unsigned long base,
14045 unsigned long size, mtrr_type type);
14046
14047 -void set_mtrr_ops(struct mtrr_ops *ops)
14048 +void set_mtrr_ops(const struct mtrr_ops *ops)
14049 {
14050 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
14051 mtrr_ops[ops->vendor] = ops;
14052 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
14053 index a501dee..816c719 100644
14054 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
14055 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
14056 @@ -25,14 +25,14 @@ struct mtrr_ops {
14057 int (*validate_add_page)(unsigned long base, unsigned long size,
14058 unsigned int type);
14059 int (*have_wrcomb)(void);
14060 -};
14061 +} __do_const;
14062
14063 extern int generic_get_free_region(unsigned long base, unsigned long size,
14064 int replace_reg);
14065 extern int generic_validate_add_page(unsigned long base, unsigned long size,
14066 unsigned int type);
14067
14068 -extern struct mtrr_ops generic_mtrr_ops;
14069 +extern const struct mtrr_ops generic_mtrr_ops;
14070
14071 extern int positive_have_wrcomb(void);
14072
14073 @@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int index,
14074 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
14075 void get_mtrr_state(void);
14076
14077 -extern void set_mtrr_ops(struct mtrr_ops *ops);
14078 +extern void set_mtrr_ops(const struct mtrr_ops *ops);
14079
14080 extern u64 size_or_mask, size_and_mask;
14081 -extern struct mtrr_ops *mtrr_if;
14082 +extern const struct mtrr_ops *mtrr_if;
14083
14084 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
14085 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
14086 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
14087 index 0ff02ca..fc49a60 100644
14088 --- a/arch/x86/kernel/cpu/perf_event.c
14089 +++ b/arch/x86/kernel/cpu/perf_event.c
14090 @@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event *event,
14091 * count to the generic event atomically:
14092 */
14093 again:
14094 - prev_raw_count = atomic64_read(&hwc->prev_count);
14095 + prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
14096 rdmsrl(hwc->event_base + idx, new_raw_count);
14097
14098 - if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
14099 + if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
14100 new_raw_count) != prev_raw_count)
14101 goto again;
14102
14103 @@ -741,7 +741,7 @@ again:
14104 delta = (new_raw_count << shift) - (prev_raw_count << shift);
14105 delta >>= shift;
14106
14107 - atomic64_add(delta, &event->count);
14108 + atomic64_add_unchecked(delta, &event->count);
14109 atomic64_sub(delta, &hwc->period_left);
14110
14111 return new_raw_count;
14112 @@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_event *event,
14113 * The hw event starts counting from this event offset,
14114 * mark it to be able to extra future deltas:
14115 */
14116 - atomic64_set(&hwc->prev_count, (u64)-left);
14117 + atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
14118
14119 err = checking_wrmsrl(hwc->event_base + idx,
14120 (u64)(-left) & x86_pmu.event_mask);
14121 @@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
14122 break;
14123
14124 callchain_store(entry, frame.return_address);
14125 - fp = frame.next_frame;
14126 + fp = (__force const void __user *)frame.next_frame;
14127 }
14128 }
14129
14130 diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
14131 index 898df97..9e82503 100644
14132 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c
14133 +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
14134 @@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
14135
14136 /* Interface defining a CPU specific perfctr watchdog */
14137 struct wd_ops {
14138 - int (*reserve)(void);
14139 - void (*unreserve)(void);
14140 - int (*setup)(unsigned nmi_hz);
14141 - void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
14142 - void (*stop)(void);
14143 + int (* const reserve)(void);
14144 + void (* const unreserve)(void);
14145 + int (* const setup)(unsigned nmi_hz);
14146 + void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
14147 + void (* const stop)(void);
14148 unsigned perfctr;
14149 unsigned evntsel;
14150 u64 checkbit;
14151 @@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
14152 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
14153 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
14154
14155 +/* cannot be const */
14156 static struct wd_ops intel_arch_wd_ops;
14157
14158 static int setup_intel_arch_watchdog(unsigned nmi_hz)
14159 @@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
14160 return 1;
14161 }
14162
14163 +/* cannot be const */
14164 static struct wd_ops intel_arch_wd_ops __read_mostly = {
14165 .reserve = single_msr_reserve,
14166 .unreserve = single_msr_unreserve,
14167 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
14168 index ff95824..2ffdcb5 100644
14169 --- a/arch/x86/kernel/crash.c
14170 +++ b/arch/x86/kernel/crash.c
14171 @@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu, struct die_args *args)
14172 regs = args->regs;
14173
14174 #ifdef CONFIG_X86_32
14175 - if (!user_mode_vm(regs)) {
14176 + if (!user_mode(regs)) {
14177 crash_fixup_ss_esp(&fixed_regs, regs);
14178 regs = &fixed_regs;
14179 }
14180 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
14181 index 37250fe..bf2ec74 100644
14182 --- a/arch/x86/kernel/doublefault_32.c
14183 +++ b/arch/x86/kernel/doublefault_32.c
14184 @@ -11,7 +11,7 @@
14185
14186 #define DOUBLEFAULT_STACKSIZE (1024)
14187 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
14188 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
14189 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
14190
14191 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
14192
14193 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
14194 unsigned long gdt, tss;
14195
14196 store_gdt(&gdt_desc);
14197 - gdt = gdt_desc.address;
14198 + gdt = (unsigned long)gdt_desc.address;
14199
14200 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
14201
14202 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
14203 /* 0x2 bit is always set */
14204 .flags = X86_EFLAGS_SF | 0x2,
14205 .sp = STACK_START,
14206 - .es = __USER_DS,
14207 + .es = __KERNEL_DS,
14208 .cs = __KERNEL_CS,
14209 .ss = __KERNEL_DS,
14210 - .ds = __USER_DS,
14211 + .ds = __KERNEL_DS,
14212 .fs = __KERNEL_PERCPU,
14213
14214 .__cr3 = __pa_nodebug(swapper_pg_dir),
14215 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
14216 index 2d8a371..4fa6ae6 100644
14217 --- a/arch/x86/kernel/dumpstack.c
14218 +++ b/arch/x86/kernel/dumpstack.c
14219 @@ -2,6 +2,9 @@
14220 * Copyright (C) 1991, 1992 Linus Torvalds
14221 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
14222 */
14223 +#ifdef CONFIG_GRKERNSEC_HIDESYM
14224 +#define __INCLUDED_BY_HIDESYM 1
14225 +#endif
14226 #include <linux/kallsyms.h>
14227 #include <linux/kprobes.h>
14228 #include <linux/uaccess.h>
14229 @@ -28,7 +31,7 @@ static int die_counter;
14230
14231 void printk_address(unsigned long address, int reliable)
14232 {
14233 - printk(" [<%p>] %s%pS\n", (void *) address,
14234 + printk(" [<%p>] %s%pA\n", (void *) address,
14235 reliable ? "" : "? ", (void *) address);
14236 }
14237
14238 @@ -36,9 +39,8 @@ void printk_address(unsigned long address, int reliable)
14239 static void
14240 print_ftrace_graph_addr(unsigned long addr, void *data,
14241 const struct stacktrace_ops *ops,
14242 - struct thread_info *tinfo, int *graph)
14243 + struct task_struct *task, int *graph)
14244 {
14245 - struct task_struct *task = tinfo->task;
14246 unsigned long ret_addr;
14247 int index = task->curr_ret_stack;
14248
14249 @@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14250 static inline void
14251 print_ftrace_graph_addr(unsigned long addr, void *data,
14252 const struct stacktrace_ops *ops,
14253 - struct thread_info *tinfo, int *graph)
14254 + struct task_struct *task, int *graph)
14255 { }
14256 #endif
14257
14258 @@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14259 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
14260 */
14261
14262 -static inline int valid_stack_ptr(struct thread_info *tinfo,
14263 - void *p, unsigned int size, void *end)
14264 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
14265 {
14266 - void *t = tinfo;
14267 if (end) {
14268 if (p < end && p >= (end-THREAD_SIZE))
14269 return 1;
14270 @@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
14271 }
14272
14273 unsigned long
14274 -print_context_stack(struct thread_info *tinfo,
14275 +print_context_stack(struct task_struct *task, void *stack_start,
14276 unsigned long *stack, unsigned long bp,
14277 const struct stacktrace_ops *ops, void *data,
14278 unsigned long *end, int *graph)
14279 {
14280 struct stack_frame *frame = (struct stack_frame *)bp;
14281
14282 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
14283 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
14284 unsigned long addr;
14285
14286 addr = *stack;
14287 @@ -103,7 +103,7 @@ print_context_stack(struct thread_info *tinfo,
14288 } else {
14289 ops->address(data, addr, 0);
14290 }
14291 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14292 + print_ftrace_graph_addr(addr, data, ops, task, graph);
14293 }
14294 stack++;
14295 }
14296 @@ -180,7 +180,7 @@ void dump_stack(void)
14297 #endif
14298
14299 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
14300 - current->pid, current->comm, print_tainted(),
14301 + task_pid_nr(current), current->comm, print_tainted(),
14302 init_utsname()->release,
14303 (int)strcspn(init_utsname()->version, " "),
14304 init_utsname()->version);
14305 @@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
14306 return flags;
14307 }
14308
14309 +extern void gr_handle_kernel_exploit(void);
14310 +
14311 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14312 {
14313 if (regs && kexec_should_crash(current))
14314 @@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14315 panic("Fatal exception in interrupt");
14316 if (panic_on_oops)
14317 panic("Fatal exception");
14318 - do_exit(signr);
14319 +
14320 + gr_handle_kernel_exploit();
14321 +
14322 + do_group_exit(signr);
14323 }
14324
14325 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14326 @@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs *regs, long err)
14327 unsigned long flags = oops_begin();
14328 int sig = SIGSEGV;
14329
14330 - if (!user_mode_vm(regs))
14331 + if (!user_mode(regs))
14332 report_bug(regs->ip, regs);
14333
14334 if (__die(str, regs, err))
14335 diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
14336 index 81086c2..13e8b17 100644
14337 --- a/arch/x86/kernel/dumpstack.h
14338 +++ b/arch/x86/kernel/dumpstack.h
14339 @@ -15,7 +15,7 @@
14340 #endif
14341
14342 extern unsigned long
14343 -print_context_stack(struct thread_info *tinfo,
14344 +print_context_stack(struct task_struct *task, void *stack_start,
14345 unsigned long *stack, unsigned long bp,
14346 const struct stacktrace_ops *ops, void *data,
14347 unsigned long *end, int *graph);
14348 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
14349 index f7dd2a7..504f53b 100644
14350 --- a/arch/x86/kernel/dumpstack_32.c
14351 +++ b/arch/x86/kernel/dumpstack_32.c
14352 @@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14353 #endif
14354
14355 for (;;) {
14356 - struct thread_info *context;
14357 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14358 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14359
14360 - context = (struct thread_info *)
14361 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14362 - bp = print_context_stack(context, stack, bp, ops,
14363 - data, NULL, &graph);
14364 -
14365 - stack = (unsigned long *)context->previous_esp;
14366 - if (!stack)
14367 + if (stack_start == task_stack_page(task))
14368 break;
14369 + stack = *(unsigned long **)stack_start;
14370 if (ops->stack(data, "IRQ") < 0)
14371 break;
14372 touch_nmi_watchdog();
14373 @@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs)
14374 * When in-kernel, we also print out the stack and code at the
14375 * time of the fault..
14376 */
14377 - if (!user_mode_vm(regs)) {
14378 + if (!user_mode(regs)) {
14379 unsigned int code_prologue = code_bytes * 43 / 64;
14380 unsigned int code_len = code_bytes;
14381 unsigned char c;
14382 u8 *ip;
14383 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14384
14385 printk(KERN_EMERG "Stack:\n");
14386 show_stack_log_lvl(NULL, regs, &regs->sp,
14387 @@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs)
14388
14389 printk(KERN_EMERG "Code: ");
14390
14391 - ip = (u8 *)regs->ip - code_prologue;
14392 + ip = (u8 *)regs->ip - code_prologue + cs_base;
14393 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14394 /* try starting at IP */
14395 - ip = (u8 *)regs->ip;
14396 + ip = (u8 *)regs->ip + cs_base;
14397 code_len = code_len - code_prologue + 1;
14398 }
14399 for (i = 0; i < code_len; i++, ip++) {
14400 @@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs)
14401 printk(" Bad EIP value.");
14402 break;
14403 }
14404 - if (ip == (u8 *)regs->ip)
14405 + if (ip == (u8 *)regs->ip + cs_base)
14406 printk("<%02x> ", c);
14407 else
14408 printk("%02x ", c);
14409 @@ -145,10 +142,23 @@ void show_registers(struct pt_regs *regs)
14410 printk("\n");
14411 }
14412
14413 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14414 +void pax_check_alloca(unsigned long size)
14415 +{
14416 + unsigned long sp = (unsigned long)&sp, stack_left;
14417 +
14418 + /* all kernel stacks are of the same size */
14419 + stack_left = sp & (THREAD_SIZE - 1);
14420 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14421 +}
14422 +EXPORT_SYMBOL(pax_check_alloca);
14423 +#endif
14424 +
14425 int is_valid_bugaddr(unsigned long ip)
14426 {
14427 unsigned short ud2;
14428
14429 + ip = ktla_ktva(ip);
14430 if (ip < PAGE_OFFSET)
14431 return 0;
14432 if (probe_kernel_address((unsigned short *)ip, ud2))
14433 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14434 index a071e6b..36cd585 100644
14435 --- a/arch/x86/kernel/dumpstack_64.c
14436 +++ b/arch/x86/kernel/dumpstack_64.c
14437 @@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14438 unsigned long *irq_stack_end =
14439 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14440 unsigned used = 0;
14441 - struct thread_info *tinfo;
14442 int graph = 0;
14443 + void *stack_start;
14444
14445 if (!task)
14446 task = current;
14447 @@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14448 * current stack address. If the stacks consist of nested
14449 * exceptions
14450 */
14451 - tinfo = task_thread_info(task);
14452 for (;;) {
14453 char *id;
14454 unsigned long *estack_end;
14455 +
14456 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14457 &used, &id);
14458
14459 @@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14460 if (ops->stack(data, id) < 0)
14461 break;
14462
14463 - bp = print_context_stack(tinfo, stack, bp, ops,
14464 + bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14465 data, estack_end, &graph);
14466 ops->stack(data, "<EOE>");
14467 /*
14468 @@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14469 if (stack >= irq_stack && stack < irq_stack_end) {
14470 if (ops->stack(data, "IRQ") < 0)
14471 break;
14472 - bp = print_context_stack(tinfo, stack, bp,
14473 + bp = print_context_stack(task, irq_stack, stack, bp,
14474 ops, data, irq_stack_end, &graph);
14475 /*
14476 * We link to the next stack (which would be
14477 @@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14478 /*
14479 * This handles the process stack:
14480 */
14481 - bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14482 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14483 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14484 put_cpu();
14485 }
14486 EXPORT_SYMBOL(dump_trace);
14487 @@ -304,3 +305,50 @@ int is_valid_bugaddr(unsigned long ip)
14488 return ud2 == 0x0b0f;
14489 }
14490
14491 +
14492 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14493 +void pax_check_alloca(unsigned long size)
14494 +{
14495 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14496 + unsigned cpu, used;
14497 + char *id;
14498 +
14499 + /* check the process stack first */
14500 + stack_start = (unsigned long)task_stack_page(current);
14501 + stack_end = stack_start + THREAD_SIZE;
14502 + if (likely(stack_start <= sp && sp < stack_end)) {
14503 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
14504 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14505 + return;
14506 + }
14507 +
14508 + cpu = get_cpu();
14509 +
14510 + /* check the irq stacks */
14511 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14512 + stack_start = stack_end - IRQ_STACK_SIZE;
14513 + if (stack_start <= sp && sp < stack_end) {
14514 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14515 + put_cpu();
14516 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14517 + return;
14518 + }
14519 +
14520 + /* check the exception stacks */
14521 + used = 0;
14522 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14523 + stack_start = stack_end - EXCEPTION_STKSZ;
14524 + if (stack_end && stack_start <= sp && sp < stack_end) {
14525 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14526 + put_cpu();
14527 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14528 + return;
14529 + }
14530 +
14531 + put_cpu();
14532 +
14533 + /* unknown stack */
14534 + BUG();
14535 +}
14536 +EXPORT_SYMBOL(pax_check_alloca);
14537 +#endif
14538 diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
14539 index a89739a..95e0c48 100644
14540 --- a/arch/x86/kernel/e820.c
14541 +++ b/arch/x86/kernel/e820.c
14542 @@ -733,7 +733,7 @@ struct early_res {
14543 };
14544 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
14545 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
14546 - {}
14547 + { 0, 0, {0}, 0 }
14548 };
14549
14550 static int __init find_overlapped_early(u64 start, u64 end)
14551 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14552 index b9c830c..1e41a96 100644
14553 --- a/arch/x86/kernel/early_printk.c
14554 +++ b/arch/x86/kernel/early_printk.c
14555 @@ -7,6 +7,7 @@
14556 #include <linux/pci_regs.h>
14557 #include <linux/pci_ids.h>
14558 #include <linux/errno.h>
14559 +#include <linux/sched.h>
14560 #include <asm/io.h>
14561 #include <asm/processor.h>
14562 #include <asm/fcntl.h>
14563 @@ -170,6 +171,8 @@ asmlinkage void early_printk(const char *fmt, ...)
14564 int n;
14565 va_list ap;
14566
14567 + pax_track_stack();
14568 +
14569 va_start(ap, fmt);
14570 n = vscnprintf(buf, sizeof(buf), fmt, ap);
14571 early_console->write(early_console, buf, n);
14572 diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c
14573 index 5cab48e..b025f9b 100644
14574 --- a/arch/x86/kernel/efi_32.c
14575 +++ b/arch/x86/kernel/efi_32.c
14576 @@ -38,70 +38,56 @@
14577 */
14578
14579 static unsigned long efi_rt_eflags;
14580 -static pgd_t efi_bak_pg_dir_pointer[2];
14581 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
14582
14583 -void efi_call_phys_prelog(void)
14584 +void __init efi_call_phys_prelog(void)
14585 {
14586 - unsigned long cr4;
14587 - unsigned long temp;
14588 struct desc_ptr gdt_descr;
14589
14590 +#ifdef CONFIG_PAX_KERNEXEC
14591 + struct desc_struct d;
14592 +#endif
14593 +
14594 local_irq_save(efi_rt_eflags);
14595
14596 - /*
14597 - * If I don't have PAE, I should just duplicate two entries in page
14598 - * directory. If I have PAE, I just need to duplicate one entry in
14599 - * page directory.
14600 - */
14601 - cr4 = read_cr4_safe();
14602 -
14603 - if (cr4 & X86_CR4_PAE) {
14604 - efi_bak_pg_dir_pointer[0].pgd =
14605 - swapper_pg_dir[pgd_index(0)].pgd;
14606 - swapper_pg_dir[0].pgd =
14607 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
14608 - } else {
14609 - efi_bak_pg_dir_pointer[0].pgd =
14610 - swapper_pg_dir[pgd_index(0)].pgd;
14611 - efi_bak_pg_dir_pointer[1].pgd =
14612 - swapper_pg_dir[pgd_index(0x400000)].pgd;
14613 - swapper_pg_dir[pgd_index(0)].pgd =
14614 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
14615 - temp = PAGE_OFFSET + 0x400000;
14616 - swapper_pg_dir[pgd_index(0x400000)].pgd =
14617 - swapper_pg_dir[pgd_index(temp)].pgd;
14618 - }
14619 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
14620 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
14621 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
14622
14623 /*
14624 * After the lock is released, the original page table is restored.
14625 */
14626 __flush_tlb_all();
14627
14628 +#ifdef CONFIG_PAX_KERNEXEC
14629 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
14630 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
14631 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
14632 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
14633 +#endif
14634 +
14635 gdt_descr.address = __pa(get_cpu_gdt_table(0));
14636 gdt_descr.size = GDT_SIZE - 1;
14637 load_gdt(&gdt_descr);
14638 }
14639
14640 -void efi_call_phys_epilog(void)
14641 +void __init efi_call_phys_epilog(void)
14642 {
14643 - unsigned long cr4;
14644 struct desc_ptr gdt_descr;
14645
14646 +#ifdef CONFIG_PAX_KERNEXEC
14647 + struct desc_struct d;
14648 +
14649 + memset(&d, 0, sizeof d);
14650 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
14651 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
14652 +#endif
14653 +
14654 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
14655 gdt_descr.size = GDT_SIZE - 1;
14656 load_gdt(&gdt_descr);
14657
14658 - cr4 = read_cr4_safe();
14659 -
14660 - if (cr4 & X86_CR4_PAE) {
14661 - swapper_pg_dir[pgd_index(0)].pgd =
14662 - efi_bak_pg_dir_pointer[0].pgd;
14663 - } else {
14664 - swapper_pg_dir[pgd_index(0)].pgd =
14665 - efi_bak_pg_dir_pointer[0].pgd;
14666 - swapper_pg_dir[pgd_index(0x400000)].pgd =
14667 - efi_bak_pg_dir_pointer[1].pgd;
14668 - }
14669 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
14670
14671 /*
14672 * After the lock is released, the original page table is restored.
14673 diff --git a/arch/x86/kernel/efi_stub_32.S b/arch/x86/kernel/efi_stub_32.S
14674 index fbe66e6..c5c0dd2 100644
14675 --- a/arch/x86/kernel/efi_stub_32.S
14676 +++ b/arch/x86/kernel/efi_stub_32.S
14677 @@ -6,7 +6,9 @@
14678 */
14679
14680 #include <linux/linkage.h>
14681 +#include <linux/init.h>
14682 #include <asm/page_types.h>
14683 +#include <asm/segment.h>
14684
14685 /*
14686 * efi_call_phys(void *, ...) is a function with variable parameters.
14687 @@ -20,7 +22,7 @@
14688 * service functions will comply with gcc calling convention, too.
14689 */
14690
14691 -.text
14692 +__INIT
14693 ENTRY(efi_call_phys)
14694 /*
14695 * 0. The function can only be called in Linux kernel. So CS has been
14696 @@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
14697 * The mapping of lower virtual memory has been created in prelog and
14698 * epilog.
14699 */
14700 - movl $1f, %edx
14701 - subl $__PAGE_OFFSET, %edx
14702 - jmp *%edx
14703 + movl $(__KERNEXEC_EFI_DS), %edx
14704 + mov %edx, %ds
14705 + mov %edx, %es
14706 + mov %edx, %ss
14707 + ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
14708 1:
14709
14710 /*
14711 @@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
14712 * parameter 2, ..., param n. To make things easy, we save the return
14713 * address of efi_call_phys in a global variable.
14714 */
14715 - popl %edx
14716 - movl %edx, saved_return_addr
14717 - /* get the function pointer into ECX*/
14718 - popl %ecx
14719 - movl %ecx, efi_rt_function_ptr
14720 - movl $2f, %edx
14721 - subl $__PAGE_OFFSET, %edx
14722 - pushl %edx
14723 + popl (saved_return_addr)
14724 + popl (efi_rt_function_ptr)
14725
14726 /*
14727 * 3. Clear PG bit in %CR0.
14728 @@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
14729 /*
14730 * 5. Call the physical function.
14731 */
14732 - jmp *%ecx
14733 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
14734
14735 -2:
14736 /*
14737 * 6. After EFI runtime service returns, control will return to
14738 * following instruction. We'd better readjust stack pointer first.
14739 @@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
14740 movl %cr0, %edx
14741 orl $0x80000000, %edx
14742 movl %edx, %cr0
14743 - jmp 1f
14744 -1:
14745 +
14746 /*
14747 * 8. Now restore the virtual mode from flat mode by
14748 * adding EIP with PAGE_OFFSET.
14749 */
14750 - movl $1f, %edx
14751 - jmp *%edx
14752 + ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
14753 1:
14754 + movl $(__KERNEL_DS), %edx
14755 + mov %edx, %ds
14756 + mov %edx, %es
14757 + mov %edx, %ss
14758
14759 /*
14760 * 9. Balance the stack. And because EAX contain the return value,
14761 * we'd better not clobber it.
14762 */
14763 - leal efi_rt_function_ptr, %edx
14764 - movl (%edx), %ecx
14765 - pushl %ecx
14766 + pushl (efi_rt_function_ptr)
14767
14768 /*
14769 - * 10. Push the saved return address onto the stack and return.
14770 + * 10. Return to the saved return address.
14771 */
14772 - leal saved_return_addr, %edx
14773 - movl (%edx), %ecx
14774 - pushl %ecx
14775 - ret
14776 + jmpl *(saved_return_addr)
14777 ENDPROC(efi_call_phys)
14778 .previous
14779
14780 -.data
14781 +__INITDATA
14782 saved_return_addr:
14783 .long 0
14784 efi_rt_function_ptr:
14785 diff --git a/arch/x86/kernel/efi_stub_64.S b/arch/x86/kernel/efi_stub_64.S
14786 index 4c07cca..2c8427d 100644
14787 --- a/arch/x86/kernel/efi_stub_64.S
14788 +++ b/arch/x86/kernel/efi_stub_64.S
14789 @@ -7,6 +7,7 @@
14790 */
14791
14792 #include <linux/linkage.h>
14793 +#include <asm/alternative-asm.h>
14794
14795 #define SAVE_XMM \
14796 mov %rsp, %rax; \
14797 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
14798 call *%rdi
14799 addq $32, %rsp
14800 RESTORE_XMM
14801 + pax_force_retaddr 0, 1
14802 ret
14803 ENDPROC(efi_call0)
14804
14805 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
14806 call *%rdi
14807 addq $32, %rsp
14808 RESTORE_XMM
14809 + pax_force_retaddr 0, 1
14810 ret
14811 ENDPROC(efi_call1)
14812
14813 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
14814 call *%rdi
14815 addq $32, %rsp
14816 RESTORE_XMM
14817 + pax_force_retaddr 0, 1
14818 ret
14819 ENDPROC(efi_call2)
14820
14821 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
14822 call *%rdi
14823 addq $32, %rsp
14824 RESTORE_XMM
14825 + pax_force_retaddr 0, 1
14826 ret
14827 ENDPROC(efi_call3)
14828
14829 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
14830 call *%rdi
14831 addq $32, %rsp
14832 RESTORE_XMM
14833 + pax_force_retaddr 0, 1
14834 ret
14835 ENDPROC(efi_call4)
14836
14837 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
14838 call *%rdi
14839 addq $48, %rsp
14840 RESTORE_XMM
14841 + pax_force_retaddr 0, 1
14842 ret
14843 ENDPROC(efi_call5)
14844
14845 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
14846 call *%rdi
14847 addq $48, %rsp
14848 RESTORE_XMM
14849 + pax_force_retaddr 0, 1
14850 ret
14851 ENDPROC(efi_call6)
14852 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
14853 index c097e7d..c689cf4 100644
14854 --- a/arch/x86/kernel/entry_32.S
14855 +++ b/arch/x86/kernel/entry_32.S
14856 @@ -185,13 +185,146 @@
14857 /*CFI_REL_OFFSET gs, PT_GS*/
14858 .endm
14859 .macro SET_KERNEL_GS reg
14860 +
14861 +#ifdef CONFIG_CC_STACKPROTECTOR
14862 movl $(__KERNEL_STACK_CANARY), \reg
14863 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14864 + movl $(__USER_DS), \reg
14865 +#else
14866 + xorl \reg, \reg
14867 +#endif
14868 +
14869 movl \reg, %gs
14870 .endm
14871
14872 #endif /* CONFIG_X86_32_LAZY_GS */
14873
14874 -.macro SAVE_ALL
14875 +.macro pax_enter_kernel
14876 +#ifdef CONFIG_PAX_KERNEXEC
14877 + call pax_enter_kernel
14878 +#endif
14879 +.endm
14880 +
14881 +.macro pax_exit_kernel
14882 +#ifdef CONFIG_PAX_KERNEXEC
14883 + call pax_exit_kernel
14884 +#endif
14885 +.endm
14886 +
14887 +#ifdef CONFIG_PAX_KERNEXEC
14888 +ENTRY(pax_enter_kernel)
14889 +#ifdef CONFIG_PARAVIRT
14890 + pushl %eax
14891 + pushl %ecx
14892 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
14893 + mov %eax, %esi
14894 +#else
14895 + mov %cr0, %esi
14896 +#endif
14897 + bts $16, %esi
14898 + jnc 1f
14899 + mov %cs, %esi
14900 + cmp $__KERNEL_CS, %esi
14901 + jz 3f
14902 + ljmp $__KERNEL_CS, $3f
14903 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
14904 +2:
14905 +#ifdef CONFIG_PARAVIRT
14906 + mov %esi, %eax
14907 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14908 +#else
14909 + mov %esi, %cr0
14910 +#endif
14911 +3:
14912 +#ifdef CONFIG_PARAVIRT
14913 + popl %ecx
14914 + popl %eax
14915 +#endif
14916 + ret
14917 +ENDPROC(pax_enter_kernel)
14918 +
14919 +ENTRY(pax_exit_kernel)
14920 +#ifdef CONFIG_PARAVIRT
14921 + pushl %eax
14922 + pushl %ecx
14923 +#endif
14924 + mov %cs, %esi
14925 + cmp $__KERNEXEC_KERNEL_CS, %esi
14926 + jnz 2f
14927 +#ifdef CONFIG_PARAVIRT
14928 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
14929 + mov %eax, %esi
14930 +#else
14931 + mov %cr0, %esi
14932 +#endif
14933 + btr $16, %esi
14934 + ljmp $__KERNEL_CS, $1f
14935 +1:
14936 +#ifdef CONFIG_PARAVIRT
14937 + mov %esi, %eax
14938 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
14939 +#else
14940 + mov %esi, %cr0
14941 +#endif
14942 +2:
14943 +#ifdef CONFIG_PARAVIRT
14944 + popl %ecx
14945 + popl %eax
14946 +#endif
14947 + ret
14948 +ENDPROC(pax_exit_kernel)
14949 +#endif
14950 +
14951 +.macro pax_erase_kstack
14952 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14953 + call pax_erase_kstack
14954 +#endif
14955 +.endm
14956 +
14957 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14958 +/*
14959 + * ebp: thread_info
14960 + * ecx, edx: can be clobbered
14961 + */
14962 +ENTRY(pax_erase_kstack)
14963 + pushl %edi
14964 + pushl %eax
14965 +
14966 + mov TI_lowest_stack(%ebp), %edi
14967 + mov $-0xBEEF, %eax
14968 + std
14969 +
14970 +1: mov %edi, %ecx
14971 + and $THREAD_SIZE_asm - 1, %ecx
14972 + shr $2, %ecx
14973 + repne scasl
14974 + jecxz 2f
14975 +
14976 + cmp $2*16, %ecx
14977 + jc 2f
14978 +
14979 + mov $2*16, %ecx
14980 + repe scasl
14981 + jecxz 2f
14982 + jne 1b
14983 +
14984 +2: cld
14985 + mov %esp, %ecx
14986 + sub %edi, %ecx
14987 + shr $2, %ecx
14988 + rep stosl
14989 +
14990 + mov TI_task_thread_sp0(%ebp), %edi
14991 + sub $128, %edi
14992 + mov %edi, TI_lowest_stack(%ebp)
14993 +
14994 + popl %eax
14995 + popl %edi
14996 + ret
14997 +ENDPROC(pax_erase_kstack)
14998 +#endif
14999 +
15000 +.macro __SAVE_ALL _DS
15001 cld
15002 PUSH_GS
15003 pushl %fs
15004 @@ -224,7 +357,7 @@
15005 pushl %ebx
15006 CFI_ADJUST_CFA_OFFSET 4
15007 CFI_REL_OFFSET ebx, 0
15008 - movl $(__USER_DS), %edx
15009 + movl $\_DS, %edx
15010 movl %edx, %ds
15011 movl %edx, %es
15012 movl $(__KERNEL_PERCPU), %edx
15013 @@ -232,6 +365,15 @@
15014 SET_KERNEL_GS %edx
15015 .endm
15016
15017 +.macro SAVE_ALL
15018 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
15019 + __SAVE_ALL __KERNEL_DS
15020 + pax_enter_kernel
15021 +#else
15022 + __SAVE_ALL __USER_DS
15023 +#endif
15024 +.endm
15025 +
15026 .macro RESTORE_INT_REGS
15027 popl %ebx
15028 CFI_ADJUST_CFA_OFFSET -4
15029 @@ -331,7 +473,7 @@ ENTRY(ret_from_fork)
15030 CFI_ADJUST_CFA_OFFSET -4
15031 jmp syscall_exit
15032 CFI_ENDPROC
15033 -END(ret_from_fork)
15034 +ENDPROC(ret_from_fork)
15035
15036 /*
15037 * Return to user mode is not as complex as all this looks,
15038 @@ -352,7 +494,15 @@ check_userspace:
15039 movb PT_CS(%esp), %al
15040 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
15041 cmpl $USER_RPL, %eax
15042 +
15043 +#ifdef CONFIG_PAX_KERNEXEC
15044 + jae resume_userspace
15045 +
15046 + PAX_EXIT_KERNEL
15047 + jmp resume_kernel
15048 +#else
15049 jb resume_kernel # not returning to v8086 or userspace
15050 +#endif
15051
15052 ENTRY(resume_userspace)
15053 LOCKDEP_SYS_EXIT
15054 @@ -364,8 +514,8 @@ ENTRY(resume_userspace)
15055 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
15056 # int/exception return?
15057 jne work_pending
15058 - jmp restore_all
15059 -END(ret_from_exception)
15060 + jmp restore_all_pax
15061 +ENDPROC(ret_from_exception)
15062
15063 #ifdef CONFIG_PREEMPT
15064 ENTRY(resume_kernel)
15065 @@ -380,7 +530,7 @@ need_resched:
15066 jz restore_all
15067 call preempt_schedule_irq
15068 jmp need_resched
15069 -END(resume_kernel)
15070 +ENDPROC(resume_kernel)
15071 #endif
15072 CFI_ENDPROC
15073
15074 @@ -414,25 +564,36 @@ sysenter_past_esp:
15075 /*CFI_REL_OFFSET cs, 0*/
15076 /*
15077 * Push current_thread_info()->sysenter_return to the stack.
15078 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
15079 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
15080 */
15081 - pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
15082 + pushl $0
15083 CFI_ADJUST_CFA_OFFSET 4
15084 CFI_REL_OFFSET eip, 0
15085
15086 pushl %eax
15087 CFI_ADJUST_CFA_OFFSET 4
15088 SAVE_ALL
15089 + GET_THREAD_INFO(%ebp)
15090 + movl TI_sysenter_return(%ebp),%ebp
15091 + movl %ebp,PT_EIP(%esp)
15092 ENABLE_INTERRUPTS(CLBR_NONE)
15093
15094 /*
15095 * Load the potential sixth argument from user stack.
15096 * Careful about security.
15097 */
15098 + movl PT_OLDESP(%esp),%ebp
15099 +
15100 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15101 + mov PT_OLDSS(%esp),%ds
15102 +1: movl %ds:(%ebp),%ebp
15103 + push %ss
15104 + pop %ds
15105 +#else
15106 cmpl $__PAGE_OFFSET-3,%ebp
15107 jae syscall_fault
15108 1: movl (%ebp),%ebp
15109 +#endif
15110 +
15111 movl %ebp,PT_EBP(%esp)
15112 .section __ex_table,"a"
15113 .align 4
15114 @@ -455,12 +616,24 @@ sysenter_do_call:
15115 testl $_TIF_ALLWORK_MASK, %ecx
15116 jne sysexit_audit
15117 sysenter_exit:
15118 +
15119 +#ifdef CONFIG_PAX_RANDKSTACK
15120 + pushl_cfi %eax
15121 + movl %esp, %eax
15122 + call pax_randomize_kstack
15123 + popl_cfi %eax
15124 +#endif
15125 +
15126 + pax_erase_kstack
15127 +
15128 /* if something modifies registers it must also disable sysexit */
15129 movl PT_EIP(%esp), %edx
15130 movl PT_OLDESP(%esp), %ecx
15131 xorl %ebp,%ebp
15132 TRACE_IRQS_ON
15133 1: mov PT_FS(%esp), %fs
15134 +2: mov PT_DS(%esp), %ds
15135 +3: mov PT_ES(%esp), %es
15136 PTGS_TO_GS
15137 ENABLE_INTERRUPTS_SYSEXIT
15138
15139 @@ -477,6 +650,9 @@ sysenter_audit:
15140 movl %eax,%edx /* 2nd arg: syscall number */
15141 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
15142 call audit_syscall_entry
15143 +
15144 + pax_erase_kstack
15145 +
15146 pushl %ebx
15147 CFI_ADJUST_CFA_OFFSET 4
15148 movl PT_EAX(%esp),%eax /* reload syscall number */
15149 @@ -504,11 +680,17 @@ sysexit_audit:
15150
15151 CFI_ENDPROC
15152 .pushsection .fixup,"ax"
15153 -2: movl $0,PT_FS(%esp)
15154 +4: movl $0,PT_FS(%esp)
15155 + jmp 1b
15156 +5: movl $0,PT_DS(%esp)
15157 + jmp 1b
15158 +6: movl $0,PT_ES(%esp)
15159 jmp 1b
15160 .section __ex_table,"a"
15161 .align 4
15162 - .long 1b,2b
15163 + .long 1b,4b
15164 + .long 2b,5b
15165 + .long 3b,6b
15166 .popsection
15167 PTGS_TO_GS_EX
15168 ENDPROC(ia32_sysenter_target)
15169 @@ -538,6 +720,15 @@ syscall_exit:
15170 testl $_TIF_ALLWORK_MASK, %ecx # current->work
15171 jne syscall_exit_work
15172
15173 +restore_all_pax:
15174 +
15175 +#ifdef CONFIG_PAX_RANDKSTACK
15176 + movl %esp, %eax
15177 + call pax_randomize_kstack
15178 +#endif
15179 +
15180 + pax_erase_kstack
15181 +
15182 restore_all:
15183 TRACE_IRQS_IRET
15184 restore_all_notrace:
15185 @@ -602,10 +793,29 @@ ldt_ss:
15186 mov PT_OLDESP(%esp), %eax /* load userspace esp */
15187 mov %dx, %ax /* eax: new kernel esp */
15188 sub %eax, %edx /* offset (low word is 0) */
15189 - PER_CPU(gdt_page, %ebx)
15190 +#ifdef CONFIG_SMP
15191 + movl PER_CPU_VAR(cpu_number), %ebx
15192 + shll $PAGE_SHIFT_asm, %ebx
15193 + addl $cpu_gdt_table, %ebx
15194 +#else
15195 + movl $cpu_gdt_table, %ebx
15196 +#endif
15197 shr $16, %edx
15198 +
15199 +#ifdef CONFIG_PAX_KERNEXEC
15200 + mov %cr0, %esi
15201 + btr $16, %esi
15202 + mov %esi, %cr0
15203 +#endif
15204 +
15205 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
15206 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
15207 +
15208 +#ifdef CONFIG_PAX_KERNEXEC
15209 + bts $16, %esi
15210 + mov %esi, %cr0
15211 +#endif
15212 +
15213 pushl $__ESPFIX_SS
15214 CFI_ADJUST_CFA_OFFSET 4
15215 push %eax /* new kernel esp */
15216 @@ -636,36 +846,30 @@ work_resched:
15217 movl TI_flags(%ebp), %ecx
15218 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
15219 # than syscall tracing?
15220 - jz restore_all
15221 + jz restore_all_pax
15222 testb $_TIF_NEED_RESCHED, %cl
15223 jnz work_resched
15224
15225 work_notifysig: # deal with pending signals and
15226 # notify-resume requests
15227 + movl %esp, %eax
15228 #ifdef CONFIG_VM86
15229 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
15230 - movl %esp, %eax
15231 - jne work_notifysig_v86 # returning to kernel-space or
15232 + jz 1f # returning to kernel-space or
15233 # vm86-space
15234 - xorl %edx, %edx
15235 - call do_notify_resume
15236 - jmp resume_userspace_sig
15237
15238 - ALIGN
15239 -work_notifysig_v86:
15240 pushl %ecx # save ti_flags for do_notify_resume
15241 CFI_ADJUST_CFA_OFFSET 4
15242 call save_v86_state # %eax contains pt_regs pointer
15243 popl %ecx
15244 CFI_ADJUST_CFA_OFFSET -4
15245 movl %eax, %esp
15246 -#else
15247 - movl %esp, %eax
15248 +1:
15249 #endif
15250 xorl %edx, %edx
15251 call do_notify_resume
15252 jmp resume_userspace_sig
15253 -END(work_pending)
15254 +ENDPROC(work_pending)
15255
15256 # perform syscall exit tracing
15257 ALIGN
15258 @@ -673,11 +877,14 @@ syscall_trace_entry:
15259 movl $-ENOSYS,PT_EAX(%esp)
15260 movl %esp, %eax
15261 call syscall_trace_enter
15262 +
15263 + pax_erase_kstack
15264 +
15265 /* What it returned is what we'll actually use. */
15266 cmpl $(nr_syscalls), %eax
15267 jnae syscall_call
15268 jmp syscall_exit
15269 -END(syscall_trace_entry)
15270 +ENDPROC(syscall_trace_entry)
15271
15272 # perform syscall exit tracing
15273 ALIGN
15274 @@ -690,20 +897,24 @@ syscall_exit_work:
15275 movl %esp, %eax
15276 call syscall_trace_leave
15277 jmp resume_userspace
15278 -END(syscall_exit_work)
15279 +ENDPROC(syscall_exit_work)
15280 CFI_ENDPROC
15281
15282 RING0_INT_FRAME # can't unwind into user space anyway
15283 syscall_fault:
15284 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15285 + push %ss
15286 + pop %ds
15287 +#endif
15288 GET_THREAD_INFO(%ebp)
15289 movl $-EFAULT,PT_EAX(%esp)
15290 jmp resume_userspace
15291 -END(syscall_fault)
15292 +ENDPROC(syscall_fault)
15293
15294 syscall_badsys:
15295 movl $-ENOSYS,PT_EAX(%esp)
15296 jmp resume_userspace
15297 -END(syscall_badsys)
15298 +ENDPROC(syscall_badsys)
15299 CFI_ENDPROC
15300
15301 /*
15302 @@ -726,6 +937,33 @@ PTREGSCALL(rt_sigreturn)
15303 PTREGSCALL(vm86)
15304 PTREGSCALL(vm86old)
15305
15306 + ALIGN;
15307 +ENTRY(kernel_execve)
15308 + push %ebp
15309 + sub $PT_OLDSS+4,%esp
15310 + push %edi
15311 + push %ecx
15312 + push %eax
15313 + lea 3*4(%esp),%edi
15314 + mov $PT_OLDSS/4+1,%ecx
15315 + xorl %eax,%eax
15316 + rep stosl
15317 + pop %eax
15318 + pop %ecx
15319 + pop %edi
15320 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
15321 + mov %eax,PT_EBX(%esp)
15322 + mov %edx,PT_ECX(%esp)
15323 + mov %ecx,PT_EDX(%esp)
15324 + mov %esp,%eax
15325 + call sys_execve
15326 + GET_THREAD_INFO(%ebp)
15327 + test %eax,%eax
15328 + jz syscall_exit
15329 + add $PT_OLDSS+4,%esp
15330 + pop %ebp
15331 + ret
15332 +
15333 .macro FIXUP_ESPFIX_STACK
15334 /*
15335 * Switch back for ESPFIX stack to the normal zerobased stack
15336 @@ -735,7 +973,13 @@ PTREGSCALL(vm86old)
15337 * normal stack and adjusts ESP with the matching offset.
15338 */
15339 /* fixup the stack */
15340 - PER_CPU(gdt_page, %ebx)
15341 +#ifdef CONFIG_SMP
15342 + movl PER_CPU_VAR(cpu_number), %ebx
15343 + shll $PAGE_SHIFT_asm, %ebx
15344 + addl $cpu_gdt_table, %ebx
15345 +#else
15346 + movl $cpu_gdt_table, %ebx
15347 +#endif
15348 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
15349 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
15350 shl $16, %eax
15351 @@ -793,7 +1037,7 @@ vector=vector+1
15352 .endr
15353 2: jmp common_interrupt
15354 .endr
15355 -END(irq_entries_start)
15356 +ENDPROC(irq_entries_start)
15357
15358 .previous
15359 END(interrupt)
15360 @@ -840,7 +1084,7 @@ ENTRY(coprocessor_error)
15361 CFI_ADJUST_CFA_OFFSET 4
15362 jmp error_code
15363 CFI_ENDPROC
15364 -END(coprocessor_error)
15365 +ENDPROC(coprocessor_error)
15366
15367 ENTRY(simd_coprocessor_error)
15368 RING0_INT_FRAME
15369 @@ -850,7 +1094,7 @@ ENTRY(simd_coprocessor_error)
15370 CFI_ADJUST_CFA_OFFSET 4
15371 jmp error_code
15372 CFI_ENDPROC
15373 -END(simd_coprocessor_error)
15374 +ENDPROC(simd_coprocessor_error)
15375
15376 ENTRY(device_not_available)
15377 RING0_INT_FRAME
15378 @@ -860,7 +1104,7 @@ ENTRY(device_not_available)
15379 CFI_ADJUST_CFA_OFFSET 4
15380 jmp error_code
15381 CFI_ENDPROC
15382 -END(device_not_available)
15383 +ENDPROC(device_not_available)
15384
15385 #ifdef CONFIG_PARAVIRT
15386 ENTRY(native_iret)
15387 @@ -869,12 +1113,12 @@ ENTRY(native_iret)
15388 .align 4
15389 .long native_iret, iret_exc
15390 .previous
15391 -END(native_iret)
15392 +ENDPROC(native_iret)
15393
15394 ENTRY(native_irq_enable_sysexit)
15395 sti
15396 sysexit
15397 -END(native_irq_enable_sysexit)
15398 +ENDPROC(native_irq_enable_sysexit)
15399 #endif
15400
15401 ENTRY(overflow)
15402 @@ -885,7 +1129,7 @@ ENTRY(overflow)
15403 CFI_ADJUST_CFA_OFFSET 4
15404 jmp error_code
15405 CFI_ENDPROC
15406 -END(overflow)
15407 +ENDPROC(overflow)
15408
15409 ENTRY(bounds)
15410 RING0_INT_FRAME
15411 @@ -895,7 +1139,7 @@ ENTRY(bounds)
15412 CFI_ADJUST_CFA_OFFSET 4
15413 jmp error_code
15414 CFI_ENDPROC
15415 -END(bounds)
15416 +ENDPROC(bounds)
15417
15418 ENTRY(invalid_op)
15419 RING0_INT_FRAME
15420 @@ -905,7 +1149,7 @@ ENTRY(invalid_op)
15421 CFI_ADJUST_CFA_OFFSET 4
15422 jmp error_code
15423 CFI_ENDPROC
15424 -END(invalid_op)
15425 +ENDPROC(invalid_op)
15426
15427 ENTRY(coprocessor_segment_overrun)
15428 RING0_INT_FRAME
15429 @@ -915,7 +1159,7 @@ ENTRY(coprocessor_segment_overrun)
15430 CFI_ADJUST_CFA_OFFSET 4
15431 jmp error_code
15432 CFI_ENDPROC
15433 -END(coprocessor_segment_overrun)
15434 +ENDPROC(coprocessor_segment_overrun)
15435
15436 ENTRY(invalid_TSS)
15437 RING0_EC_FRAME
15438 @@ -923,7 +1167,7 @@ ENTRY(invalid_TSS)
15439 CFI_ADJUST_CFA_OFFSET 4
15440 jmp error_code
15441 CFI_ENDPROC
15442 -END(invalid_TSS)
15443 +ENDPROC(invalid_TSS)
15444
15445 ENTRY(segment_not_present)
15446 RING0_EC_FRAME
15447 @@ -931,7 +1175,7 @@ ENTRY(segment_not_present)
15448 CFI_ADJUST_CFA_OFFSET 4
15449 jmp error_code
15450 CFI_ENDPROC
15451 -END(segment_not_present)
15452 +ENDPROC(segment_not_present)
15453
15454 ENTRY(stack_segment)
15455 RING0_EC_FRAME
15456 @@ -939,7 +1183,7 @@ ENTRY(stack_segment)
15457 CFI_ADJUST_CFA_OFFSET 4
15458 jmp error_code
15459 CFI_ENDPROC
15460 -END(stack_segment)
15461 +ENDPROC(stack_segment)
15462
15463 ENTRY(alignment_check)
15464 RING0_EC_FRAME
15465 @@ -947,7 +1191,7 @@ ENTRY(alignment_check)
15466 CFI_ADJUST_CFA_OFFSET 4
15467 jmp error_code
15468 CFI_ENDPROC
15469 -END(alignment_check)
15470 +ENDPROC(alignment_check)
15471
15472 ENTRY(divide_error)
15473 RING0_INT_FRAME
15474 @@ -957,7 +1201,7 @@ ENTRY(divide_error)
15475 CFI_ADJUST_CFA_OFFSET 4
15476 jmp error_code
15477 CFI_ENDPROC
15478 -END(divide_error)
15479 +ENDPROC(divide_error)
15480
15481 #ifdef CONFIG_X86_MCE
15482 ENTRY(machine_check)
15483 @@ -968,7 +1212,7 @@ ENTRY(machine_check)
15484 CFI_ADJUST_CFA_OFFSET 4
15485 jmp error_code
15486 CFI_ENDPROC
15487 -END(machine_check)
15488 +ENDPROC(machine_check)
15489 #endif
15490
15491 ENTRY(spurious_interrupt_bug)
15492 @@ -979,7 +1223,7 @@ ENTRY(spurious_interrupt_bug)
15493 CFI_ADJUST_CFA_OFFSET 4
15494 jmp error_code
15495 CFI_ENDPROC
15496 -END(spurious_interrupt_bug)
15497 +ENDPROC(spurious_interrupt_bug)
15498
15499 ENTRY(kernel_thread_helper)
15500 pushl $0 # fake return address for unwinder
15501 @@ -1095,7 +1339,7 @@ ENDPROC(xen_failsafe_callback)
15502
15503 ENTRY(mcount)
15504 ret
15505 -END(mcount)
15506 +ENDPROC(mcount)
15507
15508 ENTRY(ftrace_caller)
15509 cmpl $0, function_trace_stop
15510 @@ -1124,7 +1368,7 @@ ftrace_graph_call:
15511 .globl ftrace_stub
15512 ftrace_stub:
15513 ret
15514 -END(ftrace_caller)
15515 +ENDPROC(ftrace_caller)
15516
15517 #else /* ! CONFIG_DYNAMIC_FTRACE */
15518
15519 @@ -1160,7 +1404,7 @@ trace:
15520 popl %ecx
15521 popl %eax
15522 jmp ftrace_stub
15523 -END(mcount)
15524 +ENDPROC(mcount)
15525 #endif /* CONFIG_DYNAMIC_FTRACE */
15526 #endif /* CONFIG_FUNCTION_TRACER */
15527
15528 @@ -1181,7 +1425,7 @@ ENTRY(ftrace_graph_caller)
15529 popl %ecx
15530 popl %eax
15531 ret
15532 -END(ftrace_graph_caller)
15533 +ENDPROC(ftrace_graph_caller)
15534
15535 .globl return_to_handler
15536 return_to_handler:
15537 @@ -1198,7 +1442,6 @@ return_to_handler:
15538 ret
15539 #endif
15540
15541 -.section .rodata,"a"
15542 #include "syscall_table_32.S"
15543
15544 syscall_table_size=(.-sys_call_table)
15545 @@ -1255,15 +1498,18 @@ error_code:
15546 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
15547 REG_TO_PTGS %ecx
15548 SET_KERNEL_GS %ecx
15549 - movl $(__USER_DS), %ecx
15550 + movl $(__KERNEL_DS), %ecx
15551 movl %ecx, %ds
15552 movl %ecx, %es
15553 +
15554 + pax_enter_kernel
15555 +
15556 TRACE_IRQS_OFF
15557 movl %esp,%eax # pt_regs pointer
15558 call *%edi
15559 jmp ret_from_exception
15560 CFI_ENDPROC
15561 -END(page_fault)
15562 +ENDPROC(page_fault)
15563
15564 /*
15565 * Debug traps and NMI can happen at the one SYSENTER instruction
15566 @@ -1309,7 +1555,7 @@ debug_stack_correct:
15567 call do_debug
15568 jmp ret_from_exception
15569 CFI_ENDPROC
15570 -END(debug)
15571 +ENDPROC(debug)
15572
15573 /*
15574 * NMI is doubly nasty. It can happen _while_ we're handling
15575 @@ -1351,6 +1597,9 @@ nmi_stack_correct:
15576 xorl %edx,%edx # zero error code
15577 movl %esp,%eax # pt_regs pointer
15578 call do_nmi
15579 +
15580 + pax_exit_kernel
15581 +
15582 jmp restore_all_notrace
15583 CFI_ENDPROC
15584
15585 @@ -1391,12 +1640,15 @@ nmi_espfix_stack:
15586 FIXUP_ESPFIX_STACK # %eax == %esp
15587 xorl %edx,%edx # zero error code
15588 call do_nmi
15589 +
15590 + pax_exit_kernel
15591 +
15592 RESTORE_REGS
15593 lss 12+4(%esp), %esp # back to espfix stack
15594 CFI_ADJUST_CFA_OFFSET -24
15595 jmp irq_return
15596 CFI_ENDPROC
15597 -END(nmi)
15598 +ENDPROC(nmi)
15599
15600 ENTRY(int3)
15601 RING0_INT_FRAME
15602 @@ -1409,7 +1661,7 @@ ENTRY(int3)
15603 call do_int3
15604 jmp ret_from_exception
15605 CFI_ENDPROC
15606 -END(int3)
15607 +ENDPROC(int3)
15608
15609 ENTRY(general_protection)
15610 RING0_EC_FRAME
15611 @@ -1417,7 +1669,7 @@ ENTRY(general_protection)
15612 CFI_ADJUST_CFA_OFFSET 4
15613 jmp error_code
15614 CFI_ENDPROC
15615 -END(general_protection)
15616 +ENDPROC(general_protection)
15617
15618 /*
15619 * End of kprobes section
15620 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
15621 index 34a56a9..4aa5c8b 100644
15622 --- a/arch/x86/kernel/entry_64.S
15623 +++ b/arch/x86/kernel/entry_64.S
15624 @@ -53,6 +53,8 @@
15625 #include <asm/paravirt.h>
15626 #include <asm/ftrace.h>
15627 #include <asm/percpu.h>
15628 +#include <asm/pgtable.h>
15629 +#include <asm/alternative-asm.h>
15630
15631 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15632 #include <linux/elf-em.h>
15633 @@ -64,8 +66,9 @@
15634 #ifdef CONFIG_FUNCTION_TRACER
15635 #ifdef CONFIG_DYNAMIC_FTRACE
15636 ENTRY(mcount)
15637 + pax_force_retaddr
15638 retq
15639 -END(mcount)
15640 +ENDPROC(mcount)
15641
15642 ENTRY(ftrace_caller)
15643 cmpl $0, function_trace_stop
15644 @@ -88,8 +91,9 @@ GLOBAL(ftrace_graph_call)
15645 #endif
15646
15647 GLOBAL(ftrace_stub)
15648 + pax_force_retaddr
15649 retq
15650 -END(ftrace_caller)
15651 +ENDPROC(ftrace_caller)
15652
15653 #else /* ! CONFIG_DYNAMIC_FTRACE */
15654 ENTRY(mcount)
15655 @@ -108,6 +112,7 @@ ENTRY(mcount)
15656 #endif
15657
15658 GLOBAL(ftrace_stub)
15659 + pax_force_retaddr
15660 retq
15661
15662 trace:
15663 @@ -117,12 +122,13 @@ trace:
15664 movq 8(%rbp), %rsi
15665 subq $MCOUNT_INSN_SIZE, %rdi
15666
15667 + pax_force_fptr ftrace_trace_function
15668 call *ftrace_trace_function
15669
15670 MCOUNT_RESTORE_FRAME
15671
15672 jmp ftrace_stub
15673 -END(mcount)
15674 +ENDPROC(mcount)
15675 #endif /* CONFIG_DYNAMIC_FTRACE */
15676 #endif /* CONFIG_FUNCTION_TRACER */
15677
15678 @@ -142,8 +148,9 @@ ENTRY(ftrace_graph_caller)
15679
15680 MCOUNT_RESTORE_FRAME
15681
15682 + pax_force_retaddr
15683 retq
15684 -END(ftrace_graph_caller)
15685 +ENDPROC(ftrace_graph_caller)
15686
15687 GLOBAL(return_to_handler)
15688 subq $24, %rsp
15689 @@ -159,6 +166,7 @@ GLOBAL(return_to_handler)
15690 movq 8(%rsp), %rdx
15691 movq (%rsp), %rax
15692 addq $16, %rsp
15693 + pax_force_retaddr
15694 retq
15695 #endif
15696
15697 @@ -174,6 +182,282 @@ ENTRY(native_usergs_sysret64)
15698 ENDPROC(native_usergs_sysret64)
15699 #endif /* CONFIG_PARAVIRT */
15700
15701 + .macro ljmpq sel, off
15702 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15703 + .byte 0x48; ljmp *1234f(%rip)
15704 + .pushsection .rodata
15705 + .align 16
15706 + 1234: .quad \off; .word \sel
15707 + .popsection
15708 +#else
15709 + pushq $\sel
15710 + pushq $\off
15711 + lretq
15712 +#endif
15713 + .endm
15714 +
15715 + .macro pax_enter_kernel
15716 + pax_set_fptr_mask
15717 +#ifdef CONFIG_PAX_KERNEXEC
15718 + call pax_enter_kernel
15719 +#endif
15720 + .endm
15721 +
15722 + .macro pax_exit_kernel
15723 +#ifdef CONFIG_PAX_KERNEXEC
15724 + call pax_exit_kernel
15725 +#endif
15726 + .endm
15727 +
15728 +#ifdef CONFIG_PAX_KERNEXEC
15729 +ENTRY(pax_enter_kernel)
15730 + pushq %rdi
15731 +
15732 +#ifdef CONFIG_PARAVIRT
15733 + PV_SAVE_REGS(CLBR_RDI)
15734 +#endif
15735 +
15736 + GET_CR0_INTO_RDI
15737 + bts $16,%rdi
15738 + jnc 3f
15739 + mov %cs,%edi
15740 + cmp $__KERNEL_CS,%edi
15741 + jnz 2f
15742 +1:
15743 +
15744 +#ifdef CONFIG_PARAVIRT
15745 + PV_RESTORE_REGS(CLBR_RDI)
15746 +#endif
15747 +
15748 + popq %rdi
15749 + pax_force_retaddr
15750 + retq
15751 +
15752 +2: ljmpq __KERNEL_CS,1f
15753 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
15754 +4: SET_RDI_INTO_CR0
15755 + jmp 1b
15756 +ENDPROC(pax_enter_kernel)
15757 +
15758 +ENTRY(pax_exit_kernel)
15759 + pushq %rdi
15760 +
15761 +#ifdef CONFIG_PARAVIRT
15762 + PV_SAVE_REGS(CLBR_RDI)
15763 +#endif
15764 +
15765 + mov %cs,%rdi
15766 + cmp $__KERNEXEC_KERNEL_CS,%edi
15767 + jz 2f
15768 +1:
15769 +
15770 +#ifdef CONFIG_PARAVIRT
15771 + PV_RESTORE_REGS(CLBR_RDI);
15772 +#endif
15773 +
15774 + popq %rdi
15775 + pax_force_retaddr
15776 + retq
15777 +
15778 +2: GET_CR0_INTO_RDI
15779 + btr $16,%rdi
15780 + ljmpq __KERNEL_CS,3f
15781 +3: SET_RDI_INTO_CR0
15782 + jmp 1b
15783 +#ifdef CONFIG_PARAVIRT
15784 + PV_RESTORE_REGS(CLBR_RDI);
15785 +#endif
15786 +
15787 + popq %rdi
15788 + pax_force_retaddr
15789 + retq
15790 +ENDPROC(pax_exit_kernel)
15791 +#endif
15792 +
15793 + .macro pax_enter_kernel_user
15794 + pax_set_fptr_mask
15795 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15796 + call pax_enter_kernel_user
15797 +#endif
15798 + .endm
15799 +
15800 + .macro pax_exit_kernel_user
15801 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15802 + call pax_exit_kernel_user
15803 +#endif
15804 +#ifdef CONFIG_PAX_RANDKSTACK
15805 + pushq %rax
15806 + call pax_randomize_kstack
15807 + popq %rax
15808 +#endif
15809 + .endm
15810 +
15811 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15812 +ENTRY(pax_enter_kernel_user)
15813 + pushq %rdi
15814 + pushq %rbx
15815 +
15816 +#ifdef CONFIG_PARAVIRT
15817 + PV_SAVE_REGS(CLBR_RDI)
15818 +#endif
15819 +
15820 + GET_CR3_INTO_RDI
15821 + mov %rdi,%rbx
15822 + add $__START_KERNEL_map,%rbx
15823 + sub phys_base(%rip),%rbx
15824 +
15825 +#ifdef CONFIG_PARAVIRT
15826 + pushq %rdi
15827 + cmpl $0, pv_info+PARAVIRT_enabled
15828 + jz 1f
15829 + i = 0
15830 + .rept USER_PGD_PTRS
15831 + mov i*8(%rbx),%rsi
15832 + mov $0,%sil
15833 + lea i*8(%rbx),%rdi
15834 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15835 + i = i + 1
15836 + .endr
15837 + jmp 2f
15838 +1:
15839 +#endif
15840 +
15841 + i = 0
15842 + .rept USER_PGD_PTRS
15843 + movb $0,i*8(%rbx)
15844 + i = i + 1
15845 + .endr
15846 +
15847 +#ifdef CONFIG_PARAVIRT
15848 +2: popq %rdi
15849 +#endif
15850 + SET_RDI_INTO_CR3
15851 +
15852 +#ifdef CONFIG_PAX_KERNEXEC
15853 + GET_CR0_INTO_RDI
15854 + bts $16,%rdi
15855 + SET_RDI_INTO_CR0
15856 +#endif
15857 +
15858 +#ifdef CONFIG_PARAVIRT
15859 + PV_RESTORE_REGS(CLBR_RDI)
15860 +#endif
15861 +
15862 + popq %rbx
15863 + popq %rdi
15864 + pax_force_retaddr
15865 + retq
15866 +ENDPROC(pax_enter_kernel_user)
15867 +
15868 +ENTRY(pax_exit_kernel_user)
15869 + push %rdi
15870 +
15871 +#ifdef CONFIG_PARAVIRT
15872 + pushq %rbx
15873 + PV_SAVE_REGS(CLBR_RDI)
15874 +#endif
15875 +
15876 +#ifdef CONFIG_PAX_KERNEXEC
15877 + GET_CR0_INTO_RDI
15878 + btr $16,%rdi
15879 + SET_RDI_INTO_CR0
15880 +#endif
15881 +
15882 + GET_CR3_INTO_RDI
15883 + add $__START_KERNEL_map,%rdi
15884 + sub phys_base(%rip),%rdi
15885 +
15886 +#ifdef CONFIG_PARAVIRT
15887 + cmpl $0, pv_info+PARAVIRT_enabled
15888 + jz 1f
15889 + mov %rdi,%rbx
15890 + i = 0
15891 + .rept USER_PGD_PTRS
15892 + mov i*8(%rbx),%rsi
15893 + mov $0x67,%sil
15894 + lea i*8(%rbx),%rdi
15895 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15896 + i = i + 1
15897 + .endr
15898 + jmp 2f
15899 +1:
15900 +#endif
15901 +
15902 + i = 0
15903 + .rept USER_PGD_PTRS
15904 + movb $0x67,i*8(%rdi)
15905 + i = i + 1
15906 + .endr
15907 +
15908 +#ifdef CONFIG_PARAVIRT
15909 +2: PV_RESTORE_REGS(CLBR_RDI)
15910 + popq %rbx
15911 +#endif
15912 +
15913 + popq %rdi
15914 + pax_force_retaddr
15915 + retq
15916 +ENDPROC(pax_exit_kernel_user)
15917 +#endif
15918 +
15919 +.macro pax_erase_kstack
15920 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15921 + call pax_erase_kstack
15922 +#endif
15923 +.endm
15924 +
15925 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15926 +/*
15927 + * r11: thread_info
15928 + * rcx, rdx: can be clobbered
15929 + */
15930 +ENTRY(pax_erase_kstack)
15931 + pushq %rdi
15932 + pushq %rax
15933 + pushq %r11
15934 +
15935 + GET_THREAD_INFO(%r11)
15936 + mov TI_lowest_stack(%r11), %rdi
15937 + mov $-0xBEEF, %rax
15938 + std
15939 +
15940 +1: mov %edi, %ecx
15941 + and $THREAD_SIZE_asm - 1, %ecx
15942 + shr $3, %ecx
15943 + repne scasq
15944 + jecxz 2f
15945 +
15946 + cmp $2*8, %ecx
15947 + jc 2f
15948 +
15949 + mov $2*8, %ecx
15950 + repe scasq
15951 + jecxz 2f
15952 + jne 1b
15953 +
15954 +2: cld
15955 + mov %esp, %ecx
15956 + sub %edi, %ecx
15957 +
15958 + cmp $THREAD_SIZE_asm, %rcx
15959 + jb 3f
15960 + ud2
15961 +3:
15962 +
15963 + shr $3, %ecx
15964 + rep stosq
15965 +
15966 + mov TI_task_thread_sp0(%r11), %rdi
15967 + sub $256, %rdi
15968 + mov %rdi, TI_lowest_stack(%r11)
15969 +
15970 + popq %r11
15971 + popq %rax
15972 + popq %rdi
15973 + pax_force_retaddr
15974 + ret
15975 +ENDPROC(pax_erase_kstack)
15976 +#endif
15977
15978 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
15979 #ifdef CONFIG_TRACE_IRQFLAGS
15980 @@ -233,8 +517,8 @@ ENDPROC(native_usergs_sysret64)
15981 .endm
15982
15983 .macro UNFAKE_STACK_FRAME
15984 - addq $8*6, %rsp
15985 - CFI_ADJUST_CFA_OFFSET -(6*8)
15986 + addq $8*6 + ARG_SKIP, %rsp
15987 + CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
15988 .endm
15989
15990 /*
15991 @@ -317,7 +601,7 @@ ENTRY(save_args)
15992 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
15993 movq_cfi rbp, 8 /* push %rbp */
15994 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
15995 - testl $3, CS(%rdi)
15996 + testb $3, CS(%rdi)
15997 je 1f
15998 SWAPGS
15999 /*
16000 @@ -337,9 +621,10 @@ ENTRY(save_args)
16001 * We entered an interrupt context - irqs are off:
16002 */
16003 2: TRACE_IRQS_OFF
16004 + pax_force_retaddr
16005 ret
16006 CFI_ENDPROC
16007 -END(save_args)
16008 +ENDPROC(save_args)
16009
16010 ENTRY(save_rest)
16011 PARTIAL_FRAME 1 REST_SKIP+8
16012 @@ -352,9 +637,10 @@ ENTRY(save_rest)
16013 movq_cfi r15, R15+16
16014 movq %r11, 8(%rsp) /* return address */
16015 FIXUP_TOP_OF_STACK %r11, 16
16016 + pax_force_retaddr
16017 ret
16018 CFI_ENDPROC
16019 -END(save_rest)
16020 +ENDPROC(save_rest)
16021
16022 /* save complete stack frame */
16023 .pushsection .kprobes.text, "ax"
16024 @@ -383,9 +669,10 @@ ENTRY(save_paranoid)
16025 js 1f /* negative -> in kernel */
16026 SWAPGS
16027 xorl %ebx,%ebx
16028 -1: ret
16029 +1: pax_force_retaddr_bts
16030 + ret
16031 CFI_ENDPROC
16032 -END(save_paranoid)
16033 +ENDPROC(save_paranoid)
16034 .popsection
16035
16036 /*
16037 @@ -409,7 +696,7 @@ ENTRY(ret_from_fork)
16038
16039 RESTORE_REST
16040
16041 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16042 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16043 je int_ret_from_sys_call
16044
16045 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
16046 @@ -419,7 +706,7 @@ ENTRY(ret_from_fork)
16047 jmp ret_from_sys_call # go to the SYSRET fastpath
16048
16049 CFI_ENDPROC
16050 -END(ret_from_fork)
16051 +ENDPROC(ret_from_fork)
16052
16053 /*
16054 * System call entry. Upto 6 arguments in registers are supported.
16055 @@ -455,7 +742,7 @@ END(ret_from_fork)
16056 ENTRY(system_call)
16057 CFI_STARTPROC simple
16058 CFI_SIGNAL_FRAME
16059 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
16060 + CFI_DEF_CFA rsp,0
16061 CFI_REGISTER rip,rcx
16062 /*CFI_REGISTER rflags,r11*/
16063 SWAPGS_UNSAFE_STACK
16064 @@ -468,12 +755,13 @@ ENTRY(system_call_after_swapgs)
16065
16066 movq %rsp,PER_CPU_VAR(old_rsp)
16067 movq PER_CPU_VAR(kernel_stack),%rsp
16068 + SAVE_ARGS 8*6,1
16069 + pax_enter_kernel_user
16070 /*
16071 * No need to follow this irqs off/on section - it's straight
16072 * and short:
16073 */
16074 ENABLE_INTERRUPTS(CLBR_NONE)
16075 - SAVE_ARGS 8,1
16076 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
16077 movq %rcx,RIP-ARGOFFSET(%rsp)
16078 CFI_REL_OFFSET rip,RIP-ARGOFFSET
16079 @@ -483,7 +771,7 @@ ENTRY(system_call_after_swapgs)
16080 system_call_fastpath:
16081 cmpq $__NR_syscall_max,%rax
16082 ja badsys
16083 - movq %r10,%rcx
16084 + movq R10-ARGOFFSET(%rsp),%rcx
16085 call *sys_call_table(,%rax,8) # XXX: rip relative
16086 movq %rax,RAX-ARGOFFSET(%rsp)
16087 /*
16088 @@ -502,6 +790,8 @@ sysret_check:
16089 andl %edi,%edx
16090 jnz sysret_careful
16091 CFI_REMEMBER_STATE
16092 + pax_exit_kernel_user
16093 + pax_erase_kstack
16094 /*
16095 * sysretq will re-enable interrupts:
16096 */
16097 @@ -555,14 +845,18 @@ badsys:
16098 * jump back to the normal fast path.
16099 */
16100 auditsys:
16101 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
16102 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
16103 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
16104 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
16105 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
16106 movq %rax,%rsi /* 2nd arg: syscall number */
16107 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
16108 call audit_syscall_entry
16109 +
16110 + pax_erase_kstack
16111 +
16112 LOAD_ARGS 0 /* reload call-clobbered registers */
16113 + pax_set_fptr_mask
16114 jmp system_call_fastpath
16115
16116 /*
16117 @@ -592,16 +886,20 @@ tracesys:
16118 FIXUP_TOP_OF_STACK %rdi
16119 movq %rsp,%rdi
16120 call syscall_trace_enter
16121 +
16122 + pax_erase_kstack
16123 +
16124 /*
16125 * Reload arg registers from stack in case ptrace changed them.
16126 * We don't reload %rax because syscall_trace_enter() returned
16127 * the value it wants us to use in the table lookup.
16128 */
16129 LOAD_ARGS ARGOFFSET, 1
16130 + pax_set_fptr_mask
16131 RESTORE_REST
16132 cmpq $__NR_syscall_max,%rax
16133 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
16134 - movq %r10,%rcx /* fixup for C */
16135 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
16136 call *sys_call_table(,%rax,8)
16137 movq %rax,RAX-ARGOFFSET(%rsp)
16138 /* Use IRET because user could have changed frame */
16139 @@ -613,7 +911,7 @@ tracesys:
16140 GLOBAL(int_ret_from_sys_call)
16141 DISABLE_INTERRUPTS(CLBR_NONE)
16142 TRACE_IRQS_OFF
16143 - testl $3,CS-ARGOFFSET(%rsp)
16144 + testb $3,CS-ARGOFFSET(%rsp)
16145 je retint_restore_args
16146 movl $_TIF_ALLWORK_MASK,%edi
16147 /* edi: mask to check */
16148 @@ -674,7 +972,7 @@ int_restore_rest:
16149 TRACE_IRQS_OFF
16150 jmp int_with_check
16151 CFI_ENDPROC
16152 -END(system_call)
16153 +ENDPROC(system_call)
16154
16155 /*
16156 * Certain special system calls that need to save a complete full stack frame.
16157 @@ -690,7 +988,7 @@ ENTRY(\label)
16158 call \func
16159 jmp ptregscall_common
16160 CFI_ENDPROC
16161 -END(\label)
16162 +ENDPROC(\label)
16163 .endm
16164
16165 PTREGSCALL stub_clone, sys_clone, %r8
16166 @@ -708,9 +1006,10 @@ ENTRY(ptregscall_common)
16167 movq_cfi_restore R12+8, r12
16168 movq_cfi_restore RBP+8, rbp
16169 movq_cfi_restore RBX+8, rbx
16170 + pax_force_retaddr
16171 ret $REST_SKIP /* pop extended registers */
16172 CFI_ENDPROC
16173 -END(ptregscall_common)
16174 +ENDPROC(ptregscall_common)
16175
16176 ENTRY(stub_execve)
16177 CFI_STARTPROC
16178 @@ -726,7 +1025,7 @@ ENTRY(stub_execve)
16179 RESTORE_REST
16180 jmp int_ret_from_sys_call
16181 CFI_ENDPROC
16182 -END(stub_execve)
16183 +ENDPROC(stub_execve)
16184
16185 /*
16186 * sigreturn is special because it needs to restore all registers on return.
16187 @@ -744,7 +1043,7 @@ ENTRY(stub_rt_sigreturn)
16188 RESTORE_REST
16189 jmp int_ret_from_sys_call
16190 CFI_ENDPROC
16191 -END(stub_rt_sigreturn)
16192 +ENDPROC(stub_rt_sigreturn)
16193
16194 /*
16195 * Build the entry stubs and pointer table with some assembler magic.
16196 @@ -780,7 +1079,7 @@ vector=vector+1
16197 2: jmp common_interrupt
16198 .endr
16199 CFI_ENDPROC
16200 -END(irq_entries_start)
16201 +ENDPROC(irq_entries_start)
16202
16203 .previous
16204 END(interrupt)
16205 @@ -800,6 +1099,16 @@ END(interrupt)
16206 CFI_ADJUST_CFA_OFFSET 10*8
16207 call save_args
16208 PARTIAL_FRAME 0
16209 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16210 + testb $3, CS(%rdi)
16211 + jnz 1f
16212 + pax_enter_kernel
16213 + jmp 2f
16214 +1: pax_enter_kernel_user
16215 +2:
16216 +#else
16217 + pax_enter_kernel
16218 +#endif
16219 call \func
16220 .endm
16221
16222 @@ -822,7 +1131,7 @@ ret_from_intr:
16223 CFI_ADJUST_CFA_OFFSET -8
16224 exit_intr:
16225 GET_THREAD_INFO(%rcx)
16226 - testl $3,CS-ARGOFFSET(%rsp)
16227 + testb $3,CS-ARGOFFSET(%rsp)
16228 je retint_kernel
16229
16230 /* Interrupt came from user space */
16231 @@ -844,12 +1153,16 @@ retint_swapgs: /* return to user-space */
16232 * The iretq could re-enable interrupts:
16233 */
16234 DISABLE_INTERRUPTS(CLBR_ANY)
16235 + pax_exit_kernel_user
16236 + pax_erase_kstack
16237 TRACE_IRQS_IRETQ
16238 SWAPGS
16239 jmp restore_args
16240
16241 retint_restore_args: /* return to kernel space */
16242 DISABLE_INTERRUPTS(CLBR_ANY)
16243 + pax_exit_kernel
16244 + pax_force_retaddr RIP-ARGOFFSET
16245 /*
16246 * The iretq could re-enable interrupts:
16247 */
16248 @@ -940,7 +1253,7 @@ ENTRY(retint_kernel)
16249 #endif
16250
16251 CFI_ENDPROC
16252 -END(common_interrupt)
16253 +ENDPROC(common_interrupt)
16254
16255 /*
16256 * APIC interrupts.
16257 @@ -953,7 +1266,7 @@ ENTRY(\sym)
16258 interrupt \do_sym
16259 jmp ret_from_intr
16260 CFI_ENDPROC
16261 -END(\sym)
16262 +ENDPROC(\sym)
16263 .endm
16264
16265 #ifdef CONFIG_SMP
16266 @@ -1032,12 +1345,22 @@ ENTRY(\sym)
16267 CFI_ADJUST_CFA_OFFSET 15*8
16268 call error_entry
16269 DEFAULT_FRAME 0
16270 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16271 + testb $3, CS(%rsp)
16272 + jnz 1f
16273 + pax_enter_kernel
16274 + jmp 2f
16275 +1: pax_enter_kernel_user
16276 +2:
16277 +#else
16278 + pax_enter_kernel
16279 +#endif
16280 movq %rsp,%rdi /* pt_regs pointer */
16281 xorl %esi,%esi /* no error code */
16282 call \do_sym
16283 jmp error_exit /* %ebx: no swapgs flag */
16284 CFI_ENDPROC
16285 -END(\sym)
16286 +ENDPROC(\sym)
16287 .endm
16288
16289 .macro paranoidzeroentry sym do_sym
16290 @@ -1049,12 +1372,22 @@ ENTRY(\sym)
16291 subq $15*8, %rsp
16292 call save_paranoid
16293 TRACE_IRQS_OFF
16294 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16295 + testb $3, CS(%rsp)
16296 + jnz 1f
16297 + pax_enter_kernel
16298 + jmp 2f
16299 +1: pax_enter_kernel_user
16300 +2:
16301 +#else
16302 + pax_enter_kernel
16303 +#endif
16304 movq %rsp,%rdi /* pt_regs pointer */
16305 xorl %esi,%esi /* no error code */
16306 call \do_sym
16307 jmp paranoid_exit /* %ebx: no swapgs flag */
16308 CFI_ENDPROC
16309 -END(\sym)
16310 +ENDPROC(\sym)
16311 .endm
16312
16313 .macro paranoidzeroentry_ist sym do_sym ist
16314 @@ -1066,15 +1399,30 @@ ENTRY(\sym)
16315 subq $15*8, %rsp
16316 call save_paranoid
16317 TRACE_IRQS_OFF
16318 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16319 + testb $3, CS(%rsp)
16320 + jnz 1f
16321 + pax_enter_kernel
16322 + jmp 2f
16323 +1: pax_enter_kernel_user
16324 +2:
16325 +#else
16326 + pax_enter_kernel
16327 +#endif
16328 movq %rsp,%rdi /* pt_regs pointer */
16329 xorl %esi,%esi /* no error code */
16330 - PER_CPU(init_tss, %rbp)
16331 +#ifdef CONFIG_SMP
16332 + imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
16333 + lea init_tss(%rbp), %rbp
16334 +#else
16335 + lea init_tss(%rip), %rbp
16336 +#endif
16337 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
16338 call \do_sym
16339 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
16340 jmp paranoid_exit /* %ebx: no swapgs flag */
16341 CFI_ENDPROC
16342 -END(\sym)
16343 +ENDPROC(\sym)
16344 .endm
16345
16346 .macro errorentry sym do_sym
16347 @@ -1085,13 +1433,23 @@ ENTRY(\sym)
16348 CFI_ADJUST_CFA_OFFSET 15*8
16349 call error_entry
16350 DEFAULT_FRAME 0
16351 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16352 + testb $3, CS(%rsp)
16353 + jnz 1f
16354 + pax_enter_kernel
16355 + jmp 2f
16356 +1: pax_enter_kernel_user
16357 +2:
16358 +#else
16359 + pax_enter_kernel
16360 +#endif
16361 movq %rsp,%rdi /* pt_regs pointer */
16362 movq ORIG_RAX(%rsp),%rsi /* get error code */
16363 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16364 call \do_sym
16365 jmp error_exit /* %ebx: no swapgs flag */
16366 CFI_ENDPROC
16367 -END(\sym)
16368 +ENDPROC(\sym)
16369 .endm
16370
16371 /* error code is on the stack already */
16372 @@ -1104,13 +1462,23 @@ ENTRY(\sym)
16373 call save_paranoid
16374 DEFAULT_FRAME 0
16375 TRACE_IRQS_OFF
16376 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16377 + testb $3, CS(%rsp)
16378 + jnz 1f
16379 + pax_enter_kernel
16380 + jmp 2f
16381 +1: pax_enter_kernel_user
16382 +2:
16383 +#else
16384 + pax_enter_kernel
16385 +#endif
16386 movq %rsp,%rdi /* pt_regs pointer */
16387 movq ORIG_RAX(%rsp),%rsi /* get error code */
16388 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16389 call \do_sym
16390 jmp paranoid_exit /* %ebx: no swapgs flag */
16391 CFI_ENDPROC
16392 -END(\sym)
16393 +ENDPROC(\sym)
16394 .endm
16395
16396 zeroentry divide_error do_divide_error
16397 @@ -1141,9 +1509,10 @@ gs_change:
16398 SWAPGS
16399 popf
16400 CFI_ADJUST_CFA_OFFSET -8
16401 + pax_force_retaddr
16402 ret
16403 CFI_ENDPROC
16404 -END(native_load_gs_index)
16405 +ENDPROC(native_load_gs_index)
16406
16407 .section __ex_table,"a"
16408 .align 8
16409 @@ -1193,11 +1562,12 @@ ENTRY(kernel_thread)
16410 * of hacks for example to fork off the per-CPU idle tasks.
16411 * [Hopefully no generic code relies on the reschedule -AK]
16412 */
16413 - RESTORE_ALL
16414 + RESTORE_REST
16415 UNFAKE_STACK_FRAME
16416 + pax_force_retaddr
16417 ret
16418 CFI_ENDPROC
16419 -END(kernel_thread)
16420 +ENDPROC(kernel_thread)
16421
16422 ENTRY(child_rip)
16423 pushq $0 # fake return address
16424 @@ -1208,13 +1578,14 @@ ENTRY(child_rip)
16425 */
16426 movq %rdi, %rax
16427 movq %rsi, %rdi
16428 + pax_force_fptr %rax
16429 call *%rax
16430 # exit
16431 mov %eax, %edi
16432 call do_exit
16433 ud2 # padding for call trace
16434 CFI_ENDPROC
16435 -END(child_rip)
16436 +ENDPROC(child_rip)
16437
16438 /*
16439 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
16440 @@ -1241,11 +1612,11 @@ ENTRY(kernel_execve)
16441 RESTORE_REST
16442 testq %rax,%rax
16443 je int_ret_from_sys_call
16444 - RESTORE_ARGS
16445 UNFAKE_STACK_FRAME
16446 + pax_force_retaddr
16447 ret
16448 CFI_ENDPROC
16449 -END(kernel_execve)
16450 +ENDPROC(kernel_execve)
16451
16452 /* Call softirq on interrupt stack. Interrupts are off. */
16453 ENTRY(call_softirq)
16454 @@ -1263,9 +1634,10 @@ ENTRY(call_softirq)
16455 CFI_DEF_CFA_REGISTER rsp
16456 CFI_ADJUST_CFA_OFFSET -8
16457 decl PER_CPU_VAR(irq_count)
16458 + pax_force_retaddr
16459 ret
16460 CFI_ENDPROC
16461 -END(call_softirq)
16462 +ENDPROC(call_softirq)
16463
16464 #ifdef CONFIG_XEN
16465 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
16466 @@ -1303,7 +1675,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
16467 decl PER_CPU_VAR(irq_count)
16468 jmp error_exit
16469 CFI_ENDPROC
16470 -END(xen_do_hypervisor_callback)
16471 +ENDPROC(xen_do_hypervisor_callback)
16472
16473 /*
16474 * Hypervisor uses this for application faults while it executes.
16475 @@ -1362,7 +1734,7 @@ ENTRY(xen_failsafe_callback)
16476 SAVE_ALL
16477 jmp error_exit
16478 CFI_ENDPROC
16479 -END(xen_failsafe_callback)
16480 +ENDPROC(xen_failsafe_callback)
16481
16482 #endif /* CONFIG_XEN */
16483
16484 @@ -1405,16 +1777,31 @@ ENTRY(paranoid_exit)
16485 TRACE_IRQS_OFF
16486 testl %ebx,%ebx /* swapgs needed? */
16487 jnz paranoid_restore
16488 - testl $3,CS(%rsp)
16489 + testb $3,CS(%rsp)
16490 jnz paranoid_userspace
16491 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16492 + pax_exit_kernel
16493 + TRACE_IRQS_IRETQ 0
16494 + SWAPGS_UNSAFE_STACK
16495 + RESTORE_ALL 8
16496 + pax_force_retaddr_bts
16497 + jmp irq_return
16498 +#endif
16499 paranoid_swapgs:
16500 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16501 + pax_exit_kernel_user
16502 +#else
16503 + pax_exit_kernel
16504 +#endif
16505 TRACE_IRQS_IRETQ 0
16506 SWAPGS_UNSAFE_STACK
16507 RESTORE_ALL 8
16508 jmp irq_return
16509 paranoid_restore:
16510 + pax_exit_kernel
16511 TRACE_IRQS_IRETQ 0
16512 RESTORE_ALL 8
16513 + pax_force_retaddr_bts
16514 jmp irq_return
16515 paranoid_userspace:
16516 GET_THREAD_INFO(%rcx)
16517 @@ -1443,7 +1830,7 @@ paranoid_schedule:
16518 TRACE_IRQS_OFF
16519 jmp paranoid_userspace
16520 CFI_ENDPROC
16521 -END(paranoid_exit)
16522 +ENDPROC(paranoid_exit)
16523
16524 /*
16525 * Exception entry point. This expects an error code/orig_rax on the stack.
16526 @@ -1470,12 +1857,13 @@ ENTRY(error_entry)
16527 movq_cfi r14, R14+8
16528 movq_cfi r15, R15+8
16529 xorl %ebx,%ebx
16530 - testl $3,CS+8(%rsp)
16531 + testb $3,CS+8(%rsp)
16532 je error_kernelspace
16533 error_swapgs:
16534 SWAPGS
16535 error_sti:
16536 TRACE_IRQS_OFF
16537 + pax_force_retaddr_bts
16538 ret
16539 CFI_ENDPROC
16540
16541 @@ -1497,7 +1885,7 @@ error_kernelspace:
16542 cmpq $gs_change,RIP+8(%rsp)
16543 je error_swapgs
16544 jmp error_sti
16545 -END(error_entry)
16546 +ENDPROC(error_entry)
16547
16548
16549 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
16550 @@ -1517,7 +1905,7 @@ ENTRY(error_exit)
16551 jnz retint_careful
16552 jmp retint_swapgs
16553 CFI_ENDPROC
16554 -END(error_exit)
16555 +ENDPROC(error_exit)
16556
16557
16558 /* runs on exception stack */
16559 @@ -1529,6 +1917,16 @@ ENTRY(nmi)
16560 CFI_ADJUST_CFA_OFFSET 15*8
16561 call save_paranoid
16562 DEFAULT_FRAME 0
16563 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16564 + testb $3, CS(%rsp)
16565 + jnz 1f
16566 + pax_enter_kernel
16567 + jmp 2f
16568 +1: pax_enter_kernel_user
16569 +2:
16570 +#else
16571 + pax_enter_kernel
16572 +#endif
16573 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
16574 movq %rsp,%rdi
16575 movq $-1,%rsi
16576 @@ -1539,12 +1937,28 @@ ENTRY(nmi)
16577 DISABLE_INTERRUPTS(CLBR_NONE)
16578 testl %ebx,%ebx /* swapgs needed? */
16579 jnz nmi_restore
16580 - testl $3,CS(%rsp)
16581 + testb $3,CS(%rsp)
16582 jnz nmi_userspace
16583 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16584 + pax_exit_kernel
16585 + SWAPGS_UNSAFE_STACK
16586 + RESTORE_ALL 8
16587 + pax_force_retaddr_bts
16588 + jmp irq_return
16589 +#endif
16590 nmi_swapgs:
16591 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16592 + pax_exit_kernel_user
16593 +#else
16594 + pax_exit_kernel
16595 +#endif
16596 SWAPGS_UNSAFE_STACK
16597 + RESTORE_ALL 8
16598 + jmp irq_return
16599 nmi_restore:
16600 + pax_exit_kernel
16601 RESTORE_ALL 8
16602 + pax_force_retaddr_bts
16603 jmp irq_return
16604 nmi_userspace:
16605 GET_THREAD_INFO(%rcx)
16606 @@ -1573,14 +1987,14 @@ nmi_schedule:
16607 jmp paranoid_exit
16608 CFI_ENDPROC
16609 #endif
16610 -END(nmi)
16611 +ENDPROC(nmi)
16612
16613 ENTRY(ignore_sysret)
16614 CFI_STARTPROC
16615 mov $-ENOSYS,%eax
16616 sysret
16617 CFI_ENDPROC
16618 -END(ignore_sysret)
16619 +ENDPROC(ignore_sysret)
16620
16621 /*
16622 * End of kprobes section
16623 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
16624 index 9dbb527..7b3615a 100644
16625 --- a/arch/x86/kernel/ftrace.c
16626 +++ b/arch/x86/kernel/ftrace.c
16627 @@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the IP to write to */
16628 static void *mod_code_newcode; /* holds the text to write to the IP */
16629
16630 static unsigned nmi_wait_count;
16631 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
16632 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
16633
16634 int ftrace_arch_read_dyn_info(char *buf, int size)
16635 {
16636 @@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
16637
16638 r = snprintf(buf, size, "%u %u",
16639 nmi_wait_count,
16640 - atomic_read(&nmi_update_count));
16641 + atomic_read_unchecked(&nmi_update_count));
16642 return r;
16643 }
16644
16645 @@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
16646 {
16647 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
16648 smp_rmb();
16649 + pax_open_kernel();
16650 ftrace_mod_code();
16651 - atomic_inc(&nmi_update_count);
16652 + pax_close_kernel();
16653 + atomic_inc_unchecked(&nmi_update_count);
16654 }
16655 /* Must have previous changes seen before executions */
16656 smp_mb();
16657 @@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
16658
16659
16660
16661 -static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
16662 +static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
16663
16664 static unsigned char *ftrace_nop_replace(void)
16665 {
16666 @@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
16667 {
16668 unsigned char replaced[MCOUNT_INSN_SIZE];
16669
16670 + ip = ktla_ktva(ip);
16671 +
16672 /*
16673 * Note: Due to modules and __init, code can
16674 * disappear and change, we need to protect against faulting
16675 @@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16676 unsigned char old[MCOUNT_INSN_SIZE], *new;
16677 int ret;
16678
16679 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16680 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16681 new = ftrace_call_replace(ip, (unsigned long)func);
16682 ret = ftrace_modify_code(ip, old, new);
16683
16684 @@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *data)
16685 switch (faulted) {
16686 case 0:
16687 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
16688 - memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
16689 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
16690 break;
16691 case 1:
16692 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
16693 - memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
16694 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
16695 break;
16696 case 2:
16697 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
16698 - memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
16699 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
16700 break;
16701 }
16702
16703 @@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16704 {
16705 unsigned char code[MCOUNT_INSN_SIZE];
16706
16707 + ip = ktla_ktva(ip);
16708 +
16709 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16710 return -EFAULT;
16711
16712 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16713 index 4f8e250..df24706 100644
16714 --- a/arch/x86/kernel/head32.c
16715 +++ b/arch/x86/kernel/head32.c
16716 @@ -16,6 +16,7 @@
16717 #include <asm/apic.h>
16718 #include <asm/io_apic.h>
16719 #include <asm/bios_ebda.h>
16720 +#include <asm/boot.h>
16721
16722 static void __init i386_default_early_setup(void)
16723 {
16724 @@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
16725 {
16726 reserve_trampoline_memory();
16727
16728 - reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
16729 + reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
16730
16731 #ifdef CONFIG_BLK_DEV_INITRD
16732 /* Reserve INITRD */
16733 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16734 index 34c3308..6fc4e76 100644
16735 --- a/arch/x86/kernel/head_32.S
16736 +++ b/arch/x86/kernel/head_32.S
16737 @@ -19,10 +19,17 @@
16738 #include <asm/setup.h>
16739 #include <asm/processor-flags.h>
16740 #include <asm/percpu.h>
16741 +#include <asm/msr-index.h>
16742
16743 /* Physical address */
16744 #define pa(X) ((X) - __PAGE_OFFSET)
16745
16746 +#ifdef CONFIG_PAX_KERNEXEC
16747 +#define ta(X) (X)
16748 +#else
16749 +#define ta(X) ((X) - __PAGE_OFFSET)
16750 +#endif
16751 +
16752 /*
16753 * References to members of the new_cpu_data structure.
16754 */
16755 @@ -52,11 +59,7 @@
16756 * and small than max_low_pfn, otherwise will waste some page table entries
16757 */
16758
16759 -#if PTRS_PER_PMD > 1
16760 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
16761 -#else
16762 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
16763 -#endif
16764 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
16765
16766 /* Enough space to fit pagetables for the low memory linear map */
16767 MAPPING_BEYOND_END = \
16768 @@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm
16769 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16770
16771 /*
16772 + * Real beginning of normal "text" segment
16773 + */
16774 +ENTRY(stext)
16775 +ENTRY(_stext)
16776 +
16777 +/*
16778 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
16779 * %esi points to the real-mode code as a 32-bit pointer.
16780 * CS and DS must be 4 GB flat segments, but we don't depend on
16781 @@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16782 * can.
16783 */
16784 __HEAD
16785 +
16786 +#ifdef CONFIG_PAX_KERNEXEC
16787 + jmp startup_32
16788 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
16789 +.fill PAGE_SIZE-5,1,0xcc
16790 +#endif
16791 +
16792 ENTRY(startup_32)
16793 + movl pa(stack_start),%ecx
16794 +
16795 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
16796 us to not reload segments */
16797 testb $(1<<6), BP_loadflags(%esi)
16798 @@ -95,7 +113,60 @@ ENTRY(startup_32)
16799 movl %eax,%es
16800 movl %eax,%fs
16801 movl %eax,%gs
16802 + movl %eax,%ss
16803 2:
16804 + leal -__PAGE_OFFSET(%ecx),%esp
16805 +
16806 +#ifdef CONFIG_SMP
16807 + movl $pa(cpu_gdt_table),%edi
16808 + movl $__per_cpu_load,%eax
16809 + movw %ax,__KERNEL_PERCPU + 2(%edi)
16810 + rorl $16,%eax
16811 + movb %al,__KERNEL_PERCPU + 4(%edi)
16812 + movb %ah,__KERNEL_PERCPU + 7(%edi)
16813 + movl $__per_cpu_end - 1,%eax
16814 + subl $__per_cpu_start,%eax
16815 + movw %ax,__KERNEL_PERCPU + 0(%edi)
16816 +#endif
16817 +
16818 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16819 + movl $NR_CPUS,%ecx
16820 + movl $pa(cpu_gdt_table),%edi
16821 +1:
16822 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
16823 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
16824 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
16825 + addl $PAGE_SIZE_asm,%edi
16826 + loop 1b
16827 +#endif
16828 +
16829 +#ifdef CONFIG_PAX_KERNEXEC
16830 + movl $pa(boot_gdt),%edi
16831 + movl $__LOAD_PHYSICAL_ADDR,%eax
16832 + movw %ax,__BOOT_CS + 2(%edi)
16833 + rorl $16,%eax
16834 + movb %al,__BOOT_CS + 4(%edi)
16835 + movb %ah,__BOOT_CS + 7(%edi)
16836 + rorl $16,%eax
16837 +
16838 + ljmp $(__BOOT_CS),$1f
16839 +1:
16840 +
16841 + movl $NR_CPUS,%ecx
16842 + movl $pa(cpu_gdt_table),%edi
16843 + addl $__PAGE_OFFSET,%eax
16844 +1:
16845 + movw %ax,__KERNEL_CS + 2(%edi)
16846 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
16847 + rorl $16,%eax
16848 + movb %al,__KERNEL_CS + 4(%edi)
16849 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
16850 + movb %ah,__KERNEL_CS + 7(%edi)
16851 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
16852 + rorl $16,%eax
16853 + addl $PAGE_SIZE_asm,%edi
16854 + loop 1b
16855 +#endif
16856
16857 /*
16858 * Clear BSS first so that there are no surprises...
16859 @@ -140,9 +211,7 @@ ENTRY(startup_32)
16860 cmpl $num_subarch_entries, %eax
16861 jae bad_subarch
16862
16863 - movl pa(subarch_entries)(,%eax,4), %eax
16864 - subl $__PAGE_OFFSET, %eax
16865 - jmp *%eax
16866 + jmp *pa(subarch_entries)(,%eax,4)
16867
16868 bad_subarch:
16869 WEAK(lguest_entry)
16870 @@ -154,10 +223,10 @@ WEAK(xen_entry)
16871 __INITDATA
16872
16873 subarch_entries:
16874 - .long default_entry /* normal x86/PC */
16875 - .long lguest_entry /* lguest hypervisor */
16876 - .long xen_entry /* Xen hypervisor */
16877 - .long default_entry /* Moorestown MID */
16878 + .long ta(default_entry) /* normal x86/PC */
16879 + .long ta(lguest_entry) /* lguest hypervisor */
16880 + .long ta(xen_entry) /* Xen hypervisor */
16881 + .long ta(default_entry) /* Moorestown MID */
16882 num_subarch_entries = (. - subarch_entries) / 4
16883 .previous
16884 #endif /* CONFIG_PARAVIRT */
16885 @@ -218,8 +287,11 @@ default_entry:
16886 movl %eax, pa(max_pfn_mapped)
16887
16888 /* Do early initialization of the fixmap area */
16889 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
16890 - movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
16891 +#ifdef CONFIG_COMPAT_VDSO
16892 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
16893 +#else
16894 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
16895 +#endif
16896 #else /* Not PAE */
16897
16898 page_pde_offset = (__PAGE_OFFSET >> 20);
16899 @@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16900 movl %eax, pa(max_pfn_mapped)
16901
16902 /* Do early initialization of the fixmap area */
16903 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
16904 - movl %eax,pa(swapper_pg_dir+0xffc)
16905 +#ifdef CONFIG_COMPAT_VDSO
16906 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
16907 +#else
16908 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
16909 +#endif
16910 #endif
16911 jmp 3f
16912 /*
16913 @@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
16914 movl %eax,%es
16915 movl %eax,%fs
16916 movl %eax,%gs
16917 + movl pa(stack_start),%ecx
16918 + movl %eax,%ss
16919 + leal -__PAGE_OFFSET(%ecx),%esp
16920 #endif /* CONFIG_SMP */
16921 3:
16922
16923 @@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
16924 orl %edx,%eax
16925 movl %eax,%cr4
16926
16927 +#ifdef CONFIG_X86_PAE
16928 btl $5, %eax # check if PAE is enabled
16929 jnc 6f
16930
16931 @@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
16932 cpuid
16933 cmpl $0x80000000, %eax
16934 jbe 6f
16935 +
16936 + /* Clear bogus XD_DISABLE bits */
16937 + call verify_cpu
16938 +
16939 mov $0x80000001, %eax
16940 cpuid
16941 /* Execute Disable bit supported? */
16942 @@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
16943 jnc 6f
16944
16945 /* Setup EFER (Extended Feature Enable Register) */
16946 - movl $0xc0000080, %ecx
16947 + movl $MSR_EFER, %ecx
16948 rdmsr
16949
16950 btsl $11, %eax
16951 /* Make changes effective */
16952 wrmsr
16953
16954 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
16955 + movl $1,pa(nx_enabled)
16956 +#endif
16957 +
16958 6:
16959
16960 /*
16961 @@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
16962 movl %eax,%cr0 /* ..and set paging (PG) bit */
16963 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
16964 1:
16965 - /* Set up the stack pointer */
16966 - lss stack_start,%esp
16967 + /* Shift the stack pointer to a virtual address */
16968 + addl $__PAGE_OFFSET, %esp
16969
16970 /*
16971 * Initialize eflags. Some BIOS's leave bits like NT set. This would
16972 @@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
16973
16974 #ifdef CONFIG_SMP
16975 cmpb $0, ready
16976 - jz 1f /* Initial CPU cleans BSS */
16977 - jmp checkCPUtype
16978 -1:
16979 + jnz checkCPUtype
16980 #endif /* CONFIG_SMP */
16981
16982 /*
16983 @@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
16984 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
16985 movl %eax,%ss # after changing gdt.
16986
16987 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
16988 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
16989 movl %eax,%ds
16990 movl %eax,%es
16991
16992 @@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
16993 */
16994 cmpb $0,ready
16995 jne 1f
16996 - movl $per_cpu__gdt_page,%eax
16997 + movl $cpu_gdt_table,%eax
16998 movl $per_cpu__stack_canary,%ecx
16999 +#ifdef CONFIG_SMP
17000 + addl $__per_cpu_load,%ecx
17001 +#endif
17002 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
17003 shrl $16, %ecx
17004 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
17005 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
17006 1:
17007 -#endif
17008 movl $(__KERNEL_STACK_CANARY),%eax
17009 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
17010 + movl $(__USER_DS),%eax
17011 +#else
17012 + xorl %eax,%eax
17013 +#endif
17014 movl %eax,%gs
17015
17016 xorl %eax,%eax # Clear LDT
17017 @@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
17018
17019 cld # gcc2 wants the direction flag cleared at all times
17020 pushl $0 # fake return address for unwinder
17021 -#ifdef CONFIG_SMP
17022 - movb ready, %cl
17023 movb $1, ready
17024 - cmpb $0,%cl # the first CPU calls start_kernel
17025 - je 1f
17026 - movl (stack_start), %esp
17027 -1:
17028 -#endif /* CONFIG_SMP */
17029 jmp *(initial_code)
17030
17031 /*
17032 @@ -546,22 +631,22 @@ early_page_fault:
17033 jmp early_fault
17034
17035 early_fault:
17036 - cld
17037 #ifdef CONFIG_PRINTK
17038 + cmpl $1,%ss:early_recursion_flag
17039 + je hlt_loop
17040 + incl %ss:early_recursion_flag
17041 + cld
17042 pusha
17043 movl $(__KERNEL_DS),%eax
17044 movl %eax,%ds
17045 movl %eax,%es
17046 - cmpl $2,early_recursion_flag
17047 - je hlt_loop
17048 - incl early_recursion_flag
17049 movl %cr2,%eax
17050 pushl %eax
17051 pushl %edx /* trapno */
17052 pushl $fault_msg
17053 call printk
17054 +; call dump_stack
17055 #endif
17056 - call dump_stack
17057 hlt_loop:
17058 hlt
17059 jmp hlt_loop
17060 @@ -569,8 +654,11 @@ hlt_loop:
17061 /* This is the default interrupt "handler" :-) */
17062 ALIGN
17063 ignore_int:
17064 - cld
17065 #ifdef CONFIG_PRINTK
17066 + cmpl $2,%ss:early_recursion_flag
17067 + je hlt_loop
17068 + incl %ss:early_recursion_flag
17069 + cld
17070 pushl %eax
17071 pushl %ecx
17072 pushl %edx
17073 @@ -579,9 +667,6 @@ ignore_int:
17074 movl $(__KERNEL_DS),%eax
17075 movl %eax,%ds
17076 movl %eax,%es
17077 - cmpl $2,early_recursion_flag
17078 - je hlt_loop
17079 - incl early_recursion_flag
17080 pushl 16(%esp)
17081 pushl 24(%esp)
17082 pushl 32(%esp)
17083 @@ -600,6 +685,8 @@ ignore_int:
17084 #endif
17085 iret
17086
17087 +#include "verify_cpu.S"
17088 +
17089 __REFDATA
17090 .align 4
17091 ENTRY(initial_code)
17092 @@ -610,31 +697,47 @@ ENTRY(initial_page_table)
17093 /*
17094 * BSS section
17095 */
17096 -__PAGE_ALIGNED_BSS
17097 - .align PAGE_SIZE_asm
17098 #ifdef CONFIG_X86_PAE
17099 +.section .swapper_pg_pmd,"a",@progbits
17100 swapper_pg_pmd:
17101 .fill 1024*KPMDS,4,0
17102 #else
17103 +.section .swapper_pg_dir,"a",@progbits
17104 ENTRY(swapper_pg_dir)
17105 .fill 1024,4,0
17106 #endif
17107 +.section .swapper_pg_fixmap,"a",@progbits
17108 swapper_pg_fixmap:
17109 .fill 1024,4,0
17110 #ifdef CONFIG_X86_TRAMPOLINE
17111 +.section .trampoline_pg_dir,"a",@progbits
17112 ENTRY(trampoline_pg_dir)
17113 +#ifdef CONFIG_X86_PAE
17114 + .fill 4,8,0
17115 +#else
17116 .fill 1024,4,0
17117 #endif
17118 +#endif
17119 +
17120 +.section .empty_zero_page,"a",@progbits
17121 ENTRY(empty_zero_page)
17122 .fill 4096,1,0
17123
17124 /*
17125 + * The IDT has to be page-aligned to simplify the Pentium
17126 + * F0 0F bug workaround.. We have a special link segment
17127 + * for this.
17128 + */
17129 +.section .idt,"a",@progbits
17130 +ENTRY(idt_table)
17131 + .fill 256,8,0
17132 +
17133 +/*
17134 * This starts the data section.
17135 */
17136 #ifdef CONFIG_X86_PAE
17137 -__PAGE_ALIGNED_DATA
17138 - /* Page-aligned for the benefit of paravirt? */
17139 - .align PAGE_SIZE_asm
17140 +.section .swapper_pg_dir,"a",@progbits
17141 +
17142 ENTRY(swapper_pg_dir)
17143 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
17144 # if KPMDS == 3
17145 @@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
17146 # error "Kernel PMDs should be 1, 2 or 3"
17147 # endif
17148 .align PAGE_SIZE_asm /* needs to be page-sized too */
17149 +
17150 +#ifdef CONFIG_PAX_PER_CPU_PGD
17151 +ENTRY(cpu_pgd)
17152 + .rept NR_CPUS
17153 + .fill 4,8,0
17154 + .endr
17155 +#endif
17156 +
17157 #endif
17158
17159 .data
17160 +.balign 4
17161 ENTRY(stack_start)
17162 - .long init_thread_union+THREAD_SIZE
17163 - .long __BOOT_DS
17164 + .long init_thread_union+THREAD_SIZE-8
17165
17166 ready: .byte 0
17167
17168 +.section .rodata,"a",@progbits
17169 early_recursion_flag:
17170 .long 0
17171
17172 @@ -697,7 +809,7 @@ fault_msg:
17173 .word 0 # 32 bit align gdt_desc.address
17174 boot_gdt_descr:
17175 .word __BOOT_DS+7
17176 - .long boot_gdt - __PAGE_OFFSET
17177 + .long pa(boot_gdt)
17178
17179 .word 0 # 32-bit align idt_desc.address
17180 idt_descr:
17181 @@ -708,7 +820,7 @@ idt_descr:
17182 .word 0 # 32 bit align gdt_desc.address
17183 ENTRY(early_gdt_descr)
17184 .word GDT_ENTRIES*8-1
17185 - .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
17186 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
17187
17188 /*
17189 * The boot_gdt must mirror the equivalent in setup.S and is
17190 @@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
17191 .align L1_CACHE_BYTES
17192 ENTRY(boot_gdt)
17193 .fill GDT_ENTRY_BOOT_CS,8,0
17194 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
17195 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
17196 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
17197 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
17198 +
17199 + .align PAGE_SIZE_asm
17200 +ENTRY(cpu_gdt_table)
17201 + .rept NR_CPUS
17202 + .quad 0x0000000000000000 /* NULL descriptor */
17203 + .quad 0x0000000000000000 /* 0x0b reserved */
17204 + .quad 0x0000000000000000 /* 0x13 reserved */
17205 + .quad 0x0000000000000000 /* 0x1b reserved */
17206 +
17207 +#ifdef CONFIG_PAX_KERNEXEC
17208 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
17209 +#else
17210 + .quad 0x0000000000000000 /* 0x20 unused */
17211 +#endif
17212 +
17213 + .quad 0x0000000000000000 /* 0x28 unused */
17214 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
17215 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
17216 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
17217 + .quad 0x0000000000000000 /* 0x4b reserved */
17218 + .quad 0x0000000000000000 /* 0x53 reserved */
17219 + .quad 0x0000000000000000 /* 0x5b reserved */
17220 +
17221 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
17222 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
17223 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
17224 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
17225 +
17226 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
17227 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
17228 +
17229 + /*
17230 + * Segments used for calling PnP BIOS have byte granularity.
17231 + * The code segments and data segments have fixed 64k limits,
17232 + * the transfer segment sizes are set at run time.
17233 + */
17234 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
17235 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
17236 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
17237 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
17238 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
17239 +
17240 + /*
17241 + * The APM segments have byte granularity and their bases
17242 + * are set at run time. All have 64k limits.
17243 + */
17244 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
17245 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
17246 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
17247 +
17248 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
17249 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
17250 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
17251 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
17252 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
17253 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
17254 +
17255 + /* Be sure this is zeroed to avoid false validations in Xen */
17256 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
17257 + .endr
17258 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
17259 index 780cd92..758b2a6 100644
17260 --- a/arch/x86/kernel/head_64.S
17261 +++ b/arch/x86/kernel/head_64.S
17262 @@ -19,6 +19,8 @@
17263 #include <asm/cache.h>
17264 #include <asm/processor-flags.h>
17265 #include <asm/percpu.h>
17266 +#include <asm/cpufeature.h>
17267 +#include <asm/alternative-asm.h>
17268
17269 #ifdef CONFIG_PARAVIRT
17270 #include <asm/asm-offsets.h>
17271 @@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
17272 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
17273 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
17274 L3_START_KERNEL = pud_index(__START_KERNEL_map)
17275 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
17276 +L3_VMALLOC_START = pud_index(VMALLOC_START)
17277 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
17278 +L3_VMALLOC_END = pud_index(VMALLOC_END)
17279 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
17280 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
17281
17282 .text
17283 __HEAD
17284 @@ -85,35 +93,23 @@ startup_64:
17285 */
17286 addq %rbp, init_level4_pgt + 0(%rip)
17287 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
17288 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
17289 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
17290 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
17291 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
17292
17293 addq %rbp, level3_ident_pgt + 0(%rip)
17294 +#ifndef CONFIG_XEN
17295 + addq %rbp, level3_ident_pgt + 8(%rip)
17296 +#endif
17297
17298 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
17299 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
17300 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
17301 +
17302 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
17303 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
17304
17305 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
17306 -
17307 - /* Add an Identity mapping if I am above 1G */
17308 - leaq _text(%rip), %rdi
17309 - andq $PMD_PAGE_MASK, %rdi
17310 -
17311 - movq %rdi, %rax
17312 - shrq $PUD_SHIFT, %rax
17313 - andq $(PTRS_PER_PUD - 1), %rax
17314 - jz ident_complete
17315 -
17316 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
17317 - leaq level3_ident_pgt(%rip), %rbx
17318 - movq %rdx, 0(%rbx, %rax, 8)
17319 -
17320 - movq %rdi, %rax
17321 - shrq $PMD_SHIFT, %rax
17322 - andq $(PTRS_PER_PMD - 1), %rax
17323 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
17324 - leaq level2_spare_pgt(%rip), %rbx
17325 - movq %rdx, 0(%rbx, %rax, 8)
17326 -ident_complete:
17327 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
17328
17329 /*
17330 * Fixup the kernel text+data virtual addresses. Note that
17331 @@ -161,8 +157,8 @@ ENTRY(secondary_startup_64)
17332 * after the boot processor executes this code.
17333 */
17334
17335 - /* Enable PAE mode and PGE */
17336 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
17337 + /* Enable PAE mode and PSE/PGE */
17338 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17339 movq %rax, %cr4
17340
17341 /* Setup early boot stage 4 level pagetables. */
17342 @@ -184,9 +180,16 @@ ENTRY(secondary_startup_64)
17343 movl $MSR_EFER, %ecx
17344 rdmsr
17345 btsl $_EFER_SCE, %eax /* Enable System Call */
17346 - btl $20,%edi /* No Execute supported? */
17347 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
17348 jnc 1f
17349 btsl $_EFER_NX, %eax
17350 + leaq init_level4_pgt(%rip), %rdi
17351 +#ifndef CONFIG_EFI
17352 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
17353 +#endif
17354 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
17355 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
17356 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
17357 1: wrmsr /* Make changes effective */
17358
17359 /* Setup cr0 */
17360 @@ -249,6 +252,7 @@ ENTRY(secondary_startup_64)
17361 * jump. In addition we need to ensure %cs is set so we make this
17362 * a far return.
17363 */
17364 + pax_set_fptr_mask
17365 movq initial_code(%rip),%rax
17366 pushq $0 # fake return address to stop unwinder
17367 pushq $__KERNEL_CS # set correct cs
17368 @@ -262,16 +266,16 @@ ENTRY(secondary_startup_64)
17369 .quad x86_64_start_kernel
17370 ENTRY(initial_gs)
17371 .quad INIT_PER_CPU_VAR(irq_stack_union)
17372 - __FINITDATA
17373
17374 ENTRY(stack_start)
17375 .quad init_thread_union+THREAD_SIZE-8
17376 .word 0
17377 + __FINITDATA
17378
17379 bad_address:
17380 jmp bad_address
17381
17382 - .section ".init.text","ax"
17383 + __INIT
17384 #ifdef CONFIG_EARLY_PRINTK
17385 .globl early_idt_handlers
17386 early_idt_handlers:
17387 @@ -316,18 +320,23 @@ ENTRY(early_idt_handler)
17388 #endif /* EARLY_PRINTK */
17389 1: hlt
17390 jmp 1b
17391 + .previous
17392
17393 #ifdef CONFIG_EARLY_PRINTK
17394 + __INITDATA
17395 early_recursion_flag:
17396 .long 0
17397 + .previous
17398
17399 + .section .rodata,"a",@progbits
17400 early_idt_msg:
17401 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
17402 early_idt_ripmsg:
17403 .asciz "RIP %s\n"
17404 + .previous
17405 #endif /* CONFIG_EARLY_PRINTK */
17406 - .previous
17407
17408 + .section .rodata,"a",@progbits
17409 #define NEXT_PAGE(name) \
17410 .balign PAGE_SIZE; \
17411 ENTRY(name)
17412 @@ -350,13 +359,41 @@ NEXT_PAGE(init_level4_pgt)
17413 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17414 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
17415 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17416 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
17417 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
17418 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
17419 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
17420 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
17421 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17422 .org init_level4_pgt + L4_START_KERNEL*8, 0
17423 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
17424 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
17425
17426 +#ifdef CONFIG_PAX_PER_CPU_PGD
17427 +NEXT_PAGE(cpu_pgd)
17428 + .rept NR_CPUS
17429 + .fill 512,8,0
17430 + .endr
17431 +#endif
17432 +
17433 NEXT_PAGE(level3_ident_pgt)
17434 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17435 +#ifdef CONFIG_XEN
17436 .fill 511,8,0
17437 +#else
17438 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
17439 + .fill 510,8,0
17440 +#endif
17441 +
17442 +NEXT_PAGE(level3_vmalloc_start_pgt)
17443 + .fill 512,8,0
17444 +
17445 +NEXT_PAGE(level3_vmalloc_end_pgt)
17446 + .fill 512,8,0
17447 +
17448 +NEXT_PAGE(level3_vmemmap_pgt)
17449 + .fill L3_VMEMMAP_START,8,0
17450 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17451
17452 NEXT_PAGE(level3_kernel_pgt)
17453 .fill L3_START_KERNEL,8,0
17454 @@ -364,20 +401,23 @@ NEXT_PAGE(level3_kernel_pgt)
17455 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
17456 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17457
17458 +NEXT_PAGE(level2_vmemmap_pgt)
17459 + .fill 512,8,0
17460 +
17461 NEXT_PAGE(level2_fixmap_pgt)
17462 - .fill 506,8,0
17463 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17464 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
17465 - .fill 5,8,0
17466 + .fill 507,8,0
17467 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
17468 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
17469 + .fill 4,8,0
17470
17471 -NEXT_PAGE(level1_fixmap_pgt)
17472 +NEXT_PAGE(level1_vsyscall_pgt)
17473 .fill 512,8,0
17474
17475 -NEXT_PAGE(level2_ident_pgt)
17476 - /* Since I easily can, map the first 1G.
17477 + /* Since I easily can, map the first 2G.
17478 * Don't set NX because code runs from these pages.
17479 */
17480 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
17481 +NEXT_PAGE(level2_ident_pgt)
17482 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
17483
17484 NEXT_PAGE(level2_kernel_pgt)
17485 /*
17486 @@ -390,33 +430,55 @@ NEXT_PAGE(level2_kernel_pgt)
17487 * If you want to increase this then increase MODULES_VADDR
17488 * too.)
17489 */
17490 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
17491 - KERNEL_IMAGE_SIZE/PMD_SIZE)
17492 -
17493 -NEXT_PAGE(level2_spare_pgt)
17494 - .fill 512, 8, 0
17495 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
17496
17497 #undef PMDS
17498 #undef NEXT_PAGE
17499
17500 - .data
17501 + .align PAGE_SIZE
17502 +ENTRY(cpu_gdt_table)
17503 + .rept NR_CPUS
17504 + .quad 0x0000000000000000 /* NULL descriptor */
17505 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
17506 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
17507 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
17508 + .quad 0x00cffb000000ffff /* __USER32_CS */
17509 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
17510 + .quad 0x00affb000000ffff /* __USER_CS */
17511 +
17512 +#ifdef CONFIG_PAX_KERNEXEC
17513 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
17514 +#else
17515 + .quad 0x0 /* unused */
17516 +#endif
17517 +
17518 + .quad 0,0 /* TSS */
17519 + .quad 0,0 /* LDT */
17520 + .quad 0,0,0 /* three TLS descriptors */
17521 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
17522 + /* asm/segment.h:GDT_ENTRIES must match this */
17523 +
17524 + /* zero the remaining page */
17525 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
17526 + .endr
17527 +
17528 .align 16
17529 .globl early_gdt_descr
17530 early_gdt_descr:
17531 .word GDT_ENTRIES*8-1
17532 early_gdt_descr_base:
17533 - .quad INIT_PER_CPU_VAR(gdt_page)
17534 + .quad cpu_gdt_table
17535
17536 ENTRY(phys_base)
17537 /* This must match the first entry in level2_kernel_pgt */
17538 .quad 0x0000000000000000
17539
17540 #include "../../x86/xen/xen-head.S"
17541 -
17542 - .section .bss, "aw", @nobits
17543 +
17544 + .section .rodata,"a",@progbits
17545 .align L1_CACHE_BYTES
17546 ENTRY(idt_table)
17547 - .skip IDT_ENTRIES * 16
17548 + .fill 512,8,0
17549
17550 __PAGE_ALIGNED_BSS
17551 .align PAGE_SIZE
17552 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
17553 index 9c3bd4a..e1d9b35 100644
17554 --- a/arch/x86/kernel/i386_ksyms_32.c
17555 +++ b/arch/x86/kernel/i386_ksyms_32.c
17556 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
17557 EXPORT_SYMBOL(cmpxchg8b_emu);
17558 #endif
17559
17560 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
17561 +
17562 /* Networking helper routines. */
17563 EXPORT_SYMBOL(csum_partial_copy_generic);
17564 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
17565 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
17566
17567 EXPORT_SYMBOL(__get_user_1);
17568 EXPORT_SYMBOL(__get_user_2);
17569 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
17570
17571 EXPORT_SYMBOL(csum_partial);
17572 EXPORT_SYMBOL(empty_zero_page);
17573 +
17574 +#ifdef CONFIG_PAX_KERNEXEC
17575 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
17576 +#endif
17577 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
17578 index df89102..a244320 100644
17579 --- a/arch/x86/kernel/i8259.c
17580 +++ b/arch/x86/kernel/i8259.c
17581 @@ -208,7 +208,7 @@ spurious_8259A_irq:
17582 "spurious 8259A interrupt: IRQ%d.\n", irq);
17583 spurious_irq_mask |= irqmask;
17584 }
17585 - atomic_inc(&irq_err_count);
17586 + atomic_inc_unchecked(&irq_err_count);
17587 /*
17588 * Theoretically we do not have to handle this IRQ,
17589 * but in Linux this does not cause problems and is
17590 diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
17591 index 3a54dcb..1c22348 100644
17592 --- a/arch/x86/kernel/init_task.c
17593 +++ b/arch/x86/kernel/init_task.c
17594 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
17595 * way process stacks are handled. This is done by having a special
17596 * "init_task" linker map entry..
17597 */
17598 -union thread_union init_thread_union __init_task_data =
17599 - { INIT_THREAD_INFO(init_task) };
17600 +union thread_union init_thread_union __init_task_data;
17601
17602 /*
17603 * Initial task structure.
17604 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
17605 * section. Since TSS's are completely CPU-local, we want them
17606 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
17607 */
17608 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
17609 -
17610 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
17611 +EXPORT_SYMBOL(init_tss);
17612 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
17613 index 99c4d30..74c84e9 100644
17614 --- a/arch/x86/kernel/ioport.c
17615 +++ b/arch/x86/kernel/ioport.c
17616 @@ -6,6 +6,7 @@
17617 #include <linux/sched.h>
17618 #include <linux/kernel.h>
17619 #include <linux/capability.h>
17620 +#include <linux/security.h>
17621 #include <linux/errno.h>
17622 #include <linux/types.h>
17623 #include <linux/ioport.h>
17624 @@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17625
17626 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
17627 return -EINVAL;
17628 +#ifdef CONFIG_GRKERNSEC_IO
17629 + if (turn_on && grsec_disable_privio) {
17630 + gr_handle_ioperm();
17631 + return -EPERM;
17632 + }
17633 +#endif
17634 if (turn_on && !capable(CAP_SYS_RAWIO))
17635 return -EPERM;
17636
17637 @@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17638 * because the ->io_bitmap_max value must match the bitmap
17639 * contents:
17640 */
17641 - tss = &per_cpu(init_tss, get_cpu());
17642 + tss = init_tss + get_cpu();
17643
17644 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
17645
17646 @@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, struct pt_regs *regs)
17647 return -EINVAL;
17648 /* Trying to gain more privileges? */
17649 if (level > old) {
17650 +#ifdef CONFIG_GRKERNSEC_IO
17651 + if (grsec_disable_privio) {
17652 + gr_handle_iopl();
17653 + return -EPERM;
17654 + }
17655 +#endif
17656 if (!capable(CAP_SYS_RAWIO))
17657 return -EPERM;
17658 }
17659 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
17660 index 04bbd52..83a07d9 100644
17661 --- a/arch/x86/kernel/irq.c
17662 +++ b/arch/x86/kernel/irq.c
17663 @@ -15,7 +15,7 @@
17664 #include <asm/mce.h>
17665 #include <asm/hw_irq.h>
17666
17667 -atomic_t irq_err_count;
17668 +atomic_unchecked_t irq_err_count;
17669
17670 /* Function pointer for generic interrupt vector handling */
17671 void (*generic_interrupt_extension)(void) = NULL;
17672 @@ -114,9 +114,9 @@ static int show_other_interrupts(struct seq_file *p, int prec)
17673 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
17674 seq_printf(p, " Machine check polls\n");
17675 #endif
17676 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
17677 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
17678 #if defined(CONFIG_X86_IO_APIC)
17679 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
17680 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
17681 #endif
17682 return 0;
17683 }
17684 @@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
17685
17686 u64 arch_irq_stat(void)
17687 {
17688 - u64 sum = atomic_read(&irq_err_count);
17689 + u64 sum = atomic_read_unchecked(&irq_err_count);
17690
17691 #ifdef CONFIG_X86_IO_APIC
17692 - sum += atomic_read(&irq_mis_count);
17693 + sum += atomic_read_unchecked(&irq_mis_count);
17694 #endif
17695 return sum;
17696 }
17697 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
17698 index 7d35d0f..03f1d52 100644
17699 --- a/arch/x86/kernel/irq_32.c
17700 +++ b/arch/x86/kernel/irq_32.c
17701 @@ -35,7 +35,7 @@ static int check_stack_overflow(void)
17702 __asm__ __volatile__("andl %%esp,%0" :
17703 "=r" (sp) : "0" (THREAD_SIZE - 1));
17704
17705 - return sp < (sizeof(struct thread_info) + STACK_WARN);
17706 + return sp < STACK_WARN;
17707 }
17708
17709 static void print_stack_overflow(void)
17710 @@ -54,9 +54,9 @@ static inline void print_stack_overflow(void) { }
17711 * per-CPU IRQ handling contexts (thread information and stack)
17712 */
17713 union irq_ctx {
17714 - struct thread_info tinfo;
17715 - u32 stack[THREAD_SIZE/sizeof(u32)];
17716 -} __attribute__((aligned(PAGE_SIZE)));
17717 + unsigned long previous_esp;
17718 + u32 stack[THREAD_SIZE/sizeof(u32)];
17719 +} __attribute__((aligned(THREAD_SIZE)));
17720
17721 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
17722 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
17723 @@ -78,10 +78,9 @@ static void call_on_stack(void *func, void *stack)
17724 static inline int
17725 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17726 {
17727 - union irq_ctx *curctx, *irqctx;
17728 + union irq_ctx *irqctx;
17729 u32 *isp, arg1, arg2;
17730
17731 - curctx = (union irq_ctx *) current_thread_info();
17732 irqctx = __get_cpu_var(hardirq_ctx);
17733
17734 /*
17735 @@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17736 * handler) we can't do that and just have to keep using the
17737 * current stack (which is the irq stack already after all)
17738 */
17739 - if (unlikely(curctx == irqctx))
17740 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
17741 return 0;
17742
17743 /* build the stack frame on the IRQ stack */
17744 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17745 - irqctx->tinfo.task = curctx->tinfo.task;
17746 - irqctx->tinfo.previous_esp = current_stack_pointer;
17747 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17748 + irqctx->previous_esp = current_stack_pointer;
17749
17750 - /*
17751 - * Copy the softirq bits in preempt_count so that the
17752 - * softirq checks work in the hardirq context.
17753 - */
17754 - irqctx->tinfo.preempt_count =
17755 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
17756 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
17757 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17758 + __set_fs(MAKE_MM_SEG(0));
17759 +#endif
17760
17761 if (unlikely(overflow))
17762 call_on_stack(print_stack_overflow, isp);
17763 @@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17764 : "0" (irq), "1" (desc), "2" (isp),
17765 "D" (desc->handle_irq)
17766 : "memory", "cc", "ecx");
17767 +
17768 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17769 + __set_fs(current_thread_info()->addr_limit);
17770 +#endif
17771 +
17772 return 1;
17773 }
17774
17775 @@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17776 */
17777 void __cpuinit irq_ctx_init(int cpu)
17778 {
17779 - union irq_ctx *irqctx;
17780 -
17781 if (per_cpu(hardirq_ctx, cpu))
17782 return;
17783
17784 - irqctx = &per_cpu(hardirq_stack, cpu);
17785 - irqctx->tinfo.task = NULL;
17786 - irqctx->tinfo.exec_domain = NULL;
17787 - irqctx->tinfo.cpu = cpu;
17788 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
17789 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17790 -
17791 - per_cpu(hardirq_ctx, cpu) = irqctx;
17792 -
17793 - irqctx = &per_cpu(softirq_stack, cpu);
17794 - irqctx->tinfo.task = NULL;
17795 - irqctx->tinfo.exec_domain = NULL;
17796 - irqctx->tinfo.cpu = cpu;
17797 - irqctx->tinfo.preempt_count = 0;
17798 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17799 -
17800 - per_cpu(softirq_ctx, cpu) = irqctx;
17801 + per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
17802 + per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
17803
17804 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
17805 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
17806 @@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
17807 asmlinkage void do_softirq(void)
17808 {
17809 unsigned long flags;
17810 - struct thread_info *curctx;
17811 union irq_ctx *irqctx;
17812 u32 *isp;
17813
17814 @@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
17815 local_irq_save(flags);
17816
17817 if (local_softirq_pending()) {
17818 - curctx = current_thread_info();
17819 irqctx = __get_cpu_var(softirq_ctx);
17820 - irqctx->tinfo.task = curctx->task;
17821 - irqctx->tinfo.previous_esp = current_stack_pointer;
17822 + irqctx->previous_esp = current_stack_pointer;
17823
17824 /* build the stack frame on the softirq stack */
17825 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17826 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17827 +
17828 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17829 + __set_fs(MAKE_MM_SEG(0));
17830 +#endif
17831
17832 call_on_stack(__do_softirq, isp);
17833 +
17834 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17835 + __set_fs(current_thread_info()->addr_limit);
17836 +#endif
17837 +
17838 /*
17839 * Shouldnt happen, we returned above if in_interrupt():
17840 */
17841 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
17842 index 8d82a77..0baf312 100644
17843 --- a/arch/x86/kernel/kgdb.c
17844 +++ b/arch/x86/kernel/kgdb.c
17845 @@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
17846
17847 /* clear the trace bit */
17848 linux_regs->flags &= ~X86_EFLAGS_TF;
17849 - atomic_set(&kgdb_cpu_doing_single_step, -1);
17850 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
17851
17852 /* set the trace bit if we're stepping */
17853 if (remcomInBuffer[0] == 's') {
17854 linux_regs->flags |= X86_EFLAGS_TF;
17855 kgdb_single_step = 1;
17856 - atomic_set(&kgdb_cpu_doing_single_step,
17857 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
17858 raw_smp_processor_id());
17859 }
17860
17861 @@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
17862 break;
17863
17864 case DIE_DEBUG:
17865 - if (atomic_read(&kgdb_cpu_doing_single_step) ==
17866 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
17867 raw_smp_processor_id()) {
17868 if (user_mode(regs))
17869 return single_step_cont(regs, args);
17870 @@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
17871 return instruction_pointer(regs);
17872 }
17873
17874 -struct kgdb_arch arch_kgdb_ops = {
17875 +const struct kgdb_arch arch_kgdb_ops = {
17876 /* Breakpoint instruction: */
17877 .gdb_bpt_instr = { 0xcc },
17878 .flags = KGDB_HW_BREAKPOINT,
17879 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
17880 index 7a67820..8d15b75 100644
17881 --- a/arch/x86/kernel/kprobes.c
17882 +++ b/arch/x86/kernel/kprobes.c
17883 @@ -168,9 +168,13 @@ static void __kprobes set_jmp_op(void *from, void *to)
17884 char op;
17885 s32 raddr;
17886 } __attribute__((packed)) * jop;
17887 - jop = (struct __arch_jmp_op *)from;
17888 +
17889 + jop = (struct __arch_jmp_op *)(ktla_ktva(from));
17890 +
17891 + pax_open_kernel();
17892 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
17893 jop->op = RELATIVEJUMP_INSTRUCTION;
17894 + pax_close_kernel();
17895 }
17896
17897 /*
17898 @@ -195,7 +199,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
17899 kprobe_opcode_t opcode;
17900 kprobe_opcode_t *orig_opcodes = opcodes;
17901
17902 - if (search_exception_tables((unsigned long)opcodes))
17903 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
17904 return 0; /* Page fault may occur on this address. */
17905
17906 retry:
17907 @@ -339,7 +343,9 @@ static void __kprobes fix_riprel(struct kprobe *p)
17908 disp = (u8 *) p->addr + *((s32 *) insn) -
17909 (u8 *) p->ainsn.insn;
17910 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
17911 + pax_open_kernel();
17912 *(s32 *)insn = (s32) disp;
17913 + pax_close_kernel();
17914 }
17915 }
17916 #endif
17917 @@ -347,16 +353,18 @@ static void __kprobes fix_riprel(struct kprobe *p)
17918
17919 static void __kprobes arch_copy_kprobe(struct kprobe *p)
17920 {
17921 - memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
17922 + pax_open_kernel();
17923 + memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
17924 + pax_close_kernel();
17925
17926 fix_riprel(p);
17927
17928 - if (can_boost(p->addr))
17929 + if (can_boost(ktla_ktva(p->addr)))
17930 p->ainsn.boostable = 0;
17931 else
17932 p->ainsn.boostable = -1;
17933
17934 - p->opcode = *p->addr;
17935 + p->opcode = *(ktla_ktva(p->addr));
17936 }
17937
17938 int __kprobes arch_prepare_kprobe(struct kprobe *p)
17939 @@ -434,7 +442,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
17940 if (p->opcode == BREAKPOINT_INSTRUCTION)
17941 regs->ip = (unsigned long)p->addr;
17942 else
17943 - regs->ip = (unsigned long)p->ainsn.insn;
17944 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17945 }
17946
17947 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
17948 @@ -455,7 +463,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
17949 if (p->ainsn.boostable == 1 && !p->post_handler) {
17950 /* Boost up -- we can execute copied instructions directly */
17951 reset_current_kprobe();
17952 - regs->ip = (unsigned long)p->ainsn.insn;
17953 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17954 preempt_enable_no_resched();
17955 return;
17956 }
17957 @@ -525,7 +533,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
17958 struct kprobe_ctlblk *kcb;
17959
17960 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
17961 - if (*addr != BREAKPOINT_INSTRUCTION) {
17962 + if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
17963 /*
17964 * The breakpoint instruction was removed right
17965 * after we hit it. Another cpu has removed
17966 @@ -637,6 +645,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
17967 /* Skip orig_ax, ip, cs */
17968 " addq $24, %rsp\n"
17969 " popfq\n"
17970 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17971 + " btsq $63,(%rsp)\n"
17972 +#endif
17973 #else
17974 " pushf\n"
17975 /*
17976 @@ -777,7 +788,7 @@ static void __kprobes resume_execution(struct kprobe *p,
17977 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
17978 {
17979 unsigned long *tos = stack_addr(regs);
17980 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
17981 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
17982 unsigned long orig_ip = (unsigned long)p->addr;
17983 kprobe_opcode_t *insn = p->ainsn.insn;
17984
17985 @@ -960,7 +971,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
17986 struct die_args *args = data;
17987 int ret = NOTIFY_DONE;
17988
17989 - if (args->regs && user_mode_vm(args->regs))
17990 + if (args->regs && user_mode(args->regs))
17991 return ret;
17992
17993 switch (val) {
17994 diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
17995 index 63b0ec8..6d92227 100644
17996 --- a/arch/x86/kernel/kvm.c
17997 +++ b/arch/x86/kernel/kvm.c
17998 @@ -216,6 +216,7 @@ static void __init paravirt_ops_setup(void)
17999 pv_mmu_ops.set_pud = kvm_set_pud;
18000 #if PAGETABLE_LEVELS == 4
18001 pv_mmu_ops.set_pgd = kvm_set_pgd;
18002 + pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
18003 #endif
18004 #endif
18005 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
18006 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
18007 index ec6ef60..ab2c824 100644
18008 --- a/arch/x86/kernel/ldt.c
18009 +++ b/arch/x86/kernel/ldt.c
18010 @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
18011 if (reload) {
18012 #ifdef CONFIG_SMP
18013 preempt_disable();
18014 - load_LDT(pc);
18015 + load_LDT_nolock(pc);
18016 if (!cpumask_equal(mm_cpumask(current->mm),
18017 cpumask_of(smp_processor_id())))
18018 smp_call_function(flush_ldt, current->mm, 1);
18019 preempt_enable();
18020 #else
18021 - load_LDT(pc);
18022 + load_LDT_nolock(pc);
18023 #endif
18024 }
18025 if (oldsize) {
18026 @@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
18027 return err;
18028
18029 for (i = 0; i < old->size; i++)
18030 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
18031 + write_ldt_entry(new->ldt, i, old->ldt + i);
18032 return 0;
18033 }
18034
18035 @@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
18036 retval = copy_ldt(&mm->context, &old_mm->context);
18037 mutex_unlock(&old_mm->context.lock);
18038 }
18039 +
18040 + if (tsk == current) {
18041 + mm->context.vdso = 0;
18042 +
18043 +#ifdef CONFIG_X86_32
18044 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18045 + mm->context.user_cs_base = 0UL;
18046 + mm->context.user_cs_limit = ~0UL;
18047 +
18048 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
18049 + cpus_clear(mm->context.cpu_user_cs_mask);
18050 +#endif
18051 +
18052 +#endif
18053 +#endif
18054 +
18055 + }
18056 +
18057 return retval;
18058 }
18059
18060 @@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
18061 }
18062 }
18063
18064 +#ifdef CONFIG_PAX_SEGMEXEC
18065 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
18066 + error = -EINVAL;
18067 + goto out_unlock;
18068 + }
18069 +#endif
18070 +
18071 fill_ldt(&ldt, &ldt_info);
18072 if (oldmode)
18073 ldt.avl = 0;
18074 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
18075 index c1c429d..f02eaf9 100644
18076 --- a/arch/x86/kernel/machine_kexec_32.c
18077 +++ b/arch/x86/kernel/machine_kexec_32.c
18078 @@ -26,7 +26,7 @@
18079 #include <asm/system.h>
18080 #include <asm/cacheflush.h>
18081
18082 -static void set_idt(void *newidt, __u16 limit)
18083 +static void set_idt(struct desc_struct *newidt, __u16 limit)
18084 {
18085 struct desc_ptr curidt;
18086
18087 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
18088 }
18089
18090
18091 -static void set_gdt(void *newgdt, __u16 limit)
18092 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
18093 {
18094 struct desc_ptr curgdt;
18095
18096 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
18097 }
18098
18099 control_page = page_address(image->control_code_page);
18100 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
18101 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
18102
18103 relocate_kernel_ptr = control_page;
18104 page_list[PA_CONTROL_PAGE] = __pa(control_page);
18105 diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
18106 index 1e47679..e73449d 100644
18107 --- a/arch/x86/kernel/microcode_amd.c
18108 +++ b/arch/x86/kernel/microcode_amd.c
18109 @@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int cpu)
18110 uci->mc = NULL;
18111 }
18112
18113 -static struct microcode_ops microcode_amd_ops = {
18114 +static const struct microcode_ops microcode_amd_ops = {
18115 .request_microcode_user = request_microcode_user,
18116 .request_microcode_fw = request_microcode_fw,
18117 .collect_cpu_info = collect_cpu_info_amd,
18118 @@ -372,7 +372,7 @@ static struct microcode_ops microcode_amd_ops = {
18119 .microcode_fini_cpu = microcode_fini_cpu_amd,
18120 };
18121
18122 -struct microcode_ops * __init init_amd_microcode(void)
18123 +const struct microcode_ops * __init init_amd_microcode(void)
18124 {
18125 return &microcode_amd_ops;
18126 }
18127 diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
18128 index 378e9a8..b5a6ea9 100644
18129 --- a/arch/x86/kernel/microcode_core.c
18130 +++ b/arch/x86/kernel/microcode_core.c
18131 @@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
18132
18133 #define MICROCODE_VERSION "2.00"
18134
18135 -static struct microcode_ops *microcode_ops;
18136 +static const struct microcode_ops *microcode_ops;
18137
18138 /*
18139 * Synchronization.
18140 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
18141 index 0d334dd..14cedaf 100644
18142 --- a/arch/x86/kernel/microcode_intel.c
18143 +++ b/arch/x86/kernel/microcode_intel.c
18144 @@ -443,13 +443,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
18145
18146 static int get_ucode_user(void *to, const void *from, size_t n)
18147 {
18148 - return copy_from_user(to, from, n);
18149 + return copy_from_user(to, (const void __force_user *)from, n);
18150 }
18151
18152 static enum ucode_state
18153 request_microcode_user(int cpu, const void __user *buf, size_t size)
18154 {
18155 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
18156 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
18157 }
18158
18159 static void microcode_fini_cpu(int cpu)
18160 @@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
18161 uci->mc = NULL;
18162 }
18163
18164 -static struct microcode_ops microcode_intel_ops = {
18165 +static const struct microcode_ops microcode_intel_ops = {
18166 .request_microcode_user = request_microcode_user,
18167 .request_microcode_fw = request_microcode_fw,
18168 .collect_cpu_info = collect_cpu_info,
18169 @@ -468,7 +468,7 @@ static struct microcode_ops microcode_intel_ops = {
18170 .microcode_fini_cpu = microcode_fini_cpu,
18171 };
18172
18173 -struct microcode_ops * __init init_intel_microcode(void)
18174 +const struct microcode_ops * __init init_intel_microcode(void)
18175 {
18176 return &microcode_intel_ops;
18177 }
18178 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
18179 index 89f386f..9028f51 100644
18180 --- a/arch/x86/kernel/module.c
18181 +++ b/arch/x86/kernel/module.c
18182 @@ -34,7 +34,7 @@
18183 #define DEBUGP(fmt...)
18184 #endif
18185
18186 -void *module_alloc(unsigned long size)
18187 +static void *__module_alloc(unsigned long size, pgprot_t prot)
18188 {
18189 struct vm_struct *area;
18190
18191 @@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
18192 if (!area)
18193 return NULL;
18194
18195 - return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
18196 - PAGE_KERNEL_EXEC);
18197 + return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
18198 +}
18199 +
18200 +void *module_alloc(unsigned long size)
18201 +{
18202 +
18203 +#ifdef CONFIG_PAX_KERNEXEC
18204 + return __module_alloc(size, PAGE_KERNEL);
18205 +#else
18206 + return __module_alloc(size, PAGE_KERNEL_EXEC);
18207 +#endif
18208 +
18209 }
18210
18211 /* Free memory returned from module_alloc */
18212 @@ -58,6 +68,40 @@ void module_free(struct module *mod, void *module_region)
18213 vfree(module_region);
18214 }
18215
18216 +#ifdef CONFIG_PAX_KERNEXEC
18217 +#ifdef CONFIG_X86_32
18218 +void *module_alloc_exec(unsigned long size)
18219 +{
18220 + struct vm_struct *area;
18221 +
18222 + if (size == 0)
18223 + return NULL;
18224 +
18225 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
18226 + return area ? area->addr : NULL;
18227 +}
18228 +EXPORT_SYMBOL(module_alloc_exec);
18229 +
18230 +void module_free_exec(struct module *mod, void *module_region)
18231 +{
18232 + vunmap(module_region);
18233 +}
18234 +EXPORT_SYMBOL(module_free_exec);
18235 +#else
18236 +void module_free_exec(struct module *mod, void *module_region)
18237 +{
18238 + module_free(mod, module_region);
18239 +}
18240 +EXPORT_SYMBOL(module_free_exec);
18241 +
18242 +void *module_alloc_exec(unsigned long size)
18243 +{
18244 + return __module_alloc(size, PAGE_KERNEL_RX);
18245 +}
18246 +EXPORT_SYMBOL(module_alloc_exec);
18247 +#endif
18248 +#endif
18249 +
18250 /* We don't need anything special. */
18251 int module_frob_arch_sections(Elf_Ehdr *hdr,
18252 Elf_Shdr *sechdrs,
18253 @@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18254 unsigned int i;
18255 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
18256 Elf32_Sym *sym;
18257 - uint32_t *location;
18258 + uint32_t *plocation, location;
18259
18260 DEBUGP("Applying relocate section %u to %u\n", relsec,
18261 sechdrs[relsec].sh_info);
18262 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
18263 /* This is where to make the change */
18264 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
18265 - + rel[i].r_offset;
18266 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
18267 + location = (uint32_t)plocation;
18268 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
18269 + plocation = ktla_ktva((void *)plocation);
18270 /* This is the symbol it is referring to. Note that all
18271 undefined symbols have been resolved. */
18272 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
18273 @@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18274 switch (ELF32_R_TYPE(rel[i].r_info)) {
18275 case R_386_32:
18276 /* We add the value into the location given */
18277 - *location += sym->st_value;
18278 + pax_open_kernel();
18279 + *plocation += sym->st_value;
18280 + pax_close_kernel();
18281 break;
18282 case R_386_PC32:
18283 /* Add the value, subtract its postition */
18284 - *location += sym->st_value - (uint32_t)location;
18285 + pax_open_kernel();
18286 + *plocation += sym->st_value - location;
18287 + pax_close_kernel();
18288 break;
18289 default:
18290 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
18291 @@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
18292 case R_X86_64_NONE:
18293 break;
18294 case R_X86_64_64:
18295 + pax_open_kernel();
18296 *(u64 *)loc = val;
18297 + pax_close_kernel();
18298 break;
18299 case R_X86_64_32:
18300 + pax_open_kernel();
18301 *(u32 *)loc = val;
18302 + pax_close_kernel();
18303 if (val != *(u32 *)loc)
18304 goto overflow;
18305 break;
18306 case R_X86_64_32S:
18307 + pax_open_kernel();
18308 *(s32 *)loc = val;
18309 + pax_close_kernel();
18310 if ((s64)val != *(s32 *)loc)
18311 goto overflow;
18312 break;
18313 case R_X86_64_PC32:
18314 val -= (u64)loc;
18315 + pax_open_kernel();
18316 *(u32 *)loc = val;
18317 + pax_close_kernel();
18318 +
18319 #if 0
18320 if ((s64)val != *(s32 *)loc)
18321 goto overflow;
18322 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
18323 index 3a7c5a4..9191528 100644
18324 --- a/arch/x86/kernel/paravirt-spinlocks.c
18325 +++ b/arch/x86/kernel/paravirt-spinlocks.c
18326 @@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
18327 __raw_spin_lock(lock);
18328 }
18329
18330 -struct pv_lock_ops pv_lock_ops = {
18331 +struct pv_lock_ops pv_lock_ops __read_only = {
18332 #ifdef CONFIG_SMP
18333 .spin_is_locked = __ticket_spin_is_locked,
18334 .spin_is_contended = __ticket_spin_is_contended,
18335 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
18336 index 1b1739d..dea6077 100644
18337 --- a/arch/x86/kernel/paravirt.c
18338 +++ b/arch/x86/kernel/paravirt.c
18339 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
18340 {
18341 return x;
18342 }
18343 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18344 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
18345 +#endif
18346
18347 void __init default_banner(void)
18348 {
18349 @@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
18350 * corresponding structure. */
18351 static void *get_call_destination(u8 type)
18352 {
18353 - struct paravirt_patch_template tmpl = {
18354 + const struct paravirt_patch_template tmpl = {
18355 .pv_init_ops = pv_init_ops,
18356 .pv_time_ops = pv_time_ops,
18357 .pv_cpu_ops = pv_cpu_ops,
18358 @@ -133,6 +136,8 @@ static void *get_call_destination(u8 type)
18359 .pv_lock_ops = pv_lock_ops,
18360 #endif
18361 };
18362 +
18363 + pax_track_stack();
18364 return *((void **)&tmpl + type);
18365 }
18366
18367 @@ -145,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
18368 if (opfunc == NULL)
18369 /* If there's no function, patch it with a ud2a (BUG) */
18370 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
18371 - else if (opfunc == _paravirt_nop)
18372 + else if (opfunc == (void *)_paravirt_nop)
18373 /* If the operation is a nop, then nop the callsite */
18374 ret = paravirt_patch_nop();
18375
18376 /* identity functions just return their single argument */
18377 - else if (opfunc == _paravirt_ident_32)
18378 + else if (opfunc == (void *)_paravirt_ident_32)
18379 ret = paravirt_patch_ident_32(insnbuf, len);
18380 - else if (opfunc == _paravirt_ident_64)
18381 + else if (opfunc == (void *)_paravirt_ident_64)
18382 ret = paravirt_patch_ident_64(insnbuf, len);
18383 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18384 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
18385 + ret = paravirt_patch_ident_64(insnbuf, len);
18386 +#endif
18387
18388 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
18389 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
18390 @@ -178,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
18391 if (insn_len > len || start == NULL)
18392 insn_len = len;
18393 else
18394 - memcpy(insnbuf, start, insn_len);
18395 + memcpy(insnbuf, ktla_ktva(start), insn_len);
18396
18397 return insn_len;
18398 }
18399 @@ -294,22 +303,22 @@ void arch_flush_lazy_mmu_mode(void)
18400 preempt_enable();
18401 }
18402
18403 -struct pv_info pv_info = {
18404 +struct pv_info pv_info __read_only = {
18405 .name = "bare hardware",
18406 .paravirt_enabled = 0,
18407 .kernel_rpl = 0,
18408 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
18409 };
18410
18411 -struct pv_init_ops pv_init_ops = {
18412 +struct pv_init_ops pv_init_ops __read_only = {
18413 .patch = native_patch,
18414 };
18415
18416 -struct pv_time_ops pv_time_ops = {
18417 +struct pv_time_ops pv_time_ops __read_only = {
18418 .sched_clock = native_sched_clock,
18419 };
18420
18421 -struct pv_irq_ops pv_irq_ops = {
18422 +struct pv_irq_ops pv_irq_ops __read_only = {
18423 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
18424 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
18425 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
18426 @@ -321,7 +330,7 @@ struct pv_irq_ops pv_irq_ops = {
18427 #endif
18428 };
18429
18430 -struct pv_cpu_ops pv_cpu_ops = {
18431 +struct pv_cpu_ops pv_cpu_ops __read_only = {
18432 .cpuid = native_cpuid,
18433 .get_debugreg = native_get_debugreg,
18434 .set_debugreg = native_set_debugreg,
18435 @@ -382,21 +391,26 @@ struct pv_cpu_ops pv_cpu_ops = {
18436 .end_context_switch = paravirt_nop,
18437 };
18438
18439 -struct pv_apic_ops pv_apic_ops = {
18440 +struct pv_apic_ops pv_apic_ops __read_only = {
18441 #ifdef CONFIG_X86_LOCAL_APIC
18442 .startup_ipi_hook = paravirt_nop,
18443 #endif
18444 };
18445
18446 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
18447 +#ifdef CONFIG_X86_32
18448 +#ifdef CONFIG_X86_PAE
18449 +/* 64-bit pagetable entries */
18450 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
18451 +#else
18452 /* 32-bit pagetable entries */
18453 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
18454 +#endif
18455 #else
18456 /* 64-bit pagetable entries */
18457 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
18458 #endif
18459
18460 -struct pv_mmu_ops pv_mmu_ops = {
18461 +struct pv_mmu_ops pv_mmu_ops __read_only = {
18462
18463 .read_cr2 = native_read_cr2,
18464 .write_cr2 = native_write_cr2,
18465 @@ -448,6 +462,7 @@ struct pv_mmu_ops pv_mmu_ops = {
18466 .make_pud = PTE_IDENT,
18467
18468 .set_pgd = native_set_pgd,
18469 + .set_pgd_batched = native_set_pgd_batched,
18470 #endif
18471 #endif /* PAGETABLE_LEVELS >= 3 */
18472
18473 @@ -467,6 +482,12 @@ struct pv_mmu_ops pv_mmu_ops = {
18474 },
18475
18476 .set_fixmap = native_set_fixmap,
18477 +
18478 +#ifdef CONFIG_PAX_KERNEXEC
18479 + .pax_open_kernel = native_pax_open_kernel,
18480 + .pax_close_kernel = native_pax_close_kernel,
18481 +#endif
18482 +
18483 };
18484
18485 EXPORT_SYMBOL_GPL(pv_time_ops);
18486 diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
18487 index 1a2d4b1..6a0dd55 100644
18488 --- a/arch/x86/kernel/pci-calgary_64.c
18489 +++ b/arch/x86/kernel/pci-calgary_64.c
18490 @@ -477,7 +477,7 @@ static void calgary_free_coherent(struct device *dev, size_t size,
18491 free_pages((unsigned long)vaddr, get_order(size));
18492 }
18493
18494 -static struct dma_map_ops calgary_dma_ops = {
18495 +static const struct dma_map_ops calgary_dma_ops = {
18496 .alloc_coherent = calgary_alloc_coherent,
18497 .free_coherent = calgary_free_coherent,
18498 .map_sg = calgary_map_sg,
18499 diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
18500 index 6ac3931..42b4414 100644
18501 --- a/arch/x86/kernel/pci-dma.c
18502 +++ b/arch/x86/kernel/pci-dma.c
18503 @@ -14,7 +14,7 @@
18504
18505 static int forbid_dac __read_mostly;
18506
18507 -struct dma_map_ops *dma_ops;
18508 +const struct dma_map_ops *dma_ops;
18509 EXPORT_SYMBOL(dma_ops);
18510
18511 static int iommu_sac_force __read_mostly;
18512 @@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
18513
18514 int dma_supported(struct device *dev, u64 mask)
18515 {
18516 - struct dma_map_ops *ops = get_dma_ops(dev);
18517 + const struct dma_map_ops *ops = get_dma_ops(dev);
18518
18519 #ifdef CONFIG_PCI
18520 if (mask > 0xffffffff && forbid_dac > 0) {
18521 diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
18522 index 1c76691..e3632db 100644
18523 --- a/arch/x86/kernel/pci-gart_64.c
18524 +++ b/arch/x86/kernel/pci-gart_64.c
18525 @@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
18526 return -1;
18527 }
18528
18529 -static struct dma_map_ops gart_dma_ops = {
18530 +static const struct dma_map_ops gart_dma_ops = {
18531 .map_sg = gart_map_sg,
18532 .unmap_sg = gart_unmap_sg,
18533 .map_page = gart_map_page,
18534 diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
18535 index a3933d4..c898869 100644
18536 --- a/arch/x86/kernel/pci-nommu.c
18537 +++ b/arch/x86/kernel/pci-nommu.c
18538 @@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(struct device *dev,
18539 flush_write_buffers();
18540 }
18541
18542 -struct dma_map_ops nommu_dma_ops = {
18543 +const struct dma_map_ops nommu_dma_ops = {
18544 .alloc_coherent = dma_generic_alloc_coherent,
18545 .free_coherent = nommu_free_coherent,
18546 .map_sg = nommu_map_sg,
18547 diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
18548 index aaa6b78..4de1881 100644
18549 --- a/arch/x86/kernel/pci-swiotlb.c
18550 +++ b/arch/x86/kernel/pci-swiotlb.c
18551 @@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
18552 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
18553 }
18554
18555 -static struct dma_map_ops swiotlb_dma_ops = {
18556 +static const struct dma_map_ops swiotlb_dma_ops = {
18557 .mapping_error = swiotlb_dma_mapping_error,
18558 .alloc_coherent = x86_swiotlb_alloc_coherent,
18559 .free_coherent = swiotlb_free_coherent,
18560 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
18561 index fc6c84d..0312ca2 100644
18562 --- a/arch/x86/kernel/process.c
18563 +++ b/arch/x86/kernel/process.c
18564 @@ -51,16 +51,33 @@ void free_thread_xstate(struct task_struct *tsk)
18565
18566 void free_thread_info(struct thread_info *ti)
18567 {
18568 - free_thread_xstate(ti->task);
18569 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
18570 }
18571
18572 +static struct kmem_cache *task_struct_cachep;
18573 +
18574 void arch_task_cache_init(void)
18575 {
18576 - task_xstate_cachep =
18577 - kmem_cache_create("task_xstate", xstate_size,
18578 + /* create a slab on which task_structs can be allocated */
18579 + task_struct_cachep =
18580 + kmem_cache_create("task_struct", sizeof(struct task_struct),
18581 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
18582 +
18583 + task_xstate_cachep =
18584 + kmem_cache_create("task_xstate", xstate_size,
18585 __alignof__(union thread_xstate),
18586 - SLAB_PANIC | SLAB_NOTRACK, NULL);
18587 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
18588 +}
18589 +
18590 +struct task_struct *alloc_task_struct(void)
18591 +{
18592 + return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
18593 +}
18594 +
18595 +void free_task_struct(struct task_struct *task)
18596 +{
18597 + free_thread_xstate(task);
18598 + kmem_cache_free(task_struct_cachep, task);
18599 }
18600
18601 /*
18602 @@ -73,7 +90,7 @@ void exit_thread(void)
18603 unsigned long *bp = t->io_bitmap_ptr;
18604
18605 if (bp) {
18606 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
18607 + struct tss_struct *tss = init_tss + get_cpu();
18608
18609 t->io_bitmap_ptr = NULL;
18610 clear_thread_flag(TIF_IO_BITMAP);
18611 @@ -93,6 +110,9 @@ void flush_thread(void)
18612
18613 clear_tsk_thread_flag(tsk, TIF_DEBUG);
18614
18615 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18616 + loadsegment(gs, 0);
18617 +#endif
18618 tsk->thread.debugreg0 = 0;
18619 tsk->thread.debugreg1 = 0;
18620 tsk->thread.debugreg2 = 0;
18621 @@ -307,7 +327,7 @@ void default_idle(void)
18622 EXPORT_SYMBOL(default_idle);
18623 #endif
18624
18625 -void stop_this_cpu(void *dummy)
18626 +__noreturn void stop_this_cpu(void *dummy)
18627 {
18628 local_irq_disable();
18629 /*
18630 @@ -568,16 +588,38 @@ static int __init idle_setup(char *str)
18631 }
18632 early_param("idle", idle_setup);
18633
18634 -unsigned long arch_align_stack(unsigned long sp)
18635 +#ifdef CONFIG_PAX_RANDKSTACK
18636 +void pax_randomize_kstack(struct pt_regs *regs)
18637 {
18638 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
18639 - sp -= get_random_int() % 8192;
18640 - return sp & ~0xf;
18641 -}
18642 + struct thread_struct *thread = &current->thread;
18643 + unsigned long time;
18644
18645 -unsigned long arch_randomize_brk(struct mm_struct *mm)
18646 -{
18647 - unsigned long range_end = mm->brk + 0x02000000;
18648 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
18649 + if (!randomize_va_space)
18650 + return;
18651 +
18652 + if (v8086_mode(regs))
18653 + return;
18654 +
18655 + rdtscl(time);
18656 +
18657 + /* P4 seems to return a 0 LSB, ignore it */
18658 +#ifdef CONFIG_MPENTIUM4
18659 + time &= 0x3EUL;
18660 + time <<= 2;
18661 +#elif defined(CONFIG_X86_64)
18662 + time &= 0xFUL;
18663 + time <<= 4;
18664 +#else
18665 + time &= 0x1FUL;
18666 + time <<= 3;
18667 +#endif
18668 +
18669 + thread->sp0 ^= time;
18670 + load_sp0(init_tss + smp_processor_id(), thread);
18671 +
18672 +#ifdef CONFIG_X86_64
18673 + percpu_write(kernel_stack, thread->sp0);
18674 +#endif
18675 }
18676 +#endif
18677
18678 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
18679 index c40c432..6e1df72 100644
18680 --- a/arch/x86/kernel/process_32.c
18681 +++ b/arch/x86/kernel/process_32.c
18682 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
18683 unsigned long thread_saved_pc(struct task_struct *tsk)
18684 {
18685 return ((unsigned long *)tsk->thread.sp)[3];
18686 +//XXX return tsk->thread.eip;
18687 }
18688
18689 #ifndef CONFIG_SMP
18690 @@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, int all)
18691 unsigned short ss, gs;
18692 const char *board;
18693
18694 - if (user_mode_vm(regs)) {
18695 + if (user_mode(regs)) {
18696 sp = regs->sp;
18697 ss = regs->ss & 0xffff;
18698 - gs = get_user_gs(regs);
18699 } else {
18700 sp = (unsigned long) (&regs->sp);
18701 savesegment(ss, ss);
18702 - savesegment(gs, gs);
18703 }
18704 + gs = get_user_gs(regs);
18705
18706 printk("\n");
18707
18708 @@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
18709 regs.bx = (unsigned long) fn;
18710 regs.dx = (unsigned long) arg;
18711
18712 - regs.ds = __USER_DS;
18713 - regs.es = __USER_DS;
18714 + regs.ds = __KERNEL_DS;
18715 + regs.es = __KERNEL_DS;
18716 regs.fs = __KERNEL_PERCPU;
18717 - regs.gs = __KERNEL_STACK_CANARY;
18718 + savesegment(gs, regs.gs);
18719 regs.orig_ax = -1;
18720 regs.ip = (unsigned long) kernel_thread_helper;
18721 regs.cs = __KERNEL_CS | get_kernel_rpl();
18722 @@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18723 struct task_struct *tsk;
18724 int err;
18725
18726 - childregs = task_pt_regs(p);
18727 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
18728 *childregs = *regs;
18729 childregs->ax = 0;
18730 childregs->sp = sp;
18731
18732 p->thread.sp = (unsigned long) childregs;
18733 p->thread.sp0 = (unsigned long) (childregs+1);
18734 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18735
18736 p->thread.ip = (unsigned long) ret_from_fork;
18737
18738 @@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18739 struct thread_struct *prev = &prev_p->thread,
18740 *next = &next_p->thread;
18741 int cpu = smp_processor_id();
18742 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
18743 + struct tss_struct *tss = init_tss + cpu;
18744 bool preload_fpu;
18745
18746 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
18747 @@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18748 */
18749 lazy_save_gs(prev->gs);
18750
18751 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18752 + __set_fs(task_thread_info(next_p)->addr_limit);
18753 +#endif
18754 +
18755 /*
18756 * Load the per-thread Thread-Local Storage descriptor.
18757 */
18758 @@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18759 */
18760 arch_end_context_switch(next_p);
18761
18762 + percpu_write(current_task, next_p);
18763 + percpu_write(current_tinfo, &next_p->tinfo);
18764 +
18765 if (preload_fpu)
18766 __math_state_restore();
18767
18768 @@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18769 if (prev->gs | next->gs)
18770 lazy_load_gs(next->gs);
18771
18772 - percpu_write(current_task, next_p);
18773 -
18774 return prev_p;
18775 }
18776
18777 @@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_struct *p)
18778 } while (count++ < 16);
18779 return 0;
18780 }
18781 -
18782 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
18783 index 39493bc..196816d 100644
18784 --- a/arch/x86/kernel/process_64.c
18785 +++ b/arch/x86/kernel/process_64.c
18786 @@ -91,7 +91,7 @@ static void __exit_idle(void)
18787 void exit_idle(void)
18788 {
18789 /* idle loop has pid 0 */
18790 - if (current->pid)
18791 + if (task_pid_nr(current))
18792 return;
18793 __exit_idle();
18794 }
18795 @@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, int all)
18796 if (!board)
18797 board = "";
18798 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
18799 - current->pid, current->comm, print_tainted(),
18800 + task_pid_nr(current), current->comm, print_tainted(),
18801 init_utsname()->release,
18802 (int)strcspn(init_utsname()->version, " "),
18803 init_utsname()->version, board);
18804 @@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18805 struct pt_regs *childregs;
18806 struct task_struct *me = current;
18807
18808 - childregs = ((struct pt_regs *)
18809 - (THREAD_SIZE + task_stack_page(p))) - 1;
18810 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
18811 *childregs = *regs;
18812
18813 childregs->ax = 0;
18814 @@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18815 p->thread.sp = (unsigned long) childregs;
18816 p->thread.sp0 = (unsigned long) (childregs+1);
18817 p->thread.usersp = me->thread.usersp;
18818 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18819
18820 set_tsk_thread_flag(p, TIF_FORK);
18821
18822 @@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18823 struct thread_struct *prev = &prev_p->thread;
18824 struct thread_struct *next = &next_p->thread;
18825 int cpu = smp_processor_id();
18826 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
18827 + struct tss_struct *tss = init_tss + cpu;
18828 unsigned fsindex, gsindex;
18829 bool preload_fpu;
18830
18831 @@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18832 prev->usersp = percpu_read(old_rsp);
18833 percpu_write(old_rsp, next->usersp);
18834 percpu_write(current_task, next_p);
18835 + percpu_write(current_tinfo, &next_p->tinfo);
18836
18837 - percpu_write(kernel_stack,
18838 - (unsigned long)task_stack_page(next_p) +
18839 - THREAD_SIZE - KERNEL_STACK_OFFSET);
18840 + percpu_write(kernel_stack, next->sp0);
18841
18842 /*
18843 * Now maybe reload the debug registers and handle I/O bitmaps
18844 @@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_struct *p)
18845 if (!p || p == current || p->state == TASK_RUNNING)
18846 return 0;
18847 stack = (unsigned long)task_stack_page(p);
18848 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
18849 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
18850 return 0;
18851 fp = *(u64 *)(p->thread.sp);
18852 do {
18853 - if (fp < (unsigned long)stack ||
18854 - fp >= (unsigned long)stack+THREAD_SIZE)
18855 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
18856 return 0;
18857 ip = *(u64 *)(fp+8);
18858 if (!in_sched_functions(ip))
18859 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
18860 index c06acdd..3f5fff5 100644
18861 --- a/arch/x86/kernel/ptrace.c
18862 +++ b/arch/x86/kernel/ptrace.c
18863 @@ -925,7 +925,7 @@ static const struct user_regset_view user_x86_32_view; /* Initialized below. */
18864 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18865 {
18866 int ret;
18867 - unsigned long __user *datap = (unsigned long __user *)data;
18868 + unsigned long __user *datap = (__force unsigned long __user *)data;
18869
18870 switch (request) {
18871 /* read the word at location addr in the USER area. */
18872 @@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18873 if (addr < 0)
18874 return -EIO;
18875 ret = do_get_thread_area(child, addr,
18876 - (struct user_desc __user *) data);
18877 + (__force struct user_desc __user *) data);
18878 break;
18879
18880 case PTRACE_SET_THREAD_AREA:
18881 if (addr < 0)
18882 return -EIO;
18883 ret = do_set_thread_area(child, addr,
18884 - (struct user_desc __user *) data, 0);
18885 + (__force struct user_desc __user *) data, 0);
18886 break;
18887 #endif
18888
18889 @@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18890 #ifdef CONFIG_X86_PTRACE_BTS
18891 case PTRACE_BTS_CONFIG:
18892 ret = ptrace_bts_config
18893 - (child, data, (struct ptrace_bts_config __user *)addr);
18894 + (child, data, (__force struct ptrace_bts_config __user *)addr);
18895 break;
18896
18897 case PTRACE_BTS_STATUS:
18898 ret = ptrace_bts_status
18899 - (child, data, (struct ptrace_bts_config __user *)addr);
18900 + (child, data, (__force struct ptrace_bts_config __user *)addr);
18901 break;
18902
18903 case PTRACE_BTS_SIZE:
18904 @@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18905
18906 case PTRACE_BTS_GET:
18907 ret = ptrace_bts_read_record
18908 - (child, data, (struct bts_struct __user *) addr);
18909 + (child, data, (__force struct bts_struct __user *) addr);
18910 break;
18911
18912 case PTRACE_BTS_CLEAR:
18913 @@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18914
18915 case PTRACE_BTS_DRAIN:
18916 ret = ptrace_bts_drain
18917 - (child, data, (struct bts_struct __user *) addr);
18918 + (child, data, (__force struct bts_struct __user *) addr);
18919 break;
18920 #endif /* CONFIG_X86_PTRACE_BTS */
18921
18922 @@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
18923 info.si_code = si_code;
18924
18925 /* User-mode ip? */
18926 - info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
18927 + info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
18928
18929 /* Send us the fake SIGTRAP */
18930 force_sig_info(SIGTRAP, &info, tsk);
18931 @@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
18932 * We must return the syscall number to actually look up in the table.
18933 * This can be -1L to skip running any syscall at all.
18934 */
18935 -asmregparm long syscall_trace_enter(struct pt_regs *regs)
18936 +long syscall_trace_enter(struct pt_regs *regs)
18937 {
18938 long ret = 0;
18939
18940 @@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
18941 return ret ?: regs->orig_ax;
18942 }
18943
18944 -asmregparm void syscall_trace_leave(struct pt_regs *regs)
18945 +void syscall_trace_leave(struct pt_regs *regs)
18946 {
18947 if (unlikely(current->audit_context))
18948 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
18949 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
18950 index cf98100..e76e03d 100644
18951 --- a/arch/x86/kernel/reboot.c
18952 +++ b/arch/x86/kernel/reboot.c
18953 @@ -33,7 +33,7 @@ void (*pm_power_off)(void);
18954 EXPORT_SYMBOL(pm_power_off);
18955
18956 static const struct desc_ptr no_idt = {};
18957 -static int reboot_mode;
18958 +static unsigned short reboot_mode;
18959 enum reboot_type reboot_type = BOOT_KBD;
18960 int reboot_force;
18961
18962 @@ -292,12 +292,12 @@ core_initcall(reboot_init);
18963 controller to pulse the CPU reset line, which is more thorough, but
18964 doesn't work with at least one type of 486 motherboard. It is easy
18965 to stop this code working; hence the copious comments. */
18966 -static const unsigned long long
18967 -real_mode_gdt_entries [3] =
18968 +static struct desc_struct
18969 +real_mode_gdt_entries [3] __read_only =
18970 {
18971 - 0x0000000000000000ULL, /* Null descriptor */
18972 - 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
18973 - 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
18974 + GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
18975 + GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
18976 + GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
18977 };
18978
18979 static const struct desc_ptr
18980 @@ -346,7 +346,7 @@ static const unsigned char jump_to_bios [] =
18981 * specified by the code and length parameters.
18982 * We assume that length will aways be less that 100!
18983 */
18984 -void machine_real_restart(const unsigned char *code, int length)
18985 +__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
18986 {
18987 local_irq_disable();
18988
18989 @@ -366,8 +366,8 @@ void machine_real_restart(const unsigned char *code, int length)
18990 /* Remap the kernel at virtual address zero, as well as offset zero
18991 from the kernel segment. This assumes the kernel segment starts at
18992 virtual address PAGE_OFFSET. */
18993 - memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
18994 - sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
18995 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
18996 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
18997
18998 /*
18999 * Use `swapper_pg_dir' as our page directory.
19000 @@ -379,16 +379,15 @@ void machine_real_restart(const unsigned char *code, int length)
19001 boot)". This seems like a fairly standard thing that gets set by
19002 REBOOT.COM programs, and the previous reset routine did this
19003 too. */
19004 - *((unsigned short *)0x472) = reboot_mode;
19005 + *(unsigned short *)(__va(0x472)) = reboot_mode;
19006
19007 /* For the switch to real mode, copy some code to low memory. It has
19008 to be in the first 64k because it is running in 16-bit mode, and it
19009 has to have the same physical and virtual address, because it turns
19010 off paging. Copy it near the end of the first page, out of the way
19011 of BIOS variables. */
19012 - memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
19013 - real_mode_switch, sizeof (real_mode_switch));
19014 - memcpy((void *)(0x1000 - 100), code, length);
19015 + memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
19016 + memcpy(__va(0x1000 - 100), code, length);
19017
19018 /* Set up the IDT for real mode. */
19019 load_idt(&real_mode_idt);
19020 @@ -416,6 +415,7 @@ void machine_real_restart(const unsigned char *code, int length)
19021 __asm__ __volatile__ ("ljmp $0x0008,%0"
19022 :
19023 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
19024 + do { } while (1);
19025 }
19026 #ifdef CONFIG_APM_MODULE
19027 EXPORT_SYMBOL(machine_real_restart);
19028 @@ -544,7 +544,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
19029 {
19030 }
19031
19032 -static void native_machine_emergency_restart(void)
19033 +__noreturn static void native_machine_emergency_restart(void)
19034 {
19035 int i;
19036
19037 @@ -659,13 +659,13 @@ void native_machine_shutdown(void)
19038 #endif
19039 }
19040
19041 -static void __machine_emergency_restart(int emergency)
19042 +static __noreturn void __machine_emergency_restart(int emergency)
19043 {
19044 reboot_emergency = emergency;
19045 machine_ops.emergency_restart();
19046 }
19047
19048 -static void native_machine_restart(char *__unused)
19049 +static __noreturn void native_machine_restart(char *__unused)
19050 {
19051 printk("machine restart\n");
19052
19053 @@ -674,7 +674,7 @@ static void native_machine_restart(char *__unused)
19054 __machine_emergency_restart(0);
19055 }
19056
19057 -static void native_machine_halt(void)
19058 +static __noreturn void native_machine_halt(void)
19059 {
19060 /* stop other cpus and apics */
19061 machine_shutdown();
19062 @@ -685,7 +685,7 @@ static void native_machine_halt(void)
19063 stop_this_cpu(NULL);
19064 }
19065
19066 -static void native_machine_power_off(void)
19067 +__noreturn static void native_machine_power_off(void)
19068 {
19069 if (pm_power_off) {
19070 if (!reboot_force)
19071 @@ -694,6 +694,7 @@ static void native_machine_power_off(void)
19072 }
19073 /* a fallback in case there is no PM info available */
19074 tboot_shutdown(TB_SHUTDOWN_HALT);
19075 + do { } while (1);
19076 }
19077
19078 struct machine_ops machine_ops = {
19079 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
19080 index 7a6f3b3..976a959 100644
19081 --- a/arch/x86/kernel/relocate_kernel_64.S
19082 +++ b/arch/x86/kernel/relocate_kernel_64.S
19083 @@ -11,6 +11,7 @@
19084 #include <asm/kexec.h>
19085 #include <asm/processor-flags.h>
19086 #include <asm/pgtable_types.h>
19087 +#include <asm/alternative-asm.h>
19088
19089 /*
19090 * Must be relocatable PIC code callable as a C function
19091 @@ -167,6 +168,7 @@ identity_mapped:
19092 xorq %r14, %r14
19093 xorq %r15, %r15
19094
19095 + pax_force_retaddr 0, 1
19096 ret
19097
19098 1:
19099 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
19100 index 5449a26..0b6c759 100644
19101 --- a/arch/x86/kernel/setup.c
19102 +++ b/arch/x86/kernel/setup.c
19103 @@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
19104
19105 if (!boot_params.hdr.root_flags)
19106 root_mountflags &= ~MS_RDONLY;
19107 - init_mm.start_code = (unsigned long) _text;
19108 - init_mm.end_code = (unsigned long) _etext;
19109 + init_mm.start_code = ktla_ktva((unsigned long) _text);
19110 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
19111 init_mm.end_data = (unsigned long) _edata;
19112 init_mm.brk = _brk_end;
19113
19114 - code_resource.start = virt_to_phys(_text);
19115 - code_resource.end = virt_to_phys(_etext)-1;
19116 - data_resource.start = virt_to_phys(_etext);
19117 + code_resource.start = virt_to_phys(ktla_ktva(_text));
19118 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
19119 + data_resource.start = virt_to_phys(_sdata);
19120 data_resource.end = virt_to_phys(_edata)-1;
19121 bss_resource.start = virt_to_phys(&__bss_start);
19122 bss_resource.end = virt_to_phys(&__bss_stop)-1;
19123 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
19124 index d559af9..524c6ad 100644
19125 --- a/arch/x86/kernel/setup_percpu.c
19126 +++ b/arch/x86/kernel/setup_percpu.c
19127 @@ -25,19 +25,17 @@
19128 # define DBG(x...)
19129 #endif
19130
19131 -DEFINE_PER_CPU(int, cpu_number);
19132 +#ifdef CONFIG_SMP
19133 +DEFINE_PER_CPU(unsigned int, cpu_number);
19134 EXPORT_PER_CPU_SYMBOL(cpu_number);
19135 +#endif
19136
19137 -#ifdef CONFIG_X86_64
19138 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
19139 -#else
19140 -#define BOOT_PERCPU_OFFSET 0
19141 -#endif
19142
19143 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
19144 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
19145
19146 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
19147 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
19148 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
19149 };
19150 EXPORT_SYMBOL(__per_cpu_offset);
19151 @@ -159,10 +157,10 @@ static inline void setup_percpu_segment(int cpu)
19152 {
19153 #ifdef CONFIG_X86_32
19154 struct desc_struct gdt;
19155 + unsigned long base = per_cpu_offset(cpu);
19156
19157 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
19158 - 0x2 | DESCTYPE_S, 0x8);
19159 - gdt.s = 1;
19160 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
19161 + 0x83 | DESCTYPE_S, 0xC);
19162 write_gdt_entry(get_cpu_gdt_table(cpu),
19163 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
19164 #endif
19165 @@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
19166 /* alrighty, percpu areas up and running */
19167 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
19168 for_each_possible_cpu(cpu) {
19169 +#ifdef CONFIG_CC_STACKPROTECTOR
19170 +#ifdef CONFIG_X86_32
19171 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
19172 +#endif
19173 +#endif
19174 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
19175 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
19176 per_cpu(cpu_number, cpu) = cpu;
19177 @@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
19178 early_per_cpu_map(x86_cpu_to_node_map, cpu);
19179 #endif
19180 #endif
19181 +#ifdef CONFIG_CC_STACKPROTECTOR
19182 +#ifdef CONFIG_X86_32
19183 + if (!cpu)
19184 + per_cpu(stack_canary.canary, cpu) = canary;
19185 +#endif
19186 +#endif
19187 /*
19188 * Up to this point, the boot CPU has been using .data.init
19189 * area. Reload any changed state for the boot CPU.
19190 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
19191 index 6a44a76..a9287a1 100644
19192 --- a/arch/x86/kernel/signal.c
19193 +++ b/arch/x86/kernel/signal.c
19194 @@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsigned long sp)
19195 * Align the stack pointer according to the i386 ABI,
19196 * i.e. so that on function entry ((sp + 4) & 15) == 0.
19197 */
19198 - sp = ((sp + 4) & -16ul) - 4;
19199 + sp = ((sp - 12) & -16ul) - 4;
19200 #else /* !CONFIG_X86_32 */
19201 sp = round_down(sp, 16) - 8;
19202 #endif
19203 @@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
19204 * Return an always-bogus address instead so we will die with SIGSEGV.
19205 */
19206 if (onsigstack && !likely(on_sig_stack(sp)))
19207 - return (void __user *)-1L;
19208 + return (__force void __user *)-1L;
19209
19210 /* save i387 state */
19211 if (used_math() && save_i387_xstate(*fpstate) < 0)
19212 - return (void __user *)-1L;
19213 + return (__force void __user *)-1L;
19214
19215 return (void __user *)sp;
19216 }
19217 @@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19218 }
19219
19220 if (current->mm->context.vdso)
19221 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19222 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19223 else
19224 - restorer = &frame->retcode;
19225 + restorer = (void __user *)&frame->retcode;
19226 if (ka->sa.sa_flags & SA_RESTORER)
19227 restorer = ka->sa.sa_restorer;
19228
19229 @@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19230 * reasons and because gdb uses it as a signature to notice
19231 * signal handler stack frames.
19232 */
19233 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
19234 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
19235
19236 if (err)
19237 return -EFAULT;
19238 @@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19239 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
19240
19241 /* Set up to return from userspace. */
19242 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19243 + if (current->mm->context.vdso)
19244 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19245 + else
19246 + restorer = (void __user *)&frame->retcode;
19247 if (ka->sa.sa_flags & SA_RESTORER)
19248 restorer = ka->sa.sa_restorer;
19249 put_user_ex(restorer, &frame->pretcode);
19250 @@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19251 * reasons and because gdb uses it as a signature to notice
19252 * signal handler stack frames.
19253 */
19254 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
19255 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
19256 } put_user_catch(err);
19257
19258 if (err)
19259 @@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *regs)
19260 int signr;
19261 sigset_t *oldset;
19262
19263 + pax_track_stack();
19264 +
19265 /*
19266 * We want the common case to go fast, which is why we may in certain
19267 * cases get here from kernel mode. Just return without doing anything
19268 @@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *regs)
19269 * X86_32: vm86 regs switched out by assembly code before reaching
19270 * here, so testing against kernel CS suffices.
19271 */
19272 - if (!user_mode(regs))
19273 + if (!user_mode_novm(regs))
19274 return;
19275
19276 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
19277 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
19278 index 7e8e905..64d5c32 100644
19279 --- a/arch/x86/kernel/smpboot.c
19280 +++ b/arch/x86/kernel/smpboot.c
19281 @@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
19282 */
19283 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
19284
19285 -void cpu_hotplug_driver_lock()
19286 +void cpu_hotplug_driver_lock(void)
19287 {
19288 - mutex_lock(&x86_cpu_hotplug_driver_mutex);
19289 + mutex_lock(&x86_cpu_hotplug_driver_mutex);
19290 }
19291
19292 -void cpu_hotplug_driver_unlock()
19293 +void cpu_hotplug_driver_unlock(void)
19294 {
19295 - mutex_unlock(&x86_cpu_hotplug_driver_mutex);
19296 + mutex_unlock(&x86_cpu_hotplug_driver_mutex);
19297 }
19298
19299 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
19300 @@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
19301 * target processor state.
19302 */
19303 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
19304 - (unsigned long)stack_start.sp);
19305 + stack_start);
19306
19307 /*
19308 * Run STARTUP IPI loop.
19309 @@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
19310 set_idle_for_cpu(cpu, c_idle.idle);
19311 do_rest:
19312 per_cpu(current_task, cpu) = c_idle.idle;
19313 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
19314 #ifdef CONFIG_X86_32
19315 /* Stack for startup_32 can be just as for start_secondary onwards */
19316 irq_ctx_init(cpu);
19317 @@ -750,13 +751,15 @@ do_rest:
19318 #else
19319 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
19320 initial_gs = per_cpu_offset(cpu);
19321 - per_cpu(kernel_stack, cpu) =
19322 - (unsigned long)task_stack_page(c_idle.idle) -
19323 - KERNEL_STACK_OFFSET + THREAD_SIZE;
19324 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
19325 #endif
19326 +
19327 + pax_open_kernel();
19328 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
19329 + pax_close_kernel();
19330 +
19331 initial_code = (unsigned long)start_secondary;
19332 - stack_start.sp = (void *) c_idle.idle->thread.sp;
19333 + stack_start = c_idle.idle->thread.sp;
19334
19335 /* start_ip had better be page-aligned! */
19336 start_ip = setup_trampoline();
19337 @@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
19338
19339 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
19340
19341 +#ifdef CONFIG_PAX_PER_CPU_PGD
19342 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
19343 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19344 + KERNEL_PGD_PTRS);
19345 +#endif
19346 +
19347 err = do_boot_cpu(apicid, cpu);
19348
19349 if (err) {
19350 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
19351 index 3149032..14f1053 100644
19352 --- a/arch/x86/kernel/step.c
19353 +++ b/arch/x86/kernel/step.c
19354 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19355 struct desc_struct *desc;
19356 unsigned long base;
19357
19358 - seg &= ~7UL;
19359 + seg >>= 3;
19360
19361 mutex_lock(&child->mm->context.lock);
19362 - if (unlikely((seg >> 3) >= child->mm->context.size))
19363 + if (unlikely(seg >= child->mm->context.size))
19364 addr = -1L; /* bogus selector, access would fault */
19365 else {
19366 desc = child->mm->context.ldt + seg;
19367 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19368 addr += base;
19369 }
19370 mutex_unlock(&child->mm->context.lock);
19371 - }
19372 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
19373 + addr = ktla_ktva(addr);
19374
19375 return addr;
19376 }
19377 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19378 unsigned char opcode[15];
19379 unsigned long addr = convert_ip_to_linear(child, regs);
19380
19381 + if (addr == -EINVAL)
19382 + return 0;
19383 +
19384 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
19385 for (i = 0; i < copied; i++) {
19386 switch (opcode[i]) {
19387 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19388
19389 #ifdef CONFIG_X86_64
19390 case 0x40 ... 0x4f:
19391 - if (regs->cs != __USER_CS)
19392 + if ((regs->cs & 0xffff) != __USER_CS)
19393 /* 32-bit mode: register increment */
19394 return 0;
19395 /* 64-bit mode: REX prefix */
19396 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
19397 index dee1ff7..a397f7f 100644
19398 --- a/arch/x86/kernel/sys_i386_32.c
19399 +++ b/arch/x86/kernel/sys_i386_32.c
19400 @@ -24,6 +24,21 @@
19401
19402 #include <asm/syscalls.h>
19403
19404 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
19405 +{
19406 + unsigned long pax_task_size = TASK_SIZE;
19407 +
19408 +#ifdef CONFIG_PAX_SEGMEXEC
19409 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
19410 + pax_task_size = SEGMEXEC_TASK_SIZE;
19411 +#endif
19412 +
19413 + if (len > pax_task_size || addr > pax_task_size - len)
19414 + return -EINVAL;
19415 +
19416 + return 0;
19417 +}
19418 +
19419 /*
19420 * Perform the select(nd, in, out, ex, tv) and mmap() system
19421 * calls. Linux/i386 didn't use to be able to handle more than
19422 @@ -58,6 +73,212 @@ out:
19423 return err;
19424 }
19425
19426 +unsigned long
19427 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
19428 + unsigned long len, unsigned long pgoff, unsigned long flags)
19429 +{
19430 + struct mm_struct *mm = current->mm;
19431 + struct vm_area_struct *vma;
19432 + unsigned long start_addr, pax_task_size = TASK_SIZE;
19433 +
19434 +#ifdef CONFIG_PAX_SEGMEXEC
19435 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19436 + pax_task_size = SEGMEXEC_TASK_SIZE;
19437 +#endif
19438 +
19439 + pax_task_size -= PAGE_SIZE;
19440 +
19441 + if (len > pax_task_size)
19442 + return -ENOMEM;
19443 +
19444 + if (flags & MAP_FIXED)
19445 + return addr;
19446 +
19447 +#ifdef CONFIG_PAX_RANDMMAP
19448 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19449 +#endif
19450 +
19451 + if (addr) {
19452 + addr = PAGE_ALIGN(addr);
19453 + if (pax_task_size - len >= addr) {
19454 + vma = find_vma(mm, addr);
19455 + if (check_heap_stack_gap(vma, addr, len))
19456 + return addr;
19457 + }
19458 + }
19459 + if (len > mm->cached_hole_size) {
19460 + start_addr = addr = mm->free_area_cache;
19461 + } else {
19462 + start_addr = addr = mm->mmap_base;
19463 + mm->cached_hole_size = 0;
19464 + }
19465 +
19466 +#ifdef CONFIG_PAX_PAGEEXEC
19467 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
19468 + start_addr = 0x00110000UL;
19469 +
19470 +#ifdef CONFIG_PAX_RANDMMAP
19471 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19472 + start_addr += mm->delta_mmap & 0x03FFF000UL;
19473 +#endif
19474 +
19475 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
19476 + start_addr = addr = mm->mmap_base;
19477 + else
19478 + addr = start_addr;
19479 + }
19480 +#endif
19481 +
19482 +full_search:
19483 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19484 + /* At this point: (!vma || addr < vma->vm_end). */
19485 + if (pax_task_size - len < addr) {
19486 + /*
19487 + * Start a new search - just in case we missed
19488 + * some holes.
19489 + */
19490 + if (start_addr != mm->mmap_base) {
19491 + start_addr = addr = mm->mmap_base;
19492 + mm->cached_hole_size = 0;
19493 + goto full_search;
19494 + }
19495 + return -ENOMEM;
19496 + }
19497 + if (check_heap_stack_gap(vma, addr, len))
19498 + break;
19499 + if (addr + mm->cached_hole_size < vma->vm_start)
19500 + mm->cached_hole_size = vma->vm_start - addr;
19501 + addr = vma->vm_end;
19502 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
19503 + start_addr = addr = mm->mmap_base;
19504 + mm->cached_hole_size = 0;
19505 + goto full_search;
19506 + }
19507 + }
19508 +
19509 + /*
19510 + * Remember the place where we stopped the search:
19511 + */
19512 + mm->free_area_cache = addr + len;
19513 + return addr;
19514 +}
19515 +
19516 +unsigned long
19517 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19518 + const unsigned long len, const unsigned long pgoff,
19519 + const unsigned long flags)
19520 +{
19521 + struct vm_area_struct *vma;
19522 + struct mm_struct *mm = current->mm;
19523 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
19524 +
19525 +#ifdef CONFIG_PAX_SEGMEXEC
19526 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19527 + pax_task_size = SEGMEXEC_TASK_SIZE;
19528 +#endif
19529 +
19530 + pax_task_size -= PAGE_SIZE;
19531 +
19532 + /* requested length too big for entire address space */
19533 + if (len > pax_task_size)
19534 + return -ENOMEM;
19535 +
19536 + if (flags & MAP_FIXED)
19537 + return addr;
19538 +
19539 +#ifdef CONFIG_PAX_PAGEEXEC
19540 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
19541 + goto bottomup;
19542 +#endif
19543 +
19544 +#ifdef CONFIG_PAX_RANDMMAP
19545 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19546 +#endif
19547 +
19548 + /* requesting a specific address */
19549 + if (addr) {
19550 + addr = PAGE_ALIGN(addr);
19551 + if (pax_task_size - len >= addr) {
19552 + vma = find_vma(mm, addr);
19553 + if (check_heap_stack_gap(vma, addr, len))
19554 + return addr;
19555 + }
19556 + }
19557 +
19558 + /* check if free_area_cache is useful for us */
19559 + if (len <= mm->cached_hole_size) {
19560 + mm->cached_hole_size = 0;
19561 + mm->free_area_cache = mm->mmap_base;
19562 + }
19563 +
19564 + /* either no address requested or can't fit in requested address hole */
19565 + addr = mm->free_area_cache;
19566 +
19567 + /* make sure it can fit in the remaining address space */
19568 + if (addr > len) {
19569 + vma = find_vma(mm, addr-len);
19570 + if (check_heap_stack_gap(vma, addr - len, len))
19571 + /* remember the address as a hint for next time */
19572 + return (mm->free_area_cache = addr-len);
19573 + }
19574 +
19575 + if (mm->mmap_base < len)
19576 + goto bottomup;
19577 +
19578 + addr = mm->mmap_base-len;
19579 +
19580 + do {
19581 + /*
19582 + * Lookup failure means no vma is above this address,
19583 + * else if new region fits below vma->vm_start,
19584 + * return with success:
19585 + */
19586 + vma = find_vma(mm, addr);
19587 + if (check_heap_stack_gap(vma, addr, len))
19588 + /* remember the address as a hint for next time */
19589 + return (mm->free_area_cache = addr);
19590 +
19591 + /* remember the largest hole we saw so far */
19592 + if (addr + mm->cached_hole_size < vma->vm_start)
19593 + mm->cached_hole_size = vma->vm_start - addr;
19594 +
19595 + /* try just below the current vma->vm_start */
19596 + addr = skip_heap_stack_gap(vma, len);
19597 + } while (!IS_ERR_VALUE(addr));
19598 +
19599 +bottomup:
19600 + /*
19601 + * A failed mmap() very likely causes application failure,
19602 + * so fall back to the bottom-up function here. This scenario
19603 + * can happen with large stack limits and large mmap()
19604 + * allocations.
19605 + */
19606 +
19607 +#ifdef CONFIG_PAX_SEGMEXEC
19608 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19609 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19610 + else
19611 +#endif
19612 +
19613 + mm->mmap_base = TASK_UNMAPPED_BASE;
19614 +
19615 +#ifdef CONFIG_PAX_RANDMMAP
19616 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19617 + mm->mmap_base += mm->delta_mmap;
19618 +#endif
19619 +
19620 + mm->free_area_cache = mm->mmap_base;
19621 + mm->cached_hole_size = ~0UL;
19622 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19623 + /*
19624 + * Restore the topdown base:
19625 + */
19626 + mm->mmap_base = base;
19627 + mm->free_area_cache = base;
19628 + mm->cached_hole_size = ~0UL;
19629 +
19630 + return addr;
19631 +}
19632
19633 struct sel_arg_struct {
19634 unsigned long n;
19635 @@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
19636 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
19637 case SEMTIMEDOP:
19638 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
19639 - (const struct timespec __user *)fifth);
19640 + (__force const struct timespec __user *)fifth);
19641
19642 case SEMGET:
19643 return sys_semget(first, second, third);
19644 @@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
19645 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
19646 if (ret)
19647 return ret;
19648 - return put_user(raddr, (ulong __user *) third);
19649 + return put_user(raddr, (__force ulong __user *) third);
19650 }
19651 case 1: /* iBCS2 emulator entry point */
19652 if (!segment_eq(get_fs(), get_ds()))
19653 @@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldold_utsname __user *name)
19654
19655 return error;
19656 }
19657 -
19658 -
19659 -/*
19660 - * Do a system call from kernel instead of calling sys_execve so we
19661 - * end up with proper pt_regs.
19662 - */
19663 -int kernel_execve(const char *filename, char *const argv[], char *const envp[])
19664 -{
19665 - long __res;
19666 - asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
19667 - : "=a" (__res)
19668 - : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
19669 - return __res;
19670 -}
19671 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
19672 index 8aa2057..b604bc1 100644
19673 --- a/arch/x86/kernel/sys_x86_64.c
19674 +++ b/arch/x86/kernel/sys_x86_64.c
19675 @@ -32,8 +32,8 @@ out:
19676 return error;
19677 }
19678
19679 -static void find_start_end(unsigned long flags, unsigned long *begin,
19680 - unsigned long *end)
19681 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
19682 + unsigned long *begin, unsigned long *end)
19683 {
19684 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
19685 unsigned long new_begin;
19686 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
19687 *begin = new_begin;
19688 }
19689 } else {
19690 - *begin = TASK_UNMAPPED_BASE;
19691 + *begin = mm->mmap_base;
19692 *end = TASK_SIZE;
19693 }
19694 }
19695 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
19696 if (flags & MAP_FIXED)
19697 return addr;
19698
19699 - find_start_end(flags, &begin, &end);
19700 + find_start_end(mm, flags, &begin, &end);
19701
19702 if (len > end)
19703 return -ENOMEM;
19704
19705 +#ifdef CONFIG_PAX_RANDMMAP
19706 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19707 +#endif
19708 +
19709 if (addr) {
19710 addr = PAGE_ALIGN(addr);
19711 vma = find_vma(mm, addr);
19712 - if (end - len >= addr &&
19713 - (!vma || addr + len <= vma->vm_start))
19714 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
19715 return addr;
19716 }
19717 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
19718 @@ -106,7 +109,7 @@ full_search:
19719 }
19720 return -ENOMEM;
19721 }
19722 - if (!vma || addr + len <= vma->vm_start) {
19723 + if (check_heap_stack_gap(vma, addr, len)) {
19724 /*
19725 * Remember the place where we stopped the search:
19726 */
19727 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19728 {
19729 struct vm_area_struct *vma;
19730 struct mm_struct *mm = current->mm;
19731 - unsigned long addr = addr0;
19732 + unsigned long base = mm->mmap_base, addr = addr0;
19733
19734 /* requested length too big for entire address space */
19735 if (len > TASK_SIZE)
19736 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19737 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
19738 goto bottomup;
19739
19740 +#ifdef CONFIG_PAX_RANDMMAP
19741 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19742 +#endif
19743 +
19744 /* requesting a specific address */
19745 if (addr) {
19746 addr = PAGE_ALIGN(addr);
19747 - vma = find_vma(mm, addr);
19748 - if (TASK_SIZE - len >= addr &&
19749 - (!vma || addr + len <= vma->vm_start))
19750 - return addr;
19751 + if (TASK_SIZE - len >= addr) {
19752 + vma = find_vma(mm, addr);
19753 + if (check_heap_stack_gap(vma, addr, len))
19754 + return addr;
19755 + }
19756 }
19757
19758 /* check if free_area_cache is useful for us */
19759 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19760 /* make sure it can fit in the remaining address space */
19761 if (addr > len) {
19762 vma = find_vma(mm, addr-len);
19763 - if (!vma || addr <= vma->vm_start)
19764 + if (check_heap_stack_gap(vma, addr - len, len))
19765 /* remember the address as a hint for next time */
19766 return mm->free_area_cache = addr-len;
19767 }
19768 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19769 * return with success:
19770 */
19771 vma = find_vma(mm, addr);
19772 - if (!vma || addr+len <= vma->vm_start)
19773 + if (check_heap_stack_gap(vma, addr, len))
19774 /* remember the address as a hint for next time */
19775 return mm->free_area_cache = addr;
19776
19777 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19778 mm->cached_hole_size = vma->vm_start - addr;
19779
19780 /* try just below the current vma->vm_start */
19781 - addr = vma->vm_start-len;
19782 - } while (len < vma->vm_start);
19783 + addr = skip_heap_stack_gap(vma, len);
19784 + } while (!IS_ERR_VALUE(addr));
19785
19786 bottomup:
19787 /*
19788 @@ -198,13 +206,21 @@ bottomup:
19789 * can happen with large stack limits and large mmap()
19790 * allocations.
19791 */
19792 + mm->mmap_base = TASK_UNMAPPED_BASE;
19793 +
19794 +#ifdef CONFIG_PAX_RANDMMAP
19795 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19796 + mm->mmap_base += mm->delta_mmap;
19797 +#endif
19798 +
19799 + mm->free_area_cache = mm->mmap_base;
19800 mm->cached_hole_size = ~0UL;
19801 - mm->free_area_cache = TASK_UNMAPPED_BASE;
19802 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19803 /*
19804 * Restore the topdown base:
19805 */
19806 - mm->free_area_cache = mm->mmap_base;
19807 + mm->mmap_base = base;
19808 + mm->free_area_cache = base;
19809 mm->cached_hole_size = ~0UL;
19810
19811 return addr;
19812 diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
19813 index 76d70a4..4c94a44 100644
19814 --- a/arch/x86/kernel/syscall_table_32.S
19815 +++ b/arch/x86/kernel/syscall_table_32.S
19816 @@ -1,3 +1,4 @@
19817 +.section .rodata,"a",@progbits
19818 ENTRY(sys_call_table)
19819 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
19820 .long sys_exit
19821 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
19822 index 46b8277..3349d55 100644
19823 --- a/arch/x86/kernel/tboot.c
19824 +++ b/arch/x86/kernel/tboot.c
19825 @@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
19826
19827 void tboot_shutdown(u32 shutdown_type)
19828 {
19829 - void (*shutdown)(void);
19830 + void (* __noreturn shutdown)(void);
19831
19832 if (!tboot_enabled())
19833 return;
19834 @@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
19835
19836 switch_to_tboot_pt();
19837
19838 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
19839 + shutdown = (void *)tboot->shutdown_entry;
19840 shutdown();
19841
19842 /* should not reach here */
19843 @@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
19844 tboot_shutdown(acpi_shutdown_map[sleep_state]);
19845 }
19846
19847 -static atomic_t ap_wfs_count;
19848 +static atomic_unchecked_t ap_wfs_count;
19849
19850 static int tboot_wait_for_aps(int num_aps)
19851 {
19852 @@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
19853 {
19854 switch (action) {
19855 case CPU_DYING:
19856 - atomic_inc(&ap_wfs_count);
19857 + atomic_inc_unchecked(&ap_wfs_count);
19858 if (num_online_cpus() == 1)
19859 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
19860 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
19861 return NOTIFY_BAD;
19862 break;
19863 }
19864 @@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
19865
19866 tboot_create_trampoline();
19867
19868 - atomic_set(&ap_wfs_count, 0);
19869 + atomic_set_unchecked(&ap_wfs_count, 0);
19870 register_hotcpu_notifier(&tboot_cpu_notifier);
19871 return 0;
19872 }
19873 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
19874 index be25734..87fe232 100644
19875 --- a/arch/x86/kernel/time.c
19876 +++ b/arch/x86/kernel/time.c
19877 @@ -26,17 +26,13 @@
19878 int timer_ack;
19879 #endif
19880
19881 -#ifdef CONFIG_X86_64
19882 -volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
19883 -#endif
19884 -
19885 unsigned long profile_pc(struct pt_regs *regs)
19886 {
19887 unsigned long pc = instruction_pointer(regs);
19888
19889 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
19890 + if (!user_mode(regs) && in_lock_functions(pc)) {
19891 #ifdef CONFIG_FRAME_POINTER
19892 - return *(unsigned long *)(regs->bp + sizeof(long));
19893 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
19894 #else
19895 unsigned long *sp =
19896 (unsigned long *)kernel_stack_pointer(regs);
19897 @@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
19898 * or above a saved flags. Eflags has bits 22-31 zero,
19899 * kernel addresses don't.
19900 */
19901 +
19902 +#ifdef CONFIG_PAX_KERNEXEC
19903 + return ktla_ktva(sp[0]);
19904 +#else
19905 if (sp[0] >> 22)
19906 return sp[0];
19907 if (sp[1] >> 22)
19908 return sp[1];
19909 #endif
19910 +
19911 +#endif
19912 }
19913 return pc;
19914 }
19915 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
19916 index 6bb7b85..dd853e1 100644
19917 --- a/arch/x86/kernel/tls.c
19918 +++ b/arch/x86/kernel/tls.c
19919 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
19920 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
19921 return -EINVAL;
19922
19923 +#ifdef CONFIG_PAX_SEGMEXEC
19924 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
19925 + return -EINVAL;
19926 +#endif
19927 +
19928 set_tls_desc(p, idx, &info, 1);
19929
19930 return 0;
19931 diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
19932 index 8508237..229b664 100644
19933 --- a/arch/x86/kernel/trampoline_32.S
19934 +++ b/arch/x86/kernel/trampoline_32.S
19935 @@ -32,6 +32,12 @@
19936 #include <asm/segment.h>
19937 #include <asm/page_types.h>
19938
19939 +#ifdef CONFIG_PAX_KERNEXEC
19940 +#define ta(X) (X)
19941 +#else
19942 +#define ta(X) ((X) - __PAGE_OFFSET)
19943 +#endif
19944 +
19945 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
19946 __CPUINITRODATA
19947 .code16
19948 @@ -60,7 +66,7 @@ r_base = .
19949 inc %ax # protected mode (PE) bit
19950 lmsw %ax # into protected mode
19951 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
19952 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
19953 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
19954
19955 # These need to be in the same 64K segment as the above;
19956 # hence we don't use the boot_gdt_descr defined in head.S
19957 diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
19958 index 3af2dff..ba8aa49 100644
19959 --- a/arch/x86/kernel/trampoline_64.S
19960 +++ b/arch/x86/kernel/trampoline_64.S
19961 @@ -91,7 +91,7 @@ startup_32:
19962 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
19963 movl %eax, %ds
19964
19965 - movl $X86_CR4_PAE, %eax
19966 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
19967 movl %eax, %cr4 # Enable PAE mode
19968
19969 # Setup trampoline 4 level pagetables
19970 @@ -127,7 +127,7 @@ startup_64:
19971 no_longmode:
19972 hlt
19973 jmp no_longmode
19974 -#include "verify_cpu_64.S"
19975 +#include "verify_cpu.S"
19976
19977 # Careful these need to be in the same 64K segment as the above;
19978 tidt:
19979 @@ -138,7 +138,7 @@ tidt:
19980 # so the kernel can live anywhere
19981 .balign 4
19982 tgdt:
19983 - .short tgdt_end - tgdt # gdt limit
19984 + .short tgdt_end - tgdt - 1 # gdt limit
19985 .long tgdt - r_base
19986 .short 0
19987 .quad 0x00cf9b000000ffff # __KERNEL32_CS
19988 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
19989 index 7e37dce..ec3f8e5 100644
19990 --- a/arch/x86/kernel/traps.c
19991 +++ b/arch/x86/kernel/traps.c
19992 @@ -69,12 +69,6 @@ asmlinkage int system_call(void);
19993
19994 /* Do we ignore FPU interrupts ? */
19995 char ignore_fpu_irq;
19996 -
19997 -/*
19998 - * The IDT has to be page-aligned to simplify the Pentium
19999 - * F0 0F bug workaround.
20000 - */
20001 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
20002 #endif
20003
20004 DECLARE_BITMAP(used_vectors, NR_VECTORS);
20005 @@ -112,19 +106,19 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
20006 static inline void
20007 die_if_kernel(const char *str, struct pt_regs *regs, long err)
20008 {
20009 - if (!user_mode_vm(regs))
20010 + if (!user_mode(regs))
20011 die(str, regs, err);
20012 }
20013 #endif
20014
20015 static void __kprobes
20016 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
20017 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
20018 long error_code, siginfo_t *info)
20019 {
20020 struct task_struct *tsk = current;
20021
20022 #ifdef CONFIG_X86_32
20023 - if (regs->flags & X86_VM_MASK) {
20024 + if (v8086_mode(regs)) {
20025 /*
20026 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
20027 * On nmi (interrupt 2), do_trap should not be called.
20028 @@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
20029 }
20030 #endif
20031
20032 - if (!user_mode(regs))
20033 + if (!user_mode_novm(regs))
20034 goto kernel_trap;
20035
20036 #ifdef CONFIG_X86_32
20037 @@ -158,7 +152,7 @@ trap_signal:
20038 printk_ratelimit()) {
20039 printk(KERN_INFO
20040 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
20041 - tsk->comm, tsk->pid, str,
20042 + tsk->comm, task_pid_nr(tsk), str,
20043 regs->ip, regs->sp, error_code);
20044 print_vma_addr(" in ", regs->ip);
20045 printk("\n");
20046 @@ -175,8 +169,20 @@ kernel_trap:
20047 if (!fixup_exception(regs)) {
20048 tsk->thread.error_code = error_code;
20049 tsk->thread.trap_no = trapnr;
20050 +
20051 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20052 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
20053 + str = "PAX: suspicious stack segment fault";
20054 +#endif
20055 +
20056 die(str, regs, error_code);
20057 }
20058 +
20059 +#ifdef CONFIG_PAX_REFCOUNT
20060 + if (trapnr == 4)
20061 + pax_report_refcount_overflow(regs);
20062 +#endif
20063 +
20064 return;
20065
20066 #ifdef CONFIG_X86_32
20067 @@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
20068 conditional_sti(regs);
20069
20070 #ifdef CONFIG_X86_32
20071 - if (regs->flags & X86_VM_MASK)
20072 + if (v8086_mode(regs))
20073 goto gp_in_vm86;
20074 #endif
20075
20076 tsk = current;
20077 - if (!user_mode(regs))
20078 + if (!user_mode_novm(regs))
20079 goto gp_in_kernel;
20080
20081 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20082 + if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
20083 + struct mm_struct *mm = tsk->mm;
20084 + unsigned long limit;
20085 +
20086 + down_write(&mm->mmap_sem);
20087 + limit = mm->context.user_cs_limit;
20088 + if (limit < TASK_SIZE) {
20089 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
20090 + up_write(&mm->mmap_sem);
20091 + return;
20092 + }
20093 + up_write(&mm->mmap_sem);
20094 + }
20095 +#endif
20096 +
20097 tsk->thread.error_code = error_code;
20098 tsk->thread.trap_no = 13;
20099
20100 @@ -305,6 +327,13 @@ gp_in_kernel:
20101 if (notify_die(DIE_GPF, "general protection fault", regs,
20102 error_code, 13, SIGSEGV) == NOTIFY_STOP)
20103 return;
20104 +
20105 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20106 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
20107 + die("PAX: suspicious general protection fault", regs, error_code);
20108 + else
20109 +#endif
20110 +
20111 die("general protection fault", regs, error_code);
20112 }
20113
20114 @@ -435,6 +464,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
20115 dotraplinkage notrace __kprobes void
20116 do_nmi(struct pt_regs *regs, long error_code)
20117 {
20118 +
20119 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20120 + if (!user_mode(regs)) {
20121 + unsigned long cs = regs->cs & 0xFFFF;
20122 + unsigned long ip = ktva_ktla(regs->ip);
20123 +
20124 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
20125 + regs->ip = ip;
20126 + }
20127 +#endif
20128 +
20129 nmi_enter();
20130
20131 inc_irq_stat(__nmi_count);
20132 @@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20133 }
20134
20135 #ifdef CONFIG_X86_32
20136 - if (regs->flags & X86_VM_MASK)
20137 + if (v8086_mode(regs))
20138 goto debug_vm86;
20139 #endif
20140
20141 @@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20142 * kernel space (but re-enable TF when returning to user mode).
20143 */
20144 if (condition & DR_STEP) {
20145 - if (!user_mode(regs))
20146 + if (!user_mode_novm(regs))
20147 goto clear_TF_reenable;
20148 }
20149
20150 @@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
20151 * Handle strange cache flush from user space exception
20152 * in all other cases. This is undocumented behaviour.
20153 */
20154 - if (regs->flags & X86_VM_MASK) {
20155 + if (v8086_mode(regs)) {
20156 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
20157 return;
20158 }
20159 @@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
20160 void __math_state_restore(void)
20161 {
20162 struct thread_info *thread = current_thread_info();
20163 - struct task_struct *tsk = thread->task;
20164 + struct task_struct *tsk = current;
20165
20166 /*
20167 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
20168 @@ -825,8 +865,7 @@ void __math_state_restore(void)
20169 */
20170 asmlinkage void math_state_restore(void)
20171 {
20172 - struct thread_info *thread = current_thread_info();
20173 - struct task_struct *tsk = thread->task;
20174 + struct task_struct *tsk = current;
20175
20176 if (!tsk_used_math(tsk)) {
20177 local_irq_enable();
20178 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
20179 new file mode 100644
20180 index 0000000..50c5edd
20181 --- /dev/null
20182 +++ b/arch/x86/kernel/verify_cpu.S
20183 @@ -0,0 +1,140 @@
20184 +/*
20185 + *
20186 + * verify_cpu.S - Code for cpu long mode and SSE verification. This
20187 + * code has been borrowed from boot/setup.S and was introduced by
20188 + * Andi Kleen.
20189 + *
20190 + * Copyright (c) 2007 Andi Kleen (ak@suse.de)
20191 + * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
20192 + * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
20193 + * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
20194 + *
20195 + * This source code is licensed under the GNU General Public License,
20196 + * Version 2. See the file COPYING for more details.
20197 + *
20198 + * This is a common code for verification whether CPU supports
20199 + * long mode and SSE or not. It is not called directly instead this
20200 + * file is included at various places and compiled in that context.
20201 + * This file is expected to run in 32bit code. Currently:
20202 + *
20203 + * arch/x86/boot/compressed/head_64.S: Boot cpu verification
20204 + * arch/x86/kernel/trampoline_64.S: secondary processor verification
20205 + * arch/x86/kernel/head_32.S: processor startup
20206 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
20207 + *
20208 + * verify_cpu, returns the status of longmode and SSE in register %eax.
20209 + * 0: Success 1: Failure
20210 + *
20211 + * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
20212 + *
20213 + * The caller needs to check for the error code and take the action
20214 + * appropriately. Either display a message or halt.
20215 + */
20216 +
20217 +#include <asm/cpufeature.h>
20218 +#include <asm/msr-index.h>
20219 +
20220 +verify_cpu:
20221 + pushfl # Save caller passed flags
20222 + pushl $0 # Kill any dangerous flags
20223 + popfl
20224 +
20225 + pushfl # standard way to check for cpuid
20226 + popl %eax
20227 + movl %eax,%ebx
20228 + xorl $0x200000,%eax
20229 + pushl %eax
20230 + popfl
20231 + pushfl
20232 + popl %eax
20233 + cmpl %eax,%ebx
20234 + jz verify_cpu_no_longmode # cpu has no cpuid
20235 +
20236 + movl $0x0,%eax # See if cpuid 1 is implemented
20237 + cpuid
20238 + cmpl $0x1,%eax
20239 + jb verify_cpu_no_longmode # no cpuid 1
20240 +
20241 + xor %di,%di
20242 + cmpl $0x68747541,%ebx # AuthenticAMD
20243 + jnz verify_cpu_noamd
20244 + cmpl $0x69746e65,%edx
20245 + jnz verify_cpu_noamd
20246 + cmpl $0x444d4163,%ecx
20247 + jnz verify_cpu_noamd
20248 + mov $1,%di # cpu is from AMD
20249 + jmp verify_cpu_check
20250 +
20251 +verify_cpu_noamd:
20252 + cmpl $0x756e6547,%ebx # GenuineIntel?
20253 + jnz verify_cpu_check
20254 + cmpl $0x49656e69,%edx
20255 + jnz verify_cpu_check
20256 + cmpl $0x6c65746e,%ecx
20257 + jnz verify_cpu_check
20258 +
20259 + # only call IA32_MISC_ENABLE when:
20260 + # family > 6 || (family == 6 && model >= 0xd)
20261 + movl $0x1, %eax # check CPU family and model
20262 + cpuid
20263 + movl %eax, %ecx
20264 +
20265 + andl $0x0ff00f00, %eax # mask family and extended family
20266 + shrl $8, %eax
20267 + cmpl $6, %eax
20268 + ja verify_cpu_clear_xd # family > 6, ok
20269 + jb verify_cpu_check # family < 6, skip
20270 +
20271 + andl $0x000f00f0, %ecx # mask model and extended model
20272 + shrl $4, %ecx
20273 + cmpl $0xd, %ecx
20274 + jb verify_cpu_check # family == 6, model < 0xd, skip
20275 +
20276 +verify_cpu_clear_xd:
20277 + movl $MSR_IA32_MISC_ENABLE, %ecx
20278 + rdmsr
20279 + btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
20280 + jnc verify_cpu_check # only write MSR if bit was changed
20281 + wrmsr
20282 +
20283 +verify_cpu_check:
20284 + movl $0x1,%eax # Does the cpu have what it takes
20285 + cpuid
20286 + andl $REQUIRED_MASK0,%edx
20287 + xorl $REQUIRED_MASK0,%edx
20288 + jnz verify_cpu_no_longmode
20289 +
20290 + movl $0x80000000,%eax # See if extended cpuid is implemented
20291 + cpuid
20292 + cmpl $0x80000001,%eax
20293 + jb verify_cpu_no_longmode # no extended cpuid
20294 +
20295 + movl $0x80000001,%eax # Does the cpu have what it takes
20296 + cpuid
20297 + andl $REQUIRED_MASK1,%edx
20298 + xorl $REQUIRED_MASK1,%edx
20299 + jnz verify_cpu_no_longmode
20300 +
20301 +verify_cpu_sse_test:
20302 + movl $1,%eax
20303 + cpuid
20304 + andl $SSE_MASK,%edx
20305 + cmpl $SSE_MASK,%edx
20306 + je verify_cpu_sse_ok
20307 + test %di,%di
20308 + jz verify_cpu_no_longmode # only try to force SSE on AMD
20309 + movl $MSR_K7_HWCR,%ecx
20310 + rdmsr
20311 + btr $15,%eax # enable SSE
20312 + wrmsr
20313 + xor %di,%di # don't loop
20314 + jmp verify_cpu_sse_test # try again
20315 +
20316 +verify_cpu_no_longmode:
20317 + popfl # Restore caller passed flags
20318 + movl $1,%eax
20319 + ret
20320 +verify_cpu_sse_ok:
20321 + popfl # Restore caller passed flags
20322 + xorl %eax, %eax
20323 + ret
20324 diff --git a/arch/x86/kernel/verify_cpu_64.S b/arch/x86/kernel/verify_cpu_64.S
20325 deleted file mode 100644
20326 index 45b6f8a..0000000
20327 --- a/arch/x86/kernel/verify_cpu_64.S
20328 +++ /dev/null
20329 @@ -1,105 +0,0 @@
20330 -/*
20331 - *
20332 - * verify_cpu.S - Code for cpu long mode and SSE verification. This
20333 - * code has been borrowed from boot/setup.S and was introduced by
20334 - * Andi Kleen.
20335 - *
20336 - * Copyright (c) 2007 Andi Kleen (ak@suse.de)
20337 - * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
20338 - * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
20339 - *
20340 - * This source code is licensed under the GNU General Public License,
20341 - * Version 2. See the file COPYING for more details.
20342 - *
20343 - * This is a common code for verification whether CPU supports
20344 - * long mode and SSE or not. It is not called directly instead this
20345 - * file is included at various places and compiled in that context.
20346 - * Following are the current usage.
20347 - *
20348 - * This file is included by both 16bit and 32bit code.
20349 - *
20350 - * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
20351 - * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
20352 - * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
20353 - * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
20354 - *
20355 - * verify_cpu, returns the status of cpu check in register %eax.
20356 - * 0: Success 1: Failure
20357 - *
20358 - * The caller needs to check for the error code and take the action
20359 - * appropriately. Either display a message or halt.
20360 - */
20361 -
20362 -#include <asm/cpufeature.h>
20363 -
20364 -verify_cpu:
20365 - pushfl # Save caller passed flags
20366 - pushl $0 # Kill any dangerous flags
20367 - popfl
20368 -
20369 - pushfl # standard way to check for cpuid
20370 - popl %eax
20371 - movl %eax,%ebx
20372 - xorl $0x200000,%eax
20373 - pushl %eax
20374 - popfl
20375 - pushfl
20376 - popl %eax
20377 - cmpl %eax,%ebx
20378 - jz verify_cpu_no_longmode # cpu has no cpuid
20379 -
20380 - movl $0x0,%eax # See if cpuid 1 is implemented
20381 - cpuid
20382 - cmpl $0x1,%eax
20383 - jb verify_cpu_no_longmode # no cpuid 1
20384 -
20385 - xor %di,%di
20386 - cmpl $0x68747541,%ebx # AuthenticAMD
20387 - jnz verify_cpu_noamd
20388 - cmpl $0x69746e65,%edx
20389 - jnz verify_cpu_noamd
20390 - cmpl $0x444d4163,%ecx
20391 - jnz verify_cpu_noamd
20392 - mov $1,%di # cpu is from AMD
20393 -
20394 -verify_cpu_noamd:
20395 - movl $0x1,%eax # Does the cpu have what it takes
20396 - cpuid
20397 - andl $REQUIRED_MASK0,%edx
20398 - xorl $REQUIRED_MASK0,%edx
20399 - jnz verify_cpu_no_longmode
20400 -
20401 - movl $0x80000000,%eax # See if extended cpuid is implemented
20402 - cpuid
20403 - cmpl $0x80000001,%eax
20404 - jb verify_cpu_no_longmode # no extended cpuid
20405 -
20406 - movl $0x80000001,%eax # Does the cpu have what it takes
20407 - cpuid
20408 - andl $REQUIRED_MASK1,%edx
20409 - xorl $REQUIRED_MASK1,%edx
20410 - jnz verify_cpu_no_longmode
20411 -
20412 -verify_cpu_sse_test:
20413 - movl $1,%eax
20414 - cpuid
20415 - andl $SSE_MASK,%edx
20416 - cmpl $SSE_MASK,%edx
20417 - je verify_cpu_sse_ok
20418 - test %di,%di
20419 - jz verify_cpu_no_longmode # only try to force SSE on AMD
20420 - movl $0xc0010015,%ecx # HWCR
20421 - rdmsr
20422 - btr $15,%eax # enable SSE
20423 - wrmsr
20424 - xor %di,%di # don't loop
20425 - jmp verify_cpu_sse_test # try again
20426 -
20427 -verify_cpu_no_longmode:
20428 - popfl # Restore caller passed flags
20429 - movl $1,%eax
20430 - ret
20431 -verify_cpu_sse_ok:
20432 - popfl # Restore caller passed flags
20433 - xorl %eax, %eax
20434 - ret
20435 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
20436 index 9c4e625..c992817 100644
20437 --- a/arch/x86/kernel/vm86_32.c
20438 +++ b/arch/x86/kernel/vm86_32.c
20439 @@ -41,6 +41,7 @@
20440 #include <linux/ptrace.h>
20441 #include <linux/audit.h>
20442 #include <linux/stddef.h>
20443 +#include <linux/grsecurity.h>
20444
20445 #include <asm/uaccess.h>
20446 #include <asm/io.h>
20447 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
20448 do_exit(SIGSEGV);
20449 }
20450
20451 - tss = &per_cpu(init_tss, get_cpu());
20452 + tss = init_tss + get_cpu();
20453 current->thread.sp0 = current->thread.saved_sp0;
20454 current->thread.sysenter_cs = __KERNEL_CS;
20455 load_sp0(tss, &current->thread);
20456 @@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
20457 struct task_struct *tsk;
20458 int tmp, ret = -EPERM;
20459
20460 +#ifdef CONFIG_GRKERNSEC_VM86
20461 + if (!capable(CAP_SYS_RAWIO)) {
20462 + gr_handle_vm86();
20463 + goto out;
20464 + }
20465 +#endif
20466 +
20467 tsk = current;
20468 if (tsk->thread.saved_sp0)
20469 goto out;
20470 @@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
20471 int tmp, ret;
20472 struct vm86plus_struct __user *v86;
20473
20474 +#ifdef CONFIG_GRKERNSEC_VM86
20475 + if (!capable(CAP_SYS_RAWIO)) {
20476 + gr_handle_vm86();
20477 + ret = -EPERM;
20478 + goto out;
20479 + }
20480 +#endif
20481 +
20482 tsk = current;
20483 switch (regs->bx) {
20484 case VM86_REQUEST_IRQ:
20485 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
20486 tsk->thread.saved_fs = info->regs32->fs;
20487 tsk->thread.saved_gs = get_user_gs(info->regs32);
20488
20489 - tss = &per_cpu(init_tss, get_cpu());
20490 + tss = init_tss + get_cpu();
20491 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
20492 if (cpu_has_sep)
20493 tsk->thread.sysenter_cs = 0;
20494 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
20495 goto cannot_handle;
20496 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
20497 goto cannot_handle;
20498 - intr_ptr = (unsigned long __user *) (i << 2);
20499 + intr_ptr = (__force unsigned long __user *) (i << 2);
20500 if (get_user(segoffs, intr_ptr))
20501 goto cannot_handle;
20502 if ((segoffs >> 16) == BIOSSEG)
20503 diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
20504 index d430e4c..831f817 100644
20505 --- a/arch/x86/kernel/vmi_32.c
20506 +++ b/arch/x86/kernel/vmi_32.c
20507 @@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
20508 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
20509
20510 #define call_vrom_func(rom,func) \
20511 - (((VROMFUNC *)(rom->func))())
20512 + (((VROMFUNC *)(ktva_ktla(rom.func)))())
20513
20514 #define call_vrom_long_func(rom,func,arg) \
20515 - (((VROMLONGFUNC *)(rom->func)) (arg))
20516 +({\
20517 + u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
20518 + struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
20519 + __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
20520 + __reloc;\
20521 +})
20522
20523 -static struct vrom_header *vmi_rom;
20524 +static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
20525 static int disable_pge;
20526 static int disable_pse;
20527 static int disable_sep;
20528 @@ -76,10 +81,10 @@ static struct {
20529 void (*set_initial_ap_state)(int, int);
20530 void (*halt)(void);
20531 void (*set_lazy_mode)(int mode);
20532 -} vmi_ops;
20533 +} __no_const vmi_ops __read_only;
20534
20535 /* Cached VMI operations */
20536 -struct vmi_timer_ops vmi_timer_ops;
20537 +struct vmi_timer_ops vmi_timer_ops __read_only;
20538
20539 /*
20540 * VMI patching routines.
20541 @@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
20542 static inline void patch_offset(void *insnbuf,
20543 unsigned long ip, unsigned long dest)
20544 {
20545 - *(unsigned long *)(insnbuf+1) = dest-ip-5;
20546 + *(unsigned long *)(insnbuf+1) = dest-ip-5;
20547 }
20548
20549 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
20550 @@ -102,6 +107,7 @@ static unsigned patch_internal(int call, unsigned len, void *insnbuf,
20551 {
20552 u64 reloc;
20553 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
20554 +
20555 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
20556 switch(rel->type) {
20557 case VMI_RELOCATION_CALL_REL:
20558 @@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud_t pudval)
20559
20560 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
20561 {
20562 - const pte_t pte = { .pte = 0 };
20563 + const pte_t pte = __pte(0ULL);
20564 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
20565 }
20566
20567 static void vmi_pmd_clear(pmd_t *pmd)
20568 {
20569 - const pte_t pte = { .pte = 0 };
20570 + const pte_t pte = __pte(0ULL);
20571 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
20572 }
20573 #endif
20574 @@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
20575 ap.ss = __KERNEL_DS;
20576 ap.esp = (unsigned long) start_esp;
20577
20578 - ap.ds = __USER_DS;
20579 - ap.es = __USER_DS;
20580 + ap.ds = __KERNEL_DS;
20581 + ap.es = __KERNEL_DS;
20582 ap.fs = __KERNEL_PERCPU;
20583 - ap.gs = __KERNEL_STACK_CANARY;
20584 + savesegment(gs, ap.gs);
20585
20586 ap.eflags = 0;
20587
20588 @@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
20589 paravirt_leave_lazy_mmu();
20590 }
20591
20592 +#ifdef CONFIG_PAX_KERNEXEC
20593 +static unsigned long vmi_pax_open_kernel(void)
20594 +{
20595 + return 0;
20596 +}
20597 +
20598 +static unsigned long vmi_pax_close_kernel(void)
20599 +{
20600 + return 0;
20601 +}
20602 +#endif
20603 +
20604 static inline int __init check_vmi_rom(struct vrom_header *rom)
20605 {
20606 struct pci_header *pci;
20607 @@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(struct vrom_header *rom)
20608 return 0;
20609 if (rom->vrom_signature != VMI_SIGNATURE)
20610 return 0;
20611 + if (rom->rom_length * 512 > sizeof(*rom)) {
20612 + printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
20613 + return 0;
20614 + }
20615 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
20616 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
20617 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
20618 @@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(void)
20619 struct vrom_header *romstart;
20620 romstart = (struct vrom_header *)isa_bus_to_virt(base);
20621 if (check_vmi_rom(romstart)) {
20622 - vmi_rom = romstart;
20623 + vmi_rom = *romstart;
20624 return 1;
20625 }
20626 }
20627 @@ -836,6 +858,11 @@ static inline int __init activate_vmi(void)
20628
20629 para_fill(pv_irq_ops.safe_halt, Halt);
20630
20631 +#ifdef CONFIG_PAX_KERNEXEC
20632 + pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
20633 + pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
20634 +#endif
20635 +
20636 /*
20637 * Alternative instruction rewriting doesn't happen soon enough
20638 * to convert VMI_IRET to a call instead of a jump; so we have
20639 @@ -853,16 +880,16 @@ static inline int __init activate_vmi(void)
20640
20641 void __init vmi_init(void)
20642 {
20643 - if (!vmi_rom)
20644 + if (!vmi_rom.rom_signature)
20645 probe_vmi_rom();
20646 else
20647 - check_vmi_rom(vmi_rom);
20648 + check_vmi_rom(&vmi_rom);
20649
20650 /* In case probing for or validating the ROM failed, basil */
20651 - if (!vmi_rom)
20652 + if (!vmi_rom.rom_signature)
20653 return;
20654
20655 - reserve_top_address(-vmi_rom->virtual_top);
20656 + reserve_top_address(-vmi_rom.virtual_top);
20657
20658 #ifdef CONFIG_X86_IO_APIC
20659 /* This is virtual hardware; timer routing is wired correctly */
20660 @@ -874,7 +901,7 @@ void __init vmi_activate(void)
20661 {
20662 unsigned long flags;
20663
20664 - if (!vmi_rom)
20665 + if (!vmi_rom.rom_signature)
20666 return;
20667
20668 local_irq_save(flags);
20669 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
20670 index 3c68fe2..12c8280 100644
20671 --- a/arch/x86/kernel/vmlinux.lds.S
20672 +++ b/arch/x86/kernel/vmlinux.lds.S
20673 @@ -26,6 +26,13 @@
20674 #include <asm/page_types.h>
20675 #include <asm/cache.h>
20676 #include <asm/boot.h>
20677 +#include <asm/segment.h>
20678 +
20679 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20680 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
20681 +#else
20682 +#define __KERNEL_TEXT_OFFSET 0
20683 +#endif
20684
20685 #undef i386 /* in case the preprocessor is a 32bit one */
20686
20687 @@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
20688 #ifdef CONFIG_X86_32
20689 OUTPUT_ARCH(i386)
20690 ENTRY(phys_startup_32)
20691 -jiffies = jiffies_64;
20692 #else
20693 OUTPUT_ARCH(i386:x86-64)
20694 ENTRY(phys_startup_64)
20695 -jiffies_64 = jiffies;
20696 #endif
20697
20698 PHDRS {
20699 text PT_LOAD FLAGS(5); /* R_E */
20700 - data PT_LOAD FLAGS(7); /* RWE */
20701 +#ifdef CONFIG_X86_32
20702 + module PT_LOAD FLAGS(5); /* R_E */
20703 +#endif
20704 +#ifdef CONFIG_XEN
20705 + rodata PT_LOAD FLAGS(5); /* R_E */
20706 +#else
20707 + rodata PT_LOAD FLAGS(4); /* R__ */
20708 +#endif
20709 + data PT_LOAD FLAGS(6); /* RW_ */
20710 #ifdef CONFIG_X86_64
20711 user PT_LOAD FLAGS(5); /* R_E */
20712 +#endif
20713 + init.begin PT_LOAD FLAGS(6); /* RW_ */
20714 #ifdef CONFIG_SMP
20715 percpu PT_LOAD FLAGS(6); /* RW_ */
20716 #endif
20717 + text.init PT_LOAD FLAGS(5); /* R_E */
20718 + text.exit PT_LOAD FLAGS(5); /* R_E */
20719 init PT_LOAD FLAGS(7); /* RWE */
20720 -#endif
20721 note PT_NOTE FLAGS(0); /* ___ */
20722 }
20723
20724 SECTIONS
20725 {
20726 #ifdef CONFIG_X86_32
20727 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
20728 - phys_startup_32 = startup_32 - LOAD_OFFSET;
20729 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
20730 #else
20731 - . = __START_KERNEL;
20732 - phys_startup_64 = startup_64 - LOAD_OFFSET;
20733 + . = __START_KERNEL;
20734 #endif
20735
20736 /* Text and read-only data */
20737 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
20738 - _text = .;
20739 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20740 /* bootstrapping code */
20741 +#ifdef CONFIG_X86_32
20742 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20743 +#else
20744 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20745 +#endif
20746 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20747 + _text = .;
20748 HEAD_TEXT
20749 #ifdef CONFIG_X86_32
20750 . = ALIGN(PAGE_SIZE);
20751 @@ -82,28 +102,71 @@ SECTIONS
20752 IRQENTRY_TEXT
20753 *(.fixup)
20754 *(.gnu.warning)
20755 - /* End of text section */
20756 - _etext = .;
20757 } :text = 0x9090
20758
20759 - NOTES :text :note
20760 + . += __KERNEL_TEXT_OFFSET;
20761
20762 - EXCEPTION_TABLE(16) :text = 0x9090
20763 +#ifdef CONFIG_X86_32
20764 + . = ALIGN(PAGE_SIZE);
20765 + .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
20766 + *(.vmi.rom)
20767 + } :module
20768 +
20769 + . = ALIGN(PAGE_SIZE);
20770 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
20771 +
20772 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
20773 + MODULES_EXEC_VADDR = .;
20774 + BYTE(0)
20775 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
20776 + . = ALIGN(HPAGE_SIZE);
20777 + MODULES_EXEC_END = . - 1;
20778 +#endif
20779 +
20780 + } :module
20781 +#endif
20782 +
20783 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
20784 + /* End of text section */
20785 + _etext = . - __KERNEL_TEXT_OFFSET;
20786 + }
20787 +
20788 +#ifdef CONFIG_X86_32
20789 + . = ALIGN(PAGE_SIZE);
20790 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
20791 + *(.idt)
20792 + . = ALIGN(PAGE_SIZE);
20793 + *(.empty_zero_page)
20794 + *(.swapper_pg_fixmap)
20795 + *(.swapper_pg_pmd)
20796 + *(.swapper_pg_dir)
20797 + *(.trampoline_pg_dir)
20798 + } :rodata
20799 +#endif
20800 +
20801 + . = ALIGN(PAGE_SIZE);
20802 + NOTES :rodata :note
20803 +
20804 + EXCEPTION_TABLE(16) :rodata
20805
20806 RO_DATA(PAGE_SIZE)
20807
20808 /* Data */
20809 .data : AT(ADDR(.data) - LOAD_OFFSET) {
20810 +
20811 +#ifdef CONFIG_PAX_KERNEXEC
20812 + . = ALIGN(HPAGE_SIZE);
20813 +#else
20814 + . = ALIGN(PAGE_SIZE);
20815 +#endif
20816 +
20817 /* Start of data section */
20818 _sdata = .;
20819
20820 /* init_task */
20821 INIT_TASK_DATA(THREAD_SIZE)
20822
20823 -#ifdef CONFIG_X86_32
20824 - /* 32 bit has nosave before _edata */
20825 NOSAVE_DATA
20826 -#endif
20827
20828 PAGE_ALIGNED_DATA(PAGE_SIZE)
20829
20830 @@ -112,6 +175,8 @@ SECTIONS
20831 DATA_DATA
20832 CONSTRUCTORS
20833
20834 + jiffies = jiffies_64;
20835 +
20836 /* rarely changed data like cpu maps */
20837 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
20838
20839 @@ -166,12 +231,6 @@ SECTIONS
20840 }
20841 vgetcpu_mode = VVIRT(.vgetcpu_mode);
20842
20843 - . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
20844 - .jiffies : AT(VLOAD(.jiffies)) {
20845 - *(.jiffies)
20846 - }
20847 - jiffies = VVIRT(.jiffies);
20848 -
20849 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
20850 *(.vsyscall_3)
20851 }
20852 @@ -187,12 +246,19 @@ SECTIONS
20853 #endif /* CONFIG_X86_64 */
20854
20855 /* Init code and data - will be freed after init */
20856 - . = ALIGN(PAGE_SIZE);
20857 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
20858 + BYTE(0)
20859 +
20860 +#ifdef CONFIG_PAX_KERNEXEC
20861 + . = ALIGN(HPAGE_SIZE);
20862 +#else
20863 + . = ALIGN(PAGE_SIZE);
20864 +#endif
20865 +
20866 __init_begin = .; /* paired with __init_end */
20867 - }
20868 + } :init.begin
20869
20870 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
20871 +#ifdef CONFIG_SMP
20872 /*
20873 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
20874 * output PHDR, so the next output section - .init.text - should
20875 @@ -201,12 +267,27 @@ SECTIONS
20876 PERCPU_VADDR(0, :percpu)
20877 #endif
20878
20879 - INIT_TEXT_SECTION(PAGE_SIZE)
20880 -#ifdef CONFIG_X86_64
20881 - :init
20882 -#endif
20883 + . = ALIGN(PAGE_SIZE);
20884 + init_begin = .;
20885 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
20886 + VMLINUX_SYMBOL(_sinittext) = .;
20887 + INIT_TEXT
20888 + VMLINUX_SYMBOL(_einittext) = .;
20889 + . = ALIGN(PAGE_SIZE);
20890 + } :text.init
20891
20892 - INIT_DATA_SECTION(16)
20893 + /*
20894 + * .exit.text is discard at runtime, not link time, to deal with
20895 + * references from .altinstructions and .eh_frame
20896 + */
20897 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20898 + EXIT_TEXT
20899 + . = ALIGN(16);
20900 + } :text.exit
20901 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
20902 +
20903 + . = ALIGN(PAGE_SIZE);
20904 + INIT_DATA_SECTION(16) :init
20905
20906 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
20907 __x86_cpu_dev_start = .;
20908 @@ -232,19 +313,11 @@ SECTIONS
20909 *(.altinstr_replacement)
20910 }
20911
20912 - /*
20913 - * .exit.text is discard at runtime, not link time, to deal with
20914 - * references from .altinstructions and .eh_frame
20915 - */
20916 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
20917 - EXIT_TEXT
20918 - }
20919 -
20920 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
20921 EXIT_DATA
20922 }
20923
20924 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
20925 +#ifndef CONFIG_SMP
20926 PERCPU(PAGE_SIZE)
20927 #endif
20928
20929 @@ -267,12 +340,6 @@ SECTIONS
20930 . = ALIGN(PAGE_SIZE);
20931 }
20932
20933 -#ifdef CONFIG_X86_64
20934 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
20935 - NOSAVE_DATA
20936 - }
20937 -#endif
20938 -
20939 /* BSS */
20940 . = ALIGN(PAGE_SIZE);
20941 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
20942 @@ -288,6 +355,7 @@ SECTIONS
20943 __brk_base = .;
20944 . += 64 * 1024; /* 64k alignment slop space */
20945 *(.brk_reservation) /* areas brk users have reserved */
20946 + . = ALIGN(HPAGE_SIZE);
20947 __brk_limit = .;
20948 }
20949
20950 @@ -316,13 +384,12 @@ SECTIONS
20951 * for the boot processor.
20952 */
20953 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
20954 -INIT_PER_CPU(gdt_page);
20955 INIT_PER_CPU(irq_stack_union);
20956
20957 /*
20958 * Build-time check on the image size:
20959 */
20960 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
20961 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
20962 "kernel image bigger than KERNEL_IMAGE_SIZE");
20963
20964 #ifdef CONFIG_SMP
20965 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
20966 index 62f39d7..3bc46a1 100644
20967 --- a/arch/x86/kernel/vsyscall_64.c
20968 +++ b/arch/x86/kernel/vsyscall_64.c
20969 @@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
20970
20971 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
20972 /* copy vsyscall data */
20973 + strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
20974 vsyscall_gtod_data.clock.vread = clock->vread;
20975 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
20976 vsyscall_gtod_data.clock.mask = clock->mask;
20977 @@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
20978 We do this here because otherwise user space would do it on
20979 its own in a likely inferior way (no access to jiffies).
20980 If you don't like it pass NULL. */
20981 - if (tcache && tcache->blob[0] == (j = __jiffies)) {
20982 + if (tcache && tcache->blob[0] == (j = jiffies)) {
20983 p = tcache->blob[1];
20984 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
20985 /* Load per CPU data from RDTSCP */
20986 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
20987 index 3909e3b..5433a97 100644
20988 --- a/arch/x86/kernel/x8664_ksyms_64.c
20989 +++ b/arch/x86/kernel/x8664_ksyms_64.c
20990 @@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
20991
20992 EXPORT_SYMBOL(copy_user_generic);
20993 EXPORT_SYMBOL(__copy_user_nocache);
20994 -EXPORT_SYMBOL(copy_from_user);
20995 -EXPORT_SYMBOL(copy_to_user);
20996 EXPORT_SYMBOL(__copy_from_user_inatomic);
20997
20998 EXPORT_SYMBOL(copy_page);
20999 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
21000 index c5ee17e..d63218f 100644
21001 --- a/arch/x86/kernel/xsave.c
21002 +++ b/arch/x86/kernel/xsave.c
21003 @@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
21004 fx_sw_user->xstate_size > fx_sw_user->extended_size)
21005 return -1;
21006
21007 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
21008 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
21009 fx_sw_user->extended_size -
21010 FP_XSTATE_MAGIC2_SIZE));
21011 /*
21012 @@ -196,7 +196,7 @@ fx_only:
21013 * the other extended state.
21014 */
21015 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
21016 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
21017 + return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
21018 }
21019
21020 /*
21021 @@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf)
21022 if (task_thread_info(tsk)->status & TS_XSAVE)
21023 err = restore_user_xstate(buf);
21024 else
21025 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
21026 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
21027 buf);
21028 if (unlikely(err)) {
21029 /*
21030 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
21031 index 1350e43..a94b011 100644
21032 --- a/arch/x86/kvm/emulate.c
21033 +++ b/arch/x86/kvm/emulate.c
21034 @@ -81,8 +81,8 @@
21035 #define Src2CL (1<<29)
21036 #define Src2ImmByte (2<<29)
21037 #define Src2One (3<<29)
21038 -#define Src2Imm16 (4<<29)
21039 -#define Src2Mask (7<<29)
21040 +#define Src2Imm16 (4U<<29)
21041 +#define Src2Mask (7U<<29)
21042
21043 enum {
21044 Group1_80, Group1_81, Group1_82, Group1_83,
21045 @@ -411,6 +411,7 @@ static u32 group2_table[] = {
21046
21047 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
21048 do { \
21049 + unsigned long _tmp; \
21050 __asm__ __volatile__ ( \
21051 _PRE_EFLAGS("0", "4", "2") \
21052 _op _suffix " %"_x"3,%1; " \
21053 @@ -424,8 +425,6 @@ static u32 group2_table[] = {
21054 /* Raw emulation: instruction has two explicit operands. */
21055 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
21056 do { \
21057 - unsigned long _tmp; \
21058 - \
21059 switch ((_dst).bytes) { \
21060 case 2: \
21061 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
21062 @@ -441,7 +440,6 @@ static u32 group2_table[] = {
21063
21064 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
21065 do { \
21066 - unsigned long _tmp; \
21067 switch ((_dst).bytes) { \
21068 case 1: \
21069 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
21070 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
21071 index 8dfeaaa..4daa395 100644
21072 --- a/arch/x86/kvm/lapic.c
21073 +++ b/arch/x86/kvm/lapic.c
21074 @@ -52,7 +52,7 @@
21075 #define APIC_BUS_CYCLE_NS 1
21076
21077 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
21078 -#define apic_debug(fmt, arg...)
21079 +#define apic_debug(fmt, arg...) do {} while (0)
21080
21081 #define APIC_LVT_NUM 6
21082 /* 14 is the version for Xeon and Pentium 8.4.8*/
21083 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
21084 index 3bc2707..dd157e2 100644
21085 --- a/arch/x86/kvm/paging_tmpl.h
21086 +++ b/arch/x86/kvm/paging_tmpl.h
21087 @@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
21088 int level = PT_PAGE_TABLE_LEVEL;
21089 unsigned long mmu_seq;
21090
21091 + pax_track_stack();
21092 +
21093 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
21094 kvm_mmu_audit(vcpu, "pre page fault");
21095
21096 @@ -461,6 +463,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
21097 kvm_mmu_free_some_pages(vcpu);
21098 sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
21099 level, &write_pt, pfn);
21100 + (void)sptep;
21101 pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
21102 sptep, *sptep, write_pt);
21103
21104 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
21105 index 7c6e63e..c5d92c1 100644
21106 --- a/arch/x86/kvm/svm.c
21107 +++ b/arch/x86/kvm/svm.c
21108 @@ -2486,7 +2486,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
21109 int cpu = raw_smp_processor_id();
21110
21111 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
21112 +
21113 + pax_open_kernel();
21114 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
21115 + pax_close_kernel();
21116 +
21117 load_TR_desc();
21118 }
21119
21120 @@ -2947,7 +2951,7 @@ static bool svm_gb_page_enable(void)
21121 return true;
21122 }
21123
21124 -static struct kvm_x86_ops svm_x86_ops = {
21125 +static const struct kvm_x86_ops svm_x86_ops = {
21126 .cpu_has_kvm_support = has_svm,
21127 .disabled_by_bios = is_disabled,
21128 .hardware_setup = svm_hardware_setup,
21129 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
21130 index e6d925f..e7a4af8 100644
21131 --- a/arch/x86/kvm/vmx.c
21132 +++ b/arch/x86/kvm/vmx.c
21133 @@ -570,7 +570,11 @@ static void reload_tss(void)
21134
21135 kvm_get_gdt(&gdt);
21136 descs = (void *)gdt.base;
21137 +
21138 + pax_open_kernel();
21139 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
21140 + pax_close_kernel();
21141 +
21142 load_TR_desc();
21143 }
21144
21145 @@ -1410,8 +1414,11 @@ static __init int hardware_setup(void)
21146 if (!cpu_has_vmx_flexpriority())
21147 flexpriority_enabled = 0;
21148
21149 - if (!cpu_has_vmx_tpr_shadow())
21150 - kvm_x86_ops->update_cr8_intercept = NULL;
21151 + if (!cpu_has_vmx_tpr_shadow()) {
21152 + pax_open_kernel();
21153 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
21154 + pax_close_kernel();
21155 + }
21156
21157 if (enable_ept && !cpu_has_vmx_ept_2m_page())
21158 kvm_disable_largepages();
21159 @@ -2362,7 +2369,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
21160 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
21161
21162 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
21163 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
21164 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
21165 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
21166 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
21167 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
21168 @@ -3718,6 +3725,12 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21169 "jmp .Lkvm_vmx_return \n\t"
21170 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
21171 ".Lkvm_vmx_return: "
21172 +
21173 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21174 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
21175 + ".Lkvm_vmx_return2: "
21176 +#endif
21177 +
21178 /* Save guest registers, load host registers, keep flags */
21179 "xchg %0, (%%"R"sp) \n\t"
21180 "mov %%"R"ax, %c[rax](%0) \n\t"
21181 @@ -3764,8 +3777,13 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21182 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
21183 #endif
21184 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
21185 +
21186 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21187 + ,[cs]"i"(__KERNEL_CS)
21188 +#endif
21189 +
21190 : "cc", "memory"
21191 - , R"bx", R"di", R"si"
21192 + , R"ax", R"bx", R"di", R"si"
21193 #ifdef CONFIG_X86_64
21194 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
21195 #endif
21196 @@ -3782,7 +3800,16 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21197 if (vmx->rmode.irq.pending)
21198 fixup_rmode_irq(vmx);
21199
21200 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
21201 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
21202 +
21203 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21204 + loadsegment(fs, __KERNEL_PERCPU);
21205 +#endif
21206 +
21207 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21208 + __set_fs(current_thread_info()->addr_limit);
21209 +#endif
21210 +
21211 vmx->launched = 1;
21212
21213 vmx_complete_interrupts(vmx);
21214 @@ -3957,7 +3984,7 @@ static bool vmx_gb_page_enable(void)
21215 return false;
21216 }
21217
21218 -static struct kvm_x86_ops vmx_x86_ops = {
21219 +static const struct kvm_x86_ops vmx_x86_ops = {
21220 .cpu_has_kvm_support = cpu_has_kvm_support,
21221 .disabled_by_bios = vmx_disabled_by_bios,
21222 .hardware_setup = hardware_setup,
21223 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
21224 index df1cefb..5e882ad 100644
21225 --- a/arch/x86/kvm/x86.c
21226 +++ b/arch/x86/kvm/x86.c
21227 @@ -82,7 +82,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu);
21228 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
21229 struct kvm_cpuid_entry2 __user *entries);
21230
21231 -struct kvm_x86_ops *kvm_x86_ops;
21232 +const struct kvm_x86_ops *kvm_x86_ops;
21233 EXPORT_SYMBOL_GPL(kvm_x86_ops);
21234
21235 int ignore_msrs = 0;
21236 @@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
21237 struct kvm_cpuid2 *cpuid,
21238 struct kvm_cpuid_entry2 __user *entries)
21239 {
21240 - int r;
21241 + int r, i;
21242
21243 r = -E2BIG;
21244 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
21245 goto out;
21246 r = -EFAULT;
21247 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
21248 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21249 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21250 goto out;
21251 + for (i = 0; i < cpuid->nent; ++i) {
21252 + struct kvm_cpuid_entry2 cpuid_entry;
21253 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
21254 + goto out;
21255 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
21256 + }
21257 vcpu->arch.cpuid_nent = cpuid->nent;
21258 kvm_apic_set_version(vcpu);
21259 return 0;
21260 @@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
21261 struct kvm_cpuid2 *cpuid,
21262 struct kvm_cpuid_entry2 __user *entries)
21263 {
21264 - int r;
21265 + int r, i;
21266
21267 vcpu_load(vcpu);
21268 r = -E2BIG;
21269 if (cpuid->nent < vcpu->arch.cpuid_nent)
21270 goto out;
21271 r = -EFAULT;
21272 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
21273 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21274 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21275 goto out;
21276 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
21277 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
21278 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
21279 + goto out;
21280 + }
21281 return 0;
21282
21283 out:
21284 @@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
21285 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
21286 struct kvm_interrupt *irq)
21287 {
21288 - if (irq->irq < 0 || irq->irq >= 256)
21289 + if (irq->irq >= 256)
21290 return -EINVAL;
21291 if (irqchip_in_kernel(vcpu->kvm))
21292 return -ENXIO;
21293 @@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cpufreq_notifier_block = {
21294 .notifier_call = kvmclock_cpufreq_notifier
21295 };
21296
21297 -int kvm_arch_init(void *opaque)
21298 +int kvm_arch_init(const void *opaque)
21299 {
21300 int r, cpu;
21301 - struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
21302 + const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
21303
21304 if (kvm_x86_ops) {
21305 printk(KERN_ERR "kvm: already loaded the other module\n");
21306 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
21307 index 7e59dc1..b88c98f 100644
21308 --- a/arch/x86/lguest/boot.c
21309 +++ b/arch/x86/lguest/boot.c
21310 @@ -1172,9 +1172,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
21311 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
21312 * Launcher to reboot us.
21313 */
21314 -static void lguest_restart(char *reason)
21315 +static __noreturn void lguest_restart(char *reason)
21316 {
21317 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
21318 + BUG();
21319 }
21320
21321 /*G:050
21322 diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
21323 index 824fa0b..c619e96 100644
21324 --- a/arch/x86/lib/atomic64_32.c
21325 +++ b/arch/x86/lib/atomic64_32.c
21326 @@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val)
21327 }
21328 EXPORT_SYMBOL(atomic64_cmpxchg);
21329
21330 +u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
21331 +{
21332 + return cmpxchg8b(&ptr->counter, old_val, new_val);
21333 +}
21334 +EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
21335 +
21336 /**
21337 * atomic64_xchg - xchg atomic64 variable
21338 * @ptr: pointer to type atomic64_t
21339 @@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 new_val)
21340 EXPORT_SYMBOL(atomic64_xchg);
21341
21342 /**
21343 + * atomic64_xchg_unchecked - xchg atomic64 variable
21344 + * @ptr: pointer to type atomic64_unchecked_t
21345 + * @new_val: value to assign
21346 + *
21347 + * Atomically xchgs the value of @ptr to @new_val and returns
21348 + * the old value.
21349 + */
21350 +u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
21351 +{
21352 + /*
21353 + * Try first with a (possibly incorrect) assumption about
21354 + * what we have there. We'll do two loops most likely,
21355 + * but we'll get an ownership MESI transaction straight away
21356 + * instead of a read transaction followed by a
21357 + * flush-for-ownership transaction:
21358 + */
21359 + u64 old_val, real_val = 0;
21360 +
21361 + do {
21362 + old_val = real_val;
21363 +
21364 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
21365 +
21366 + } while (real_val != old_val);
21367 +
21368 + return old_val;
21369 +}
21370 +EXPORT_SYMBOL(atomic64_xchg_unchecked);
21371 +
21372 +/**
21373 * atomic64_set - set atomic64 variable
21374 * @ptr: pointer to type atomic64_t
21375 * @new_val: value to assign
21376 @@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 new_val)
21377 EXPORT_SYMBOL(atomic64_set);
21378
21379 /**
21380 -EXPORT_SYMBOL(atomic64_read);
21381 + * atomic64_unchecked_set - set atomic64 variable
21382 + * @ptr: pointer to type atomic64_unchecked_t
21383 + * @new_val: value to assign
21384 + *
21385 + * Atomically sets the value of @ptr to @new_val.
21386 + */
21387 +void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
21388 +{
21389 + atomic64_xchg_unchecked(ptr, new_val);
21390 +}
21391 +EXPORT_SYMBOL(atomic64_set_unchecked);
21392 +
21393 +/**
21394 * atomic64_add_return - add and return
21395 * @delta: integer value to add
21396 * @ptr: pointer to type atomic64_t
21397 @@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 delta, atomic64_t *ptr)
21398 }
21399 EXPORT_SYMBOL(atomic64_add_return);
21400
21401 +/**
21402 + * atomic64_add_return_unchecked - add and return
21403 + * @delta: integer value to add
21404 + * @ptr: pointer to type atomic64_unchecked_t
21405 + *
21406 + * Atomically adds @delta to @ptr and returns @delta + *@ptr
21407 + */
21408 +noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21409 +{
21410 + /*
21411 + * Try first with a (possibly incorrect) assumption about
21412 + * what we have there. We'll do two loops most likely,
21413 + * but we'll get an ownership MESI transaction straight away
21414 + * instead of a read transaction followed by a
21415 + * flush-for-ownership transaction:
21416 + */
21417 + u64 old_val, new_val, real_val = 0;
21418 +
21419 + do {
21420 + old_val = real_val;
21421 + new_val = old_val + delta;
21422 +
21423 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
21424 +
21425 + } while (real_val != old_val);
21426 +
21427 + return new_val;
21428 +}
21429 +EXPORT_SYMBOL(atomic64_add_return_unchecked);
21430 +
21431 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
21432 {
21433 return atomic64_add_return(-delta, ptr);
21434 }
21435 EXPORT_SYMBOL(atomic64_sub_return);
21436
21437 +u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21438 +{
21439 + return atomic64_add_return_unchecked(-delta, ptr);
21440 +}
21441 +EXPORT_SYMBOL(atomic64_sub_return_unchecked);
21442 +
21443 u64 atomic64_inc_return(atomic64_t *ptr)
21444 {
21445 return atomic64_add_return(1, ptr);
21446 }
21447 EXPORT_SYMBOL(atomic64_inc_return);
21448
21449 +u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
21450 +{
21451 + return atomic64_add_return_unchecked(1, ptr);
21452 +}
21453 +EXPORT_SYMBOL(atomic64_inc_return_unchecked);
21454 +
21455 u64 atomic64_dec_return(atomic64_t *ptr)
21456 {
21457 return atomic64_sub_return(1, ptr);
21458 }
21459 EXPORT_SYMBOL(atomic64_dec_return);
21460
21461 +u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
21462 +{
21463 + return atomic64_sub_return_unchecked(1, ptr);
21464 +}
21465 +EXPORT_SYMBOL(atomic64_dec_return_unchecked);
21466 +
21467 /**
21468 * atomic64_add - add integer to atomic64 variable
21469 * @delta: integer value to add
21470 @@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t *ptr)
21471 EXPORT_SYMBOL(atomic64_add);
21472
21473 /**
21474 + * atomic64_add_unchecked - add integer to atomic64 variable
21475 + * @delta: integer value to add
21476 + * @ptr: pointer to type atomic64_unchecked_t
21477 + *
21478 + * Atomically adds @delta to @ptr.
21479 + */
21480 +void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21481 +{
21482 + atomic64_add_return_unchecked(delta, ptr);
21483 +}
21484 +EXPORT_SYMBOL(atomic64_add_unchecked);
21485 +
21486 +/**
21487 * atomic64_sub - subtract the atomic64 variable
21488 * @delta: integer value to subtract
21489 * @ptr: pointer to type atomic64_t
21490 @@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t *ptr)
21491 EXPORT_SYMBOL(atomic64_sub);
21492
21493 /**
21494 + * atomic64_sub_unchecked - subtract the atomic64 variable
21495 + * @delta: integer value to subtract
21496 + * @ptr: pointer to type atomic64_unchecked_t
21497 + *
21498 + * Atomically subtracts @delta from @ptr.
21499 + */
21500 +void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21501 +{
21502 + atomic64_add_unchecked(-delta, ptr);
21503 +}
21504 +EXPORT_SYMBOL(atomic64_sub_unchecked);
21505 +
21506 +/**
21507 * atomic64_sub_and_test - subtract value from variable and test result
21508 * @delta: integer value to subtract
21509 * @ptr: pointer to type atomic64_t
21510 @@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
21511 EXPORT_SYMBOL(atomic64_inc);
21512
21513 /**
21514 + * atomic64_inc_unchecked - increment atomic64 variable
21515 + * @ptr: pointer to type atomic64_unchecked_t
21516 + *
21517 + * Atomically increments @ptr by 1.
21518 + */
21519 +void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
21520 +{
21521 + atomic64_add_unchecked(1, ptr);
21522 +}
21523 +EXPORT_SYMBOL(atomic64_inc_unchecked);
21524 +
21525 +/**
21526 * atomic64_dec - decrement atomic64 variable
21527 * @ptr: pointer to type atomic64_t
21528 *
21529 @@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
21530 EXPORT_SYMBOL(atomic64_dec);
21531
21532 /**
21533 + * atomic64_dec_unchecked - decrement atomic64 variable
21534 + * @ptr: pointer to type atomic64_unchecked_t
21535 + *
21536 + * Atomically decrements @ptr by 1.
21537 + */
21538 +void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
21539 +{
21540 + atomic64_sub_unchecked(1, ptr);
21541 +}
21542 +EXPORT_SYMBOL(atomic64_dec_unchecked);
21543 +
21544 +/**
21545 * atomic64_dec_and_test - decrement and test
21546 * @ptr: pointer to type atomic64_t
21547 *
21548 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
21549 index adbccd0..98f96c8 100644
21550 --- a/arch/x86/lib/checksum_32.S
21551 +++ b/arch/x86/lib/checksum_32.S
21552 @@ -28,7 +28,8 @@
21553 #include <linux/linkage.h>
21554 #include <asm/dwarf2.h>
21555 #include <asm/errno.h>
21556 -
21557 +#include <asm/segment.h>
21558 +
21559 /*
21560 * computes a partial checksum, e.g. for TCP/UDP fragments
21561 */
21562 @@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
21563
21564 #define ARGBASE 16
21565 #define FP 12
21566 -
21567 -ENTRY(csum_partial_copy_generic)
21568 +
21569 +ENTRY(csum_partial_copy_generic_to_user)
21570 CFI_STARTPROC
21571 +
21572 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21573 + pushl %gs
21574 + CFI_ADJUST_CFA_OFFSET 4
21575 + popl %es
21576 + CFI_ADJUST_CFA_OFFSET -4
21577 + jmp csum_partial_copy_generic
21578 +#endif
21579 +
21580 +ENTRY(csum_partial_copy_generic_from_user)
21581 +
21582 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21583 + pushl %gs
21584 + CFI_ADJUST_CFA_OFFSET 4
21585 + popl %ds
21586 + CFI_ADJUST_CFA_OFFSET -4
21587 +#endif
21588 +
21589 +ENTRY(csum_partial_copy_generic)
21590 subl $4,%esp
21591 CFI_ADJUST_CFA_OFFSET 4
21592 pushl %edi
21593 @@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
21594 jmp 4f
21595 SRC(1: movw (%esi), %bx )
21596 addl $2, %esi
21597 -DST( movw %bx, (%edi) )
21598 +DST( movw %bx, %es:(%edi) )
21599 addl $2, %edi
21600 addw %bx, %ax
21601 adcl $0, %eax
21602 @@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
21603 SRC(1: movl (%esi), %ebx )
21604 SRC( movl 4(%esi), %edx )
21605 adcl %ebx, %eax
21606 -DST( movl %ebx, (%edi) )
21607 +DST( movl %ebx, %es:(%edi) )
21608 adcl %edx, %eax
21609 -DST( movl %edx, 4(%edi) )
21610 +DST( movl %edx, %es:4(%edi) )
21611
21612 SRC( movl 8(%esi), %ebx )
21613 SRC( movl 12(%esi), %edx )
21614 adcl %ebx, %eax
21615 -DST( movl %ebx, 8(%edi) )
21616 +DST( movl %ebx, %es:8(%edi) )
21617 adcl %edx, %eax
21618 -DST( movl %edx, 12(%edi) )
21619 +DST( movl %edx, %es:12(%edi) )
21620
21621 SRC( movl 16(%esi), %ebx )
21622 SRC( movl 20(%esi), %edx )
21623 adcl %ebx, %eax
21624 -DST( movl %ebx, 16(%edi) )
21625 +DST( movl %ebx, %es:16(%edi) )
21626 adcl %edx, %eax
21627 -DST( movl %edx, 20(%edi) )
21628 +DST( movl %edx, %es:20(%edi) )
21629
21630 SRC( movl 24(%esi), %ebx )
21631 SRC( movl 28(%esi), %edx )
21632 adcl %ebx, %eax
21633 -DST( movl %ebx, 24(%edi) )
21634 +DST( movl %ebx, %es:24(%edi) )
21635 adcl %edx, %eax
21636 -DST( movl %edx, 28(%edi) )
21637 +DST( movl %edx, %es:28(%edi) )
21638
21639 lea 32(%esi), %esi
21640 lea 32(%edi), %edi
21641 @@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
21642 shrl $2, %edx # This clears CF
21643 SRC(3: movl (%esi), %ebx )
21644 adcl %ebx, %eax
21645 -DST( movl %ebx, (%edi) )
21646 +DST( movl %ebx, %es:(%edi) )
21647 lea 4(%esi), %esi
21648 lea 4(%edi), %edi
21649 dec %edx
21650 @@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
21651 jb 5f
21652 SRC( movw (%esi), %cx )
21653 leal 2(%esi), %esi
21654 -DST( movw %cx, (%edi) )
21655 +DST( movw %cx, %es:(%edi) )
21656 leal 2(%edi), %edi
21657 je 6f
21658 shll $16,%ecx
21659 SRC(5: movb (%esi), %cl )
21660 -DST( movb %cl, (%edi) )
21661 +DST( movb %cl, %es:(%edi) )
21662 6: addl %ecx, %eax
21663 adcl $0, %eax
21664 7:
21665 @@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
21666
21667 6001:
21668 movl ARGBASE+20(%esp), %ebx # src_err_ptr
21669 - movl $-EFAULT, (%ebx)
21670 + movl $-EFAULT, %ss:(%ebx)
21671
21672 # zero the complete destination - computing the rest
21673 # is too much work
21674 @@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
21675
21676 6002:
21677 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21678 - movl $-EFAULT,(%ebx)
21679 + movl $-EFAULT,%ss:(%ebx)
21680 jmp 5000b
21681
21682 .previous
21683
21684 + pushl %ss
21685 + CFI_ADJUST_CFA_OFFSET 4
21686 + popl %ds
21687 + CFI_ADJUST_CFA_OFFSET -4
21688 + pushl %ss
21689 + CFI_ADJUST_CFA_OFFSET 4
21690 + popl %es
21691 + CFI_ADJUST_CFA_OFFSET -4
21692 popl %ebx
21693 CFI_ADJUST_CFA_OFFSET -4
21694 CFI_RESTORE ebx
21695 @@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
21696 CFI_ADJUST_CFA_OFFSET -4
21697 ret
21698 CFI_ENDPROC
21699 -ENDPROC(csum_partial_copy_generic)
21700 +ENDPROC(csum_partial_copy_generic_to_user)
21701
21702 #else
21703
21704 /* Version for PentiumII/PPro */
21705
21706 #define ROUND1(x) \
21707 + nop; nop; nop; \
21708 SRC(movl x(%esi), %ebx ) ; \
21709 addl %ebx, %eax ; \
21710 - DST(movl %ebx, x(%edi) ) ;
21711 + DST(movl %ebx, %es:x(%edi)) ;
21712
21713 #define ROUND(x) \
21714 + nop; nop; nop; \
21715 SRC(movl x(%esi), %ebx ) ; \
21716 adcl %ebx, %eax ; \
21717 - DST(movl %ebx, x(%edi) ) ;
21718 + DST(movl %ebx, %es:x(%edi)) ;
21719
21720 #define ARGBASE 12
21721 -
21722 -ENTRY(csum_partial_copy_generic)
21723 +
21724 +ENTRY(csum_partial_copy_generic_to_user)
21725 CFI_STARTPROC
21726 +
21727 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21728 + pushl %gs
21729 + CFI_ADJUST_CFA_OFFSET 4
21730 + popl %es
21731 + CFI_ADJUST_CFA_OFFSET -4
21732 + jmp csum_partial_copy_generic
21733 +#endif
21734 +
21735 +ENTRY(csum_partial_copy_generic_from_user)
21736 +
21737 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21738 + pushl %gs
21739 + CFI_ADJUST_CFA_OFFSET 4
21740 + popl %ds
21741 + CFI_ADJUST_CFA_OFFSET -4
21742 +#endif
21743 +
21744 +ENTRY(csum_partial_copy_generic)
21745 pushl %ebx
21746 CFI_ADJUST_CFA_OFFSET 4
21747 CFI_REL_OFFSET ebx, 0
21748 @@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
21749 subl %ebx, %edi
21750 lea -1(%esi),%edx
21751 andl $-32,%edx
21752 - lea 3f(%ebx,%ebx), %ebx
21753 + lea 3f(%ebx,%ebx,2), %ebx
21754 testl %esi, %esi
21755 jmp *%ebx
21756 1: addl $64,%esi
21757 @@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
21758 jb 5f
21759 SRC( movw (%esi), %dx )
21760 leal 2(%esi), %esi
21761 -DST( movw %dx, (%edi) )
21762 +DST( movw %dx, %es:(%edi) )
21763 leal 2(%edi), %edi
21764 je 6f
21765 shll $16,%edx
21766 5:
21767 SRC( movb (%esi), %dl )
21768 -DST( movb %dl, (%edi) )
21769 +DST( movb %dl, %es:(%edi) )
21770 6: addl %edx, %eax
21771 adcl $0, %eax
21772 7:
21773 .section .fixup, "ax"
21774 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
21775 - movl $-EFAULT, (%ebx)
21776 + movl $-EFAULT, %ss:(%ebx)
21777 # zero the complete destination (computing the rest is too much work)
21778 movl ARGBASE+8(%esp),%edi # dst
21779 movl ARGBASE+12(%esp),%ecx # len
21780 @@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
21781 rep; stosb
21782 jmp 7b
21783 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21784 - movl $-EFAULT, (%ebx)
21785 + movl $-EFAULT, %ss:(%ebx)
21786 jmp 7b
21787 .previous
21788
21789 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21790 + pushl %ss
21791 + CFI_ADJUST_CFA_OFFSET 4
21792 + popl %ds
21793 + CFI_ADJUST_CFA_OFFSET -4
21794 + pushl %ss
21795 + CFI_ADJUST_CFA_OFFSET 4
21796 + popl %es
21797 + CFI_ADJUST_CFA_OFFSET -4
21798 +#endif
21799 +
21800 popl %esi
21801 CFI_ADJUST_CFA_OFFSET -4
21802 CFI_RESTORE esi
21803 @@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
21804 CFI_RESTORE ebx
21805 ret
21806 CFI_ENDPROC
21807 -ENDPROC(csum_partial_copy_generic)
21808 +ENDPROC(csum_partial_copy_generic_to_user)
21809
21810 #undef ROUND
21811 #undef ROUND1
21812 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
21813 index ebeafcc..1e3a402 100644
21814 --- a/arch/x86/lib/clear_page_64.S
21815 +++ b/arch/x86/lib/clear_page_64.S
21816 @@ -1,5 +1,6 @@
21817 #include <linux/linkage.h>
21818 #include <asm/dwarf2.h>
21819 +#include <asm/alternative-asm.h>
21820
21821 /*
21822 * Zero a page.
21823 @@ -10,6 +11,7 @@ ENTRY(clear_page_c)
21824 movl $4096/8,%ecx
21825 xorl %eax,%eax
21826 rep stosq
21827 + pax_force_retaddr
21828 ret
21829 CFI_ENDPROC
21830 ENDPROC(clear_page_c)
21831 @@ -33,6 +35,7 @@ ENTRY(clear_page)
21832 leaq 64(%rdi),%rdi
21833 jnz .Lloop
21834 nop
21835 + pax_force_retaddr
21836 ret
21837 CFI_ENDPROC
21838 .Lclear_page_end:
21839 @@ -43,7 +46,7 @@ ENDPROC(clear_page)
21840
21841 #include <asm/cpufeature.h>
21842
21843 - .section .altinstr_replacement,"ax"
21844 + .section .altinstr_replacement,"a"
21845 1: .byte 0xeb /* jmp <disp8> */
21846 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
21847 2:
21848 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
21849 index 727a5d4..333818a 100644
21850 --- a/arch/x86/lib/copy_page_64.S
21851 +++ b/arch/x86/lib/copy_page_64.S
21852 @@ -2,12 +2,14 @@
21853
21854 #include <linux/linkage.h>
21855 #include <asm/dwarf2.h>
21856 +#include <asm/alternative-asm.h>
21857
21858 ALIGN
21859 copy_page_c:
21860 CFI_STARTPROC
21861 movl $4096/8,%ecx
21862 rep movsq
21863 + pax_force_retaddr
21864 ret
21865 CFI_ENDPROC
21866 ENDPROC(copy_page_c)
21867 @@ -38,7 +40,7 @@ ENTRY(copy_page)
21868 movq 16 (%rsi), %rdx
21869 movq 24 (%rsi), %r8
21870 movq 32 (%rsi), %r9
21871 - movq 40 (%rsi), %r10
21872 + movq 40 (%rsi), %r13
21873 movq 48 (%rsi), %r11
21874 movq 56 (%rsi), %r12
21875
21876 @@ -49,7 +51,7 @@ ENTRY(copy_page)
21877 movq %rdx, 16 (%rdi)
21878 movq %r8, 24 (%rdi)
21879 movq %r9, 32 (%rdi)
21880 - movq %r10, 40 (%rdi)
21881 + movq %r13, 40 (%rdi)
21882 movq %r11, 48 (%rdi)
21883 movq %r12, 56 (%rdi)
21884
21885 @@ -68,7 +70,7 @@ ENTRY(copy_page)
21886 movq 16 (%rsi), %rdx
21887 movq 24 (%rsi), %r8
21888 movq 32 (%rsi), %r9
21889 - movq 40 (%rsi), %r10
21890 + movq 40 (%rsi), %r13
21891 movq 48 (%rsi), %r11
21892 movq 56 (%rsi), %r12
21893
21894 @@ -77,7 +79,7 @@ ENTRY(copy_page)
21895 movq %rdx, 16 (%rdi)
21896 movq %r8, 24 (%rdi)
21897 movq %r9, 32 (%rdi)
21898 - movq %r10, 40 (%rdi)
21899 + movq %r13, 40 (%rdi)
21900 movq %r11, 48 (%rdi)
21901 movq %r12, 56 (%rdi)
21902
21903 @@ -94,6 +96,7 @@ ENTRY(copy_page)
21904 CFI_RESTORE r13
21905 addq $3*8,%rsp
21906 CFI_ADJUST_CFA_OFFSET -3*8
21907 + pax_force_retaddr
21908 ret
21909 .Lcopy_page_end:
21910 CFI_ENDPROC
21911 @@ -104,7 +107,7 @@ ENDPROC(copy_page)
21912
21913 #include <asm/cpufeature.h>
21914
21915 - .section .altinstr_replacement,"ax"
21916 + .section .altinstr_replacement,"a"
21917 1: .byte 0xeb /* jmp <disp8> */
21918 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
21919 2:
21920 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
21921 index af8debd..40c75f3 100644
21922 --- a/arch/x86/lib/copy_user_64.S
21923 +++ b/arch/x86/lib/copy_user_64.S
21924 @@ -15,13 +15,15 @@
21925 #include <asm/asm-offsets.h>
21926 #include <asm/thread_info.h>
21927 #include <asm/cpufeature.h>
21928 +#include <asm/pgtable.h>
21929 +#include <asm/alternative-asm.h>
21930
21931 .macro ALTERNATIVE_JUMP feature,orig,alt
21932 0:
21933 .byte 0xe9 /* 32bit jump */
21934 .long \orig-1f /* by default jump to orig */
21935 1:
21936 - .section .altinstr_replacement,"ax"
21937 + .section .altinstr_replacement,"a"
21938 2: .byte 0xe9 /* near jump with 32bit immediate */
21939 .long \alt-1b /* offset */ /* or alternatively to alt */
21940 .previous
21941 @@ -64,55 +66,26 @@
21942 #endif
21943 .endm
21944
21945 -/* Standard copy_to_user with segment limit checking */
21946 -ENTRY(copy_to_user)
21947 - CFI_STARTPROC
21948 - GET_THREAD_INFO(%rax)
21949 - movq %rdi,%rcx
21950 - addq %rdx,%rcx
21951 - jc bad_to_user
21952 - cmpq TI_addr_limit(%rax),%rcx
21953 - ja bad_to_user
21954 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
21955 - CFI_ENDPROC
21956 -ENDPROC(copy_to_user)
21957 -
21958 -/* Standard copy_from_user with segment limit checking */
21959 -ENTRY(copy_from_user)
21960 - CFI_STARTPROC
21961 - GET_THREAD_INFO(%rax)
21962 - movq %rsi,%rcx
21963 - addq %rdx,%rcx
21964 - jc bad_from_user
21965 - cmpq TI_addr_limit(%rax),%rcx
21966 - ja bad_from_user
21967 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
21968 - CFI_ENDPROC
21969 -ENDPROC(copy_from_user)
21970 -
21971 ENTRY(copy_user_generic)
21972 CFI_STARTPROC
21973 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
21974 CFI_ENDPROC
21975 ENDPROC(copy_user_generic)
21976
21977 -ENTRY(__copy_from_user_inatomic)
21978 - CFI_STARTPROC
21979 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
21980 - CFI_ENDPROC
21981 -ENDPROC(__copy_from_user_inatomic)
21982 -
21983 .section .fixup,"ax"
21984 /* must zero dest */
21985 ENTRY(bad_from_user)
21986 bad_from_user:
21987 CFI_STARTPROC
21988 + testl %edx,%edx
21989 + js bad_to_user
21990 movl %edx,%ecx
21991 xorl %eax,%eax
21992 rep
21993 stosb
21994 bad_to_user:
21995 movl %edx,%eax
21996 + pax_force_retaddr
21997 ret
21998 CFI_ENDPROC
21999 ENDPROC(bad_from_user)
22000 @@ -142,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
22001 jz 17f
22002 1: movq (%rsi),%r8
22003 2: movq 1*8(%rsi),%r9
22004 -3: movq 2*8(%rsi),%r10
22005 +3: movq 2*8(%rsi),%rax
22006 4: movq 3*8(%rsi),%r11
22007 5: movq %r8,(%rdi)
22008 6: movq %r9,1*8(%rdi)
22009 -7: movq %r10,2*8(%rdi)
22010 +7: movq %rax,2*8(%rdi)
22011 8: movq %r11,3*8(%rdi)
22012 9: movq 4*8(%rsi),%r8
22013 10: movq 5*8(%rsi),%r9
22014 -11: movq 6*8(%rsi),%r10
22015 +11: movq 6*8(%rsi),%rax
22016 12: movq 7*8(%rsi),%r11
22017 13: movq %r8,4*8(%rdi)
22018 14: movq %r9,5*8(%rdi)
22019 -15: movq %r10,6*8(%rdi)
22020 +15: movq %rax,6*8(%rdi)
22021 16: movq %r11,7*8(%rdi)
22022 leaq 64(%rsi),%rsi
22023 leaq 64(%rdi),%rdi
22024 @@ -180,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
22025 decl %ecx
22026 jnz 21b
22027 23: xor %eax,%eax
22028 + pax_force_retaddr
22029 ret
22030
22031 .section .fixup,"ax"
22032 @@ -252,6 +226,7 @@ ENTRY(copy_user_generic_string)
22033 3: rep
22034 movsb
22035 4: xorl %eax,%eax
22036 + pax_force_retaddr
22037 ret
22038
22039 .section .fixup,"ax"
22040 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
22041 index cb0c112..e3a6895 100644
22042 --- a/arch/x86/lib/copy_user_nocache_64.S
22043 +++ b/arch/x86/lib/copy_user_nocache_64.S
22044 @@ -8,12 +8,14 @@
22045
22046 #include <linux/linkage.h>
22047 #include <asm/dwarf2.h>
22048 +#include <asm/alternative-asm.h>
22049
22050 #define FIX_ALIGNMENT 1
22051
22052 #include <asm/current.h>
22053 #include <asm/asm-offsets.h>
22054 #include <asm/thread_info.h>
22055 +#include <asm/pgtable.h>
22056
22057 .macro ALIGN_DESTINATION
22058 #ifdef FIX_ALIGNMENT
22059 @@ -50,6 +52,15 @@
22060 */
22061 ENTRY(__copy_user_nocache)
22062 CFI_STARTPROC
22063 +
22064 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22065 + mov $PAX_USER_SHADOW_BASE,%rcx
22066 + cmp %rcx,%rsi
22067 + jae 1f
22068 + add %rcx,%rsi
22069 +1:
22070 +#endif
22071 +
22072 cmpl $8,%edx
22073 jb 20f /* less then 8 bytes, go to byte copy loop */
22074 ALIGN_DESTINATION
22075 @@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
22076 jz 17f
22077 1: movq (%rsi),%r8
22078 2: movq 1*8(%rsi),%r9
22079 -3: movq 2*8(%rsi),%r10
22080 +3: movq 2*8(%rsi),%rax
22081 4: movq 3*8(%rsi),%r11
22082 5: movnti %r8,(%rdi)
22083 6: movnti %r9,1*8(%rdi)
22084 -7: movnti %r10,2*8(%rdi)
22085 +7: movnti %rax,2*8(%rdi)
22086 8: movnti %r11,3*8(%rdi)
22087 9: movq 4*8(%rsi),%r8
22088 10: movq 5*8(%rsi),%r9
22089 -11: movq 6*8(%rsi),%r10
22090 +11: movq 6*8(%rsi),%rax
22091 12: movq 7*8(%rsi),%r11
22092 13: movnti %r8,4*8(%rdi)
22093 14: movnti %r9,5*8(%rdi)
22094 -15: movnti %r10,6*8(%rdi)
22095 +15: movnti %rax,6*8(%rdi)
22096 16: movnti %r11,7*8(%rdi)
22097 leaq 64(%rsi),%rsi
22098 leaq 64(%rdi),%rdi
22099 @@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
22100 jnz 21b
22101 23: xorl %eax,%eax
22102 sfence
22103 + pax_force_retaddr
22104 ret
22105
22106 .section .fixup,"ax"
22107 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
22108 index f0dba36..48cb4d6 100644
22109 --- a/arch/x86/lib/csum-copy_64.S
22110 +++ b/arch/x86/lib/csum-copy_64.S
22111 @@ -8,6 +8,7 @@
22112 #include <linux/linkage.h>
22113 #include <asm/dwarf2.h>
22114 #include <asm/errno.h>
22115 +#include <asm/alternative-asm.h>
22116
22117 /*
22118 * Checksum copy with exception handling.
22119 @@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
22120 CFI_RESTORE rbp
22121 addq $7*8,%rsp
22122 CFI_ADJUST_CFA_OFFSET -7*8
22123 + pax_force_retaddr 0, 1
22124 ret
22125 CFI_RESTORE_STATE
22126
22127 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
22128 index 459b58a..9570bc7 100644
22129 --- a/arch/x86/lib/csum-wrappers_64.c
22130 +++ b/arch/x86/lib/csum-wrappers_64.c
22131 @@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
22132 len -= 2;
22133 }
22134 }
22135 - isum = csum_partial_copy_generic((__force const void *)src,
22136 +
22137 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22138 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
22139 + src += PAX_USER_SHADOW_BASE;
22140 +#endif
22141 +
22142 + isum = csum_partial_copy_generic((const void __force_kernel *)src,
22143 dst, len, isum, errp, NULL);
22144 if (unlikely(*errp))
22145 goto out_err;
22146 @@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
22147 }
22148
22149 *errp = 0;
22150 - return csum_partial_copy_generic(src, (void __force *)dst,
22151 +
22152 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22153 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
22154 + dst += PAX_USER_SHADOW_BASE;
22155 +#endif
22156 +
22157 + return csum_partial_copy_generic(src, (void __force_kernel *)dst,
22158 len, isum, NULL, errp);
22159 }
22160 EXPORT_SYMBOL(csum_partial_copy_to_user);
22161 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
22162 index 51f1504..ddac4c1 100644
22163 --- a/arch/x86/lib/getuser.S
22164 +++ b/arch/x86/lib/getuser.S
22165 @@ -33,15 +33,38 @@
22166 #include <asm/asm-offsets.h>
22167 #include <asm/thread_info.h>
22168 #include <asm/asm.h>
22169 +#include <asm/segment.h>
22170 +#include <asm/pgtable.h>
22171 +#include <asm/alternative-asm.h>
22172 +
22173 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22174 +#define __copyuser_seg gs;
22175 +#else
22176 +#define __copyuser_seg
22177 +#endif
22178
22179 .text
22180 ENTRY(__get_user_1)
22181 CFI_STARTPROC
22182 +
22183 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22184 GET_THREAD_INFO(%_ASM_DX)
22185 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22186 jae bad_get_user
22187 -1: movzb (%_ASM_AX),%edx
22188 +
22189 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22190 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22191 + cmp %_ASM_DX,%_ASM_AX
22192 + jae 1234f
22193 + add %_ASM_DX,%_ASM_AX
22194 +1234:
22195 +#endif
22196 +
22197 +#endif
22198 +
22199 +1: __copyuser_seg movzb (%_ASM_AX),%edx
22200 xor %eax,%eax
22201 + pax_force_retaddr
22202 ret
22203 CFI_ENDPROC
22204 ENDPROC(__get_user_1)
22205 @@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
22206 ENTRY(__get_user_2)
22207 CFI_STARTPROC
22208 add $1,%_ASM_AX
22209 +
22210 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22211 jc bad_get_user
22212 GET_THREAD_INFO(%_ASM_DX)
22213 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22214 jae bad_get_user
22215 -2: movzwl -1(%_ASM_AX),%edx
22216 +
22217 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22218 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22219 + cmp %_ASM_DX,%_ASM_AX
22220 + jae 1234f
22221 + add %_ASM_DX,%_ASM_AX
22222 +1234:
22223 +#endif
22224 +
22225 +#endif
22226 +
22227 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
22228 xor %eax,%eax
22229 + pax_force_retaddr
22230 ret
22231 CFI_ENDPROC
22232 ENDPROC(__get_user_2)
22233 @@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
22234 ENTRY(__get_user_4)
22235 CFI_STARTPROC
22236 add $3,%_ASM_AX
22237 +
22238 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22239 jc bad_get_user
22240 GET_THREAD_INFO(%_ASM_DX)
22241 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22242 jae bad_get_user
22243 -3: mov -3(%_ASM_AX),%edx
22244 +
22245 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22246 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22247 + cmp %_ASM_DX,%_ASM_AX
22248 + jae 1234f
22249 + add %_ASM_DX,%_ASM_AX
22250 +1234:
22251 +#endif
22252 +
22253 +#endif
22254 +
22255 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
22256 xor %eax,%eax
22257 + pax_force_retaddr
22258 ret
22259 CFI_ENDPROC
22260 ENDPROC(__get_user_4)
22261 @@ -80,8 +131,18 @@ ENTRY(__get_user_8)
22262 GET_THREAD_INFO(%_ASM_DX)
22263 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22264 jae bad_get_user
22265 +
22266 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22267 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22268 + cmp %_ASM_DX,%_ASM_AX
22269 + jae 1234f
22270 + add %_ASM_DX,%_ASM_AX
22271 +1234:
22272 +#endif
22273 +
22274 4: movq -7(%_ASM_AX),%_ASM_DX
22275 xor %eax,%eax
22276 + pax_force_retaddr
22277 ret
22278 CFI_ENDPROC
22279 ENDPROC(__get_user_8)
22280 @@ -91,6 +152,7 @@ bad_get_user:
22281 CFI_STARTPROC
22282 xor %edx,%edx
22283 mov $(-EFAULT),%_ASM_AX
22284 + pax_force_retaddr
22285 ret
22286 CFI_ENDPROC
22287 END(bad_get_user)
22288 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
22289 index 05a95e7..326f2fa 100644
22290 --- a/arch/x86/lib/iomap_copy_64.S
22291 +++ b/arch/x86/lib/iomap_copy_64.S
22292 @@ -17,6 +17,7 @@
22293
22294 #include <linux/linkage.h>
22295 #include <asm/dwarf2.h>
22296 +#include <asm/alternative-asm.h>
22297
22298 /*
22299 * override generic version in lib/iomap_copy.c
22300 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
22301 CFI_STARTPROC
22302 movl %edx,%ecx
22303 rep movsd
22304 + pax_force_retaddr
22305 ret
22306 CFI_ENDPROC
22307 ENDPROC(__iowrite32_copy)
22308 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
22309 index ad5441e..610e351 100644
22310 --- a/arch/x86/lib/memcpy_64.S
22311 +++ b/arch/x86/lib/memcpy_64.S
22312 @@ -4,6 +4,7 @@
22313
22314 #include <asm/cpufeature.h>
22315 #include <asm/dwarf2.h>
22316 +#include <asm/alternative-asm.h>
22317
22318 /*
22319 * memcpy - Copy a memory block.
22320 @@ -34,6 +35,7 @@ memcpy_c:
22321 rep movsq
22322 movl %edx, %ecx
22323 rep movsb
22324 + pax_force_retaddr
22325 ret
22326 CFI_ENDPROC
22327 ENDPROC(memcpy_c)
22328 @@ -118,6 +120,7 @@ ENTRY(memcpy)
22329 jnz .Lloop_1
22330
22331 .Lend:
22332 + pax_force_retaddr 0, 1
22333 ret
22334 CFI_ENDPROC
22335 ENDPROC(memcpy)
22336 @@ -128,7 +131,7 @@ ENDPROC(__memcpy)
22337 * It is also a lot simpler. Use this when possible:
22338 */
22339
22340 - .section .altinstr_replacement, "ax"
22341 + .section .altinstr_replacement, "a"
22342 1: .byte 0xeb /* jmp <disp8> */
22343 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
22344 2:
22345 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
22346 index 2c59481..7e9ba4e 100644
22347 --- a/arch/x86/lib/memset_64.S
22348 +++ b/arch/x86/lib/memset_64.S
22349 @@ -2,6 +2,7 @@
22350
22351 #include <linux/linkage.h>
22352 #include <asm/dwarf2.h>
22353 +#include <asm/alternative-asm.h>
22354
22355 /*
22356 * ISO C memset - set a memory block to a byte value.
22357 @@ -28,6 +29,7 @@ memset_c:
22358 movl %r8d,%ecx
22359 rep stosb
22360 movq %r9,%rax
22361 + pax_force_retaddr
22362 ret
22363 CFI_ENDPROC
22364 ENDPROC(memset_c)
22365 @@ -35,13 +37,13 @@ ENDPROC(memset_c)
22366 ENTRY(memset)
22367 ENTRY(__memset)
22368 CFI_STARTPROC
22369 - movq %rdi,%r10
22370 movq %rdx,%r11
22371
22372 /* expand byte value */
22373 movzbl %sil,%ecx
22374 movabs $0x0101010101010101,%rax
22375 mul %rcx /* with rax, clobbers rdx */
22376 + movq %rdi,%rdx
22377
22378 /* align dst */
22379 movl %edi,%r9d
22380 @@ -95,7 +97,8 @@ ENTRY(__memset)
22381 jnz .Lloop_1
22382
22383 .Lende:
22384 - movq %r10,%rax
22385 + movq %rdx,%rax
22386 + pax_force_retaddr
22387 ret
22388
22389 CFI_RESTORE_STATE
22390 @@ -118,7 +121,7 @@ ENDPROC(__memset)
22391
22392 #include <asm/cpufeature.h>
22393
22394 - .section .altinstr_replacement,"ax"
22395 + .section .altinstr_replacement,"a"
22396 1: .byte 0xeb /* jmp <disp8> */
22397 .byte (memset_c - memset) - (2f - 1b) /* offset */
22398 2:
22399 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
22400 index c9f2d9b..e7fd2c0 100644
22401 --- a/arch/x86/lib/mmx_32.c
22402 +++ b/arch/x86/lib/mmx_32.c
22403 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22404 {
22405 void *p;
22406 int i;
22407 + unsigned long cr0;
22408
22409 if (unlikely(in_interrupt()))
22410 return __memcpy(to, from, len);
22411 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22412 kernel_fpu_begin();
22413
22414 __asm__ __volatile__ (
22415 - "1: prefetch (%0)\n" /* This set is 28 bytes */
22416 - " prefetch 64(%0)\n"
22417 - " prefetch 128(%0)\n"
22418 - " prefetch 192(%0)\n"
22419 - " prefetch 256(%0)\n"
22420 + "1: prefetch (%1)\n" /* This set is 28 bytes */
22421 + " prefetch 64(%1)\n"
22422 + " prefetch 128(%1)\n"
22423 + " prefetch 192(%1)\n"
22424 + " prefetch 256(%1)\n"
22425 "2: \n"
22426 ".section .fixup, \"ax\"\n"
22427 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22428 + "3: \n"
22429 +
22430 +#ifdef CONFIG_PAX_KERNEXEC
22431 + " movl %%cr0, %0\n"
22432 + " movl %0, %%eax\n"
22433 + " andl $0xFFFEFFFF, %%eax\n"
22434 + " movl %%eax, %%cr0\n"
22435 +#endif
22436 +
22437 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22438 +
22439 +#ifdef CONFIG_PAX_KERNEXEC
22440 + " movl %0, %%cr0\n"
22441 +#endif
22442 +
22443 " jmp 2b\n"
22444 ".previous\n"
22445 _ASM_EXTABLE(1b, 3b)
22446 - : : "r" (from));
22447 + : "=&r" (cr0) : "r" (from) : "ax");
22448
22449 for ( ; i > 5; i--) {
22450 __asm__ __volatile__ (
22451 - "1: prefetch 320(%0)\n"
22452 - "2: movq (%0), %%mm0\n"
22453 - " movq 8(%0), %%mm1\n"
22454 - " movq 16(%0), %%mm2\n"
22455 - " movq 24(%0), %%mm3\n"
22456 - " movq %%mm0, (%1)\n"
22457 - " movq %%mm1, 8(%1)\n"
22458 - " movq %%mm2, 16(%1)\n"
22459 - " movq %%mm3, 24(%1)\n"
22460 - " movq 32(%0), %%mm0\n"
22461 - " movq 40(%0), %%mm1\n"
22462 - " movq 48(%0), %%mm2\n"
22463 - " movq 56(%0), %%mm3\n"
22464 - " movq %%mm0, 32(%1)\n"
22465 - " movq %%mm1, 40(%1)\n"
22466 - " movq %%mm2, 48(%1)\n"
22467 - " movq %%mm3, 56(%1)\n"
22468 + "1: prefetch 320(%1)\n"
22469 + "2: movq (%1), %%mm0\n"
22470 + " movq 8(%1), %%mm1\n"
22471 + " movq 16(%1), %%mm2\n"
22472 + " movq 24(%1), %%mm3\n"
22473 + " movq %%mm0, (%2)\n"
22474 + " movq %%mm1, 8(%2)\n"
22475 + " movq %%mm2, 16(%2)\n"
22476 + " movq %%mm3, 24(%2)\n"
22477 + " movq 32(%1), %%mm0\n"
22478 + " movq 40(%1), %%mm1\n"
22479 + " movq 48(%1), %%mm2\n"
22480 + " movq 56(%1), %%mm3\n"
22481 + " movq %%mm0, 32(%2)\n"
22482 + " movq %%mm1, 40(%2)\n"
22483 + " movq %%mm2, 48(%2)\n"
22484 + " movq %%mm3, 56(%2)\n"
22485 ".section .fixup, \"ax\"\n"
22486 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22487 + "3:\n"
22488 +
22489 +#ifdef CONFIG_PAX_KERNEXEC
22490 + " movl %%cr0, %0\n"
22491 + " movl %0, %%eax\n"
22492 + " andl $0xFFFEFFFF, %%eax\n"
22493 + " movl %%eax, %%cr0\n"
22494 +#endif
22495 +
22496 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22497 +
22498 +#ifdef CONFIG_PAX_KERNEXEC
22499 + " movl %0, %%cr0\n"
22500 +#endif
22501 +
22502 " jmp 2b\n"
22503 ".previous\n"
22504 _ASM_EXTABLE(1b, 3b)
22505 - : : "r" (from), "r" (to) : "memory");
22506 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22507
22508 from += 64;
22509 to += 64;
22510 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
22511 static void fast_copy_page(void *to, void *from)
22512 {
22513 int i;
22514 + unsigned long cr0;
22515
22516 kernel_fpu_begin();
22517
22518 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
22519 * but that is for later. -AV
22520 */
22521 __asm__ __volatile__(
22522 - "1: prefetch (%0)\n"
22523 - " prefetch 64(%0)\n"
22524 - " prefetch 128(%0)\n"
22525 - " prefetch 192(%0)\n"
22526 - " prefetch 256(%0)\n"
22527 + "1: prefetch (%1)\n"
22528 + " prefetch 64(%1)\n"
22529 + " prefetch 128(%1)\n"
22530 + " prefetch 192(%1)\n"
22531 + " prefetch 256(%1)\n"
22532 "2: \n"
22533 ".section .fixup, \"ax\"\n"
22534 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22535 + "3: \n"
22536 +
22537 +#ifdef CONFIG_PAX_KERNEXEC
22538 + " movl %%cr0, %0\n"
22539 + " movl %0, %%eax\n"
22540 + " andl $0xFFFEFFFF, %%eax\n"
22541 + " movl %%eax, %%cr0\n"
22542 +#endif
22543 +
22544 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22545 +
22546 +#ifdef CONFIG_PAX_KERNEXEC
22547 + " movl %0, %%cr0\n"
22548 +#endif
22549 +
22550 " jmp 2b\n"
22551 ".previous\n"
22552 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
22553 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22554
22555 for (i = 0; i < (4096-320)/64; i++) {
22556 __asm__ __volatile__ (
22557 - "1: prefetch 320(%0)\n"
22558 - "2: movq (%0), %%mm0\n"
22559 - " movntq %%mm0, (%1)\n"
22560 - " movq 8(%0), %%mm1\n"
22561 - " movntq %%mm1, 8(%1)\n"
22562 - " movq 16(%0), %%mm2\n"
22563 - " movntq %%mm2, 16(%1)\n"
22564 - " movq 24(%0), %%mm3\n"
22565 - " movntq %%mm3, 24(%1)\n"
22566 - " movq 32(%0), %%mm4\n"
22567 - " movntq %%mm4, 32(%1)\n"
22568 - " movq 40(%0), %%mm5\n"
22569 - " movntq %%mm5, 40(%1)\n"
22570 - " movq 48(%0), %%mm6\n"
22571 - " movntq %%mm6, 48(%1)\n"
22572 - " movq 56(%0), %%mm7\n"
22573 - " movntq %%mm7, 56(%1)\n"
22574 + "1: prefetch 320(%1)\n"
22575 + "2: movq (%1), %%mm0\n"
22576 + " movntq %%mm0, (%2)\n"
22577 + " movq 8(%1), %%mm1\n"
22578 + " movntq %%mm1, 8(%2)\n"
22579 + " movq 16(%1), %%mm2\n"
22580 + " movntq %%mm2, 16(%2)\n"
22581 + " movq 24(%1), %%mm3\n"
22582 + " movntq %%mm3, 24(%2)\n"
22583 + " movq 32(%1), %%mm4\n"
22584 + " movntq %%mm4, 32(%2)\n"
22585 + " movq 40(%1), %%mm5\n"
22586 + " movntq %%mm5, 40(%2)\n"
22587 + " movq 48(%1), %%mm6\n"
22588 + " movntq %%mm6, 48(%2)\n"
22589 + " movq 56(%1), %%mm7\n"
22590 + " movntq %%mm7, 56(%2)\n"
22591 ".section .fixup, \"ax\"\n"
22592 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22593 + "3:\n"
22594 +
22595 +#ifdef CONFIG_PAX_KERNEXEC
22596 + " movl %%cr0, %0\n"
22597 + " movl %0, %%eax\n"
22598 + " andl $0xFFFEFFFF, %%eax\n"
22599 + " movl %%eax, %%cr0\n"
22600 +#endif
22601 +
22602 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22603 +
22604 +#ifdef CONFIG_PAX_KERNEXEC
22605 + " movl %0, %%cr0\n"
22606 +#endif
22607 +
22608 " jmp 2b\n"
22609 ".previous\n"
22610 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
22611 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22612
22613 from += 64;
22614 to += 64;
22615 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
22616 static void fast_copy_page(void *to, void *from)
22617 {
22618 int i;
22619 + unsigned long cr0;
22620
22621 kernel_fpu_begin();
22622
22623 __asm__ __volatile__ (
22624 - "1: prefetch (%0)\n"
22625 - " prefetch 64(%0)\n"
22626 - " prefetch 128(%0)\n"
22627 - " prefetch 192(%0)\n"
22628 - " prefetch 256(%0)\n"
22629 + "1: prefetch (%1)\n"
22630 + " prefetch 64(%1)\n"
22631 + " prefetch 128(%1)\n"
22632 + " prefetch 192(%1)\n"
22633 + " prefetch 256(%1)\n"
22634 "2: \n"
22635 ".section .fixup, \"ax\"\n"
22636 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22637 + "3: \n"
22638 +
22639 +#ifdef CONFIG_PAX_KERNEXEC
22640 + " movl %%cr0, %0\n"
22641 + " movl %0, %%eax\n"
22642 + " andl $0xFFFEFFFF, %%eax\n"
22643 + " movl %%eax, %%cr0\n"
22644 +#endif
22645 +
22646 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22647 +
22648 +#ifdef CONFIG_PAX_KERNEXEC
22649 + " movl %0, %%cr0\n"
22650 +#endif
22651 +
22652 " jmp 2b\n"
22653 ".previous\n"
22654 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
22655 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22656
22657 for (i = 0; i < 4096/64; i++) {
22658 __asm__ __volatile__ (
22659 - "1: prefetch 320(%0)\n"
22660 - "2: movq (%0), %%mm0\n"
22661 - " movq 8(%0), %%mm1\n"
22662 - " movq 16(%0), %%mm2\n"
22663 - " movq 24(%0), %%mm3\n"
22664 - " movq %%mm0, (%1)\n"
22665 - " movq %%mm1, 8(%1)\n"
22666 - " movq %%mm2, 16(%1)\n"
22667 - " movq %%mm3, 24(%1)\n"
22668 - " movq 32(%0), %%mm0\n"
22669 - " movq 40(%0), %%mm1\n"
22670 - " movq 48(%0), %%mm2\n"
22671 - " movq 56(%0), %%mm3\n"
22672 - " movq %%mm0, 32(%1)\n"
22673 - " movq %%mm1, 40(%1)\n"
22674 - " movq %%mm2, 48(%1)\n"
22675 - " movq %%mm3, 56(%1)\n"
22676 + "1: prefetch 320(%1)\n"
22677 + "2: movq (%1), %%mm0\n"
22678 + " movq 8(%1), %%mm1\n"
22679 + " movq 16(%1), %%mm2\n"
22680 + " movq 24(%1), %%mm3\n"
22681 + " movq %%mm0, (%2)\n"
22682 + " movq %%mm1, 8(%2)\n"
22683 + " movq %%mm2, 16(%2)\n"
22684 + " movq %%mm3, 24(%2)\n"
22685 + " movq 32(%1), %%mm0\n"
22686 + " movq 40(%1), %%mm1\n"
22687 + " movq 48(%1), %%mm2\n"
22688 + " movq 56(%1), %%mm3\n"
22689 + " movq %%mm0, 32(%2)\n"
22690 + " movq %%mm1, 40(%2)\n"
22691 + " movq %%mm2, 48(%2)\n"
22692 + " movq %%mm3, 56(%2)\n"
22693 ".section .fixup, \"ax\"\n"
22694 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22695 + "3:\n"
22696 +
22697 +#ifdef CONFIG_PAX_KERNEXEC
22698 + " movl %%cr0, %0\n"
22699 + " movl %0, %%eax\n"
22700 + " andl $0xFFFEFFFF, %%eax\n"
22701 + " movl %%eax, %%cr0\n"
22702 +#endif
22703 +
22704 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22705 +
22706 +#ifdef CONFIG_PAX_KERNEXEC
22707 + " movl %0, %%cr0\n"
22708 +#endif
22709 +
22710 " jmp 2b\n"
22711 ".previous\n"
22712 _ASM_EXTABLE(1b, 3b)
22713 - : : "r" (from), "r" (to) : "memory");
22714 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22715
22716 from += 64;
22717 to += 64;
22718 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
22719 index 69fa106..adda88b 100644
22720 --- a/arch/x86/lib/msr-reg.S
22721 +++ b/arch/x86/lib/msr-reg.S
22722 @@ -3,6 +3,7 @@
22723 #include <asm/dwarf2.h>
22724 #include <asm/asm.h>
22725 #include <asm/msr.h>
22726 +#include <asm/alternative-asm.h>
22727
22728 #ifdef CONFIG_X86_64
22729 /*
22730 @@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
22731 CFI_STARTPROC
22732 pushq_cfi %rbx
22733 pushq_cfi %rbp
22734 - movq %rdi, %r10 /* Save pointer */
22735 + movq %rdi, %r9 /* Save pointer */
22736 xorl %r11d, %r11d /* Return value */
22737 movl (%rdi), %eax
22738 movl 4(%rdi), %ecx
22739 @@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
22740 movl 28(%rdi), %edi
22741 CFI_REMEMBER_STATE
22742 1: \op
22743 -2: movl %eax, (%r10)
22744 +2: movl %eax, (%r9)
22745 movl %r11d, %eax /* Return value */
22746 - movl %ecx, 4(%r10)
22747 - movl %edx, 8(%r10)
22748 - movl %ebx, 12(%r10)
22749 - movl %ebp, 20(%r10)
22750 - movl %esi, 24(%r10)
22751 - movl %edi, 28(%r10)
22752 + movl %ecx, 4(%r9)
22753 + movl %edx, 8(%r9)
22754 + movl %ebx, 12(%r9)
22755 + movl %ebp, 20(%r9)
22756 + movl %esi, 24(%r9)
22757 + movl %edi, 28(%r9)
22758 popq_cfi %rbp
22759 popq_cfi %rbx
22760 + pax_force_retaddr
22761 ret
22762 3:
22763 CFI_RESTORE_STATE
22764 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
22765 index 36b0d15..d381858 100644
22766 --- a/arch/x86/lib/putuser.S
22767 +++ b/arch/x86/lib/putuser.S
22768 @@ -15,7 +15,9 @@
22769 #include <asm/thread_info.h>
22770 #include <asm/errno.h>
22771 #include <asm/asm.h>
22772 -
22773 +#include <asm/segment.h>
22774 +#include <asm/pgtable.h>
22775 +#include <asm/alternative-asm.h>
22776
22777 /*
22778 * __put_user_X
22779 @@ -29,52 +31,119 @@
22780 * as they get called from within inline assembly.
22781 */
22782
22783 -#define ENTER CFI_STARTPROC ; \
22784 - GET_THREAD_INFO(%_ASM_BX)
22785 -#define EXIT ret ; \
22786 +#define ENTER CFI_STARTPROC
22787 +#define EXIT pax_force_retaddr; ret ; \
22788 CFI_ENDPROC
22789
22790 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22791 +#define _DEST %_ASM_CX,%_ASM_BX
22792 +#else
22793 +#define _DEST %_ASM_CX
22794 +#endif
22795 +
22796 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22797 +#define __copyuser_seg gs;
22798 +#else
22799 +#define __copyuser_seg
22800 +#endif
22801 +
22802 .text
22803 ENTRY(__put_user_1)
22804 ENTER
22805 +
22806 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22807 + GET_THREAD_INFO(%_ASM_BX)
22808 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
22809 jae bad_put_user
22810 -1: movb %al,(%_ASM_CX)
22811 +
22812 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22813 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22814 + cmp %_ASM_BX,%_ASM_CX
22815 + jb 1234f
22816 + xor %ebx,%ebx
22817 +1234:
22818 +#endif
22819 +
22820 +#endif
22821 +
22822 +1: __copyuser_seg movb %al,(_DEST)
22823 xor %eax,%eax
22824 EXIT
22825 ENDPROC(__put_user_1)
22826
22827 ENTRY(__put_user_2)
22828 ENTER
22829 +
22830 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22831 + GET_THREAD_INFO(%_ASM_BX)
22832 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22833 sub $1,%_ASM_BX
22834 cmp %_ASM_BX,%_ASM_CX
22835 jae bad_put_user
22836 -2: movw %ax,(%_ASM_CX)
22837 +
22838 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22839 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22840 + cmp %_ASM_BX,%_ASM_CX
22841 + jb 1234f
22842 + xor %ebx,%ebx
22843 +1234:
22844 +#endif
22845 +
22846 +#endif
22847 +
22848 +2: __copyuser_seg movw %ax,(_DEST)
22849 xor %eax,%eax
22850 EXIT
22851 ENDPROC(__put_user_2)
22852
22853 ENTRY(__put_user_4)
22854 ENTER
22855 +
22856 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22857 + GET_THREAD_INFO(%_ASM_BX)
22858 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22859 sub $3,%_ASM_BX
22860 cmp %_ASM_BX,%_ASM_CX
22861 jae bad_put_user
22862 -3: movl %eax,(%_ASM_CX)
22863 +
22864 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22865 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22866 + cmp %_ASM_BX,%_ASM_CX
22867 + jb 1234f
22868 + xor %ebx,%ebx
22869 +1234:
22870 +#endif
22871 +
22872 +#endif
22873 +
22874 +3: __copyuser_seg movl %eax,(_DEST)
22875 xor %eax,%eax
22876 EXIT
22877 ENDPROC(__put_user_4)
22878
22879 ENTRY(__put_user_8)
22880 ENTER
22881 +
22882 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22883 + GET_THREAD_INFO(%_ASM_BX)
22884 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22885 sub $7,%_ASM_BX
22886 cmp %_ASM_BX,%_ASM_CX
22887 jae bad_put_user
22888 -4: mov %_ASM_AX,(%_ASM_CX)
22889 +
22890 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22891 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22892 + cmp %_ASM_BX,%_ASM_CX
22893 + jb 1234f
22894 + xor %ebx,%ebx
22895 +1234:
22896 +#endif
22897 +
22898 +#endif
22899 +
22900 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
22901 #ifdef CONFIG_X86_32
22902 -5: movl %edx,4(%_ASM_CX)
22903 +5: __copyuser_seg movl %edx,4(_DEST)
22904 #endif
22905 xor %eax,%eax
22906 EXIT
22907 diff --git a/arch/x86/lib/rwlock_64.S b/arch/x86/lib/rwlock_64.S
22908 index 05ea55f..6345b9a 100644
22909 --- a/arch/x86/lib/rwlock_64.S
22910 +++ b/arch/x86/lib/rwlock_64.S
22911 @@ -2,6 +2,7 @@
22912
22913 #include <linux/linkage.h>
22914 #include <asm/rwlock.h>
22915 +#include <asm/asm.h>
22916 #include <asm/alternative-asm.h>
22917 #include <asm/dwarf2.h>
22918
22919 @@ -10,13 +11,34 @@ ENTRY(__write_lock_failed)
22920 CFI_STARTPROC
22921 LOCK_PREFIX
22922 addl $RW_LOCK_BIAS,(%rdi)
22923 +
22924 +#ifdef CONFIG_PAX_REFCOUNT
22925 + jno 1234f
22926 + LOCK_PREFIX
22927 + subl $RW_LOCK_BIAS,(%rdi)
22928 + int $4
22929 +1234:
22930 + _ASM_EXTABLE(1234b, 1234b)
22931 +#endif
22932 +
22933 1: rep
22934 nop
22935 cmpl $RW_LOCK_BIAS,(%rdi)
22936 jne 1b
22937 LOCK_PREFIX
22938 subl $RW_LOCK_BIAS,(%rdi)
22939 +
22940 +#ifdef CONFIG_PAX_REFCOUNT
22941 + jno 1234f
22942 + LOCK_PREFIX
22943 + addl $RW_LOCK_BIAS,(%rdi)
22944 + int $4
22945 +1234:
22946 + _ASM_EXTABLE(1234b, 1234b)
22947 +#endif
22948 +
22949 jnz __write_lock_failed
22950 + pax_force_retaddr
22951 ret
22952 CFI_ENDPROC
22953 END(__write_lock_failed)
22954 @@ -26,13 +48,34 @@ ENTRY(__read_lock_failed)
22955 CFI_STARTPROC
22956 LOCK_PREFIX
22957 incl (%rdi)
22958 +
22959 +#ifdef CONFIG_PAX_REFCOUNT
22960 + jno 1234f
22961 + LOCK_PREFIX
22962 + decl (%rdi)
22963 + int $4
22964 +1234:
22965 + _ASM_EXTABLE(1234b, 1234b)
22966 +#endif
22967 +
22968 1: rep
22969 nop
22970 cmpl $1,(%rdi)
22971 js 1b
22972 LOCK_PREFIX
22973 decl (%rdi)
22974 +
22975 +#ifdef CONFIG_PAX_REFCOUNT
22976 + jno 1234f
22977 + LOCK_PREFIX
22978 + incl (%rdi)
22979 + int $4
22980 +1234:
22981 + _ASM_EXTABLE(1234b, 1234b)
22982 +#endif
22983 +
22984 js __read_lock_failed
22985 + pax_force_retaddr
22986 ret
22987 CFI_ENDPROC
22988 END(__read_lock_failed)
22989 diff --git a/arch/x86/lib/rwsem_64.S b/arch/x86/lib/rwsem_64.S
22990 index 15acecf..f768b10 100644
22991 --- a/arch/x86/lib/rwsem_64.S
22992 +++ b/arch/x86/lib/rwsem_64.S
22993 @@ -48,6 +48,7 @@ ENTRY(call_rwsem_down_read_failed)
22994 call rwsem_down_read_failed
22995 popq %rdx
22996 restore_common_regs
22997 + pax_force_retaddr
22998 ret
22999 ENDPROC(call_rwsem_down_read_failed)
23000
23001 @@ -56,6 +57,7 @@ ENTRY(call_rwsem_down_write_failed)
23002 movq %rax,%rdi
23003 call rwsem_down_write_failed
23004 restore_common_regs
23005 + pax_force_retaddr
23006 ret
23007 ENDPROC(call_rwsem_down_write_failed)
23008
23009 @@ -66,7 +68,8 @@ ENTRY(call_rwsem_wake)
23010 movq %rax,%rdi
23011 call rwsem_wake
23012 restore_common_regs
23013 -1: ret
23014 +1: pax_force_retaddr
23015 + ret
23016 ENDPROC(call_rwsem_wake)
23017
23018 /* Fix up special calling conventions */
23019 @@ -77,5 +80,6 @@ ENTRY(call_rwsem_downgrade_wake)
23020 call rwsem_downgrade_wake
23021 popq %rdx
23022 restore_common_regs
23023 + pax_force_retaddr
23024 ret
23025 ENDPROC(call_rwsem_downgrade_wake)
23026 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
23027 index bf9a7d5..fb06ab5 100644
23028 --- a/arch/x86/lib/thunk_64.S
23029 +++ b/arch/x86/lib/thunk_64.S
23030 @@ -10,7 +10,8 @@
23031 #include <asm/dwarf2.h>
23032 #include <asm/calling.h>
23033 #include <asm/rwlock.h>
23034 -
23035 + #include <asm/alternative-asm.h>
23036 +
23037 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
23038 .macro thunk name,func
23039 .globl \name
23040 @@ -70,6 +71,7 @@
23041 SAVE_ARGS
23042 restore:
23043 RESTORE_ARGS
23044 + pax_force_retaddr
23045 ret
23046 CFI_ENDPROC
23047
23048 @@ -77,5 +79,6 @@ restore:
23049 SAVE_ARGS
23050 restore_norax:
23051 RESTORE_ARGS 1
23052 + pax_force_retaddr
23053 ret
23054 CFI_ENDPROC
23055 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
23056 index 1f118d4..ec4a953 100644
23057 --- a/arch/x86/lib/usercopy_32.c
23058 +++ b/arch/x86/lib/usercopy_32.c
23059 @@ -43,7 +43,7 @@ do { \
23060 __asm__ __volatile__( \
23061 " testl %1,%1\n" \
23062 " jz 2f\n" \
23063 - "0: lodsb\n" \
23064 + "0: "__copyuser_seg"lodsb\n" \
23065 " stosb\n" \
23066 " testb %%al,%%al\n" \
23067 " jz 1f\n" \
23068 @@ -128,10 +128,12 @@ do { \
23069 int __d0; \
23070 might_fault(); \
23071 __asm__ __volatile__( \
23072 + __COPYUSER_SET_ES \
23073 "0: rep; stosl\n" \
23074 " movl %2,%0\n" \
23075 "1: rep; stosb\n" \
23076 "2:\n" \
23077 + __COPYUSER_RESTORE_ES \
23078 ".section .fixup,\"ax\"\n" \
23079 "3: lea 0(%2,%0,4),%0\n" \
23080 " jmp 2b\n" \
23081 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
23082 might_fault();
23083
23084 __asm__ __volatile__(
23085 + __COPYUSER_SET_ES
23086 " testl %0, %0\n"
23087 " jz 3f\n"
23088 " andl %0,%%ecx\n"
23089 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
23090 " subl %%ecx,%0\n"
23091 " addl %0,%%eax\n"
23092 "1:\n"
23093 + __COPYUSER_RESTORE_ES
23094 ".section .fixup,\"ax\"\n"
23095 "2: xorl %%eax,%%eax\n"
23096 " jmp 1b\n"
23097 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
23098
23099 #ifdef CONFIG_X86_INTEL_USERCOPY
23100 static unsigned long
23101 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
23102 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
23103 {
23104 int d0, d1;
23105 __asm__ __volatile__(
23106 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23107 " .align 2,0x90\n"
23108 "3: movl 0(%4), %%eax\n"
23109 "4: movl 4(%4), %%edx\n"
23110 - "5: movl %%eax, 0(%3)\n"
23111 - "6: movl %%edx, 4(%3)\n"
23112 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
23113 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
23114 "7: movl 8(%4), %%eax\n"
23115 "8: movl 12(%4),%%edx\n"
23116 - "9: movl %%eax, 8(%3)\n"
23117 - "10: movl %%edx, 12(%3)\n"
23118 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
23119 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
23120 "11: movl 16(%4), %%eax\n"
23121 "12: movl 20(%4), %%edx\n"
23122 - "13: movl %%eax, 16(%3)\n"
23123 - "14: movl %%edx, 20(%3)\n"
23124 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
23125 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
23126 "15: movl 24(%4), %%eax\n"
23127 "16: movl 28(%4), %%edx\n"
23128 - "17: movl %%eax, 24(%3)\n"
23129 - "18: movl %%edx, 28(%3)\n"
23130 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
23131 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
23132 "19: movl 32(%4), %%eax\n"
23133 "20: movl 36(%4), %%edx\n"
23134 - "21: movl %%eax, 32(%3)\n"
23135 - "22: movl %%edx, 36(%3)\n"
23136 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
23137 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
23138 "23: movl 40(%4), %%eax\n"
23139 "24: movl 44(%4), %%edx\n"
23140 - "25: movl %%eax, 40(%3)\n"
23141 - "26: movl %%edx, 44(%3)\n"
23142 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
23143 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
23144 "27: movl 48(%4), %%eax\n"
23145 "28: movl 52(%4), %%edx\n"
23146 - "29: movl %%eax, 48(%3)\n"
23147 - "30: movl %%edx, 52(%3)\n"
23148 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
23149 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
23150 "31: movl 56(%4), %%eax\n"
23151 "32: movl 60(%4), %%edx\n"
23152 - "33: movl %%eax, 56(%3)\n"
23153 - "34: movl %%edx, 60(%3)\n"
23154 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
23155 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
23156 " addl $-64, %0\n"
23157 " addl $64, %4\n"
23158 " addl $64, %3\n"
23159 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23160 " shrl $2, %0\n"
23161 " andl $3, %%eax\n"
23162 " cld\n"
23163 + __COPYUSER_SET_ES
23164 "99: rep; movsl\n"
23165 "36: movl %%eax, %0\n"
23166 "37: rep; movsb\n"
23167 "100:\n"
23168 + __COPYUSER_RESTORE_ES
23169 + ".section .fixup,\"ax\"\n"
23170 + "101: lea 0(%%eax,%0,4),%0\n"
23171 + " jmp 100b\n"
23172 + ".previous\n"
23173 + ".section __ex_table,\"a\"\n"
23174 + " .align 4\n"
23175 + " .long 1b,100b\n"
23176 + " .long 2b,100b\n"
23177 + " .long 3b,100b\n"
23178 + " .long 4b,100b\n"
23179 + " .long 5b,100b\n"
23180 + " .long 6b,100b\n"
23181 + " .long 7b,100b\n"
23182 + " .long 8b,100b\n"
23183 + " .long 9b,100b\n"
23184 + " .long 10b,100b\n"
23185 + " .long 11b,100b\n"
23186 + " .long 12b,100b\n"
23187 + " .long 13b,100b\n"
23188 + " .long 14b,100b\n"
23189 + " .long 15b,100b\n"
23190 + " .long 16b,100b\n"
23191 + " .long 17b,100b\n"
23192 + " .long 18b,100b\n"
23193 + " .long 19b,100b\n"
23194 + " .long 20b,100b\n"
23195 + " .long 21b,100b\n"
23196 + " .long 22b,100b\n"
23197 + " .long 23b,100b\n"
23198 + " .long 24b,100b\n"
23199 + " .long 25b,100b\n"
23200 + " .long 26b,100b\n"
23201 + " .long 27b,100b\n"
23202 + " .long 28b,100b\n"
23203 + " .long 29b,100b\n"
23204 + " .long 30b,100b\n"
23205 + " .long 31b,100b\n"
23206 + " .long 32b,100b\n"
23207 + " .long 33b,100b\n"
23208 + " .long 34b,100b\n"
23209 + " .long 35b,100b\n"
23210 + " .long 36b,100b\n"
23211 + " .long 37b,100b\n"
23212 + " .long 99b,101b\n"
23213 + ".previous"
23214 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
23215 + : "1"(to), "2"(from), "0"(size)
23216 + : "eax", "edx", "memory");
23217 + return size;
23218 +}
23219 +
23220 +static unsigned long
23221 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
23222 +{
23223 + int d0, d1;
23224 + __asm__ __volatile__(
23225 + " .align 2,0x90\n"
23226 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
23227 + " cmpl $67, %0\n"
23228 + " jbe 3f\n"
23229 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
23230 + " .align 2,0x90\n"
23231 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
23232 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
23233 + "5: movl %%eax, 0(%3)\n"
23234 + "6: movl %%edx, 4(%3)\n"
23235 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
23236 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
23237 + "9: movl %%eax, 8(%3)\n"
23238 + "10: movl %%edx, 12(%3)\n"
23239 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
23240 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
23241 + "13: movl %%eax, 16(%3)\n"
23242 + "14: movl %%edx, 20(%3)\n"
23243 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
23244 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
23245 + "17: movl %%eax, 24(%3)\n"
23246 + "18: movl %%edx, 28(%3)\n"
23247 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
23248 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
23249 + "21: movl %%eax, 32(%3)\n"
23250 + "22: movl %%edx, 36(%3)\n"
23251 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
23252 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
23253 + "25: movl %%eax, 40(%3)\n"
23254 + "26: movl %%edx, 44(%3)\n"
23255 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
23256 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
23257 + "29: movl %%eax, 48(%3)\n"
23258 + "30: movl %%edx, 52(%3)\n"
23259 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
23260 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
23261 + "33: movl %%eax, 56(%3)\n"
23262 + "34: movl %%edx, 60(%3)\n"
23263 + " addl $-64, %0\n"
23264 + " addl $64, %4\n"
23265 + " addl $64, %3\n"
23266 + " cmpl $63, %0\n"
23267 + " ja 1b\n"
23268 + "35: movl %0, %%eax\n"
23269 + " shrl $2, %0\n"
23270 + " andl $3, %%eax\n"
23271 + " cld\n"
23272 + "99: rep; "__copyuser_seg" movsl\n"
23273 + "36: movl %%eax, %0\n"
23274 + "37: rep; "__copyuser_seg" movsb\n"
23275 + "100:\n"
23276 ".section .fixup,\"ax\"\n"
23277 "101: lea 0(%%eax,%0,4),%0\n"
23278 " jmp 100b\n"
23279 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23280 int d0, d1;
23281 __asm__ __volatile__(
23282 " .align 2,0x90\n"
23283 - "0: movl 32(%4), %%eax\n"
23284 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23285 " cmpl $67, %0\n"
23286 " jbe 2f\n"
23287 - "1: movl 64(%4), %%eax\n"
23288 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23289 " .align 2,0x90\n"
23290 - "2: movl 0(%4), %%eax\n"
23291 - "21: movl 4(%4), %%edx\n"
23292 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23293 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23294 " movl %%eax, 0(%3)\n"
23295 " movl %%edx, 4(%3)\n"
23296 - "3: movl 8(%4), %%eax\n"
23297 - "31: movl 12(%4),%%edx\n"
23298 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23299 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23300 " movl %%eax, 8(%3)\n"
23301 " movl %%edx, 12(%3)\n"
23302 - "4: movl 16(%4), %%eax\n"
23303 - "41: movl 20(%4), %%edx\n"
23304 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23305 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23306 " movl %%eax, 16(%3)\n"
23307 " movl %%edx, 20(%3)\n"
23308 - "10: movl 24(%4), %%eax\n"
23309 - "51: movl 28(%4), %%edx\n"
23310 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23311 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23312 " movl %%eax, 24(%3)\n"
23313 " movl %%edx, 28(%3)\n"
23314 - "11: movl 32(%4), %%eax\n"
23315 - "61: movl 36(%4), %%edx\n"
23316 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23317 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23318 " movl %%eax, 32(%3)\n"
23319 " movl %%edx, 36(%3)\n"
23320 - "12: movl 40(%4), %%eax\n"
23321 - "71: movl 44(%4), %%edx\n"
23322 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23323 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23324 " movl %%eax, 40(%3)\n"
23325 " movl %%edx, 44(%3)\n"
23326 - "13: movl 48(%4), %%eax\n"
23327 - "81: movl 52(%4), %%edx\n"
23328 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23329 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23330 " movl %%eax, 48(%3)\n"
23331 " movl %%edx, 52(%3)\n"
23332 - "14: movl 56(%4), %%eax\n"
23333 - "91: movl 60(%4), %%edx\n"
23334 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23335 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23336 " movl %%eax, 56(%3)\n"
23337 " movl %%edx, 60(%3)\n"
23338 " addl $-64, %0\n"
23339 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23340 " shrl $2, %0\n"
23341 " andl $3, %%eax\n"
23342 " cld\n"
23343 - "6: rep; movsl\n"
23344 + "6: rep; "__copyuser_seg" movsl\n"
23345 " movl %%eax,%0\n"
23346 - "7: rep; movsb\n"
23347 + "7: rep; "__copyuser_seg" movsb\n"
23348 "8:\n"
23349 ".section .fixup,\"ax\"\n"
23350 "9: lea 0(%%eax,%0,4),%0\n"
23351 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23352
23353 __asm__ __volatile__(
23354 " .align 2,0x90\n"
23355 - "0: movl 32(%4), %%eax\n"
23356 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23357 " cmpl $67, %0\n"
23358 " jbe 2f\n"
23359 - "1: movl 64(%4), %%eax\n"
23360 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23361 " .align 2,0x90\n"
23362 - "2: movl 0(%4), %%eax\n"
23363 - "21: movl 4(%4), %%edx\n"
23364 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23365 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23366 " movnti %%eax, 0(%3)\n"
23367 " movnti %%edx, 4(%3)\n"
23368 - "3: movl 8(%4), %%eax\n"
23369 - "31: movl 12(%4),%%edx\n"
23370 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23371 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23372 " movnti %%eax, 8(%3)\n"
23373 " movnti %%edx, 12(%3)\n"
23374 - "4: movl 16(%4), %%eax\n"
23375 - "41: movl 20(%4), %%edx\n"
23376 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23377 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23378 " movnti %%eax, 16(%3)\n"
23379 " movnti %%edx, 20(%3)\n"
23380 - "10: movl 24(%4), %%eax\n"
23381 - "51: movl 28(%4), %%edx\n"
23382 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23383 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23384 " movnti %%eax, 24(%3)\n"
23385 " movnti %%edx, 28(%3)\n"
23386 - "11: movl 32(%4), %%eax\n"
23387 - "61: movl 36(%4), %%edx\n"
23388 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23389 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23390 " movnti %%eax, 32(%3)\n"
23391 " movnti %%edx, 36(%3)\n"
23392 - "12: movl 40(%4), %%eax\n"
23393 - "71: movl 44(%4), %%edx\n"
23394 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23395 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23396 " movnti %%eax, 40(%3)\n"
23397 " movnti %%edx, 44(%3)\n"
23398 - "13: movl 48(%4), %%eax\n"
23399 - "81: movl 52(%4), %%edx\n"
23400 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23401 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23402 " movnti %%eax, 48(%3)\n"
23403 " movnti %%edx, 52(%3)\n"
23404 - "14: movl 56(%4), %%eax\n"
23405 - "91: movl 60(%4), %%edx\n"
23406 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23407 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23408 " movnti %%eax, 56(%3)\n"
23409 " movnti %%edx, 60(%3)\n"
23410 " addl $-64, %0\n"
23411 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23412 " shrl $2, %0\n"
23413 " andl $3, %%eax\n"
23414 " cld\n"
23415 - "6: rep; movsl\n"
23416 + "6: rep; "__copyuser_seg" movsl\n"
23417 " movl %%eax,%0\n"
23418 - "7: rep; movsb\n"
23419 + "7: rep; "__copyuser_seg" movsb\n"
23420 "8:\n"
23421 ".section .fixup,\"ax\"\n"
23422 "9: lea 0(%%eax,%0,4),%0\n"
23423 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
23424
23425 __asm__ __volatile__(
23426 " .align 2,0x90\n"
23427 - "0: movl 32(%4), %%eax\n"
23428 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23429 " cmpl $67, %0\n"
23430 " jbe 2f\n"
23431 - "1: movl 64(%4), %%eax\n"
23432 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23433 " .align 2,0x90\n"
23434 - "2: movl 0(%4), %%eax\n"
23435 - "21: movl 4(%4), %%edx\n"
23436 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23437 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23438 " movnti %%eax, 0(%3)\n"
23439 " movnti %%edx, 4(%3)\n"
23440 - "3: movl 8(%4), %%eax\n"
23441 - "31: movl 12(%4),%%edx\n"
23442 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23443 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23444 " movnti %%eax, 8(%3)\n"
23445 " movnti %%edx, 12(%3)\n"
23446 - "4: movl 16(%4), %%eax\n"
23447 - "41: movl 20(%4), %%edx\n"
23448 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23449 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23450 " movnti %%eax, 16(%3)\n"
23451 " movnti %%edx, 20(%3)\n"
23452 - "10: movl 24(%4), %%eax\n"
23453 - "51: movl 28(%4), %%edx\n"
23454 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23455 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23456 " movnti %%eax, 24(%3)\n"
23457 " movnti %%edx, 28(%3)\n"
23458 - "11: movl 32(%4), %%eax\n"
23459 - "61: movl 36(%4), %%edx\n"
23460 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23461 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23462 " movnti %%eax, 32(%3)\n"
23463 " movnti %%edx, 36(%3)\n"
23464 - "12: movl 40(%4), %%eax\n"
23465 - "71: movl 44(%4), %%edx\n"
23466 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23467 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23468 " movnti %%eax, 40(%3)\n"
23469 " movnti %%edx, 44(%3)\n"
23470 - "13: movl 48(%4), %%eax\n"
23471 - "81: movl 52(%4), %%edx\n"
23472 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23473 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23474 " movnti %%eax, 48(%3)\n"
23475 " movnti %%edx, 52(%3)\n"
23476 - "14: movl 56(%4), %%eax\n"
23477 - "91: movl 60(%4), %%edx\n"
23478 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23479 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23480 " movnti %%eax, 56(%3)\n"
23481 " movnti %%edx, 60(%3)\n"
23482 " addl $-64, %0\n"
23483 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
23484 " shrl $2, %0\n"
23485 " andl $3, %%eax\n"
23486 " cld\n"
23487 - "6: rep; movsl\n"
23488 + "6: rep; "__copyuser_seg" movsl\n"
23489 " movl %%eax,%0\n"
23490 - "7: rep; movsb\n"
23491 + "7: rep; "__copyuser_seg" movsb\n"
23492 "8:\n"
23493 ".section .fixup,\"ax\"\n"
23494 "9: lea 0(%%eax,%0,4),%0\n"
23495 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
23496 */
23497 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
23498 unsigned long size);
23499 -unsigned long __copy_user_intel(void __user *to, const void *from,
23500 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
23501 + unsigned long size);
23502 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
23503 unsigned long size);
23504 unsigned long __copy_user_zeroing_intel_nocache(void *to,
23505 const void __user *from, unsigned long size);
23506 #endif /* CONFIG_X86_INTEL_USERCOPY */
23507
23508 /* Generic arbitrary sized copy. */
23509 -#define __copy_user(to, from, size) \
23510 +#define __copy_user(to, from, size, prefix, set, restore) \
23511 do { \
23512 int __d0, __d1, __d2; \
23513 __asm__ __volatile__( \
23514 + set \
23515 " cmp $7,%0\n" \
23516 " jbe 1f\n" \
23517 " movl %1,%0\n" \
23518 " negl %0\n" \
23519 " andl $7,%0\n" \
23520 " subl %0,%3\n" \
23521 - "4: rep; movsb\n" \
23522 + "4: rep; "prefix"movsb\n" \
23523 " movl %3,%0\n" \
23524 " shrl $2,%0\n" \
23525 " andl $3,%3\n" \
23526 " .align 2,0x90\n" \
23527 - "0: rep; movsl\n" \
23528 + "0: rep; "prefix"movsl\n" \
23529 " movl %3,%0\n" \
23530 - "1: rep; movsb\n" \
23531 + "1: rep; "prefix"movsb\n" \
23532 "2:\n" \
23533 + restore \
23534 ".section .fixup,\"ax\"\n" \
23535 "5: addl %3,%0\n" \
23536 " jmp 2b\n" \
23537 @@ -682,14 +799,14 @@ do { \
23538 " negl %0\n" \
23539 " andl $7,%0\n" \
23540 " subl %0,%3\n" \
23541 - "4: rep; movsb\n" \
23542 + "4: rep; "__copyuser_seg"movsb\n" \
23543 " movl %3,%0\n" \
23544 " shrl $2,%0\n" \
23545 " andl $3,%3\n" \
23546 " .align 2,0x90\n" \
23547 - "0: rep; movsl\n" \
23548 + "0: rep; "__copyuser_seg"movsl\n" \
23549 " movl %3,%0\n" \
23550 - "1: rep; movsb\n" \
23551 + "1: rep; "__copyuser_seg"movsb\n" \
23552 "2:\n" \
23553 ".section .fixup,\"ax\"\n" \
23554 "5: addl %3,%0\n" \
23555 @@ -775,9 +892,9 @@ survive:
23556 }
23557 #endif
23558 if (movsl_is_ok(to, from, n))
23559 - __copy_user(to, from, n);
23560 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
23561 else
23562 - n = __copy_user_intel(to, from, n);
23563 + n = __generic_copy_to_user_intel(to, from, n);
23564 return n;
23565 }
23566 EXPORT_SYMBOL(__copy_to_user_ll);
23567 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
23568 unsigned long n)
23569 {
23570 if (movsl_is_ok(to, from, n))
23571 - __copy_user(to, from, n);
23572 + __copy_user(to, from, n, __copyuser_seg, "", "");
23573 else
23574 - n = __copy_user_intel((void __user *)to,
23575 - (const void *)from, n);
23576 + n = __generic_copy_from_user_intel(to, from, n);
23577 return n;
23578 }
23579 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
23580 @@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
23581 if (n > 64 && cpu_has_xmm2)
23582 n = __copy_user_intel_nocache(to, from, n);
23583 else
23584 - __copy_user(to, from, n);
23585 + __copy_user(to, from, n, __copyuser_seg, "", "");
23586 #else
23587 - __copy_user(to, from, n);
23588 + __copy_user(to, from, n, __copyuser_seg, "", "");
23589 #endif
23590 return n;
23591 }
23592 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
23593
23594 -/**
23595 - * copy_to_user: - Copy a block of data into user space.
23596 - * @to: Destination address, in user space.
23597 - * @from: Source address, in kernel space.
23598 - * @n: Number of bytes to copy.
23599 - *
23600 - * Context: User context only. This function may sleep.
23601 - *
23602 - * Copy data from kernel space to user space.
23603 - *
23604 - * Returns number of bytes that could not be copied.
23605 - * On success, this will be zero.
23606 - */
23607 -unsigned long
23608 -copy_to_user(void __user *to, const void *from, unsigned long n)
23609 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23610 +void __set_fs(mm_segment_t x)
23611 {
23612 - if (access_ok(VERIFY_WRITE, to, n))
23613 - n = __copy_to_user(to, from, n);
23614 - return n;
23615 + switch (x.seg) {
23616 + case 0:
23617 + loadsegment(gs, 0);
23618 + break;
23619 + case TASK_SIZE_MAX:
23620 + loadsegment(gs, __USER_DS);
23621 + break;
23622 + case -1UL:
23623 + loadsegment(gs, __KERNEL_DS);
23624 + break;
23625 + default:
23626 + BUG();
23627 + }
23628 + return;
23629 }
23630 -EXPORT_SYMBOL(copy_to_user);
23631 +EXPORT_SYMBOL(__set_fs);
23632
23633 -/**
23634 - * copy_from_user: - Copy a block of data from user space.
23635 - * @to: Destination address, in kernel space.
23636 - * @from: Source address, in user space.
23637 - * @n: Number of bytes to copy.
23638 - *
23639 - * Context: User context only. This function may sleep.
23640 - *
23641 - * Copy data from user space to kernel space.
23642 - *
23643 - * Returns number of bytes that could not be copied.
23644 - * On success, this will be zero.
23645 - *
23646 - * If some data could not be copied, this function will pad the copied
23647 - * data to the requested size using zero bytes.
23648 - */
23649 -unsigned long
23650 -copy_from_user(void *to, const void __user *from, unsigned long n)
23651 +void set_fs(mm_segment_t x)
23652 {
23653 - if (access_ok(VERIFY_READ, from, n))
23654 - n = __copy_from_user(to, from, n);
23655 - else
23656 - memset(to, 0, n);
23657 - return n;
23658 + current_thread_info()->addr_limit = x;
23659 + __set_fs(x);
23660 }
23661 -EXPORT_SYMBOL(copy_from_user);
23662 +EXPORT_SYMBOL(set_fs);
23663 +#endif
23664 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
23665 index b7c2849..8633ad8 100644
23666 --- a/arch/x86/lib/usercopy_64.c
23667 +++ b/arch/x86/lib/usercopy_64.c
23668 @@ -42,6 +42,12 @@ long
23669 __strncpy_from_user(char *dst, const char __user *src, long count)
23670 {
23671 long res;
23672 +
23673 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23674 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
23675 + src += PAX_USER_SHADOW_BASE;
23676 +#endif
23677 +
23678 __do_strncpy_from_user(dst, src, count, res);
23679 return res;
23680 }
23681 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
23682 {
23683 long __d0;
23684 might_fault();
23685 +
23686 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23687 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
23688 + addr += PAX_USER_SHADOW_BASE;
23689 +#endif
23690 +
23691 /* no memory constraint because it doesn't change any memory gcc knows
23692 about */
23693 asm volatile(
23694 @@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
23695 }
23696 EXPORT_SYMBOL(strlen_user);
23697
23698 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
23699 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
23700 {
23701 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23702 - return copy_user_generic((__force void *)to, (__force void *)from, len);
23703 - }
23704 - return len;
23705 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23706 +
23707 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23708 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
23709 + to += PAX_USER_SHADOW_BASE;
23710 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
23711 + from += PAX_USER_SHADOW_BASE;
23712 +#endif
23713 +
23714 + return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
23715 + }
23716 + return len;
23717 }
23718 EXPORT_SYMBOL(copy_in_user);
23719
23720 @@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
23721 * it is not necessary to optimize tail handling.
23722 */
23723 unsigned long
23724 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23725 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
23726 {
23727 char c;
23728 unsigned zero_len;
23729 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
23730 index 61b41ca..5fef66a 100644
23731 --- a/arch/x86/mm/extable.c
23732 +++ b/arch/x86/mm/extable.c
23733 @@ -1,14 +1,71 @@
23734 #include <linux/module.h>
23735 #include <linux/spinlock.h>
23736 +#include <linux/sort.h>
23737 #include <asm/uaccess.h>
23738 +#include <asm/pgtable.h>
23739
23740 +/*
23741 + * The exception table needs to be sorted so that the binary
23742 + * search that we use to find entries in it works properly.
23743 + * This is used both for the kernel exception table and for
23744 + * the exception tables of modules that get loaded.
23745 + */
23746 +static int cmp_ex(const void *a, const void *b)
23747 +{
23748 + const struct exception_table_entry *x = a, *y = b;
23749 +
23750 + /* avoid overflow */
23751 + if (x->insn > y->insn)
23752 + return 1;
23753 + if (x->insn < y->insn)
23754 + return -1;
23755 + return 0;
23756 +}
23757 +
23758 +static void swap_ex(void *a, void *b, int size)
23759 +{
23760 + struct exception_table_entry t, *x = a, *y = b;
23761 +
23762 + t = *x;
23763 +
23764 + pax_open_kernel();
23765 + *x = *y;
23766 + *y = t;
23767 + pax_close_kernel();
23768 +}
23769 +
23770 +void sort_extable(struct exception_table_entry *start,
23771 + struct exception_table_entry *finish)
23772 +{
23773 + sort(start, finish - start, sizeof(struct exception_table_entry),
23774 + cmp_ex, swap_ex);
23775 +}
23776 +
23777 +#ifdef CONFIG_MODULES
23778 +/*
23779 + * If the exception table is sorted, any referring to the module init
23780 + * will be at the beginning or the end.
23781 + */
23782 +void trim_init_extable(struct module *m)
23783 +{
23784 + /*trim the beginning*/
23785 + while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
23786 + m->extable++;
23787 + m->num_exentries--;
23788 + }
23789 + /*trim the end*/
23790 + while (m->num_exentries &&
23791 + within_module_init(m->extable[m->num_exentries-1].insn, m))
23792 + m->num_exentries--;
23793 +}
23794 +#endif /* CONFIG_MODULES */
23795
23796 int fixup_exception(struct pt_regs *regs)
23797 {
23798 const struct exception_table_entry *fixup;
23799
23800 #ifdef CONFIG_PNPBIOS
23801 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
23802 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
23803 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
23804 extern u32 pnp_bios_is_utter_crap;
23805 pnp_bios_is_utter_crap = 1;
23806 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
23807 index 8ac0d76..87899a4 100644
23808 --- a/arch/x86/mm/fault.c
23809 +++ b/arch/x86/mm/fault.c
23810 @@ -11,10 +11,19 @@
23811 #include <linux/kprobes.h> /* __kprobes, ... */
23812 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
23813 #include <linux/perf_event.h> /* perf_sw_event */
23814 +#include <linux/unistd.h>
23815 +#include <linux/compiler.h>
23816
23817 #include <asm/traps.h> /* dotraplinkage, ... */
23818 #include <asm/pgalloc.h> /* pgd_*(), ... */
23819 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
23820 +#include <asm/vsyscall.h>
23821 +#include <asm/tlbflush.h>
23822 +
23823 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23824 +#include <asm/stacktrace.h>
23825 +#include "../kernel/dumpstack.h"
23826 +#endif
23827
23828 /*
23829 * Page fault error code bits:
23830 @@ -51,7 +60,7 @@ static inline int notify_page_fault(struct pt_regs *regs)
23831 int ret = 0;
23832
23833 /* kprobe_running() needs smp_processor_id() */
23834 - if (kprobes_built_in() && !user_mode_vm(regs)) {
23835 + if (kprobes_built_in() && !user_mode(regs)) {
23836 preempt_disable();
23837 if (kprobe_running() && kprobe_fault_handler(regs, 14))
23838 ret = 1;
23839 @@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
23840 return !instr_lo || (instr_lo>>1) == 1;
23841 case 0x00:
23842 /* Prefetch instruction is 0x0F0D or 0x0F18 */
23843 - if (probe_kernel_address(instr, opcode))
23844 + if (user_mode(regs)) {
23845 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23846 + return 0;
23847 + } else if (probe_kernel_address(instr, opcode))
23848 return 0;
23849
23850 *prefetch = (instr_lo == 0xF) &&
23851 @@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
23852 while (instr < max_instr) {
23853 unsigned char opcode;
23854
23855 - if (probe_kernel_address(instr, opcode))
23856 + if (user_mode(regs)) {
23857 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23858 + break;
23859 + } else if (probe_kernel_address(instr, opcode))
23860 break;
23861
23862 instr++;
23863 @@ -172,6 +187,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
23864 force_sig_info(si_signo, &info, tsk);
23865 }
23866
23867 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23868 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
23869 +#endif
23870 +
23871 +#ifdef CONFIG_PAX_EMUTRAMP
23872 +static int pax_handle_fetch_fault(struct pt_regs *regs);
23873 +#endif
23874 +
23875 +#ifdef CONFIG_PAX_PAGEEXEC
23876 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
23877 +{
23878 + pgd_t *pgd;
23879 + pud_t *pud;
23880 + pmd_t *pmd;
23881 +
23882 + pgd = pgd_offset(mm, address);
23883 + if (!pgd_present(*pgd))
23884 + return NULL;
23885 + pud = pud_offset(pgd, address);
23886 + if (!pud_present(*pud))
23887 + return NULL;
23888 + pmd = pmd_offset(pud, address);
23889 + if (!pmd_present(*pmd))
23890 + return NULL;
23891 + return pmd;
23892 +}
23893 +#endif
23894 +
23895 DEFINE_SPINLOCK(pgd_lock);
23896 LIST_HEAD(pgd_list);
23897
23898 @@ -224,11 +267,24 @@ void vmalloc_sync_all(void)
23899 address += PMD_SIZE) {
23900
23901 unsigned long flags;
23902 +
23903 +#ifdef CONFIG_PAX_PER_CPU_PGD
23904 + unsigned long cpu;
23905 +#else
23906 struct page *page;
23907 +#endif
23908
23909 spin_lock_irqsave(&pgd_lock, flags);
23910 +
23911 +#ifdef CONFIG_PAX_PER_CPU_PGD
23912 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
23913 + pgd_t *pgd = get_cpu_pgd(cpu);
23914 +#else
23915 list_for_each_entry(page, &pgd_list, lru) {
23916 - if (!vmalloc_sync_one(page_address(page), address))
23917 + pgd_t *pgd = page_address(page);
23918 +#endif
23919 +
23920 + if (!vmalloc_sync_one(pgd, address))
23921 break;
23922 }
23923 spin_unlock_irqrestore(&pgd_lock, flags);
23924 @@ -258,6 +314,11 @@ static noinline int vmalloc_fault(unsigned long address)
23925 * an interrupt in the middle of a task switch..
23926 */
23927 pgd_paddr = read_cr3();
23928 +
23929 +#ifdef CONFIG_PAX_PER_CPU_PGD
23930 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
23931 +#endif
23932 +
23933 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
23934 if (!pmd_k)
23935 return -1;
23936 @@ -332,15 +393,27 @@ void vmalloc_sync_all(void)
23937
23938 const pgd_t *pgd_ref = pgd_offset_k(address);
23939 unsigned long flags;
23940 +
23941 +#ifdef CONFIG_PAX_PER_CPU_PGD
23942 + unsigned long cpu;
23943 +#else
23944 struct page *page;
23945 +#endif
23946
23947 if (pgd_none(*pgd_ref))
23948 continue;
23949
23950 spin_lock_irqsave(&pgd_lock, flags);
23951 +
23952 +#ifdef CONFIG_PAX_PER_CPU_PGD
23953 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
23954 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
23955 +#else
23956 list_for_each_entry(page, &pgd_list, lru) {
23957 pgd_t *pgd;
23958 pgd = (pgd_t *)page_address(page) + pgd_index(address);
23959 +#endif
23960 +
23961 if (pgd_none(*pgd))
23962 set_pgd(pgd, *pgd_ref);
23963 else
23964 @@ -373,7 +446,14 @@ static noinline int vmalloc_fault(unsigned long address)
23965 * happen within a race in page table update. In the later
23966 * case just flush:
23967 */
23968 +
23969 +#ifdef CONFIG_PAX_PER_CPU_PGD
23970 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
23971 + pgd = pgd_offset_cpu(smp_processor_id(), address);
23972 +#else
23973 pgd = pgd_offset(current->active_mm, address);
23974 +#endif
23975 +
23976 pgd_ref = pgd_offset_k(address);
23977 if (pgd_none(*pgd_ref))
23978 return -1;
23979 @@ -535,7 +615,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
23980 static int is_errata100(struct pt_regs *regs, unsigned long address)
23981 {
23982 #ifdef CONFIG_X86_64
23983 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
23984 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
23985 return 1;
23986 #endif
23987 return 0;
23988 @@ -562,7 +642,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
23989 }
23990
23991 static const char nx_warning[] = KERN_CRIT
23992 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
23993 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
23994
23995 static void
23996 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23997 @@ -571,15 +651,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23998 if (!oops_may_print())
23999 return;
24000
24001 - if (error_code & PF_INSTR) {
24002 + if (nx_enabled && (error_code & PF_INSTR)) {
24003 unsigned int level;
24004
24005 pte_t *pte = lookup_address(address, &level);
24006
24007 if (pte && pte_present(*pte) && !pte_exec(*pte))
24008 - printk(nx_warning, current_uid());
24009 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
24010 }
24011
24012 +#ifdef CONFIG_PAX_KERNEXEC
24013 + if (init_mm.start_code <= address && address < init_mm.end_code) {
24014 + if (current->signal->curr_ip)
24015 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
24016 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
24017 + else
24018 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
24019 + current->comm, task_pid_nr(current), current_uid(), current_euid());
24020 + }
24021 +#endif
24022 +
24023 printk(KERN_ALERT "BUG: unable to handle kernel ");
24024 if (address < PAGE_SIZE)
24025 printk(KERN_CONT "NULL pointer dereference");
24026 @@ -705,6 +796,23 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
24027 {
24028 struct task_struct *tsk = current;
24029
24030 +#ifdef CONFIG_X86_64
24031 + struct mm_struct *mm = tsk->mm;
24032 +
24033 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
24034 + if (regs->ip == (unsigned long)vgettimeofday) {
24035 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
24036 + return;
24037 + } else if (regs->ip == (unsigned long)vtime) {
24038 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
24039 + return;
24040 + } else if (regs->ip == (unsigned long)vgetcpu) {
24041 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
24042 + return;
24043 + }
24044 + }
24045 +#endif
24046 +
24047 /* User mode accesses just cause a SIGSEGV */
24048 if (error_code & PF_USER) {
24049 /*
24050 @@ -722,6 +830,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
24051 if (is_errata100(regs, address))
24052 return;
24053
24054 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24055 + if (pax_is_fetch_fault(regs, error_code, address)) {
24056 +
24057 +#ifdef CONFIG_PAX_EMUTRAMP
24058 + switch (pax_handle_fetch_fault(regs)) {
24059 + case 2:
24060 + return;
24061 + }
24062 +#endif
24063 +
24064 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
24065 + do_group_exit(SIGKILL);
24066 + }
24067 +#endif
24068 +
24069 if (unlikely(show_unhandled_signals))
24070 show_signal_msg(regs, error_code, address, tsk);
24071
24072 @@ -818,7 +941,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
24073 if (fault & VM_FAULT_HWPOISON) {
24074 printk(KERN_ERR
24075 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
24076 - tsk->comm, tsk->pid, address);
24077 + tsk->comm, task_pid_nr(tsk), address);
24078 code = BUS_MCEERR_AR;
24079 }
24080 #endif
24081 @@ -857,6 +980,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
24082 return 1;
24083 }
24084
24085 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24086 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
24087 +{
24088 + pte_t *pte;
24089 + pmd_t *pmd;
24090 + spinlock_t *ptl;
24091 + unsigned char pte_mask;
24092 +
24093 + if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
24094 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
24095 + return 0;
24096 +
24097 + /* PaX: it's our fault, let's handle it if we can */
24098 +
24099 + /* PaX: take a look at read faults before acquiring any locks */
24100 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
24101 + /* instruction fetch attempt from a protected page in user mode */
24102 + up_read(&mm->mmap_sem);
24103 +
24104 +#ifdef CONFIG_PAX_EMUTRAMP
24105 + switch (pax_handle_fetch_fault(regs)) {
24106 + case 2:
24107 + return 1;
24108 + }
24109 +#endif
24110 +
24111 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
24112 + do_group_exit(SIGKILL);
24113 + }
24114 +
24115 + pmd = pax_get_pmd(mm, address);
24116 + if (unlikely(!pmd))
24117 + return 0;
24118 +
24119 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
24120 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
24121 + pte_unmap_unlock(pte, ptl);
24122 + return 0;
24123 + }
24124 +
24125 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
24126 + /* write attempt to a protected page in user mode */
24127 + pte_unmap_unlock(pte, ptl);
24128 + return 0;
24129 + }
24130 +
24131 +#ifdef CONFIG_SMP
24132 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
24133 +#else
24134 + if (likely(address > get_limit(regs->cs)))
24135 +#endif
24136 + {
24137 + set_pte(pte, pte_mkread(*pte));
24138 + __flush_tlb_one(address);
24139 + pte_unmap_unlock(pte, ptl);
24140 + up_read(&mm->mmap_sem);
24141 + return 1;
24142 + }
24143 +
24144 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
24145 +
24146 + /*
24147 + * PaX: fill DTLB with user rights and retry
24148 + */
24149 + __asm__ __volatile__ (
24150 + "orb %2,(%1)\n"
24151 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
24152 +/*
24153 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
24154 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
24155 + * page fault when examined during a TLB load attempt. this is true not only
24156 + * for PTEs holding a non-present entry but also present entries that will
24157 + * raise a page fault (such as those set up by PaX, or the copy-on-write
24158 + * mechanism). in effect it means that we do *not* need to flush the TLBs
24159 + * for our target pages since their PTEs are simply not in the TLBs at all.
24160 +
24161 + * the best thing in omitting it is that we gain around 15-20% speed in the
24162 + * fast path of the page fault handler and can get rid of tracing since we
24163 + * can no longer flush unintended entries.
24164 + */
24165 + "invlpg (%0)\n"
24166 +#endif
24167 + __copyuser_seg"testb $0,(%0)\n"
24168 + "xorb %3,(%1)\n"
24169 + :
24170 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
24171 + : "memory", "cc");
24172 + pte_unmap_unlock(pte, ptl);
24173 + up_read(&mm->mmap_sem);
24174 + return 1;
24175 +}
24176 +#endif
24177 +
24178 /*
24179 * Handle a spurious fault caused by a stale TLB entry.
24180 *
24181 @@ -923,6 +1139,9 @@ int show_unhandled_signals = 1;
24182 static inline int
24183 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
24184 {
24185 + if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
24186 + return 1;
24187 +
24188 if (write) {
24189 /* write, present and write, not present: */
24190 if (unlikely(!(vma->vm_flags & VM_WRITE)))
24191 @@ -956,16 +1175,30 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24192 {
24193 struct vm_area_struct *vma;
24194 struct task_struct *tsk;
24195 - unsigned long address;
24196 struct mm_struct *mm;
24197 int write;
24198 int fault;
24199
24200 - tsk = current;
24201 - mm = tsk->mm;
24202 -
24203 /* Get the faulting address: */
24204 - address = read_cr2();
24205 + unsigned long address = read_cr2();
24206 +
24207 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24208 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
24209 + if (!search_exception_tables(regs->ip)) {
24210 + bad_area_nosemaphore(regs, error_code, address);
24211 + return;
24212 + }
24213 + if (address < PAX_USER_SHADOW_BASE) {
24214 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
24215 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
24216 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
24217 + } else
24218 + address -= PAX_USER_SHADOW_BASE;
24219 + }
24220 +#endif
24221 +
24222 + tsk = current;
24223 + mm = tsk->mm;
24224
24225 /*
24226 * Detect and handle instructions that would cause a page fault for
24227 @@ -1026,7 +1259,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24228 * User-mode registers count as a user access even for any
24229 * potential system fault or CPU buglet:
24230 */
24231 - if (user_mode_vm(regs)) {
24232 + if (user_mode(regs)) {
24233 local_irq_enable();
24234 error_code |= PF_USER;
24235 } else {
24236 @@ -1080,6 +1313,11 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24237 might_sleep();
24238 }
24239
24240 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24241 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
24242 + return;
24243 +#endif
24244 +
24245 vma = find_vma(mm, address);
24246 if (unlikely(!vma)) {
24247 bad_area(regs, error_code, address);
24248 @@ -1091,18 +1329,24 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24249 bad_area(regs, error_code, address);
24250 return;
24251 }
24252 - if (error_code & PF_USER) {
24253 - /*
24254 - * Accessing the stack below %sp is always a bug.
24255 - * The large cushion allows instructions like enter
24256 - * and pusha to work. ("enter $65535, $31" pushes
24257 - * 32 pointers and then decrements %sp by 65535.)
24258 - */
24259 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
24260 - bad_area(regs, error_code, address);
24261 - return;
24262 - }
24263 + /*
24264 + * Accessing the stack below %sp is always a bug.
24265 + * The large cushion allows instructions like enter
24266 + * and pusha to work. ("enter $65535, $31" pushes
24267 + * 32 pointers and then decrements %sp by 65535.)
24268 + */
24269 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
24270 + bad_area(regs, error_code, address);
24271 + return;
24272 }
24273 +
24274 +#ifdef CONFIG_PAX_SEGMEXEC
24275 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
24276 + bad_area(regs, error_code, address);
24277 + return;
24278 + }
24279 +#endif
24280 +
24281 if (unlikely(expand_stack(vma, address))) {
24282 bad_area(regs, error_code, address);
24283 return;
24284 @@ -1146,3 +1390,292 @@ good_area:
24285
24286 up_read(&mm->mmap_sem);
24287 }
24288 +
24289 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24290 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
24291 +{
24292 + struct mm_struct *mm = current->mm;
24293 + unsigned long ip = regs->ip;
24294 +
24295 + if (v8086_mode(regs))
24296 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
24297 +
24298 +#ifdef CONFIG_PAX_PAGEEXEC
24299 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
24300 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
24301 + return true;
24302 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
24303 + return true;
24304 + return false;
24305 + }
24306 +#endif
24307 +
24308 +#ifdef CONFIG_PAX_SEGMEXEC
24309 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
24310 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
24311 + return true;
24312 + return false;
24313 + }
24314 +#endif
24315 +
24316 + return false;
24317 +}
24318 +#endif
24319 +
24320 +#ifdef CONFIG_PAX_EMUTRAMP
24321 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
24322 +{
24323 + int err;
24324 +
24325 + do { /* PaX: libffi trampoline emulation */
24326 + unsigned char mov, jmp;
24327 + unsigned int addr1, addr2;
24328 +
24329 +#ifdef CONFIG_X86_64
24330 + if ((regs->ip + 9) >> 32)
24331 + break;
24332 +#endif
24333 +
24334 + err = get_user(mov, (unsigned char __user *)regs->ip);
24335 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24336 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24337 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24338 +
24339 + if (err)
24340 + break;
24341 +
24342 + if (mov == 0xB8 && jmp == 0xE9) {
24343 + regs->ax = addr1;
24344 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24345 + return 2;
24346 + }
24347 + } while (0);
24348 +
24349 + do { /* PaX: gcc trampoline emulation #1 */
24350 + unsigned char mov1, mov2;
24351 + unsigned short jmp;
24352 + unsigned int addr1, addr2;
24353 +
24354 +#ifdef CONFIG_X86_64
24355 + if ((regs->ip + 11) >> 32)
24356 + break;
24357 +#endif
24358 +
24359 + err = get_user(mov1, (unsigned char __user *)regs->ip);
24360 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24361 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
24362 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24363 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
24364 +
24365 + if (err)
24366 + break;
24367 +
24368 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
24369 + regs->cx = addr1;
24370 + regs->ax = addr2;
24371 + regs->ip = addr2;
24372 + return 2;
24373 + }
24374 + } while (0);
24375 +
24376 + do { /* PaX: gcc trampoline emulation #2 */
24377 + unsigned char mov, jmp;
24378 + unsigned int addr1, addr2;
24379 +
24380 +#ifdef CONFIG_X86_64
24381 + if ((regs->ip + 9) >> 32)
24382 + break;
24383 +#endif
24384 +
24385 + err = get_user(mov, (unsigned char __user *)regs->ip);
24386 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24387 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24388 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24389 +
24390 + if (err)
24391 + break;
24392 +
24393 + if (mov == 0xB9 && jmp == 0xE9) {
24394 + regs->cx = addr1;
24395 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24396 + return 2;
24397 + }
24398 + } while (0);
24399 +
24400 + return 1; /* PaX in action */
24401 +}
24402 +
24403 +#ifdef CONFIG_X86_64
24404 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
24405 +{
24406 + int err;
24407 +
24408 + do { /* PaX: libffi trampoline emulation */
24409 + unsigned short mov1, mov2, jmp1;
24410 + unsigned char stcclc, jmp2;
24411 + unsigned long addr1, addr2;
24412 +
24413 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24414 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24415 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24416 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24417 + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
24418 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
24419 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
24420 +
24421 + if (err)
24422 + break;
24423 +
24424 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24425 + regs->r11 = addr1;
24426 + regs->r10 = addr2;
24427 + if (stcclc == 0xF8)
24428 + regs->flags &= ~X86_EFLAGS_CF;
24429 + else
24430 + regs->flags |= X86_EFLAGS_CF;
24431 + regs->ip = addr1;
24432 + return 2;
24433 + }
24434 + } while (0);
24435 +
24436 + do { /* PaX: gcc trampoline emulation #1 */
24437 + unsigned short mov1, mov2, jmp1;
24438 + unsigned char jmp2;
24439 + unsigned int addr1;
24440 + unsigned long addr2;
24441 +
24442 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24443 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
24444 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
24445 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
24446 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
24447 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
24448 +
24449 + if (err)
24450 + break;
24451 +
24452 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24453 + regs->r11 = addr1;
24454 + regs->r10 = addr2;
24455 + regs->ip = addr1;
24456 + return 2;
24457 + }
24458 + } while (0);
24459 +
24460 + do { /* PaX: gcc trampoline emulation #2 */
24461 + unsigned short mov1, mov2, jmp1;
24462 + unsigned char jmp2;
24463 + unsigned long addr1, addr2;
24464 +
24465 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24466 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24467 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24468 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24469 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
24470 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
24471 +
24472 + if (err)
24473 + break;
24474 +
24475 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24476 + regs->r11 = addr1;
24477 + regs->r10 = addr2;
24478 + regs->ip = addr1;
24479 + return 2;
24480 + }
24481 + } while (0);
24482 +
24483 + return 1; /* PaX in action */
24484 +}
24485 +#endif
24486 +
24487 +/*
24488 + * PaX: decide what to do with offenders (regs->ip = fault address)
24489 + *
24490 + * returns 1 when task should be killed
24491 + * 2 when gcc trampoline was detected
24492 + */
24493 +static int pax_handle_fetch_fault(struct pt_regs *regs)
24494 +{
24495 + if (v8086_mode(regs))
24496 + return 1;
24497 +
24498 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
24499 + return 1;
24500 +
24501 +#ifdef CONFIG_X86_32
24502 + return pax_handle_fetch_fault_32(regs);
24503 +#else
24504 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
24505 + return pax_handle_fetch_fault_32(regs);
24506 + else
24507 + return pax_handle_fetch_fault_64(regs);
24508 +#endif
24509 +}
24510 +#endif
24511 +
24512 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24513 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
24514 +{
24515 + long i;
24516 +
24517 + printk(KERN_ERR "PAX: bytes at PC: ");
24518 + for (i = 0; i < 20; i++) {
24519 + unsigned char c;
24520 + if (get_user(c, (unsigned char __force_user *)pc+i))
24521 + printk(KERN_CONT "?? ");
24522 + else
24523 + printk(KERN_CONT "%02x ", c);
24524 + }
24525 + printk("\n");
24526 +
24527 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
24528 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
24529 + unsigned long c;
24530 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
24531 +#ifdef CONFIG_X86_32
24532 + printk(KERN_CONT "???????? ");
24533 +#else
24534 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
24535 + printk(KERN_CONT "???????? ???????? ");
24536 + else
24537 + printk(KERN_CONT "???????????????? ");
24538 +#endif
24539 + } else {
24540 +#ifdef CONFIG_X86_64
24541 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
24542 + printk(KERN_CONT "%08x ", (unsigned int)c);
24543 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
24544 + } else
24545 +#endif
24546 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
24547 + }
24548 + }
24549 + printk("\n");
24550 +}
24551 +#endif
24552 +
24553 +/**
24554 + * probe_kernel_write(): safely attempt to write to a location
24555 + * @dst: address to write to
24556 + * @src: pointer to the data that shall be written
24557 + * @size: size of the data chunk
24558 + *
24559 + * Safely write to address @dst from the buffer at @src. If a kernel fault
24560 + * happens, handle that and return -EFAULT.
24561 + */
24562 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
24563 +{
24564 + long ret;
24565 + mm_segment_t old_fs = get_fs();
24566 +
24567 + set_fs(KERNEL_DS);
24568 + pagefault_disable();
24569 + pax_open_kernel();
24570 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
24571 + pax_close_kernel();
24572 + pagefault_enable();
24573 + set_fs(old_fs);
24574 +
24575 + return ret ? -EFAULT : 0;
24576 +}
24577 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
24578 index 71da1bc..7a16bf4 100644
24579 --- a/arch/x86/mm/gup.c
24580 +++ b/arch/x86/mm/gup.c
24581 @@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
24582 addr = start;
24583 len = (unsigned long) nr_pages << PAGE_SHIFT;
24584 end = start + len;
24585 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24586 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24587 (void __user *)start, len)))
24588 return 0;
24589
24590 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
24591 index 63a6ba6..79abd7a 100644
24592 --- a/arch/x86/mm/highmem_32.c
24593 +++ b/arch/x86/mm/highmem_32.c
24594 @@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
24595 idx = type + KM_TYPE_NR*smp_processor_id();
24596 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24597 BUG_ON(!pte_none(*(kmap_pte-idx)));
24598 +
24599 + pax_open_kernel();
24600 set_pte(kmap_pte-idx, mk_pte(page, prot));
24601 + pax_close_kernel();
24602
24603 return (void *)vaddr;
24604 }
24605 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
24606 index f46c340..6ff9a26 100644
24607 --- a/arch/x86/mm/hugetlbpage.c
24608 +++ b/arch/x86/mm/hugetlbpage.c
24609 @@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
24610 struct hstate *h = hstate_file(file);
24611 struct mm_struct *mm = current->mm;
24612 struct vm_area_struct *vma;
24613 - unsigned long start_addr;
24614 + unsigned long start_addr, pax_task_size = TASK_SIZE;
24615 +
24616 +#ifdef CONFIG_PAX_SEGMEXEC
24617 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24618 + pax_task_size = SEGMEXEC_TASK_SIZE;
24619 +#endif
24620 +
24621 + pax_task_size -= PAGE_SIZE;
24622
24623 if (len > mm->cached_hole_size) {
24624 - start_addr = mm->free_area_cache;
24625 + start_addr = mm->free_area_cache;
24626 } else {
24627 - start_addr = TASK_UNMAPPED_BASE;
24628 - mm->cached_hole_size = 0;
24629 + start_addr = mm->mmap_base;
24630 + mm->cached_hole_size = 0;
24631 }
24632
24633 full_search:
24634 @@ -281,26 +288,27 @@ full_search:
24635
24636 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
24637 /* At this point: (!vma || addr < vma->vm_end). */
24638 - if (TASK_SIZE - len < addr) {
24639 + if (pax_task_size - len < addr) {
24640 /*
24641 * Start a new search - just in case we missed
24642 * some holes.
24643 */
24644 - if (start_addr != TASK_UNMAPPED_BASE) {
24645 - start_addr = TASK_UNMAPPED_BASE;
24646 + if (start_addr != mm->mmap_base) {
24647 + start_addr = mm->mmap_base;
24648 mm->cached_hole_size = 0;
24649 goto full_search;
24650 }
24651 return -ENOMEM;
24652 }
24653 - if (!vma || addr + len <= vma->vm_start) {
24654 - mm->free_area_cache = addr + len;
24655 - return addr;
24656 - }
24657 + if (check_heap_stack_gap(vma, addr, len))
24658 + break;
24659 if (addr + mm->cached_hole_size < vma->vm_start)
24660 mm->cached_hole_size = vma->vm_start - addr;
24661 addr = ALIGN(vma->vm_end, huge_page_size(h));
24662 }
24663 +
24664 + mm->free_area_cache = addr + len;
24665 + return addr;
24666 }
24667
24668 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24669 @@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24670 {
24671 struct hstate *h = hstate_file(file);
24672 struct mm_struct *mm = current->mm;
24673 - struct vm_area_struct *vma, *prev_vma;
24674 - unsigned long base = mm->mmap_base, addr = addr0;
24675 + struct vm_area_struct *vma;
24676 + unsigned long base = mm->mmap_base, addr;
24677 unsigned long largest_hole = mm->cached_hole_size;
24678 - int first_time = 1;
24679
24680 /* don't allow allocations above current base */
24681 if (mm->free_area_cache > base)
24682 @@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24683 largest_hole = 0;
24684 mm->free_area_cache = base;
24685 }
24686 -try_again:
24687 +
24688 /* make sure it can fit in the remaining address space */
24689 if (mm->free_area_cache < len)
24690 goto fail;
24691
24692 /* either no address requested or cant fit in requested address hole */
24693 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
24694 + addr = (mm->free_area_cache - len);
24695 do {
24696 + addr &= huge_page_mask(h);
24697 + vma = find_vma(mm, addr);
24698 /*
24699 * Lookup failure means no vma is above this address,
24700 * i.e. return with success:
24701 - */
24702 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
24703 - return addr;
24704 -
24705 - /*
24706 * new region fits between prev_vma->vm_end and
24707 * vma->vm_start, use it:
24708 */
24709 - if (addr + len <= vma->vm_start &&
24710 - (!prev_vma || (addr >= prev_vma->vm_end))) {
24711 + if (check_heap_stack_gap(vma, addr, len)) {
24712 /* remember the address as a hint for next time */
24713 - mm->cached_hole_size = largest_hole;
24714 - return (mm->free_area_cache = addr);
24715 - } else {
24716 - /* pull free_area_cache down to the first hole */
24717 - if (mm->free_area_cache == vma->vm_end) {
24718 - mm->free_area_cache = vma->vm_start;
24719 - mm->cached_hole_size = largest_hole;
24720 - }
24721 + mm->cached_hole_size = largest_hole;
24722 + return (mm->free_area_cache = addr);
24723 + }
24724 + /* pull free_area_cache down to the first hole */
24725 + if (mm->free_area_cache == vma->vm_end) {
24726 + mm->free_area_cache = vma->vm_start;
24727 + mm->cached_hole_size = largest_hole;
24728 }
24729
24730 /* remember the largest hole we saw so far */
24731 if (addr + largest_hole < vma->vm_start)
24732 - largest_hole = vma->vm_start - addr;
24733 + largest_hole = vma->vm_start - addr;
24734
24735 /* try just below the current vma->vm_start */
24736 - addr = (vma->vm_start - len) & huge_page_mask(h);
24737 - } while (len <= vma->vm_start);
24738 + addr = skip_heap_stack_gap(vma, len);
24739 + } while (!IS_ERR_VALUE(addr));
24740
24741 fail:
24742 /*
24743 - * if hint left us with no space for the requested
24744 - * mapping then try again:
24745 - */
24746 - if (first_time) {
24747 - mm->free_area_cache = base;
24748 - largest_hole = 0;
24749 - first_time = 0;
24750 - goto try_again;
24751 - }
24752 - /*
24753 * A failed mmap() very likely causes application failure,
24754 * so fall back to the bottom-up function here. This scenario
24755 * can happen with large stack limits and large mmap()
24756 * allocations.
24757 */
24758 - mm->free_area_cache = TASK_UNMAPPED_BASE;
24759 +
24760 +#ifdef CONFIG_PAX_SEGMEXEC
24761 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24762 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
24763 + else
24764 +#endif
24765 +
24766 + mm->mmap_base = TASK_UNMAPPED_BASE;
24767 +
24768 +#ifdef CONFIG_PAX_RANDMMAP
24769 + if (mm->pax_flags & MF_PAX_RANDMMAP)
24770 + mm->mmap_base += mm->delta_mmap;
24771 +#endif
24772 +
24773 + mm->free_area_cache = mm->mmap_base;
24774 mm->cached_hole_size = ~0UL;
24775 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
24776 len, pgoff, flags);
24777 @@ -387,6 +393,7 @@ fail:
24778 /*
24779 * Restore the topdown base:
24780 */
24781 + mm->mmap_base = base;
24782 mm->free_area_cache = base;
24783 mm->cached_hole_size = ~0UL;
24784
24785 @@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24786 struct hstate *h = hstate_file(file);
24787 struct mm_struct *mm = current->mm;
24788 struct vm_area_struct *vma;
24789 + unsigned long pax_task_size = TASK_SIZE;
24790
24791 if (len & ~huge_page_mask(h))
24792 return -EINVAL;
24793 - if (len > TASK_SIZE)
24794 +
24795 +#ifdef CONFIG_PAX_SEGMEXEC
24796 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24797 + pax_task_size = SEGMEXEC_TASK_SIZE;
24798 +#endif
24799 +
24800 + pax_task_size -= PAGE_SIZE;
24801 +
24802 + if (len > pax_task_size)
24803 return -ENOMEM;
24804
24805 if (flags & MAP_FIXED) {
24806 @@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24807 if (addr) {
24808 addr = ALIGN(addr, huge_page_size(h));
24809 vma = find_vma(mm, addr);
24810 - if (TASK_SIZE - len >= addr &&
24811 - (!vma || addr + len <= vma->vm_start))
24812 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
24813 return addr;
24814 }
24815 if (mm->get_unmapped_area == arch_get_unmapped_area)
24816 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
24817 index 73ffd55..ad78676 100644
24818 --- a/arch/x86/mm/init.c
24819 +++ b/arch/x86/mm/init.c
24820 @@ -69,11 +69,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
24821 * cause a hotspot and fill up ZONE_DMA. The page tables
24822 * need roughly 0.5KB per GB.
24823 */
24824 -#ifdef CONFIG_X86_32
24825 - start = 0x7000;
24826 -#else
24827 - start = 0x8000;
24828 -#endif
24829 + start = 0x100000;
24830 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
24831 tables, PAGE_SIZE);
24832 if (e820_table_start == -1UL)
24833 @@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
24834 #endif
24835
24836 set_nx();
24837 - if (nx_enabled)
24838 + if (nx_enabled && cpu_has_nx)
24839 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
24840
24841 /* Enable PSE if available */
24842 @@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
24843 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
24844 * mmio resources as well as potential bios/acpi data regions.
24845 */
24846 +
24847 int devmem_is_allowed(unsigned long pagenr)
24848 {
24849 +#ifdef CONFIG_GRKERNSEC_KMEM
24850 + /* allow BDA */
24851 + if (!pagenr)
24852 + return 1;
24853 + /* allow EBDA */
24854 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
24855 + return 1;
24856 + /* allow ISA/video mem */
24857 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
24858 + return 1;
24859 + /* throw out everything else below 1MB */
24860 + if (pagenr <= 256)
24861 + return 0;
24862 +#else
24863 if (pagenr <= 256)
24864 return 1;
24865 +#endif
24866 +
24867 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
24868 return 0;
24869 if (!page_is_ram(pagenr))
24870 @@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
24871
24872 void free_initmem(void)
24873 {
24874 +
24875 +#ifdef CONFIG_PAX_KERNEXEC
24876 +#ifdef CONFIG_X86_32
24877 + /* PaX: limit KERNEL_CS to actual size */
24878 + unsigned long addr, limit;
24879 + struct desc_struct d;
24880 + int cpu;
24881 +
24882 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
24883 + limit = (limit - 1UL) >> PAGE_SHIFT;
24884 +
24885 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
24886 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
24887 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
24888 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
24889 + }
24890 +
24891 + /* PaX: make KERNEL_CS read-only */
24892 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
24893 + if (!paravirt_enabled())
24894 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
24895 +/*
24896 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
24897 + pgd = pgd_offset_k(addr);
24898 + pud = pud_offset(pgd, addr);
24899 + pmd = pmd_offset(pud, addr);
24900 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24901 + }
24902 +*/
24903 +#ifdef CONFIG_X86_PAE
24904 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
24905 +/*
24906 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
24907 + pgd = pgd_offset_k(addr);
24908 + pud = pud_offset(pgd, addr);
24909 + pmd = pmd_offset(pud, addr);
24910 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24911 + }
24912 +*/
24913 +#endif
24914 +
24915 +#ifdef CONFIG_MODULES
24916 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
24917 +#endif
24918 +
24919 +#else
24920 + pgd_t *pgd;
24921 + pud_t *pud;
24922 + pmd_t *pmd;
24923 + unsigned long addr, end;
24924 +
24925 + /* PaX: make kernel code/rodata read-only, rest non-executable */
24926 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
24927 + pgd = pgd_offset_k(addr);
24928 + pud = pud_offset(pgd, addr);
24929 + pmd = pmd_offset(pud, addr);
24930 + if (!pmd_present(*pmd))
24931 + continue;
24932 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
24933 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24934 + else
24935 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24936 + }
24937 +
24938 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
24939 + end = addr + KERNEL_IMAGE_SIZE;
24940 + for (; addr < end; addr += PMD_SIZE) {
24941 + pgd = pgd_offset_k(addr);
24942 + pud = pud_offset(pgd, addr);
24943 + pmd = pmd_offset(pud, addr);
24944 + if (!pmd_present(*pmd))
24945 + continue;
24946 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
24947 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24948 + }
24949 +#endif
24950 +
24951 + flush_tlb_all();
24952 +#endif
24953 +
24954 free_init_pages("unused kernel memory",
24955 (unsigned long)(&__init_begin),
24956 (unsigned long)(&__init_end));
24957 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
24958 index 30938c1..bda3d5d 100644
24959 --- a/arch/x86/mm/init_32.c
24960 +++ b/arch/x86/mm/init_32.c
24961 @@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
24962 }
24963
24964 /*
24965 - * Creates a middle page table and puts a pointer to it in the
24966 - * given global directory entry. This only returns the gd entry
24967 - * in non-PAE compilation mode, since the middle layer is folded.
24968 - */
24969 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
24970 -{
24971 - pud_t *pud;
24972 - pmd_t *pmd_table;
24973 -
24974 -#ifdef CONFIG_X86_PAE
24975 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
24976 - if (after_bootmem)
24977 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
24978 - else
24979 - pmd_table = (pmd_t *)alloc_low_page();
24980 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
24981 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
24982 - pud = pud_offset(pgd, 0);
24983 - BUG_ON(pmd_table != pmd_offset(pud, 0));
24984 -
24985 - return pmd_table;
24986 - }
24987 -#endif
24988 - pud = pud_offset(pgd, 0);
24989 - pmd_table = pmd_offset(pud, 0);
24990 -
24991 - return pmd_table;
24992 -}
24993 -
24994 -/*
24995 * Create a page table and place a pointer to it in a middle page
24996 * directory entry:
24997 */
24998 @@ -121,13 +91,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
24999 page_table = (pte_t *)alloc_low_page();
25000
25001 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
25002 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25003 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
25004 +#else
25005 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
25006 +#endif
25007 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
25008 }
25009
25010 return pte_offset_kernel(pmd, 0);
25011 }
25012
25013 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
25014 +{
25015 + pud_t *pud;
25016 + pmd_t *pmd_table;
25017 +
25018 + pud = pud_offset(pgd, 0);
25019 + pmd_table = pmd_offset(pud, 0);
25020 +
25021 + return pmd_table;
25022 +}
25023 +
25024 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
25025 {
25026 int pgd_idx = pgd_index(vaddr);
25027 @@ -201,6 +186,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25028 int pgd_idx, pmd_idx;
25029 unsigned long vaddr;
25030 pgd_t *pgd;
25031 + pud_t *pud;
25032 pmd_t *pmd;
25033 pte_t *pte = NULL;
25034
25035 @@ -210,8 +196,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25036 pgd = pgd_base + pgd_idx;
25037
25038 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
25039 - pmd = one_md_table_init(pgd);
25040 - pmd = pmd + pmd_index(vaddr);
25041 + pud = pud_offset(pgd, vaddr);
25042 + pmd = pmd_offset(pud, vaddr);
25043 +
25044 +#ifdef CONFIG_X86_PAE
25045 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
25046 +#endif
25047 +
25048 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
25049 pmd++, pmd_idx++) {
25050 pte = page_table_kmap_check(one_page_table_init(pmd),
25051 @@ -223,11 +214,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25052 }
25053 }
25054
25055 -static inline int is_kernel_text(unsigned long addr)
25056 +static inline int is_kernel_text(unsigned long start, unsigned long end)
25057 {
25058 - if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
25059 - return 1;
25060 - return 0;
25061 + if ((start > ktla_ktva((unsigned long)_etext) ||
25062 + end <= ktla_ktva((unsigned long)_stext)) &&
25063 + (start > ktla_ktva((unsigned long)_einittext) ||
25064 + end <= ktla_ktva((unsigned long)_sinittext)) &&
25065 +
25066 +#ifdef CONFIG_ACPI_SLEEP
25067 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
25068 +#endif
25069 +
25070 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
25071 + return 0;
25072 + return 1;
25073 }
25074
25075 /*
25076 @@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned long start,
25077 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
25078 unsigned long start_pfn, end_pfn;
25079 pgd_t *pgd_base = swapper_pg_dir;
25080 - int pgd_idx, pmd_idx, pte_ofs;
25081 + unsigned int pgd_idx, pmd_idx, pte_ofs;
25082 unsigned long pfn;
25083 pgd_t *pgd;
25084 + pud_t *pud;
25085 pmd_t *pmd;
25086 pte_t *pte;
25087 unsigned pages_2m, pages_4k;
25088 @@ -278,8 +279,13 @@ repeat:
25089 pfn = start_pfn;
25090 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
25091 pgd = pgd_base + pgd_idx;
25092 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
25093 - pmd = one_md_table_init(pgd);
25094 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
25095 + pud = pud_offset(pgd, 0);
25096 + pmd = pmd_offset(pud, 0);
25097 +
25098 +#ifdef CONFIG_X86_PAE
25099 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
25100 +#endif
25101
25102 if (pfn >= end_pfn)
25103 continue;
25104 @@ -291,14 +297,13 @@ repeat:
25105 #endif
25106 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
25107 pmd++, pmd_idx++) {
25108 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
25109 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
25110
25111 /*
25112 * Map with big pages if possible, otherwise
25113 * create normal page tables:
25114 */
25115 if (use_pse) {
25116 - unsigned int addr2;
25117 pgprot_t prot = PAGE_KERNEL_LARGE;
25118 /*
25119 * first pass will use the same initial
25120 @@ -308,11 +313,7 @@ repeat:
25121 __pgprot(PTE_IDENT_ATTR |
25122 _PAGE_PSE);
25123
25124 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
25125 - PAGE_OFFSET + PAGE_SIZE-1;
25126 -
25127 - if (is_kernel_text(addr) ||
25128 - is_kernel_text(addr2))
25129 + if (is_kernel_text(address, address + PMD_SIZE))
25130 prot = PAGE_KERNEL_LARGE_EXEC;
25131
25132 pages_2m++;
25133 @@ -329,7 +330,7 @@ repeat:
25134 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
25135 pte += pte_ofs;
25136 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
25137 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
25138 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
25139 pgprot_t prot = PAGE_KERNEL;
25140 /*
25141 * first pass will use the same initial
25142 @@ -337,7 +338,7 @@ repeat:
25143 */
25144 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
25145
25146 - if (is_kernel_text(addr))
25147 + if (is_kernel_text(address, address + PAGE_SIZE))
25148 prot = PAGE_KERNEL_EXEC;
25149
25150 pages_4k++;
25151 @@ -489,7 +490,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
25152
25153 pud = pud_offset(pgd, va);
25154 pmd = pmd_offset(pud, va);
25155 - if (!pmd_present(*pmd))
25156 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
25157 break;
25158
25159 pte = pte_offset_kernel(pmd, va);
25160 @@ -541,9 +542,7 @@ void __init early_ioremap_page_table_range_init(void)
25161
25162 static void __init pagetable_init(void)
25163 {
25164 - pgd_t *pgd_base = swapper_pg_dir;
25165 -
25166 - permanent_kmaps_init(pgd_base);
25167 + permanent_kmaps_init(swapper_pg_dir);
25168 }
25169
25170 #ifdef CONFIG_ACPI_SLEEP
25171 @@ -551,12 +550,12 @@ static void __init pagetable_init(void)
25172 * ACPI suspend needs this for resume, because things like the intel-agp
25173 * driver might have split up a kernel 4MB mapping.
25174 */
25175 -char swsusp_pg_dir[PAGE_SIZE]
25176 +pgd_t swsusp_pg_dir[PTRS_PER_PGD]
25177 __attribute__ ((aligned(PAGE_SIZE)));
25178
25179 static inline void save_pg_dir(void)
25180 {
25181 - memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
25182 + clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
25183 }
25184 #else /* !CONFIG_ACPI_SLEEP */
25185 static inline void save_pg_dir(void)
25186 @@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
25187 flush_tlb_all();
25188 }
25189
25190 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25191 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25192 EXPORT_SYMBOL_GPL(__supported_pte_mask);
25193
25194 /* user-defined highmem size */
25195 @@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void)
25196 * Initialize the boot-time allocator (with low memory only):
25197 */
25198 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
25199 - bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
25200 + bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
25201 PAGE_SIZE);
25202 if (bootmap == -1L)
25203 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
25204 @@ -864,6 +863,12 @@ void __init mem_init(void)
25205
25206 pci_iommu_alloc();
25207
25208 +#ifdef CONFIG_PAX_PER_CPU_PGD
25209 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25210 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25211 + KERNEL_PGD_PTRS);
25212 +#endif
25213 +
25214 #ifdef CONFIG_FLATMEM
25215 BUG_ON(!mem_map);
25216 #endif
25217 @@ -881,7 +886,7 @@ void __init mem_init(void)
25218 set_highmem_pages_init();
25219
25220 codesize = (unsigned long) &_etext - (unsigned long) &_text;
25221 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
25222 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
25223 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
25224
25225 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
25226 @@ -923,10 +928,10 @@ void __init mem_init(void)
25227 ((unsigned long)&__init_end -
25228 (unsigned long)&__init_begin) >> 10,
25229
25230 - (unsigned long)&_etext, (unsigned long)&_edata,
25231 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
25232 + (unsigned long)&_sdata, (unsigned long)&_edata,
25233 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
25234
25235 - (unsigned long)&_text, (unsigned long)&_etext,
25236 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
25237 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
25238
25239 /*
25240 @@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
25241 if (!kernel_set_to_readonly)
25242 return;
25243
25244 + start = ktla_ktva(start);
25245 pr_debug("Set kernel text: %lx - %lx for read write\n",
25246 start, start+size);
25247
25248 @@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
25249 if (!kernel_set_to_readonly)
25250 return;
25251
25252 + start = ktla_ktva(start);
25253 pr_debug("Set kernel text: %lx - %lx for read only\n",
25254 start, start+size);
25255
25256 @@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
25257 unsigned long start = PFN_ALIGN(_text);
25258 unsigned long size = PFN_ALIGN(_etext) - start;
25259
25260 + start = ktla_ktva(start);
25261 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
25262 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
25263 size >> 10);
25264 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
25265 index 7d095ad..25d2549 100644
25266 --- a/arch/x86/mm/init_64.c
25267 +++ b/arch/x86/mm/init_64.c
25268 @@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
25269 pmd = fill_pmd(pud, vaddr);
25270 pte = fill_pte(pmd, vaddr);
25271
25272 + pax_open_kernel();
25273 set_pte(pte, new_pte);
25274 + pax_close_kernel();
25275
25276 /*
25277 * It's enough to flush this one mapping.
25278 @@ -223,14 +225,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
25279 pgd = pgd_offset_k((unsigned long)__va(phys));
25280 if (pgd_none(*pgd)) {
25281 pud = (pud_t *) spp_getpage();
25282 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
25283 - _PAGE_USER));
25284 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
25285 }
25286 pud = pud_offset(pgd, (unsigned long)__va(phys));
25287 if (pud_none(*pud)) {
25288 pmd = (pmd_t *) spp_getpage();
25289 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
25290 - _PAGE_USER));
25291 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
25292 }
25293 pmd = pmd_offset(pud, phys);
25294 BUG_ON(!pmd_none(*pmd));
25295 @@ -675,6 +675,12 @@ void __init mem_init(void)
25296
25297 pci_iommu_alloc();
25298
25299 +#ifdef CONFIG_PAX_PER_CPU_PGD
25300 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25301 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25302 + KERNEL_PGD_PTRS);
25303 +#endif
25304 +
25305 /* clear_bss() already clear the empty_zero_page */
25306
25307 reservedpages = 0;
25308 @@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
25309 static struct vm_area_struct gate_vma = {
25310 .vm_start = VSYSCALL_START,
25311 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
25312 - .vm_page_prot = PAGE_READONLY_EXEC,
25313 - .vm_flags = VM_READ | VM_EXEC
25314 + .vm_page_prot = PAGE_READONLY,
25315 + .vm_flags = VM_READ
25316 };
25317
25318 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
25319 @@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long addr)
25320
25321 const char *arch_vma_name(struct vm_area_struct *vma)
25322 {
25323 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
25324 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
25325 return "[vdso]";
25326 if (vma == &gate_vma)
25327 return "[vsyscall]";
25328 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
25329 index 84e236c..69bd3f6 100644
25330 --- a/arch/x86/mm/iomap_32.c
25331 +++ b/arch/x86/mm/iomap_32.c
25332 @@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
25333 debug_kmap_atomic(type);
25334 idx = type + KM_TYPE_NR * smp_processor_id();
25335 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25336 +
25337 + pax_open_kernel();
25338 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
25339 + pax_close_kernel();
25340 +
25341 arch_flush_lazy_mmu_mode();
25342
25343 return (void *)vaddr;
25344 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
25345 index 2feb9bd..3646202 100644
25346 --- a/arch/x86/mm/ioremap.c
25347 +++ b/arch/x86/mm/ioremap.c
25348 @@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
25349 * Second special case: Some BIOSen report the PC BIOS
25350 * area (640->1Mb) as ram even though it is not.
25351 */
25352 - if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
25353 - pagenr < (BIOS_END >> PAGE_SHIFT))
25354 + if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
25355 + pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
25356 return 0;
25357
25358 for (i = 0; i < e820.nr_map; i++) {
25359 @@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
25360 /*
25361 * Don't allow anybody to remap normal RAM that we're using..
25362 */
25363 - for (pfn = phys_addr >> PAGE_SHIFT;
25364 - (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
25365 - pfn++) {
25366 -
25367 + for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
25368 int is_ram = page_is_ram(pfn);
25369
25370 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
25371 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
25372 return NULL;
25373 WARN_ON_ONCE(is_ram);
25374 }
25375 @@ -407,7 +404,7 @@ static int __init early_ioremap_debug_setup(char *str)
25376 early_param("early_ioremap_debug", early_ioremap_debug_setup);
25377
25378 static __initdata int after_paging_init;
25379 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
25380 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
25381
25382 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
25383 {
25384 @@ -439,8 +436,7 @@ void __init early_ioremap_init(void)
25385 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
25386
25387 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
25388 - memset(bm_pte, 0, sizeof(bm_pte));
25389 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
25390 + pmd_populate_user(&init_mm, pmd, bm_pte);
25391
25392 /*
25393 * The boot-ioremap range spans multiple pmds, for which
25394 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
25395 index 8cc1833..1abbc5b 100644
25396 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
25397 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
25398 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
25399 * memory (e.g. tracked pages)? For now, we need this to avoid
25400 * invoking kmemcheck for PnP BIOS calls.
25401 */
25402 - if (regs->flags & X86_VM_MASK)
25403 + if (v8086_mode(regs))
25404 return false;
25405 - if (regs->cs != __KERNEL_CS)
25406 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
25407 return false;
25408
25409 pte = kmemcheck_pte_lookup(address);
25410 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
25411 index c8191de..2975082 100644
25412 --- a/arch/x86/mm/mmap.c
25413 +++ b/arch/x86/mm/mmap.c
25414 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size(void)
25415 * Leave an at least ~128 MB hole with possible stack randomization.
25416 */
25417 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
25418 -#define MAX_GAP (TASK_SIZE/6*5)
25419 +#define MAX_GAP (pax_task_size/6*5)
25420
25421 /*
25422 * True on X86_32 or when emulating IA32 on X86_64
25423 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
25424 return rnd << PAGE_SHIFT;
25425 }
25426
25427 -static unsigned long mmap_base(void)
25428 +static unsigned long mmap_base(struct mm_struct *mm)
25429 {
25430 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
25431 + unsigned long pax_task_size = TASK_SIZE;
25432 +
25433 +#ifdef CONFIG_PAX_SEGMEXEC
25434 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25435 + pax_task_size = SEGMEXEC_TASK_SIZE;
25436 +#endif
25437
25438 if (gap < MIN_GAP)
25439 gap = MIN_GAP;
25440 else if (gap > MAX_GAP)
25441 gap = MAX_GAP;
25442
25443 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
25444 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
25445 }
25446
25447 /*
25448 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
25449 * does, but not when emulating X86_32
25450 */
25451 -static unsigned long mmap_legacy_base(void)
25452 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
25453 {
25454 - if (mmap_is_ia32())
25455 + if (mmap_is_ia32()) {
25456 +
25457 +#ifdef CONFIG_PAX_SEGMEXEC
25458 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25459 + return SEGMEXEC_TASK_UNMAPPED_BASE;
25460 + else
25461 +#endif
25462 +
25463 return TASK_UNMAPPED_BASE;
25464 - else
25465 + } else
25466 return TASK_UNMAPPED_BASE + mmap_rnd();
25467 }
25468
25469 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(void)
25470 void arch_pick_mmap_layout(struct mm_struct *mm)
25471 {
25472 if (mmap_is_legacy()) {
25473 - mm->mmap_base = mmap_legacy_base();
25474 + mm->mmap_base = mmap_legacy_base(mm);
25475 +
25476 +#ifdef CONFIG_PAX_RANDMMAP
25477 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25478 + mm->mmap_base += mm->delta_mmap;
25479 +#endif
25480 +
25481 mm->get_unmapped_area = arch_get_unmapped_area;
25482 mm->unmap_area = arch_unmap_area;
25483 } else {
25484 - mm->mmap_base = mmap_base();
25485 + mm->mmap_base = mmap_base(mm);
25486 +
25487 +#ifdef CONFIG_PAX_RANDMMAP
25488 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25489 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
25490 +#endif
25491 +
25492 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
25493 mm->unmap_area = arch_unmap_area_topdown;
25494 }
25495 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
25496 index 132772a..b961f11 100644
25497 --- a/arch/x86/mm/mmio-mod.c
25498 +++ b/arch/x86/mm/mmio-mod.c
25499 @@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
25500 break;
25501 default:
25502 {
25503 - unsigned char *ip = (unsigned char *)instptr;
25504 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
25505 my_trace->opcode = MMIO_UNKNOWN_OP;
25506 my_trace->width = 0;
25507 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
25508 @@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
25509 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25510 void __iomem *addr)
25511 {
25512 - static atomic_t next_id;
25513 + static atomic_unchecked_t next_id;
25514 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
25515 /* These are page-unaligned. */
25516 struct mmiotrace_map map = {
25517 @@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25518 .private = trace
25519 },
25520 .phys = offset,
25521 - .id = atomic_inc_return(&next_id)
25522 + .id = atomic_inc_return_unchecked(&next_id)
25523 };
25524 map.map_id = trace->id;
25525
25526 diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
25527 index d253006..e56dd6a 100644
25528 --- a/arch/x86/mm/numa_32.c
25529 +++ b/arch/x86/mm/numa_32.c
25530 @@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
25531 }
25532 #endif
25533
25534 -extern unsigned long find_max_low_pfn(void);
25535 extern unsigned long highend_pfn, highstart_pfn;
25536
25537 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
25538 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
25539 index e1d1069..2251ff3 100644
25540 --- a/arch/x86/mm/pageattr-test.c
25541 +++ b/arch/x86/mm/pageattr-test.c
25542 @@ -36,7 +36,7 @@ enum {
25543
25544 static int pte_testbit(pte_t pte)
25545 {
25546 - return pte_flags(pte) & _PAGE_UNUSED1;
25547 + return pte_flags(pte) & _PAGE_CPA_TEST;
25548 }
25549
25550 struct split_state {
25551 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
25552 index dd38bfb..8c12306 100644
25553 --- a/arch/x86/mm/pageattr.c
25554 +++ b/arch/x86/mm/pageattr.c
25555 @@ -261,16 +261,17 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25556 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
25557 */
25558 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
25559 - pgprot_val(forbidden) |= _PAGE_NX;
25560 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25561
25562 /*
25563 * The kernel text needs to be executable for obvious reasons
25564 * Does not cover __inittext since that is gone later on. On
25565 * 64bit we do not enforce !NX on the low mapping
25566 */
25567 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
25568 - pgprot_val(forbidden) |= _PAGE_NX;
25569 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
25570 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25571
25572 +#ifdef CONFIG_DEBUG_RODATA
25573 /*
25574 * The .rodata section needs to be read-only. Using the pfn
25575 * catches all aliases.
25576 @@ -278,6 +279,14 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25577 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
25578 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
25579 pgprot_val(forbidden) |= _PAGE_RW;
25580 +#endif
25581 +
25582 +#ifdef CONFIG_PAX_KERNEXEC
25583 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
25584 + pgprot_val(forbidden) |= _PAGE_RW;
25585 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25586 + }
25587 +#endif
25588
25589 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
25590
25591 @@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
25592 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
25593 {
25594 /* change init_mm */
25595 + pax_open_kernel();
25596 set_pte_atomic(kpte, pte);
25597 +
25598 #ifdef CONFIG_X86_32
25599 if (!SHARED_KERNEL_PMD) {
25600 +
25601 +#ifdef CONFIG_PAX_PER_CPU_PGD
25602 + unsigned long cpu;
25603 +#else
25604 struct page *page;
25605 +#endif
25606
25607 +#ifdef CONFIG_PAX_PER_CPU_PGD
25608 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
25609 + pgd_t *pgd = get_cpu_pgd(cpu);
25610 +#else
25611 list_for_each_entry(page, &pgd_list, lru) {
25612 - pgd_t *pgd;
25613 + pgd_t *pgd = (pgd_t *)page_address(page);
25614 +#endif
25615 +
25616 pud_t *pud;
25617 pmd_t *pmd;
25618
25619 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
25620 + pgd += pgd_index(address);
25621 pud = pud_offset(pgd, address);
25622 pmd = pmd_offset(pud, address);
25623 set_pte_atomic((pte_t *)pmd, pte);
25624 }
25625 }
25626 #endif
25627 + pax_close_kernel();
25628 }
25629
25630 static int
25631 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
25632 index e78cd0e..de0a817 100644
25633 --- a/arch/x86/mm/pat.c
25634 +++ b/arch/x86/mm/pat.c
25635 @@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
25636
25637 conflict:
25638 printk(KERN_INFO "%s:%d conflicting memory types "
25639 - "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
25640 + "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
25641 new->end, cattr_name(new->type), cattr_name(entry->type));
25642 return -EBUSY;
25643 }
25644 @@ -559,7 +559,7 @@ unlock_ret:
25645
25646 if (err) {
25647 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
25648 - current->comm, current->pid, start, end);
25649 + current->comm, task_pid_nr(current), start, end);
25650 }
25651
25652 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
25653 @@ -689,8 +689,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25654 while (cursor < to) {
25655 if (!devmem_is_allowed(pfn)) {
25656 printk(KERN_INFO
25657 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25658 - current->comm, from, to);
25659 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
25660 + current->comm, from, to, cursor);
25661 return 0;
25662 }
25663 cursor += PAGE_SIZE;
25664 @@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
25665 printk(KERN_INFO
25666 "%s:%d ioremap_change_attr failed %s "
25667 "for %Lx-%Lx\n",
25668 - current->comm, current->pid,
25669 + current->comm, task_pid_nr(current),
25670 cattr_name(flags),
25671 base, (unsigned long long)(base + size));
25672 return -EINVAL;
25673 @@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25674 free_memtype(paddr, paddr + size);
25675 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
25676 " for %Lx-%Lx, got %s\n",
25677 - current->comm, current->pid,
25678 + current->comm, task_pid_nr(current),
25679 cattr_name(want_flags),
25680 (unsigned long long)paddr,
25681 (unsigned long long)(paddr + size),
25682 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
25683 index df3d5c8..c2223e1 100644
25684 --- a/arch/x86/mm/pf_in.c
25685 +++ b/arch/x86/mm/pf_in.c
25686 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
25687 int i;
25688 enum reason_type rv = OTHERS;
25689
25690 - p = (unsigned char *)ins_addr;
25691 + p = (unsigned char *)ktla_ktva(ins_addr);
25692 p += skip_prefix(p, &prf);
25693 p += get_opcode(p, &opcode);
25694
25695 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
25696 struct prefix_bits prf;
25697 int i;
25698
25699 - p = (unsigned char *)ins_addr;
25700 + p = (unsigned char *)ktla_ktva(ins_addr);
25701 p += skip_prefix(p, &prf);
25702 p += get_opcode(p, &opcode);
25703
25704 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
25705 struct prefix_bits prf;
25706 int i;
25707
25708 - p = (unsigned char *)ins_addr;
25709 + p = (unsigned char *)ktla_ktva(ins_addr);
25710 p += skip_prefix(p, &prf);
25711 p += get_opcode(p, &opcode);
25712
25713 @@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
25714 int i;
25715 unsigned long rv;
25716
25717 - p = (unsigned char *)ins_addr;
25718 + p = (unsigned char *)ktla_ktva(ins_addr);
25719 p += skip_prefix(p, &prf);
25720 p += get_opcode(p, &opcode);
25721 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
25722 @@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
25723 int i;
25724 unsigned long rv;
25725
25726 - p = (unsigned char *)ins_addr;
25727 + p = (unsigned char *)ktla_ktva(ins_addr);
25728 p += skip_prefix(p, &prf);
25729 p += get_opcode(p, &opcode);
25730 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
25731 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
25732 index e0e6fad..6b90017 100644
25733 --- a/arch/x86/mm/pgtable.c
25734 +++ b/arch/x86/mm/pgtable.c
25735 @@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *pgd)
25736 list_del(&page->lru);
25737 }
25738
25739 -#define UNSHARED_PTRS_PER_PGD \
25740 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25741 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25742 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
25743
25744 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
25745 +{
25746 + while (count--)
25747 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
25748 +}
25749 +#endif
25750 +
25751 +#ifdef CONFIG_PAX_PER_CPU_PGD
25752 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
25753 +{
25754 + while (count--)
25755 +
25756 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25757 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
25758 +#else
25759 + *dst++ = *src++;
25760 +#endif
25761 +
25762 +}
25763 +#endif
25764 +
25765 +#ifdef CONFIG_X86_64
25766 +#define pxd_t pud_t
25767 +#define pyd_t pgd_t
25768 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
25769 +#define pxd_free(mm, pud) pud_free((mm), (pud))
25770 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
25771 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
25772 +#define PYD_SIZE PGDIR_SIZE
25773 +#else
25774 +#define pxd_t pmd_t
25775 +#define pyd_t pud_t
25776 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
25777 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
25778 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
25779 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
25780 +#define PYD_SIZE PUD_SIZE
25781 +#endif
25782 +
25783 +#ifdef CONFIG_PAX_PER_CPU_PGD
25784 +static inline void pgd_ctor(pgd_t *pgd) {}
25785 +static inline void pgd_dtor(pgd_t *pgd) {}
25786 +#else
25787 static void pgd_ctor(pgd_t *pgd)
25788 {
25789 /* If the pgd points to a shared pagetable level (either the
25790 @@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
25791 pgd_list_del(pgd);
25792 spin_unlock_irqrestore(&pgd_lock, flags);
25793 }
25794 +#endif
25795
25796 /*
25797 * List of all pgd's needed for non-PAE so it can invalidate entries
25798 @@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
25799 * -- wli
25800 */
25801
25802 -#ifdef CONFIG_X86_PAE
25803 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25804 /*
25805 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
25806 * updating the top-level pagetable entries to guarantee the
25807 @@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
25808 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
25809 * and initialize the kernel pmds here.
25810 */
25811 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
25812 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25813
25814 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25815 {
25816 @@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25817 */
25818 flush_tlb_mm(mm);
25819 }
25820 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
25821 +#define PREALLOCATED_PXDS USER_PGD_PTRS
25822 #else /* !CONFIG_X86_PAE */
25823
25824 /* No need to prepopulate any pagetable entries in non-PAE modes. */
25825 -#define PREALLOCATED_PMDS 0
25826 +#define PREALLOCATED_PXDS 0
25827
25828 #endif /* CONFIG_X86_PAE */
25829
25830 -static void free_pmds(pmd_t *pmds[])
25831 +static void free_pxds(pxd_t *pxds[])
25832 {
25833 int i;
25834
25835 - for(i = 0; i < PREALLOCATED_PMDS; i++)
25836 - if (pmds[i])
25837 - free_page((unsigned long)pmds[i]);
25838 + for(i = 0; i < PREALLOCATED_PXDS; i++)
25839 + if (pxds[i])
25840 + free_page((unsigned long)pxds[i]);
25841 }
25842
25843 -static int preallocate_pmds(pmd_t *pmds[])
25844 +static int preallocate_pxds(pxd_t *pxds[])
25845 {
25846 int i;
25847 bool failed = false;
25848
25849 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
25850 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
25851 - if (pmd == NULL)
25852 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
25853 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
25854 + if (pxd == NULL)
25855 failed = true;
25856 - pmds[i] = pmd;
25857 + pxds[i] = pxd;
25858 }
25859
25860 if (failed) {
25861 - free_pmds(pmds);
25862 + free_pxds(pxds);
25863 return -ENOMEM;
25864 }
25865
25866 @@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[])
25867 * preallocate which never got a corresponding vma will need to be
25868 * freed manually.
25869 */
25870 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
25871 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
25872 {
25873 int i;
25874
25875 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
25876 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
25877 pgd_t pgd = pgdp[i];
25878
25879 if (pgd_val(pgd) != 0) {
25880 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
25881 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
25882
25883 - pgdp[i] = native_make_pgd(0);
25884 + set_pgd(pgdp + i, native_make_pgd(0));
25885
25886 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
25887 - pmd_free(mm, pmd);
25888 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
25889 + pxd_free(mm, pxd);
25890 }
25891 }
25892 }
25893
25894 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
25895 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
25896 {
25897 - pud_t *pud;
25898 + pyd_t *pyd;
25899 unsigned long addr;
25900 int i;
25901
25902 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
25903 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
25904 return;
25905
25906 - pud = pud_offset(pgd, 0);
25907 +#ifdef CONFIG_X86_64
25908 + pyd = pyd_offset(mm, 0L);
25909 +#else
25910 + pyd = pyd_offset(pgd, 0L);
25911 +#endif
25912
25913 - for (addr = i = 0; i < PREALLOCATED_PMDS;
25914 - i++, pud++, addr += PUD_SIZE) {
25915 - pmd_t *pmd = pmds[i];
25916 + for (addr = i = 0; i < PREALLOCATED_PXDS;
25917 + i++, pyd++, addr += PYD_SIZE) {
25918 + pxd_t *pxd = pxds[i];
25919
25920 if (i >= KERNEL_PGD_BOUNDARY)
25921 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25922 - sizeof(pmd_t) * PTRS_PER_PMD);
25923 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25924 + sizeof(pxd_t) * PTRS_PER_PMD);
25925
25926 - pud_populate(mm, pud, pmd);
25927 + pyd_populate(mm, pyd, pxd);
25928 }
25929 }
25930
25931 pgd_t *pgd_alloc(struct mm_struct *mm)
25932 {
25933 pgd_t *pgd;
25934 - pmd_t *pmds[PREALLOCATED_PMDS];
25935 + pxd_t *pxds[PREALLOCATED_PXDS];
25936 +
25937 unsigned long flags;
25938
25939 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
25940 @@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25941
25942 mm->pgd = pgd;
25943
25944 - if (preallocate_pmds(pmds) != 0)
25945 + if (preallocate_pxds(pxds) != 0)
25946 goto out_free_pgd;
25947
25948 if (paravirt_pgd_alloc(mm) != 0)
25949 - goto out_free_pmds;
25950 + goto out_free_pxds;
25951
25952 /*
25953 * Make sure that pre-populating the pmds is atomic with
25954 @@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25955 spin_lock_irqsave(&pgd_lock, flags);
25956
25957 pgd_ctor(pgd);
25958 - pgd_prepopulate_pmd(mm, pgd, pmds);
25959 + pgd_prepopulate_pxd(mm, pgd, pxds);
25960
25961 spin_unlock_irqrestore(&pgd_lock, flags);
25962
25963 return pgd;
25964
25965 -out_free_pmds:
25966 - free_pmds(pmds);
25967 +out_free_pxds:
25968 + free_pxds(pxds);
25969 out_free_pgd:
25970 free_page((unsigned long)pgd);
25971 out:
25972 @@ -287,7 +338,7 @@ out:
25973
25974 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
25975 {
25976 - pgd_mop_up_pmds(mm, pgd);
25977 + pgd_mop_up_pxds(mm, pgd);
25978 pgd_dtor(pgd);
25979 paravirt_pgd_free(mm, pgd);
25980 free_page((unsigned long)pgd);
25981 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
25982 index 46c8834..fcab43d 100644
25983 --- a/arch/x86/mm/pgtable_32.c
25984 +++ b/arch/x86/mm/pgtable_32.c
25985 @@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
25986 return;
25987 }
25988 pte = pte_offset_kernel(pmd, vaddr);
25989 +
25990 + pax_open_kernel();
25991 if (pte_val(pteval))
25992 set_pte_at(&init_mm, vaddr, pte, pteval);
25993 else
25994 pte_clear(&init_mm, vaddr, pte);
25995 + pax_close_kernel();
25996
25997 /*
25998 * It's enough to flush this one mapping.
25999 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
26000 index 513d8ed..978c161 100644
26001 --- a/arch/x86/mm/setup_nx.c
26002 +++ b/arch/x86/mm/setup_nx.c
26003 @@ -4,11 +4,10 @@
26004
26005 #include <asm/pgtable.h>
26006
26007 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26008 int nx_enabled;
26009
26010 -#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26011 -static int disable_nx __cpuinitdata;
26012 -
26013 +#ifndef CONFIG_PAX_PAGEEXEC
26014 /*
26015 * noexec = on|off
26016 *
26017 @@ -22,32 +21,26 @@ static int __init noexec_setup(char *str)
26018 if (!str)
26019 return -EINVAL;
26020 if (!strncmp(str, "on", 2)) {
26021 - __supported_pte_mask |= _PAGE_NX;
26022 - disable_nx = 0;
26023 + nx_enabled = 1;
26024 } else if (!strncmp(str, "off", 3)) {
26025 - disable_nx = 1;
26026 - __supported_pte_mask &= ~_PAGE_NX;
26027 + nx_enabled = 0;
26028 }
26029 return 0;
26030 }
26031 early_param("noexec", noexec_setup);
26032 #endif
26033 +#endif
26034
26035 #ifdef CONFIG_X86_PAE
26036 void __init set_nx(void)
26037 {
26038 - unsigned int v[4], l, h;
26039 + if (!nx_enabled && cpu_has_nx) {
26040 + unsigned l, h;
26041
26042 - if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
26043 - cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
26044 -
26045 - if ((v[3] & (1 << 20)) && !disable_nx) {
26046 - rdmsr(MSR_EFER, l, h);
26047 - l |= EFER_NX;
26048 - wrmsr(MSR_EFER, l, h);
26049 - nx_enabled = 1;
26050 - __supported_pte_mask |= _PAGE_NX;
26051 - }
26052 + __supported_pte_mask &= ~_PAGE_NX;
26053 + rdmsr(MSR_EFER, l, h);
26054 + l &= ~EFER_NX;
26055 + wrmsr(MSR_EFER, l, h);
26056 }
26057 }
26058 #else
26059 @@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
26060 unsigned long efer;
26061
26062 rdmsrl(MSR_EFER, efer);
26063 - if (!(efer & EFER_NX) || disable_nx)
26064 + if (!(efer & EFER_NX) || !nx_enabled)
26065 __supported_pte_mask &= ~_PAGE_NX;
26066 }
26067 #endif
26068 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
26069 index 36fe08e..b123d3a 100644
26070 --- a/arch/x86/mm/tlb.c
26071 +++ b/arch/x86/mm/tlb.c
26072 @@ -61,7 +61,11 @@ void leave_mm(int cpu)
26073 BUG();
26074 cpumask_clear_cpu(cpu,
26075 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
26076 +
26077 +#ifndef CONFIG_PAX_PER_CPU_PGD
26078 load_cr3(swapper_pg_dir);
26079 +#endif
26080 +
26081 }
26082 EXPORT_SYMBOL_GPL(leave_mm);
26083
26084 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
26085 index 829edf0..672adb3 100644
26086 --- a/arch/x86/oprofile/backtrace.c
26087 +++ b/arch/x86/oprofile/backtrace.c
26088 @@ -115,7 +115,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
26089 {
26090 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
26091
26092 - if (!user_mode_vm(regs)) {
26093 + if (!user_mode(regs)) {
26094 unsigned long stack = kernel_stack_pointer(regs);
26095 if (depth)
26096 dump_trace(NULL, regs, (unsigned long *)stack, 0,
26097 diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
26098 index e6a160a..36deff6 100644
26099 --- a/arch/x86/oprofile/op_model_p4.c
26100 +++ b/arch/x86/oprofile/op_model_p4.c
26101 @@ -50,7 +50,7 @@ static inline void setup_num_counters(void)
26102 #endif
26103 }
26104
26105 -static int inline addr_increment(void)
26106 +static inline int addr_increment(void)
26107 {
26108 #ifdef CONFIG_SMP
26109 return smp_num_siblings == 2 ? 2 : 1;
26110 diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
26111 index 1331fcf..03901b2 100644
26112 --- a/arch/x86/pci/common.c
26113 +++ b/arch/x86/pci/common.c
26114 @@ -31,8 +31,8 @@ int noioapicreroute = 1;
26115 int pcibios_last_bus = -1;
26116 unsigned long pirq_table_addr;
26117 struct pci_bus *pci_root_bus;
26118 -struct pci_raw_ops *raw_pci_ops;
26119 -struct pci_raw_ops *raw_pci_ext_ops;
26120 +const struct pci_raw_ops *raw_pci_ops;
26121 +const struct pci_raw_ops *raw_pci_ext_ops;
26122
26123 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
26124 int reg, int len, u32 *val)
26125 diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c
26126 index 347d882..4baf6b6 100644
26127 --- a/arch/x86/pci/direct.c
26128 +++ b/arch/x86/pci/direct.c
26129 @@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int seg, unsigned int bus,
26130
26131 #undef PCI_CONF1_ADDRESS
26132
26133 -struct pci_raw_ops pci_direct_conf1 = {
26134 +const struct pci_raw_ops pci_direct_conf1 = {
26135 .read = pci_conf1_read,
26136 .write = pci_conf1_write,
26137 };
26138 @@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int seg, unsigned int bus,
26139
26140 #undef PCI_CONF2_ADDRESS
26141
26142 -struct pci_raw_ops pci_direct_conf2 = {
26143 +const struct pci_raw_ops pci_direct_conf2 = {
26144 .read = pci_conf2_read,
26145 .write = pci_conf2_write,
26146 };
26147 @@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
26148 * This should be close to trivial, but it isn't, because there are buggy
26149 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
26150 */
26151 -static int __init pci_sanity_check(struct pci_raw_ops *o)
26152 +static int __init pci_sanity_check(const struct pci_raw_ops *o)
26153 {
26154 u32 x = 0;
26155 int year, devfn;
26156 diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c
26157 index f10a7e9..0425342 100644
26158 --- a/arch/x86/pci/mmconfig_32.c
26159 +++ b/arch/x86/pci/mmconfig_32.c
26160 @@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
26161 return 0;
26162 }
26163
26164 -static struct pci_raw_ops pci_mmcfg = {
26165 +static const struct pci_raw_ops pci_mmcfg = {
26166 .read = pci_mmcfg_read,
26167 .write = pci_mmcfg_write,
26168 };
26169 diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c
26170 index 94349f8..41600a7 100644
26171 --- a/arch/x86/pci/mmconfig_64.c
26172 +++ b/arch/x86/pci/mmconfig_64.c
26173 @@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
26174 return 0;
26175 }
26176
26177 -static struct pci_raw_ops pci_mmcfg = {
26178 +static const struct pci_raw_ops pci_mmcfg = {
26179 .read = pci_mmcfg_read,
26180 .write = pci_mmcfg_write,
26181 };
26182 diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c
26183 index 8eb295e..86bd657 100644
26184 --- a/arch/x86/pci/numaq_32.c
26185 +++ b/arch/x86/pci/numaq_32.c
26186 @@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned int seg, unsigned int bus,
26187
26188 #undef PCI_CONF1_MQ_ADDRESS
26189
26190 -static struct pci_raw_ops pci_direct_conf1_mq = {
26191 +static const struct pci_raw_ops pci_direct_conf1_mq = {
26192 .read = pci_conf1_mq_read,
26193 .write = pci_conf1_mq_write
26194 };
26195 diff --git a/arch/x86/pci/olpc.c b/arch/x86/pci/olpc.c
26196 index b889d82..5a58a0a 100644
26197 --- a/arch/x86/pci/olpc.c
26198 +++ b/arch/x86/pci/olpc.c
26199 @@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int seg, unsigned int bus,
26200 return 0;
26201 }
26202
26203 -static struct pci_raw_ops pci_olpc_conf = {
26204 +static const struct pci_raw_ops pci_olpc_conf = {
26205 .read = pci_olpc_read,
26206 .write = pci_olpc_write,
26207 };
26208 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
26209 index 1c975cc..ffd0536 100644
26210 --- a/arch/x86/pci/pcbios.c
26211 +++ b/arch/x86/pci/pcbios.c
26212 @@ -56,50 +56,93 @@ union bios32 {
26213 static struct {
26214 unsigned long address;
26215 unsigned short segment;
26216 -} bios32_indirect = { 0, __KERNEL_CS };
26217 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
26218
26219 /*
26220 * Returns the entry point for the given service, NULL on error
26221 */
26222
26223 -static unsigned long bios32_service(unsigned long service)
26224 +static unsigned long __devinit bios32_service(unsigned long service)
26225 {
26226 unsigned char return_code; /* %al */
26227 unsigned long address; /* %ebx */
26228 unsigned long length; /* %ecx */
26229 unsigned long entry; /* %edx */
26230 unsigned long flags;
26231 + struct desc_struct d, *gdt;
26232
26233 local_irq_save(flags);
26234 - __asm__("lcall *(%%edi); cld"
26235 +
26236 + gdt = get_cpu_gdt_table(smp_processor_id());
26237 +
26238 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
26239 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26240 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
26241 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26242 +
26243 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
26244 : "=a" (return_code),
26245 "=b" (address),
26246 "=c" (length),
26247 "=d" (entry)
26248 : "0" (service),
26249 "1" (0),
26250 - "D" (&bios32_indirect));
26251 + "D" (&bios32_indirect),
26252 + "r"(__PCIBIOS_DS)
26253 + : "memory");
26254 +
26255 + pax_open_kernel();
26256 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
26257 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
26258 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
26259 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
26260 + pax_close_kernel();
26261 +
26262 local_irq_restore(flags);
26263
26264 switch (return_code) {
26265 - case 0:
26266 - return address + entry;
26267 - case 0x80: /* Not present */
26268 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26269 - return 0;
26270 - default: /* Shouldn't happen */
26271 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26272 - service, return_code);
26273 + case 0: {
26274 + int cpu;
26275 + unsigned char flags;
26276 +
26277 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
26278 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
26279 + printk(KERN_WARNING "bios32_service: not valid\n");
26280 return 0;
26281 + }
26282 + address = address + PAGE_OFFSET;
26283 + length += 16UL; /* some BIOSs underreport this... */
26284 + flags = 4;
26285 + if (length >= 64*1024*1024) {
26286 + length >>= PAGE_SHIFT;
26287 + flags |= 8;
26288 + }
26289 +
26290 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
26291 + gdt = get_cpu_gdt_table(cpu);
26292 + pack_descriptor(&d, address, length, 0x9b, flags);
26293 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26294 + pack_descriptor(&d, address, length, 0x93, flags);
26295 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26296 + }
26297 + return entry;
26298 + }
26299 + case 0x80: /* Not present */
26300 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26301 + return 0;
26302 + default: /* Shouldn't happen */
26303 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26304 + service, return_code);
26305 + return 0;
26306 }
26307 }
26308
26309 static struct {
26310 unsigned long address;
26311 unsigned short segment;
26312 -} pci_indirect = { 0, __KERNEL_CS };
26313 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
26314
26315 -static int pci_bios_present;
26316 +static int pci_bios_present __read_only;
26317
26318 static int __devinit check_pcibios(void)
26319 {
26320 @@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
26321 unsigned long flags, pcibios_entry;
26322
26323 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
26324 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
26325 + pci_indirect.address = pcibios_entry;
26326
26327 local_irq_save(flags);
26328 - __asm__(
26329 - "lcall *(%%edi); cld\n\t"
26330 + __asm__("movw %w6, %%ds\n\t"
26331 + "lcall *%%ss:(%%edi); cld\n\t"
26332 + "push %%ss\n\t"
26333 + "pop %%ds\n\t"
26334 "jc 1f\n\t"
26335 "xor %%ah, %%ah\n"
26336 "1:"
26337 @@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
26338 "=b" (ebx),
26339 "=c" (ecx)
26340 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
26341 - "D" (&pci_indirect)
26342 + "D" (&pci_indirect),
26343 + "r" (__PCIBIOS_DS)
26344 : "memory");
26345 local_irq_restore(flags);
26346
26347 @@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26348
26349 switch (len) {
26350 case 1:
26351 - __asm__("lcall *(%%esi); cld\n\t"
26352 + __asm__("movw %w6, %%ds\n\t"
26353 + "lcall *%%ss:(%%esi); cld\n\t"
26354 + "push %%ss\n\t"
26355 + "pop %%ds\n\t"
26356 "jc 1f\n\t"
26357 "xor %%ah, %%ah\n"
26358 "1:"
26359 @@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26360 : "1" (PCIBIOS_READ_CONFIG_BYTE),
26361 "b" (bx),
26362 "D" ((long)reg),
26363 - "S" (&pci_indirect));
26364 + "S" (&pci_indirect),
26365 + "r" (__PCIBIOS_DS));
26366 /*
26367 * Zero-extend the result beyond 8 bits, do not trust the
26368 * BIOS having done it:
26369 @@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26370 *value &= 0xff;
26371 break;
26372 case 2:
26373 - __asm__("lcall *(%%esi); cld\n\t"
26374 + __asm__("movw %w6, %%ds\n\t"
26375 + "lcall *%%ss:(%%esi); cld\n\t"
26376 + "push %%ss\n\t"
26377 + "pop %%ds\n\t"
26378 "jc 1f\n\t"
26379 "xor %%ah, %%ah\n"
26380 "1:"
26381 @@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26382 : "1" (PCIBIOS_READ_CONFIG_WORD),
26383 "b" (bx),
26384 "D" ((long)reg),
26385 - "S" (&pci_indirect));
26386 + "S" (&pci_indirect),
26387 + "r" (__PCIBIOS_DS));
26388 /*
26389 * Zero-extend the result beyond 16 bits, do not trust the
26390 * BIOS having done it:
26391 @@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26392 *value &= 0xffff;
26393 break;
26394 case 4:
26395 - __asm__("lcall *(%%esi); cld\n\t"
26396 + __asm__("movw %w6, %%ds\n\t"
26397 + "lcall *%%ss:(%%esi); cld\n\t"
26398 + "push %%ss\n\t"
26399 + "pop %%ds\n\t"
26400 "jc 1f\n\t"
26401 "xor %%ah, %%ah\n"
26402 "1:"
26403 @@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26404 : "1" (PCIBIOS_READ_CONFIG_DWORD),
26405 "b" (bx),
26406 "D" ((long)reg),
26407 - "S" (&pci_indirect));
26408 + "S" (&pci_indirect),
26409 + "r" (__PCIBIOS_DS));
26410 break;
26411 }
26412
26413 @@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26414
26415 switch (len) {
26416 case 1:
26417 - __asm__("lcall *(%%esi); cld\n\t"
26418 + __asm__("movw %w6, %%ds\n\t"
26419 + "lcall *%%ss:(%%esi); cld\n\t"
26420 + "push %%ss\n\t"
26421 + "pop %%ds\n\t"
26422 "jc 1f\n\t"
26423 "xor %%ah, %%ah\n"
26424 "1:"
26425 @@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26426 "c" (value),
26427 "b" (bx),
26428 "D" ((long)reg),
26429 - "S" (&pci_indirect));
26430 + "S" (&pci_indirect),
26431 + "r" (__PCIBIOS_DS));
26432 break;
26433 case 2:
26434 - __asm__("lcall *(%%esi); cld\n\t"
26435 + __asm__("movw %w6, %%ds\n\t"
26436 + "lcall *%%ss:(%%esi); cld\n\t"
26437 + "push %%ss\n\t"
26438 + "pop %%ds\n\t"
26439 "jc 1f\n\t"
26440 "xor %%ah, %%ah\n"
26441 "1:"
26442 @@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26443 "c" (value),
26444 "b" (bx),
26445 "D" ((long)reg),
26446 - "S" (&pci_indirect));
26447 + "S" (&pci_indirect),
26448 + "r" (__PCIBIOS_DS));
26449 break;
26450 case 4:
26451 - __asm__("lcall *(%%esi); cld\n\t"
26452 + __asm__("movw %w6, %%ds\n\t"
26453 + "lcall *%%ss:(%%esi); cld\n\t"
26454 + "push %%ss\n\t"
26455 + "pop %%ds\n\t"
26456 "jc 1f\n\t"
26457 "xor %%ah, %%ah\n"
26458 "1:"
26459 @@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26460 "c" (value),
26461 "b" (bx),
26462 "D" ((long)reg),
26463 - "S" (&pci_indirect));
26464 + "S" (&pci_indirect),
26465 + "r" (__PCIBIOS_DS));
26466 break;
26467 }
26468
26469 @@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26470 * Function table for BIOS32 access
26471 */
26472
26473 -static struct pci_raw_ops pci_bios_access = {
26474 +static const struct pci_raw_ops pci_bios_access = {
26475 .read = pci_bios_read,
26476 .write = pci_bios_write
26477 };
26478 @@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_access = {
26479 * Try to find PCI BIOS.
26480 */
26481
26482 -static struct pci_raw_ops * __devinit pci_find_bios(void)
26483 +static const struct pci_raw_ops * __devinit pci_find_bios(void)
26484 {
26485 union bios32 *check;
26486 unsigned char sum;
26487 @@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26488
26489 DBG("PCI: Fetching IRQ routing table... ");
26490 __asm__("push %%es\n\t"
26491 + "movw %w8, %%ds\n\t"
26492 "push %%ds\n\t"
26493 "pop %%es\n\t"
26494 - "lcall *(%%esi); cld\n\t"
26495 + "lcall *%%ss:(%%esi); cld\n\t"
26496 "pop %%es\n\t"
26497 + "push %%ss\n\t"
26498 + "pop %%ds\n"
26499 "jc 1f\n\t"
26500 "xor %%ah, %%ah\n"
26501 "1:"
26502 @@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26503 "1" (0),
26504 "D" ((long) &opt),
26505 "S" (&pci_indirect),
26506 - "m" (opt)
26507 + "m" (opt),
26508 + "r" (__PCIBIOS_DS)
26509 : "memory");
26510 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
26511 if (ret & 0xff00)
26512 @@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26513 {
26514 int ret;
26515
26516 - __asm__("lcall *(%%esi); cld\n\t"
26517 + __asm__("movw %w5, %%ds\n\t"
26518 + "lcall *%%ss:(%%esi); cld\n\t"
26519 + "push %%ss\n\t"
26520 + "pop %%ds\n"
26521 "jc 1f\n\t"
26522 "xor %%ah, %%ah\n"
26523 "1:"
26524 @@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26525 : "0" (PCIBIOS_SET_PCI_HW_INT),
26526 "b" ((dev->bus->number << 8) | dev->devfn),
26527 "c" ((irq << 8) | (pin + 10)),
26528 - "S" (&pci_indirect));
26529 + "S" (&pci_indirect),
26530 + "r" (__PCIBIOS_DS));
26531 return !(ret & 0xff00);
26532 }
26533 EXPORT_SYMBOL(pcibios_set_irq_routing);
26534 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
26535 index fa0f651..9d8f3d9 100644
26536 --- a/arch/x86/power/cpu.c
26537 +++ b/arch/x86/power/cpu.c
26538 @@ -129,7 +129,7 @@ static void do_fpu_end(void)
26539 static void fix_processor_context(void)
26540 {
26541 int cpu = smp_processor_id();
26542 - struct tss_struct *t = &per_cpu(init_tss, cpu);
26543 + struct tss_struct *t = init_tss + cpu;
26544
26545 set_tss_desc(cpu, t); /*
26546 * This just modifies memory; should not be
26547 @@ -139,7 +139,9 @@ static void fix_processor_context(void)
26548 */
26549
26550 #ifdef CONFIG_X86_64
26551 + pax_open_kernel();
26552 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
26553 + pax_close_kernel();
26554
26555 syscall_init(); /* This sets MSR_*STAR and related */
26556 #endif
26557 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
26558 index dd78ef6..f9d928d 100644
26559 --- a/arch/x86/vdso/Makefile
26560 +++ b/arch/x86/vdso/Makefile
26561 @@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
26562 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
26563 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
26564
26565 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26566 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26567 GCOV_PROFILE := n
26568
26569 #
26570 diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
26571 index ee55754..0013b2e 100644
26572 --- a/arch/x86/vdso/vclock_gettime.c
26573 +++ b/arch/x86/vdso/vclock_gettime.c
26574 @@ -22,24 +22,48 @@
26575 #include <asm/hpet.h>
26576 #include <asm/unistd.h>
26577 #include <asm/io.h>
26578 +#include <asm/fixmap.h>
26579 #include "vextern.h"
26580
26581 #define gtod vdso_vsyscall_gtod_data
26582
26583 +notrace noinline long __vdso_fallback_time(long *t)
26584 +{
26585 + long secs;
26586 + asm volatile("syscall"
26587 + : "=a" (secs)
26588 + : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
26589 + return secs;
26590 +}
26591 +
26592 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
26593 {
26594 long ret;
26595 asm("syscall" : "=a" (ret) :
26596 - "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
26597 + "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
26598 return ret;
26599 }
26600
26601 +notrace static inline cycle_t __vdso_vread_hpet(void)
26602 +{
26603 + return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
26604 +}
26605 +
26606 +notrace static inline cycle_t __vdso_vread_tsc(void)
26607 +{
26608 + cycle_t ret = (cycle_t)vget_cycles();
26609 +
26610 + return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
26611 +}
26612 +
26613 notrace static inline long vgetns(void)
26614 {
26615 long v;
26616 - cycles_t (*vread)(void);
26617 - vread = gtod->clock.vread;
26618 - v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
26619 + if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
26620 + v = __vdso_vread_tsc();
26621 + else
26622 + v = __vdso_vread_hpet();
26623 + v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
26624 return (v * gtod->clock.mult) >> gtod->clock.shift;
26625 }
26626
26627 @@ -113,7 +137,9 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts)
26628
26629 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
26630 {
26631 - if (likely(gtod->sysctl_enabled))
26632 + if (likely(gtod->sysctl_enabled &&
26633 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
26634 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
26635 switch (clock) {
26636 case CLOCK_REALTIME:
26637 if (likely(gtod->clock.vread))
26638 @@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
26639 int clock_gettime(clockid_t, struct timespec *)
26640 __attribute__((weak, alias("__vdso_clock_gettime")));
26641
26642 +notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
26643 +{
26644 + long ret;
26645 + asm("syscall" : "=a" (ret) :
26646 + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
26647 + return ret;
26648 +}
26649 +
26650 notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
26651 {
26652 - long ret;
26653 - if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
26654 + if (likely(gtod->sysctl_enabled &&
26655 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
26656 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
26657 + {
26658 if (likely(tv != NULL)) {
26659 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
26660 offsetof(struct timespec, tv_nsec) ||
26661 @@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
26662 }
26663 return 0;
26664 }
26665 - asm("syscall" : "=a" (ret) :
26666 - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
26667 - return ret;
26668 + return __vdso_fallback_gettimeofday(tv, tz);
26669 }
26670 int gettimeofday(struct timeval *, struct timezone *)
26671 __attribute__((weak, alias("__vdso_gettimeofday")));
26672 diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/vdso/vdso.lds.S
26673 index 4e5dd3b..00ba15e 100644
26674 --- a/arch/x86/vdso/vdso.lds.S
26675 +++ b/arch/x86/vdso/vdso.lds.S
26676 @@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
26677 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
26678 #include "vextern.h"
26679 #undef VEXTERN
26680 +
26681 +#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
26682 +VEXTERN(fallback_gettimeofday)
26683 +VEXTERN(fallback_time)
26684 +VEXTERN(getcpu)
26685 +#undef VEXTERN
26686 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
26687 index 58bc00f..d53fb48 100644
26688 --- a/arch/x86/vdso/vdso32-setup.c
26689 +++ b/arch/x86/vdso/vdso32-setup.c
26690 @@ -25,6 +25,7 @@
26691 #include <asm/tlbflush.h>
26692 #include <asm/vdso.h>
26693 #include <asm/proto.h>
26694 +#include <asm/mman.h>
26695
26696 enum {
26697 VDSO_DISABLED = 0,
26698 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
26699 void enable_sep_cpu(void)
26700 {
26701 int cpu = get_cpu();
26702 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
26703 + struct tss_struct *tss = init_tss + cpu;
26704
26705 if (!boot_cpu_has(X86_FEATURE_SEP)) {
26706 put_cpu();
26707 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
26708 gate_vma.vm_start = FIXADDR_USER_START;
26709 gate_vma.vm_end = FIXADDR_USER_END;
26710 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
26711 - gate_vma.vm_page_prot = __P101;
26712 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
26713 /*
26714 * Make sure the vDSO gets into every core dump.
26715 * Dumping its contents makes post-mortem fully interpretable later
26716 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26717 if (compat)
26718 addr = VDSO_HIGH_BASE;
26719 else {
26720 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
26721 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
26722 if (IS_ERR_VALUE(addr)) {
26723 ret = addr;
26724 goto up_fail;
26725 }
26726 }
26727
26728 - current->mm->context.vdso = (void *)addr;
26729 + current->mm->context.vdso = addr;
26730
26731 if (compat_uses_vma || !compat) {
26732 /*
26733 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26734 }
26735
26736 current_thread_info()->sysenter_return =
26737 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26738 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26739
26740 up_fail:
26741 if (ret)
26742 - current->mm->context.vdso = NULL;
26743 + current->mm->context.vdso = 0;
26744
26745 up_write(&mm->mmap_sem);
26746
26747 @@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
26748
26749 const char *arch_vma_name(struct vm_area_struct *vma)
26750 {
26751 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
26752 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
26753 return "[vdso]";
26754 +
26755 +#ifdef CONFIG_PAX_SEGMEXEC
26756 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
26757 + return "[vdso]";
26758 +#endif
26759 +
26760 return NULL;
26761 }
26762
26763 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
26764 struct mm_struct *mm = tsk->mm;
26765
26766 /* Check to see if this task was created in compat vdso mode */
26767 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
26768 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
26769 return &gate_vma;
26770 return NULL;
26771 }
26772 diff --git a/arch/x86/vdso/vextern.h b/arch/x86/vdso/vextern.h
26773 index 1683ba2..48d07f3 100644
26774 --- a/arch/x86/vdso/vextern.h
26775 +++ b/arch/x86/vdso/vextern.h
26776 @@ -11,6 +11,5 @@
26777 put into vextern.h and be referenced as a pointer with vdso prefix.
26778 The main kernel later fills in the values. */
26779
26780 -VEXTERN(jiffies)
26781 VEXTERN(vgetcpu_mode)
26782 VEXTERN(vsyscall_gtod_data)
26783 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
26784 index 21e1aeb..2c0b3c4 100644
26785 --- a/arch/x86/vdso/vma.c
26786 +++ b/arch/x86/vdso/vma.c
26787 @@ -17,8 +17,6 @@
26788 #include "vextern.h" /* Just for VMAGIC. */
26789 #undef VEXTERN
26790
26791 -unsigned int __read_mostly vdso_enabled = 1;
26792 -
26793 extern char vdso_start[], vdso_end[];
26794 extern unsigned short vdso_sync_cpuid;
26795
26796 @@ -27,10 +25,8 @@ static unsigned vdso_size;
26797
26798 static inline void *var_ref(void *p, char *name)
26799 {
26800 - if (*(void **)p != (void *)VMAGIC) {
26801 - printk("VDSO: variable %s broken\n", name);
26802 - vdso_enabled = 0;
26803 - }
26804 + if (*(void **)p != (void *)VMAGIC)
26805 + panic("VDSO: variable %s broken\n", name);
26806 return p;
26807 }
26808
26809 @@ -57,21 +53,18 @@ static int __init init_vdso_vars(void)
26810 if (!vbase)
26811 goto oom;
26812
26813 - if (memcmp(vbase, "\177ELF", 4)) {
26814 - printk("VDSO: I'm broken; not ELF\n");
26815 - vdso_enabled = 0;
26816 - }
26817 + if (memcmp(vbase, ELFMAG, SELFMAG))
26818 + panic("VDSO: I'm broken; not ELF\n");
26819
26820 #define VEXTERN(x) \
26821 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
26822 #include "vextern.h"
26823 #undef VEXTERN
26824 + vunmap(vbase);
26825 return 0;
26826
26827 oom:
26828 - printk("Cannot allocate vdso\n");
26829 - vdso_enabled = 0;
26830 - return -ENOMEM;
26831 + panic("Cannot allocate vdso\n");
26832 }
26833 __initcall(init_vdso_vars);
26834
26835 @@ -102,13 +95,15 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
26836 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26837 {
26838 struct mm_struct *mm = current->mm;
26839 - unsigned long addr;
26840 + unsigned long addr = 0;
26841 int ret;
26842
26843 - if (!vdso_enabled)
26844 - return 0;
26845 -
26846 down_write(&mm->mmap_sem);
26847 +
26848 +#ifdef CONFIG_PAX_RANDMMAP
26849 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26850 +#endif
26851 +
26852 addr = vdso_addr(mm->start_stack, vdso_size);
26853 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
26854 if (IS_ERR_VALUE(addr)) {
26855 @@ -116,7 +111,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26856 goto up_fail;
26857 }
26858
26859 - current->mm->context.vdso = (void *)addr;
26860 + current->mm->context.vdso = addr;
26861
26862 ret = install_special_mapping(mm, addr, vdso_size,
26863 VM_READ|VM_EXEC|
26864 @@ -124,7 +119,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26865 VM_ALWAYSDUMP,
26866 vdso_pages);
26867 if (ret) {
26868 - current->mm->context.vdso = NULL;
26869 + current->mm->context.vdso = 0;
26870 goto up_fail;
26871 }
26872
26873 @@ -132,10 +127,3 @@ up_fail:
26874 up_write(&mm->mmap_sem);
26875 return ret;
26876 }
26877 -
26878 -static __init int vdso_setup(char *s)
26879 -{
26880 - vdso_enabled = simple_strtoul(s, NULL, 0);
26881 - return 0;
26882 -}
26883 -__setup("vdso=", vdso_setup);
26884 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
26885 index 0087b00..eecb34f 100644
26886 --- a/arch/x86/xen/enlighten.c
26887 +++ b/arch/x86/xen/enlighten.c
26888 @@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
26889
26890 struct shared_info xen_dummy_shared_info;
26891
26892 -void *xen_initial_gdt;
26893 -
26894 /*
26895 * Point at some empty memory to start with. We map the real shared_info
26896 * page as soon as fixmap is up and running.
26897 @@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
26898
26899 preempt_disable();
26900
26901 - start = __get_cpu_var(idt_desc).address;
26902 + start = (unsigned long)__get_cpu_var(idt_desc).address;
26903 end = start + __get_cpu_var(idt_desc).size + 1;
26904
26905 xen_mc_flush();
26906 @@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic_ops __initdata = {
26907 #endif
26908 };
26909
26910 -static void xen_reboot(int reason)
26911 +static __noreturn void xen_reboot(int reason)
26912 {
26913 struct sched_shutdown r = { .reason = reason };
26914
26915 @@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
26916 BUG();
26917 }
26918
26919 -static void xen_restart(char *msg)
26920 +static __noreturn void xen_restart(char *msg)
26921 {
26922 xen_reboot(SHUTDOWN_reboot);
26923 }
26924
26925 -static void xen_emergency_restart(void)
26926 +static __noreturn void xen_emergency_restart(void)
26927 {
26928 xen_reboot(SHUTDOWN_reboot);
26929 }
26930
26931 -static void xen_machine_halt(void)
26932 +static __noreturn void xen_machine_halt(void)
26933 {
26934 xen_reboot(SHUTDOWN_poweroff);
26935 }
26936 @@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(void)
26937 */
26938 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
26939
26940 -#ifdef CONFIG_X86_64
26941 /* Work out if we support NX */
26942 - check_efer();
26943 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26944 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
26945 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
26946 + unsigned l, h;
26947 +
26948 +#ifdef CONFIG_X86_PAE
26949 + nx_enabled = 1;
26950 +#endif
26951 + __supported_pte_mask |= _PAGE_NX;
26952 + rdmsr(MSR_EFER, l, h);
26953 + l |= EFER_NX;
26954 + wrmsr(MSR_EFER, l, h);
26955 + }
26956 #endif
26957
26958 xen_setup_features();
26959 @@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(void)
26960
26961 machine_ops = xen_machine_ops;
26962
26963 - /*
26964 - * The only reliable way to retain the initial address of the
26965 - * percpu gdt_page is to remember it here, so we can go and
26966 - * mark it RW later, when the initial percpu area is freed.
26967 - */
26968 - xen_initial_gdt = &per_cpu(gdt_page, 0);
26969 -
26970 xen_smp_init();
26971
26972 pgd = (pgd_t *)xen_start_info->pt_base;
26973 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
26974 index 3f90a2c..2c2ad84 100644
26975 --- a/arch/x86/xen/mmu.c
26976 +++ b/arch/x86/xen/mmu.c
26977 @@ -1719,6 +1719,9 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
26978 convert_pfn_mfn(init_level4_pgt);
26979 convert_pfn_mfn(level3_ident_pgt);
26980 convert_pfn_mfn(level3_kernel_pgt);
26981 + convert_pfn_mfn(level3_vmalloc_start_pgt);
26982 + convert_pfn_mfn(level3_vmalloc_end_pgt);
26983 + convert_pfn_mfn(level3_vmemmap_pgt);
26984
26985 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
26986 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
26987 @@ -1737,7 +1740,11 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
26988 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
26989 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
26990 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
26991 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
26992 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
26993 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
26994 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
26995 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
26996 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
26997 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
26998
26999 @@ -1860,6 +1867,7 @@ static __init void xen_post_allocator_init(void)
27000 pv_mmu_ops.set_pud = xen_set_pud;
27001 #if PAGETABLE_LEVELS == 4
27002 pv_mmu_ops.set_pgd = xen_set_pgd;
27003 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
27004 #endif
27005
27006 /* This will work as long as patching hasn't happened yet
27007 @@ -1946,6 +1954,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
27008 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
27009 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
27010 .set_pgd = xen_set_pgd_hyper,
27011 + .set_pgd_batched = xen_set_pgd_hyper,
27012
27013 .alloc_pud = xen_alloc_pmd_init,
27014 .release_pud = xen_release_pmd_init,
27015 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
27016 index a96204a..fca9b8e 100644
27017 --- a/arch/x86/xen/smp.c
27018 +++ b/arch/x86/xen/smp.c
27019 @@ -168,11 +168,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
27020 {
27021 BUG_ON(smp_processor_id() != 0);
27022 native_smp_prepare_boot_cpu();
27023 -
27024 - /* We've switched to the "real" per-cpu gdt, so make sure the
27025 - old memory can be recycled */
27026 - make_lowmem_page_readwrite(xen_initial_gdt);
27027 -
27028 xen_setup_vcpu_info_placement();
27029 }
27030
27031 @@ -241,12 +236,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
27032 gdt = get_cpu_gdt_table(cpu);
27033
27034 ctxt->flags = VGCF_IN_KERNEL;
27035 - ctxt->user_regs.ds = __USER_DS;
27036 - ctxt->user_regs.es = __USER_DS;
27037 + ctxt->user_regs.ds = __KERNEL_DS;
27038 + ctxt->user_regs.es = __KERNEL_DS;
27039 ctxt->user_regs.ss = __KERNEL_DS;
27040 #ifdef CONFIG_X86_32
27041 ctxt->user_regs.fs = __KERNEL_PERCPU;
27042 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
27043 + savesegment(gs, ctxt->user_regs.gs);
27044 #else
27045 ctxt->gs_base_kernel = per_cpu_offset(cpu);
27046 #endif
27047 @@ -297,13 +292,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
27048 int rc;
27049
27050 per_cpu(current_task, cpu) = idle;
27051 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
27052 #ifdef CONFIG_X86_32
27053 irq_ctx_init(cpu);
27054 #else
27055 clear_tsk_thread_flag(idle, TIF_FORK);
27056 - per_cpu(kernel_stack, cpu) =
27057 - (unsigned long)task_stack_page(idle) -
27058 - KERNEL_STACK_OFFSET + THREAD_SIZE;
27059 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27060 #endif
27061 xen_setup_runstate_info(cpu);
27062 xen_setup_timer(cpu);
27063 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
27064 index 9a95a9c..4f39e774 100644
27065 --- a/arch/x86/xen/xen-asm_32.S
27066 +++ b/arch/x86/xen/xen-asm_32.S
27067 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
27068 ESP_OFFSET=4 # bytes pushed onto stack
27069
27070 /*
27071 - * Store vcpu_info pointer for easy access. Do it this way to
27072 - * avoid having to reload %fs
27073 + * Store vcpu_info pointer for easy access.
27074 */
27075 #ifdef CONFIG_SMP
27076 - GET_THREAD_INFO(%eax)
27077 - movl TI_cpu(%eax), %eax
27078 - movl __per_cpu_offset(,%eax,4), %eax
27079 - mov per_cpu__xen_vcpu(%eax), %eax
27080 + push %fs
27081 + mov $(__KERNEL_PERCPU), %eax
27082 + mov %eax, %fs
27083 + mov PER_CPU_VAR(xen_vcpu), %eax
27084 + pop %fs
27085 #else
27086 movl per_cpu__xen_vcpu, %eax
27087 #endif
27088 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
27089 index 1a5ff24..a187d40 100644
27090 --- a/arch/x86/xen/xen-head.S
27091 +++ b/arch/x86/xen/xen-head.S
27092 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
27093 #ifdef CONFIG_X86_32
27094 mov %esi,xen_start_info
27095 mov $init_thread_union+THREAD_SIZE,%esp
27096 +#ifdef CONFIG_SMP
27097 + movl $cpu_gdt_table,%edi
27098 + movl $__per_cpu_load,%eax
27099 + movw %ax,__KERNEL_PERCPU + 2(%edi)
27100 + rorl $16,%eax
27101 + movb %al,__KERNEL_PERCPU + 4(%edi)
27102 + movb %ah,__KERNEL_PERCPU + 7(%edi)
27103 + movl $__per_cpu_end - 1,%eax
27104 + subl $__per_cpu_start,%eax
27105 + movw %ax,__KERNEL_PERCPU + 0(%edi)
27106 +#endif
27107 #else
27108 mov %rsi,xen_start_info
27109 mov $init_thread_union+THREAD_SIZE,%rsp
27110 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
27111 index f9153a3..51eab3d 100644
27112 --- a/arch/x86/xen/xen-ops.h
27113 +++ b/arch/x86/xen/xen-ops.h
27114 @@ -10,8 +10,6 @@
27115 extern const char xen_hypervisor_callback[];
27116 extern const char xen_failsafe_callback[];
27117
27118 -extern void *xen_initial_gdt;
27119 -
27120 struct trap_info;
27121 void xen_copy_trap_info(struct trap_info *traps);
27122
27123 diff --git a/block/blk-integrity.c b/block/blk-integrity.c
27124 index 15c6308..96e83c2 100644
27125 --- a/block/blk-integrity.c
27126 +++ b/block/blk-integrity.c
27127 @@ -278,7 +278,7 @@ static struct attribute *integrity_attrs[] = {
27128 NULL,
27129 };
27130
27131 -static struct sysfs_ops integrity_ops = {
27132 +static const struct sysfs_ops integrity_ops = {
27133 .show = &integrity_attr_show,
27134 .store = &integrity_attr_store,
27135 };
27136 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
27137 index ca56420..f2fc409 100644
27138 --- a/block/blk-iopoll.c
27139 +++ b/block/blk-iopoll.c
27140 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
27141 }
27142 EXPORT_SYMBOL(blk_iopoll_complete);
27143
27144 -static void blk_iopoll_softirq(struct softirq_action *h)
27145 +static void blk_iopoll_softirq(void)
27146 {
27147 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
27148 int rearm = 0, budget = blk_iopoll_budget;
27149 diff --git a/block/blk-map.c b/block/blk-map.c
27150 index 30a7e51..0aeec6a 100644
27151 --- a/block/blk-map.c
27152 +++ b/block/blk-map.c
27153 @@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
27154 * direct dma. else, set up kernel bounce buffers
27155 */
27156 uaddr = (unsigned long) ubuf;
27157 - if (blk_rq_aligned(q, ubuf, len) && !map_data)
27158 + if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
27159 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
27160 else
27161 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
27162 @@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
27163 for (i = 0; i < iov_count; i++) {
27164 unsigned long uaddr = (unsigned long)iov[i].iov_base;
27165
27166 + if (!iov[i].iov_len)
27167 + return -EINVAL;
27168 +
27169 if (uaddr & queue_dma_alignment(q)) {
27170 unaligned = 1;
27171 break;
27172 }
27173 - if (!iov[i].iov_len)
27174 - return -EINVAL;
27175 }
27176
27177 if (unaligned || (q->dma_pad_mask & len) || map_data)
27178 @@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
27179 if (!len || !kbuf)
27180 return -EINVAL;
27181
27182 - do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
27183 + do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
27184 if (do_copy)
27185 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
27186 else
27187 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
27188 index ee9c216..58d410a 100644
27189 --- a/block/blk-softirq.c
27190 +++ b/block/blk-softirq.c
27191 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
27192 * Softirq action handler - move entries to local list and loop over them
27193 * while passing them to the queue registered handler.
27194 */
27195 -static void blk_done_softirq(struct softirq_action *h)
27196 +static void blk_done_softirq(void)
27197 {
27198 struct list_head *cpu_list, local_list;
27199
27200 diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
27201 index bb9c5ea..5330d48 100644
27202 --- a/block/blk-sysfs.c
27203 +++ b/block/blk-sysfs.c
27204 @@ -414,7 +414,7 @@ static void blk_release_queue(struct kobject *kobj)
27205 kmem_cache_free(blk_requestq_cachep, q);
27206 }
27207
27208 -static struct sysfs_ops queue_sysfs_ops = {
27209 +static const struct sysfs_ops queue_sysfs_ops = {
27210 .show = queue_attr_show,
27211 .store = queue_attr_store,
27212 };
27213 diff --git a/block/bsg.c b/block/bsg.c
27214 index 7154a7a..08ac2f0 100644
27215 --- a/block/bsg.c
27216 +++ b/block/bsg.c
27217 @@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
27218 struct sg_io_v4 *hdr, struct bsg_device *bd,
27219 fmode_t has_write_perm)
27220 {
27221 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27222 + unsigned char *cmdptr;
27223 +
27224 if (hdr->request_len > BLK_MAX_CDB) {
27225 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
27226 if (!rq->cmd)
27227 return -ENOMEM;
27228 - }
27229 + cmdptr = rq->cmd;
27230 + } else
27231 + cmdptr = tmpcmd;
27232
27233 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
27234 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
27235 hdr->request_len))
27236 return -EFAULT;
27237
27238 + if (cmdptr != rq->cmd)
27239 + memcpy(rq->cmd, cmdptr, hdr->request_len);
27240 +
27241 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
27242 if (blk_verify_command(rq->cmd, has_write_perm))
27243 return -EPERM;
27244 @@ -282,7 +290,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
27245 rq->next_rq = next_rq;
27246 next_rq->cmd_type = rq->cmd_type;
27247
27248 - dxferp = (void*)(unsigned long)hdr->din_xferp;
27249 + dxferp = (void __user *)(unsigned long)hdr->din_xferp;
27250 ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
27251 hdr->din_xfer_len, GFP_KERNEL);
27252 if (ret)
27253 @@ -291,10 +299,10 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
27254
27255 if (hdr->dout_xfer_len) {
27256 dxfer_len = hdr->dout_xfer_len;
27257 - dxferp = (void*)(unsigned long)hdr->dout_xferp;
27258 + dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
27259 } else if (hdr->din_xfer_len) {
27260 dxfer_len = hdr->din_xfer_len;
27261 - dxferp = (void*)(unsigned long)hdr->din_xferp;
27262 + dxferp = (void __user *)(unsigned long)hdr->din_xferp;
27263 } else
27264 dxfer_len = 0;
27265
27266 @@ -436,7 +444,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
27267 int len = min_t(unsigned int, hdr->max_response_len,
27268 rq->sense_len);
27269
27270 - ret = copy_to_user((void*)(unsigned long)hdr->response,
27271 + ret = copy_to_user((void __user *)(unsigned long)hdr->response,
27272 rq->sense, len);
27273 if (!ret)
27274 hdr->response_len = len;
27275 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
27276 index 9bd086c..ca1fc22 100644
27277 --- a/block/compat_ioctl.c
27278 +++ b/block/compat_ioctl.c
27279 @@ -354,7 +354,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
27280 err |= __get_user(f->spec1, &uf->spec1);
27281 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
27282 err |= __get_user(name, &uf->name);
27283 - f->name = compat_ptr(name);
27284 + f->name = (void __force_kernel *)compat_ptr(name);
27285 if (err) {
27286 err = -EFAULT;
27287 goto out;
27288 diff --git a/block/elevator.c b/block/elevator.c
27289 index a847046..75a1746 100644
27290 --- a/block/elevator.c
27291 +++ b/block/elevator.c
27292 @@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, struct attribute *attr,
27293 return error;
27294 }
27295
27296 -static struct sysfs_ops elv_sysfs_ops = {
27297 +static const struct sysfs_ops elv_sysfs_ops = {
27298 .show = elv_attr_show,
27299 .store = elv_attr_store,
27300 };
27301 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
27302 index 1d5a780..d85b6c2 100644
27303 --- a/block/scsi_ioctl.c
27304 +++ b/block/scsi_ioctl.c
27305 @@ -24,6 +24,7 @@
27306 #include <linux/capability.h>
27307 #include <linux/completion.h>
27308 #include <linux/cdrom.h>
27309 +#include <linux/ratelimit.h>
27310 #include <linux/slab.h>
27311 #include <linux/times.h>
27312 #include <asm/uaccess.h>
27313 @@ -220,8 +221,20 @@ EXPORT_SYMBOL(blk_verify_command);
27314 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
27315 struct sg_io_hdr *hdr, fmode_t mode)
27316 {
27317 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
27318 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27319 + unsigned char *cmdptr;
27320 +
27321 + if (rq->cmd != rq->__cmd)
27322 + cmdptr = rq->cmd;
27323 + else
27324 + cmdptr = tmpcmd;
27325 +
27326 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
27327 return -EFAULT;
27328 +
27329 + if (cmdptr != rq->cmd)
27330 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
27331 +
27332 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
27333 return -EPERM;
27334
27335 @@ -430,6 +443,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27336 int err;
27337 unsigned int in_len, out_len, bytes, opcode, cmdlen;
27338 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
27339 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27340 + unsigned char *cmdptr;
27341
27342 if (!sic)
27343 return -EINVAL;
27344 @@ -463,9 +478,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27345 */
27346 err = -EFAULT;
27347 rq->cmd_len = cmdlen;
27348 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
27349 +
27350 + if (rq->cmd != rq->__cmd)
27351 + cmdptr = rq->cmd;
27352 + else
27353 + cmdptr = tmpcmd;
27354 +
27355 + if (copy_from_user(cmdptr, sic->data, cmdlen))
27356 goto error;
27357
27358 + if (rq->cmd != cmdptr)
27359 + memcpy(rq->cmd, cmdptr, cmdlen);
27360 +
27361 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
27362 goto error;
27363
27364 @@ -689,7 +713,59 @@ int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mod
27365 }
27366 EXPORT_SYMBOL(scsi_cmd_ioctl);
27367
27368 -int __init blk_scsi_ioctl_init(void)
27369 +int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
27370 +{
27371 + if (bd && bd == bd->bd_contains)
27372 + return 0;
27373 +
27374 + /* Actually none of these is particularly useful on a partition,
27375 + * but they are safe.
27376 + */
27377 + switch (cmd) {
27378 + case SCSI_IOCTL_GET_IDLUN:
27379 + case SCSI_IOCTL_GET_BUS_NUMBER:
27380 + case SCSI_IOCTL_GET_PCI:
27381 + case SCSI_IOCTL_PROBE_HOST:
27382 + case SG_GET_VERSION_NUM:
27383 + case SG_SET_TIMEOUT:
27384 + case SG_GET_TIMEOUT:
27385 + case SG_GET_RESERVED_SIZE:
27386 + case SG_SET_RESERVED_SIZE:
27387 + case SG_EMULATED_HOST:
27388 + return 0;
27389 + case CDROM_GET_CAPABILITY:
27390 + /* Keep this until we remove the printk below. udev sends it
27391 + * and we do not want to spam dmesg about it. CD-ROMs do
27392 + * not have partitions, so we get here only for disks.
27393 + */
27394 + return -ENOIOCTLCMD;
27395 + default:
27396 + break;
27397 + }
27398 +
27399 + /* In particular, rule out all resets and host-specific ioctls. */
27400 + if (printk_ratelimit())
27401 + printk(KERN_WARNING "%s: sending ioctl %x to a partition!\n",
27402 + current->comm, cmd);
27403 +
27404 + return capable(CAP_SYS_RAWIO) ? 0 : -ENOIOCTLCMD;
27405 +}
27406 +EXPORT_SYMBOL(scsi_verify_blk_ioctl);
27407 +
27408 +int scsi_cmd_blk_ioctl(struct block_device *bd, fmode_t mode,
27409 + unsigned int cmd, void __user *arg)
27410 +{
27411 + int ret;
27412 +
27413 + ret = scsi_verify_blk_ioctl(bd, cmd);
27414 + if (ret < 0)
27415 + return ret;
27416 +
27417 + return scsi_cmd_ioctl(bd->bd_disk->queue, bd->bd_disk, mode, cmd, arg);
27418 +}
27419 +EXPORT_SYMBOL(scsi_cmd_blk_ioctl);
27420 +
27421 +static int __init blk_scsi_ioctl_init(void)
27422 {
27423 blk_set_cmd_filter_defaults(&blk_default_cmd_filter);
27424 return 0;
27425 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
27426 index 3533582..f143117 100644
27427 --- a/crypto/cryptd.c
27428 +++ b/crypto/cryptd.c
27429 @@ -50,7 +50,7 @@ struct cryptd_blkcipher_ctx {
27430
27431 struct cryptd_blkcipher_request_ctx {
27432 crypto_completion_t complete;
27433 -};
27434 +} __no_const;
27435
27436 struct cryptd_hash_ctx {
27437 struct crypto_shash *child;
27438 diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
27439 index a90d260..7a9765e 100644
27440 --- a/crypto/gf128mul.c
27441 +++ b/crypto/gf128mul.c
27442 @@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128 *b)
27443 for (i = 0; i < 7; ++i)
27444 gf128mul_x_lle(&p[i + 1], &p[i]);
27445
27446 - memset(r, 0, sizeof(r));
27447 + memset(r, 0, sizeof(*r));
27448 for (i = 0;;) {
27449 u8 ch = ((u8 *)b)[15 - i];
27450
27451 @@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128 *b)
27452 for (i = 0; i < 7; ++i)
27453 gf128mul_x_bbe(&p[i + 1], &p[i]);
27454
27455 - memset(r, 0, sizeof(r));
27456 + memset(r, 0, sizeof(*r));
27457 for (i = 0;;) {
27458 u8 ch = ((u8 *)b)[i];
27459
27460 diff --git a/crypto/serpent.c b/crypto/serpent.c
27461 index b651a55..023297d 100644
27462 --- a/crypto/serpent.c
27463 +++ b/crypto/serpent.c
27464 @@ -21,6 +21,7 @@
27465 #include <asm/byteorder.h>
27466 #include <linux/crypto.h>
27467 #include <linux/types.h>
27468 +#include <linux/sched.h>
27469
27470 /* Key is padded to the maximum of 256 bits before round key generation.
27471 * Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
27472 @@ -224,6 +225,8 @@ static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
27473 u32 r0,r1,r2,r3,r4;
27474 int i;
27475
27476 + pax_track_stack();
27477 +
27478 /* Copy key, add padding */
27479
27480 for (i = 0; i < keylen; ++i)
27481 diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
27482 index 0d2cdb8..d8de48d 100644
27483 --- a/drivers/acpi/acpi_pad.c
27484 +++ b/drivers/acpi/acpi_pad.c
27485 @@ -30,7 +30,7 @@
27486 #include <acpi/acpi_bus.h>
27487 #include <acpi/acpi_drivers.h>
27488
27489 -#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
27490 +#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
27491 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
27492 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
27493 static DEFINE_MUTEX(isolated_cpus_lock);
27494 diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
27495 index 3f4602b..2e41d36 100644
27496 --- a/drivers/acpi/battery.c
27497 +++ b/drivers/acpi/battery.c
27498 @@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
27499 }
27500
27501 static struct battery_file {
27502 - struct file_operations ops;
27503 + const struct file_operations ops;
27504 mode_t mode;
27505 const char *name;
27506 } acpi_battery_file[] = {
27507 diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
27508 index 7338b6a..82f0257 100644
27509 --- a/drivers/acpi/dock.c
27510 +++ b/drivers/acpi/dock.c
27511 @@ -77,7 +77,7 @@ struct dock_dependent_device {
27512 struct list_head list;
27513 struct list_head hotplug_list;
27514 acpi_handle handle;
27515 - struct acpi_dock_ops *ops;
27516 + const struct acpi_dock_ops *ops;
27517 void *context;
27518 };
27519
27520 @@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier);
27521 * the dock driver after _DCK is executed.
27522 */
27523 int
27524 -register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
27525 +register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
27526 void *context)
27527 {
27528 struct dock_dependent_device *dd;
27529 diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
27530 index 7c1c59e..2993595 100644
27531 --- a/drivers/acpi/osl.c
27532 +++ b/drivers/acpi/osl.c
27533 @@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
27534 void __iomem *virt_addr;
27535
27536 virt_addr = ioremap(phys_addr, width);
27537 + if (!virt_addr)
27538 + return AE_NO_MEMORY;
27539 if (!value)
27540 value = &dummy;
27541
27542 @@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
27543 void __iomem *virt_addr;
27544
27545 virt_addr = ioremap(phys_addr, width);
27546 + if (!virt_addr)
27547 + return AE_NO_MEMORY;
27548
27549 switch (width) {
27550 case 8:
27551 diff --git a/drivers/acpi/power_meter.c b/drivers/acpi/power_meter.c
27552 index c216062..eec10d2 100644
27553 --- a/drivers/acpi/power_meter.c
27554 +++ b/drivers/acpi/power_meter.c
27555 @@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
27556 return res;
27557
27558 temp /= 1000;
27559 - if (temp < 0)
27560 - return -EINVAL;
27561
27562 mutex_lock(&resource->lock);
27563 resource->trip[attr->index - 7] = temp;
27564 diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
27565 index d0d25e2..961643d 100644
27566 --- a/drivers/acpi/proc.c
27567 +++ b/drivers/acpi/proc.c
27568 @@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct file *file,
27569 size_t count, loff_t * ppos)
27570 {
27571 struct list_head *node, *next;
27572 - char strbuf[5];
27573 - char str[5] = "";
27574 - unsigned int len = count;
27575 + char strbuf[5] = {0};
27576 struct acpi_device *found_dev = NULL;
27577
27578 - if (len > 4)
27579 - len = 4;
27580 - if (len < 0)
27581 - return -EFAULT;
27582 + if (count > 4)
27583 + count = 4;
27584
27585 - if (copy_from_user(strbuf, buffer, len))
27586 + if (copy_from_user(strbuf, buffer, count))
27587 return -EFAULT;
27588 - strbuf[len] = '\0';
27589 - sscanf(strbuf, "%s", str);
27590 + strbuf[count] = '\0';
27591
27592 mutex_lock(&acpi_device_lock);
27593 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
27594 @@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct file *file,
27595 if (!dev->wakeup.flags.valid)
27596 continue;
27597
27598 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
27599 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
27600 dev->wakeup.state.enabled =
27601 dev->wakeup.state.enabled ? 0 : 1;
27602 found_dev = dev;
27603 diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
27604 index 7102474..de8ad22 100644
27605 --- a/drivers/acpi/processor_core.c
27606 +++ b/drivers/acpi/processor_core.c
27607 @@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
27608 return 0;
27609 }
27610
27611 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
27612 + BUG_ON(pr->id >= nr_cpu_ids);
27613
27614 /*
27615 * Buggy BIOS check
27616 diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
27617 index d933980..5761f13 100644
27618 --- a/drivers/acpi/sbshc.c
27619 +++ b/drivers/acpi/sbshc.c
27620 @@ -17,7 +17,7 @@
27621
27622 #define PREFIX "ACPI: "
27623
27624 -#define ACPI_SMB_HC_CLASS "smbus_host_controller"
27625 +#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
27626 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
27627
27628 struct acpi_smb_hc {
27629 diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
27630 index 0458094..6978e7b 100644
27631 --- a/drivers/acpi/sleep.c
27632 +++ b/drivers/acpi/sleep.c
27633 @@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(suspend_state_t pm_state)
27634 }
27635 }
27636
27637 -static struct platform_suspend_ops acpi_suspend_ops = {
27638 +static const struct platform_suspend_ops acpi_suspend_ops = {
27639 .valid = acpi_suspend_state_valid,
27640 .begin = acpi_suspend_begin,
27641 .prepare_late = acpi_pm_prepare,
27642 @@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspend_state_t pm_state)
27643 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
27644 * been requested.
27645 */
27646 -static struct platform_suspend_ops acpi_suspend_ops_old = {
27647 +static const struct platform_suspend_ops acpi_suspend_ops_old = {
27648 .valid = acpi_suspend_state_valid,
27649 .begin = acpi_suspend_begin_old,
27650 .prepare_late = acpi_pm_disable_gpes,
27651 @@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
27652 acpi_enable_all_runtime_gpes();
27653 }
27654
27655 -static struct platform_hibernation_ops acpi_hibernation_ops = {
27656 +static const struct platform_hibernation_ops acpi_hibernation_ops = {
27657 .begin = acpi_hibernation_begin,
27658 .end = acpi_pm_end,
27659 .pre_snapshot = acpi_hibernation_pre_snapshot,
27660 @@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot_old(void)
27661 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
27662 * been requested.
27663 */
27664 -static struct platform_hibernation_ops acpi_hibernation_ops_old = {
27665 +static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
27666 .begin = acpi_hibernation_begin_old,
27667 .end = acpi_pm_end,
27668 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
27669 diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
27670 index 05dff63..b662ab7 100644
27671 --- a/drivers/acpi/video.c
27672 +++ b/drivers/acpi/video.c
27673 @@ -359,7 +359,7 @@ static int acpi_video_set_brightness(struct backlight_device *bd)
27674 vd->brightness->levels[request_level]);
27675 }
27676
27677 -static struct backlight_ops acpi_backlight_ops = {
27678 +static const struct backlight_ops acpi_backlight_ops = {
27679 .get_brightness = acpi_video_get_brightness,
27680 .update_status = acpi_video_set_brightness,
27681 };
27682 diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
27683 index 6787aab..23ffb0e 100644
27684 --- a/drivers/ata/ahci.c
27685 +++ b/drivers/ata/ahci.c
27686 @@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sht = {
27687 .sdev_attrs = ahci_sdev_attrs,
27688 };
27689
27690 -static struct ata_port_operations ahci_ops = {
27691 +static const struct ata_port_operations ahci_ops = {
27692 .inherits = &sata_pmp_port_ops,
27693
27694 .qc_defer = sata_pmp_qc_defer_cmd_switch,
27695 @@ -424,17 +424,17 @@ static struct ata_port_operations ahci_ops = {
27696 .port_stop = ahci_port_stop,
27697 };
27698
27699 -static struct ata_port_operations ahci_vt8251_ops = {
27700 +static const struct ata_port_operations ahci_vt8251_ops = {
27701 .inherits = &ahci_ops,
27702 .hardreset = ahci_vt8251_hardreset,
27703 };
27704
27705 -static struct ata_port_operations ahci_p5wdh_ops = {
27706 +static const struct ata_port_operations ahci_p5wdh_ops = {
27707 .inherits = &ahci_ops,
27708 .hardreset = ahci_p5wdh_hardreset,
27709 };
27710
27711 -static struct ata_port_operations ahci_sb600_ops = {
27712 +static const struct ata_port_operations ahci_sb600_ops = {
27713 .inherits = &ahci_ops,
27714 .softreset = ahci_sb600_softreset,
27715 .pmp_softreset = ahci_sb600_softreset,
27716 diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
27717 index 99e7196..4968c77 100644
27718 --- a/drivers/ata/ata_generic.c
27719 +++ b/drivers/ata/ata_generic.c
27720 @@ -104,7 +104,7 @@ static struct scsi_host_template generic_sht = {
27721 ATA_BMDMA_SHT(DRV_NAME),
27722 };
27723
27724 -static struct ata_port_operations generic_port_ops = {
27725 +static const struct ata_port_operations generic_port_ops = {
27726 .inherits = &ata_bmdma_port_ops,
27727 .cable_detect = ata_cable_unknown,
27728 .set_mode = generic_set_mode,
27729 diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
27730 index c33591d..000c121 100644
27731 --- a/drivers/ata/ata_piix.c
27732 +++ b/drivers/ata/ata_piix.c
27733 @@ -318,7 +318,7 @@ static struct scsi_host_template piix_sht = {
27734 ATA_BMDMA_SHT(DRV_NAME),
27735 };
27736
27737 -static struct ata_port_operations piix_pata_ops = {
27738 +static const struct ata_port_operations piix_pata_ops = {
27739 .inherits = &ata_bmdma32_port_ops,
27740 .cable_detect = ata_cable_40wire,
27741 .set_piomode = piix_set_piomode,
27742 @@ -326,22 +326,22 @@ static struct ata_port_operations piix_pata_ops = {
27743 .prereset = piix_pata_prereset,
27744 };
27745
27746 -static struct ata_port_operations piix_vmw_ops = {
27747 +static const struct ata_port_operations piix_vmw_ops = {
27748 .inherits = &piix_pata_ops,
27749 .bmdma_status = piix_vmw_bmdma_status,
27750 };
27751
27752 -static struct ata_port_operations ich_pata_ops = {
27753 +static const struct ata_port_operations ich_pata_ops = {
27754 .inherits = &piix_pata_ops,
27755 .cable_detect = ich_pata_cable_detect,
27756 .set_dmamode = ich_set_dmamode,
27757 };
27758
27759 -static struct ata_port_operations piix_sata_ops = {
27760 +static const struct ata_port_operations piix_sata_ops = {
27761 .inherits = &ata_bmdma_port_ops,
27762 };
27763
27764 -static struct ata_port_operations piix_sidpr_sata_ops = {
27765 +static const struct ata_port_operations piix_sidpr_sata_ops = {
27766 .inherits = &piix_sata_ops,
27767 .hardreset = sata_std_hardreset,
27768 .scr_read = piix_sidpr_scr_read,
27769 diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
27770 index b0882cd..c295d65 100644
27771 --- a/drivers/ata/libata-acpi.c
27772 +++ b/drivers/ata/libata-acpi.c
27773 @@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data)
27774 ata_acpi_uevent(dev->link->ap, dev, event);
27775 }
27776
27777 -static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
27778 +static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
27779 .handler = ata_acpi_dev_notify_dock,
27780 .uevent = ata_acpi_dev_uevent,
27781 };
27782
27783 -static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
27784 +static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
27785 .handler = ata_acpi_ap_notify_dock,
27786 .uevent = ata_acpi_ap_uevent,
27787 };
27788 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
27789 index d4f7f99..94f603e 100644
27790 --- a/drivers/ata/libata-core.c
27791 +++ b/drivers/ata/libata-core.c
27792 @@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
27793 struct ata_port *ap;
27794 unsigned int tag;
27795
27796 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27797 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27798 ap = qc->ap;
27799
27800 qc->flags = 0;
27801 @@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
27802 struct ata_port *ap;
27803 struct ata_link *link;
27804
27805 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27806 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27807 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
27808 ap = qc->ap;
27809 link = qc->dev->link;
27810 @@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device *gendev, void *res)
27811 * LOCKING:
27812 * None.
27813 */
27814 -static void ata_finalize_port_ops(struct ata_port_operations *ops)
27815 +static void ata_finalize_port_ops(const struct ata_port_operations *ops)
27816 {
27817 static DEFINE_SPINLOCK(lock);
27818 const struct ata_port_operations *cur;
27819 @@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27820 return;
27821
27822 spin_lock(&lock);
27823 + pax_open_kernel();
27824
27825 for (cur = ops->inherits; cur; cur = cur->inherits) {
27826 void **inherit = (void **)cur;
27827 @@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27828 if (IS_ERR(*pp))
27829 *pp = NULL;
27830
27831 - ops->inherits = NULL;
27832 + *(struct ata_port_operations **)&ops->inherits = NULL;
27833
27834 + pax_close_kernel();
27835 spin_unlock(&lock);
27836 }
27837
27838 @@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host)
27839 */
27840 /* KILLME - the only user left is ipr */
27841 void ata_host_init(struct ata_host *host, struct device *dev,
27842 - unsigned long flags, struct ata_port_operations *ops)
27843 + unsigned long flags, const struct ata_port_operations *ops)
27844 {
27845 spin_lock_init(&host->lock);
27846 host->dev = dev;
27847 @@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(struct ata_port *ap)
27848 /* truly dummy */
27849 }
27850
27851 -struct ata_port_operations ata_dummy_port_ops = {
27852 +const struct ata_port_operations ata_dummy_port_ops = {
27853 .qc_prep = ata_noop_qc_prep,
27854 .qc_issue = ata_dummy_qc_issue,
27855 .error_handler = ata_dummy_error_handler,
27856 diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
27857 index e5bdb9b..45a8e72 100644
27858 --- a/drivers/ata/libata-eh.c
27859 +++ b/drivers/ata/libata-eh.c
27860 @@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
27861 {
27862 struct ata_link *link;
27863
27864 + pax_track_stack();
27865 +
27866 ata_for_each_link(link, ap, HOST_FIRST)
27867 ata_eh_link_report(link);
27868 }
27869 @@ -3594,7 +3596,7 @@ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
27870 */
27871 void ata_std_error_handler(struct ata_port *ap)
27872 {
27873 - struct ata_port_operations *ops = ap->ops;
27874 + const struct ata_port_operations *ops = ap->ops;
27875 ata_reset_fn_t hardreset = ops->hardreset;
27876
27877 /* ignore built-in hardreset if SCR access is not available */
27878 diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
27879 index 51f0ffb..19ce3e3 100644
27880 --- a/drivers/ata/libata-pmp.c
27881 +++ b/drivers/ata/libata-pmp.c
27882 @@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(struct ata_link *link, int *link_tries)
27883 */
27884 static int sata_pmp_eh_recover(struct ata_port *ap)
27885 {
27886 - struct ata_port_operations *ops = ap->ops;
27887 + const struct ata_port_operations *ops = ap->ops;
27888 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
27889 struct ata_link *pmp_link = &ap->link;
27890 struct ata_device *pmp_dev = pmp_link->device;
27891 diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
27892 index d8f35fe..288180a 100644
27893 --- a/drivers/ata/pata_acpi.c
27894 +++ b/drivers/ata/pata_acpi.c
27895 @@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_sht = {
27896 ATA_BMDMA_SHT(DRV_NAME),
27897 };
27898
27899 -static struct ata_port_operations pacpi_ops = {
27900 +static const struct ata_port_operations pacpi_ops = {
27901 .inherits = &ata_bmdma_port_ops,
27902 .qc_issue = pacpi_qc_issue,
27903 .cable_detect = pacpi_cable_detect,
27904 diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
27905 index 9434114..1f2f364 100644
27906 --- a/drivers/ata/pata_ali.c
27907 +++ b/drivers/ata/pata_ali.c
27908 @@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht = {
27909 * Port operations for PIO only ALi
27910 */
27911
27912 -static struct ata_port_operations ali_early_port_ops = {
27913 +static const struct ata_port_operations ali_early_port_ops = {
27914 .inherits = &ata_sff_port_ops,
27915 .cable_detect = ata_cable_40wire,
27916 .set_piomode = ali_set_piomode,
27917 @@ -382,7 +382,7 @@ static const struct ata_port_operations ali_dma_base_ops = {
27918 * Port operations for DMA capable ALi without cable
27919 * detect
27920 */
27921 -static struct ata_port_operations ali_20_port_ops = {
27922 +static const struct ata_port_operations ali_20_port_ops = {
27923 .inherits = &ali_dma_base_ops,
27924 .cable_detect = ata_cable_40wire,
27925 .mode_filter = ali_20_filter,
27926 @@ -393,7 +393,7 @@ static struct ata_port_operations ali_20_port_ops = {
27927 /*
27928 * Port operations for DMA capable ALi with cable detect
27929 */
27930 -static struct ata_port_operations ali_c2_port_ops = {
27931 +static const struct ata_port_operations ali_c2_port_ops = {
27932 .inherits = &ali_dma_base_ops,
27933 .check_atapi_dma = ali_check_atapi_dma,
27934 .cable_detect = ali_c2_cable_detect,
27935 @@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2_port_ops = {
27936 /*
27937 * Port operations for DMA capable ALi with cable detect
27938 */
27939 -static struct ata_port_operations ali_c4_port_ops = {
27940 +static const struct ata_port_operations ali_c4_port_ops = {
27941 .inherits = &ali_dma_base_ops,
27942 .check_atapi_dma = ali_check_atapi_dma,
27943 .cable_detect = ali_c2_cable_detect,
27944 @@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4_port_ops = {
27945 /*
27946 * Port operations for DMA capable ALi with cable detect and LBA48
27947 */
27948 -static struct ata_port_operations ali_c5_port_ops = {
27949 +static const struct ata_port_operations ali_c5_port_ops = {
27950 .inherits = &ali_dma_base_ops,
27951 .check_atapi_dma = ali_check_atapi_dma,
27952 .dev_config = ali_warn_atapi_dma,
27953 diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
27954 index 567f3f7..c8ee0da 100644
27955 --- a/drivers/ata/pata_amd.c
27956 +++ b/drivers/ata/pata_amd.c
27957 @@ -397,28 +397,28 @@ static const struct ata_port_operations amd_base_port_ops = {
27958 .prereset = amd_pre_reset,
27959 };
27960
27961 -static struct ata_port_operations amd33_port_ops = {
27962 +static const struct ata_port_operations amd33_port_ops = {
27963 .inherits = &amd_base_port_ops,
27964 .cable_detect = ata_cable_40wire,
27965 .set_piomode = amd33_set_piomode,
27966 .set_dmamode = amd33_set_dmamode,
27967 };
27968
27969 -static struct ata_port_operations amd66_port_ops = {
27970 +static const struct ata_port_operations amd66_port_ops = {
27971 .inherits = &amd_base_port_ops,
27972 .cable_detect = ata_cable_unknown,
27973 .set_piomode = amd66_set_piomode,
27974 .set_dmamode = amd66_set_dmamode,
27975 };
27976
27977 -static struct ata_port_operations amd100_port_ops = {
27978 +static const struct ata_port_operations amd100_port_ops = {
27979 .inherits = &amd_base_port_ops,
27980 .cable_detect = ata_cable_unknown,
27981 .set_piomode = amd100_set_piomode,
27982 .set_dmamode = amd100_set_dmamode,
27983 };
27984
27985 -static struct ata_port_operations amd133_port_ops = {
27986 +static const struct ata_port_operations amd133_port_ops = {
27987 .inherits = &amd_base_port_ops,
27988 .cable_detect = amd_cable_detect,
27989 .set_piomode = amd133_set_piomode,
27990 @@ -433,13 +433,13 @@ static const struct ata_port_operations nv_base_port_ops = {
27991 .host_stop = nv_host_stop,
27992 };
27993
27994 -static struct ata_port_operations nv100_port_ops = {
27995 +static const struct ata_port_operations nv100_port_ops = {
27996 .inherits = &nv_base_port_ops,
27997 .set_piomode = nv100_set_piomode,
27998 .set_dmamode = nv100_set_dmamode,
27999 };
28000
28001 -static struct ata_port_operations nv133_port_ops = {
28002 +static const struct ata_port_operations nv133_port_ops = {
28003 .inherits = &nv_base_port_ops,
28004 .set_piomode = nv133_set_piomode,
28005 .set_dmamode = nv133_set_dmamode,
28006 diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
28007 index d332cfd..4b7eaae 100644
28008 --- a/drivers/ata/pata_artop.c
28009 +++ b/drivers/ata/pata_artop.c
28010 @@ -311,7 +311,7 @@ static struct scsi_host_template artop_sht = {
28011 ATA_BMDMA_SHT(DRV_NAME),
28012 };
28013
28014 -static struct ata_port_operations artop6210_ops = {
28015 +static const struct ata_port_operations artop6210_ops = {
28016 .inherits = &ata_bmdma_port_ops,
28017 .cable_detect = ata_cable_40wire,
28018 .set_piomode = artop6210_set_piomode,
28019 @@ -320,7 +320,7 @@ static struct ata_port_operations artop6210_ops = {
28020 .qc_defer = artop6210_qc_defer,
28021 };
28022
28023 -static struct ata_port_operations artop6260_ops = {
28024 +static const struct ata_port_operations artop6260_ops = {
28025 .inherits = &ata_bmdma_port_ops,
28026 .cable_detect = artop6260_cable_detect,
28027 .set_piomode = artop6260_set_piomode,
28028 diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
28029 index 5c129f9..7bb7ccb 100644
28030 --- a/drivers/ata/pata_at32.c
28031 +++ b/drivers/ata/pata_at32.c
28032 @@ -172,7 +172,7 @@ static struct scsi_host_template at32_sht = {
28033 ATA_PIO_SHT(DRV_NAME),
28034 };
28035
28036 -static struct ata_port_operations at32_port_ops = {
28037 +static const struct ata_port_operations at32_port_ops = {
28038 .inherits = &ata_sff_port_ops,
28039 .cable_detect = ata_cable_40wire,
28040 .set_piomode = pata_at32_set_piomode,
28041 diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
28042 index 41c94b1..829006d 100644
28043 --- a/drivers/ata/pata_at91.c
28044 +++ b/drivers/ata/pata_at91.c
28045 @@ -195,7 +195,7 @@ static struct scsi_host_template pata_at91_sht = {
28046 ATA_PIO_SHT(DRV_NAME),
28047 };
28048
28049 -static struct ata_port_operations pata_at91_port_ops = {
28050 +static const struct ata_port_operations pata_at91_port_ops = {
28051 .inherits = &ata_sff_port_ops,
28052
28053 .sff_data_xfer = pata_at91_data_xfer_noirq,
28054 diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
28055 index ae4454d..d391eb4 100644
28056 --- a/drivers/ata/pata_atiixp.c
28057 +++ b/drivers/ata/pata_atiixp.c
28058 @@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_sht = {
28059 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28060 };
28061
28062 -static struct ata_port_operations atiixp_port_ops = {
28063 +static const struct ata_port_operations atiixp_port_ops = {
28064 .inherits = &ata_bmdma_port_ops,
28065
28066 .qc_prep = ata_sff_dumb_qc_prep,
28067 diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
28068 index 6fe7ded..2a425dc 100644
28069 --- a/drivers/ata/pata_atp867x.c
28070 +++ b/drivers/ata/pata_atp867x.c
28071 @@ -274,7 +274,7 @@ static struct scsi_host_template atp867x_sht = {
28072 ATA_BMDMA_SHT(DRV_NAME),
28073 };
28074
28075 -static struct ata_port_operations atp867x_ops = {
28076 +static const struct ata_port_operations atp867x_ops = {
28077 .inherits = &ata_bmdma_port_ops,
28078 .cable_detect = atp867x_cable_detect,
28079 .set_piomode = atp867x_set_piomode,
28080 diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
28081 index c4b47a3..b27a367 100644
28082 --- a/drivers/ata/pata_bf54x.c
28083 +++ b/drivers/ata/pata_bf54x.c
28084 @@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sht = {
28085 .dma_boundary = ATA_DMA_BOUNDARY,
28086 };
28087
28088 -static struct ata_port_operations bfin_pata_ops = {
28089 +static const struct ata_port_operations bfin_pata_ops = {
28090 .inherits = &ata_sff_port_ops,
28091
28092 .set_piomode = bfin_set_piomode,
28093 diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
28094 index 5acf9fa..84248be 100644
28095 --- a/drivers/ata/pata_cmd640.c
28096 +++ b/drivers/ata/pata_cmd640.c
28097 @@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_sht = {
28098 ATA_BMDMA_SHT(DRV_NAME),
28099 };
28100
28101 -static struct ata_port_operations cmd640_port_ops = {
28102 +static const struct ata_port_operations cmd640_port_ops = {
28103 .inherits = &ata_bmdma_port_ops,
28104 /* In theory xfer_noirq is not needed once we kill the prefetcher */
28105 .sff_data_xfer = ata_sff_data_xfer_noirq,
28106 diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
28107 index ccd2694..c869c3d 100644
28108 --- a/drivers/ata/pata_cmd64x.c
28109 +++ b/drivers/ata/pata_cmd64x.c
28110 @@ -271,18 +271,18 @@ static const struct ata_port_operations cmd64x_base_ops = {
28111 .set_dmamode = cmd64x_set_dmamode,
28112 };
28113
28114 -static struct ata_port_operations cmd64x_port_ops = {
28115 +static const struct ata_port_operations cmd64x_port_ops = {
28116 .inherits = &cmd64x_base_ops,
28117 .cable_detect = ata_cable_40wire,
28118 };
28119
28120 -static struct ata_port_operations cmd646r1_port_ops = {
28121 +static const struct ata_port_operations cmd646r1_port_ops = {
28122 .inherits = &cmd64x_base_ops,
28123 .bmdma_stop = cmd646r1_bmdma_stop,
28124 .cable_detect = ata_cable_40wire,
28125 };
28126
28127 -static struct ata_port_operations cmd648_port_ops = {
28128 +static const struct ata_port_operations cmd648_port_ops = {
28129 .inherits = &cmd64x_base_ops,
28130 .bmdma_stop = cmd648_bmdma_stop,
28131 .cable_detect = cmd648_cable_detect,
28132 diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
28133 index 0df83cf..d7595b0 100644
28134 --- a/drivers/ata/pata_cs5520.c
28135 +++ b/drivers/ata/pata_cs5520.c
28136 @@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_sht = {
28137 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28138 };
28139
28140 -static struct ata_port_operations cs5520_port_ops = {
28141 +static const struct ata_port_operations cs5520_port_ops = {
28142 .inherits = &ata_bmdma_port_ops,
28143 .qc_prep = ata_sff_dumb_qc_prep,
28144 .cable_detect = ata_cable_40wire,
28145 diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
28146 index c974b05..6d26b11 100644
28147 --- a/drivers/ata/pata_cs5530.c
28148 +++ b/drivers/ata/pata_cs5530.c
28149 @@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_sht = {
28150 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28151 };
28152
28153 -static struct ata_port_operations cs5530_port_ops = {
28154 +static const struct ata_port_operations cs5530_port_ops = {
28155 .inherits = &ata_bmdma_port_ops,
28156
28157 .qc_prep = ata_sff_dumb_qc_prep,
28158 diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
28159 index 403f561..aacd26b 100644
28160 --- a/drivers/ata/pata_cs5535.c
28161 +++ b/drivers/ata/pata_cs5535.c
28162 @@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_sht = {
28163 ATA_BMDMA_SHT(DRV_NAME),
28164 };
28165
28166 -static struct ata_port_operations cs5535_port_ops = {
28167 +static const struct ata_port_operations cs5535_port_ops = {
28168 .inherits = &ata_bmdma_port_ops,
28169 .cable_detect = cs5535_cable_detect,
28170 .set_piomode = cs5535_set_piomode,
28171 diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
28172 index 6da4cb4..de24a25 100644
28173 --- a/drivers/ata/pata_cs5536.c
28174 +++ b/drivers/ata/pata_cs5536.c
28175 @@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_sht = {
28176 ATA_BMDMA_SHT(DRV_NAME),
28177 };
28178
28179 -static struct ata_port_operations cs5536_port_ops = {
28180 +static const struct ata_port_operations cs5536_port_ops = {
28181 .inherits = &ata_bmdma_port_ops,
28182 .cable_detect = cs5536_cable_detect,
28183 .set_piomode = cs5536_set_piomode,
28184 diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
28185 index 8fb040b..b16a9c9 100644
28186 --- a/drivers/ata/pata_cypress.c
28187 +++ b/drivers/ata/pata_cypress.c
28188 @@ -113,7 +113,7 @@ static struct scsi_host_template cy82c693_sht = {
28189 ATA_BMDMA_SHT(DRV_NAME),
28190 };
28191
28192 -static struct ata_port_operations cy82c693_port_ops = {
28193 +static const struct ata_port_operations cy82c693_port_ops = {
28194 .inherits = &ata_bmdma_port_ops,
28195 .cable_detect = ata_cable_40wire,
28196 .set_piomode = cy82c693_set_piomode,
28197 diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
28198 index 2a6412f..555ee11 100644
28199 --- a/drivers/ata/pata_efar.c
28200 +++ b/drivers/ata/pata_efar.c
28201 @@ -222,7 +222,7 @@ static struct scsi_host_template efar_sht = {
28202 ATA_BMDMA_SHT(DRV_NAME),
28203 };
28204
28205 -static struct ata_port_operations efar_ops = {
28206 +static const struct ata_port_operations efar_ops = {
28207 .inherits = &ata_bmdma_port_ops,
28208 .cable_detect = efar_cable_detect,
28209 .set_piomode = efar_set_piomode,
28210 diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
28211 index b9d8836..0b92030 100644
28212 --- a/drivers/ata/pata_hpt366.c
28213 +++ b/drivers/ata/pata_hpt366.c
28214 @@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_sht = {
28215 * Configuration for HPT366/68
28216 */
28217
28218 -static struct ata_port_operations hpt366_port_ops = {
28219 +static const struct ata_port_operations hpt366_port_ops = {
28220 .inherits = &ata_bmdma_port_ops,
28221 .cable_detect = hpt36x_cable_detect,
28222 .mode_filter = hpt366_filter,
28223 diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
28224 index 5af7f19..00c4980 100644
28225 --- a/drivers/ata/pata_hpt37x.c
28226 +++ b/drivers/ata/pata_hpt37x.c
28227 @@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_sht = {
28228 * Configuration for HPT370
28229 */
28230
28231 -static struct ata_port_operations hpt370_port_ops = {
28232 +static const struct ata_port_operations hpt370_port_ops = {
28233 .inherits = &ata_bmdma_port_ops,
28234
28235 .bmdma_stop = hpt370_bmdma_stop,
28236 @@ -591,7 +591,7 @@ static struct ata_port_operations hpt370_port_ops = {
28237 * Configuration for HPT370A. Close to 370 but less filters
28238 */
28239
28240 -static struct ata_port_operations hpt370a_port_ops = {
28241 +static const struct ata_port_operations hpt370a_port_ops = {
28242 .inherits = &hpt370_port_ops,
28243 .mode_filter = hpt370a_filter,
28244 };
28245 @@ -601,7 +601,7 @@ static struct ata_port_operations hpt370a_port_ops = {
28246 * and DMA mode setting functionality.
28247 */
28248
28249 -static struct ata_port_operations hpt372_port_ops = {
28250 +static const struct ata_port_operations hpt372_port_ops = {
28251 .inherits = &ata_bmdma_port_ops,
28252
28253 .bmdma_stop = hpt37x_bmdma_stop,
28254 @@ -616,7 +616,7 @@ static struct ata_port_operations hpt372_port_ops = {
28255 * but we have a different cable detection procedure for function 1.
28256 */
28257
28258 -static struct ata_port_operations hpt374_fn1_port_ops = {
28259 +static const struct ata_port_operations hpt374_fn1_port_ops = {
28260 .inherits = &hpt372_port_ops,
28261 .prereset = hpt374_fn1_pre_reset,
28262 };
28263 diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
28264 index 100f227..2e39382 100644
28265 --- a/drivers/ata/pata_hpt3x2n.c
28266 +++ b/drivers/ata/pata_hpt3x2n.c
28267 @@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n_sht = {
28268 * Configuration for HPT3x2n.
28269 */
28270
28271 -static struct ata_port_operations hpt3x2n_port_ops = {
28272 +static const struct ata_port_operations hpt3x2n_port_ops = {
28273 .inherits = &ata_bmdma_port_ops,
28274
28275 .bmdma_stop = hpt3x2n_bmdma_stop,
28276 diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
28277 index 7e31025..6fca8f4 100644
28278 --- a/drivers/ata/pata_hpt3x3.c
28279 +++ b/drivers/ata/pata_hpt3x3.c
28280 @@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_sht = {
28281 ATA_BMDMA_SHT(DRV_NAME),
28282 };
28283
28284 -static struct ata_port_operations hpt3x3_port_ops = {
28285 +static const struct ata_port_operations hpt3x3_port_ops = {
28286 .inherits = &ata_bmdma_port_ops,
28287 .cable_detect = ata_cable_40wire,
28288 .set_piomode = hpt3x3_set_piomode,
28289 diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
28290 index b663b7f..9a26c2a 100644
28291 --- a/drivers/ata/pata_icside.c
28292 +++ b/drivers/ata/pata_icside.c
28293 @@ -319,7 +319,7 @@ static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
28294 }
28295 }
28296
28297 -static struct ata_port_operations pata_icside_port_ops = {
28298 +static const struct ata_port_operations pata_icside_port_ops = {
28299 .inherits = &ata_sff_port_ops,
28300 /* no need to build any PRD tables for DMA */
28301 .qc_prep = ata_noop_qc_prep,
28302 diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
28303 index 4bceb88..457dfb6 100644
28304 --- a/drivers/ata/pata_isapnp.c
28305 +++ b/drivers/ata/pata_isapnp.c
28306 @@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_sht = {
28307 ATA_PIO_SHT(DRV_NAME),
28308 };
28309
28310 -static struct ata_port_operations isapnp_port_ops = {
28311 +static const struct ata_port_operations isapnp_port_ops = {
28312 .inherits = &ata_sff_port_ops,
28313 .cable_detect = ata_cable_40wire,
28314 };
28315
28316 -static struct ata_port_operations isapnp_noalt_port_ops = {
28317 +static const struct ata_port_operations isapnp_noalt_port_ops = {
28318 .inherits = &ata_sff_port_ops,
28319 .cable_detect = ata_cable_40wire,
28320 /* No altstatus so we don't want to use the lost interrupt poll */
28321 diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
28322 index f156da8..24976e2 100644
28323 --- a/drivers/ata/pata_it8213.c
28324 +++ b/drivers/ata/pata_it8213.c
28325 @@ -234,7 +234,7 @@ static struct scsi_host_template it8213_sht = {
28326 };
28327
28328
28329 -static struct ata_port_operations it8213_ops = {
28330 +static const struct ata_port_operations it8213_ops = {
28331 .inherits = &ata_bmdma_port_ops,
28332 .cable_detect = it8213_cable_detect,
28333 .set_piomode = it8213_set_piomode,
28334 diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
28335 index 188bc2f..ca9e785 100644
28336 --- a/drivers/ata/pata_it821x.c
28337 +++ b/drivers/ata/pata_it821x.c
28338 @@ -800,7 +800,7 @@ static struct scsi_host_template it821x_sht = {
28339 ATA_BMDMA_SHT(DRV_NAME),
28340 };
28341
28342 -static struct ata_port_operations it821x_smart_port_ops = {
28343 +static const struct ata_port_operations it821x_smart_port_ops = {
28344 .inherits = &ata_bmdma_port_ops,
28345
28346 .check_atapi_dma= it821x_check_atapi_dma,
28347 @@ -814,7 +814,7 @@ static struct ata_port_operations it821x_smart_port_ops = {
28348 .port_start = it821x_port_start,
28349 };
28350
28351 -static struct ata_port_operations it821x_passthru_port_ops = {
28352 +static const struct ata_port_operations it821x_passthru_port_ops = {
28353 .inherits = &ata_bmdma_port_ops,
28354
28355 .check_atapi_dma= it821x_check_atapi_dma,
28356 @@ -830,7 +830,7 @@ static struct ata_port_operations it821x_passthru_port_ops = {
28357 .port_start = it821x_port_start,
28358 };
28359
28360 -static struct ata_port_operations it821x_rdc_port_ops = {
28361 +static const struct ata_port_operations it821x_rdc_port_ops = {
28362 .inherits = &ata_bmdma_port_ops,
28363
28364 .check_atapi_dma= it821x_check_atapi_dma,
28365 diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
28366 index ba54b08..4b952b7 100644
28367 --- a/drivers/ata/pata_ixp4xx_cf.c
28368 +++ b/drivers/ata/pata_ixp4xx_cf.c
28369 @@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_sht = {
28370 ATA_PIO_SHT(DRV_NAME),
28371 };
28372
28373 -static struct ata_port_operations ixp4xx_port_ops = {
28374 +static const struct ata_port_operations ixp4xx_port_ops = {
28375 .inherits = &ata_sff_port_ops,
28376 .sff_data_xfer = ixp4xx_mmio_data_xfer,
28377 .cable_detect = ata_cable_40wire,
28378 diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
28379 index 3a1474a..434b0ff 100644
28380 --- a/drivers/ata/pata_jmicron.c
28381 +++ b/drivers/ata/pata_jmicron.c
28382 @@ -111,7 +111,7 @@ static struct scsi_host_template jmicron_sht = {
28383 ATA_BMDMA_SHT(DRV_NAME),
28384 };
28385
28386 -static struct ata_port_operations jmicron_ops = {
28387 +static const struct ata_port_operations jmicron_ops = {
28388 .inherits = &ata_bmdma_port_ops,
28389 .prereset = jmicron_pre_reset,
28390 };
28391 diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
28392 index 6932e56..220e71d 100644
28393 --- a/drivers/ata/pata_legacy.c
28394 +++ b/drivers/ata/pata_legacy.c
28395 @@ -106,7 +106,7 @@ struct legacy_probe {
28396
28397 struct legacy_controller {
28398 const char *name;
28399 - struct ata_port_operations *ops;
28400 + const struct ata_port_operations *ops;
28401 unsigned int pio_mask;
28402 unsigned int flags;
28403 unsigned int pflags;
28404 @@ -223,12 +223,12 @@ static const struct ata_port_operations legacy_base_port_ops = {
28405 * pio_mask as well.
28406 */
28407
28408 -static struct ata_port_operations simple_port_ops = {
28409 +static const struct ata_port_operations simple_port_ops = {
28410 .inherits = &legacy_base_port_ops,
28411 .sff_data_xfer = ata_sff_data_xfer_noirq,
28412 };
28413
28414 -static struct ata_port_operations legacy_port_ops = {
28415 +static const struct ata_port_operations legacy_port_ops = {
28416 .inherits = &legacy_base_port_ops,
28417 .sff_data_xfer = ata_sff_data_xfer_noirq,
28418 .set_mode = legacy_set_mode,
28419 @@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(struct ata_device *dev,
28420 return buflen;
28421 }
28422
28423 -static struct ata_port_operations pdc20230_port_ops = {
28424 +static const struct ata_port_operations pdc20230_port_ops = {
28425 .inherits = &legacy_base_port_ops,
28426 .set_piomode = pdc20230_set_piomode,
28427 .sff_data_xfer = pdc_data_xfer_vlb,
28428 @@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
28429 ioread8(ap->ioaddr.status_addr);
28430 }
28431
28432 -static struct ata_port_operations ht6560a_port_ops = {
28433 +static const struct ata_port_operations ht6560a_port_ops = {
28434 .inherits = &legacy_base_port_ops,
28435 .set_piomode = ht6560a_set_piomode,
28436 };
28437 @@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
28438 ioread8(ap->ioaddr.status_addr);
28439 }
28440
28441 -static struct ata_port_operations ht6560b_port_ops = {
28442 +static const struct ata_port_operations ht6560b_port_ops = {
28443 .inherits = &legacy_base_port_ops,
28444 .set_piomode = ht6560b_set_piomode,
28445 };
28446 @@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(struct ata_port *ap,
28447 }
28448
28449
28450 -static struct ata_port_operations opti82c611a_port_ops = {
28451 +static const struct ata_port_operations opti82c611a_port_ops = {
28452 .inherits = &legacy_base_port_ops,
28453 .set_piomode = opti82c611a_set_piomode,
28454 };
28455 @@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(struct ata_queued_cmd *qc)
28456 return ata_sff_qc_issue(qc);
28457 }
28458
28459 -static struct ata_port_operations opti82c46x_port_ops = {
28460 +static const struct ata_port_operations opti82c46x_port_ops = {
28461 .inherits = &legacy_base_port_ops,
28462 .set_piomode = opti82c46x_set_piomode,
28463 .qc_issue = opti82c46x_qc_issue,
28464 @@ -771,20 +771,20 @@ static int qdi_port(struct platform_device *dev,
28465 return 0;
28466 }
28467
28468 -static struct ata_port_operations qdi6500_port_ops = {
28469 +static const struct ata_port_operations qdi6500_port_ops = {
28470 .inherits = &legacy_base_port_ops,
28471 .set_piomode = qdi6500_set_piomode,
28472 .qc_issue = qdi_qc_issue,
28473 .sff_data_xfer = vlb32_data_xfer,
28474 };
28475
28476 -static struct ata_port_operations qdi6580_port_ops = {
28477 +static const struct ata_port_operations qdi6580_port_ops = {
28478 .inherits = &legacy_base_port_ops,
28479 .set_piomode = qdi6580_set_piomode,
28480 .sff_data_xfer = vlb32_data_xfer,
28481 };
28482
28483 -static struct ata_port_operations qdi6580dp_port_ops = {
28484 +static const struct ata_port_operations qdi6580dp_port_ops = {
28485 .inherits = &legacy_base_port_ops,
28486 .set_piomode = qdi6580dp_set_piomode,
28487 .sff_data_xfer = vlb32_data_xfer,
28488 @@ -855,7 +855,7 @@ static int winbond_port(struct platform_device *dev,
28489 return 0;
28490 }
28491
28492 -static struct ata_port_operations winbond_port_ops = {
28493 +static const struct ata_port_operations winbond_port_ops = {
28494 .inherits = &legacy_base_port_ops,
28495 .set_piomode = winbond_set_piomode,
28496 .sff_data_xfer = vlb32_data_xfer,
28497 @@ -978,7 +978,7 @@ static __init int legacy_init_one(struct legacy_probe *probe)
28498 int pio_modes = controller->pio_mask;
28499 unsigned long io = probe->port;
28500 u32 mask = (1 << probe->slot);
28501 - struct ata_port_operations *ops = controller->ops;
28502 + const struct ata_port_operations *ops = controller->ops;
28503 struct legacy_data *ld = &legacy_data[probe->slot];
28504 struct ata_host *host = NULL;
28505 struct ata_port *ap;
28506 diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
28507 index 2096fb7..4d090fc 100644
28508 --- a/drivers/ata/pata_marvell.c
28509 +++ b/drivers/ata/pata_marvell.c
28510 @@ -100,7 +100,7 @@ static struct scsi_host_template marvell_sht = {
28511 ATA_BMDMA_SHT(DRV_NAME),
28512 };
28513
28514 -static struct ata_port_operations marvell_ops = {
28515 +static const struct ata_port_operations marvell_ops = {
28516 .inherits = &ata_bmdma_port_ops,
28517 .cable_detect = marvell_cable_detect,
28518 .prereset = marvell_pre_reset,
28519 diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
28520 index 99d41be..7d56aa8 100644
28521 --- a/drivers/ata/pata_mpc52xx.c
28522 +++ b/drivers/ata/pata_mpc52xx.c
28523 @@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx_ata_sht = {
28524 ATA_PIO_SHT(DRV_NAME),
28525 };
28526
28527 -static struct ata_port_operations mpc52xx_ata_port_ops = {
28528 +static const struct ata_port_operations mpc52xx_ata_port_ops = {
28529 .inherits = &ata_bmdma_port_ops,
28530 .sff_dev_select = mpc52xx_ata_dev_select,
28531 .set_piomode = mpc52xx_ata_set_piomode,
28532 diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
28533 index b21f002..0a27e7f 100644
28534 --- a/drivers/ata/pata_mpiix.c
28535 +++ b/drivers/ata/pata_mpiix.c
28536 @@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_sht = {
28537 ATA_PIO_SHT(DRV_NAME),
28538 };
28539
28540 -static struct ata_port_operations mpiix_port_ops = {
28541 +static const struct ata_port_operations mpiix_port_ops = {
28542 .inherits = &ata_sff_port_ops,
28543 .qc_issue = mpiix_qc_issue,
28544 .cable_detect = ata_cable_40wire,
28545 diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
28546 index f0d52f7..89c3be3 100644
28547 --- a/drivers/ata/pata_netcell.c
28548 +++ b/drivers/ata/pata_netcell.c
28549 @@ -34,7 +34,7 @@ static struct scsi_host_template netcell_sht = {
28550 ATA_BMDMA_SHT(DRV_NAME),
28551 };
28552
28553 -static struct ata_port_operations netcell_ops = {
28554 +static const struct ata_port_operations netcell_ops = {
28555 .inherits = &ata_bmdma_port_ops,
28556 .cable_detect = ata_cable_80wire,
28557 .read_id = netcell_read_id,
28558 diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
28559 index dd53a66..a3f4317 100644
28560 --- a/drivers/ata/pata_ninja32.c
28561 +++ b/drivers/ata/pata_ninja32.c
28562 @@ -81,7 +81,7 @@ static struct scsi_host_template ninja32_sht = {
28563 ATA_BMDMA_SHT(DRV_NAME),
28564 };
28565
28566 -static struct ata_port_operations ninja32_port_ops = {
28567 +static const struct ata_port_operations ninja32_port_ops = {
28568 .inherits = &ata_bmdma_port_ops,
28569 .sff_dev_select = ninja32_dev_select,
28570 .cable_detect = ata_cable_40wire,
28571 diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
28572 index ca53fac..9aa93ef 100644
28573 --- a/drivers/ata/pata_ns87410.c
28574 +++ b/drivers/ata/pata_ns87410.c
28575 @@ -132,7 +132,7 @@ static struct scsi_host_template ns87410_sht = {
28576 ATA_PIO_SHT(DRV_NAME),
28577 };
28578
28579 -static struct ata_port_operations ns87410_port_ops = {
28580 +static const struct ata_port_operations ns87410_port_ops = {
28581 .inherits = &ata_sff_port_ops,
28582 .qc_issue = ns87410_qc_issue,
28583 .cable_detect = ata_cable_40wire,
28584 diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
28585 index 773b159..55f454e 100644
28586 --- a/drivers/ata/pata_ns87415.c
28587 +++ b/drivers/ata/pata_ns87415.c
28588 @@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct ata_port *ap)
28589 }
28590 #endif /* 87560 SuperIO Support */
28591
28592 -static struct ata_port_operations ns87415_pata_ops = {
28593 +static const struct ata_port_operations ns87415_pata_ops = {
28594 .inherits = &ata_bmdma_port_ops,
28595
28596 .check_atapi_dma = ns87415_check_atapi_dma,
28597 @@ -313,7 +313,7 @@ static struct ata_port_operations ns87415_pata_ops = {
28598 };
28599
28600 #if defined(CONFIG_SUPERIO)
28601 -static struct ata_port_operations ns87560_pata_ops = {
28602 +static const struct ata_port_operations ns87560_pata_ops = {
28603 .inherits = &ns87415_pata_ops,
28604 .sff_tf_read = ns87560_tf_read,
28605 .sff_check_status = ns87560_check_status,
28606 diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
28607 index d6f6956..639295b 100644
28608 --- a/drivers/ata/pata_octeon_cf.c
28609 +++ b/drivers/ata/pata_octeon_cf.c
28610 @@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
28611 return 0;
28612 }
28613
28614 +/* cannot be const */
28615 static struct ata_port_operations octeon_cf_ops = {
28616 .inherits = &ata_sff_port_ops,
28617 .check_atapi_dma = octeon_cf_check_atapi_dma,
28618 diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
28619 index 84ac503..adee1cd 100644
28620 --- a/drivers/ata/pata_oldpiix.c
28621 +++ b/drivers/ata/pata_oldpiix.c
28622 @@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix_sht = {
28623 ATA_BMDMA_SHT(DRV_NAME),
28624 };
28625
28626 -static struct ata_port_operations oldpiix_pata_ops = {
28627 +static const struct ata_port_operations oldpiix_pata_ops = {
28628 .inherits = &ata_bmdma_port_ops,
28629 .qc_issue = oldpiix_qc_issue,
28630 .cable_detect = ata_cable_40wire,
28631 diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
28632 index 99eddda..3a4c0aa 100644
28633 --- a/drivers/ata/pata_opti.c
28634 +++ b/drivers/ata/pata_opti.c
28635 @@ -152,7 +152,7 @@ static struct scsi_host_template opti_sht = {
28636 ATA_PIO_SHT(DRV_NAME),
28637 };
28638
28639 -static struct ata_port_operations opti_port_ops = {
28640 +static const struct ata_port_operations opti_port_ops = {
28641 .inherits = &ata_sff_port_ops,
28642 .cable_detect = ata_cable_40wire,
28643 .set_piomode = opti_set_piomode,
28644 diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
28645 index 86885a4..8e9968d 100644
28646 --- a/drivers/ata/pata_optidma.c
28647 +++ b/drivers/ata/pata_optidma.c
28648 @@ -337,7 +337,7 @@ static struct scsi_host_template optidma_sht = {
28649 ATA_BMDMA_SHT(DRV_NAME),
28650 };
28651
28652 -static struct ata_port_operations optidma_port_ops = {
28653 +static const struct ata_port_operations optidma_port_ops = {
28654 .inherits = &ata_bmdma_port_ops,
28655 .cable_detect = ata_cable_40wire,
28656 .set_piomode = optidma_set_pio_mode,
28657 @@ -346,7 +346,7 @@ static struct ata_port_operations optidma_port_ops = {
28658 .prereset = optidma_pre_reset,
28659 };
28660
28661 -static struct ata_port_operations optiplus_port_ops = {
28662 +static const struct ata_port_operations optiplus_port_ops = {
28663 .inherits = &optidma_port_ops,
28664 .set_piomode = optiplus_set_pio_mode,
28665 .set_dmamode = optiplus_set_dma_mode,
28666 diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
28667 index 11fb4cc..1a14022 100644
28668 --- a/drivers/ata/pata_palmld.c
28669 +++ b/drivers/ata/pata_palmld.c
28670 @@ -37,7 +37,7 @@ static struct scsi_host_template palmld_sht = {
28671 ATA_PIO_SHT(DRV_NAME),
28672 };
28673
28674 -static struct ata_port_operations palmld_port_ops = {
28675 +static const struct ata_port_operations palmld_port_ops = {
28676 .inherits = &ata_sff_port_ops,
28677 .sff_data_xfer = ata_sff_data_xfer_noirq,
28678 .cable_detect = ata_cable_40wire,
28679 diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
28680 index dc99e26..7f4b1e4 100644
28681 --- a/drivers/ata/pata_pcmcia.c
28682 +++ b/drivers/ata/pata_pcmcia.c
28683 @@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_sht = {
28684 ATA_PIO_SHT(DRV_NAME),
28685 };
28686
28687 -static struct ata_port_operations pcmcia_port_ops = {
28688 +static const struct ata_port_operations pcmcia_port_ops = {
28689 .inherits = &ata_sff_port_ops,
28690 .sff_data_xfer = ata_sff_data_xfer_noirq,
28691 .cable_detect = ata_cable_40wire,
28692 .set_mode = pcmcia_set_mode,
28693 };
28694
28695 -static struct ata_port_operations pcmcia_8bit_port_ops = {
28696 +static const struct ata_port_operations pcmcia_8bit_port_ops = {
28697 .inherits = &ata_sff_port_ops,
28698 .sff_data_xfer = ata_data_xfer_8bit,
28699 .cable_detect = ata_cable_40wire,
28700 @@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
28701 unsigned long io_base, ctl_base;
28702 void __iomem *io_addr, *ctl_addr;
28703 int n_ports = 1;
28704 - struct ata_port_operations *ops = &pcmcia_port_ops;
28705 + const struct ata_port_operations *ops = &pcmcia_port_ops;
28706
28707 info = kzalloc(sizeof(*info), GFP_KERNEL);
28708 if (info == NULL)
28709 diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
28710 index ca5cad0..3a1f125 100644
28711 --- a/drivers/ata/pata_pdc2027x.c
28712 +++ b/drivers/ata/pata_pdc2027x.c
28713 @@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027x_sht = {
28714 ATA_BMDMA_SHT(DRV_NAME),
28715 };
28716
28717 -static struct ata_port_operations pdc2027x_pata100_ops = {
28718 +static const struct ata_port_operations pdc2027x_pata100_ops = {
28719 .inherits = &ata_bmdma_port_ops,
28720 .check_atapi_dma = pdc2027x_check_atapi_dma,
28721 .cable_detect = pdc2027x_cable_detect,
28722 .prereset = pdc2027x_prereset,
28723 };
28724
28725 -static struct ata_port_operations pdc2027x_pata133_ops = {
28726 +static const struct ata_port_operations pdc2027x_pata133_ops = {
28727 .inherits = &pdc2027x_pata100_ops,
28728 .mode_filter = pdc2027x_mode_filter,
28729 .set_piomode = pdc2027x_set_piomode,
28730 diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
28731 index 2911120..4bf62aa 100644
28732 --- a/drivers/ata/pata_pdc202xx_old.c
28733 +++ b/drivers/ata/pata_pdc202xx_old.c
28734 @@ -274,7 +274,7 @@ static struct scsi_host_template pdc202xx_sht = {
28735 ATA_BMDMA_SHT(DRV_NAME),
28736 };
28737
28738 -static struct ata_port_operations pdc2024x_port_ops = {
28739 +static const struct ata_port_operations pdc2024x_port_ops = {
28740 .inherits = &ata_bmdma_port_ops,
28741
28742 .cable_detect = ata_cable_40wire,
28743 @@ -284,7 +284,7 @@ static struct ata_port_operations pdc2024x_port_ops = {
28744 .sff_exec_command = pdc202xx_exec_command,
28745 };
28746
28747 -static struct ata_port_operations pdc2026x_port_ops = {
28748 +static const struct ata_port_operations pdc2026x_port_ops = {
28749 .inherits = &pdc2024x_port_ops,
28750
28751 .check_atapi_dma = pdc2026x_check_atapi_dma,
28752 diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
28753 index 3f6ebc6..a18c358 100644
28754 --- a/drivers/ata/pata_platform.c
28755 +++ b/drivers/ata/pata_platform.c
28756 @@ -48,7 +48,7 @@ static struct scsi_host_template pata_platform_sht = {
28757 ATA_PIO_SHT(DRV_NAME),
28758 };
28759
28760 -static struct ata_port_operations pata_platform_port_ops = {
28761 +static const struct ata_port_operations pata_platform_port_ops = {
28762 .inherits = &ata_sff_port_ops,
28763 .sff_data_xfer = ata_sff_data_xfer_noirq,
28764 .cable_detect = ata_cable_unknown,
28765 diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
28766 index 45879dc..165a9f9 100644
28767 --- a/drivers/ata/pata_qdi.c
28768 +++ b/drivers/ata/pata_qdi.c
28769 @@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht = {
28770 ATA_PIO_SHT(DRV_NAME),
28771 };
28772
28773 -static struct ata_port_operations qdi6500_port_ops = {
28774 +static const struct ata_port_operations qdi6500_port_ops = {
28775 .inherits = &ata_sff_port_ops,
28776 .qc_issue = qdi_qc_issue,
28777 .sff_data_xfer = qdi_data_xfer,
28778 @@ -165,7 +165,7 @@ static struct ata_port_operations qdi6500_port_ops = {
28779 .set_piomode = qdi6500_set_piomode,
28780 };
28781
28782 -static struct ata_port_operations qdi6580_port_ops = {
28783 +static const struct ata_port_operations qdi6580_port_ops = {
28784 .inherits = &qdi6500_port_ops,
28785 .set_piomode = qdi6580_set_piomode,
28786 };
28787 diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
28788 index 4401b33..716c5cc 100644
28789 --- a/drivers/ata/pata_radisys.c
28790 +++ b/drivers/ata/pata_radisys.c
28791 @@ -187,7 +187,7 @@ static struct scsi_host_template radisys_sht = {
28792 ATA_BMDMA_SHT(DRV_NAME),
28793 };
28794
28795 -static struct ata_port_operations radisys_pata_ops = {
28796 +static const struct ata_port_operations radisys_pata_ops = {
28797 .inherits = &ata_bmdma_port_ops,
28798 .qc_issue = radisys_qc_issue,
28799 .cable_detect = ata_cable_unknown,
28800 diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
28801 index 45f1e10..fab6bca 100644
28802 --- a/drivers/ata/pata_rb532_cf.c
28803 +++ b/drivers/ata/pata_rb532_cf.c
28804 @@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handler(int irq, void *dev_instance)
28805 return IRQ_HANDLED;
28806 }
28807
28808 -static struct ata_port_operations rb532_pata_port_ops = {
28809 +static const struct ata_port_operations rb532_pata_port_ops = {
28810 .inherits = &ata_sff_port_ops,
28811 .sff_data_xfer = ata_sff_data_xfer32,
28812 };
28813 diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
28814 index c843a1e..b5853c3 100644
28815 --- a/drivers/ata/pata_rdc.c
28816 +++ b/drivers/ata/pata_rdc.c
28817 @@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
28818 pci_write_config_byte(dev, 0x48, udma_enable);
28819 }
28820
28821 -static struct ata_port_operations rdc_pata_ops = {
28822 +static const struct ata_port_operations rdc_pata_ops = {
28823 .inherits = &ata_bmdma32_port_ops,
28824 .cable_detect = rdc_pata_cable_detect,
28825 .set_piomode = rdc_set_piomode,
28826 diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
28827 index a5e4dfe..080c8c9 100644
28828 --- a/drivers/ata/pata_rz1000.c
28829 +++ b/drivers/ata/pata_rz1000.c
28830 @@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_sht = {
28831 ATA_PIO_SHT(DRV_NAME),
28832 };
28833
28834 -static struct ata_port_operations rz1000_port_ops = {
28835 +static const struct ata_port_operations rz1000_port_ops = {
28836 .inherits = &ata_sff_port_ops,
28837 .cable_detect = ata_cable_40wire,
28838 .set_mode = rz1000_set_mode,
28839 diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
28840 index 3bbed83..e309daf 100644
28841 --- a/drivers/ata/pata_sc1200.c
28842 +++ b/drivers/ata/pata_sc1200.c
28843 @@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_sht = {
28844 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28845 };
28846
28847 -static struct ata_port_operations sc1200_port_ops = {
28848 +static const struct ata_port_operations sc1200_port_ops = {
28849 .inherits = &ata_bmdma_port_ops,
28850 .qc_prep = ata_sff_dumb_qc_prep,
28851 .qc_issue = sc1200_qc_issue,
28852 diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
28853 index 4257d6b..4c1d9d5 100644
28854 --- a/drivers/ata/pata_scc.c
28855 +++ b/drivers/ata/pata_scc.c
28856 @@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht = {
28857 ATA_BMDMA_SHT(DRV_NAME),
28858 };
28859
28860 -static struct ata_port_operations scc_pata_ops = {
28861 +static const struct ata_port_operations scc_pata_ops = {
28862 .inherits = &ata_bmdma_port_ops,
28863
28864 .set_piomode = scc_set_piomode,
28865 diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
28866 index 99cceb4..e2e0a87 100644
28867 --- a/drivers/ata/pata_sch.c
28868 +++ b/drivers/ata/pata_sch.c
28869 @@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht = {
28870 ATA_BMDMA_SHT(DRV_NAME),
28871 };
28872
28873 -static struct ata_port_operations sch_pata_ops = {
28874 +static const struct ata_port_operations sch_pata_ops = {
28875 .inherits = &ata_bmdma_port_ops,
28876 .cable_detect = ata_cable_unknown,
28877 .set_piomode = sch_set_piomode,
28878 diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
28879 index beaed12..39969f1 100644
28880 --- a/drivers/ata/pata_serverworks.c
28881 +++ b/drivers/ata/pata_serverworks.c
28882 @@ -299,7 +299,7 @@ static struct scsi_host_template serverworks_sht = {
28883 ATA_BMDMA_SHT(DRV_NAME),
28884 };
28885
28886 -static struct ata_port_operations serverworks_osb4_port_ops = {
28887 +static const struct ata_port_operations serverworks_osb4_port_ops = {
28888 .inherits = &ata_bmdma_port_ops,
28889 .cable_detect = serverworks_cable_detect,
28890 .mode_filter = serverworks_osb4_filter,
28891 @@ -307,7 +307,7 @@ static struct ata_port_operations serverworks_osb4_port_ops = {
28892 .set_dmamode = serverworks_set_dmamode,
28893 };
28894
28895 -static struct ata_port_operations serverworks_csb_port_ops = {
28896 +static const struct ata_port_operations serverworks_csb_port_ops = {
28897 .inherits = &serverworks_osb4_port_ops,
28898 .mode_filter = serverworks_csb_filter,
28899 };
28900 diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
28901 index a2ace48..0463b44 100644
28902 --- a/drivers/ata/pata_sil680.c
28903 +++ b/drivers/ata/pata_sil680.c
28904 @@ -194,7 +194,7 @@ static struct scsi_host_template sil680_sht = {
28905 ATA_BMDMA_SHT(DRV_NAME),
28906 };
28907
28908 -static struct ata_port_operations sil680_port_ops = {
28909 +static const struct ata_port_operations sil680_port_ops = {
28910 .inherits = &ata_bmdma32_port_ops,
28911 .cable_detect = sil680_cable_detect,
28912 .set_piomode = sil680_set_piomode,
28913 diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
28914 index 488e77b..b3724d5 100644
28915 --- a/drivers/ata/pata_sis.c
28916 +++ b/drivers/ata/pata_sis.c
28917 @@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht = {
28918 ATA_BMDMA_SHT(DRV_NAME),
28919 };
28920
28921 -static struct ata_port_operations sis_133_for_sata_ops = {
28922 +static const struct ata_port_operations sis_133_for_sata_ops = {
28923 .inherits = &ata_bmdma_port_ops,
28924 .set_piomode = sis_133_set_piomode,
28925 .set_dmamode = sis_133_set_dmamode,
28926 .cable_detect = sis_133_cable_detect,
28927 };
28928
28929 -static struct ata_port_operations sis_base_ops = {
28930 +static const struct ata_port_operations sis_base_ops = {
28931 .inherits = &ata_bmdma_port_ops,
28932 .prereset = sis_pre_reset,
28933 };
28934
28935 -static struct ata_port_operations sis_133_ops = {
28936 +static const struct ata_port_operations sis_133_ops = {
28937 .inherits = &sis_base_ops,
28938 .set_piomode = sis_133_set_piomode,
28939 .set_dmamode = sis_133_set_dmamode,
28940 .cable_detect = sis_133_cable_detect,
28941 };
28942
28943 -static struct ata_port_operations sis_133_early_ops = {
28944 +static const struct ata_port_operations sis_133_early_ops = {
28945 .inherits = &sis_base_ops,
28946 .set_piomode = sis_100_set_piomode,
28947 .set_dmamode = sis_133_early_set_dmamode,
28948 .cable_detect = sis_66_cable_detect,
28949 };
28950
28951 -static struct ata_port_operations sis_100_ops = {
28952 +static const struct ata_port_operations sis_100_ops = {
28953 .inherits = &sis_base_ops,
28954 .set_piomode = sis_100_set_piomode,
28955 .set_dmamode = sis_100_set_dmamode,
28956 .cable_detect = sis_66_cable_detect,
28957 };
28958
28959 -static struct ata_port_operations sis_66_ops = {
28960 +static const struct ata_port_operations sis_66_ops = {
28961 .inherits = &sis_base_ops,
28962 .set_piomode = sis_old_set_piomode,
28963 .set_dmamode = sis_66_set_dmamode,
28964 .cable_detect = sis_66_cable_detect,
28965 };
28966
28967 -static struct ata_port_operations sis_old_ops = {
28968 +static const struct ata_port_operations sis_old_ops = {
28969 .inherits = &sis_base_ops,
28970 .set_piomode = sis_old_set_piomode,
28971 .set_dmamode = sis_old_set_dmamode,
28972 diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
28973 index 29f733c..43e9ca0 100644
28974 --- a/drivers/ata/pata_sl82c105.c
28975 +++ b/drivers/ata/pata_sl82c105.c
28976 @@ -231,7 +231,7 @@ static struct scsi_host_template sl82c105_sht = {
28977 ATA_BMDMA_SHT(DRV_NAME),
28978 };
28979
28980 -static struct ata_port_operations sl82c105_port_ops = {
28981 +static const struct ata_port_operations sl82c105_port_ops = {
28982 .inherits = &ata_bmdma_port_ops,
28983 .qc_defer = sl82c105_qc_defer,
28984 .bmdma_start = sl82c105_bmdma_start,
28985 diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
28986 index f1f13ff..df39e99 100644
28987 --- a/drivers/ata/pata_triflex.c
28988 +++ b/drivers/ata/pata_triflex.c
28989 @@ -178,7 +178,7 @@ static struct scsi_host_template triflex_sht = {
28990 ATA_BMDMA_SHT(DRV_NAME),
28991 };
28992
28993 -static struct ata_port_operations triflex_port_ops = {
28994 +static const struct ata_port_operations triflex_port_ops = {
28995 .inherits = &ata_bmdma_port_ops,
28996 .bmdma_start = triflex_bmdma_start,
28997 .bmdma_stop = triflex_bmdma_stop,
28998 diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
28999 index 1d73b8d..98a4b29 100644
29000 --- a/drivers/ata/pata_via.c
29001 +++ b/drivers/ata/pata_via.c
29002 @@ -419,7 +419,7 @@ static struct scsi_host_template via_sht = {
29003 ATA_BMDMA_SHT(DRV_NAME),
29004 };
29005
29006 -static struct ata_port_operations via_port_ops = {
29007 +static const struct ata_port_operations via_port_ops = {
29008 .inherits = &ata_bmdma_port_ops,
29009 .cable_detect = via_cable_detect,
29010 .set_piomode = via_set_piomode,
29011 @@ -429,7 +429,7 @@ static struct ata_port_operations via_port_ops = {
29012 .port_start = via_port_start,
29013 };
29014
29015 -static struct ata_port_operations via_port_ops_noirq = {
29016 +static const struct ata_port_operations via_port_ops_noirq = {
29017 .inherits = &via_port_ops,
29018 .sff_data_xfer = ata_sff_data_xfer_noirq,
29019 };
29020 diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
29021 index 6d8619b..ad511c4 100644
29022 --- a/drivers/ata/pata_winbond.c
29023 +++ b/drivers/ata/pata_winbond.c
29024 @@ -125,7 +125,7 @@ static struct scsi_host_template winbond_sht = {
29025 ATA_PIO_SHT(DRV_NAME),
29026 };
29027
29028 -static struct ata_port_operations winbond_port_ops = {
29029 +static const struct ata_port_operations winbond_port_ops = {
29030 .inherits = &ata_sff_port_ops,
29031 .sff_data_xfer = winbond_data_xfer,
29032 .cable_detect = ata_cable_40wire,
29033 diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
29034 index 6c65b07..f996ec7 100644
29035 --- a/drivers/ata/pdc_adma.c
29036 +++ b/drivers/ata/pdc_adma.c
29037 @@ -145,7 +145,7 @@ static struct scsi_host_template adma_ata_sht = {
29038 .dma_boundary = ADMA_DMA_BOUNDARY,
29039 };
29040
29041 -static struct ata_port_operations adma_ata_ops = {
29042 +static const struct ata_port_operations adma_ata_ops = {
29043 .inherits = &ata_sff_port_ops,
29044
29045 .lost_interrupt = ATA_OP_NULL,
29046 diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
29047 index 172b57e..c49bc1e 100644
29048 --- a/drivers/ata/sata_fsl.c
29049 +++ b/drivers/ata/sata_fsl.c
29050 @@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fsl_sht = {
29051 .dma_boundary = ATA_DMA_BOUNDARY,
29052 };
29053
29054 -static struct ata_port_operations sata_fsl_ops = {
29055 +static const struct ata_port_operations sata_fsl_ops = {
29056 .inherits = &sata_pmp_port_ops,
29057
29058 .qc_defer = ata_std_qc_defer,
29059 diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
29060 index 4406902..60603ef 100644
29061 --- a/drivers/ata/sata_inic162x.c
29062 +++ b/drivers/ata/sata_inic162x.c
29063 @@ -721,7 +721,7 @@ static int inic_port_start(struct ata_port *ap)
29064 return 0;
29065 }
29066
29067 -static struct ata_port_operations inic_port_ops = {
29068 +static const struct ata_port_operations inic_port_ops = {
29069 .inherits = &sata_port_ops,
29070
29071 .check_atapi_dma = inic_check_atapi_dma,
29072 diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
29073 index cf41126..8107be6 100644
29074 --- a/drivers/ata/sata_mv.c
29075 +++ b/drivers/ata/sata_mv.c
29076 @@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht = {
29077 .dma_boundary = MV_DMA_BOUNDARY,
29078 };
29079
29080 -static struct ata_port_operations mv5_ops = {
29081 +static const struct ata_port_operations mv5_ops = {
29082 .inherits = &ata_sff_port_ops,
29083
29084 .lost_interrupt = ATA_OP_NULL,
29085 @@ -678,7 +678,7 @@ static struct ata_port_operations mv5_ops = {
29086 .port_stop = mv_port_stop,
29087 };
29088
29089 -static struct ata_port_operations mv6_ops = {
29090 +static const struct ata_port_operations mv6_ops = {
29091 .inherits = &mv5_ops,
29092 .dev_config = mv6_dev_config,
29093 .scr_read = mv_scr_read,
29094 @@ -698,7 +698,7 @@ static struct ata_port_operations mv6_ops = {
29095 .bmdma_status = mv_bmdma_status,
29096 };
29097
29098 -static struct ata_port_operations mv_iie_ops = {
29099 +static const struct ata_port_operations mv_iie_ops = {
29100 .inherits = &mv6_ops,
29101 .dev_config = ATA_OP_NULL,
29102 .qc_prep = mv_qc_prep_iie,
29103 diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
29104 index ae2297c..d5c9c33 100644
29105 --- a/drivers/ata/sata_nv.c
29106 +++ b/drivers/ata/sata_nv.c
29107 @@ -464,7 +464,7 @@ static struct scsi_host_template nv_swncq_sht = {
29108 * cases. Define nv_hardreset() which only kicks in for post-boot
29109 * probing and use it for all variants.
29110 */
29111 -static struct ata_port_operations nv_generic_ops = {
29112 +static const struct ata_port_operations nv_generic_ops = {
29113 .inherits = &ata_bmdma_port_ops,
29114 .lost_interrupt = ATA_OP_NULL,
29115 .scr_read = nv_scr_read,
29116 @@ -472,20 +472,20 @@ static struct ata_port_operations nv_generic_ops = {
29117 .hardreset = nv_hardreset,
29118 };
29119
29120 -static struct ata_port_operations nv_nf2_ops = {
29121 +static const struct ata_port_operations nv_nf2_ops = {
29122 .inherits = &nv_generic_ops,
29123 .freeze = nv_nf2_freeze,
29124 .thaw = nv_nf2_thaw,
29125 };
29126
29127 -static struct ata_port_operations nv_ck804_ops = {
29128 +static const struct ata_port_operations nv_ck804_ops = {
29129 .inherits = &nv_generic_ops,
29130 .freeze = nv_ck804_freeze,
29131 .thaw = nv_ck804_thaw,
29132 .host_stop = nv_ck804_host_stop,
29133 };
29134
29135 -static struct ata_port_operations nv_adma_ops = {
29136 +static const struct ata_port_operations nv_adma_ops = {
29137 .inherits = &nv_ck804_ops,
29138
29139 .check_atapi_dma = nv_adma_check_atapi_dma,
29140 @@ -509,7 +509,7 @@ static struct ata_port_operations nv_adma_ops = {
29141 .host_stop = nv_adma_host_stop,
29142 };
29143
29144 -static struct ata_port_operations nv_swncq_ops = {
29145 +static const struct ata_port_operations nv_swncq_ops = {
29146 .inherits = &nv_generic_ops,
29147
29148 .qc_defer = ata_std_qc_defer,
29149 diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
29150 index 07d8d00..6cc70bb 100644
29151 --- a/drivers/ata/sata_promise.c
29152 +++ b/drivers/ata/sata_promise.c
29153 @@ -195,7 +195,7 @@ static const struct ata_port_operations pdc_common_ops = {
29154 .error_handler = pdc_error_handler,
29155 };
29156
29157 -static struct ata_port_operations pdc_sata_ops = {
29158 +static const struct ata_port_operations pdc_sata_ops = {
29159 .inherits = &pdc_common_ops,
29160 .cable_detect = pdc_sata_cable_detect,
29161 .freeze = pdc_sata_freeze,
29162 @@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sata_ops = {
29163
29164 /* First-generation chips need a more restrictive ->check_atapi_dma op,
29165 and ->freeze/thaw that ignore the hotplug controls. */
29166 -static struct ata_port_operations pdc_old_sata_ops = {
29167 +static const struct ata_port_operations pdc_old_sata_ops = {
29168 .inherits = &pdc_sata_ops,
29169 .freeze = pdc_freeze,
29170 .thaw = pdc_thaw,
29171 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
29172 };
29173
29174 -static struct ata_port_operations pdc_pata_ops = {
29175 +static const struct ata_port_operations pdc_pata_ops = {
29176 .inherits = &pdc_common_ops,
29177 .cable_detect = pdc_pata_cable_detect,
29178 .freeze = pdc_freeze,
29179 diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
29180 index 326c0cf..36ecebe 100644
29181 --- a/drivers/ata/sata_qstor.c
29182 +++ b/drivers/ata/sata_qstor.c
29183 @@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_sht = {
29184 .dma_boundary = QS_DMA_BOUNDARY,
29185 };
29186
29187 -static struct ata_port_operations qs_ata_ops = {
29188 +static const struct ata_port_operations qs_ata_ops = {
29189 .inherits = &ata_sff_port_ops,
29190
29191 .check_atapi_dma = qs_check_atapi_dma,
29192 diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
29193 index 3cb69d5..0871d3c 100644
29194 --- a/drivers/ata/sata_sil.c
29195 +++ b/drivers/ata/sata_sil.c
29196 @@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht = {
29197 .sg_tablesize = ATA_MAX_PRD
29198 };
29199
29200 -static struct ata_port_operations sil_ops = {
29201 +static const struct ata_port_operations sil_ops = {
29202 .inherits = &ata_bmdma32_port_ops,
29203 .dev_config = sil_dev_config,
29204 .set_mode = sil_set_mode,
29205 diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
29206 index e6946fc..eddb794 100644
29207 --- a/drivers/ata/sata_sil24.c
29208 +++ b/drivers/ata/sata_sil24.c
29209 @@ -388,7 +388,7 @@ static struct scsi_host_template sil24_sht = {
29210 .dma_boundary = ATA_DMA_BOUNDARY,
29211 };
29212
29213 -static struct ata_port_operations sil24_ops = {
29214 +static const struct ata_port_operations sil24_ops = {
29215 .inherits = &sata_pmp_port_ops,
29216
29217 .qc_defer = sil24_qc_defer,
29218 diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
29219 index f8a91bf..9cb06b6 100644
29220 --- a/drivers/ata/sata_sis.c
29221 +++ b/drivers/ata/sata_sis.c
29222 @@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht = {
29223 ATA_BMDMA_SHT(DRV_NAME),
29224 };
29225
29226 -static struct ata_port_operations sis_ops = {
29227 +static const struct ata_port_operations sis_ops = {
29228 .inherits = &ata_bmdma_port_ops,
29229 .scr_read = sis_scr_read,
29230 .scr_write = sis_scr_write,
29231 diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
29232 index 7257f2d..d04c6f5 100644
29233 --- a/drivers/ata/sata_svw.c
29234 +++ b/drivers/ata/sata_svw.c
29235 @@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata_sht = {
29236 };
29237
29238
29239 -static struct ata_port_operations k2_sata_ops = {
29240 +static const struct ata_port_operations k2_sata_ops = {
29241 .inherits = &ata_bmdma_port_ops,
29242 .sff_tf_load = k2_sata_tf_load,
29243 .sff_tf_read = k2_sata_tf_read,
29244 diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
29245 index bbcf970..cd0df0d 100644
29246 --- a/drivers/ata/sata_sx4.c
29247 +++ b/drivers/ata/sata_sx4.c
29248 @@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sata_sht = {
29249 };
29250
29251 /* TODO: inherit from base port_ops after converting to new EH */
29252 -static struct ata_port_operations pdc_20621_ops = {
29253 +static const struct ata_port_operations pdc_20621_ops = {
29254 .inherits = &ata_sff_port_ops,
29255
29256 .check_atapi_dma = pdc_check_atapi_dma,
29257 diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
29258 index e5bff47..089d859 100644
29259 --- a/drivers/ata/sata_uli.c
29260 +++ b/drivers/ata/sata_uli.c
29261 @@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht = {
29262 ATA_BMDMA_SHT(DRV_NAME),
29263 };
29264
29265 -static struct ata_port_operations uli_ops = {
29266 +static const struct ata_port_operations uli_ops = {
29267 .inherits = &ata_bmdma_port_ops,
29268 .scr_read = uli_scr_read,
29269 .scr_write = uli_scr_write,
29270 diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
29271 index f5dcca7..77b94eb 100644
29272 --- a/drivers/ata/sata_via.c
29273 +++ b/drivers/ata/sata_via.c
29274 @@ -115,32 +115,32 @@ static struct scsi_host_template svia_sht = {
29275 ATA_BMDMA_SHT(DRV_NAME),
29276 };
29277
29278 -static struct ata_port_operations svia_base_ops = {
29279 +static const struct ata_port_operations svia_base_ops = {
29280 .inherits = &ata_bmdma_port_ops,
29281 .sff_tf_load = svia_tf_load,
29282 };
29283
29284 -static struct ata_port_operations vt6420_sata_ops = {
29285 +static const struct ata_port_operations vt6420_sata_ops = {
29286 .inherits = &svia_base_ops,
29287 .freeze = svia_noop_freeze,
29288 .prereset = vt6420_prereset,
29289 .bmdma_start = vt6420_bmdma_start,
29290 };
29291
29292 -static struct ata_port_operations vt6421_pata_ops = {
29293 +static const struct ata_port_operations vt6421_pata_ops = {
29294 .inherits = &svia_base_ops,
29295 .cable_detect = vt6421_pata_cable_detect,
29296 .set_piomode = vt6421_set_pio_mode,
29297 .set_dmamode = vt6421_set_dma_mode,
29298 };
29299
29300 -static struct ata_port_operations vt6421_sata_ops = {
29301 +static const struct ata_port_operations vt6421_sata_ops = {
29302 .inherits = &svia_base_ops,
29303 .scr_read = svia_scr_read,
29304 .scr_write = svia_scr_write,
29305 };
29306
29307 -static struct ata_port_operations vt8251_ops = {
29308 +static const struct ata_port_operations vt8251_ops = {
29309 .inherits = &svia_base_ops,
29310 .hardreset = sata_std_hardreset,
29311 .scr_read = vt8251_scr_read,
29312 diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
29313 index 8b2a278..51e65d3 100644
29314 --- a/drivers/ata/sata_vsc.c
29315 +++ b/drivers/ata/sata_vsc.c
29316 @@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sata_sht = {
29317 };
29318
29319
29320 -static struct ata_port_operations vsc_sata_ops = {
29321 +static const struct ata_port_operations vsc_sata_ops = {
29322 .inherits = &ata_bmdma_port_ops,
29323 /* The IRQ handling is not quite standard SFF behaviour so we
29324 cannot use the default lost interrupt handler */
29325 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
29326 index 5effec6..7e4019a 100644
29327 --- a/drivers/atm/adummy.c
29328 +++ b/drivers/atm/adummy.c
29329 @@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
29330 vcc->pop(vcc, skb);
29331 else
29332 dev_kfree_skb_any(skb);
29333 - atomic_inc(&vcc->stats->tx);
29334 + atomic_inc_unchecked(&vcc->stats->tx);
29335
29336 return 0;
29337 }
29338 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
29339 index 66e1813..26a27c6 100644
29340 --- a/drivers/atm/ambassador.c
29341 +++ b/drivers/atm/ambassador.c
29342 @@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
29343 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
29344
29345 // VC layer stats
29346 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29347 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29348
29349 // free the descriptor
29350 kfree (tx_descr);
29351 @@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
29352 dump_skb ("<<<", vc, skb);
29353
29354 // VC layer stats
29355 - atomic_inc(&atm_vcc->stats->rx);
29356 + atomic_inc_unchecked(&atm_vcc->stats->rx);
29357 __net_timestamp(skb);
29358 // end of our responsability
29359 atm_vcc->push (atm_vcc, skb);
29360 @@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
29361 } else {
29362 PRINTK (KERN_INFO, "dropped over-size frame");
29363 // should we count this?
29364 - atomic_inc(&atm_vcc->stats->rx_drop);
29365 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29366 }
29367
29368 } else {
29369 @@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
29370 }
29371
29372 if (check_area (skb->data, skb->len)) {
29373 - atomic_inc(&atm_vcc->stats->tx_err);
29374 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
29375 return -ENOMEM; // ?
29376 }
29377
29378 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
29379 index 02ad83d..6daffeb 100644
29380 --- a/drivers/atm/atmtcp.c
29381 +++ b/drivers/atm/atmtcp.c
29382 @@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29383 if (vcc->pop) vcc->pop(vcc,skb);
29384 else dev_kfree_skb(skb);
29385 if (dev_data) return 0;
29386 - atomic_inc(&vcc->stats->tx_err);
29387 + atomic_inc_unchecked(&vcc->stats->tx_err);
29388 return -ENOLINK;
29389 }
29390 size = skb->len+sizeof(struct atmtcp_hdr);
29391 @@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29392 if (!new_skb) {
29393 if (vcc->pop) vcc->pop(vcc,skb);
29394 else dev_kfree_skb(skb);
29395 - atomic_inc(&vcc->stats->tx_err);
29396 + atomic_inc_unchecked(&vcc->stats->tx_err);
29397 return -ENOBUFS;
29398 }
29399 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
29400 @@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29401 if (vcc->pop) vcc->pop(vcc,skb);
29402 else dev_kfree_skb(skb);
29403 out_vcc->push(out_vcc,new_skb);
29404 - atomic_inc(&vcc->stats->tx);
29405 - atomic_inc(&out_vcc->stats->rx);
29406 + atomic_inc_unchecked(&vcc->stats->tx);
29407 + atomic_inc_unchecked(&out_vcc->stats->rx);
29408 return 0;
29409 }
29410
29411 @@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
29412 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
29413 read_unlock(&vcc_sklist_lock);
29414 if (!out_vcc) {
29415 - atomic_inc(&vcc->stats->tx_err);
29416 + atomic_inc_unchecked(&vcc->stats->tx_err);
29417 goto done;
29418 }
29419 skb_pull(skb,sizeof(struct atmtcp_hdr));
29420 @@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
29421 __net_timestamp(new_skb);
29422 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
29423 out_vcc->push(out_vcc,new_skb);
29424 - atomic_inc(&vcc->stats->tx);
29425 - atomic_inc(&out_vcc->stats->rx);
29426 + atomic_inc_unchecked(&vcc->stats->tx);
29427 + atomic_inc_unchecked(&out_vcc->stats->rx);
29428 done:
29429 if (vcc->pop) vcc->pop(vcc,skb);
29430 else dev_kfree_skb(skb);
29431 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
29432 index 0c30261..3da356e 100644
29433 --- a/drivers/atm/eni.c
29434 +++ b/drivers/atm/eni.c
29435 @@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
29436 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
29437 vcc->dev->number);
29438 length = 0;
29439 - atomic_inc(&vcc->stats->rx_err);
29440 + atomic_inc_unchecked(&vcc->stats->rx_err);
29441 }
29442 else {
29443 length = ATM_CELL_SIZE-1; /* no HEC */
29444 @@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
29445 size);
29446 }
29447 eff = length = 0;
29448 - atomic_inc(&vcc->stats->rx_err);
29449 + atomic_inc_unchecked(&vcc->stats->rx_err);
29450 }
29451 else {
29452 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
29453 @@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
29454 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
29455 vcc->dev->number,vcc->vci,length,size << 2,descr);
29456 length = eff = 0;
29457 - atomic_inc(&vcc->stats->rx_err);
29458 + atomic_inc_unchecked(&vcc->stats->rx_err);
29459 }
29460 }
29461 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
29462 @@ -770,7 +770,7 @@ rx_dequeued++;
29463 vcc->push(vcc,skb);
29464 pushed++;
29465 }
29466 - atomic_inc(&vcc->stats->rx);
29467 + atomic_inc_unchecked(&vcc->stats->rx);
29468 }
29469 wake_up(&eni_dev->rx_wait);
29470 }
29471 @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
29472 PCI_DMA_TODEVICE);
29473 if (vcc->pop) vcc->pop(vcc,skb);
29474 else dev_kfree_skb_irq(skb);
29475 - atomic_inc(&vcc->stats->tx);
29476 + atomic_inc_unchecked(&vcc->stats->tx);
29477 wake_up(&eni_dev->tx_wait);
29478 dma_complete++;
29479 }
29480 @@ -1570,7 +1570,7 @@ tx_complete++;
29481 /*--------------------------------- entries ---------------------------------*/
29482
29483
29484 -static const char *media_name[] __devinitdata = {
29485 +static const char *media_name[] __devinitconst = {
29486 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
29487 "UTP", "05?", "06?", "07?", /* 4- 7 */
29488 "TAXI","09?", "10?", "11?", /* 8-11 */
29489 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
29490 index cd5049a..a51209f 100644
29491 --- a/drivers/atm/firestream.c
29492 +++ b/drivers/atm/firestream.c
29493 @@ -748,7 +748,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
29494 }
29495 }
29496
29497 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29498 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29499
29500 fs_dprintk (FS_DEBUG_TXMEM, "i");
29501 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
29502 @@ -815,7 +815,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
29503 #endif
29504 skb_put (skb, qe->p1 & 0xffff);
29505 ATM_SKB(skb)->vcc = atm_vcc;
29506 - atomic_inc(&atm_vcc->stats->rx);
29507 + atomic_inc_unchecked(&atm_vcc->stats->rx);
29508 __net_timestamp(skb);
29509 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
29510 atm_vcc->push (atm_vcc, skb);
29511 @@ -836,12 +836,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
29512 kfree (pe);
29513 }
29514 if (atm_vcc)
29515 - atomic_inc(&atm_vcc->stats->rx_drop);
29516 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29517 break;
29518 case 0x1f: /* Reassembly abort: no buffers. */
29519 /* Silently increment error counter. */
29520 if (atm_vcc)
29521 - atomic_inc(&atm_vcc->stats->rx_drop);
29522 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29523 break;
29524 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
29525 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
29526 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
29527 index f766cc4..a34002e 100644
29528 --- a/drivers/atm/fore200e.c
29529 +++ b/drivers/atm/fore200e.c
29530 @@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
29531 #endif
29532 /* check error condition */
29533 if (*entry->status & STATUS_ERROR)
29534 - atomic_inc(&vcc->stats->tx_err);
29535 + atomic_inc_unchecked(&vcc->stats->tx_err);
29536 else
29537 - atomic_inc(&vcc->stats->tx);
29538 + atomic_inc_unchecked(&vcc->stats->tx);
29539 }
29540 }
29541
29542 @@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
29543 if (skb == NULL) {
29544 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
29545
29546 - atomic_inc(&vcc->stats->rx_drop);
29547 + atomic_inc_unchecked(&vcc->stats->rx_drop);
29548 return -ENOMEM;
29549 }
29550
29551 @@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
29552
29553 dev_kfree_skb_any(skb);
29554
29555 - atomic_inc(&vcc->stats->rx_drop);
29556 + atomic_inc_unchecked(&vcc->stats->rx_drop);
29557 return -ENOMEM;
29558 }
29559
29560 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
29561
29562 vcc->push(vcc, skb);
29563 - atomic_inc(&vcc->stats->rx);
29564 + atomic_inc_unchecked(&vcc->stats->rx);
29565
29566 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
29567
29568 @@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
29569 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
29570 fore200e->atm_dev->number,
29571 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
29572 - atomic_inc(&vcc->stats->rx_err);
29573 + atomic_inc_unchecked(&vcc->stats->rx_err);
29574 }
29575 }
29576
29577 @@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
29578 goto retry_here;
29579 }
29580
29581 - atomic_inc(&vcc->stats->tx_err);
29582 + atomic_inc_unchecked(&vcc->stats->tx_err);
29583
29584 fore200e->tx_sat++;
29585 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
29586 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
29587 index 7066703..2b130de 100644
29588 --- a/drivers/atm/he.c
29589 +++ b/drivers/atm/he.c
29590 @@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29591
29592 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
29593 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
29594 - atomic_inc(&vcc->stats->rx_drop);
29595 + atomic_inc_unchecked(&vcc->stats->rx_drop);
29596 goto return_host_buffers;
29597 }
29598
29599 @@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29600 RBRQ_LEN_ERR(he_dev->rbrq_head)
29601 ? "LEN_ERR" : "",
29602 vcc->vpi, vcc->vci);
29603 - atomic_inc(&vcc->stats->rx_err);
29604 + atomic_inc_unchecked(&vcc->stats->rx_err);
29605 goto return_host_buffers;
29606 }
29607
29608 @@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29609 vcc->push(vcc, skb);
29610 spin_lock(&he_dev->global_lock);
29611
29612 - atomic_inc(&vcc->stats->rx);
29613 + atomic_inc_unchecked(&vcc->stats->rx);
29614
29615 return_host_buffers:
29616 ++pdus_assembled;
29617 @@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
29618 tpd->vcc->pop(tpd->vcc, tpd->skb);
29619 else
29620 dev_kfree_skb_any(tpd->skb);
29621 - atomic_inc(&tpd->vcc->stats->tx_err);
29622 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
29623 }
29624 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
29625 return;
29626 @@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29627 vcc->pop(vcc, skb);
29628 else
29629 dev_kfree_skb_any(skb);
29630 - atomic_inc(&vcc->stats->tx_err);
29631 + atomic_inc_unchecked(&vcc->stats->tx_err);
29632 return -EINVAL;
29633 }
29634
29635 @@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29636 vcc->pop(vcc, skb);
29637 else
29638 dev_kfree_skb_any(skb);
29639 - atomic_inc(&vcc->stats->tx_err);
29640 + atomic_inc_unchecked(&vcc->stats->tx_err);
29641 return -EINVAL;
29642 }
29643 #endif
29644 @@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29645 vcc->pop(vcc, skb);
29646 else
29647 dev_kfree_skb_any(skb);
29648 - atomic_inc(&vcc->stats->tx_err);
29649 + atomic_inc_unchecked(&vcc->stats->tx_err);
29650 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29651 return -ENOMEM;
29652 }
29653 @@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29654 vcc->pop(vcc, skb);
29655 else
29656 dev_kfree_skb_any(skb);
29657 - atomic_inc(&vcc->stats->tx_err);
29658 + atomic_inc_unchecked(&vcc->stats->tx_err);
29659 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29660 return -ENOMEM;
29661 }
29662 @@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29663 __enqueue_tpd(he_dev, tpd, cid);
29664 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29665
29666 - atomic_inc(&vcc->stats->tx);
29667 + atomic_inc_unchecked(&vcc->stats->tx);
29668
29669 return 0;
29670 }
29671 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
29672 index 4e49021..01b1512 100644
29673 --- a/drivers/atm/horizon.c
29674 +++ b/drivers/atm/horizon.c
29675 @@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
29676 {
29677 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
29678 // VC layer stats
29679 - atomic_inc(&vcc->stats->rx);
29680 + atomic_inc_unchecked(&vcc->stats->rx);
29681 __net_timestamp(skb);
29682 // end of our responsability
29683 vcc->push (vcc, skb);
29684 @@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
29685 dev->tx_iovec = NULL;
29686
29687 // VC layer stats
29688 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29689 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29690
29691 // free the skb
29692 hrz_kfree_skb (skb);
29693 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
29694 index e33ae00..9deb4ab 100644
29695 --- a/drivers/atm/idt77252.c
29696 +++ b/drivers/atm/idt77252.c
29697 @@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
29698 else
29699 dev_kfree_skb(skb);
29700
29701 - atomic_inc(&vcc->stats->tx);
29702 + atomic_inc_unchecked(&vcc->stats->tx);
29703 }
29704
29705 atomic_dec(&scq->used);
29706 @@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29707 if ((sb = dev_alloc_skb(64)) == NULL) {
29708 printk("%s: Can't allocate buffers for aal0.\n",
29709 card->name);
29710 - atomic_add(i, &vcc->stats->rx_drop);
29711 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
29712 break;
29713 }
29714 if (!atm_charge(vcc, sb->truesize)) {
29715 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
29716 card->name);
29717 - atomic_add(i - 1, &vcc->stats->rx_drop);
29718 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
29719 dev_kfree_skb(sb);
29720 break;
29721 }
29722 @@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29723 ATM_SKB(sb)->vcc = vcc;
29724 __net_timestamp(sb);
29725 vcc->push(vcc, sb);
29726 - atomic_inc(&vcc->stats->rx);
29727 + atomic_inc_unchecked(&vcc->stats->rx);
29728
29729 cell += ATM_CELL_PAYLOAD;
29730 }
29731 @@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29732 "(CDC: %08x)\n",
29733 card->name, len, rpp->len, readl(SAR_REG_CDC));
29734 recycle_rx_pool_skb(card, rpp);
29735 - atomic_inc(&vcc->stats->rx_err);
29736 + atomic_inc_unchecked(&vcc->stats->rx_err);
29737 return;
29738 }
29739 if (stat & SAR_RSQE_CRC) {
29740 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
29741 recycle_rx_pool_skb(card, rpp);
29742 - atomic_inc(&vcc->stats->rx_err);
29743 + atomic_inc_unchecked(&vcc->stats->rx_err);
29744 return;
29745 }
29746 if (skb_queue_len(&rpp->queue) > 1) {
29747 @@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29748 RXPRINTK("%s: Can't alloc RX skb.\n",
29749 card->name);
29750 recycle_rx_pool_skb(card, rpp);
29751 - atomic_inc(&vcc->stats->rx_err);
29752 + atomic_inc_unchecked(&vcc->stats->rx_err);
29753 return;
29754 }
29755 if (!atm_charge(vcc, skb->truesize)) {
29756 @@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29757 __net_timestamp(skb);
29758
29759 vcc->push(vcc, skb);
29760 - atomic_inc(&vcc->stats->rx);
29761 + atomic_inc_unchecked(&vcc->stats->rx);
29762
29763 return;
29764 }
29765 @@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29766 __net_timestamp(skb);
29767
29768 vcc->push(vcc, skb);
29769 - atomic_inc(&vcc->stats->rx);
29770 + atomic_inc_unchecked(&vcc->stats->rx);
29771
29772 if (skb->truesize > SAR_FB_SIZE_3)
29773 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
29774 @@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
29775 if (vcc->qos.aal != ATM_AAL0) {
29776 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
29777 card->name, vpi, vci);
29778 - atomic_inc(&vcc->stats->rx_drop);
29779 + atomic_inc_unchecked(&vcc->stats->rx_drop);
29780 goto drop;
29781 }
29782
29783 if ((sb = dev_alloc_skb(64)) == NULL) {
29784 printk("%s: Can't allocate buffers for AAL0.\n",
29785 card->name);
29786 - atomic_inc(&vcc->stats->rx_err);
29787 + atomic_inc_unchecked(&vcc->stats->rx_err);
29788 goto drop;
29789 }
29790
29791 @@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
29792 ATM_SKB(sb)->vcc = vcc;
29793 __net_timestamp(sb);
29794 vcc->push(vcc, sb);
29795 - atomic_inc(&vcc->stats->rx);
29796 + atomic_inc_unchecked(&vcc->stats->rx);
29797
29798 drop:
29799 skb_pull(queue, 64);
29800 @@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
29801
29802 if (vc == NULL) {
29803 printk("%s: NULL connection in send().\n", card->name);
29804 - atomic_inc(&vcc->stats->tx_err);
29805 + atomic_inc_unchecked(&vcc->stats->tx_err);
29806 dev_kfree_skb(skb);
29807 return -EINVAL;
29808 }
29809 if (!test_bit(VCF_TX, &vc->flags)) {
29810 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
29811 - atomic_inc(&vcc->stats->tx_err);
29812 + atomic_inc_unchecked(&vcc->stats->tx_err);
29813 dev_kfree_skb(skb);
29814 return -EINVAL;
29815 }
29816 @@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
29817 break;
29818 default:
29819 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
29820 - atomic_inc(&vcc->stats->tx_err);
29821 + atomic_inc_unchecked(&vcc->stats->tx_err);
29822 dev_kfree_skb(skb);
29823 return -EINVAL;
29824 }
29825
29826 if (skb_shinfo(skb)->nr_frags != 0) {
29827 printk("%s: No scatter-gather yet.\n", card->name);
29828 - atomic_inc(&vcc->stats->tx_err);
29829 + atomic_inc_unchecked(&vcc->stats->tx_err);
29830 dev_kfree_skb(skb);
29831 return -EINVAL;
29832 }
29833 @@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
29834
29835 err = queue_skb(card, vc, skb, oam);
29836 if (err) {
29837 - atomic_inc(&vcc->stats->tx_err);
29838 + atomic_inc_unchecked(&vcc->stats->tx_err);
29839 dev_kfree_skb(skb);
29840 return err;
29841 }
29842 @@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
29843 skb = dev_alloc_skb(64);
29844 if (!skb) {
29845 printk("%s: Out of memory in send_oam().\n", card->name);
29846 - atomic_inc(&vcc->stats->tx_err);
29847 + atomic_inc_unchecked(&vcc->stats->tx_err);
29848 return -ENOMEM;
29849 }
29850 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
29851 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
29852 index b2c1b37..faa672b 100644
29853 --- a/drivers/atm/iphase.c
29854 +++ b/drivers/atm/iphase.c
29855 @@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
29856 status = (u_short) (buf_desc_ptr->desc_mode);
29857 if (status & (RX_CER | RX_PTE | RX_OFL))
29858 {
29859 - atomic_inc(&vcc->stats->rx_err);
29860 + atomic_inc_unchecked(&vcc->stats->rx_err);
29861 IF_ERR(printk("IA: bad packet, dropping it");)
29862 if (status & RX_CER) {
29863 IF_ERR(printk(" cause: packet CRC error\n");)
29864 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
29865 len = dma_addr - buf_addr;
29866 if (len > iadev->rx_buf_sz) {
29867 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
29868 - atomic_inc(&vcc->stats->rx_err);
29869 + atomic_inc_unchecked(&vcc->stats->rx_err);
29870 goto out_free_desc;
29871 }
29872
29873 @@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *dev)
29874 ia_vcc = INPH_IA_VCC(vcc);
29875 if (ia_vcc == NULL)
29876 {
29877 - atomic_inc(&vcc->stats->rx_err);
29878 + atomic_inc_unchecked(&vcc->stats->rx_err);
29879 dev_kfree_skb_any(skb);
29880 atm_return(vcc, atm_guess_pdu2truesize(len));
29881 goto INCR_DLE;
29882 @@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *dev)
29883 if ((length > iadev->rx_buf_sz) || (length >
29884 (skb->len - sizeof(struct cpcs_trailer))))
29885 {
29886 - atomic_inc(&vcc->stats->rx_err);
29887 + atomic_inc_unchecked(&vcc->stats->rx_err);
29888 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
29889 length, skb->len);)
29890 dev_kfree_skb_any(skb);
29891 @@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *dev)
29892
29893 IF_RX(printk("rx_dle_intr: skb push");)
29894 vcc->push(vcc,skb);
29895 - atomic_inc(&vcc->stats->rx);
29896 + atomic_inc_unchecked(&vcc->stats->rx);
29897 iadev->rx_pkt_cnt++;
29898 }
29899 INCR_DLE:
29900 @@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
29901 {
29902 struct k_sonet_stats *stats;
29903 stats = &PRIV(_ia_dev[board])->sonet_stats;
29904 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
29905 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
29906 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
29907 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
29908 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
29909 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
29910 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
29911 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
29912 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
29913 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
29914 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
29915 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
29916 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
29917 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
29918 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
29919 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
29920 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
29921 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
29922 }
29923 ia_cmds.status = 0;
29924 break;
29925 @@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
29926 if ((desc == 0) || (desc > iadev->num_tx_desc))
29927 {
29928 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
29929 - atomic_inc(&vcc->stats->tx);
29930 + atomic_inc_unchecked(&vcc->stats->tx);
29931 if (vcc->pop)
29932 vcc->pop(vcc, skb);
29933 else
29934 @@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
29935 ATM_DESC(skb) = vcc->vci;
29936 skb_queue_tail(&iadev->tx_dma_q, skb);
29937
29938 - atomic_inc(&vcc->stats->tx);
29939 + atomic_inc_unchecked(&vcc->stats->tx);
29940 iadev->tx_pkt_cnt++;
29941 /* Increment transaction counter */
29942 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
29943
29944 #if 0
29945 /* add flow control logic */
29946 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
29947 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
29948 if (iavcc->vc_desc_cnt > 10) {
29949 vcc->tx_quota = vcc->tx_quota * 3 / 4;
29950 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
29951 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
29952 index cf97c34..8d30655 100644
29953 --- a/drivers/atm/lanai.c
29954 +++ b/drivers/atm/lanai.c
29955 @@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
29956 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
29957 lanai_endtx(lanai, lvcc);
29958 lanai_free_skb(lvcc->tx.atmvcc, skb);
29959 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
29960 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
29961 }
29962
29963 /* Try to fill the buffer - don't call unless there is backlog */
29964 @@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
29965 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
29966 __net_timestamp(skb);
29967 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
29968 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
29969 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
29970 out:
29971 lvcc->rx.buf.ptr = end;
29972 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
29973 @@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
29974 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
29975 "vcc %d\n", lanai->number, (unsigned int) s, vci);
29976 lanai->stats.service_rxnotaal5++;
29977 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
29978 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
29979 return 0;
29980 }
29981 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
29982 @@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
29983 int bytes;
29984 read_unlock(&vcc_sklist_lock);
29985 DPRINTK("got trashed rx pdu on vci %d\n", vci);
29986 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
29987 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
29988 lvcc->stats.x.aal5.service_trash++;
29989 bytes = (SERVICE_GET_END(s) * 16) -
29990 (((unsigned long) lvcc->rx.buf.ptr) -
29991 @@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
29992 }
29993 if (s & SERVICE_STREAM) {
29994 read_unlock(&vcc_sklist_lock);
29995 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
29996 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
29997 lvcc->stats.x.aal5.service_stream++;
29998 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
29999 "PDU on VCI %d!\n", lanai->number, vci);
30000 @@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30001 return 0;
30002 }
30003 DPRINTK("got rx crc error on vci %d\n", vci);
30004 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30005 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30006 lvcc->stats.x.aal5.service_rxcrc++;
30007 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
30008 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
30009 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
30010 index 3da804b..d3b0eed 100644
30011 --- a/drivers/atm/nicstar.c
30012 +++ b/drivers/atm/nicstar.c
30013 @@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30014 if ((vc = (vc_map *) vcc->dev_data) == NULL)
30015 {
30016 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
30017 - atomic_inc(&vcc->stats->tx_err);
30018 + atomic_inc_unchecked(&vcc->stats->tx_err);
30019 dev_kfree_skb_any(skb);
30020 return -EINVAL;
30021 }
30022 @@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30023 if (!vc->tx)
30024 {
30025 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
30026 - atomic_inc(&vcc->stats->tx_err);
30027 + atomic_inc_unchecked(&vcc->stats->tx_err);
30028 dev_kfree_skb_any(skb);
30029 return -EINVAL;
30030 }
30031 @@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30032 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
30033 {
30034 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
30035 - atomic_inc(&vcc->stats->tx_err);
30036 + atomic_inc_unchecked(&vcc->stats->tx_err);
30037 dev_kfree_skb_any(skb);
30038 return -EINVAL;
30039 }
30040 @@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30041 if (skb_shinfo(skb)->nr_frags != 0)
30042 {
30043 printk("nicstar%d: No scatter-gather yet.\n", card->index);
30044 - atomic_inc(&vcc->stats->tx_err);
30045 + atomic_inc_unchecked(&vcc->stats->tx_err);
30046 dev_kfree_skb_any(skb);
30047 return -EINVAL;
30048 }
30049 @@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30050
30051 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
30052 {
30053 - atomic_inc(&vcc->stats->tx_err);
30054 + atomic_inc_unchecked(&vcc->stats->tx_err);
30055 dev_kfree_skb_any(skb);
30056 return -EIO;
30057 }
30058 - atomic_inc(&vcc->stats->tx);
30059 + atomic_inc_unchecked(&vcc->stats->tx);
30060
30061 return 0;
30062 }
30063 @@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30064 {
30065 printk("nicstar%d: Can't allocate buffers for aal0.\n",
30066 card->index);
30067 - atomic_add(i,&vcc->stats->rx_drop);
30068 + atomic_add_unchecked(i,&vcc->stats->rx_drop);
30069 break;
30070 }
30071 if (!atm_charge(vcc, sb->truesize))
30072 {
30073 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
30074 card->index);
30075 - atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
30076 + atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
30077 dev_kfree_skb_any(sb);
30078 break;
30079 }
30080 @@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30081 ATM_SKB(sb)->vcc = vcc;
30082 __net_timestamp(sb);
30083 vcc->push(vcc, sb);
30084 - atomic_inc(&vcc->stats->rx);
30085 + atomic_inc_unchecked(&vcc->stats->rx);
30086 cell += ATM_CELL_PAYLOAD;
30087 }
30088
30089 @@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30090 if (iovb == NULL)
30091 {
30092 printk("nicstar%d: Out of iovec buffers.\n", card->index);
30093 - atomic_inc(&vcc->stats->rx_drop);
30094 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30095 recycle_rx_buf(card, skb);
30096 return;
30097 }
30098 @@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30099 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
30100 {
30101 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
30102 - atomic_inc(&vcc->stats->rx_err);
30103 + atomic_inc_unchecked(&vcc->stats->rx_err);
30104 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
30105 NS_SKB(iovb)->iovcnt = 0;
30106 iovb->len = 0;
30107 @@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30108 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
30109 card->index);
30110 which_list(card, skb);
30111 - atomic_inc(&vcc->stats->rx_err);
30112 + atomic_inc_unchecked(&vcc->stats->rx_err);
30113 recycle_rx_buf(card, skb);
30114 vc->rx_iov = NULL;
30115 recycle_iov_buf(card, iovb);
30116 @@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30117 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
30118 card->index);
30119 which_list(card, skb);
30120 - atomic_inc(&vcc->stats->rx_err);
30121 + atomic_inc_unchecked(&vcc->stats->rx_err);
30122 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30123 NS_SKB(iovb)->iovcnt);
30124 vc->rx_iov = NULL;
30125 @@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30126 printk(" - PDU size mismatch.\n");
30127 else
30128 printk(".\n");
30129 - atomic_inc(&vcc->stats->rx_err);
30130 + atomic_inc_unchecked(&vcc->stats->rx_err);
30131 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30132 NS_SKB(iovb)->iovcnt);
30133 vc->rx_iov = NULL;
30134 @@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30135 if (!atm_charge(vcc, skb->truesize))
30136 {
30137 push_rxbufs(card, skb);
30138 - atomic_inc(&vcc->stats->rx_drop);
30139 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30140 }
30141 else
30142 {
30143 @@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30144 ATM_SKB(skb)->vcc = vcc;
30145 __net_timestamp(skb);
30146 vcc->push(vcc, skb);
30147 - atomic_inc(&vcc->stats->rx);
30148 + atomic_inc_unchecked(&vcc->stats->rx);
30149 }
30150 }
30151 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
30152 @@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30153 if (!atm_charge(vcc, sb->truesize))
30154 {
30155 push_rxbufs(card, sb);
30156 - atomic_inc(&vcc->stats->rx_drop);
30157 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30158 }
30159 else
30160 {
30161 @@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30162 ATM_SKB(sb)->vcc = vcc;
30163 __net_timestamp(sb);
30164 vcc->push(vcc, sb);
30165 - atomic_inc(&vcc->stats->rx);
30166 + atomic_inc_unchecked(&vcc->stats->rx);
30167 }
30168
30169 push_rxbufs(card, skb);
30170 @@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30171 if (!atm_charge(vcc, skb->truesize))
30172 {
30173 push_rxbufs(card, skb);
30174 - atomic_inc(&vcc->stats->rx_drop);
30175 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30176 }
30177 else
30178 {
30179 @@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30180 ATM_SKB(skb)->vcc = vcc;
30181 __net_timestamp(skb);
30182 vcc->push(vcc, skb);
30183 - atomic_inc(&vcc->stats->rx);
30184 + atomic_inc_unchecked(&vcc->stats->rx);
30185 }
30186
30187 push_rxbufs(card, sb);
30188 @@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30189 if (hb == NULL)
30190 {
30191 printk("nicstar%d: Out of huge buffers.\n", card->index);
30192 - atomic_inc(&vcc->stats->rx_drop);
30193 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30194 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30195 NS_SKB(iovb)->iovcnt);
30196 vc->rx_iov = NULL;
30197 @@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30198 }
30199 else
30200 dev_kfree_skb_any(hb);
30201 - atomic_inc(&vcc->stats->rx_drop);
30202 + atomic_inc_unchecked(&vcc->stats->rx_drop);
30203 }
30204 else
30205 {
30206 @@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30207 #endif /* NS_USE_DESTRUCTORS */
30208 __net_timestamp(hb);
30209 vcc->push(vcc, hb);
30210 - atomic_inc(&vcc->stats->rx);
30211 + atomic_inc_unchecked(&vcc->stats->rx);
30212 }
30213 }
30214
30215 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
30216 index 84c93ff..e6ed269 100644
30217 --- a/drivers/atm/solos-pci.c
30218 +++ b/drivers/atm/solos-pci.c
30219 @@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
30220 }
30221 atm_charge(vcc, skb->truesize);
30222 vcc->push(vcc, skb);
30223 - atomic_inc(&vcc->stats->rx);
30224 + atomic_inc_unchecked(&vcc->stats->rx);
30225 break;
30226
30227 case PKT_STATUS:
30228 @@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *buf)
30229 char msg[500];
30230 char item[10];
30231
30232 + pax_track_stack();
30233 +
30234 len = buf->len;
30235 for (i = 0; i < len; i++){
30236 if(i % 8 == 0)
30237 @@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_card *card)
30238 vcc = SKB_CB(oldskb)->vcc;
30239
30240 if (vcc) {
30241 - atomic_inc(&vcc->stats->tx);
30242 + atomic_inc_unchecked(&vcc->stats->tx);
30243 solos_pop(vcc, oldskb);
30244 } else
30245 dev_kfree_skb_irq(oldskb);
30246 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
30247 index 6dd3f59..ee377f3 100644
30248 --- a/drivers/atm/suni.c
30249 +++ b/drivers/atm/suni.c
30250 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
30251
30252
30253 #define ADD_LIMITED(s,v) \
30254 - atomic_add((v),&stats->s); \
30255 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
30256 + atomic_add_unchecked((v),&stats->s); \
30257 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
30258
30259
30260 static void suni_hz(unsigned long from_timer)
30261 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
30262 index fc8cb07..4a80e53 100644
30263 --- a/drivers/atm/uPD98402.c
30264 +++ b/drivers/atm/uPD98402.c
30265 @@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
30266 struct sonet_stats tmp;
30267 int error = 0;
30268
30269 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30270 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30271 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
30272 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
30273 if (zero && !error) {
30274 @@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
30275
30276
30277 #define ADD_LIMITED(s,v) \
30278 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
30279 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
30280 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30281 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
30282 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
30283 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30284
30285
30286 static void stat_event(struct atm_dev *dev)
30287 @@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev *dev)
30288 if (reason & uPD98402_INT_PFM) stat_event(dev);
30289 if (reason & uPD98402_INT_PCO) {
30290 (void) GET(PCOCR); /* clear interrupt cause */
30291 - atomic_add(GET(HECCT),
30292 + atomic_add_unchecked(GET(HECCT),
30293 &PRIV(dev)->sonet_stats.uncorr_hcs);
30294 }
30295 if ((reason & uPD98402_INT_RFO) &&
30296 @@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev *dev)
30297 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
30298 uPD98402_INT_LOS),PIMR); /* enable them */
30299 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
30300 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30301 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
30302 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
30303 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30304 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
30305 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
30306 return 0;
30307 }
30308
30309 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
30310 index 2e9635b..32927b4 100644
30311 --- a/drivers/atm/zatm.c
30312 +++ b/drivers/atm/zatm.c
30313 @@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30314 }
30315 if (!size) {
30316 dev_kfree_skb_irq(skb);
30317 - if (vcc) atomic_inc(&vcc->stats->rx_err);
30318 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
30319 continue;
30320 }
30321 if (!atm_charge(vcc,skb->truesize)) {
30322 @@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30323 skb->len = size;
30324 ATM_SKB(skb)->vcc = vcc;
30325 vcc->push(vcc,skb);
30326 - atomic_inc(&vcc->stats->rx);
30327 + atomic_inc_unchecked(&vcc->stats->rx);
30328 }
30329 zout(pos & 0xffff,MTA(mbx));
30330 #if 0 /* probably a stupid idea */
30331 @@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
30332 skb_queue_head(&zatm_vcc->backlog,skb);
30333 break;
30334 }
30335 - atomic_inc(&vcc->stats->tx);
30336 + atomic_inc_unchecked(&vcc->stats->tx);
30337 wake_up(&zatm_vcc->tx_wait);
30338 }
30339
30340 diff --git a/drivers/base/bus.c b/drivers/base/bus.c
30341 index 63c143e..fece183 100644
30342 --- a/drivers/base/bus.c
30343 +++ b/drivers/base/bus.c
30344 @@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kobject *kobj, struct attribute *attr,
30345 return ret;
30346 }
30347
30348 -static struct sysfs_ops driver_sysfs_ops = {
30349 +static const struct sysfs_ops driver_sysfs_ops = {
30350 .show = drv_attr_show,
30351 .store = drv_attr_store,
30352 };
30353 @@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
30354 return ret;
30355 }
30356
30357 -static struct sysfs_ops bus_sysfs_ops = {
30358 +static const struct sysfs_ops bus_sysfs_ops = {
30359 .show = bus_attr_show,
30360 .store = bus_attr_store,
30361 };
30362 @@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset *kset, struct kobject *kobj)
30363 return 0;
30364 }
30365
30366 -static struct kset_uevent_ops bus_uevent_ops = {
30367 +static const struct kset_uevent_ops bus_uevent_ops = {
30368 .filter = bus_uevent_filter,
30369 };
30370
30371 diff --git a/drivers/base/class.c b/drivers/base/class.c
30372 index 6e2c3b0..cb61871 100644
30373 --- a/drivers/base/class.c
30374 +++ b/drivers/base/class.c
30375 @@ -63,7 +63,7 @@ static void class_release(struct kobject *kobj)
30376 kfree(cp);
30377 }
30378
30379 -static struct sysfs_ops class_sysfs_ops = {
30380 +static const struct sysfs_ops class_sysfs_ops = {
30381 .show = class_attr_show,
30382 .store = class_attr_store,
30383 };
30384 diff --git a/drivers/base/core.c b/drivers/base/core.c
30385 index f33d768..a9358d0 100644
30386 --- a/drivers/base/core.c
30387 +++ b/drivers/base/core.c
30388 @@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
30389 return ret;
30390 }
30391
30392 -static struct sysfs_ops dev_sysfs_ops = {
30393 +static const struct sysfs_ops dev_sysfs_ops = {
30394 .show = dev_attr_show,
30395 .store = dev_attr_store,
30396 };
30397 @@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
30398 return retval;
30399 }
30400
30401 -static struct kset_uevent_ops device_uevent_ops = {
30402 +static const struct kset_uevent_ops device_uevent_ops = {
30403 .filter = dev_uevent_filter,
30404 .name = dev_uevent_name,
30405 .uevent = dev_uevent,
30406 diff --git a/drivers/base/memory.c b/drivers/base/memory.c
30407 index 989429c..2272b00 100644
30408 --- a/drivers/base/memory.c
30409 +++ b/drivers/base/memory.c
30410 @@ -44,7 +44,7 @@ static int memory_uevent(struct kset *kset, struct kobject *obj, struct kobj_uev
30411 return retval;
30412 }
30413
30414 -static struct kset_uevent_ops memory_uevent_ops = {
30415 +static const struct kset_uevent_ops memory_uevent_ops = {
30416 .name = memory_uevent_name,
30417 .uevent = memory_uevent,
30418 };
30419 diff --git a/drivers/base/sys.c b/drivers/base/sys.c
30420 index 3f202f7..61c4a6f 100644
30421 --- a/drivers/base/sys.c
30422 +++ b/drivers/base/sys.c
30423 @@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struct attribute *attr,
30424 return -EIO;
30425 }
30426
30427 -static struct sysfs_ops sysfs_ops = {
30428 +static const struct sysfs_ops sysfs_ops = {
30429 .show = sysdev_show,
30430 .store = sysdev_store,
30431 };
30432 @@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct kobject *kobj, struct attribute *attr,
30433 return -EIO;
30434 }
30435
30436 -static struct sysfs_ops sysfs_class_ops = {
30437 +static const struct sysfs_ops sysfs_class_ops = {
30438 .show = sysdev_class_show,
30439 .store = sysdev_class_store,
30440 };
30441 diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
30442 index eb4fa19..1954777 100644
30443 --- a/drivers/block/DAC960.c
30444 +++ b/drivers/block/DAC960.c
30445 @@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfiguration(DAC960_Controller_T
30446 unsigned long flags;
30447 int Channel, TargetID;
30448
30449 + pax_track_stack();
30450 +
30451 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
30452 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
30453 sizeof(DAC960_SCSI_Inquiry_T) +
30454 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
30455 index ca9c548..7e2e3f3 100644
30456 --- a/drivers/block/cciss.c
30457 +++ b/drivers/block/cciss.c
30458 @@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
30459 int err;
30460 u32 cp;
30461
30462 + memset(&arg64, 0, sizeof(arg64));
30463 +
30464 err = 0;
30465 err |=
30466 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
30467 @@ -1583,7 +1585,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
30468 return status;
30469 }
30470
30471 - /* scsi_cmd_ioctl handles these, below, though some are not */
30472 + /* scsi_cmd_blk_ioctl handles these, below, though some are not */
30473 /* very meaningful for cciss. SG_IO is the main one people want. */
30474
30475 case SG_GET_VERSION_NUM:
30476 @@ -1594,9 +1596,9 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
30477 case SG_EMULATED_HOST:
30478 case SG_IO:
30479 case SCSI_IOCTL_SEND_COMMAND:
30480 - return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
30481 + return scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
30482
30483 - /* scsi_cmd_ioctl would normally handle these, below, but */
30484 + /* scsi_cmd_blk_ioctl would normally handle these, below, but */
30485 /* they aren't a good fit for cciss, as CD-ROMs are */
30486 /* not supported, and we don't have any bus/target/lun */
30487 /* which we present to the kernel. */
30488 @@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ctlr)
30489 /* Wait (up to 20 seconds) for a command to complete */
30490
30491 for (i = 20 * HZ; i > 0; i--) {
30492 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
30493 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
30494 if (done == FIFO_EMPTY)
30495 schedule_timeout_uninterruptible(1);
30496 else
30497 @@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h, CommandList_struct *c)
30498 resend_cmd1:
30499
30500 /* Disable interrupt on the board. */
30501 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
30502 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
30503
30504 /* Make sure there is room in the command FIFO */
30505 /* Actually it should be completely empty at this time */
30506 @@ -2884,13 +2886,13 @@ resend_cmd1:
30507 /* tape side of the driver. */
30508 for (i = 200000; i > 0; i--) {
30509 /* if fifo isn't full go */
30510 - if (!(h->access.fifo_full(h)))
30511 + if (!(h->access->fifo_full(h)))
30512 break;
30513 udelay(10);
30514 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
30515 " waiting!\n", h->ctlr);
30516 }
30517 - h->access.submit_command(h, c); /* Send the cmd */
30518 + h->access->submit_command(h, c); /* Send the cmd */
30519 do {
30520 complete = pollcomplete(h->ctlr);
30521
30522 @@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
30523 while (!hlist_empty(&h->reqQ)) {
30524 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
30525 /* can't do anything if fifo is full */
30526 - if ((h->access.fifo_full(h))) {
30527 + if ((h->access->fifo_full(h))) {
30528 printk(KERN_WARNING "cciss: fifo full\n");
30529 break;
30530 }
30531 @@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
30532 h->Qdepth--;
30533
30534 /* Tell the controller execute command */
30535 - h->access.submit_command(h, c);
30536 + h->access->submit_command(h, c);
30537
30538 /* Put job onto the completed Q */
30539 addQ(&h->cmpQ, c);
30540 @@ -3393,17 +3395,17 @@ startio:
30541
30542 static inline unsigned long get_next_completion(ctlr_info_t *h)
30543 {
30544 - return h->access.command_completed(h);
30545 + return h->access->command_completed(h);
30546 }
30547
30548 static inline int interrupt_pending(ctlr_info_t *h)
30549 {
30550 - return h->access.intr_pending(h);
30551 + return h->access->intr_pending(h);
30552 }
30553
30554 static inline long interrupt_not_for_us(ctlr_info_t *h)
30555 {
30556 - return (((h->access.intr_pending(h) == 0) ||
30557 + return (((h->access->intr_pending(h) == 0) ||
30558 (h->interrupts_enabled == 0)));
30559 }
30560
30561 @@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
30562 */
30563 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
30564 c->product_name = products[prod_index].product_name;
30565 - c->access = *(products[prod_index].access);
30566 + c->access = products[prod_index].access;
30567 c->nr_cmds = c->max_commands - 4;
30568 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
30569 (readb(&c->cfgtable->Signature[1]) != 'I') ||
30570 @@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
30571 }
30572
30573 /* make sure the board interrupts are off */
30574 - hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
30575 + hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
30576 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
30577 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
30578 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
30579 @@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
30580 cciss_scsi_setup(i);
30581
30582 /* Turn the interrupts on so we can service requests */
30583 - hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
30584 + hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
30585
30586 /* Get the firmware version */
30587 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
30588 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
30589 index 04d6bf8..36e712d 100644
30590 --- a/drivers/block/cciss.h
30591 +++ b/drivers/block/cciss.h
30592 @@ -90,7 +90,7 @@ struct ctlr_info
30593 // information about each logical volume
30594 drive_info_struct *drv[CISS_MAX_LUN];
30595
30596 - struct access_method access;
30597 + struct access_method *access;
30598
30599 /* queue and queue Info */
30600 struct hlist_head reqQ;
30601 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
30602 index 6422651..bb1bdef 100644
30603 --- a/drivers/block/cpqarray.c
30604 +++ b/drivers/block/cpqarray.c
30605 @@ -402,7 +402,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
30606 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
30607 goto Enomem4;
30608 }
30609 - hba[i]->access.set_intr_mask(hba[i], 0);
30610 + hba[i]->access->set_intr_mask(hba[i], 0);
30611 if (request_irq(hba[i]->intr, do_ida_intr,
30612 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
30613 {
30614 @@ -460,7 +460,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
30615 add_timer(&hba[i]->timer);
30616
30617 /* Enable IRQ now that spinlock and rate limit timer are set up */
30618 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
30619 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
30620
30621 for(j=0; j<NWD; j++) {
30622 struct gendisk *disk = ida_gendisk[i][j];
30623 @@ -695,7 +695,7 @@ DBGINFO(
30624 for(i=0; i<NR_PRODUCTS; i++) {
30625 if (board_id == products[i].board_id) {
30626 c->product_name = products[i].product_name;
30627 - c->access = *(products[i].access);
30628 + c->access = products[i].access;
30629 break;
30630 }
30631 }
30632 @@ -793,7 +793,7 @@ static int __init cpqarray_eisa_detect(void)
30633 hba[ctlr]->intr = intr;
30634 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
30635 hba[ctlr]->product_name = products[j].product_name;
30636 - hba[ctlr]->access = *(products[j].access);
30637 + hba[ctlr]->access = products[j].access;
30638 hba[ctlr]->ctlr = ctlr;
30639 hba[ctlr]->board_id = board_id;
30640 hba[ctlr]->pci_dev = NULL; /* not PCI */
30641 @@ -896,6 +896,8 @@ static void do_ida_request(struct request_queue *q)
30642 struct scatterlist tmp_sg[SG_MAX];
30643 int i, dir, seg;
30644
30645 + pax_track_stack();
30646 +
30647 if (blk_queue_plugged(q))
30648 goto startio;
30649
30650 @@ -968,7 +970,7 @@ static void start_io(ctlr_info_t *h)
30651
30652 while((c = h->reqQ) != NULL) {
30653 /* Can't do anything if we're busy */
30654 - if (h->access.fifo_full(h) == 0)
30655 + if (h->access->fifo_full(h) == 0)
30656 return;
30657
30658 /* Get the first entry from the request Q */
30659 @@ -976,7 +978,7 @@ static void start_io(ctlr_info_t *h)
30660 h->Qdepth--;
30661
30662 /* Tell the controller to do our bidding */
30663 - h->access.submit_command(h, c);
30664 + h->access->submit_command(h, c);
30665
30666 /* Get onto the completion Q */
30667 addQ(&h->cmpQ, c);
30668 @@ -1038,7 +1040,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
30669 unsigned long flags;
30670 __u32 a,a1;
30671
30672 - istat = h->access.intr_pending(h);
30673 + istat = h->access->intr_pending(h);
30674 /* Is this interrupt for us? */
30675 if (istat == 0)
30676 return IRQ_NONE;
30677 @@ -1049,7 +1051,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
30678 */
30679 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
30680 if (istat & FIFO_NOT_EMPTY) {
30681 - while((a = h->access.command_completed(h))) {
30682 + while((a = h->access->command_completed(h))) {
30683 a1 = a; a &= ~3;
30684 if ((c = h->cmpQ) == NULL)
30685 {
30686 @@ -1434,11 +1436,11 @@ static int sendcmd(
30687 /*
30688 * Disable interrupt
30689 */
30690 - info_p->access.set_intr_mask(info_p, 0);
30691 + info_p->access->set_intr_mask(info_p, 0);
30692 /* Make sure there is room in the command FIFO */
30693 /* Actually it should be completely empty at this time. */
30694 for (i = 200000; i > 0; i--) {
30695 - temp = info_p->access.fifo_full(info_p);
30696 + temp = info_p->access->fifo_full(info_p);
30697 if (temp != 0) {
30698 break;
30699 }
30700 @@ -1451,7 +1453,7 @@ DBG(
30701 /*
30702 * Send the cmd
30703 */
30704 - info_p->access.submit_command(info_p, c);
30705 + info_p->access->submit_command(info_p, c);
30706 complete = pollcomplete(ctlr);
30707
30708 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
30709 @@ -1534,9 +1536,9 @@ static int revalidate_allvol(ctlr_info_t *host)
30710 * we check the new geometry. Then turn interrupts back on when
30711 * we're done.
30712 */
30713 - host->access.set_intr_mask(host, 0);
30714 + host->access->set_intr_mask(host, 0);
30715 getgeometry(ctlr);
30716 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
30717 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
30718
30719 for(i=0; i<NWD; i++) {
30720 struct gendisk *disk = ida_gendisk[ctlr][i];
30721 @@ -1576,7 +1578,7 @@ static int pollcomplete(int ctlr)
30722 /* Wait (up to 2 seconds) for a command to complete */
30723
30724 for (i = 200000; i > 0; i--) {
30725 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
30726 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
30727 if (done == 0) {
30728 udelay(10); /* a short fixed delay */
30729 } else
30730 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
30731 index be73e9d..7fbf140 100644
30732 --- a/drivers/block/cpqarray.h
30733 +++ b/drivers/block/cpqarray.h
30734 @@ -99,7 +99,7 @@ struct ctlr_info {
30735 drv_info_t drv[NWD];
30736 struct proc_dir_entry *proc;
30737
30738 - struct access_method access;
30739 + struct access_method *access;
30740
30741 cmdlist_t *reqQ;
30742 cmdlist_t *cmpQ;
30743 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
30744 index 8ec2d70..2804b30 100644
30745 --- a/drivers/block/loop.c
30746 +++ b/drivers/block/loop.c
30747 @@ -282,7 +282,7 @@ static int __do_lo_send_write(struct file *file,
30748 mm_segment_t old_fs = get_fs();
30749
30750 set_fs(get_ds());
30751 - bw = file->f_op->write(file, buf, len, &pos);
30752 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
30753 set_fs(old_fs);
30754 if (likely(bw == len))
30755 return 0;
30756 diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
30757 index 26ada47..083c480 100644
30758 --- a/drivers/block/nbd.c
30759 +++ b/drivers/block/nbd.c
30760 @@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
30761 struct kvec iov;
30762 sigset_t blocked, oldset;
30763
30764 + pax_track_stack();
30765 +
30766 if (unlikely(!sock)) {
30767 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
30768 lo->disk->disk_name, (send ? "send" : "recv"));
30769 @@ -569,6 +571,8 @@ static void do_nbd_request(struct request_queue *q)
30770 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
30771 unsigned int cmd, unsigned long arg)
30772 {
30773 + pax_track_stack();
30774 +
30775 switch (cmd) {
30776 case NBD_DISCONNECT: {
30777 struct request sreq;
30778 diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
30779 index a5d585d..d087be3 100644
30780 --- a/drivers/block/pktcdvd.c
30781 +++ b/drivers/block/pktcdvd.c
30782 @@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kobject *kobj,
30783 return len;
30784 }
30785
30786 -static struct sysfs_ops kobj_pkt_ops = {
30787 +static const struct sysfs_ops kobj_pkt_ops = {
30788 .show = kobj_pkt_show,
30789 .store = kobj_pkt_store
30790 };
30791 diff --git a/drivers/block/ub.c b/drivers/block/ub.c
30792 index c739b20..c6ac1b2 100644
30793 --- a/drivers/block/ub.c
30794 +++ b/drivers/block/ub.c
30795 @@ -1726,10 +1726,9 @@ static int ub_bd_release(struct gendisk *disk, fmode_t mode)
30796 static int ub_bd_ioctl(struct block_device *bdev, fmode_t mode,
30797 unsigned int cmd, unsigned long arg)
30798 {
30799 - struct gendisk *disk = bdev->bd_disk;
30800 void __user *usermem = (void __user *) arg;
30801
30802 - return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, usermem);
30803 + return scsi_cmd_blk_ioctl(bdev, mode, cmd, usermem);
30804 }
30805
30806 /*
30807 diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
30808 index 51042f0ba7..44d019b 100644
30809 --- a/drivers/block/virtio_blk.c
30810 +++ b/drivers/block/virtio_blk.c
30811 @@ -200,8 +200,8 @@ static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
30812 if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
30813 return -ENOTTY;
30814
30815 - return scsi_cmd_ioctl(disk->queue, disk, mode, cmd,
30816 - (void __user *)data);
30817 + return scsi_cmd_blk_ioctl(bdev, mode, cmd,
30818 + (void __user *)data);
30819 }
30820
30821 /* We provide getgeo only to please some old bootloader/partitioning tools */
30822 diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
30823 index 614da5b..59cccc9 100644
30824 --- a/drivers/cdrom/cdrom.c
30825 +++ b/drivers/cdrom/cdrom.c
30826 @@ -2684,12 +2684,11 @@ int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
30827 {
30828 void __user *argp = (void __user *)arg;
30829 int ret;
30830 - struct gendisk *disk = bdev->bd_disk;
30831
30832 /*
30833 * Try the generic SCSI command ioctl's first.
30834 */
30835 - ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
30836 + ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
30837 if (ret != -ENOTTY)
30838 return ret;
30839
30840 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
30841 index 6aad99e..89cd142 100644
30842 --- a/drivers/char/Kconfig
30843 +++ b/drivers/char/Kconfig
30844 @@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
30845
30846 config DEVKMEM
30847 bool "/dev/kmem virtual device support"
30848 - default y
30849 + default n
30850 + depends on !GRKERNSEC_KMEM
30851 help
30852 Say Y here if you want to support the /dev/kmem device. The
30853 /dev/kmem device is rarely used, but can be used for certain
30854 @@ -1114,6 +1115,7 @@ config DEVPORT
30855 bool
30856 depends on !M68K
30857 depends on ISA || PCI
30858 + depends on !GRKERNSEC_KMEM
30859 default y
30860
30861 source "drivers/s390/char/Kconfig"
30862 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
30863 index a96f319..a778a5b 100644
30864 --- a/drivers/char/agp/frontend.c
30865 +++ b/drivers/char/agp/frontend.c
30866 @@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
30867 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
30868 return -EFAULT;
30869
30870 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
30871 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
30872 return -EFAULT;
30873
30874 client = agp_find_client_by_pid(reserve.pid);
30875 diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
30876 index d8cff90..9628e70 100644
30877 --- a/drivers/char/briq_panel.c
30878 +++ b/drivers/char/briq_panel.c
30879 @@ -10,6 +10,7 @@
30880 #include <linux/types.h>
30881 #include <linux/errno.h>
30882 #include <linux/tty.h>
30883 +#include <linux/mutex.h>
30884 #include <linux/timer.h>
30885 #include <linux/kernel.h>
30886 #include <linux/wait.h>
30887 @@ -36,6 +37,7 @@ static int vfd_is_open;
30888 static unsigned char vfd[40];
30889 static int vfd_cursor;
30890 static unsigned char ledpb, led;
30891 +static DEFINE_MUTEX(vfd_mutex);
30892
30893 static void update_vfd(void)
30894 {
30895 @@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
30896 if (!vfd_is_open)
30897 return -EBUSY;
30898
30899 + mutex_lock(&vfd_mutex);
30900 for (;;) {
30901 char c;
30902 if (!indx)
30903 break;
30904 - if (get_user(c, buf))
30905 + if (get_user(c, buf)) {
30906 + mutex_unlock(&vfd_mutex);
30907 return -EFAULT;
30908 + }
30909 if (esc) {
30910 set_led(c);
30911 esc = 0;
30912 @@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
30913 buf++;
30914 }
30915 update_vfd();
30916 + mutex_unlock(&vfd_mutex);
30917
30918 return len;
30919 }
30920 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
30921 index 31e7c91..161afc0 100644
30922 --- a/drivers/char/genrtc.c
30923 +++ b/drivers/char/genrtc.c
30924 @@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *inode, struct file *file,
30925 switch (cmd) {
30926
30927 case RTC_PLL_GET:
30928 + memset(&pll, 0, sizeof(pll));
30929 if (get_rtc_pll(&pll))
30930 return -EINVAL;
30931 else
30932 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
30933 index 006466d..a2bb21c 100644
30934 --- a/drivers/char/hpet.c
30935 +++ b/drivers/char/hpet.c
30936 @@ -430,7 +430,7 @@ static int hpet_release(struct inode *inode, struct file *file)
30937 return 0;
30938 }
30939
30940 -static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
30941 +static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
30942
30943 static int
30944 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
30945 @@ -565,7 +565,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
30946 }
30947
30948 static int
30949 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
30950 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
30951 {
30952 struct hpet_timer __iomem *timer;
30953 struct hpet __iomem *hpet;
30954 @@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
30955 {
30956 struct hpet_info info;
30957
30958 + memset(&info, 0, sizeof(info));
30959 +
30960 if (devp->hd_ireqfreq)
30961 info.hi_ireqfreq =
30962 hpet_time_div(hpetp, devp->hd_ireqfreq);
30963 - else
30964 - info.hi_ireqfreq = 0;
30965 info.hi_flags =
30966 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
30967 info.hi_hpet = hpetp->hp_which;
30968 diff --git a/drivers/char/hvc_beat.c b/drivers/char/hvc_beat.c
30969 index 0afc8b8..6913fc3 100644
30970 --- a/drivers/char/hvc_beat.c
30971 +++ b/drivers/char/hvc_beat.c
30972 @@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t vtermno, const char *buf, int cnt)
30973 return cnt;
30974 }
30975
30976 -static struct hv_ops hvc_beat_get_put_ops = {
30977 +static const struct hv_ops hvc_beat_get_put_ops = {
30978 .get_chars = hvc_beat_get_chars,
30979 .put_chars = hvc_beat_put_chars,
30980 };
30981 diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
30982 index 98097f2..407dddc 100644
30983 --- a/drivers/char/hvc_console.c
30984 +++ b/drivers/char/hvc_console.c
30985 @@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_index(int index)
30986 * console interfaces but can still be used as a tty device. This has to be
30987 * static because kmalloc will not work during early console init.
30988 */
30989 -static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
30990 +static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
30991 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
30992 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
30993
30994 @@ -249,7 +249,7 @@ static void destroy_hvc_struct(struct kref *kref)
30995 * vty adapters do NOT get an hvc_instantiate() callback since they
30996 * appear after early console init.
30997 */
30998 -int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
30999 +int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
31000 {
31001 struct hvc_struct *hp;
31002
31003 @@ -758,7 +758,7 @@ static const struct tty_operations hvc_ops = {
31004 };
31005
31006 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
31007 - struct hv_ops *ops, int outbuf_size)
31008 + const struct hv_ops *ops, int outbuf_size)
31009 {
31010 struct hvc_struct *hp;
31011 int i;
31012 diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
31013 index 10950ca..ed176c3 100644
31014 --- a/drivers/char/hvc_console.h
31015 +++ b/drivers/char/hvc_console.h
31016 @@ -55,7 +55,7 @@ struct hvc_struct {
31017 int outbuf_size;
31018 int n_outbuf;
31019 uint32_t vtermno;
31020 - struct hv_ops *ops;
31021 + const struct hv_ops *ops;
31022 int irq_requested;
31023 int data;
31024 struct winsize ws;
31025 @@ -76,11 +76,11 @@ struct hv_ops {
31026 };
31027
31028 /* Register a vterm and a slot index for use as a console (console_init) */
31029 -extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
31030 +extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
31031
31032 /* register a vterm for hvc tty operation (module_init or hotplug add) */
31033 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
31034 - struct hv_ops *ops, int outbuf_size);
31035 + const struct hv_ops *ops, int outbuf_size);
31036 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
31037 extern int hvc_remove(struct hvc_struct *hp);
31038
31039 diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c
31040 index 936d05b..fd02426 100644
31041 --- a/drivers/char/hvc_iseries.c
31042 +++ b/drivers/char/hvc_iseries.c
31043 @@ -197,7 +197,7 @@ done:
31044 return sent;
31045 }
31046
31047 -static struct hv_ops hvc_get_put_ops = {
31048 +static const struct hv_ops hvc_get_put_ops = {
31049 .get_chars = get_chars,
31050 .put_chars = put_chars,
31051 .notifier_add = notifier_add_irq,
31052 diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
31053 index b0e168f..69cda2a 100644
31054 --- a/drivers/char/hvc_iucv.c
31055 +++ b/drivers/char/hvc_iucv.c
31056 @@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(struct device *dev)
31057
31058
31059 /* HVC operations */
31060 -static struct hv_ops hvc_iucv_ops = {
31061 +static const struct hv_ops hvc_iucv_ops = {
31062 .get_chars = hvc_iucv_get_chars,
31063 .put_chars = hvc_iucv_put_chars,
31064 .notifier_add = hvc_iucv_notifier_add,
31065 diff --git a/drivers/char/hvc_rtas.c b/drivers/char/hvc_rtas.c
31066 index 88590d0..61c4a61 100644
31067 --- a/drivers/char/hvc_rtas.c
31068 +++ b/drivers/char/hvc_rtas.c
31069 @@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_t vtermno, char *buf, int count)
31070 return i;
31071 }
31072
31073 -static struct hv_ops hvc_rtas_get_put_ops = {
31074 +static const struct hv_ops hvc_rtas_get_put_ops = {
31075 .get_chars = hvc_rtas_read_console,
31076 .put_chars = hvc_rtas_write_console,
31077 };
31078 diff --git a/drivers/char/hvc_udbg.c b/drivers/char/hvc_udbg.c
31079 index bd63ba8..b0957e6 100644
31080 --- a/drivers/char/hvc_udbg.c
31081 +++ b/drivers/char/hvc_udbg.c
31082 @@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno, char *buf, int count)
31083 return i;
31084 }
31085
31086 -static struct hv_ops hvc_udbg_ops = {
31087 +static const struct hv_ops hvc_udbg_ops = {
31088 .get_chars = hvc_udbg_get,
31089 .put_chars = hvc_udbg_put,
31090 };
31091 diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
31092 index 10be343..27370e9 100644
31093 --- a/drivers/char/hvc_vio.c
31094 +++ b/drivers/char/hvc_vio.c
31095 @@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t vtermno, char *buf, int count)
31096 return got;
31097 }
31098
31099 -static struct hv_ops hvc_get_put_ops = {
31100 +static const struct hv_ops hvc_get_put_ops = {
31101 .get_chars = filtered_get_chars,
31102 .put_chars = hvc_put_chars,
31103 .notifier_add = notifier_add_irq,
31104 diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
31105 index a6ee32b..94f8c26 100644
31106 --- a/drivers/char/hvc_xen.c
31107 +++ b/drivers/char/hvc_xen.c
31108 @@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno, char *buf, int len)
31109 return recv;
31110 }
31111
31112 -static struct hv_ops hvc_ops = {
31113 +static const struct hv_ops hvc_ops = {
31114 .get_chars = read_console,
31115 .put_chars = write_console,
31116 .notifier_add = notifier_add_irq,
31117 diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
31118 index 266b858..f3ee0bb 100644
31119 --- a/drivers/char/hvcs.c
31120 +++ b/drivers/char/hvcs.c
31121 @@ -82,6 +82,7 @@
31122 #include <asm/hvcserver.h>
31123 #include <asm/uaccess.h>
31124 #include <asm/vio.h>
31125 +#include <asm/local.h>
31126
31127 /*
31128 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
31129 @@ -269,7 +270,7 @@ struct hvcs_struct {
31130 unsigned int index;
31131
31132 struct tty_struct *tty;
31133 - int open_count;
31134 + local_t open_count;
31135
31136 /*
31137 * Used to tell the driver kernel_thread what operations need to take
31138 @@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
31139
31140 spin_lock_irqsave(&hvcsd->lock, flags);
31141
31142 - if (hvcsd->open_count > 0) {
31143 + if (local_read(&hvcsd->open_count) > 0) {
31144 spin_unlock_irqrestore(&hvcsd->lock, flags);
31145 printk(KERN_INFO "HVCS: vterm state unchanged. "
31146 "The hvcs device node is still in use.\n");
31147 @@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
31148 if ((retval = hvcs_partner_connect(hvcsd)))
31149 goto error_release;
31150
31151 - hvcsd->open_count = 1;
31152 + local_set(&hvcsd->open_count, 1);
31153 hvcsd->tty = tty;
31154 tty->driver_data = hvcsd;
31155
31156 @@ -1169,7 +1170,7 @@ fast_open:
31157
31158 spin_lock_irqsave(&hvcsd->lock, flags);
31159 kref_get(&hvcsd->kref);
31160 - hvcsd->open_count++;
31161 + local_inc(&hvcsd->open_count);
31162 hvcsd->todo_mask |= HVCS_SCHED_READ;
31163 spin_unlock_irqrestore(&hvcsd->lock, flags);
31164
31165 @@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
31166 hvcsd = tty->driver_data;
31167
31168 spin_lock_irqsave(&hvcsd->lock, flags);
31169 - if (--hvcsd->open_count == 0) {
31170 + if (local_dec_and_test(&hvcsd->open_count)) {
31171
31172 vio_disable_interrupts(hvcsd->vdev);
31173
31174 @@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
31175 free_irq(irq, hvcsd);
31176 kref_put(&hvcsd->kref, destroy_hvcs_struct);
31177 return;
31178 - } else if (hvcsd->open_count < 0) {
31179 + } else if (local_read(&hvcsd->open_count) < 0) {
31180 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
31181 " is missmanaged.\n",
31182 - hvcsd->vdev->unit_address, hvcsd->open_count);
31183 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
31184 }
31185
31186 spin_unlock_irqrestore(&hvcsd->lock, flags);
31187 @@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struct * tty)
31188
31189 spin_lock_irqsave(&hvcsd->lock, flags);
31190 /* Preserve this so that we know how many kref refs to put */
31191 - temp_open_count = hvcsd->open_count;
31192 + temp_open_count = local_read(&hvcsd->open_count);
31193
31194 /*
31195 * Don't kref put inside the spinlock because the destruction
31196 @@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struct * tty)
31197 hvcsd->tty->driver_data = NULL;
31198 hvcsd->tty = NULL;
31199
31200 - hvcsd->open_count = 0;
31201 + local_set(&hvcsd->open_count, 0);
31202
31203 /* This will drop any buffered data on the floor which is OK in a hangup
31204 * scenario. */
31205 @@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct *tty,
31206 * the middle of a write operation? This is a crummy place to do this
31207 * but we want to keep it all in the spinlock.
31208 */
31209 - if (hvcsd->open_count <= 0) {
31210 + if (local_read(&hvcsd->open_count) <= 0) {
31211 spin_unlock_irqrestore(&hvcsd->lock, flags);
31212 return -ENODEV;
31213 }
31214 @@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_struct *tty)
31215 {
31216 struct hvcs_struct *hvcsd = tty->driver_data;
31217
31218 - if (!hvcsd || hvcsd->open_count <= 0)
31219 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
31220 return 0;
31221
31222 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
31223 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
31224 index ec5e3f8..02455ba 100644
31225 --- a/drivers/char/ipmi/ipmi_msghandler.c
31226 +++ b/drivers/char/ipmi/ipmi_msghandler.c
31227 @@ -414,7 +414,7 @@ struct ipmi_smi {
31228 struct proc_dir_entry *proc_dir;
31229 char proc_dir_name[10];
31230
31231 - atomic_t stats[IPMI_NUM_STATS];
31232 + atomic_unchecked_t stats[IPMI_NUM_STATS];
31233
31234 /*
31235 * run_to_completion duplicate of smb_info, smi_info
31236 @@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
31237
31238
31239 #define ipmi_inc_stat(intf, stat) \
31240 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
31241 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
31242 #define ipmi_get_stat(intf, stat) \
31243 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
31244 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
31245
31246 static int is_lan_addr(struct ipmi_addr *addr)
31247 {
31248 @@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
31249 INIT_LIST_HEAD(&intf->cmd_rcvrs);
31250 init_waitqueue_head(&intf->waitq);
31251 for (i = 0; i < IPMI_NUM_STATS; i++)
31252 - atomic_set(&intf->stats[i], 0);
31253 + atomic_set_unchecked(&intf->stats[i], 0);
31254
31255 intf->proc_dir = NULL;
31256
31257 @@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
31258 struct ipmi_smi_msg smi_msg;
31259 struct ipmi_recv_msg recv_msg;
31260
31261 + pax_track_stack();
31262 +
31263 si = (struct ipmi_system_interface_addr *) &addr;
31264 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
31265 si->channel = IPMI_BMC_CHANNEL;
31266 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
31267 index abae8c9..8021979 100644
31268 --- a/drivers/char/ipmi/ipmi_si_intf.c
31269 +++ b/drivers/char/ipmi/ipmi_si_intf.c
31270 @@ -277,7 +277,7 @@ struct smi_info {
31271 unsigned char slave_addr;
31272
31273 /* Counters and things for the proc filesystem. */
31274 - atomic_t stats[SI_NUM_STATS];
31275 + atomic_unchecked_t stats[SI_NUM_STATS];
31276
31277 struct task_struct *thread;
31278
31279 @@ -285,9 +285,9 @@ struct smi_info {
31280 };
31281
31282 #define smi_inc_stat(smi, stat) \
31283 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
31284 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
31285 #define smi_get_stat(smi, stat) \
31286 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
31287 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
31288
31289 #define SI_MAX_PARMS 4
31290
31291 @@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info *new_smi)
31292 atomic_set(&new_smi->req_events, 0);
31293 new_smi->run_to_completion = 0;
31294 for (i = 0; i < SI_NUM_STATS; i++)
31295 - atomic_set(&new_smi->stats[i], 0);
31296 + atomic_set_unchecked(&new_smi->stats[i], 0);
31297
31298 new_smi->interrupt_disabled = 0;
31299 atomic_set(&new_smi->stop_operation, 0);
31300 diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
31301 index 402838f..55e2200 100644
31302 --- a/drivers/char/istallion.c
31303 +++ b/drivers/char/istallion.c
31304 @@ -187,7 +187,6 @@ static struct ktermios stli_deftermios = {
31305 * re-used for each stats call.
31306 */
31307 static comstats_t stli_comstats;
31308 -static combrd_t stli_brdstats;
31309 static struct asystats stli_cdkstats;
31310
31311 /*****************************************************************************/
31312 @@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __user *bp)
31313 {
31314 struct stlibrd *brdp;
31315 unsigned int i;
31316 + combrd_t stli_brdstats;
31317
31318 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
31319 return -EFAULT;
31320 @@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stliport __user *arg)
31321 struct stliport stli_dummyport;
31322 struct stliport *portp;
31323
31324 + pax_track_stack();
31325 +
31326 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
31327 return -EFAULT;
31328 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
31329 @@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stlibrd __user *arg)
31330 struct stlibrd stli_dummybrd;
31331 struct stlibrd *brdp;
31332
31333 + pax_track_stack();
31334 +
31335 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
31336 return -EFAULT;
31337 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
31338 diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
31339 index 950837c..e55a288 100644
31340 --- a/drivers/char/keyboard.c
31341 +++ b/drivers/char/keyboard.c
31342 @@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
31343 kbd->kbdmode == VC_MEDIUMRAW) &&
31344 value != KVAL(K_SAK))
31345 return; /* SAK is allowed even in raw mode */
31346 +
31347 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
31348 + {
31349 + void *func = fn_handler[value];
31350 + if (func == fn_show_state || func == fn_show_ptregs ||
31351 + func == fn_show_mem)
31352 + return;
31353 + }
31354 +#endif
31355 +
31356 fn_handler[value](vc);
31357 }
31358
31359 @@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_ids[] = {
31360 .evbit = { BIT_MASK(EV_SND) },
31361 },
31362
31363 - { }, /* Terminating entry */
31364 + { 0 }, /* Terminating entry */
31365 };
31366
31367 MODULE_DEVICE_TABLE(input, kbd_ids);
31368 diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
31369 index 87c67b4..230527a 100644
31370 --- a/drivers/char/mbcs.c
31371 +++ b/drivers/char/mbcs.c
31372 @@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
31373 return 0;
31374 }
31375
31376 -static const struct cx_device_id __devinitdata mbcs_id_table[] = {
31377 +static const struct cx_device_id __devinitconst mbcs_id_table[] = {
31378 {
31379 .part_num = MBCS_PART_NUM,
31380 .mfg_num = MBCS_MFG_NUM,
31381 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
31382 index 1270f64..8495f49 100644
31383 --- a/drivers/char/mem.c
31384 +++ b/drivers/char/mem.c
31385 @@ -18,6 +18,7 @@
31386 #include <linux/raw.h>
31387 #include <linux/tty.h>
31388 #include <linux/capability.h>
31389 +#include <linux/security.h>
31390 #include <linux/ptrace.h>
31391 #include <linux/device.h>
31392 #include <linux/highmem.h>
31393 @@ -35,6 +36,10 @@
31394 # include <linux/efi.h>
31395 #endif
31396
31397 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31398 +extern struct file_operations grsec_fops;
31399 +#endif
31400 +
31401 static inline unsigned long size_inside_page(unsigned long start,
31402 unsigned long size)
31403 {
31404 @@ -102,9 +107,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31405
31406 while (cursor < to) {
31407 if (!devmem_is_allowed(pfn)) {
31408 +#ifdef CONFIG_GRKERNSEC_KMEM
31409 + gr_handle_mem_readwrite(from, to);
31410 +#else
31411 printk(KERN_INFO
31412 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
31413 current->comm, from, to);
31414 +#endif
31415 return 0;
31416 }
31417 cursor += PAGE_SIZE;
31418 @@ -112,6 +121,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31419 }
31420 return 1;
31421 }
31422 +#elif defined(CONFIG_GRKERNSEC_KMEM)
31423 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31424 +{
31425 + return 0;
31426 +}
31427 #else
31428 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31429 {
31430 @@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * file, char __user * buf,
31431 #endif
31432
31433 while (count > 0) {
31434 + char *temp;
31435 +
31436 /*
31437 * Handle first page in case it's not aligned
31438 */
31439 @@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * file, char __user * buf,
31440 if (!ptr)
31441 return -EFAULT;
31442
31443 - if (copy_to_user(buf, ptr, sz)) {
31444 +#ifdef CONFIG_PAX_USERCOPY
31445 + temp = kmalloc(sz, GFP_KERNEL);
31446 + if (!temp) {
31447 + unxlate_dev_mem_ptr(p, ptr);
31448 + return -ENOMEM;
31449 + }
31450 + memcpy(temp, ptr, sz);
31451 +#else
31452 + temp = ptr;
31453 +#endif
31454 +
31455 + if (copy_to_user(buf, temp, sz)) {
31456 +
31457 +#ifdef CONFIG_PAX_USERCOPY
31458 + kfree(temp);
31459 +#endif
31460 +
31461 unxlate_dev_mem_ptr(p, ptr);
31462 return -EFAULT;
31463 }
31464
31465 +#ifdef CONFIG_PAX_USERCOPY
31466 + kfree(temp);
31467 +#endif
31468 +
31469 unxlate_dev_mem_ptr(p, ptr);
31470
31471 buf += sz;
31472 @@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31473 size_t count, loff_t *ppos)
31474 {
31475 unsigned long p = *ppos;
31476 - ssize_t low_count, read, sz;
31477 + ssize_t low_count, read, sz, err = 0;
31478 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
31479 - int err = 0;
31480
31481 read = 0;
31482 if (p < (unsigned long) high_memory) {
31483 @@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31484 }
31485 #endif
31486 while (low_count > 0) {
31487 + char *temp;
31488 +
31489 sz = size_inside_page(p, low_count);
31490
31491 /*
31492 @@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31493 */
31494 kbuf = xlate_dev_kmem_ptr((char *)p);
31495
31496 - if (copy_to_user(buf, kbuf, sz))
31497 +#ifdef CONFIG_PAX_USERCOPY
31498 + temp = kmalloc(sz, GFP_KERNEL);
31499 + if (!temp)
31500 + return -ENOMEM;
31501 + memcpy(temp, kbuf, sz);
31502 +#else
31503 + temp = kbuf;
31504 +#endif
31505 +
31506 + err = copy_to_user(buf, temp, sz);
31507 +
31508 +#ifdef CONFIG_PAX_USERCOPY
31509 + kfree(temp);
31510 +#endif
31511 +
31512 + if (err)
31513 return -EFAULT;
31514 buf += sz;
31515 p += sz;
31516 @@ -889,6 +941,9 @@ static const struct memdev {
31517 #ifdef CONFIG_CRASH_DUMP
31518 [12] = { "oldmem", 0, &oldmem_fops, NULL },
31519 #endif
31520 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31521 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
31522 +#endif
31523 };
31524
31525 static int memory_open(struct inode *inode, struct file *filp)
31526 diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c
31527 index 674b3ab..a8d1970 100644
31528 --- a/drivers/char/pcmcia/ipwireless/tty.c
31529 +++ b/drivers/char/pcmcia/ipwireless/tty.c
31530 @@ -29,6 +29,7 @@
31531 #include <linux/tty_driver.h>
31532 #include <linux/tty_flip.h>
31533 #include <linux/uaccess.h>
31534 +#include <asm/local.h>
31535
31536 #include "tty.h"
31537 #include "network.h"
31538 @@ -51,7 +52,7 @@ struct ipw_tty {
31539 int tty_type;
31540 struct ipw_network *network;
31541 struct tty_struct *linux_tty;
31542 - int open_count;
31543 + local_t open_count;
31544 unsigned int control_lines;
31545 struct mutex ipw_tty_mutex;
31546 int tx_bytes_queued;
31547 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
31548 mutex_unlock(&tty->ipw_tty_mutex);
31549 return -ENODEV;
31550 }
31551 - if (tty->open_count == 0)
31552 + if (local_read(&tty->open_count) == 0)
31553 tty->tx_bytes_queued = 0;
31554
31555 - tty->open_count++;
31556 + local_inc(&tty->open_count);
31557
31558 tty->linux_tty = linux_tty;
31559 linux_tty->driver_data = tty;
31560 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
31561
31562 static void do_ipw_close(struct ipw_tty *tty)
31563 {
31564 - tty->open_count--;
31565 -
31566 - if (tty->open_count == 0) {
31567 + if (local_dec_return(&tty->open_count) == 0) {
31568 struct tty_struct *linux_tty = tty->linux_tty;
31569
31570 if (linux_tty != NULL) {
31571 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
31572 return;
31573
31574 mutex_lock(&tty->ipw_tty_mutex);
31575 - if (tty->open_count == 0) {
31576 + if (local_read(&tty->open_count) == 0) {
31577 mutex_unlock(&tty->ipw_tty_mutex);
31578 return;
31579 }
31580 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
31581 return;
31582 }
31583
31584 - if (!tty->open_count) {
31585 + if (!local_read(&tty->open_count)) {
31586 mutex_unlock(&tty->ipw_tty_mutex);
31587 return;
31588 }
31589 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
31590 return -ENODEV;
31591
31592 mutex_lock(&tty->ipw_tty_mutex);
31593 - if (!tty->open_count) {
31594 + if (!local_read(&tty->open_count)) {
31595 mutex_unlock(&tty->ipw_tty_mutex);
31596 return -EINVAL;
31597 }
31598 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
31599 if (!tty)
31600 return -ENODEV;
31601
31602 - if (!tty->open_count)
31603 + if (!local_read(&tty->open_count))
31604 return -EINVAL;
31605
31606 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
31607 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
31608 if (!tty)
31609 return 0;
31610
31611 - if (!tty->open_count)
31612 + if (!local_read(&tty->open_count))
31613 return 0;
31614
31615 return tty->tx_bytes_queued;
31616 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty, struct file *file)
31617 if (!tty)
31618 return -ENODEV;
31619
31620 - if (!tty->open_count)
31621 + if (!local_read(&tty->open_count))
31622 return -EINVAL;
31623
31624 return get_control_lines(tty);
31625 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty, struct file *file,
31626 if (!tty)
31627 return -ENODEV;
31628
31629 - if (!tty->open_count)
31630 + if (!local_read(&tty->open_count))
31631 return -EINVAL;
31632
31633 return set_control_lines(tty, set, clear);
31634 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty, struct file *file,
31635 if (!tty)
31636 return -ENODEV;
31637
31638 - if (!tty->open_count)
31639 + if (!local_read(&tty->open_count))
31640 return -EINVAL;
31641
31642 /* FIXME: Exactly how is the tty object locked here .. */
31643 @@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
31644 against a parallel ioctl etc */
31645 mutex_lock(&ttyj->ipw_tty_mutex);
31646 }
31647 - while (ttyj->open_count)
31648 + while (local_read(&ttyj->open_count))
31649 do_ipw_close(ttyj);
31650 ipwireless_disassociate_network_ttys(network,
31651 ttyj->channel_idx);
31652 diff --git a/drivers/char/pty.c b/drivers/char/pty.c
31653 index 62f282e..e45c45c 100644
31654 --- a/drivers/char/pty.c
31655 +++ b/drivers/char/pty.c
31656 @@ -736,8 +736,10 @@ static void __init unix98_pty_init(void)
31657 register_sysctl_table(pty_root_table);
31658
31659 /* Now create the /dev/ptmx special device */
31660 + pax_open_kernel();
31661 tty_default_fops(&ptmx_fops);
31662 - ptmx_fops.open = ptmx_open;
31663 + *(void **)&ptmx_fops.open = ptmx_open;
31664 + pax_close_kernel();
31665
31666 cdev_init(&ptmx_cdev, &ptmx_fops);
31667 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
31668 diff --git a/drivers/char/random.c b/drivers/char/random.c
31669 index 3a19e2d..6ed09d3 100644
31670 --- a/drivers/char/random.c
31671 +++ b/drivers/char/random.c
31672 @@ -254,8 +254,13 @@
31673 /*
31674 * Configuration information
31675 */
31676 +#ifdef CONFIG_GRKERNSEC_RANDNET
31677 +#define INPUT_POOL_WORDS 512
31678 +#define OUTPUT_POOL_WORDS 128
31679 +#else
31680 #define INPUT_POOL_WORDS 128
31681 #define OUTPUT_POOL_WORDS 32
31682 +#endif
31683 #define SEC_XFER_SIZE 512
31684
31685 /*
31686 @@ -292,10 +297,17 @@ static struct poolinfo {
31687 int poolwords;
31688 int tap1, tap2, tap3, tap4, tap5;
31689 } poolinfo_table[] = {
31690 +#ifdef CONFIG_GRKERNSEC_RANDNET
31691 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
31692 + { 512, 411, 308, 208, 104, 1 },
31693 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
31694 + { 128, 103, 76, 51, 25, 1 },
31695 +#else
31696 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
31697 { 128, 103, 76, 51, 25, 1 },
31698 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
31699 { 32, 26, 20, 14, 7, 1 },
31700 +#endif
31701 #if 0
31702 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
31703 { 2048, 1638, 1231, 819, 411, 1 },
31704 @@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
31705 #include <linux/sysctl.h>
31706
31707 static int min_read_thresh = 8, min_write_thresh;
31708 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
31709 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
31710 static int max_write_thresh = INPUT_POOL_WORDS * 32;
31711 static char sysctl_bootid[16];
31712
31713 diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
31714 index 0e29a23..0efc2c2 100644
31715 --- a/drivers/char/rocket.c
31716 +++ b/drivers/char/rocket.c
31717 @@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info, struct rocket_ports __user *retports)
31718 struct rocket_ports tmp;
31719 int board;
31720
31721 + pax_track_stack();
31722 +
31723 if (!retports)
31724 return -EFAULT;
31725 memset(&tmp, 0, sizeof (tmp));
31726 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
31727 index 8c262aa..4d3b058 100644
31728 --- a/drivers/char/sonypi.c
31729 +++ b/drivers/char/sonypi.c
31730 @@ -55,6 +55,7 @@
31731 #include <asm/uaccess.h>
31732 #include <asm/io.h>
31733 #include <asm/system.h>
31734 +#include <asm/local.h>
31735
31736 #include <linux/sonypi.h>
31737
31738 @@ -491,7 +492,7 @@ static struct sonypi_device {
31739 spinlock_t fifo_lock;
31740 wait_queue_head_t fifo_proc_list;
31741 struct fasync_struct *fifo_async;
31742 - int open_count;
31743 + local_t open_count;
31744 int model;
31745 struct input_dev *input_jog_dev;
31746 struct input_dev *input_key_dev;
31747 @@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
31748 static int sonypi_misc_release(struct inode *inode, struct file *file)
31749 {
31750 mutex_lock(&sonypi_device.lock);
31751 - sonypi_device.open_count--;
31752 + local_dec(&sonypi_device.open_count);
31753 mutex_unlock(&sonypi_device.lock);
31754 return 0;
31755 }
31756 @@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
31757 lock_kernel();
31758 mutex_lock(&sonypi_device.lock);
31759 /* Flush input queue on first open */
31760 - if (!sonypi_device.open_count)
31761 + if (!local_read(&sonypi_device.open_count))
31762 kfifo_reset(sonypi_device.fifo);
31763 - sonypi_device.open_count++;
31764 + local_inc(&sonypi_device.open_count);
31765 mutex_unlock(&sonypi_device.lock);
31766 unlock_kernel();
31767 return 0;
31768 diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
31769 index db6dcfa..13834cb 100644
31770 --- a/drivers/char/stallion.c
31771 +++ b/drivers/char/stallion.c
31772 @@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlport __user *arg)
31773 struct stlport stl_dummyport;
31774 struct stlport *portp;
31775
31776 + pax_track_stack();
31777 +
31778 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
31779 return -EFAULT;
31780 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
31781 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
31782 index a0789f6..cea3902 100644
31783 --- a/drivers/char/tpm/tpm.c
31784 +++ b/drivers/char/tpm/tpm.c
31785 @@ -405,7 +405,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
31786 chip->vendor.req_complete_val)
31787 goto out_recv;
31788
31789 - if ((status == chip->vendor.req_canceled)) {
31790 + if (status == chip->vendor.req_canceled) {
31791 dev_err(chip->dev, "Operation Canceled\n");
31792 rc = -ECANCELED;
31793 goto out;
31794 @@ -824,6 +824,8 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
31795
31796 struct tpm_chip *chip = dev_get_drvdata(dev);
31797
31798 + pax_track_stack();
31799 +
31800 tpm_cmd.header.in = tpm_readpubek_header;
31801 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
31802 "attempting to read the PUBEK");
31803 diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
31804 index bf2170f..ce8cab9 100644
31805 --- a/drivers/char/tpm/tpm_bios.c
31806 +++ b/drivers/char/tpm/tpm_bios.c
31807 @@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
31808 event = addr;
31809
31810 if ((event->event_type == 0 && event->event_size == 0) ||
31811 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
31812 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
31813 return NULL;
31814
31815 return addr;
31816 @@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
31817 return NULL;
31818
31819 if ((event->event_type == 0 && event->event_size == 0) ||
31820 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
31821 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
31822 return NULL;
31823
31824 (*pos)++;
31825 @@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
31826 int i;
31827
31828 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
31829 - seq_putc(m, data[i]);
31830 + if (!seq_putc(m, data[i]))
31831 + return -EFAULT;
31832
31833 return 0;
31834 }
31835 @@ -409,8 +410,13 @@ static int read_log(struct tpm_bios_log *log)
31836 log->bios_event_log_end = log->bios_event_log + len;
31837
31838 virt = acpi_os_map_memory(start, len);
31839 + if (!virt) {
31840 + kfree(log->bios_event_log);
31841 + log->bios_event_log = NULL;
31842 + return -EFAULT;
31843 + }
31844
31845 - memcpy(log->bios_event_log, virt, len);
31846 + memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
31847
31848 acpi_os_unmap_memory(virt, len);
31849 return 0;
31850 diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
31851 index 123cedf..137edef 100644
31852 --- a/drivers/char/tty_io.c
31853 +++ b/drivers/char/tty_io.c
31854 @@ -1774,6 +1774,7 @@ got_driver:
31855
31856 if (IS_ERR(tty)) {
31857 mutex_unlock(&tty_mutex);
31858 + tty_driver_kref_put(driver);
31859 return PTR_ERR(tty);
31860 }
31861 }
31862 @@ -2603,8 +2604,10 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
31863 return retval;
31864 }
31865
31866 +EXPORT_SYMBOL(tty_ioctl);
31867 +
31868 #ifdef CONFIG_COMPAT
31869 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
31870 +long tty_compat_ioctl(struct file *file, unsigned int cmd,
31871 unsigned long arg)
31872 {
31873 struct inode *inode = file->f_dentry->d_inode;
31874 @@ -2628,6 +2631,8 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
31875
31876 return retval;
31877 }
31878 +
31879 +EXPORT_SYMBOL(tty_compat_ioctl);
31880 #endif
31881
31882 /*
31883 @@ -3073,7 +3078,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
31884
31885 void tty_default_fops(struct file_operations *fops)
31886 {
31887 - *fops = tty_fops;
31888 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
31889 }
31890
31891 /*
31892 diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
31893 index d814a3d..b55b9c9 100644
31894 --- a/drivers/char/tty_ldisc.c
31895 +++ b/drivers/char/tty_ldisc.c
31896 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *ld)
31897 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
31898 struct tty_ldisc_ops *ldo = ld->ops;
31899
31900 - ldo->refcount--;
31901 + atomic_dec(&ldo->refcount);
31902 module_put(ldo->owner);
31903 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
31904
31905 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
31906 spin_lock_irqsave(&tty_ldisc_lock, flags);
31907 tty_ldiscs[disc] = new_ldisc;
31908 new_ldisc->num = disc;
31909 - new_ldisc->refcount = 0;
31910 + atomic_set(&new_ldisc->refcount, 0);
31911 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
31912
31913 return ret;
31914 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
31915 return -EINVAL;
31916
31917 spin_lock_irqsave(&tty_ldisc_lock, flags);
31918 - if (tty_ldiscs[disc]->refcount)
31919 + if (atomic_read(&tty_ldiscs[disc]->refcount))
31920 ret = -EBUSY;
31921 else
31922 tty_ldiscs[disc] = NULL;
31923 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
31924 if (ldops) {
31925 ret = ERR_PTR(-EAGAIN);
31926 if (try_module_get(ldops->owner)) {
31927 - ldops->refcount++;
31928 + atomic_inc(&ldops->refcount);
31929 ret = ldops;
31930 }
31931 }
31932 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
31933 unsigned long flags;
31934
31935 spin_lock_irqsave(&tty_ldisc_lock, flags);
31936 - ldops->refcount--;
31937 + atomic_dec(&ldops->refcount);
31938 module_put(ldops->owner);
31939 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
31940 }
31941 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
31942 index a035ae3..c27fe2c 100644
31943 --- a/drivers/char/virtio_console.c
31944 +++ b/drivers/char/virtio_console.c
31945 @@ -133,7 +133,9 @@ static int get_chars(u32 vtermno, char *buf, int count)
31946 * virtqueue, so we let the drivers do some boutique early-output thing. */
31947 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
31948 {
31949 - virtio_cons.put_chars = put_chars;
31950 + pax_open_kernel();
31951 + *(void **)&virtio_cons.put_chars = put_chars;
31952 + pax_close_kernel();
31953 return hvc_instantiate(0, 0, &virtio_cons);
31954 }
31955
31956 @@ -213,11 +215,13 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
31957 out_vq = vqs[1];
31958
31959 /* Start using the new console output. */
31960 - virtio_cons.get_chars = get_chars;
31961 - virtio_cons.put_chars = put_chars;
31962 - virtio_cons.notifier_add = notifier_add_vio;
31963 - virtio_cons.notifier_del = notifier_del_vio;
31964 - virtio_cons.notifier_hangup = notifier_del_vio;
31965 + pax_open_kernel();
31966 + *(void **)&virtio_cons.get_chars = get_chars;
31967 + *(void **)&virtio_cons.put_chars = put_chars;
31968 + *(void **)&virtio_cons.notifier_add = notifier_add_vio;
31969 + *(void **)&virtio_cons.notifier_del = notifier_del_vio;
31970 + *(void **)&virtio_cons.notifier_hangup = notifier_del_vio;
31971 + pax_close_kernel();
31972
31973 /* The first argument of hvc_alloc() is the virtual console number, so
31974 * we use zero. The second argument is the parameter for the
31975 diff --git a/drivers/char/vt.c b/drivers/char/vt.c
31976 index 0c80c68..53d59c1 100644
31977 --- a/drivers/char/vt.c
31978 +++ b/drivers/char/vt.c
31979 @@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier);
31980
31981 static void notify_write(struct vc_data *vc, unsigned int unicode)
31982 {
31983 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
31984 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
31985 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
31986 }
31987
31988 diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
31989 index 6351a26..999af95 100644
31990 --- a/drivers/char/vt_ioctl.c
31991 +++ b/drivers/char/vt_ioctl.c
31992 @@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
31993 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
31994 return -EFAULT;
31995
31996 - if (!capable(CAP_SYS_TTY_CONFIG))
31997 - perm = 0;
31998 -
31999 switch (cmd) {
32000 case KDGKBENT:
32001 key_map = key_maps[s];
32002 @@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
32003 val = (i ? K_HOLE : K_NOSUCHMAP);
32004 return put_user(val, &user_kbe->kb_value);
32005 case KDSKBENT:
32006 + if (!capable(CAP_SYS_TTY_CONFIG))
32007 + perm = 0;
32008 +
32009 if (!perm)
32010 return -EPERM;
32011 +
32012 if (!i && v == K_NOSUCHMAP) {
32013 /* deallocate map */
32014 key_map = key_maps[s];
32015 @@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
32016 int i, j, k;
32017 int ret;
32018
32019 - if (!capable(CAP_SYS_TTY_CONFIG))
32020 - perm = 0;
32021 -
32022 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
32023 if (!kbs) {
32024 ret = -ENOMEM;
32025 @@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
32026 kfree(kbs);
32027 return ((p && *p) ? -EOVERFLOW : 0);
32028 case KDSKBSENT:
32029 + if (!capable(CAP_SYS_TTY_CONFIG))
32030 + perm = 0;
32031 +
32032 if (!perm) {
32033 ret = -EPERM;
32034 goto reterr;
32035 diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
32036 index c7ae026..1769c1d 100644
32037 --- a/drivers/cpufreq/cpufreq.c
32038 +++ b/drivers/cpufreq/cpufreq.c
32039 @@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct kobject *kobj)
32040 complete(&policy->kobj_unregister);
32041 }
32042
32043 -static struct sysfs_ops sysfs_ops = {
32044 +static const struct sysfs_ops sysfs_ops = {
32045 .show = show,
32046 .store = store,
32047 };
32048 diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
32049 index 97b0038..2056670 100644
32050 --- a/drivers/cpuidle/sysfs.c
32051 +++ b/drivers/cpuidle/sysfs.c
32052 @@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobject * kobj, struct attribute * attr,
32053 return ret;
32054 }
32055
32056 -static struct sysfs_ops cpuidle_sysfs_ops = {
32057 +static const struct sysfs_ops cpuidle_sysfs_ops = {
32058 .show = cpuidle_show,
32059 .store = cpuidle_store,
32060 };
32061 @@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct kobject * kobj,
32062 return ret;
32063 }
32064
32065 -static struct sysfs_ops cpuidle_state_sysfs_ops = {
32066 +static const struct sysfs_ops cpuidle_state_sysfs_ops = {
32067 .show = cpuidle_state_show,
32068 };
32069
32070 @@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpuidle = {
32071 .release = cpuidle_state_sysfs_release,
32072 };
32073
32074 -static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
32075 +static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
32076 {
32077 kobject_put(&device->kobjs[i]->kobj);
32078 wait_for_completion(&device->kobjs[i]->kobj_unregister);
32079 diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
32080 index 5f753fc..0377ae9 100644
32081 --- a/drivers/crypto/hifn_795x.c
32082 +++ b/drivers/crypto/hifn_795x.c
32083 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
32084 0xCA, 0x34, 0x2B, 0x2E};
32085 struct scatterlist sg;
32086
32087 + pax_track_stack();
32088 +
32089 memset(src, 0, sizeof(src));
32090 memset(ctx.key, 0, sizeof(ctx.key));
32091
32092 diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
32093 index 71e6482..de8d96c 100644
32094 --- a/drivers/crypto/padlock-aes.c
32095 +++ b/drivers/crypto/padlock-aes.c
32096 @@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
32097 struct crypto_aes_ctx gen_aes;
32098 int cpu;
32099
32100 + pax_track_stack();
32101 +
32102 if (key_len % 8) {
32103 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
32104 return -EINVAL;
32105 diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
32106 index dcc4ab7..cc834bb 100644
32107 --- a/drivers/dma/ioat/dma.c
32108 +++ b/drivers/dma/ioat/dma.c
32109 @@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
32110 return entry->show(&chan->common, page);
32111 }
32112
32113 -struct sysfs_ops ioat_sysfs_ops = {
32114 +const struct sysfs_ops ioat_sysfs_ops = {
32115 .show = ioat_attr_show,
32116 };
32117
32118 diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
32119 index bbc3e78..f2db62c 100644
32120 --- a/drivers/dma/ioat/dma.h
32121 +++ b/drivers/dma/ioat/dma.h
32122 @@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
32123 unsigned long *phys_complete);
32124 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
32125 void ioat_kobject_del(struct ioatdma_device *device);
32126 -extern struct sysfs_ops ioat_sysfs_ops;
32127 +extern const struct sysfs_ops ioat_sysfs_ops;
32128 extern struct ioat_sysfs_entry ioat_version_attr;
32129 extern struct ioat_sysfs_entry ioat_cap_attr;
32130 #endif /* IOATDMA_H */
32131 diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
32132 index 9908c9e..3ceb0e5 100644
32133 --- a/drivers/dma/ioat/dma_v3.c
32134 +++ b/drivers/dma/ioat/dma_v3.c
32135 @@ -71,10 +71,10 @@
32136 /* provide a lookup table for setting the source address in the base or
32137 * extended descriptor of an xor or pq descriptor
32138 */
32139 -static const u8 xor_idx_to_desc __read_mostly = 0xd0;
32140 -static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 };
32141 -static const u8 pq_idx_to_desc __read_mostly = 0xf8;
32142 -static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 };
32143 +static const u8 xor_idx_to_desc = 0xd0;
32144 +static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
32145 +static const u8 pq_idx_to_desc = 0xf8;
32146 +static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
32147
32148 static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
32149 {
32150 diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
32151 index 85c464a..afd1e73 100644
32152 --- a/drivers/edac/amd64_edac.c
32153 +++ b/drivers/edac/amd64_edac.c
32154 @@ -3099,7 +3099,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
32155 * PCI core identifies what devices are on a system during boot, and then
32156 * inquiry this table to see if this driver is for a given device found.
32157 */
32158 -static const struct pci_device_id amd64_pci_table[] __devinitdata = {
32159 +static const struct pci_device_id amd64_pci_table[] __devinitconst = {
32160 {
32161 .vendor = PCI_VENDOR_ID_AMD,
32162 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
32163 diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
32164 index 2b95f1a..4f52793 100644
32165 --- a/drivers/edac/amd76x_edac.c
32166 +++ b/drivers/edac/amd76x_edac.c
32167 @@ -322,7 +322,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
32168 edac_mc_free(mci);
32169 }
32170
32171 -static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
32172 +static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
32173 {
32174 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32175 AMD762},
32176 diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
32177 index d205d49..74c9672 100644
32178 --- a/drivers/edac/e752x_edac.c
32179 +++ b/drivers/edac/e752x_edac.c
32180 @@ -1282,7 +1282,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
32181 edac_mc_free(mci);
32182 }
32183
32184 -static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
32185 +static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
32186 {
32187 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32188 E7520},
32189 diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
32190 index c7d11cc..c59c1ca 100644
32191 --- a/drivers/edac/e7xxx_edac.c
32192 +++ b/drivers/edac/e7xxx_edac.c
32193 @@ -526,7 +526,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
32194 edac_mc_free(mci);
32195 }
32196
32197 -static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
32198 +static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
32199 {
32200 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32201 E7205},
32202 diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
32203 index 5376457..5fdedbc 100644
32204 --- a/drivers/edac/edac_device_sysfs.c
32205 +++ b/drivers/edac/edac_device_sysfs.c
32206 @@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(struct kobject *kobj,
32207 }
32208
32209 /* edac_dev file operations for an 'ctl_info' */
32210 -static struct sysfs_ops device_ctl_info_ops = {
32211 +static const struct sysfs_ops device_ctl_info_ops = {
32212 .show = edac_dev_ctl_info_show,
32213 .store = edac_dev_ctl_info_store
32214 };
32215 @@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(struct kobject *kobj,
32216 }
32217
32218 /* edac_dev file operations for an 'instance' */
32219 -static struct sysfs_ops device_instance_ops = {
32220 +static const struct sysfs_ops device_instance_ops = {
32221 .show = edac_dev_instance_show,
32222 .store = edac_dev_instance_store
32223 };
32224 @@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(struct kobject *kobj,
32225 }
32226
32227 /* edac_dev file operations for a 'block' */
32228 -static struct sysfs_ops device_block_ops = {
32229 +static const struct sysfs_ops device_block_ops = {
32230 .show = edac_dev_block_show,
32231 .store = edac_dev_block_store
32232 };
32233 diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
32234 index e1d4ce0..88840e9 100644
32235 --- a/drivers/edac/edac_mc_sysfs.c
32236 +++ b/drivers/edac/edac_mc_sysfs.c
32237 @@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
32238 return -EIO;
32239 }
32240
32241 -static struct sysfs_ops csrowfs_ops = {
32242 +static const struct sysfs_ops csrowfs_ops = {
32243 .show = csrowdev_show,
32244 .store = csrowdev_store
32245 };
32246 @@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
32247 }
32248
32249 /* Intermediate show/store table */
32250 -static struct sysfs_ops mci_ops = {
32251 +static const struct sysfs_ops mci_ops = {
32252 .show = mcidev_show,
32253 .store = mcidev_store
32254 };
32255 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
32256 index 422728c..d8d9c88 100644
32257 --- a/drivers/edac/edac_pci_sysfs.c
32258 +++ b/drivers/edac/edac_pci_sysfs.c
32259 @@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
32260 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
32261 static int edac_pci_poll_msec = 1000; /* one second workq period */
32262
32263 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
32264 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
32265 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
32266 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
32267
32268 static struct kobject *edac_pci_top_main_kobj;
32269 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
32270 @@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(struct kobject *kobj,
32271 }
32272
32273 /* fs_ops table */
32274 -static struct sysfs_ops pci_instance_ops = {
32275 +static const struct sysfs_ops pci_instance_ops = {
32276 .show = edac_pci_instance_show,
32277 .store = edac_pci_instance_store
32278 };
32279 @@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj,
32280 return -EIO;
32281 }
32282
32283 -static struct sysfs_ops edac_pci_sysfs_ops = {
32284 +static const struct sysfs_ops edac_pci_sysfs_ops = {
32285 .show = edac_pci_dev_show,
32286 .store = edac_pci_dev_store
32287 };
32288 @@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32289 edac_printk(KERN_CRIT, EDAC_PCI,
32290 "Signaled System Error on %s\n",
32291 pci_name(dev));
32292 - atomic_inc(&pci_nonparity_count);
32293 + atomic_inc_unchecked(&pci_nonparity_count);
32294 }
32295
32296 if (status & (PCI_STATUS_PARITY)) {
32297 @@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32298 "Master Data Parity Error on %s\n",
32299 pci_name(dev));
32300
32301 - atomic_inc(&pci_parity_count);
32302 + atomic_inc_unchecked(&pci_parity_count);
32303 }
32304
32305 if (status & (PCI_STATUS_DETECTED_PARITY)) {
32306 @@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32307 "Detected Parity Error on %s\n",
32308 pci_name(dev));
32309
32310 - atomic_inc(&pci_parity_count);
32311 + atomic_inc_unchecked(&pci_parity_count);
32312 }
32313 }
32314
32315 @@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32316 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
32317 "Signaled System Error on %s\n",
32318 pci_name(dev));
32319 - atomic_inc(&pci_nonparity_count);
32320 + atomic_inc_unchecked(&pci_nonparity_count);
32321 }
32322
32323 if (status & (PCI_STATUS_PARITY)) {
32324 @@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32325 "Master Data Parity Error on "
32326 "%s\n", pci_name(dev));
32327
32328 - atomic_inc(&pci_parity_count);
32329 + atomic_inc_unchecked(&pci_parity_count);
32330 }
32331
32332 if (status & (PCI_STATUS_DETECTED_PARITY)) {
32333 @@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32334 "Detected Parity Error on %s\n",
32335 pci_name(dev));
32336
32337 - atomic_inc(&pci_parity_count);
32338 + atomic_inc_unchecked(&pci_parity_count);
32339 }
32340 }
32341 }
32342 @@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
32343 if (!check_pci_errors)
32344 return;
32345
32346 - before_count = atomic_read(&pci_parity_count);
32347 + before_count = atomic_read_unchecked(&pci_parity_count);
32348
32349 /* scan all PCI devices looking for a Parity Error on devices and
32350 * bridges.
32351 @@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
32352 /* Only if operator has selected panic on PCI Error */
32353 if (edac_pci_get_panic_on_pe()) {
32354 /* If the count is different 'after' from 'before' */
32355 - if (before_count != atomic_read(&pci_parity_count))
32356 + if (before_count != atomic_read_unchecked(&pci_parity_count))
32357 panic("EDAC: PCI Parity Error");
32358 }
32359 }
32360 diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
32361 index 6c9a0f2..9c1cf7e 100644
32362 --- a/drivers/edac/i3000_edac.c
32363 +++ b/drivers/edac/i3000_edac.c
32364 @@ -471,7 +471,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
32365 edac_mc_free(mci);
32366 }
32367
32368 -static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
32369 +static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
32370 {
32371 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32372 I3000},
32373 diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
32374 index fde4db9..fe108f9 100644
32375 --- a/drivers/edac/i3200_edac.c
32376 +++ b/drivers/edac/i3200_edac.c
32377 @@ -444,7 +444,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
32378 edac_mc_free(mci);
32379 }
32380
32381 -static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
32382 +static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
32383 {
32384 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32385 I3200},
32386 diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
32387 index adc10a2..57d4ccf 100644
32388 --- a/drivers/edac/i5000_edac.c
32389 +++ b/drivers/edac/i5000_edac.c
32390 @@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
32391 *
32392 * The "E500P" device is the first device supported.
32393 */
32394 -static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
32395 +static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
32396 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
32397 .driver_data = I5000P},
32398
32399 diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
32400 index 22db05a..b2b5503 100644
32401 --- a/drivers/edac/i5100_edac.c
32402 +++ b/drivers/edac/i5100_edac.c
32403 @@ -944,7 +944,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
32404 edac_mc_free(mci);
32405 }
32406
32407 -static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
32408 +static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
32409 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
32410 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
32411 { 0, }
32412 diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
32413 index f99d106..f050710 100644
32414 --- a/drivers/edac/i5400_edac.c
32415 +++ b/drivers/edac/i5400_edac.c
32416 @@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
32417 *
32418 * The "E500P" device is the first device supported.
32419 */
32420 -static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
32421 +static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
32422 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
32423 {0,} /* 0 terminated list. */
32424 };
32425 diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
32426 index 577760a..9ce16ce 100644
32427 --- a/drivers/edac/i82443bxgx_edac.c
32428 +++ b/drivers/edac/i82443bxgx_edac.c
32429 @@ -381,7 +381,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
32430
32431 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
32432
32433 -static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
32434 +static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
32435 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
32436 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
32437 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
32438 diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
32439 index c0088ba..64a7b98 100644
32440 --- a/drivers/edac/i82860_edac.c
32441 +++ b/drivers/edac/i82860_edac.c
32442 @@ -271,7 +271,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
32443 edac_mc_free(mci);
32444 }
32445
32446 -static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
32447 +static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
32448 {
32449 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32450 I82860},
32451 diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
32452 index b2d83b9..a34357b 100644
32453 --- a/drivers/edac/i82875p_edac.c
32454 +++ b/drivers/edac/i82875p_edac.c
32455 @@ -512,7 +512,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
32456 edac_mc_free(mci);
32457 }
32458
32459 -static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
32460 +static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
32461 {
32462 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32463 I82875P},
32464 diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
32465 index 2eed3ea..87bbbd1 100644
32466 --- a/drivers/edac/i82975x_edac.c
32467 +++ b/drivers/edac/i82975x_edac.c
32468 @@ -586,7 +586,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
32469 edac_mc_free(mci);
32470 }
32471
32472 -static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
32473 +static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
32474 {
32475 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32476 I82975X
32477 diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
32478 index 9900675..78ac2b6 100644
32479 --- a/drivers/edac/r82600_edac.c
32480 +++ b/drivers/edac/r82600_edac.c
32481 @@ -374,7 +374,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
32482 edac_mc_free(mci);
32483 }
32484
32485 -static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
32486 +static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
32487 {
32488 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
32489 },
32490 diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
32491 index d4ec605..4cfec4e 100644
32492 --- a/drivers/edac/x38_edac.c
32493 +++ b/drivers/edac/x38_edac.c
32494 @@ -441,7 +441,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
32495 edac_mc_free(mci);
32496 }
32497
32498 -static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
32499 +static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
32500 {
32501 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32502 X38},
32503 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
32504 index 3fc2ceb..daf098f 100644
32505 --- a/drivers/firewire/core-card.c
32506 +++ b/drivers/firewire/core-card.c
32507 @@ -558,7 +558,7 @@ void fw_card_release(struct kref *kref)
32508
32509 void fw_core_remove_card(struct fw_card *card)
32510 {
32511 - struct fw_card_driver dummy_driver = dummy_driver_template;
32512 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
32513
32514 card->driver->update_phy_reg(card, 4,
32515 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
32516 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
32517 index 4560d8f..36db24a 100644
32518 --- a/drivers/firewire/core-cdev.c
32519 +++ b/drivers/firewire/core-cdev.c
32520 @@ -1141,8 +1141,7 @@ static int init_iso_resource(struct client *client,
32521 int ret;
32522
32523 if ((request->channels == 0 && request->bandwidth == 0) ||
32524 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
32525 - request->bandwidth < 0)
32526 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
32527 return -EINVAL;
32528
32529 r = kmalloc(sizeof(*r), GFP_KERNEL);
32530 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
32531 index da628c7..cf54a2c 100644
32532 --- a/drivers/firewire/core-transaction.c
32533 +++ b/drivers/firewire/core-transaction.c
32534 @@ -36,6 +36,7 @@
32535 #include <linux/string.h>
32536 #include <linux/timer.h>
32537 #include <linux/types.h>
32538 +#include <linux/sched.h>
32539
32540 #include <asm/byteorder.h>
32541
32542 @@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
32543 struct transaction_callback_data d;
32544 struct fw_transaction t;
32545
32546 + pax_track_stack();
32547 +
32548 init_completion(&d.done);
32549 d.payload = payload;
32550 fw_send_request(card, &t, tcode, destination_id, generation, speed,
32551 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
32552 index 7ff6e75..a2965d9 100644
32553 --- a/drivers/firewire/core.h
32554 +++ b/drivers/firewire/core.h
32555 @@ -86,6 +86,7 @@ struct fw_card_driver {
32556
32557 int (*stop_iso)(struct fw_iso_context *ctx);
32558 };
32559 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
32560
32561 void fw_card_initialize(struct fw_card *card,
32562 const struct fw_card_driver *driver, struct device *device);
32563 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
32564 index 3a2ccb0..82fd7c4 100644
32565 --- a/drivers/firmware/dmi_scan.c
32566 +++ b/drivers/firmware/dmi_scan.c
32567 @@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
32568 }
32569 }
32570 else {
32571 - /*
32572 - * no iounmap() for that ioremap(); it would be a no-op, but
32573 - * it's so early in setup that sucker gets confused into doing
32574 - * what it shouldn't if we actually call it.
32575 - */
32576 p = dmi_ioremap(0xF0000, 0x10000);
32577 if (p == NULL)
32578 goto error;
32579 @@ -667,7 +662,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
32580 if (buf == NULL)
32581 return -1;
32582
32583 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
32584 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
32585
32586 iounmap(buf);
32587 return 0;
32588 diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
32589 index 9e4f59d..110e24e 100644
32590 --- a/drivers/firmware/edd.c
32591 +++ b/drivers/firmware/edd.c
32592 @@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, struct attribute *attr, char *buf)
32593 return ret;
32594 }
32595
32596 -static struct sysfs_ops edd_attr_ops = {
32597 +static const struct sysfs_ops edd_attr_ops = {
32598 .show = edd_attr_show,
32599 };
32600
32601 diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
32602 index f4f709d..082f06e 100644
32603 --- a/drivers/firmware/efivars.c
32604 +++ b/drivers/firmware/efivars.c
32605 @@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct kobject *kobj, struct attribute *attr,
32606 return ret;
32607 }
32608
32609 -static struct sysfs_ops efivar_attr_ops = {
32610 +static const struct sysfs_ops efivar_attr_ops = {
32611 .show = efivar_attr_show,
32612 .store = efivar_attr_store,
32613 };
32614 diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
32615 index 051d1eb..0a5d4e7 100644
32616 --- a/drivers/firmware/iscsi_ibft.c
32617 +++ b/drivers/firmware/iscsi_ibft.c
32618 @@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struct kobject *kobj,
32619 return ret;
32620 }
32621
32622 -static struct sysfs_ops ibft_attr_ops = {
32623 +static const struct sysfs_ops ibft_attr_ops = {
32624 .show = ibft_show_attribute,
32625 };
32626
32627 diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
32628 index 56f9234..8c58c7b 100644
32629 --- a/drivers/firmware/memmap.c
32630 +++ b/drivers/firmware/memmap.c
32631 @@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
32632 NULL
32633 };
32634
32635 -static struct sysfs_ops memmap_attr_ops = {
32636 +static const struct sysfs_ops memmap_attr_ops = {
32637 .show = memmap_attr_show,
32638 };
32639
32640 diff --git a/drivers/gpio/vr41xx_giu.c b/drivers/gpio/vr41xx_giu.c
32641 index b16c9a8..2af7d3f 100644
32642 --- a/drivers/gpio/vr41xx_giu.c
32643 +++ b/drivers/gpio/vr41xx_giu.c
32644 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
32645 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
32646 maskl, pendl, maskh, pendh);
32647
32648 - atomic_inc(&irq_err_count);
32649 + atomic_inc_unchecked(&irq_err_count);
32650
32651 return -EINVAL;
32652 }
32653 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
32654 index bea6efc..3dc0f42 100644
32655 --- a/drivers/gpu/drm/drm_crtc.c
32656 +++ b/drivers/gpu/drm/drm_crtc.c
32657 @@ -1323,7 +1323,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32658 */
32659 if ((out_resp->count_modes >= mode_count) && mode_count) {
32660 copied = 0;
32661 - mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
32662 + mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
32663 list_for_each_entry(mode, &connector->modes, head) {
32664 drm_crtc_convert_to_umode(&u_mode, mode);
32665 if (copy_to_user(mode_ptr + copied,
32666 @@ -1338,8 +1338,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32667
32668 if ((out_resp->count_props >= props_count) && props_count) {
32669 copied = 0;
32670 - prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
32671 - prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
32672 + prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
32673 + prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
32674 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
32675 if (connector->property_ids[i] != 0) {
32676 if (put_user(connector->property_ids[i],
32677 @@ -1361,7 +1361,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32678
32679 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
32680 copied = 0;
32681 - encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
32682 + encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
32683 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
32684 if (connector->encoder_ids[i] != 0) {
32685 if (put_user(connector->encoder_ids[i],
32686 @@ -1513,7 +1513,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
32687 }
32688
32689 for (i = 0; i < crtc_req->count_connectors; i++) {
32690 - set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
32691 + set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
32692 if (get_user(out_id, &set_connectors_ptr[i])) {
32693 ret = -EFAULT;
32694 goto out;
32695 @@ -2118,7 +2118,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32696 out_resp->flags = property->flags;
32697
32698 if ((out_resp->count_values >= value_count) && value_count) {
32699 - values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
32700 + values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
32701 for (i = 0; i < value_count; i++) {
32702 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
32703 ret = -EFAULT;
32704 @@ -2131,7 +2131,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32705 if (property->flags & DRM_MODE_PROP_ENUM) {
32706 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
32707 copied = 0;
32708 - enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
32709 + enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
32710 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
32711
32712 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
32713 @@ -2154,7 +2154,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32714 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
32715 copied = 0;
32716 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
32717 - blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
32718 + blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
32719
32720 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
32721 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
32722 @@ -2226,7 +2226,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
32723 blob = obj_to_blob(obj);
32724
32725 if (out_resp->length == blob->length) {
32726 - blob_ptr = (void *)(unsigned long)out_resp->data;
32727 + blob_ptr = (void __user *)(unsigned long)out_resp->data;
32728 if (copy_to_user(blob_ptr, blob->data, blob->length)){
32729 ret = -EFAULT;
32730 goto done;
32731 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
32732 index 1b8745d..92fdbf6 100644
32733 --- a/drivers/gpu/drm/drm_crtc_helper.c
32734 +++ b/drivers/gpu/drm/drm_crtc_helper.c
32735 @@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
32736 struct drm_crtc *tmp;
32737 int crtc_mask = 1;
32738
32739 - WARN(!crtc, "checking null crtc?");
32740 + BUG_ON(!crtc);
32741
32742 dev = crtc->dev;
32743
32744 @@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
32745
32746 adjusted_mode = drm_mode_duplicate(dev, mode);
32747
32748 + pax_track_stack();
32749 +
32750 crtc->enabled = drm_helper_crtc_in_use(crtc);
32751
32752 if (!crtc->enabled)
32753 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
32754 index 0e27d98..dec8768 100644
32755 --- a/drivers/gpu/drm/drm_drv.c
32756 +++ b/drivers/gpu/drm/drm_drv.c
32757 @@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
32758 char *kdata = NULL;
32759
32760 atomic_inc(&dev->ioctl_count);
32761 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
32762 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
32763 ++file_priv->ioctl_count;
32764
32765 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
32766 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
32767 index ba14553..182d0bb 100644
32768 --- a/drivers/gpu/drm/drm_fops.c
32769 +++ b/drivers/gpu/drm/drm_fops.c
32770 @@ -66,7 +66,7 @@ static int drm_setup(struct drm_device * dev)
32771 }
32772
32773 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
32774 - atomic_set(&dev->counts[i], 0);
32775 + atomic_set_unchecked(&dev->counts[i], 0);
32776
32777 dev->sigdata.lock = NULL;
32778
32779 @@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct file *filp)
32780
32781 retcode = drm_open_helper(inode, filp, dev);
32782 if (!retcode) {
32783 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
32784 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
32785 spin_lock(&dev->count_lock);
32786 - if (!dev->open_count++) {
32787 + if (local_inc_return(&dev->open_count) == 1) {
32788 spin_unlock(&dev->count_lock);
32789 retcode = drm_setup(dev);
32790 goto out;
32791 @@ -435,7 +435,7 @@ int drm_release(struct inode *inode, struct file *filp)
32792
32793 lock_kernel();
32794
32795 - DRM_DEBUG("open_count = %d\n", dev->open_count);
32796 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
32797
32798 if (dev->driver->preclose)
32799 dev->driver->preclose(dev, file_priv);
32800 @@ -447,7 +447,7 @@ int drm_release(struct inode *inode, struct file *filp)
32801 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
32802 task_pid_nr(current),
32803 (long)old_encode_dev(file_priv->minor->device),
32804 - dev->open_count);
32805 + local_read(&dev->open_count));
32806
32807 /* if the master has gone away we can't do anything with the lock */
32808 if (file_priv->minor->master)
32809 @@ -524,9 +524,9 @@ int drm_release(struct inode *inode, struct file *filp)
32810 * End inline drm_release
32811 */
32812
32813 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
32814 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
32815 spin_lock(&dev->count_lock);
32816 - if (!--dev->open_count) {
32817 + if (local_dec_and_test(&dev->open_count)) {
32818 if (atomic_read(&dev->ioctl_count)) {
32819 DRM_ERROR("Device busy: %d\n",
32820 atomic_read(&dev->ioctl_count));
32821 diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
32822 index 8bf3770..79422805 100644
32823 --- a/drivers/gpu/drm/drm_gem.c
32824 +++ b/drivers/gpu/drm/drm_gem.c
32825 @@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
32826 spin_lock_init(&dev->object_name_lock);
32827 idr_init(&dev->object_name_idr);
32828 atomic_set(&dev->object_count, 0);
32829 - atomic_set(&dev->object_memory, 0);
32830 + atomic_set_unchecked(&dev->object_memory, 0);
32831 atomic_set(&dev->pin_count, 0);
32832 - atomic_set(&dev->pin_memory, 0);
32833 + atomic_set_unchecked(&dev->pin_memory, 0);
32834 atomic_set(&dev->gtt_count, 0);
32835 - atomic_set(&dev->gtt_memory, 0);
32836 + atomic_set_unchecked(&dev->gtt_memory, 0);
32837
32838 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
32839 if (!mm) {
32840 @@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
32841 goto fput;
32842 }
32843 atomic_inc(&dev->object_count);
32844 - atomic_add(obj->size, &dev->object_memory);
32845 + atomic_add_unchecked(obj->size, &dev->object_memory);
32846 return obj;
32847 fput:
32848 fput(obj->filp);
32849 @@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
32850
32851 fput(obj->filp);
32852 atomic_dec(&dev->object_count);
32853 - atomic_sub(obj->size, &dev->object_memory);
32854 + atomic_sub_unchecked(obj->size, &dev->object_memory);
32855 kfree(obj);
32856 }
32857 EXPORT_SYMBOL(drm_gem_object_free);
32858 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
32859 index f0f6c6b..34af322 100644
32860 --- a/drivers/gpu/drm/drm_info.c
32861 +++ b/drivers/gpu/drm/drm_info.c
32862 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
32863 struct drm_local_map *map;
32864 struct drm_map_list *r_list;
32865
32866 - /* Hardcoded from _DRM_FRAME_BUFFER,
32867 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
32868 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
32869 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
32870 + static const char * const types[] = {
32871 + [_DRM_FRAME_BUFFER] = "FB",
32872 + [_DRM_REGISTERS] = "REG",
32873 + [_DRM_SHM] = "SHM",
32874 + [_DRM_AGP] = "AGP",
32875 + [_DRM_SCATTER_GATHER] = "SG",
32876 + [_DRM_CONSISTENT] = "PCI",
32877 + [_DRM_GEM] = "GEM" };
32878 const char *type;
32879 int i;
32880
32881 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
32882 map = r_list->map;
32883 if (!map)
32884 continue;
32885 - if (map->type < 0 || map->type > 5)
32886 + if (map->type >= ARRAY_SIZE(types))
32887 type = "??";
32888 else
32889 type = types[map->type];
32890 @@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file *m, void* data)
32891 struct drm_device *dev = node->minor->dev;
32892
32893 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
32894 - seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
32895 + seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
32896 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
32897 - seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
32898 - seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
32899 + seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
32900 + seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
32901 seq_printf(m, "%d gtt total\n", dev->gtt_total);
32902 return 0;
32903 }
32904 @@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, void *data)
32905 mutex_lock(&dev->struct_mutex);
32906 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
32907 atomic_read(&dev->vma_count),
32908 +#ifdef CONFIG_GRKERNSEC_HIDESYM
32909 + NULL, 0);
32910 +#else
32911 high_memory, (u64)virt_to_phys(high_memory));
32912 +#endif
32913
32914 list_for_each_entry(pt, &dev->vmalist, head) {
32915 vma = pt->vma;
32916 @@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, void *data)
32917 continue;
32918 seq_printf(m,
32919 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
32920 - pt->pid, vma->vm_start, vma->vm_end,
32921 + pt->pid,
32922 +#ifdef CONFIG_GRKERNSEC_HIDESYM
32923 + 0, 0,
32924 +#else
32925 + vma->vm_start, vma->vm_end,
32926 +#endif
32927 vma->vm_flags & VM_READ ? 'r' : '-',
32928 vma->vm_flags & VM_WRITE ? 'w' : '-',
32929 vma->vm_flags & VM_EXEC ? 'x' : '-',
32930 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
32931 vma->vm_flags & VM_LOCKED ? 'l' : '-',
32932 vma->vm_flags & VM_IO ? 'i' : '-',
32933 +#ifdef CONFIG_GRKERNSEC_HIDESYM
32934 + 0);
32935 +#else
32936 vma->vm_pgoff);
32937 +#endif
32938
32939 #if defined(__i386__)
32940 pgprot = pgprot_val(vma->vm_page_prot);
32941 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
32942 index 282d9fd..71e5f11 100644
32943 --- a/drivers/gpu/drm/drm_ioc32.c
32944 +++ b/drivers/gpu/drm/drm_ioc32.c
32945 @@ -463,7 +463,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
32946 request = compat_alloc_user_space(nbytes);
32947 if (!access_ok(VERIFY_WRITE, request, nbytes))
32948 return -EFAULT;
32949 - list = (struct drm_buf_desc *) (request + 1);
32950 + list = (struct drm_buf_desc __user *) (request + 1);
32951
32952 if (__put_user(count, &request->count)
32953 || __put_user(list, &request->list))
32954 @@ -525,7 +525,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
32955 request = compat_alloc_user_space(nbytes);
32956 if (!access_ok(VERIFY_WRITE, request, nbytes))
32957 return -EFAULT;
32958 - list = (struct drm_buf_pub *) (request + 1);
32959 + list = (struct drm_buf_pub __user *) (request + 1);
32960
32961 if (__put_user(count, &request->count)
32962 || __put_user(list, &request->list))
32963 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
32964 index 9b9ff46..4ea724c 100644
32965 --- a/drivers/gpu/drm/drm_ioctl.c
32966 +++ b/drivers/gpu/drm/drm_ioctl.c
32967 @@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev, void *data,
32968 stats->data[i].value =
32969 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
32970 else
32971 - stats->data[i].value = atomic_read(&dev->counts[i]);
32972 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
32973 stats->data[i].type = dev->types[i];
32974 }
32975
32976 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
32977 index e2f70a5..c703e86 100644
32978 --- a/drivers/gpu/drm/drm_lock.c
32979 +++ b/drivers/gpu/drm/drm_lock.c
32980 @@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
32981 if (drm_lock_take(&master->lock, lock->context)) {
32982 master->lock.file_priv = file_priv;
32983 master->lock.lock_time = jiffies;
32984 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
32985 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
32986 break; /* Got lock */
32987 }
32988
32989 @@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
32990 return -EINVAL;
32991 }
32992
32993 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
32994 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
32995
32996 /* kernel_context_switch isn't used by any of the x86 drm
32997 * modules but is required by the Sparc driver.
32998 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
32999 index 7d1d88c..b9131b2 100644
33000 --- a/drivers/gpu/drm/i810/i810_dma.c
33001 +++ b/drivers/gpu/drm/i810/i810_dma.c
33002 @@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
33003 dma->buflist[vertex->idx],
33004 vertex->discard, vertex->used);
33005
33006 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
33007 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
33008 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
33009 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
33010 sarea_priv->last_enqueue = dev_priv->counter - 1;
33011 sarea_priv->last_dispatch = (int)hw_status[5];
33012
33013 @@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
33014 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
33015 mc->last_render);
33016
33017 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
33018 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
33019 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
33020 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
33021 sarea_priv->last_enqueue = dev_priv->counter - 1;
33022 sarea_priv->last_dispatch = (int)hw_status[5];
33023
33024 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
33025 index 21e2691..7321edd 100644
33026 --- a/drivers/gpu/drm/i810/i810_drv.h
33027 +++ b/drivers/gpu/drm/i810/i810_drv.h
33028 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
33029 int page_flipping;
33030
33031 wait_queue_head_t irq_queue;
33032 - atomic_t irq_received;
33033 - atomic_t irq_emitted;
33034 + atomic_unchecked_t irq_received;
33035 + atomic_unchecked_t irq_emitted;
33036
33037 int front_offset;
33038 } drm_i810_private_t;
33039 diff --git a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h
33040 index da82afe..48a45de 100644
33041 --- a/drivers/gpu/drm/i830/i830_drv.h
33042 +++ b/drivers/gpu/drm/i830/i830_drv.h
33043 @@ -115,8 +115,8 @@ typedef struct drm_i830_private {
33044 int page_flipping;
33045
33046 wait_queue_head_t irq_queue;
33047 - atomic_t irq_received;
33048 - atomic_t irq_emitted;
33049 + atomic_unchecked_t irq_received;
33050 + atomic_unchecked_t irq_emitted;
33051
33052 int use_mi_batchbuffer_start;
33053
33054 diff --git a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c
33055 index 91ec2bb..6f21fab 100644
33056 --- a/drivers/gpu/drm/i830/i830_irq.c
33057 +++ b/drivers/gpu/drm/i830/i830_irq.c
33058 @@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
33059
33060 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
33061
33062 - atomic_inc(&dev_priv->irq_received);
33063 + atomic_inc_unchecked(&dev_priv->irq_received);
33064 wake_up_interruptible(&dev_priv->irq_queue);
33065
33066 return IRQ_HANDLED;
33067 @@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_device * dev)
33068
33069 DRM_DEBUG("%s\n", __func__);
33070
33071 - atomic_inc(&dev_priv->irq_emitted);
33072 + atomic_inc_unchecked(&dev_priv->irq_emitted);
33073
33074 BEGIN_LP_RING(2);
33075 OUT_RING(0);
33076 OUT_RING(GFX_OP_USER_INTERRUPT);
33077 ADVANCE_LP_RING();
33078
33079 - return atomic_read(&dev_priv->irq_emitted);
33080 + return atomic_read_unchecked(&dev_priv->irq_emitted);
33081 }
33082
33083 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
33084 @@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
33085
33086 DRM_DEBUG("%s\n", __func__);
33087
33088 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
33089 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
33090 return 0;
33091
33092 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
33093 @@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
33094
33095 for (;;) {
33096 __set_current_state(TASK_INTERRUPTIBLE);
33097 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
33098 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
33099 break;
33100 if ((signed)(end - jiffies) <= 0) {
33101 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
33102 @@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct drm_device * dev)
33103 I830_WRITE16(I830REG_HWSTAM, 0xffff);
33104 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
33105 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
33106 - atomic_set(&dev_priv->irq_received, 0);
33107 - atomic_set(&dev_priv->irq_emitted, 0);
33108 + atomic_set_unchecked(&dev_priv->irq_received, 0);
33109 + atomic_set_unchecked(&dev_priv->irq_emitted, 0);
33110 init_waitqueue_head(&dev_priv->irq_queue);
33111 }
33112
33113 diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
33114 index 288fc50..c6092055 100644
33115 --- a/drivers/gpu/drm/i915/dvo.h
33116 +++ b/drivers/gpu/drm/i915/dvo.h
33117 @@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
33118 *
33119 * \return singly-linked list of modes or NULL if no modes found.
33120 */
33121 - struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
33122 + struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
33123
33124 /**
33125 * Clean up driver-specific bits of the output
33126 */
33127 - void (*destroy) (struct intel_dvo_device *dvo);
33128 + void (* const destroy) (struct intel_dvo_device *dvo);
33129
33130 /**
33131 * Debugging hook to dump device registers to log file
33132 */
33133 - void (*dump_regs)(struct intel_dvo_device *dvo);
33134 + void (* const dump_regs)(struct intel_dvo_device *dvo);
33135 };
33136
33137 -extern struct intel_dvo_dev_ops sil164_ops;
33138 -extern struct intel_dvo_dev_ops ch7xxx_ops;
33139 -extern struct intel_dvo_dev_ops ivch_ops;
33140 -extern struct intel_dvo_dev_ops tfp410_ops;
33141 -extern struct intel_dvo_dev_ops ch7017_ops;
33142 +extern const struct intel_dvo_dev_ops sil164_ops;
33143 +extern const struct intel_dvo_dev_ops ch7xxx_ops;
33144 +extern const struct intel_dvo_dev_ops ivch_ops;
33145 +extern const struct intel_dvo_dev_ops tfp410_ops;
33146 +extern const struct intel_dvo_dev_ops ch7017_ops;
33147
33148 #endif /* _INTEL_DVO_H */
33149 diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
33150 index 621815b..499d82e 100644
33151 --- a/drivers/gpu/drm/i915/dvo_ch7017.c
33152 +++ b/drivers/gpu/drm/i915/dvo_ch7017.c
33153 @@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_dvo_device *dvo)
33154 }
33155 }
33156
33157 -struct intel_dvo_dev_ops ch7017_ops = {
33158 +const struct intel_dvo_dev_ops ch7017_ops = {
33159 .init = ch7017_init,
33160 .detect = ch7017_detect,
33161 .mode_valid = ch7017_mode_valid,
33162 diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
33163 index a9b8962..ac769ba 100644
33164 --- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
33165 +++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
33166 @@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_dvo_device *dvo)
33167 }
33168 }
33169
33170 -struct intel_dvo_dev_ops ch7xxx_ops = {
33171 +const struct intel_dvo_dev_ops ch7xxx_ops = {
33172 .init = ch7xxx_init,
33173 .detect = ch7xxx_detect,
33174 .mode_valid = ch7xxx_mode_valid,
33175 diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
33176 index aa176f9..ed2930c 100644
33177 --- a/drivers/gpu/drm/i915/dvo_ivch.c
33178 +++ b/drivers/gpu/drm/i915/dvo_ivch.c
33179 @@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
33180 }
33181 }
33182
33183 -struct intel_dvo_dev_ops ivch_ops= {
33184 +const struct intel_dvo_dev_ops ivch_ops= {
33185 .init = ivch_init,
33186 .dpms = ivch_dpms,
33187 .save = ivch_save,
33188 diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
33189 index e1c1f73..7dbebcf 100644
33190 --- a/drivers/gpu/drm/i915/dvo_sil164.c
33191 +++ b/drivers/gpu/drm/i915/dvo_sil164.c
33192 @@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_dvo_device *dvo)
33193 }
33194 }
33195
33196 -struct intel_dvo_dev_ops sil164_ops = {
33197 +const struct intel_dvo_dev_ops sil164_ops = {
33198 .init = sil164_init,
33199 .detect = sil164_detect,
33200 .mode_valid = sil164_mode_valid,
33201 diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
33202 index 16dce84..7e1b6f8 100644
33203 --- a/drivers/gpu/drm/i915/dvo_tfp410.c
33204 +++ b/drivers/gpu/drm/i915/dvo_tfp410.c
33205 @@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_dvo_device *dvo)
33206 }
33207 }
33208
33209 -struct intel_dvo_dev_ops tfp410_ops = {
33210 +const struct intel_dvo_dev_ops tfp410_ops = {
33211 .init = tfp410_init,
33212 .detect = tfp410_detect,
33213 .mode_valid = tfp410_mode_valid,
33214 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
33215 index 7e859d6..7d1cf2b 100644
33216 --- a/drivers/gpu/drm/i915/i915_debugfs.c
33217 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
33218 @@ -192,7 +192,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
33219 I915_READ(GTIMR));
33220 }
33221 seq_printf(m, "Interrupts received: %d\n",
33222 - atomic_read(&dev_priv->irq_received));
33223 + atomic_read_unchecked(&dev_priv->irq_received));
33224 if (dev_priv->hw_status_page != NULL) {
33225 seq_printf(m, "Current sequence: %d\n",
33226 i915_get_gem_seqno(dev));
33227 diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
33228 index 5449239..7e4f68d 100644
33229 --- a/drivers/gpu/drm/i915/i915_drv.c
33230 +++ b/drivers/gpu/drm/i915/i915_drv.c
33231 @@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
33232 return i915_resume(dev);
33233 }
33234
33235 -static struct vm_operations_struct i915_gem_vm_ops = {
33236 +static const struct vm_operations_struct i915_gem_vm_ops = {
33237 .fault = i915_gem_fault,
33238 .open = drm_gem_vm_open,
33239 .close = drm_gem_vm_close,
33240 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
33241 index 97163f7..c24c7c7 100644
33242 --- a/drivers/gpu/drm/i915/i915_drv.h
33243 +++ b/drivers/gpu/drm/i915/i915_drv.h
33244 @@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
33245 /* display clock increase/decrease */
33246 /* pll clock increase/decrease */
33247 /* clock gating init */
33248 -};
33249 +} __no_const;
33250
33251 typedef struct drm_i915_private {
33252 struct drm_device *dev;
33253 @@ -197,7 +197,7 @@ typedef struct drm_i915_private {
33254 int page_flipping;
33255
33256 wait_queue_head_t irq_queue;
33257 - atomic_t irq_received;
33258 + atomic_unchecked_t irq_received;
33259 /** Protects user_irq_refcount and irq_mask_reg */
33260 spinlock_t user_irq_lock;
33261 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
33262 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
33263 index 27a3074..eb3f959 100644
33264 --- a/drivers/gpu/drm/i915/i915_gem.c
33265 +++ b/drivers/gpu/drm/i915/i915_gem.c
33266 @@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
33267
33268 args->aper_size = dev->gtt_total;
33269 args->aper_available_size = (args->aper_size -
33270 - atomic_read(&dev->pin_memory));
33271 + atomic_read_unchecked(&dev->pin_memory));
33272
33273 return 0;
33274 }
33275 @@ -2058,7 +2058,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
33276
33277 if (obj_priv->gtt_space) {
33278 atomic_dec(&dev->gtt_count);
33279 - atomic_sub(obj->size, &dev->gtt_memory);
33280 + atomic_sub_unchecked(obj->size, &dev->gtt_memory);
33281
33282 drm_mm_put_block(obj_priv->gtt_space);
33283 obj_priv->gtt_space = NULL;
33284 @@ -2701,7 +2701,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
33285 goto search_free;
33286 }
33287 atomic_inc(&dev->gtt_count);
33288 - atomic_add(obj->size, &dev->gtt_memory);
33289 + atomic_add_unchecked(obj->size, &dev->gtt_memory);
33290
33291 /* Assert that the object is not currently in any GPU domain. As it
33292 * wasn't in the GTT, there shouldn't be any way it could have been in
33293 @@ -3755,9 +3755,9 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
33294 "%d/%d gtt bytes\n",
33295 atomic_read(&dev->object_count),
33296 atomic_read(&dev->pin_count),
33297 - atomic_read(&dev->object_memory),
33298 - atomic_read(&dev->pin_memory),
33299 - atomic_read(&dev->gtt_memory),
33300 + atomic_read_unchecked(&dev->object_memory),
33301 + atomic_read_unchecked(&dev->pin_memory),
33302 + atomic_read_unchecked(&dev->gtt_memory),
33303 dev->gtt_total);
33304 }
33305 goto err;
33306 @@ -3989,7 +3989,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
33307 */
33308 if (obj_priv->pin_count == 1) {
33309 atomic_inc(&dev->pin_count);
33310 - atomic_add(obj->size, &dev->pin_memory);
33311 + atomic_add_unchecked(obj->size, &dev->pin_memory);
33312 if (!obj_priv->active &&
33313 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
33314 !list_empty(&obj_priv->list))
33315 @@ -4022,7 +4022,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
33316 list_move_tail(&obj_priv->list,
33317 &dev_priv->mm.inactive_list);
33318 atomic_dec(&dev->pin_count);
33319 - atomic_sub(obj->size, &dev->pin_memory);
33320 + atomic_sub_unchecked(obj->size, &dev->pin_memory);
33321 }
33322 i915_verify_inactive(dev, __FILE__, __LINE__);
33323 }
33324 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
33325 index 63f28ad..f5469da 100644
33326 --- a/drivers/gpu/drm/i915/i915_irq.c
33327 +++ b/drivers/gpu/drm/i915/i915_irq.c
33328 @@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
33329 int irq_received;
33330 int ret = IRQ_NONE;
33331
33332 - atomic_inc(&dev_priv->irq_received);
33333 + atomic_inc_unchecked(&dev_priv->irq_received);
33334
33335 if (IS_IGDNG(dev))
33336 return igdng_irq_handler(dev);
33337 @@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
33338 {
33339 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
33340
33341 - atomic_set(&dev_priv->irq_received, 0);
33342 + atomic_set_unchecked(&dev_priv->irq_received, 0);
33343
33344 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
33345 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
33346 diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
33347 index 5d9c6a7..d1b0e29 100644
33348 --- a/drivers/gpu/drm/i915/intel_sdvo.c
33349 +++ b/drivers/gpu/drm/i915/intel_sdvo.c
33350 @@ -2795,7 +2795,9 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
33351 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
33352
33353 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
33354 - intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
33355 + pax_open_kernel();
33356 + *(void **)&intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
33357 + pax_close_kernel();
33358
33359 /* Read the regs to test if we can talk to the device */
33360 for (i = 0; i < 0x40; i++) {
33361 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
33362 index be6c6b9..8615d9c 100644
33363 --- a/drivers/gpu/drm/mga/mga_drv.h
33364 +++ b/drivers/gpu/drm/mga/mga_drv.h
33365 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
33366 u32 clear_cmd;
33367 u32 maccess;
33368
33369 - atomic_t vbl_received; /**< Number of vblanks received. */
33370 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
33371 wait_queue_head_t fence_queue;
33372 - atomic_t last_fence_retired;
33373 + atomic_unchecked_t last_fence_retired;
33374 u32 next_fence_to_post;
33375
33376 unsigned int fb_cpp;
33377 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
33378 index daa6041..a28a5da 100644
33379 --- a/drivers/gpu/drm/mga/mga_irq.c
33380 +++ b/drivers/gpu/drm/mga/mga_irq.c
33381 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
33382 if (crtc != 0)
33383 return 0;
33384
33385 - return atomic_read(&dev_priv->vbl_received);
33386 + return atomic_read_unchecked(&dev_priv->vbl_received);
33387 }
33388
33389
33390 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
33391 /* VBLANK interrupt */
33392 if (status & MGA_VLINEPEN) {
33393 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
33394 - atomic_inc(&dev_priv->vbl_received);
33395 + atomic_inc_unchecked(&dev_priv->vbl_received);
33396 drm_handle_vblank(dev, 0);
33397 handled = 1;
33398 }
33399 @@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
33400 MGA_WRITE(MGA_PRIMEND, prim_end);
33401 }
33402
33403 - atomic_inc(&dev_priv->last_fence_retired);
33404 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
33405 DRM_WAKEUP(&dev_priv->fence_queue);
33406 handled = 1;
33407 }
33408 @@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
33409 * using fences.
33410 */
33411 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
33412 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
33413 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
33414 - *sequence) <= (1 << 23)));
33415
33416 *sequence = cur_fence;
33417 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
33418 index 4c39a40..b22a9ea 100644
33419 --- a/drivers/gpu/drm/r128/r128_cce.c
33420 +++ b/drivers/gpu/drm/r128/r128_cce.c
33421 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
33422
33423 /* GH: Simple idle check.
33424 */
33425 - atomic_set(&dev_priv->idle_count, 0);
33426 + atomic_set_unchecked(&dev_priv->idle_count, 0);
33427
33428 /* We don't support anything other than bus-mastering ring mode,
33429 * but the ring can be in either AGP or PCI space for the ring
33430 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
33431 index 3c60829..4faf484 100644
33432 --- a/drivers/gpu/drm/r128/r128_drv.h
33433 +++ b/drivers/gpu/drm/r128/r128_drv.h
33434 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
33435 int is_pci;
33436 unsigned long cce_buffers_offset;
33437
33438 - atomic_t idle_count;
33439 + atomic_unchecked_t idle_count;
33440
33441 int page_flipping;
33442 int current_page;
33443 u32 crtc_offset;
33444 u32 crtc_offset_cntl;
33445
33446 - atomic_t vbl_received;
33447 + atomic_unchecked_t vbl_received;
33448
33449 u32 color_fmt;
33450 unsigned int front_offset;
33451 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
33452 index 69810fb..97bf17a 100644
33453 --- a/drivers/gpu/drm/r128/r128_irq.c
33454 +++ b/drivers/gpu/drm/r128/r128_irq.c
33455 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
33456 if (crtc != 0)
33457 return 0;
33458
33459 - return atomic_read(&dev_priv->vbl_received);
33460 + return atomic_read_unchecked(&dev_priv->vbl_received);
33461 }
33462
33463 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33464 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33465 /* VBLANK interrupt */
33466 if (status & R128_CRTC_VBLANK_INT) {
33467 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
33468 - atomic_inc(&dev_priv->vbl_received);
33469 + atomic_inc_unchecked(&dev_priv->vbl_received);
33470 drm_handle_vblank(dev, 0);
33471 return IRQ_HANDLED;
33472 }
33473 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
33474 index af2665c..51922d2 100644
33475 --- a/drivers/gpu/drm/r128/r128_state.c
33476 +++ b/drivers/gpu/drm/r128/r128_state.c
33477 @@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_private_t * dev_priv,
33478
33479 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
33480 {
33481 - if (atomic_read(&dev_priv->idle_count) == 0) {
33482 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
33483 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
33484 } else {
33485 - atomic_set(&dev_priv->idle_count, 0);
33486 + atomic_set_unchecked(&dev_priv->idle_count, 0);
33487 }
33488 }
33489
33490 diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
33491 index dd72b91..8644b3c 100644
33492 --- a/drivers/gpu/drm/radeon/atom.c
33493 +++ b/drivers/gpu/drm/radeon/atom.c
33494 @@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
33495 char name[512];
33496 int i;
33497
33498 + pax_track_stack();
33499 +
33500 ctx->card = card;
33501 ctx->bios = bios;
33502
33503 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
33504 index 0d79577..efaa7a5 100644
33505 --- a/drivers/gpu/drm/radeon/mkregtable.c
33506 +++ b/drivers/gpu/drm/radeon/mkregtable.c
33507 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
33508 regex_t mask_rex;
33509 regmatch_t match[4];
33510 char buf[1024];
33511 - size_t end;
33512 + long end;
33513 int len;
33514 int done = 0;
33515 int r;
33516 unsigned o;
33517 struct offset *offset;
33518 char last_reg_s[10];
33519 - int last_reg;
33520 + unsigned long last_reg;
33521
33522 if (regcomp
33523 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
33524 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
33525 index 6735213..38c2c67 100644
33526 --- a/drivers/gpu/drm/radeon/radeon.h
33527 +++ b/drivers/gpu/drm/radeon/radeon.h
33528 @@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device *rdev);
33529 */
33530 struct radeon_fence_driver {
33531 uint32_t scratch_reg;
33532 - atomic_t seq;
33533 + atomic_unchecked_t seq;
33534 uint32_t last_seq;
33535 unsigned long count_timeout;
33536 wait_queue_head_t queue;
33537 @@ -640,7 +640,7 @@ struct radeon_asic {
33538 uint32_t offset, uint32_t obj_size);
33539 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
33540 void (*bandwidth_update)(struct radeon_device *rdev);
33541 -};
33542 +} __no_const;
33543
33544 /*
33545 * Asic structures
33546 diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
33547 index 4e928b9..d8b6008 100644
33548 --- a/drivers/gpu/drm/radeon/radeon_atombios.c
33549 +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
33550 @@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
33551 bool linkb;
33552 struct radeon_i2c_bus_rec ddc_bus;
33553
33554 + pax_track_stack();
33555 +
33556 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
33557
33558 if (data_offset == 0)
33559 @@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_object_id(struct drm_device *dev,
33560 }
33561 }
33562
33563 -struct bios_connector {
33564 +static struct bios_connector {
33565 bool valid;
33566 uint16_t line_mux;
33567 uint16_t devices;
33568 int connector_type;
33569 struct radeon_i2c_bus_rec ddc_bus;
33570 -};
33571 +} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
33572
33573 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
33574 drm_device
33575 @@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
33576 uint8_t dac;
33577 union atom_supported_devices *supported_devices;
33578 int i, j;
33579 - struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
33580
33581 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
33582
33583 diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
33584 index 083a181..ccccae0 100644
33585 --- a/drivers/gpu/drm/radeon/radeon_display.c
33586 +++ b/drivers/gpu/drm/radeon/radeon_display.c
33587 @@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
33588
33589 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
33590 error = freq - current_freq;
33591 - error = error < 0 ? 0xffffffff : error;
33592 + error = (int32_t)error < 0 ? 0xffffffff : error;
33593 } else
33594 error = abs(current_freq - freq);
33595 vco_diff = abs(vco - best_vco);
33596 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
33597 index 76e4070..193fa7f 100644
33598 --- a/drivers/gpu/drm/radeon/radeon_drv.h
33599 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
33600 @@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
33601
33602 /* SW interrupt */
33603 wait_queue_head_t swi_queue;
33604 - atomic_t swi_emitted;
33605 + atomic_unchecked_t swi_emitted;
33606 int vblank_crtc;
33607 uint32_t irq_enable_reg;
33608 uint32_t r500_disp_irq_reg;
33609 diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
33610 index 3beb26d..6ce9c4a 100644
33611 --- a/drivers/gpu/drm/radeon/radeon_fence.c
33612 +++ b/drivers/gpu/drm/radeon/radeon_fence.c
33613 @@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
33614 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
33615 return 0;
33616 }
33617 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
33618 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
33619 if (!rdev->cp.ready) {
33620 /* FIXME: cp is not running assume everythings is done right
33621 * away
33622 @@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
33623 return r;
33624 }
33625 WREG32(rdev->fence_drv.scratch_reg, 0);
33626 - atomic_set(&rdev->fence_drv.seq, 0);
33627 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
33628 INIT_LIST_HEAD(&rdev->fence_drv.created);
33629 INIT_LIST_HEAD(&rdev->fence_drv.emited);
33630 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
33631 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
33632 index a1bf11d..4a123c0 100644
33633 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
33634 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
33635 @@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
33636 request = compat_alloc_user_space(sizeof(*request));
33637 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
33638 || __put_user(req32.param, &request->param)
33639 - || __put_user((void __user *)(unsigned long)req32.value,
33640 + || __put_user((unsigned long)req32.value,
33641 &request->value))
33642 return -EFAULT;
33643
33644 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
33645 index b79ecc4..8dab92d 100644
33646 --- a/drivers/gpu/drm/radeon/radeon_irq.c
33647 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
33648 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
33649 unsigned int ret;
33650 RING_LOCALS;
33651
33652 - atomic_inc(&dev_priv->swi_emitted);
33653 - ret = atomic_read(&dev_priv->swi_emitted);
33654 + atomic_inc_unchecked(&dev_priv->swi_emitted);
33655 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
33656
33657 BEGIN_RING(4);
33658 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
33659 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
33660 drm_radeon_private_t *dev_priv =
33661 (drm_radeon_private_t *) dev->dev_private;
33662
33663 - atomic_set(&dev_priv->swi_emitted, 0);
33664 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
33665 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
33666
33667 dev->max_vblank_count = 0x001fffff;
33668 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
33669 index 4747910..48ca4b3 100644
33670 --- a/drivers/gpu/drm/radeon/radeon_state.c
33671 +++ b/drivers/gpu/drm/radeon/radeon_state.c
33672 @@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
33673 {
33674 drm_radeon_private_t *dev_priv = dev->dev_private;
33675 drm_radeon_getparam_t *param = data;
33676 - int value;
33677 + int value = 0;
33678
33679 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
33680
33681 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
33682 index 1381e06..0e53b17 100644
33683 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
33684 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
33685 @@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_device *rdev)
33686 DRM_INFO("radeon: ttm finalized\n");
33687 }
33688
33689 -static struct vm_operations_struct radeon_ttm_vm_ops;
33690 -static const struct vm_operations_struct *ttm_vm_ops = NULL;
33691 -
33692 -static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33693 -{
33694 - struct ttm_buffer_object *bo;
33695 - int r;
33696 -
33697 - bo = (struct ttm_buffer_object *)vma->vm_private_data;
33698 - if (bo == NULL) {
33699 - return VM_FAULT_NOPAGE;
33700 - }
33701 - r = ttm_vm_ops->fault(vma, vmf);
33702 - return r;
33703 -}
33704 -
33705 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
33706 {
33707 struct drm_file *file_priv;
33708 struct radeon_device *rdev;
33709 - int r;
33710
33711 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
33712 return drm_mmap(filp, vma);
33713 @@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
33714
33715 file_priv = (struct drm_file *)filp->private_data;
33716 rdev = file_priv->minor->dev->dev_private;
33717 - if (rdev == NULL) {
33718 + if (!rdev)
33719 return -EINVAL;
33720 - }
33721 - r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
33722 - if (unlikely(r != 0)) {
33723 - return r;
33724 - }
33725 - if (unlikely(ttm_vm_ops == NULL)) {
33726 - ttm_vm_ops = vma->vm_ops;
33727 - radeon_ttm_vm_ops = *ttm_vm_ops;
33728 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
33729 - }
33730 - vma->vm_ops = &radeon_ttm_vm_ops;
33731 - return 0;
33732 + return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
33733 }
33734
33735
33736 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
33737 index b12ff76..0bd0c6e 100644
33738 --- a/drivers/gpu/drm/radeon/rs690.c
33739 +++ b/drivers/gpu/drm/radeon/rs690.c
33740 @@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
33741 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
33742 rdev->pm.sideport_bandwidth.full)
33743 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
33744 - read_delay_latency.full = rfixed_const(370 * 800 * 1000);
33745 + read_delay_latency.full = rfixed_const(800 * 1000);
33746 read_delay_latency.full = rfixed_div(read_delay_latency,
33747 rdev->pm.igp_sideport_mclk);
33748 + a.full = rfixed_const(370);
33749 + read_delay_latency.full = rfixed_mul(read_delay_latency, a);
33750 } else {
33751 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
33752 rdev->pm.k8_bandwidth.full)
33753 diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
33754 index 0ed436e..e6e7ce3 100644
33755 --- a/drivers/gpu/drm/ttm/ttm_bo.c
33756 +++ b/drivers/gpu/drm/ttm/ttm_bo.c
33757 @@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_attrs[] = {
33758 NULL
33759 };
33760
33761 -static struct sysfs_ops ttm_bo_global_ops = {
33762 +static const struct sysfs_ops ttm_bo_global_ops = {
33763 .show = &ttm_bo_global_show
33764 };
33765
33766 diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
33767 index 1c040d0..f9e4af8 100644
33768 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
33769 +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
33770 @@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33771 {
33772 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
33773 vma->vm_private_data;
33774 - struct ttm_bo_device *bdev = bo->bdev;
33775 + struct ttm_bo_device *bdev;
33776 unsigned long bus_base;
33777 unsigned long bus_offset;
33778 unsigned long bus_size;
33779 @@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33780 unsigned long address = (unsigned long)vmf->virtual_address;
33781 int retval = VM_FAULT_NOPAGE;
33782
33783 + if (!bo)
33784 + return VM_FAULT_NOPAGE;
33785 + bdev = bo->bdev;
33786 +
33787 /*
33788 * Work around locking order reversal in fault / nopfn
33789 * between mmap_sem and bo_reserve: Perform a trylock operation
33790 diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c
33791 index b170071..28ae90e 100644
33792 --- a/drivers/gpu/drm/ttm/ttm_global.c
33793 +++ b/drivers/gpu/drm/ttm/ttm_global.c
33794 @@ -36,7 +36,7 @@
33795 struct ttm_global_item {
33796 struct mutex mutex;
33797 void *object;
33798 - int refcount;
33799 + atomic_t refcount;
33800 };
33801
33802 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
33803 @@ -49,7 +49,7 @@ void ttm_global_init(void)
33804 struct ttm_global_item *item = &glob[i];
33805 mutex_init(&item->mutex);
33806 item->object = NULL;
33807 - item->refcount = 0;
33808 + atomic_set(&item->refcount, 0);
33809 }
33810 }
33811
33812 @@ -59,7 +59,7 @@ void ttm_global_release(void)
33813 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
33814 struct ttm_global_item *item = &glob[i];
33815 BUG_ON(item->object != NULL);
33816 - BUG_ON(item->refcount != 0);
33817 + BUG_ON(atomic_read(&item->refcount) != 0);
33818 }
33819 }
33820
33821 @@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
33822 void *object;
33823
33824 mutex_lock(&item->mutex);
33825 - if (item->refcount == 0) {
33826 + if (atomic_read(&item->refcount) == 0) {
33827 item->object = kzalloc(ref->size, GFP_KERNEL);
33828 if (unlikely(item->object == NULL)) {
33829 ret = -ENOMEM;
33830 @@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
33831 goto out_err;
33832
33833 }
33834 - ++item->refcount;
33835 + atomic_inc(&item->refcount);
33836 ref->object = item->object;
33837 object = item->object;
33838 mutex_unlock(&item->mutex);
33839 @@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_global_reference *ref)
33840 struct ttm_global_item *item = &glob[ref->global_type];
33841
33842 mutex_lock(&item->mutex);
33843 - BUG_ON(item->refcount == 0);
33844 + BUG_ON(atomic_read(&item->refcount) == 0);
33845 BUG_ON(ref->object != item->object);
33846 - if (--item->refcount == 0) {
33847 + if (atomic_dec_and_test(&item->refcount)) {
33848 ref->release(ref);
33849 item->object = NULL;
33850 }
33851 diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
33852 index 072c281..d8ef483 100644
33853 --- a/drivers/gpu/drm/ttm/ttm_memory.c
33854 +++ b/drivers/gpu/drm/ttm/ttm_memory.c
33855 @@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_attrs[] = {
33856 NULL
33857 };
33858
33859 -static struct sysfs_ops ttm_mem_zone_ops = {
33860 +static const struct sysfs_ops ttm_mem_zone_ops = {
33861 .show = &ttm_mem_zone_show,
33862 .store = &ttm_mem_zone_store
33863 };
33864 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
33865 index cafcb84..b8e66cc 100644
33866 --- a/drivers/gpu/drm/via/via_drv.h
33867 +++ b/drivers/gpu/drm/via/via_drv.h
33868 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
33869 typedef uint32_t maskarray_t[5];
33870
33871 typedef struct drm_via_irq {
33872 - atomic_t irq_received;
33873 + atomic_unchecked_t irq_received;
33874 uint32_t pending_mask;
33875 uint32_t enable_mask;
33876 wait_queue_head_t irq_queue;
33877 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
33878 struct timeval last_vblank;
33879 int last_vblank_valid;
33880 unsigned usec_per_vblank;
33881 - atomic_t vbl_received;
33882 + atomic_unchecked_t vbl_received;
33883 drm_via_state_t hc_state;
33884 char pci_buf[VIA_PCI_BUF_SIZE];
33885 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
33886 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
33887 index 5935b88..127a8a6 100644
33888 --- a/drivers/gpu/drm/via/via_irq.c
33889 +++ b/drivers/gpu/drm/via/via_irq.c
33890 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
33891 if (crtc != 0)
33892 return 0;
33893
33894 - return atomic_read(&dev_priv->vbl_received);
33895 + return atomic_read_unchecked(&dev_priv->vbl_received);
33896 }
33897
33898 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33899 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33900
33901 status = VIA_READ(VIA_REG_INTERRUPT);
33902 if (status & VIA_IRQ_VBLANK_PENDING) {
33903 - atomic_inc(&dev_priv->vbl_received);
33904 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
33905 + atomic_inc_unchecked(&dev_priv->vbl_received);
33906 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
33907 do_gettimeofday(&cur_vblank);
33908 if (dev_priv->last_vblank_valid) {
33909 dev_priv->usec_per_vblank =
33910 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33911 dev_priv->last_vblank = cur_vblank;
33912 dev_priv->last_vblank_valid = 1;
33913 }
33914 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
33915 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
33916 DRM_DEBUG("US per vblank is: %u\n",
33917 dev_priv->usec_per_vblank);
33918 }
33919 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33920
33921 for (i = 0; i < dev_priv->num_irqs; ++i) {
33922 if (status & cur_irq->pending_mask) {
33923 - atomic_inc(&cur_irq->irq_received);
33924 + atomic_inc_unchecked(&cur_irq->irq_received);
33925 DRM_WAKEUP(&cur_irq->irq_queue);
33926 handled = 1;
33927 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
33928 @@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
33929 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
33930 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
33931 masks[irq][4]));
33932 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
33933 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
33934 } else {
33935 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
33936 (((cur_irq_sequence =
33937 - atomic_read(&cur_irq->irq_received)) -
33938 + atomic_read_unchecked(&cur_irq->irq_received)) -
33939 *sequence) <= (1 << 23)));
33940 }
33941 *sequence = cur_irq_sequence;
33942 @@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct drm_device * dev)
33943 }
33944
33945 for (i = 0; i < dev_priv->num_irqs; ++i) {
33946 - atomic_set(&cur_irq->irq_received, 0);
33947 + atomic_set_unchecked(&cur_irq->irq_received, 0);
33948 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
33949 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
33950 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
33951 @@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
33952 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
33953 case VIA_IRQ_RELATIVE:
33954 irqwait->request.sequence +=
33955 - atomic_read(&cur_irq->irq_received);
33956 + atomic_read_unchecked(&cur_irq->irq_received);
33957 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
33958 case VIA_IRQ_ABSOLUTE:
33959 break;
33960 diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
33961 index aa8688d..6a0140c 100644
33962 --- a/drivers/gpu/vga/vgaarb.c
33963 +++ b/drivers/gpu/vga/vgaarb.c
33964 @@ -894,14 +894,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
33965 uc = &priv->cards[i];
33966 }
33967
33968 - if (!uc)
33969 - return -EINVAL;
33970 + if (!uc) {
33971 + ret_val = -EINVAL;
33972 + goto done;
33973 + }
33974
33975 - if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0)
33976 - return -EINVAL;
33977 + if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
33978 + ret_val = -EINVAL;
33979 + goto done;
33980 + }
33981
33982 - if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0)
33983 - return -EINVAL;
33984 + if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
33985 + ret_val = -EINVAL;
33986 + goto done;
33987 + }
33988
33989 vga_put(pdev, io_state);
33990
33991 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
33992 index f3f1415..e561d90 100644
33993 --- a/drivers/hid/hid-core.c
33994 +++ b/drivers/hid/hid-core.c
33995 @@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device *hdev)
33996
33997 int hid_add_device(struct hid_device *hdev)
33998 {
33999 - static atomic_t id = ATOMIC_INIT(0);
34000 + static atomic_unchecked_t id = ATOMIC_INIT(0);
34001 int ret;
34002
34003 if (WARN_ON(hdev->status & HID_STAT_ADDED))
34004 @@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hdev)
34005 /* XXX hack, any other cleaner solution after the driver core
34006 * is converted to allow more than 20 bytes as the device name? */
34007 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
34008 - hdev->vendor, hdev->product, atomic_inc_return(&id));
34009 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
34010
34011 ret = device_add(&hdev->dev);
34012 if (!ret)
34013 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
34014 index 8b6ee24..70f657d 100644
34015 --- a/drivers/hid/usbhid/hiddev.c
34016 +++ b/drivers/hid/usbhid/hiddev.c
34017 @@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
34018 return put_user(HID_VERSION, (int __user *)arg);
34019
34020 case HIDIOCAPPLICATION:
34021 - if (arg < 0 || arg >= hid->maxapplication)
34022 + if (arg >= hid->maxapplication)
34023 return -EINVAL;
34024
34025 for (i = 0; i < hid->maxcollection; i++)
34026 diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
34027 index 5d5ed69..f40533e 100644
34028 --- a/drivers/hwmon/lis3lv02d.c
34029 +++ b/drivers/hwmon/lis3lv02d.c
34030 @@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
34031 * the lid is closed. This leads to interrupts as soon as a little move
34032 * is done.
34033 */
34034 - atomic_inc(&lis3_dev.count);
34035 + atomic_inc_unchecked(&lis3_dev.count);
34036
34037 wake_up_interruptible(&lis3_dev.misc_wait);
34038 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
34039 @@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
34040 if (test_and_set_bit(0, &lis3_dev.misc_opened))
34041 return -EBUSY; /* already open */
34042
34043 - atomic_set(&lis3_dev.count, 0);
34044 + atomic_set_unchecked(&lis3_dev.count, 0);
34045
34046 /*
34047 * The sensor can generate interrupts for free-fall and direction
34048 @@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
34049 add_wait_queue(&lis3_dev.misc_wait, &wait);
34050 while (true) {
34051 set_current_state(TASK_INTERRUPTIBLE);
34052 - data = atomic_xchg(&lis3_dev.count, 0);
34053 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
34054 if (data)
34055 break;
34056
34057 @@ -244,7 +244,7 @@ out:
34058 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
34059 {
34060 poll_wait(file, &lis3_dev.misc_wait, wait);
34061 - if (atomic_read(&lis3_dev.count))
34062 + if (atomic_read_unchecked(&lis3_dev.count))
34063 return POLLIN | POLLRDNORM;
34064 return 0;
34065 }
34066 diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
34067 index 7cdd76f..fe0efdf 100644
34068 --- a/drivers/hwmon/lis3lv02d.h
34069 +++ b/drivers/hwmon/lis3lv02d.h
34070 @@ -201,7 +201,7 @@ struct lis3lv02d {
34071
34072 struct input_polled_dev *idev; /* input device */
34073 struct platform_device *pdev; /* platform device */
34074 - atomic_t count; /* interrupt count after last read */
34075 + atomic_unchecked_t count; /* interrupt count after last read */
34076 int xcalib; /* calibrated null value for x */
34077 int ycalib; /* calibrated null value for y */
34078 int zcalib; /* calibrated null value for z */
34079 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
34080 index 2040507..706ec1e 100644
34081 --- a/drivers/hwmon/sht15.c
34082 +++ b/drivers/hwmon/sht15.c
34083 @@ -112,7 +112,7 @@ struct sht15_data {
34084 int supply_uV;
34085 int supply_uV_valid;
34086 struct work_struct update_supply_work;
34087 - atomic_t interrupt_handled;
34088 + atomic_unchecked_t interrupt_handled;
34089 };
34090
34091 /**
34092 @@ -245,13 +245,13 @@ static inline int sht15_update_single_val(struct sht15_data *data,
34093 return ret;
34094
34095 gpio_direction_input(data->pdata->gpio_data);
34096 - atomic_set(&data->interrupt_handled, 0);
34097 + atomic_set_unchecked(&data->interrupt_handled, 0);
34098
34099 enable_irq(gpio_to_irq(data->pdata->gpio_data));
34100 if (gpio_get_value(data->pdata->gpio_data) == 0) {
34101 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
34102 /* Only relevant if the interrupt hasn't occured. */
34103 - if (!atomic_read(&data->interrupt_handled))
34104 + if (!atomic_read_unchecked(&data->interrupt_handled))
34105 schedule_work(&data->read_work);
34106 }
34107 ret = wait_event_timeout(data->wait_queue,
34108 @@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
34109 struct sht15_data *data = d;
34110 /* First disable the interrupt */
34111 disable_irq_nosync(irq);
34112 - atomic_inc(&data->interrupt_handled);
34113 + atomic_inc_unchecked(&data->interrupt_handled);
34114 /* Then schedule a reading work struct */
34115 if (data->flag != SHT15_READING_NOTHING)
34116 schedule_work(&data->read_work);
34117 @@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
34118 here as could have gone low in meantime so verify
34119 it hasn't!
34120 */
34121 - atomic_set(&data->interrupt_handled, 0);
34122 + atomic_set_unchecked(&data->interrupt_handled, 0);
34123 enable_irq(gpio_to_irq(data->pdata->gpio_data));
34124 /* If still not occured or another handler has been scheduled */
34125 if (gpio_get_value(data->pdata->gpio_data)
34126 - || atomic_read(&data->interrupt_handled))
34127 + || atomic_read_unchecked(&data->interrupt_handled))
34128 return;
34129 }
34130 /* Read the data back from the device */
34131 diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
34132 index 97851c5..cb40626 100644
34133 --- a/drivers/hwmon/w83791d.c
34134 +++ b/drivers/hwmon/w83791d.c
34135 @@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_client *client, int kind,
34136 struct i2c_board_info *info);
34137 static int w83791d_remove(struct i2c_client *client);
34138
34139 -static int w83791d_read(struct i2c_client *client, u8 register);
34140 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
34141 +static int w83791d_read(struct i2c_client *client, u8 reg);
34142 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
34143 static struct w83791d_data *w83791d_update_device(struct device *dev);
34144
34145 #ifdef DEBUG
34146 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
34147 index 378fcb5..5e91fa8 100644
34148 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
34149 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
34150 @@ -43,7 +43,7 @@
34151 extern struct i2c_adapter amd756_smbus;
34152
34153 static struct i2c_adapter *s4882_adapter;
34154 -static struct i2c_algorithm *s4882_algo;
34155 +static i2c_algorithm_no_const *s4882_algo;
34156
34157 /* Wrapper access functions for multiplexed SMBus */
34158 static DEFINE_MUTEX(amd756_lock);
34159 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
34160 index 29015eb..af2d8e9 100644
34161 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
34162 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
34163 @@ -41,7 +41,7 @@
34164 extern struct i2c_adapter *nforce2_smbus;
34165
34166 static struct i2c_adapter *s4985_adapter;
34167 -static struct i2c_algorithm *s4985_algo;
34168 +static i2c_algorithm_no_const *s4985_algo;
34169
34170 /* Wrapper access functions for multiplexed SMBus */
34171 static DEFINE_MUTEX(nforce2_lock);
34172 diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
34173 index 878f8ec..12376fc 100644
34174 --- a/drivers/ide/aec62xx.c
34175 +++ b/drivers/ide/aec62xx.c
34176 @@ -180,7 +180,7 @@ static const struct ide_port_ops atp86x_port_ops = {
34177 .cable_detect = atp86x_cable_detect,
34178 };
34179
34180 -static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
34181 +static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
34182 { /* 0: AEC6210 */
34183 .name = DRV_NAME,
34184 .init_chipset = init_chipset_aec62xx,
34185 diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
34186 index e59b6de..4b4fc65 100644
34187 --- a/drivers/ide/alim15x3.c
34188 +++ b/drivers/ide/alim15x3.c
34189 @@ -509,7 +509,7 @@ static const struct ide_dma_ops ali_dma_ops = {
34190 .dma_sff_read_status = ide_dma_sff_read_status,
34191 };
34192
34193 -static const struct ide_port_info ali15x3_chipset __devinitdata = {
34194 +static const struct ide_port_info ali15x3_chipset __devinitconst = {
34195 .name = DRV_NAME,
34196 .init_chipset = init_chipset_ali15x3,
34197 .init_hwif = init_hwif_ali15x3,
34198 diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
34199 index 628cd2e..087a414 100644
34200 --- a/drivers/ide/amd74xx.c
34201 +++ b/drivers/ide/amd74xx.c
34202 @@ -221,7 +221,7 @@ static const struct ide_port_ops amd_port_ops = {
34203 .udma_mask = udma, \
34204 }
34205
34206 -static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
34207 +static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
34208 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
34209 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
34210 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
34211 diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
34212 index 837322b..837fd71 100644
34213 --- a/drivers/ide/atiixp.c
34214 +++ b/drivers/ide/atiixp.c
34215 @@ -137,7 +137,7 @@ static const struct ide_port_ops atiixp_port_ops = {
34216 .cable_detect = atiixp_cable_detect,
34217 };
34218
34219 -static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
34220 +static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
34221 { /* 0: IXP200/300/400/700 */
34222 .name = DRV_NAME,
34223 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
34224 diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
34225 index ca0c46f..d55318a 100644
34226 --- a/drivers/ide/cmd64x.c
34227 +++ b/drivers/ide/cmd64x.c
34228 @@ -372,7 +372,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
34229 .dma_sff_read_status = ide_dma_sff_read_status,
34230 };
34231
34232 -static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
34233 +static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
34234 { /* 0: CMD643 */
34235 .name = DRV_NAME,
34236 .init_chipset = init_chipset_cmd64x,
34237 diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
34238 index 09f98ed..cebc5bc 100644
34239 --- a/drivers/ide/cs5520.c
34240 +++ b/drivers/ide/cs5520.c
34241 @@ -93,7 +93,7 @@ static const struct ide_port_ops cs5520_port_ops = {
34242 .set_dma_mode = cs5520_set_dma_mode,
34243 };
34244
34245 -static const struct ide_port_info cyrix_chipset __devinitdata = {
34246 +static const struct ide_port_info cyrix_chipset __devinitconst = {
34247 .name = DRV_NAME,
34248 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
34249 .port_ops = &cs5520_port_ops,
34250 diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
34251 index 40bf05e..7d58ca0 100644
34252 --- a/drivers/ide/cs5530.c
34253 +++ b/drivers/ide/cs5530.c
34254 @@ -244,7 +244,7 @@ static const struct ide_port_ops cs5530_port_ops = {
34255 .udma_filter = cs5530_udma_filter,
34256 };
34257
34258 -static const struct ide_port_info cs5530_chipset __devinitdata = {
34259 +static const struct ide_port_info cs5530_chipset __devinitconst = {
34260 .name = DRV_NAME,
34261 .init_chipset = init_chipset_cs5530,
34262 .init_hwif = init_hwif_cs5530,
34263 diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
34264 index 983d957..53e6172 100644
34265 --- a/drivers/ide/cs5535.c
34266 +++ b/drivers/ide/cs5535.c
34267 @@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
34268 .cable_detect = cs5535_cable_detect,
34269 };
34270
34271 -static const struct ide_port_info cs5535_chipset __devinitdata = {
34272 +static const struct ide_port_info cs5535_chipset __devinitconst = {
34273 .name = DRV_NAME,
34274 .port_ops = &cs5535_port_ops,
34275 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
34276 diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
34277 index 74fc540..8e933d8 100644
34278 --- a/drivers/ide/cy82c693.c
34279 +++ b/drivers/ide/cy82c693.c
34280 @@ -288,7 +288,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
34281 .set_dma_mode = cy82c693_set_dma_mode,
34282 };
34283
34284 -static const struct ide_port_info cy82c693_chipset __devinitdata = {
34285 +static const struct ide_port_info cy82c693_chipset __devinitconst = {
34286 .name = DRV_NAME,
34287 .init_iops = init_iops_cy82c693,
34288 .port_ops = &cy82c693_port_ops,
34289 diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
34290 index 7ce68ef..e78197d 100644
34291 --- a/drivers/ide/hpt366.c
34292 +++ b/drivers/ide/hpt366.c
34293 @@ -507,7 +507,7 @@ static struct hpt_timings hpt37x_timings = {
34294 }
34295 };
34296
34297 -static const struct hpt_info hpt36x __devinitdata = {
34298 +static const struct hpt_info hpt36x __devinitconst = {
34299 .chip_name = "HPT36x",
34300 .chip_type = HPT36x,
34301 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
34302 @@ -515,7 +515,7 @@ static const struct hpt_info hpt36x __devinitdata = {
34303 .timings = &hpt36x_timings
34304 };
34305
34306 -static const struct hpt_info hpt370 __devinitdata = {
34307 +static const struct hpt_info hpt370 __devinitconst = {
34308 .chip_name = "HPT370",
34309 .chip_type = HPT370,
34310 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
34311 @@ -523,7 +523,7 @@ static const struct hpt_info hpt370 __devinitdata = {
34312 .timings = &hpt37x_timings
34313 };
34314
34315 -static const struct hpt_info hpt370a __devinitdata = {
34316 +static const struct hpt_info hpt370a __devinitconst = {
34317 .chip_name = "HPT370A",
34318 .chip_type = HPT370A,
34319 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
34320 @@ -531,7 +531,7 @@ static const struct hpt_info hpt370a __devinitdata = {
34321 .timings = &hpt37x_timings
34322 };
34323
34324 -static const struct hpt_info hpt374 __devinitdata = {
34325 +static const struct hpt_info hpt374 __devinitconst = {
34326 .chip_name = "HPT374",
34327 .chip_type = HPT374,
34328 .udma_mask = ATA_UDMA5,
34329 @@ -539,7 +539,7 @@ static const struct hpt_info hpt374 __devinitdata = {
34330 .timings = &hpt37x_timings
34331 };
34332
34333 -static const struct hpt_info hpt372 __devinitdata = {
34334 +static const struct hpt_info hpt372 __devinitconst = {
34335 .chip_name = "HPT372",
34336 .chip_type = HPT372,
34337 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34338 @@ -547,7 +547,7 @@ static const struct hpt_info hpt372 __devinitdata = {
34339 .timings = &hpt37x_timings
34340 };
34341
34342 -static const struct hpt_info hpt372a __devinitdata = {
34343 +static const struct hpt_info hpt372a __devinitconst = {
34344 .chip_name = "HPT372A",
34345 .chip_type = HPT372A,
34346 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34347 @@ -555,7 +555,7 @@ static const struct hpt_info hpt372a __devinitdata = {
34348 .timings = &hpt37x_timings
34349 };
34350
34351 -static const struct hpt_info hpt302 __devinitdata = {
34352 +static const struct hpt_info hpt302 __devinitconst = {
34353 .chip_name = "HPT302",
34354 .chip_type = HPT302,
34355 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34356 @@ -563,7 +563,7 @@ static const struct hpt_info hpt302 __devinitdata = {
34357 .timings = &hpt37x_timings
34358 };
34359
34360 -static const struct hpt_info hpt371 __devinitdata = {
34361 +static const struct hpt_info hpt371 __devinitconst = {
34362 .chip_name = "HPT371",
34363 .chip_type = HPT371,
34364 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34365 @@ -571,7 +571,7 @@ static const struct hpt_info hpt371 __devinitdata = {
34366 .timings = &hpt37x_timings
34367 };
34368
34369 -static const struct hpt_info hpt372n __devinitdata = {
34370 +static const struct hpt_info hpt372n __devinitconst = {
34371 .chip_name = "HPT372N",
34372 .chip_type = HPT372N,
34373 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34374 @@ -579,7 +579,7 @@ static const struct hpt_info hpt372n __devinitdata = {
34375 .timings = &hpt37x_timings
34376 };
34377
34378 -static const struct hpt_info hpt302n __devinitdata = {
34379 +static const struct hpt_info hpt302n __devinitconst = {
34380 .chip_name = "HPT302N",
34381 .chip_type = HPT302N,
34382 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34383 @@ -587,7 +587,7 @@ static const struct hpt_info hpt302n __devinitdata = {
34384 .timings = &hpt37x_timings
34385 };
34386
34387 -static const struct hpt_info hpt371n __devinitdata = {
34388 +static const struct hpt_info hpt371n __devinitconst = {
34389 .chip_name = "HPT371N",
34390 .chip_type = HPT371N,
34391 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34392 @@ -1422,7 +1422,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
34393 .dma_sff_read_status = ide_dma_sff_read_status,
34394 };
34395
34396 -static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
34397 +static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
34398 { /* 0: HPT36x */
34399 .name = DRV_NAME,
34400 .init_chipset = init_chipset_hpt366,
34401 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
34402 index 2de76cc..74186a1 100644
34403 --- a/drivers/ide/ide-cd.c
34404 +++ b/drivers/ide/ide-cd.c
34405 @@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
34406 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
34407 if ((unsigned long)buf & alignment
34408 || blk_rq_bytes(rq) & q->dma_pad_mask
34409 - || object_is_on_stack(buf))
34410 + || object_starts_on_stack(buf))
34411 drive->dma = 0;
34412 }
34413 }
34414 diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
34415 index fefbdfc..62ff465 100644
34416 --- a/drivers/ide/ide-floppy.c
34417 +++ b/drivers/ide/ide-floppy.c
34418 @@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
34419 u8 pc_buf[256], header_len, desc_cnt;
34420 int i, rc = 1, blocks, length;
34421
34422 + pax_track_stack();
34423 +
34424 ide_debug_log(IDE_DBG_FUNC, "enter");
34425
34426 drive->bios_cyl = 0;
34427 diff --git a/drivers/ide/ide-floppy_ioctl.c b/drivers/ide/ide-floppy_ioctl.c
34428 index 9c22882..05f024c 100644
34429 --- a/drivers/ide/ide-floppy_ioctl.c
34430 +++ b/drivers/ide/ide-floppy_ioctl.c
34431 @@ -287,8 +287,7 @@ int ide_floppy_ioctl(ide_drive_t *drive, struct block_device *bdev,
34432 * and CDROM_SEND_PACKET (legacy) ioctls
34433 */
34434 if (cmd != CDROM_SEND_PACKET && cmd != SCSI_IOCTL_SEND_COMMAND)
34435 - err = scsi_cmd_ioctl(bdev->bd_disk->queue, bdev->bd_disk,
34436 - mode, cmd, argp);
34437 + err = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
34438
34439 if (err == -ENOTTY)
34440 err = generic_ide_ioctl(drive, bdev, cmd, arg);
34441 diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
34442 index 39d4e01..11538ce 100644
34443 --- a/drivers/ide/ide-pci-generic.c
34444 +++ b/drivers/ide/ide-pci-generic.c
34445 @@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
34446 .udma_mask = ATA_UDMA6, \
34447 }
34448
34449 -static const struct ide_port_info generic_chipsets[] __devinitdata = {
34450 +static const struct ide_port_info generic_chipsets[] __devinitconst = {
34451 /* 0: Unknown */
34452 DECLARE_GENERIC_PCI_DEV(0),
34453
34454 diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
34455 index 0d266a5..aaca790 100644
34456 --- a/drivers/ide/it8172.c
34457 +++ b/drivers/ide/it8172.c
34458 @@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
34459 .set_dma_mode = it8172_set_dma_mode,
34460 };
34461
34462 -static const struct ide_port_info it8172_port_info __devinitdata = {
34463 +static const struct ide_port_info it8172_port_info __devinitconst = {
34464 .name = DRV_NAME,
34465 .port_ops = &it8172_port_ops,
34466 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
34467 diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
34468 index 4797616..4be488a 100644
34469 --- a/drivers/ide/it8213.c
34470 +++ b/drivers/ide/it8213.c
34471 @@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
34472 .cable_detect = it8213_cable_detect,
34473 };
34474
34475 -static const struct ide_port_info it8213_chipset __devinitdata = {
34476 +static const struct ide_port_info it8213_chipset __devinitconst = {
34477 .name = DRV_NAME,
34478 .enablebits = { {0x41, 0x80, 0x80} },
34479 .port_ops = &it8213_port_ops,
34480 diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
34481 index 51aa745..146ee60 100644
34482 --- a/drivers/ide/it821x.c
34483 +++ b/drivers/ide/it821x.c
34484 @@ -627,7 +627,7 @@ static const struct ide_port_ops it821x_port_ops = {
34485 .cable_detect = it821x_cable_detect,
34486 };
34487
34488 -static const struct ide_port_info it821x_chipset __devinitdata = {
34489 +static const struct ide_port_info it821x_chipset __devinitconst = {
34490 .name = DRV_NAME,
34491 .init_chipset = init_chipset_it821x,
34492 .init_hwif = init_hwif_it821x,
34493 diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
34494 index bf2be64..9270098 100644
34495 --- a/drivers/ide/jmicron.c
34496 +++ b/drivers/ide/jmicron.c
34497 @@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
34498 .cable_detect = jmicron_cable_detect,
34499 };
34500
34501 -static const struct ide_port_info jmicron_chipset __devinitdata = {
34502 +static const struct ide_port_info jmicron_chipset __devinitconst = {
34503 .name = DRV_NAME,
34504 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
34505 .port_ops = &jmicron_port_ops,
34506 diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
34507 index 95327a2..73f78d8 100644
34508 --- a/drivers/ide/ns87415.c
34509 +++ b/drivers/ide/ns87415.c
34510 @@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
34511 .dma_sff_read_status = superio_dma_sff_read_status,
34512 };
34513
34514 -static const struct ide_port_info ns87415_chipset __devinitdata = {
34515 +static const struct ide_port_info ns87415_chipset __devinitconst = {
34516 .name = DRV_NAME,
34517 .init_hwif = init_hwif_ns87415,
34518 .tp_ops = &ns87415_tp_ops,
34519 diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
34520 index f1d70d6..e1de05b 100644
34521 --- a/drivers/ide/opti621.c
34522 +++ b/drivers/ide/opti621.c
34523 @@ -202,7 +202,7 @@ static const struct ide_port_ops opti621_port_ops = {
34524 .set_pio_mode = opti621_set_pio_mode,
34525 };
34526
34527 -static const struct ide_port_info opti621_chipset __devinitdata = {
34528 +static const struct ide_port_info opti621_chipset __devinitconst = {
34529 .name = DRV_NAME,
34530 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
34531 .port_ops = &opti621_port_ops,
34532 diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
34533 index 65ba823..7311f4d 100644
34534 --- a/drivers/ide/pdc202xx_new.c
34535 +++ b/drivers/ide/pdc202xx_new.c
34536 @@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
34537 .udma_mask = udma, \
34538 }
34539
34540 -static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
34541 +static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
34542 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
34543 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
34544 };
34545 diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
34546 index cb812f3..af816ef 100644
34547 --- a/drivers/ide/pdc202xx_old.c
34548 +++ b/drivers/ide/pdc202xx_old.c
34549 @@ -285,7 +285,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
34550 .max_sectors = sectors, \
34551 }
34552
34553 -static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
34554 +static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
34555 { /* 0: PDC20246 */
34556 .name = DRV_NAME,
34557 .init_chipset = init_chipset_pdc202xx,
34558 diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
34559 index bf14f39..15c4b98 100644
34560 --- a/drivers/ide/piix.c
34561 +++ b/drivers/ide/piix.c
34562 @@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
34563 .udma_mask = udma, \
34564 }
34565
34566 -static const struct ide_port_info piix_pci_info[] __devinitdata = {
34567 +static const struct ide_port_info piix_pci_info[] __devinitconst = {
34568 /* 0: MPIIX */
34569 { /*
34570 * MPIIX actually has only a single IDE channel mapped to
34571 diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
34572 index a6414a8..c04173e 100644
34573 --- a/drivers/ide/rz1000.c
34574 +++ b/drivers/ide/rz1000.c
34575 @@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
34576 }
34577 }
34578
34579 -static const struct ide_port_info rz1000_chipset __devinitdata = {
34580 +static const struct ide_port_info rz1000_chipset __devinitconst = {
34581 .name = DRV_NAME,
34582 .host_flags = IDE_HFLAG_NO_DMA,
34583 };
34584 diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
34585 index d467478..9203942 100644
34586 --- a/drivers/ide/sc1200.c
34587 +++ b/drivers/ide/sc1200.c
34588 @@ -290,7 +290,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
34589 .dma_sff_read_status = ide_dma_sff_read_status,
34590 };
34591
34592 -static const struct ide_port_info sc1200_chipset __devinitdata = {
34593 +static const struct ide_port_info sc1200_chipset __devinitconst = {
34594 .name = DRV_NAME,
34595 .port_ops = &sc1200_port_ops,
34596 .dma_ops = &sc1200_dma_ops,
34597 diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
34598 index 1104bb3..59c5194 100644
34599 --- a/drivers/ide/scc_pata.c
34600 +++ b/drivers/ide/scc_pata.c
34601 @@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
34602 .dma_sff_read_status = scc_dma_sff_read_status,
34603 };
34604
34605 -static const struct ide_port_info scc_chipset __devinitdata = {
34606 +static const struct ide_port_info scc_chipset __devinitconst = {
34607 .name = "sccIDE",
34608 .init_iops = init_iops_scc,
34609 .init_dma = scc_init_dma,
34610 diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
34611 index b6554ef..6cc2cc3 100644
34612 --- a/drivers/ide/serverworks.c
34613 +++ b/drivers/ide/serverworks.c
34614 @@ -353,7 +353,7 @@ static const struct ide_port_ops svwks_port_ops = {
34615 .cable_detect = svwks_cable_detect,
34616 };
34617
34618 -static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
34619 +static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
34620 { /* 0: OSB4 */
34621 .name = DRV_NAME,
34622 .init_chipset = init_chipset_svwks,
34623 diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
34624 index ab3db61..afed580 100644
34625 --- a/drivers/ide/setup-pci.c
34626 +++ b/drivers/ide/setup-pci.c
34627 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
34628 int ret, i, n_ports = dev2 ? 4 : 2;
34629 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
34630
34631 + pax_track_stack();
34632 +
34633 for (i = 0; i < n_ports / 2; i++) {
34634 ret = ide_setup_pci_controller(pdev[i], d, !i);
34635 if (ret < 0)
34636 diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
34637 index d95df52..0b03a39 100644
34638 --- a/drivers/ide/siimage.c
34639 +++ b/drivers/ide/siimage.c
34640 @@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
34641 .udma_mask = ATA_UDMA6, \
34642 }
34643
34644 -static const struct ide_port_info siimage_chipsets[] __devinitdata = {
34645 +static const struct ide_port_info siimage_chipsets[] __devinitconst = {
34646 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
34647 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
34648 };
34649 diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
34650 index 3b88eba..ca8699d 100644
34651 --- a/drivers/ide/sis5513.c
34652 +++ b/drivers/ide/sis5513.c
34653 @@ -561,7 +561,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
34654 .cable_detect = sis_cable_detect,
34655 };
34656
34657 -static const struct ide_port_info sis5513_chipset __devinitdata = {
34658 +static const struct ide_port_info sis5513_chipset __devinitconst = {
34659 .name = DRV_NAME,
34660 .init_chipset = init_chipset_sis5513,
34661 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
34662 diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
34663 index d698da4..fca42a4 100644
34664 --- a/drivers/ide/sl82c105.c
34665 +++ b/drivers/ide/sl82c105.c
34666 @@ -319,7 +319,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
34667 .dma_sff_read_status = ide_dma_sff_read_status,
34668 };
34669
34670 -static const struct ide_port_info sl82c105_chipset __devinitdata = {
34671 +static const struct ide_port_info sl82c105_chipset __devinitconst = {
34672 .name = DRV_NAME,
34673 .init_chipset = init_chipset_sl82c105,
34674 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
34675 diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
34676 index 1ccfb40..83d5779 100644
34677 --- a/drivers/ide/slc90e66.c
34678 +++ b/drivers/ide/slc90e66.c
34679 @@ -131,7 +131,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
34680 .cable_detect = slc90e66_cable_detect,
34681 };
34682
34683 -static const struct ide_port_info slc90e66_chipset __devinitdata = {
34684 +static const struct ide_port_info slc90e66_chipset __devinitconst = {
34685 .name = DRV_NAME,
34686 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
34687 .port_ops = &slc90e66_port_ops,
34688 diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
34689 index 05a93d6..5f9e325 100644
34690 --- a/drivers/ide/tc86c001.c
34691 +++ b/drivers/ide/tc86c001.c
34692 @@ -190,7 +190,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
34693 .dma_sff_read_status = ide_dma_sff_read_status,
34694 };
34695
34696 -static const struct ide_port_info tc86c001_chipset __devinitdata = {
34697 +static const struct ide_port_info tc86c001_chipset __devinitconst = {
34698 .name = DRV_NAME,
34699 .init_hwif = init_hwif_tc86c001,
34700 .port_ops = &tc86c001_port_ops,
34701 diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
34702 index 8773c3b..7907d6c 100644
34703 --- a/drivers/ide/triflex.c
34704 +++ b/drivers/ide/triflex.c
34705 @@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
34706 .set_dma_mode = triflex_set_mode,
34707 };
34708
34709 -static const struct ide_port_info triflex_device __devinitdata = {
34710 +static const struct ide_port_info triflex_device __devinitconst = {
34711 .name = DRV_NAME,
34712 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
34713 .port_ops = &triflex_port_ops,
34714 diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
34715 index 4b42ca0..e494a98 100644
34716 --- a/drivers/ide/trm290.c
34717 +++ b/drivers/ide/trm290.c
34718 @@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
34719 .dma_check = trm290_dma_check,
34720 };
34721
34722 -static const struct ide_port_info trm290_chipset __devinitdata = {
34723 +static const struct ide_port_info trm290_chipset __devinitconst = {
34724 .name = DRV_NAME,
34725 .init_hwif = init_hwif_trm290,
34726 .tp_ops = &trm290_tp_ops,
34727 diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
34728 index 028de26..520d5d5 100644
34729 --- a/drivers/ide/via82cxxx.c
34730 +++ b/drivers/ide/via82cxxx.c
34731 @@ -374,7 +374,7 @@ static const struct ide_port_ops via_port_ops = {
34732 .cable_detect = via82cxxx_cable_detect,
34733 };
34734
34735 -static const struct ide_port_info via82cxxx_chipset __devinitdata = {
34736 +static const struct ide_port_info via82cxxx_chipset __devinitconst = {
34737 .name = DRV_NAME,
34738 .init_chipset = init_chipset_via82cxxx,
34739 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
34740 diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
34741 index 2cd00b5..14de699 100644
34742 --- a/drivers/ieee1394/dv1394.c
34743 +++ b/drivers/ieee1394/dv1394.c
34744 @@ -739,7 +739,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
34745 based upon DIF section and sequence
34746 */
34747
34748 -static void inline
34749 +static inline void
34750 frame_put_packet (struct frame *f, struct packet *p)
34751 {
34752 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
34753 diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
34754 index e947d8f..6a966b9 100644
34755 --- a/drivers/ieee1394/hosts.c
34756 +++ b/drivers/ieee1394/hosts.c
34757 @@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command,
34758 }
34759
34760 static struct hpsb_host_driver dummy_driver = {
34761 + .name = "dummy",
34762 .transmit_packet = dummy_transmit_packet,
34763 .devctl = dummy_devctl,
34764 .isoctl = dummy_isoctl
34765 diff --git a/drivers/ieee1394/init_ohci1394_dma.c b/drivers/ieee1394/init_ohci1394_dma.c
34766 index ddaab6e..8d37435 100644
34767 --- a/drivers/ieee1394/init_ohci1394_dma.c
34768 +++ b/drivers/ieee1394/init_ohci1394_dma.c
34769 @@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_controllers(void)
34770 for (func = 0; func < 8; func++) {
34771 u32 class = read_pci_config(num,slot,func,
34772 PCI_CLASS_REVISION);
34773 - if ((class == 0xffffffff))
34774 + if (class == 0xffffffff)
34775 continue; /* No device at this func */
34776
34777 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
34778 diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
34779 index 65c1429..5d8c11f 100644
34780 --- a/drivers/ieee1394/ohci1394.c
34781 +++ b/drivers/ieee1394/ohci1394.c
34782 @@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
34783 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
34784
34785 /* Module Parameters */
34786 -static int phys_dma = 1;
34787 +static int phys_dma;
34788 module_param(phys_dma, int, 0444);
34789 -MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
34790 +MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
34791
34792 static void dma_trm_tasklet(unsigned long data);
34793 static void dma_trm_reset(struct dma_trm_ctx *d);
34794 diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
34795 index f199896..78c9fc8 100644
34796 --- a/drivers/ieee1394/sbp2.c
34797 +++ b/drivers/ieee1394/sbp2.c
34798 @@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 protocol driver");
34799 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
34800 MODULE_LICENSE("GPL");
34801
34802 -static int sbp2_module_init(void)
34803 +static int __init sbp2_module_init(void)
34804 {
34805 int ret;
34806
34807 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
34808 index a5dea6b..0cefe8f 100644
34809 --- a/drivers/infiniband/core/cm.c
34810 +++ b/drivers/infiniband/core/cm.c
34811 @@ -112,7 +112,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
34812
34813 struct cm_counter_group {
34814 struct kobject obj;
34815 - atomic_long_t counter[CM_ATTR_COUNT];
34816 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
34817 };
34818
34819 struct cm_counter_attribute {
34820 @@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm_work *work,
34821 struct ib_mad_send_buf *msg = NULL;
34822 int ret;
34823
34824 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34825 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34826 counter[CM_REQ_COUNTER]);
34827
34828 /* Quick state check to discard duplicate REQs. */
34829 @@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
34830 if (!cm_id_priv)
34831 return;
34832
34833 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34834 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34835 counter[CM_REP_COUNTER]);
34836 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
34837 if (ret)
34838 @@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work *work)
34839 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
34840 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
34841 spin_unlock_irq(&cm_id_priv->lock);
34842 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34843 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34844 counter[CM_RTU_COUNTER]);
34845 goto out;
34846 }
34847 @@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_work *work)
34848 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
34849 dreq_msg->local_comm_id);
34850 if (!cm_id_priv) {
34851 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34852 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34853 counter[CM_DREQ_COUNTER]);
34854 cm_issue_drep(work->port, work->mad_recv_wc);
34855 return -EINVAL;
34856 @@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_work *work)
34857 case IB_CM_MRA_REP_RCVD:
34858 break;
34859 case IB_CM_TIMEWAIT:
34860 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34861 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34862 counter[CM_DREQ_COUNTER]);
34863 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
34864 goto unlock;
34865 @@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_work *work)
34866 cm_free_msg(msg);
34867 goto deref;
34868 case IB_CM_DREQ_RCVD:
34869 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34870 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34871 counter[CM_DREQ_COUNTER]);
34872 goto unlock;
34873 default:
34874 @@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work *work)
34875 ib_modify_mad(cm_id_priv->av.port->mad_agent,
34876 cm_id_priv->msg, timeout)) {
34877 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
34878 - atomic_long_inc(&work->port->
34879 + atomic_long_inc_unchecked(&work->port->
34880 counter_group[CM_RECV_DUPLICATES].
34881 counter[CM_MRA_COUNTER]);
34882 goto out;
34883 @@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work *work)
34884 break;
34885 case IB_CM_MRA_REQ_RCVD:
34886 case IB_CM_MRA_REP_RCVD:
34887 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34888 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34889 counter[CM_MRA_COUNTER]);
34890 /* fall through */
34891 default:
34892 @@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work *work)
34893 case IB_CM_LAP_IDLE:
34894 break;
34895 case IB_CM_MRA_LAP_SENT:
34896 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34897 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34898 counter[CM_LAP_COUNTER]);
34899 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
34900 goto unlock;
34901 @@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work *work)
34902 cm_free_msg(msg);
34903 goto deref;
34904 case IB_CM_LAP_RCVD:
34905 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34906 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34907 counter[CM_LAP_COUNTER]);
34908 goto unlock;
34909 default:
34910 @@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
34911 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
34912 if (cur_cm_id_priv) {
34913 spin_unlock_irq(&cm.lock);
34914 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34915 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34916 counter[CM_SIDR_REQ_COUNTER]);
34917 goto out; /* Duplicate message. */
34918 }
34919 @@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
34920 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
34921 msg->retries = 1;
34922
34923 - atomic_long_add(1 + msg->retries,
34924 + atomic_long_add_unchecked(1 + msg->retries,
34925 &port->counter_group[CM_XMIT].counter[attr_index]);
34926 if (msg->retries)
34927 - atomic_long_add(msg->retries,
34928 + atomic_long_add_unchecked(msg->retries,
34929 &port->counter_group[CM_XMIT_RETRIES].
34930 counter[attr_index]);
34931
34932 @@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
34933 }
34934
34935 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
34936 - atomic_long_inc(&port->counter_group[CM_RECV].
34937 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
34938 counter[attr_id - CM_ATTR_ID_OFFSET]);
34939
34940 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
34941 @@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
34942 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
34943
34944 return sprintf(buf, "%ld\n",
34945 - atomic_long_read(&group->counter[cm_attr->index]));
34946 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
34947 }
34948
34949 -static struct sysfs_ops cm_counter_ops = {
34950 +static const struct sysfs_ops cm_counter_ops = {
34951 .show = cm_show_counter
34952 };
34953
34954 diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
34955 index 8fd3a6f..61d8075 100644
34956 --- a/drivers/infiniband/core/cma.c
34957 +++ b/drivers/infiniband/core/cma.c
34958 @@ -2267,6 +2267,9 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
34959
34960 req.private_data_len = sizeof(struct cma_hdr) +
34961 conn_param->private_data_len;
34962 + if (req.private_data_len < conn_param->private_data_len)
34963 + return -EINVAL;
34964 +
34965 req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
34966 if (!req.private_data)
34967 return -ENOMEM;
34968 @@ -2314,6 +2317,9 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
34969 memset(&req, 0, sizeof req);
34970 offset = cma_user_data_offset(id_priv->id.ps);
34971 req.private_data_len = offset + conn_param->private_data_len;
34972 + if (req.private_data_len < conn_param->private_data_len)
34973 + return -EINVAL;
34974 +
34975 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
34976 if (!private_data)
34977 return -ENOMEM;
34978 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
34979 index 4507043..14ad522 100644
34980 --- a/drivers/infiniband/core/fmr_pool.c
34981 +++ b/drivers/infiniband/core/fmr_pool.c
34982 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
34983
34984 struct task_struct *thread;
34985
34986 - atomic_t req_ser;
34987 - atomic_t flush_ser;
34988 + atomic_unchecked_t req_ser;
34989 + atomic_unchecked_t flush_ser;
34990
34991 wait_queue_head_t force_wait;
34992 };
34993 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
34994 struct ib_fmr_pool *pool = pool_ptr;
34995
34996 do {
34997 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
34998 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
34999 ib_fmr_batch_release(pool);
35000
35001 - atomic_inc(&pool->flush_ser);
35002 + atomic_inc_unchecked(&pool->flush_ser);
35003 wake_up_interruptible(&pool->force_wait);
35004
35005 if (pool->flush_function)
35006 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
35007 }
35008
35009 set_current_state(TASK_INTERRUPTIBLE);
35010 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
35011 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
35012 !kthread_should_stop())
35013 schedule();
35014 __set_current_state(TASK_RUNNING);
35015 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
35016 pool->dirty_watermark = params->dirty_watermark;
35017 pool->dirty_len = 0;
35018 spin_lock_init(&pool->pool_lock);
35019 - atomic_set(&pool->req_ser, 0);
35020 - atomic_set(&pool->flush_ser, 0);
35021 + atomic_set_unchecked(&pool->req_ser, 0);
35022 + atomic_set_unchecked(&pool->flush_ser, 0);
35023 init_waitqueue_head(&pool->force_wait);
35024
35025 pool->thread = kthread_run(ib_fmr_cleanup_thread,
35026 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
35027 }
35028 spin_unlock_irq(&pool->pool_lock);
35029
35030 - serial = atomic_inc_return(&pool->req_ser);
35031 + serial = atomic_inc_return_unchecked(&pool->req_ser);
35032 wake_up_process(pool->thread);
35033
35034 if (wait_event_interruptible(pool->force_wait,
35035 - atomic_read(&pool->flush_ser) - serial >= 0))
35036 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
35037 return -EINTR;
35038
35039 return 0;
35040 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
35041 } else {
35042 list_add_tail(&fmr->list, &pool->dirty_list);
35043 if (++pool->dirty_len >= pool->dirty_watermark) {
35044 - atomic_inc(&pool->req_ser);
35045 + atomic_inc_unchecked(&pool->req_ser);
35046 wake_up_process(pool->thread);
35047 }
35048 }
35049 diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
35050 index 158a214..1558bb7 100644
35051 --- a/drivers/infiniband/core/sysfs.c
35052 +++ b/drivers/infiniband/core/sysfs.c
35053 @@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kobject *kobj,
35054 return port_attr->show(p, port_attr, buf);
35055 }
35056
35057 -static struct sysfs_ops port_sysfs_ops = {
35058 +static const struct sysfs_ops port_sysfs_ops = {
35059 .show = port_attr_show
35060 };
35061
35062 diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
35063 index 5440da0..1194ecb 100644
35064 --- a/drivers/infiniband/core/uverbs_marshall.c
35065 +++ b/drivers/infiniband/core/uverbs_marshall.c
35066 @@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
35067 dst->grh.sgid_index = src->grh.sgid_index;
35068 dst->grh.hop_limit = src->grh.hop_limit;
35069 dst->grh.traffic_class = src->grh.traffic_class;
35070 + memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
35071 dst->dlid = src->dlid;
35072 dst->sl = src->sl;
35073 dst->src_path_bits = src->src_path_bits;
35074 dst->static_rate = src->static_rate;
35075 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
35076 dst->port_num = src->port_num;
35077 + dst->reserved = 0;
35078 }
35079 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
35080
35081 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
35082 struct ib_qp_attr *src)
35083 {
35084 + dst->qp_state = src->qp_state;
35085 dst->cur_qp_state = src->cur_qp_state;
35086 dst->path_mtu = src->path_mtu;
35087 dst->path_mig_state = src->path_mig_state;
35088 @@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
35089 dst->rnr_retry = src->rnr_retry;
35090 dst->alt_port_num = src->alt_port_num;
35091 dst->alt_timeout = src->alt_timeout;
35092 + memset(dst->reserved, 0, sizeof(dst->reserved));
35093 }
35094 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
35095
35096 diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
35097 index 100da85..62e6b88 100644
35098 --- a/drivers/infiniband/hw/ipath/ipath_fs.c
35099 +++ b/drivers/infiniband/hw/ipath/ipath_fs.c
35100 @@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf,
35101 struct infinipath_counters counters;
35102 struct ipath_devdata *dd;
35103
35104 + pax_track_stack();
35105 +
35106 dd = file->f_path.dentry->d_inode->i_private;
35107 dd->ipath_f_read_counters(dd, &counters);
35108
35109 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
35110 index cbde0cf..afaf55c 100644
35111 --- a/drivers/infiniband/hw/nes/nes.c
35112 +++ b/drivers/infiniband/hw/nes/nes.c
35113 @@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
35114 LIST_HEAD(nes_adapter_list);
35115 static LIST_HEAD(nes_dev_list);
35116
35117 -atomic_t qps_destroyed;
35118 +atomic_unchecked_t qps_destroyed;
35119
35120 static unsigned int ee_flsh_adapter;
35121 static unsigned int sysfs_nonidx_addr;
35122 @@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
35123 struct nes_adapter *nesadapter = nesdev->nesadapter;
35124 u32 qp_id;
35125
35126 - atomic_inc(&qps_destroyed);
35127 + atomic_inc_unchecked(&qps_destroyed);
35128
35129 /* Free the control structures */
35130
35131 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
35132 index bcc6abc..9c76b2f 100644
35133 --- a/drivers/infiniband/hw/nes/nes.h
35134 +++ b/drivers/infiniband/hw/nes/nes.h
35135 @@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
35136 extern unsigned int wqm_quanta;
35137 extern struct list_head nes_adapter_list;
35138
35139 -extern atomic_t cm_connects;
35140 -extern atomic_t cm_accepts;
35141 -extern atomic_t cm_disconnects;
35142 -extern atomic_t cm_closes;
35143 -extern atomic_t cm_connecteds;
35144 -extern atomic_t cm_connect_reqs;
35145 -extern atomic_t cm_rejects;
35146 -extern atomic_t mod_qp_timouts;
35147 -extern atomic_t qps_created;
35148 -extern atomic_t qps_destroyed;
35149 -extern atomic_t sw_qps_destroyed;
35150 +extern atomic_unchecked_t cm_connects;
35151 +extern atomic_unchecked_t cm_accepts;
35152 +extern atomic_unchecked_t cm_disconnects;
35153 +extern atomic_unchecked_t cm_closes;
35154 +extern atomic_unchecked_t cm_connecteds;
35155 +extern atomic_unchecked_t cm_connect_reqs;
35156 +extern atomic_unchecked_t cm_rejects;
35157 +extern atomic_unchecked_t mod_qp_timouts;
35158 +extern atomic_unchecked_t qps_created;
35159 +extern atomic_unchecked_t qps_destroyed;
35160 +extern atomic_unchecked_t sw_qps_destroyed;
35161 extern u32 mh_detected;
35162 extern u32 mh_pauses_sent;
35163 extern u32 cm_packets_sent;
35164 @@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
35165 extern u32 cm_listens_created;
35166 extern u32 cm_listens_destroyed;
35167 extern u32 cm_backlog_drops;
35168 -extern atomic_t cm_loopbacks;
35169 -extern atomic_t cm_nodes_created;
35170 -extern atomic_t cm_nodes_destroyed;
35171 -extern atomic_t cm_accel_dropped_pkts;
35172 -extern atomic_t cm_resets_recvd;
35173 +extern atomic_unchecked_t cm_loopbacks;
35174 +extern atomic_unchecked_t cm_nodes_created;
35175 +extern atomic_unchecked_t cm_nodes_destroyed;
35176 +extern atomic_unchecked_t cm_accel_dropped_pkts;
35177 +extern atomic_unchecked_t cm_resets_recvd;
35178
35179 extern u32 int_mod_timer_init;
35180 extern u32 int_mod_cq_depth_256;
35181 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
35182 index 73473db..5ed06e8 100644
35183 --- a/drivers/infiniband/hw/nes/nes_cm.c
35184 +++ b/drivers/infiniband/hw/nes/nes_cm.c
35185 @@ -69,11 +69,11 @@ u32 cm_packets_received;
35186 u32 cm_listens_created;
35187 u32 cm_listens_destroyed;
35188 u32 cm_backlog_drops;
35189 -atomic_t cm_loopbacks;
35190 -atomic_t cm_nodes_created;
35191 -atomic_t cm_nodes_destroyed;
35192 -atomic_t cm_accel_dropped_pkts;
35193 -atomic_t cm_resets_recvd;
35194 +atomic_unchecked_t cm_loopbacks;
35195 +atomic_unchecked_t cm_nodes_created;
35196 +atomic_unchecked_t cm_nodes_destroyed;
35197 +atomic_unchecked_t cm_accel_dropped_pkts;
35198 +atomic_unchecked_t cm_resets_recvd;
35199
35200 static inline int mini_cm_accelerated(struct nes_cm_core *,
35201 struct nes_cm_node *);
35202 @@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
35203
35204 static struct nes_cm_core *g_cm_core;
35205
35206 -atomic_t cm_connects;
35207 -atomic_t cm_accepts;
35208 -atomic_t cm_disconnects;
35209 -atomic_t cm_closes;
35210 -atomic_t cm_connecteds;
35211 -atomic_t cm_connect_reqs;
35212 -atomic_t cm_rejects;
35213 +atomic_unchecked_t cm_connects;
35214 +atomic_unchecked_t cm_accepts;
35215 +atomic_unchecked_t cm_disconnects;
35216 +atomic_unchecked_t cm_closes;
35217 +atomic_unchecked_t cm_connecteds;
35218 +atomic_unchecked_t cm_connect_reqs;
35219 +atomic_unchecked_t cm_rejects;
35220
35221
35222 /**
35223 @@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
35224 cm_node->rem_mac);
35225
35226 add_hte_node(cm_core, cm_node);
35227 - atomic_inc(&cm_nodes_created);
35228 + atomic_inc_unchecked(&cm_nodes_created);
35229
35230 return cm_node;
35231 }
35232 @@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
35233 }
35234
35235 atomic_dec(&cm_core->node_cnt);
35236 - atomic_inc(&cm_nodes_destroyed);
35237 + atomic_inc_unchecked(&cm_nodes_destroyed);
35238 nesqp = cm_node->nesqp;
35239 if (nesqp) {
35240 nesqp->cm_node = NULL;
35241 @@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
35242
35243 static void drop_packet(struct sk_buff *skb)
35244 {
35245 - atomic_inc(&cm_accel_dropped_pkts);
35246 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
35247 dev_kfree_skb_any(skb);
35248 }
35249
35250 @@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
35251
35252 int reset = 0; /* whether to send reset in case of err.. */
35253 int passive_state;
35254 - atomic_inc(&cm_resets_recvd);
35255 + atomic_inc_unchecked(&cm_resets_recvd);
35256 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
35257 " refcnt=%d\n", cm_node, cm_node->state,
35258 atomic_read(&cm_node->ref_count));
35259 @@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
35260 rem_ref_cm_node(cm_node->cm_core, cm_node);
35261 return NULL;
35262 }
35263 - atomic_inc(&cm_loopbacks);
35264 + atomic_inc_unchecked(&cm_loopbacks);
35265 loopbackremotenode->loopbackpartner = cm_node;
35266 loopbackremotenode->tcp_cntxt.rcv_wscale =
35267 NES_CM_DEFAULT_RCV_WND_SCALE;
35268 @@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
35269 add_ref_cm_node(cm_node);
35270 } else if (cm_node->state == NES_CM_STATE_TSA) {
35271 rem_ref_cm_node(cm_core, cm_node);
35272 - atomic_inc(&cm_accel_dropped_pkts);
35273 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
35274 dev_kfree_skb_any(skb);
35275 break;
35276 }
35277 @@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
35278
35279 if ((cm_id) && (cm_id->event_handler)) {
35280 if (issue_disconn) {
35281 - atomic_inc(&cm_disconnects);
35282 + atomic_inc_unchecked(&cm_disconnects);
35283 cm_event.event = IW_CM_EVENT_DISCONNECT;
35284 cm_event.status = disconn_status;
35285 cm_event.local_addr = cm_id->local_addr;
35286 @@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
35287 }
35288
35289 if (issue_close) {
35290 - atomic_inc(&cm_closes);
35291 + atomic_inc_unchecked(&cm_closes);
35292 nes_disconnect(nesqp, 1);
35293
35294 cm_id->provider_data = nesqp;
35295 @@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
35296
35297 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
35298 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
35299 - atomic_inc(&cm_accepts);
35300 + atomic_inc_unchecked(&cm_accepts);
35301
35302 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
35303 atomic_read(&nesvnic->netdev->refcnt));
35304 @@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
35305
35306 struct nes_cm_core *cm_core;
35307
35308 - atomic_inc(&cm_rejects);
35309 + atomic_inc_unchecked(&cm_rejects);
35310 cm_node = (struct nes_cm_node *) cm_id->provider_data;
35311 loopback = cm_node->loopbackpartner;
35312 cm_core = cm_node->cm_core;
35313 @@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
35314 ntohl(cm_id->local_addr.sin_addr.s_addr),
35315 ntohs(cm_id->local_addr.sin_port));
35316
35317 - atomic_inc(&cm_connects);
35318 + atomic_inc_unchecked(&cm_connects);
35319 nesqp->active_conn = 1;
35320
35321 /* cache the cm_id in the qp */
35322 @@ -3195,7 +3195,7 @@ static void cm_event_connected(struct nes_cm_event *event)
35323 if (nesqp->destroyed) {
35324 return;
35325 }
35326 - atomic_inc(&cm_connecteds);
35327 + atomic_inc_unchecked(&cm_connecteds);
35328 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
35329 " local port 0x%04X. jiffies = %lu.\n",
35330 nesqp->hwqp.qp_id,
35331 @@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm_event *event)
35332
35333 ret = cm_id->event_handler(cm_id, &cm_event);
35334 cm_id->add_ref(cm_id);
35335 - atomic_inc(&cm_closes);
35336 + atomic_inc_unchecked(&cm_closes);
35337 cm_event.event = IW_CM_EVENT_CLOSE;
35338 cm_event.status = IW_CM_EVENT_STATUS_OK;
35339 cm_event.provider_data = cm_id->provider_data;
35340 @@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
35341 return;
35342 cm_id = cm_node->cm_id;
35343
35344 - atomic_inc(&cm_connect_reqs);
35345 + atomic_inc_unchecked(&cm_connect_reqs);
35346 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
35347 cm_node, cm_id, jiffies);
35348
35349 @@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
35350 return;
35351 cm_id = cm_node->cm_id;
35352
35353 - atomic_inc(&cm_connect_reqs);
35354 + atomic_inc_unchecked(&cm_connect_reqs);
35355 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
35356 cm_node, cm_id, jiffies);
35357
35358 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
35359 index e593af3..870694a 100644
35360 --- a/drivers/infiniband/hw/nes/nes_nic.c
35361 +++ b/drivers/infiniband/hw/nes/nes_nic.c
35362 @@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
35363 target_stat_values[++index] = mh_detected;
35364 target_stat_values[++index] = mh_pauses_sent;
35365 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
35366 - target_stat_values[++index] = atomic_read(&cm_connects);
35367 - target_stat_values[++index] = atomic_read(&cm_accepts);
35368 - target_stat_values[++index] = atomic_read(&cm_disconnects);
35369 - target_stat_values[++index] = atomic_read(&cm_connecteds);
35370 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
35371 - target_stat_values[++index] = atomic_read(&cm_rejects);
35372 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
35373 - target_stat_values[++index] = atomic_read(&qps_created);
35374 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
35375 - target_stat_values[++index] = atomic_read(&qps_destroyed);
35376 - target_stat_values[++index] = atomic_read(&cm_closes);
35377 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
35378 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
35379 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
35380 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
35381 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
35382 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
35383 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
35384 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
35385 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
35386 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
35387 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
35388 target_stat_values[++index] = cm_packets_sent;
35389 target_stat_values[++index] = cm_packets_bounced;
35390 target_stat_values[++index] = cm_packets_created;
35391 @@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
35392 target_stat_values[++index] = cm_listens_created;
35393 target_stat_values[++index] = cm_listens_destroyed;
35394 target_stat_values[++index] = cm_backlog_drops;
35395 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
35396 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
35397 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
35398 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
35399 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
35400 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
35401 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
35402 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
35403 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
35404 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
35405 target_stat_values[++index] = int_mod_timer_init;
35406 target_stat_values[++index] = int_mod_cq_depth_1;
35407 target_stat_values[++index] = int_mod_cq_depth_4;
35408 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
35409 index a680c42..f914deb 100644
35410 --- a/drivers/infiniband/hw/nes/nes_verbs.c
35411 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
35412 @@ -45,9 +45,9 @@
35413
35414 #include <rdma/ib_umem.h>
35415
35416 -atomic_t mod_qp_timouts;
35417 -atomic_t qps_created;
35418 -atomic_t sw_qps_destroyed;
35419 +atomic_unchecked_t mod_qp_timouts;
35420 +atomic_unchecked_t qps_created;
35421 +atomic_unchecked_t sw_qps_destroyed;
35422
35423 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
35424
35425 @@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
35426 if (init_attr->create_flags)
35427 return ERR_PTR(-EINVAL);
35428
35429 - atomic_inc(&qps_created);
35430 + atomic_inc_unchecked(&qps_created);
35431 switch (init_attr->qp_type) {
35432 case IB_QPT_RC:
35433 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
35434 @@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
35435 struct iw_cm_event cm_event;
35436 int ret;
35437
35438 - atomic_inc(&sw_qps_destroyed);
35439 + atomic_inc_unchecked(&sw_qps_destroyed);
35440 nesqp->destroyed = 1;
35441
35442 /* Blow away the connection if it exists. */
35443 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
35444 index ac11be0..3883c04 100644
35445 --- a/drivers/input/gameport/gameport.c
35446 +++ b/drivers/input/gameport/gameport.c
35447 @@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
35448 */
35449 static void gameport_init_port(struct gameport *gameport)
35450 {
35451 - static atomic_t gameport_no = ATOMIC_INIT(0);
35452 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
35453
35454 __module_get(THIS_MODULE);
35455
35456 mutex_init(&gameport->drv_mutex);
35457 device_initialize(&gameport->dev);
35458 - dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
35459 + dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
35460 gameport->dev.bus = &gameport_bus;
35461 gameport->dev.release = gameport_release_port;
35462 if (gameport->parent)
35463 diff --git a/drivers/input/input.c b/drivers/input/input.c
35464 index c82ae82..8cfb9cb 100644
35465 --- a/drivers/input/input.c
35466 +++ b/drivers/input/input.c
35467 @@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
35468 */
35469 int input_register_device(struct input_dev *dev)
35470 {
35471 - static atomic_t input_no = ATOMIC_INIT(0);
35472 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
35473 struct input_handler *handler;
35474 const char *path;
35475 int error;
35476 @@ -1585,7 +1585,7 @@ int input_register_device(struct input_dev *dev)
35477 dev->setkeycode = input_default_setkeycode;
35478
35479 dev_set_name(&dev->dev, "input%ld",
35480 - (unsigned long) atomic_inc_return(&input_no) - 1);
35481 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
35482
35483 error = device_add(&dev->dev);
35484 if (error)
35485 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
35486 index ca13a6b..b032b0c 100644
35487 --- a/drivers/input/joystick/sidewinder.c
35488 +++ b/drivers/input/joystick/sidewinder.c
35489 @@ -30,6 +30,7 @@
35490 #include <linux/kernel.h>
35491 #include <linux/module.h>
35492 #include <linux/slab.h>
35493 +#include <linux/sched.h>
35494 #include <linux/init.h>
35495 #include <linux/input.h>
35496 #include <linux/gameport.h>
35497 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
35498 unsigned char buf[SW_LENGTH];
35499 int i;
35500
35501 + pax_track_stack();
35502 +
35503 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
35504
35505 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
35506 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
35507 index 79e3edc..01412b9 100644
35508 --- a/drivers/input/joystick/xpad.c
35509 +++ b/drivers/input/joystick/xpad.c
35510 @@ -621,7 +621,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
35511
35512 static int xpad_led_probe(struct usb_xpad *xpad)
35513 {
35514 - static atomic_t led_seq = ATOMIC_INIT(0);
35515 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
35516 long led_no;
35517 struct xpad_led *led;
35518 struct led_classdev *led_cdev;
35519 @@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
35520 if (!led)
35521 return -ENOMEM;
35522
35523 - led_no = (long)atomic_inc_return(&led_seq) - 1;
35524 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
35525
35526 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
35527 led->xpad = xpad;
35528 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
35529 index 0236f0d..c7327f1 100644
35530 --- a/drivers/input/serio/serio.c
35531 +++ b/drivers/input/serio/serio.c
35532 @@ -527,7 +527,7 @@ static void serio_release_port(struct device *dev)
35533 */
35534 static void serio_init_port(struct serio *serio)
35535 {
35536 - static atomic_t serio_no = ATOMIC_INIT(0);
35537 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
35538
35539 __module_get(THIS_MODULE);
35540
35541 @@ -536,7 +536,7 @@ static void serio_init_port(struct serio *serio)
35542 mutex_init(&serio->drv_mutex);
35543 device_initialize(&serio->dev);
35544 dev_set_name(&serio->dev, "serio%ld",
35545 - (long)atomic_inc_return(&serio_no) - 1);
35546 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
35547 serio->dev.bus = &serio_bus;
35548 serio->dev.release = serio_release_port;
35549 if (serio->parent) {
35550 diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
35551 index 33dcd8d..2783d25 100644
35552 --- a/drivers/isdn/gigaset/common.c
35553 +++ b/drivers/isdn/gigaset/common.c
35554 @@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
35555 cs->commands_pending = 0;
35556 cs->cur_at_seq = 0;
35557 cs->gotfwver = -1;
35558 - cs->open_count = 0;
35559 + local_set(&cs->open_count, 0);
35560 cs->dev = NULL;
35561 cs->tty = NULL;
35562 cs->tty_dev = NULL;
35563 diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
35564 index a2f6125..6a70677 100644
35565 --- a/drivers/isdn/gigaset/gigaset.h
35566 +++ b/drivers/isdn/gigaset/gigaset.h
35567 @@ -34,6 +34,7 @@
35568 #include <linux/tty_driver.h>
35569 #include <linux/list.h>
35570 #include <asm/atomic.h>
35571 +#include <asm/local.h>
35572
35573 #define GIG_VERSION {0,5,0,0}
35574 #define GIG_COMPAT {0,4,0,0}
35575 @@ -446,7 +447,7 @@ struct cardstate {
35576 spinlock_t cmdlock;
35577 unsigned curlen, cmdbytes;
35578
35579 - unsigned open_count;
35580 + local_t open_count;
35581 struct tty_struct *tty;
35582 struct tasklet_struct if_wake_tasklet;
35583 unsigned control_state;
35584 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
35585 index b3065b8..c7e8cc9 100644
35586 --- a/drivers/isdn/gigaset/interface.c
35587 +++ b/drivers/isdn/gigaset/interface.c
35588 @@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
35589 return -ERESTARTSYS; // FIXME -EINTR?
35590 tty->driver_data = cs;
35591
35592 - ++cs->open_count;
35593 -
35594 - if (cs->open_count == 1) {
35595 + if (local_inc_return(&cs->open_count) == 1) {
35596 spin_lock_irqsave(&cs->lock, flags);
35597 cs->tty = tty;
35598 spin_unlock_irqrestore(&cs->lock, flags);
35599 @@ -195,10 +193,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
35600
35601 if (!cs->connected)
35602 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35603 - else if (!cs->open_count)
35604 + else if (!local_read(&cs->open_count))
35605 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35606 else {
35607 - if (!--cs->open_count) {
35608 + if (!local_dec_return(&cs->open_count)) {
35609 spin_lock_irqsave(&cs->lock, flags);
35610 cs->tty = NULL;
35611 spin_unlock_irqrestore(&cs->lock, flags);
35612 @@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *tty, struct file *file,
35613 if (!cs->connected) {
35614 gig_dbg(DEBUG_IF, "not connected");
35615 retval = -ENODEV;
35616 - } else if (!cs->open_count)
35617 + } else if (!local_read(&cs->open_count))
35618 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35619 else {
35620 retval = 0;
35621 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
35622 if (!cs->connected) {
35623 gig_dbg(DEBUG_IF, "not connected");
35624 retval = -ENODEV;
35625 - } else if (!cs->open_count)
35626 + } else if (!local_read(&cs->open_count))
35627 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35628 else if (cs->mstate != MS_LOCKED) {
35629 dev_warn(cs->dev, "can't write to unlocked device\n");
35630 @@ -395,7 +393,7 @@ static int if_write_room(struct tty_struct *tty)
35631 if (!cs->connected) {
35632 gig_dbg(DEBUG_IF, "not connected");
35633 retval = -ENODEV;
35634 - } else if (!cs->open_count)
35635 + } else if (!local_read(&cs->open_count))
35636 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35637 else if (cs->mstate != MS_LOCKED) {
35638 dev_warn(cs->dev, "can't write to unlocked device\n");
35639 @@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
35640
35641 if (!cs->connected)
35642 gig_dbg(DEBUG_IF, "not connected");
35643 - else if (!cs->open_count)
35644 + else if (!local_read(&cs->open_count))
35645 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35646 else if (cs->mstate != MS_LOCKED)
35647 dev_warn(cs->dev, "can't write to unlocked device\n");
35648 @@ -453,7 +451,7 @@ static void if_throttle(struct tty_struct *tty)
35649
35650 if (!cs->connected)
35651 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35652 - else if (!cs->open_count)
35653 + else if (!local_read(&cs->open_count))
35654 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35655 else {
35656 //FIXME
35657 @@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_struct *tty)
35658
35659 if (!cs->connected)
35660 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35661 - else if (!cs->open_count)
35662 + else if (!local_read(&cs->open_count))
35663 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35664 else {
35665 //FIXME
35666 @@ -510,7 +508,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
35667 goto out;
35668 }
35669
35670 - if (!cs->open_count) {
35671 + if (!local_read(&cs->open_count)) {
35672 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35673 goto out;
35674 }
35675 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
35676 index a7c0083..62a7cb6 100644
35677 --- a/drivers/isdn/hardware/avm/b1.c
35678 +++ b/drivers/isdn/hardware/avm/b1.c
35679 @@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
35680 }
35681 if (left) {
35682 if (t4file->user) {
35683 - if (copy_from_user(buf, dp, left))
35684 + if (left > sizeof buf || copy_from_user(buf, dp, left))
35685 return -EFAULT;
35686 } else {
35687 memcpy(buf, dp, left);
35688 @@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
35689 }
35690 if (left) {
35691 if (config->user) {
35692 - if (copy_from_user(buf, dp, left))
35693 + if (left > sizeof buf || copy_from_user(buf, dp, left))
35694 return -EFAULT;
35695 } else {
35696 memcpy(buf, dp, left);
35697 diff --git a/drivers/isdn/hardware/eicon/capidtmf.c b/drivers/isdn/hardware/eicon/capidtmf.c
35698 index f130724..c373c68 100644
35699 --- a/drivers/isdn/hardware/eicon/capidtmf.c
35700 +++ b/drivers/isdn/hardware/eicon/capidtmf.c
35701 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_state *p_state, byte *buffer, word leng
35702 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
35703 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
35704
35705 + pax_track_stack();
35706
35707 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
35708 {
35709 diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c
35710 index 4d425c6..a9be6c4 100644
35711 --- a/drivers/isdn/hardware/eicon/capifunc.c
35712 +++ b/drivers/isdn/hardware/eicon/capifunc.c
35713 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
35714 IDI_SYNC_REQ req;
35715 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35716
35717 + pax_track_stack();
35718 +
35719 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35720
35721 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35722 diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c
35723 index 3029234..ef0d9e2 100644
35724 --- a/drivers/isdn/hardware/eicon/diddfunc.c
35725 +++ b/drivers/isdn/hardware/eicon/diddfunc.c
35726 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35727 IDI_SYNC_REQ req;
35728 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35729
35730 + pax_track_stack();
35731 +
35732 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35733
35734 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35735 diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c
35736 index d36a4c0..11e7d1a 100644
35737 --- a/drivers/isdn/hardware/eicon/divasfunc.c
35738 +++ b/drivers/isdn/hardware/eicon/divasfunc.c
35739 @@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35740 IDI_SYNC_REQ req;
35741 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35742
35743 + pax_track_stack();
35744 +
35745 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35746
35747 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35748 diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
35749 index 85784a7..a19ca98 100644
35750 --- a/drivers/isdn/hardware/eicon/divasync.h
35751 +++ b/drivers/isdn/hardware/eicon/divasync.h
35752 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
35753 } diva_didd_add_adapter_t;
35754 typedef struct _diva_didd_remove_adapter {
35755 IDI_CALL p_request;
35756 -} diva_didd_remove_adapter_t;
35757 +} __no_const diva_didd_remove_adapter_t;
35758 typedef struct _diva_didd_read_adapter_array {
35759 void * buffer;
35760 dword length;
35761 diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
35762 index db87d51..7d09acf 100644
35763 --- a/drivers/isdn/hardware/eicon/idifunc.c
35764 +++ b/drivers/isdn/hardware/eicon/idifunc.c
35765 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35766 IDI_SYNC_REQ req;
35767 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35768
35769 + pax_track_stack();
35770 +
35771 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35772
35773 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35774 diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
35775 index ae89fb8..0fab299 100644
35776 --- a/drivers/isdn/hardware/eicon/message.c
35777 +++ b/drivers/isdn/hardware/eicon/message.c
35778 @@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
35779 dword d;
35780 word w;
35781
35782 + pax_track_stack();
35783 +
35784 a = plci->adapter;
35785 Id = ((word)plci->Id<<8)|a->Id;
35786 PUT_WORD(&SS_Ind[4],0x0000);
35787 @@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE *bp, word b_channel_info,
35788 word j, n, w;
35789 dword d;
35790
35791 + pax_track_stack();
35792 +
35793
35794 for(i=0;i<8;i++) bp_parms[i].length = 0;
35795 for(i=0;i<2;i++) global_config[i].length = 0;
35796 @@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARSE *bp)
35797 const byte llc3[] = {4,3,2,2,6,6,0};
35798 const byte header[] = {0,2,3,3,0,0,0};
35799
35800 + pax_track_stack();
35801 +
35802 for(i=0;i<8;i++) bp_parms[i].length = 0;
35803 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
35804 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
35805 @@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER * a, PLCI * plci)
35806 word appl_number_group_type[MAX_APPL];
35807 PLCI *auxplci;
35808
35809 + pax_track_stack();
35810 +
35811 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
35812
35813 if(!a->group_optimization_enabled)
35814 diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c
35815 index a564b75..f3cf8b5 100644
35816 --- a/drivers/isdn/hardware/eicon/mntfunc.c
35817 +++ b/drivers/isdn/hardware/eicon/mntfunc.c
35818 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35819 IDI_SYNC_REQ req;
35820 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35821
35822 + pax_track_stack();
35823 +
35824 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35825
35826 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35827 diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
35828 index a3bd163..8956575 100644
35829 --- a/drivers/isdn/hardware/eicon/xdi_adapter.h
35830 +++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
35831 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
35832 typedef struct _diva_os_idi_adapter_interface {
35833 diva_init_card_proc_t cleanup_adapter_proc;
35834 diva_cmd_card_proc_t cmd_proc;
35835 -} diva_os_idi_adapter_interface_t;
35836 +} __no_const diva_os_idi_adapter_interface_t;
35837
35838 typedef struct _diva_os_xdi_adapter {
35839 struct list_head link;
35840 diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
35841 index adb1e8c..21b590b 100644
35842 --- a/drivers/isdn/i4l/isdn_common.c
35843 +++ b/drivers/isdn/i4l/isdn_common.c
35844 @@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
35845 } iocpar;
35846 void __user *argp = (void __user *)arg;
35847
35848 + pax_track_stack();
35849 +
35850 #define name iocpar.name
35851 #define bname iocpar.bname
35852 #define iocts iocpar.iocts
35853 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
35854 index bf7997a..cf091db 100644
35855 --- a/drivers/isdn/icn/icn.c
35856 +++ b/drivers/isdn/icn/icn.c
35857 @@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
35858 if (count > len)
35859 count = len;
35860 if (user) {
35861 - if (copy_from_user(msg, buf, count))
35862 + if (count > sizeof msg || copy_from_user(msg, buf, count))
35863 return -EFAULT;
35864 } else
35865 memcpy(msg, buf, count);
35866 diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
35867 index feb0fa4..f76f830 100644
35868 --- a/drivers/isdn/mISDN/socket.c
35869 +++ b/drivers/isdn/mISDN/socket.c
35870 @@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
35871 if (dev) {
35872 struct mISDN_devinfo di;
35873
35874 + memset(&di, 0, sizeof(di));
35875 di.id = dev->id;
35876 di.Dprotocols = dev->Dprotocols;
35877 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
35878 @@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
35879 if (dev) {
35880 struct mISDN_devinfo di;
35881
35882 + memset(&di, 0, sizeof(di));
35883 di.id = dev->id;
35884 di.Dprotocols = dev->Dprotocols;
35885 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
35886 diff --git a/drivers/isdn/sc/interrupt.c b/drivers/isdn/sc/interrupt.c
35887 index 485be8b..f0225bc 100644
35888 --- a/drivers/isdn/sc/interrupt.c
35889 +++ b/drivers/isdn/sc/interrupt.c
35890 @@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
35891 }
35892 else if(callid>=0x0000 && callid<=0x7FFF)
35893 {
35894 + int len;
35895 +
35896 pr_debug("%s: Got Incoming Call\n",
35897 sc_adapter[card]->devicename);
35898 - strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
35899 - strcpy(setup.eazmsn,
35900 - sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
35901 + len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
35902 + sizeof(setup.phone));
35903 + if (len >= sizeof(setup.phone))
35904 + continue;
35905 + len = strlcpy(setup.eazmsn,
35906 + sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
35907 + sizeof(setup.eazmsn));
35908 + if (len >= sizeof(setup.eazmsn))
35909 + continue;
35910 setup.si1 = 7;
35911 setup.si2 = 0;
35912 setup.plan = 0;
35913 @@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
35914 * Handle a GetMyNumber Rsp
35915 */
35916 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
35917 - strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
35918 + strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
35919 + rcvmsg.msg_data.byte_array,
35920 + sizeof(rcvmsg.msg_data.byte_array));
35921 continue;
35922 }
35923
35924 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
35925 index 8744d24..d1f9a9a 100644
35926 --- a/drivers/lguest/core.c
35927 +++ b/drivers/lguest/core.c
35928 @@ -91,9 +91,17 @@ static __init int map_switcher(void)
35929 * it's worked so far. The end address needs +1 because __get_vm_area
35930 * allocates an extra guard page, so we need space for that.
35931 */
35932 +
35933 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
35934 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
35935 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
35936 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
35937 +#else
35938 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
35939 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
35940 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
35941 +#endif
35942 +
35943 if (!switcher_vma) {
35944 err = -ENOMEM;
35945 printk("lguest: could not map switcher pages high\n");
35946 @@ -118,7 +126,7 @@ static __init int map_switcher(void)
35947 * Now the Switcher is mapped at the right address, we can't fail!
35948 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
35949 */
35950 - memcpy(switcher_vma->addr, start_switcher_text,
35951 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
35952 end_switcher_text - start_switcher_text);
35953
35954 printk(KERN_INFO "lguest: mapped switcher at %p\n",
35955 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
35956 index 6ae3888..8b38145 100644
35957 --- a/drivers/lguest/x86/core.c
35958 +++ b/drivers/lguest/x86/core.c
35959 @@ -59,7 +59,7 @@ static struct {
35960 /* Offset from where switcher.S was compiled to where we've copied it */
35961 static unsigned long switcher_offset(void)
35962 {
35963 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
35964 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
35965 }
35966
35967 /* This cpu's struct lguest_pages. */
35968 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
35969 * These copies are pretty cheap, so we do them unconditionally: */
35970 /* Save the current Host top-level page directory.
35971 */
35972 +
35973 +#ifdef CONFIG_PAX_PER_CPU_PGD
35974 + pages->state.host_cr3 = read_cr3();
35975 +#else
35976 pages->state.host_cr3 = __pa(current->mm->pgd);
35977 +#endif
35978 +
35979 /*
35980 * Set up the Guest's page tables to see this CPU's pages (and no
35981 * other CPU's pages).
35982 @@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
35983 * compiled-in switcher code and the high-mapped copy we just made.
35984 */
35985 for (i = 0; i < IDT_ENTRIES; i++)
35986 - default_idt_entries[i] += switcher_offset();
35987 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
35988
35989 /*
35990 * Set up the Switcher's per-cpu areas.
35991 @@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
35992 * it will be undisturbed when we switch. To change %cs and jump we
35993 * need this structure to feed to Intel's "lcall" instruction.
35994 */
35995 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
35996 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
35997 lguest_entry.segment = LGUEST_CS;
35998
35999 /*
36000 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
36001 index 40634b0..4f5855e 100644
36002 --- a/drivers/lguest/x86/switcher_32.S
36003 +++ b/drivers/lguest/x86/switcher_32.S
36004 @@ -87,6 +87,7 @@
36005 #include <asm/page.h>
36006 #include <asm/segment.h>
36007 #include <asm/lguest.h>
36008 +#include <asm/processor-flags.h>
36009
36010 // We mark the start of the code to copy
36011 // It's placed in .text tho it's never run here
36012 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
36013 // Changes type when we load it: damn Intel!
36014 // For after we switch over our page tables
36015 // That entry will be read-only: we'd crash.
36016 +
36017 +#ifdef CONFIG_PAX_KERNEXEC
36018 + mov %cr0, %edx
36019 + xor $X86_CR0_WP, %edx
36020 + mov %edx, %cr0
36021 +#endif
36022 +
36023 movl $(GDT_ENTRY_TSS*8), %edx
36024 ltr %dx
36025
36026 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
36027 // Let's clear it again for our return.
36028 // The GDT descriptor of the Host
36029 // Points to the table after two "size" bytes
36030 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
36031 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
36032 // Clear "used" from type field (byte 5, bit 2)
36033 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
36034 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
36035 +
36036 +#ifdef CONFIG_PAX_KERNEXEC
36037 + mov %cr0, %eax
36038 + xor $X86_CR0_WP, %eax
36039 + mov %eax, %cr0
36040 +#endif
36041
36042 // Once our page table's switched, the Guest is live!
36043 // The Host fades as we run this final step.
36044 @@ -295,13 +309,12 @@ deliver_to_host:
36045 // I consulted gcc, and it gave
36046 // These instructions, which I gladly credit:
36047 leal (%edx,%ebx,8), %eax
36048 - movzwl (%eax),%edx
36049 - movl 4(%eax), %eax
36050 - xorw %ax, %ax
36051 - orl %eax, %edx
36052 + movl 4(%eax), %edx
36053 + movw (%eax), %dx
36054 // Now the address of the handler's in %edx
36055 // We call it now: its "iret" drops us home.
36056 - jmp *%edx
36057 + ljmp $__KERNEL_CS, $1f
36058 +1: jmp *%edx
36059
36060 // Every interrupt can come to us here
36061 // But we must truly tell each apart.
36062 diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
36063 index 588a5b0..b71db89 100644
36064 --- a/drivers/macintosh/macio_asic.c
36065 +++ b/drivers/macintosh/macio_asic.c
36066 @@ -701,7 +701,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
36067 * MacIO is matched against any Apple ID, it's probe() function
36068 * will then decide wether it applies or not
36069 */
36070 -static const struct pci_device_id __devinitdata pci_ids [] = { {
36071 +static const struct pci_device_id __devinitconst pci_ids [] = { {
36072 .vendor = PCI_VENDOR_ID_APPLE,
36073 .device = PCI_ANY_ID,
36074 .subvendor = PCI_ANY_ID,
36075 diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
36076 index a348bb0..ecd9b3f 100644
36077 --- a/drivers/macintosh/via-pmu-backlight.c
36078 +++ b/drivers/macintosh/via-pmu-backlight.c
36079 @@ -15,7 +15,7 @@
36080
36081 #define MAX_PMU_LEVEL 0xFF
36082
36083 -static struct backlight_ops pmu_backlight_data;
36084 +static const struct backlight_ops pmu_backlight_data;
36085 static DEFINE_SPINLOCK(pmu_backlight_lock);
36086 static int sleeping, uses_pmu_bl;
36087 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
36088 @@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(struct backlight_device *bd)
36089 return bd->props.brightness;
36090 }
36091
36092 -static struct backlight_ops pmu_backlight_data = {
36093 +static const struct backlight_ops pmu_backlight_data = {
36094 .get_brightness = pmu_backlight_get_brightness,
36095 .update_status = pmu_backlight_update_status,
36096
36097 diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
36098 index 6f308a4..b5f7ff7 100644
36099 --- a/drivers/macintosh/via-pmu.c
36100 +++ b/drivers/macintosh/via-pmu.c
36101 @@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state_t state)
36102 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
36103 }
36104
36105 -static struct platform_suspend_ops pmu_pm_ops = {
36106 +static const struct platform_suspend_ops pmu_pm_ops = {
36107 .enter = powerbook_sleep,
36108 .valid = pmu_sleep_valid,
36109 };
36110 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
36111 index 818b617..4656e38 100644
36112 --- a/drivers/md/dm-ioctl.c
36113 +++ b/drivers/md/dm-ioctl.c
36114 @@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
36115 cmd == DM_LIST_VERSIONS_CMD)
36116 return 0;
36117
36118 - if ((cmd == DM_DEV_CREATE_CMD)) {
36119 + if (cmd == DM_DEV_CREATE_CMD) {
36120 if (!*param->name) {
36121 DMWARN("name not supplied when creating device");
36122 return -EINVAL;
36123 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
36124 index 6021d0a..a878643 100644
36125 --- a/drivers/md/dm-raid1.c
36126 +++ b/drivers/md/dm-raid1.c
36127 @@ -41,7 +41,7 @@ enum dm_raid1_error {
36128
36129 struct mirror {
36130 struct mirror_set *ms;
36131 - atomic_t error_count;
36132 + atomic_unchecked_t error_count;
36133 unsigned long error_type;
36134 struct dm_dev *dev;
36135 sector_t offset;
36136 @@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
36137 * simple way to tell if a device has encountered
36138 * errors.
36139 */
36140 - atomic_inc(&m->error_count);
36141 + atomic_inc_unchecked(&m->error_count);
36142
36143 if (test_and_set_bit(error_type, &m->error_type))
36144 return;
36145 @@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
36146 }
36147
36148 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
36149 - if (!atomic_read(&new->error_count)) {
36150 + if (!atomic_read_unchecked(&new->error_count)) {
36151 set_default_mirror(new);
36152 break;
36153 }
36154 @@ -363,7 +363,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
36155 struct mirror *m = get_default_mirror(ms);
36156
36157 do {
36158 - if (likely(!atomic_read(&m->error_count)))
36159 + if (likely(!atomic_read_unchecked(&m->error_count)))
36160 return m;
36161
36162 if (m-- == ms->mirror)
36163 @@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
36164 {
36165 struct mirror *default_mirror = get_default_mirror(m->ms);
36166
36167 - return !atomic_read(&default_mirror->error_count);
36168 + return !atomic_read_unchecked(&default_mirror->error_count);
36169 }
36170
36171 static int mirror_available(struct mirror_set *ms, struct bio *bio)
36172 @@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
36173 */
36174 if (likely(region_in_sync(ms, region, 1)))
36175 m = choose_mirror(ms, bio->bi_sector);
36176 - else if (m && atomic_read(&m->error_count))
36177 + else if (m && atomic_read_unchecked(&m->error_count))
36178 m = NULL;
36179
36180 if (likely(m))
36181 @@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
36182 }
36183
36184 ms->mirror[mirror].ms = ms;
36185 - atomic_set(&(ms->mirror[mirror].error_count), 0);
36186 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
36187 ms->mirror[mirror].error_type = 0;
36188 ms->mirror[mirror].offset = offset;
36189
36190 @@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_target *ti)
36191 */
36192 static char device_status_char(struct mirror *m)
36193 {
36194 - if (!atomic_read(&(m->error_count)))
36195 + if (!atomic_read_unchecked(&(m->error_count)))
36196 return 'A';
36197
36198 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
36199 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
36200 index bd58703..9f26571 100644
36201 --- a/drivers/md/dm-stripe.c
36202 +++ b/drivers/md/dm-stripe.c
36203 @@ -20,7 +20,7 @@ struct stripe {
36204 struct dm_dev *dev;
36205 sector_t physical_start;
36206
36207 - atomic_t error_count;
36208 + atomic_unchecked_t error_count;
36209 };
36210
36211 struct stripe_c {
36212 @@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
36213 kfree(sc);
36214 return r;
36215 }
36216 - atomic_set(&(sc->stripe[i].error_count), 0);
36217 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
36218 }
36219
36220 ti->private = sc;
36221 @@ -257,7 +257,7 @@ static int stripe_status(struct dm_target *ti,
36222 DMEMIT("%d ", sc->stripes);
36223 for (i = 0; i < sc->stripes; i++) {
36224 DMEMIT("%s ", sc->stripe[i].dev->name);
36225 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
36226 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
36227 'D' : 'A';
36228 }
36229 buffer[i] = '\0';
36230 @@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
36231 */
36232 for (i = 0; i < sc->stripes; i++)
36233 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
36234 - atomic_inc(&(sc->stripe[i].error_count));
36235 - if (atomic_read(&(sc->stripe[i].error_count)) <
36236 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
36237 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
36238 DM_IO_ERROR_THRESHOLD)
36239 queue_work(kstriped, &sc->kstriped_ws);
36240 }
36241 diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
36242 index 4b04590..13a77b2 100644
36243 --- a/drivers/md/dm-sysfs.c
36244 +++ b/drivers/md/dm-sysfs.c
36245 @@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
36246 NULL,
36247 };
36248
36249 -static struct sysfs_ops dm_sysfs_ops = {
36250 +static const struct sysfs_ops dm_sysfs_ops = {
36251 .show = dm_attr_show,
36252 };
36253
36254 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
36255 index 03345bb..332250d 100644
36256 --- a/drivers/md/dm-table.c
36257 +++ b/drivers/md/dm-table.c
36258 @@ -376,7 +376,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
36259 if (!dev_size)
36260 return 0;
36261
36262 - if ((start >= dev_size) || (start + len > dev_size)) {
36263 + if ((start >= dev_size) || (len > dev_size - start)) {
36264 DMWARN("%s: %s too small for target: "
36265 "start=%llu, len=%llu, dev_size=%llu",
36266 dm_device_name(ti->table->md), bdevname(bdev, b),
36267 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
36268 index c988ac2..c418141 100644
36269 --- a/drivers/md/dm.c
36270 +++ b/drivers/md/dm.c
36271 @@ -165,9 +165,9 @@ struct mapped_device {
36272 /*
36273 * Event handling.
36274 */
36275 - atomic_t event_nr;
36276 + atomic_unchecked_t event_nr;
36277 wait_queue_head_t eventq;
36278 - atomic_t uevent_seq;
36279 + atomic_unchecked_t uevent_seq;
36280 struct list_head uevent_list;
36281 spinlock_t uevent_lock; /* Protect access to uevent_list */
36282
36283 @@ -1776,8 +1776,8 @@ static struct mapped_device *alloc_dev(int minor)
36284 rwlock_init(&md->map_lock);
36285 atomic_set(&md->holders, 1);
36286 atomic_set(&md->open_count, 0);
36287 - atomic_set(&md->event_nr, 0);
36288 - atomic_set(&md->uevent_seq, 0);
36289 + atomic_set_unchecked(&md->event_nr, 0);
36290 + atomic_set_unchecked(&md->uevent_seq, 0);
36291 INIT_LIST_HEAD(&md->uevent_list);
36292 spin_lock_init(&md->uevent_lock);
36293
36294 @@ -1927,7 +1927,7 @@ static void event_callback(void *context)
36295
36296 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
36297
36298 - atomic_inc(&md->event_nr);
36299 + atomic_inc_unchecked(&md->event_nr);
36300 wake_up(&md->eventq);
36301 }
36302
36303 @@ -2562,18 +2562,18 @@ void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
36304
36305 uint32_t dm_next_uevent_seq(struct mapped_device *md)
36306 {
36307 - return atomic_add_return(1, &md->uevent_seq);
36308 + return atomic_add_return_unchecked(1, &md->uevent_seq);
36309 }
36310
36311 uint32_t dm_get_event_nr(struct mapped_device *md)
36312 {
36313 - return atomic_read(&md->event_nr);
36314 + return atomic_read_unchecked(&md->event_nr);
36315 }
36316
36317 int dm_wait_event(struct mapped_device *md, int event_nr)
36318 {
36319 return wait_event_interruptible(md->eventq,
36320 - (event_nr != atomic_read(&md->event_nr)));
36321 + (event_nr != atomic_read_unchecked(&md->event_nr)));
36322 }
36323
36324 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
36325 diff --git a/drivers/md/md.c b/drivers/md/md.c
36326 index 4ce6e2f..7a9530a 100644
36327 --- a/drivers/md/md.c
36328 +++ b/drivers/md/md.c
36329 @@ -153,10 +153,10 @@ static int start_readonly;
36330 * start build, activate spare
36331 */
36332 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
36333 -static atomic_t md_event_count;
36334 +static atomic_unchecked_t md_event_count;
36335 void md_new_event(mddev_t *mddev)
36336 {
36337 - atomic_inc(&md_event_count);
36338 + atomic_inc_unchecked(&md_event_count);
36339 wake_up(&md_event_waiters);
36340 }
36341 EXPORT_SYMBOL_GPL(md_new_event);
36342 @@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
36343 */
36344 static void md_new_event_inintr(mddev_t *mddev)
36345 {
36346 - atomic_inc(&md_event_count);
36347 + atomic_inc_unchecked(&md_event_count);
36348 wake_up(&md_event_waiters);
36349 }
36350
36351 @@ -1226,7 +1226,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
36352
36353 rdev->preferred_minor = 0xffff;
36354 rdev->data_offset = le64_to_cpu(sb->data_offset);
36355 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
36356 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
36357
36358 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
36359 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
36360 @@ -1400,7 +1400,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
36361 else
36362 sb->resync_offset = cpu_to_le64(0);
36363
36364 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
36365 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
36366
36367 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
36368 sb->size = cpu_to_le64(mddev->dev_sectors);
36369 @@ -2222,7 +2222,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
36370 static ssize_t
36371 errors_show(mdk_rdev_t *rdev, char *page)
36372 {
36373 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
36374 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
36375 }
36376
36377 static ssize_t
36378 @@ -2231,7 +2231,7 @@ errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
36379 char *e;
36380 unsigned long n = simple_strtoul(buf, &e, 10);
36381 if (*buf && (*e == 0 || *e == '\n')) {
36382 - atomic_set(&rdev->corrected_errors, n);
36383 + atomic_set_unchecked(&rdev->corrected_errors, n);
36384 return len;
36385 }
36386 return -EINVAL;
36387 @@ -2525,7 +2525,7 @@ static void rdev_free(struct kobject *ko)
36388 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
36389 kfree(rdev);
36390 }
36391 -static struct sysfs_ops rdev_sysfs_ops = {
36392 +static const struct sysfs_ops rdev_sysfs_ops = {
36393 .show = rdev_attr_show,
36394 .store = rdev_attr_store,
36395 };
36396 @@ -2574,8 +2574,8 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
36397 rdev->data_offset = 0;
36398 rdev->sb_events = 0;
36399 atomic_set(&rdev->nr_pending, 0);
36400 - atomic_set(&rdev->read_errors, 0);
36401 - atomic_set(&rdev->corrected_errors, 0);
36402 + atomic_set_unchecked(&rdev->read_errors, 0);
36403 + atomic_set_unchecked(&rdev->corrected_errors, 0);
36404
36405 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
36406 if (!size) {
36407 @@ -3895,7 +3895,7 @@ static void md_free(struct kobject *ko)
36408 kfree(mddev);
36409 }
36410
36411 -static struct sysfs_ops md_sysfs_ops = {
36412 +static const struct sysfs_ops md_sysfs_ops = {
36413 .show = md_attr_show,
36414 .store = md_attr_store,
36415 };
36416 @@ -4482,7 +4482,8 @@ out:
36417 err = 0;
36418 blk_integrity_unregister(disk);
36419 md_new_event(mddev);
36420 - sysfs_notify_dirent(mddev->sysfs_state);
36421 + if (mddev->sysfs_state)
36422 + sysfs_notify_dirent(mddev->sysfs_state);
36423 return err;
36424 }
36425
36426 @@ -5962,7 +5963,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
36427
36428 spin_unlock(&pers_lock);
36429 seq_printf(seq, "\n");
36430 - mi->event = atomic_read(&md_event_count);
36431 + mi->event = atomic_read_unchecked(&md_event_count);
36432 return 0;
36433 }
36434 if (v == (void*)2) {
36435 @@ -6051,7 +6052,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
36436 chunk_kb ? "KB" : "B");
36437 if (bitmap->file) {
36438 seq_printf(seq, ", file: ");
36439 - seq_path(seq, &bitmap->file->f_path, " \t\n");
36440 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
36441 }
36442
36443 seq_printf(seq, "\n");
36444 @@ -6085,7 +6086,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
36445 else {
36446 struct seq_file *p = file->private_data;
36447 p->private = mi;
36448 - mi->event = atomic_read(&md_event_count);
36449 + mi->event = atomic_read_unchecked(&md_event_count);
36450 }
36451 return error;
36452 }
36453 @@ -6101,7 +6102,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
36454 /* always allow read */
36455 mask = POLLIN | POLLRDNORM;
36456
36457 - if (mi->event != atomic_read(&md_event_count))
36458 + if (mi->event != atomic_read_unchecked(&md_event_count))
36459 mask |= POLLERR | POLLPRI;
36460 return mask;
36461 }
36462 @@ -6145,7 +6146,7 @@ static int is_mddev_idle(mddev_t *mddev, int init)
36463 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
36464 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
36465 (int)part_stat_read(&disk->part0, sectors[1]) -
36466 - atomic_read(&disk->sync_io);
36467 + atomic_read_unchecked(&disk->sync_io);
36468 /* sync IO will cause sync_io to increase before the disk_stats
36469 * as sync_io is counted when a request starts, and
36470 * disk_stats is counted when it completes.
36471 diff --git a/drivers/md/md.h b/drivers/md/md.h
36472 index 87430fe..0024a4c 100644
36473 --- a/drivers/md/md.h
36474 +++ b/drivers/md/md.h
36475 @@ -94,10 +94,10 @@ struct mdk_rdev_s
36476 * only maintained for arrays that
36477 * support hot removal
36478 */
36479 - atomic_t read_errors; /* number of consecutive read errors that
36480 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
36481 * we have tried to ignore.
36482 */
36483 - atomic_t corrected_errors; /* number of corrected read errors,
36484 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
36485 * for reporting to userspace and storing
36486 * in superblock.
36487 */
36488 @@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
36489
36490 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
36491 {
36492 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
36493 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
36494 }
36495
36496 struct mdk_personality
36497 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
36498 index 968cb14..f0ad2e4 100644
36499 --- a/drivers/md/raid1.c
36500 +++ b/drivers/md/raid1.c
36501 @@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
36502 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
36503 continue;
36504 rdev = conf->mirrors[d].rdev;
36505 - atomic_add(s, &rdev->corrected_errors);
36506 + atomic_add_unchecked(s, &rdev->corrected_errors);
36507 if (sync_page_io(rdev->bdev,
36508 sect + rdev->data_offset,
36509 s<<9,
36510 @@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
36511 /* Well, this device is dead */
36512 md_error(mddev, rdev);
36513 else {
36514 - atomic_add(s, &rdev->corrected_errors);
36515 + atomic_add_unchecked(s, &rdev->corrected_errors);
36516 printk(KERN_INFO
36517 "raid1:%s: read error corrected "
36518 "(%d sectors at %llu on %s)\n",
36519 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
36520 index 1b4e232..cf0f534 100644
36521 --- a/drivers/md/raid10.c
36522 +++ b/drivers/md/raid10.c
36523 @@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bio, int error)
36524 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
36525 set_bit(R10BIO_Uptodate, &r10_bio->state);
36526 else {
36527 - atomic_add(r10_bio->sectors,
36528 + atomic_add_unchecked(r10_bio->sectors,
36529 &conf->mirrors[d].rdev->corrected_errors);
36530 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
36531 md_error(r10_bio->mddev,
36532 @@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
36533 test_bit(In_sync, &rdev->flags)) {
36534 atomic_inc(&rdev->nr_pending);
36535 rcu_read_unlock();
36536 - atomic_add(s, &rdev->corrected_errors);
36537 + atomic_add_unchecked(s, &rdev->corrected_errors);
36538 if (sync_page_io(rdev->bdev,
36539 r10_bio->devs[sl].addr +
36540 sect + rdev->data_offset,
36541 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
36542 index 883215d..675bf47 100644
36543 --- a/drivers/md/raid5.c
36544 +++ b/drivers/md/raid5.c
36545 @@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
36546 bi->bi_next = NULL;
36547 if ((rw & WRITE) &&
36548 test_bit(R5_ReWrite, &sh->dev[i].flags))
36549 - atomic_add(STRIPE_SECTORS,
36550 + atomic_add_unchecked(STRIPE_SECTORS,
36551 &rdev->corrected_errors);
36552 generic_make_request(bi);
36553 } else {
36554 @@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struct bio * bi, int error)
36555 clear_bit(R5_ReadError, &sh->dev[i].flags);
36556 clear_bit(R5_ReWrite, &sh->dev[i].flags);
36557 }
36558 - if (atomic_read(&conf->disks[i].rdev->read_errors))
36559 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
36560 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
36561 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
36562 } else {
36563 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
36564 int retry = 0;
36565 rdev = conf->disks[i].rdev;
36566
36567 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
36568 - atomic_inc(&rdev->read_errors);
36569 + atomic_inc_unchecked(&rdev->read_errors);
36570 if (conf->mddev->degraded >= conf->max_degraded)
36571 printk_rl(KERN_WARNING
36572 "raid5:%s: read error not correctable "
36573 @@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
36574 (unsigned long long)(sh->sector
36575 + rdev->data_offset),
36576 bdn);
36577 - else if (atomic_read(&rdev->read_errors)
36578 + else if (atomic_read_unchecked(&rdev->read_errors)
36579 > conf->max_nr_stripes)
36580 printk(KERN_WARNING
36581 "raid5:%s: Too many read errors, failing device %s.\n",
36582 @@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
36583 sector_t r_sector;
36584 struct stripe_head sh2;
36585
36586 + pax_track_stack();
36587
36588 chunk_offset = sector_div(new_sector, sectors_per_chunk);
36589 stripe = new_sector;
36590 diff --git a/drivers/media/common/saa7146_hlp.c b/drivers/media/common/saa7146_hlp.c
36591 index 05bde9c..2f31d40 100644
36592 --- a/drivers/media/common/saa7146_hlp.c
36593 +++ b/drivers/media/common/saa7146_hlp.c
36594 @@ -353,6 +353,8 @@ static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct sa
36595
36596 int x[32], y[32], w[32], h[32];
36597
36598 + pax_track_stack();
36599 +
36600 /* clear out memory */
36601 memset(&line_list[0], 0x00, sizeof(u32)*32);
36602 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
36603 diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36604 index cb22da5..82b686e 100644
36605 --- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36606 +++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36607 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * eb
36608 u8 buf[HOST_LINK_BUF_SIZE];
36609 int i;
36610
36611 + pax_track_stack();
36612 +
36613 dprintk("%s\n", __func__);
36614
36615 /* check if we have space for a link buf in the rx_buffer */
36616 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
36617 unsigned long timeout;
36618 int written;
36619
36620 + pax_track_stack();
36621 +
36622 dprintk("%s\n", __func__);
36623
36624 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
36625 diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
36626 index 2fe05d0..a3289c4 100644
36627 --- a/drivers/media/dvb/dvb-core/dvb_demux.h
36628 +++ b/drivers/media/dvb/dvb-core/dvb_demux.h
36629 @@ -71,7 +71,7 @@ struct dvb_demux_feed {
36630 union {
36631 dmx_ts_cb ts;
36632 dmx_section_cb sec;
36633 - } cb;
36634 + } __no_const cb;
36635
36636 struct dvb_demux *demux;
36637 void *priv;
36638 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
36639 index 94159b9..376bd8e 100644
36640 --- a/drivers/media/dvb/dvb-core/dvbdev.c
36641 +++ b/drivers/media/dvb/dvb-core/dvbdev.c
36642 @@ -191,7 +191,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
36643 const struct dvb_device *template, void *priv, int type)
36644 {
36645 struct dvb_device *dvbdev;
36646 - struct file_operations *dvbdevfops;
36647 + file_operations_no_const *dvbdevfops;
36648 struct device *clsdev;
36649 int minor;
36650 int id;
36651 diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
36652 index 2a53dd0..db8c07a 100644
36653 --- a/drivers/media/dvb/dvb-usb/cxusb.c
36654 +++ b/drivers/media/dvb/dvb-usb/cxusb.c
36655 @@ -1040,7 +1040,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
36656 struct dib0700_adapter_state {
36657 int (*set_param_save) (struct dvb_frontend *,
36658 struct dvb_frontend_parameters *);
36659 -};
36660 +} __no_const;
36661
36662 static int dib7070_set_param_override(struct dvb_frontend *fe,
36663 struct dvb_frontend_parameters *fep)
36664 diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
36665 index db7f7f7..f55e96f 100644
36666 --- a/drivers/media/dvb/dvb-usb/dib0700_core.c
36667 +++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
36668 @@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw
36669
36670 u8 buf[260];
36671
36672 + pax_track_stack();
36673 +
36674 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
36675 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
36676
36677 diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
36678 index 524acf5..5ffc403 100644
36679 --- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
36680 +++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
36681 @@ -28,7 +28,7 @@ MODULE_PARM_DESC(force_lna_activation, "force the activation of Low-Noise-Amplif
36682
36683 struct dib0700_adapter_state {
36684 int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
36685 -};
36686 +} __no_const;
36687
36688 /* Hauppauge Nova-T 500 (aka Bristol)
36689 * has a LNA on GPIO0 which is enabled by setting 1 */
36690 diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
36691 index ba91735..4261d84 100644
36692 --- a/drivers/media/dvb/frontends/dib3000.h
36693 +++ b/drivers/media/dvb/frontends/dib3000.h
36694 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
36695 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
36696 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
36697 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
36698 -};
36699 +} __no_const;
36700
36701 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
36702 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
36703 diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c
36704 index c709ce6..b3fe620 100644
36705 --- a/drivers/media/dvb/frontends/or51211.c
36706 +++ b/drivers/media/dvb/frontends/or51211.c
36707 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct dvb_frontend* fe,
36708 u8 tudata[585];
36709 int i;
36710
36711 + pax_track_stack();
36712 +
36713 dprintk("Firmware is %zd bytes\n",fw->size);
36714
36715 /* Get eprom data */
36716 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
36717 index 482d0f3..ee1e202 100644
36718 --- a/drivers/media/radio/radio-cadet.c
36719 +++ b/drivers/media/radio/radio-cadet.c
36720 @@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
36721 while (i < count && dev->rdsin != dev->rdsout)
36722 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
36723
36724 - if (copy_to_user(data, readbuf, i))
36725 + if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
36726 return -EFAULT;
36727 return i;
36728 }
36729 diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
36730 index 6dd51e2..0359b92 100644
36731 --- a/drivers/media/video/cx18/cx18-driver.c
36732 +++ b/drivers/media/video/cx18/cx18-driver.c
36733 @@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl[] __devinitdata = {
36734
36735 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
36736
36737 -static atomic_t cx18_instance = ATOMIC_INIT(0);
36738 +static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
36739
36740 /* Parameter declarations */
36741 static int cardtype[CX18_MAX_CARDS];
36742 @@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
36743 struct i2c_client c;
36744 u8 eedata[256];
36745
36746 + pax_track_stack();
36747 +
36748 memset(&c, 0, sizeof(c));
36749 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
36750 c.adapter = &cx->i2c_adap[0];
36751 @@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
36752 struct cx18 *cx;
36753
36754 /* FIXME - module parameter arrays constrain max instances */
36755 - i = atomic_inc_return(&cx18_instance) - 1;
36756 + i = atomic_inc_return_unchecked(&cx18_instance) - 1;
36757 if (i >= CX18_MAX_CARDS) {
36758 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
36759 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
36760 diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
36761 index 463ec34..2f4625a 100644
36762 --- a/drivers/media/video/ivtv/ivtv-driver.c
36763 +++ b/drivers/media/video/ivtv/ivtv-driver.c
36764 @@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl[] __devinitdata = {
36765 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
36766
36767 /* ivtv instance counter */
36768 -static atomic_t ivtv_instance = ATOMIC_INIT(0);
36769 +static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
36770
36771 /* Parameter declarations */
36772 static int cardtype[IVTV_MAX_CARDS];
36773 diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
36774 index 5fc4ac0..652a54a 100644
36775 --- a/drivers/media/video/omap24xxcam.c
36776 +++ b/drivers/media/video/omap24xxcam.c
36777 @@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(struct omap24xxcam_sgdma *sgdma,
36778 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
36779
36780 do_gettimeofday(&vb->ts);
36781 - vb->field_count = atomic_add_return(2, &fh->field_count);
36782 + vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
36783 if (csr & csr_error) {
36784 vb->state = VIDEOBUF_ERROR;
36785 if (!atomic_read(&fh->cam->in_reset)) {
36786 diff --git a/drivers/media/video/omap24xxcam.h b/drivers/media/video/omap24xxcam.h
36787 index 2ce67f5..cf26a5b 100644
36788 --- a/drivers/media/video/omap24xxcam.h
36789 +++ b/drivers/media/video/omap24xxcam.h
36790 @@ -533,7 +533,7 @@ struct omap24xxcam_fh {
36791 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
36792 struct videobuf_queue vbq;
36793 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
36794 - atomic_t field_count; /* field counter for videobuf_buffer */
36795 + atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
36796 /* accessing cam here doesn't need serialisation: it's constant */
36797 struct omap24xxcam_device *cam;
36798 };
36799 diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36800 index 299afa4..eb47459 100644
36801 --- a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36802 +++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36803 @@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
36804 u8 *eeprom;
36805 struct tveeprom tvdata;
36806
36807 + pax_track_stack();
36808 +
36809 memset(&tvdata,0,sizeof(tvdata));
36810
36811 eeprom = pvr2_eeprom_fetch(hdw);
36812 diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36813 index 5b152ff..3320638 100644
36814 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36815 +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36816 @@ -195,7 +195,7 @@ struct pvr2_hdw {
36817
36818 /* I2C stuff */
36819 struct i2c_adapter i2c_adap;
36820 - struct i2c_algorithm i2c_algo;
36821 + i2c_algorithm_no_const i2c_algo;
36822 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
36823 int i2c_cx25840_hack_state;
36824 int i2c_linked;
36825 diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
36826 index 1eabff6..8e2313a 100644
36827 --- a/drivers/media/video/saa7134/saa6752hs.c
36828 +++ b/drivers/media/video/saa7134/saa6752hs.c
36829 @@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_subdev *sd, u32 leading_null_bytes)
36830 unsigned char localPAT[256];
36831 unsigned char localPMT[256];
36832
36833 + pax_track_stack();
36834 +
36835 /* Set video format - must be done first as it resets other settings */
36836 set_reg8(client, 0x41, h->video_format);
36837
36838 diff --git a/drivers/media/video/saa7164/saa7164-cmd.c b/drivers/media/video/saa7164/saa7164-cmd.c
36839 index 9c1d3ac..b1b49e9 100644
36840 --- a/drivers/media/video/saa7164/saa7164-cmd.c
36841 +++ b/drivers/media/video/saa7164/saa7164-cmd.c
36842 @@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_dev *dev)
36843 wait_queue_head_t *q = 0;
36844 dprintk(DBGLVL_CMD, "%s()\n", __func__);
36845
36846 + pax_track_stack();
36847 +
36848 /* While any outstand message on the bus exists... */
36849 do {
36850
36851 @@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_dev *dev)
36852 u8 tmp[512];
36853 dprintk(DBGLVL_CMD, "%s()\n", __func__);
36854
36855 + pax_track_stack();
36856 +
36857 while (loop) {
36858
36859 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
36860 diff --git a/drivers/media/video/usbvideo/ibmcam.c b/drivers/media/video/usbvideo/ibmcam.c
36861 index b085496..cde0270 100644
36862 --- a/drivers/media/video/usbvideo/ibmcam.c
36863 +++ b/drivers/media/video/usbvideo/ibmcam.c
36864 @@ -3947,15 +3947,15 @@ static struct usb_device_id id_table[] = {
36865 static int __init ibmcam_init(void)
36866 {
36867 struct usbvideo_cb cbTbl;
36868 - memset(&cbTbl, 0, sizeof(cbTbl));
36869 - cbTbl.probe = ibmcam_probe;
36870 - cbTbl.setupOnOpen = ibmcam_setup_on_open;
36871 - cbTbl.videoStart = ibmcam_video_start;
36872 - cbTbl.videoStop = ibmcam_video_stop;
36873 - cbTbl.processData = ibmcam_ProcessIsocData;
36874 - cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36875 - cbTbl.adjustPicture = ibmcam_adjust_picture;
36876 - cbTbl.getFPS = ibmcam_calculate_fps;
36877 + memset((void *)&cbTbl, 0, sizeof(cbTbl));
36878 + *(void **)&cbTbl.probe = ibmcam_probe;
36879 + *(void **)&cbTbl.setupOnOpen = ibmcam_setup_on_open;
36880 + *(void **)&cbTbl.videoStart = ibmcam_video_start;
36881 + *(void **)&cbTbl.videoStop = ibmcam_video_stop;
36882 + *(void **)&cbTbl.processData = ibmcam_ProcessIsocData;
36883 + *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36884 + *(void **)&cbTbl.adjustPicture = ibmcam_adjust_picture;
36885 + *(void **)&cbTbl.getFPS = ibmcam_calculate_fps;
36886 return usbvideo_register(
36887 &cams,
36888 MAX_IBMCAM,
36889 diff --git a/drivers/media/video/usbvideo/konicawc.c b/drivers/media/video/usbvideo/konicawc.c
36890 index 31d57f2..600b735 100644
36891 --- a/drivers/media/video/usbvideo/konicawc.c
36892 +++ b/drivers/media/video/usbvideo/konicawc.c
36893 @@ -225,7 +225,7 @@ static void konicawc_register_input(struct konicawc *cam, struct usb_device *dev
36894 int error;
36895
36896 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
36897 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36898 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36899
36900 cam->input = input_dev = input_allocate_device();
36901 if (!input_dev) {
36902 @@ -935,16 +935,16 @@ static int __init konicawc_init(void)
36903 struct usbvideo_cb cbTbl;
36904 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
36905 DRIVER_DESC "\n");
36906 - memset(&cbTbl, 0, sizeof(cbTbl));
36907 - cbTbl.probe = konicawc_probe;
36908 - cbTbl.setupOnOpen = konicawc_setup_on_open;
36909 - cbTbl.processData = konicawc_process_isoc;
36910 - cbTbl.getFPS = konicawc_calculate_fps;
36911 - cbTbl.setVideoMode = konicawc_set_video_mode;
36912 - cbTbl.startDataPump = konicawc_start_data;
36913 - cbTbl.stopDataPump = konicawc_stop_data;
36914 - cbTbl.adjustPicture = konicawc_adjust_picture;
36915 - cbTbl.userFree = konicawc_free_uvd;
36916 + memset((void * )&cbTbl, 0, sizeof(cbTbl));
36917 + *(void **)&cbTbl.probe = konicawc_probe;
36918 + *(void **)&cbTbl.setupOnOpen = konicawc_setup_on_open;
36919 + *(void **)&cbTbl.processData = konicawc_process_isoc;
36920 + *(void **)&cbTbl.getFPS = konicawc_calculate_fps;
36921 + *(void **)&cbTbl.setVideoMode = konicawc_set_video_mode;
36922 + *(void **)&cbTbl.startDataPump = konicawc_start_data;
36923 + *(void **)&cbTbl.stopDataPump = konicawc_stop_data;
36924 + *(void **)&cbTbl.adjustPicture = konicawc_adjust_picture;
36925 + *(void **)&cbTbl.userFree = konicawc_free_uvd;
36926 return usbvideo_register(
36927 &cams,
36928 MAX_CAMERAS,
36929 diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
36930 index 803d3e4..c4d1b96 100644
36931 --- a/drivers/media/video/usbvideo/quickcam_messenger.c
36932 +++ b/drivers/media/video/usbvideo/quickcam_messenger.c
36933 @@ -89,7 +89,7 @@ static void qcm_register_input(struct qcm *cam, struct usb_device *dev)
36934 int error;
36935
36936 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
36937 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36938 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36939
36940 cam->input = input_dev = input_allocate_device();
36941 if (!input_dev) {
36942 diff --git a/drivers/media/video/usbvideo/ultracam.c b/drivers/media/video/usbvideo/ultracam.c
36943 index fbd1b63..292f9f0 100644
36944 --- a/drivers/media/video/usbvideo/ultracam.c
36945 +++ b/drivers/media/video/usbvideo/ultracam.c
36946 @@ -655,14 +655,14 @@ static int __init ultracam_init(void)
36947 {
36948 struct usbvideo_cb cbTbl;
36949 memset(&cbTbl, 0, sizeof(cbTbl));
36950 - cbTbl.probe = ultracam_probe;
36951 - cbTbl.setupOnOpen = ultracam_setup_on_open;
36952 - cbTbl.videoStart = ultracam_video_start;
36953 - cbTbl.videoStop = ultracam_video_stop;
36954 - cbTbl.processData = ultracam_ProcessIsocData;
36955 - cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36956 - cbTbl.adjustPicture = ultracam_adjust_picture;
36957 - cbTbl.getFPS = ultracam_calculate_fps;
36958 + *(void **)&cbTbl.probe = ultracam_probe;
36959 + *(void **)&cbTbl.setupOnOpen = ultracam_setup_on_open;
36960 + *(void **)&cbTbl.videoStart = ultracam_video_start;
36961 + *(void **)&cbTbl.videoStop = ultracam_video_stop;
36962 + *(void **)&cbTbl.processData = ultracam_ProcessIsocData;
36963 + *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36964 + *(void **)&cbTbl.adjustPicture = ultracam_adjust_picture;
36965 + *(void **)&cbTbl.getFPS = ultracam_calculate_fps;
36966 return usbvideo_register(
36967 &cams,
36968 MAX_CAMERAS,
36969 diff --git a/drivers/media/video/usbvideo/usbvideo.c b/drivers/media/video/usbvideo/usbvideo.c
36970 index dea8b32..34f6878 100644
36971 --- a/drivers/media/video/usbvideo/usbvideo.c
36972 +++ b/drivers/media/video/usbvideo/usbvideo.c
36973 @@ -697,15 +697,15 @@ int usbvideo_register(
36974 __func__, cams, base_size, num_cams);
36975
36976 /* Copy callbacks, apply defaults for those that are not set */
36977 - memmove(&cams->cb, cbTbl, sizeof(cams->cb));
36978 + memmove((void *)&cams->cb, cbTbl, sizeof(cams->cb));
36979 if (cams->cb.getFrame == NULL)
36980 - cams->cb.getFrame = usbvideo_GetFrame;
36981 + *(void **)&cams->cb.getFrame = usbvideo_GetFrame;
36982 if (cams->cb.disconnect == NULL)
36983 - cams->cb.disconnect = usbvideo_Disconnect;
36984 + *(void **)&cams->cb.disconnect = usbvideo_Disconnect;
36985 if (cams->cb.startDataPump == NULL)
36986 - cams->cb.startDataPump = usbvideo_StartDataPump;
36987 + *(void **)&cams->cb.startDataPump = usbvideo_StartDataPump;
36988 if (cams->cb.stopDataPump == NULL)
36989 - cams->cb.stopDataPump = usbvideo_StopDataPump;
36990 + *(void **)&cams->cb.stopDataPump = usbvideo_StopDataPump;
36991
36992 cams->num_cameras = num_cams;
36993 cams->cam = (struct uvd *) &cams[1];
36994 diff --git a/drivers/media/video/usbvideo/usbvideo.h b/drivers/media/video/usbvideo/usbvideo.h
36995 index c66985b..7fa143a 100644
36996 --- a/drivers/media/video/usbvideo/usbvideo.h
36997 +++ b/drivers/media/video/usbvideo/usbvideo.h
36998 @@ -268,7 +268,7 @@ struct usbvideo_cb {
36999 int (*startDataPump)(struct uvd *uvd);
37000 void (*stopDataPump)(struct uvd *uvd);
37001 int (*setVideoMode)(struct uvd *uvd, struct video_window *vw);
37002 -};
37003 +} __no_const;
37004
37005 struct usbvideo {
37006 int num_cameras; /* As allocated */
37007 diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
37008 index e0f91e4..37554ea 100644
37009 --- a/drivers/media/video/usbvision/usbvision-core.c
37010 +++ b/drivers/media/video/usbvision/usbvision-core.c
37011 @@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_compress(struct usb_usbvision *usbvision,
37012 unsigned char rv, gv, bv;
37013 static unsigned char *Y, *U, *V;
37014
37015 + pax_track_stack();
37016 +
37017 frame = usbvision->curFrame;
37018 imageSize = frame->frmwidth * frame->frmheight;
37019 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
37020 diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
37021 index 0d06e7c..3d17d24 100644
37022 --- a/drivers/media/video/v4l2-device.c
37023 +++ b/drivers/media/video/v4l2-device.c
37024 @@ -50,9 +50,9 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
37025 EXPORT_SYMBOL_GPL(v4l2_device_register);
37026
37027 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
37028 - atomic_t *instance)
37029 + atomic_unchecked_t *instance)
37030 {
37031 - int num = atomic_inc_return(instance) - 1;
37032 + int num = atomic_inc_return_unchecked(instance) - 1;
37033 int len = strlen(basename);
37034
37035 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
37036 diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
37037 index 032ebae..6a3532c 100644
37038 --- a/drivers/media/video/videobuf-dma-sg.c
37039 +++ b/drivers/media/video/videobuf-dma-sg.c
37040 @@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
37041 {
37042 struct videobuf_queue q;
37043
37044 + pax_track_stack();
37045 +
37046 /* Required to make generic handler to call __videobuf_alloc */
37047 q.int_ops = &sg_ops;
37048
37049 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
37050 index b6992b7..9fa7547 100644
37051 --- a/drivers/message/fusion/mptbase.c
37052 +++ b/drivers/message/fusion/mptbase.c
37053 @@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **start, off_t offset, int request, int *eo
37054 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
37055 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
37056
37057 +#ifdef CONFIG_GRKERNSEC_HIDESYM
37058 + len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
37059 + NULL, NULL);
37060 +#else
37061 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
37062 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
37063 +#endif
37064 +
37065 /*
37066 * Rounding UP to nearest 4-kB boundary here...
37067 */
37068 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
37069 index 83873e3..e360e9a 100644
37070 --- a/drivers/message/fusion/mptsas.c
37071 +++ b/drivers/message/fusion/mptsas.c
37072 @@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
37073 return 0;
37074 }
37075
37076 +static inline void
37077 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
37078 +{
37079 + if (phy_info->port_details) {
37080 + phy_info->port_details->rphy = rphy;
37081 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
37082 + ioc->name, rphy));
37083 + }
37084 +
37085 + if (rphy) {
37086 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
37087 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
37088 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
37089 + ioc->name, rphy, rphy->dev.release));
37090 + }
37091 +}
37092 +
37093 /* no mutex */
37094 static void
37095 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
37096 @@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
37097 return NULL;
37098 }
37099
37100 -static inline void
37101 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
37102 -{
37103 - if (phy_info->port_details) {
37104 - phy_info->port_details->rphy = rphy;
37105 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
37106 - ioc->name, rphy));
37107 - }
37108 -
37109 - if (rphy) {
37110 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
37111 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
37112 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
37113 - ioc->name, rphy, rphy->dev.release));
37114 - }
37115 -}
37116 -
37117 static inline struct sas_port *
37118 mptsas_get_port(struct mptsas_phyinfo *phy_info)
37119 {
37120 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
37121 index bd096ca..332cf76 100644
37122 --- a/drivers/message/fusion/mptscsih.c
37123 +++ b/drivers/message/fusion/mptscsih.c
37124 @@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
37125
37126 h = shost_priv(SChost);
37127
37128 - if (h) {
37129 - if (h->info_kbuf == NULL)
37130 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
37131 - return h->info_kbuf;
37132 - h->info_kbuf[0] = '\0';
37133 + if (!h)
37134 + return NULL;
37135
37136 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
37137 - h->info_kbuf[size-1] = '\0';
37138 - }
37139 + if (h->info_kbuf == NULL)
37140 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
37141 + return h->info_kbuf;
37142 + h->info_kbuf[0] = '\0';
37143 +
37144 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
37145 + h->info_kbuf[size-1] = '\0';
37146
37147 return h->info_kbuf;
37148 }
37149 diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
37150 index efba702..59b2c0f 100644
37151 --- a/drivers/message/i2o/i2o_config.c
37152 +++ b/drivers/message/i2o/i2o_config.c
37153 @@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned long arg)
37154 struct i2o_message *msg;
37155 unsigned int iop;
37156
37157 + pax_track_stack();
37158 +
37159 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
37160 return -EFAULT;
37161
37162 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
37163 index 7045c45..c07b170 100644
37164 --- a/drivers/message/i2o/i2o_proc.c
37165 +++ b/drivers/message/i2o/i2o_proc.c
37166 @@ -259,13 +259,6 @@ static char *scsi_devices[] = {
37167 "Array Controller Device"
37168 };
37169
37170 -static char *chtostr(u8 * chars, int n)
37171 -{
37172 - char tmp[256];
37173 - tmp[0] = 0;
37174 - return strncat(tmp, (char *)chars, n);
37175 -}
37176 -
37177 static int i2o_report_query_status(struct seq_file *seq, int block_status,
37178 char *group)
37179 {
37180 @@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
37181
37182 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
37183 seq_printf(seq, "%-#8x", ddm_table.module_id);
37184 - seq_printf(seq, "%-29s",
37185 - chtostr(ddm_table.module_name_version, 28));
37186 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
37187 seq_printf(seq, "%9d ", ddm_table.data_size);
37188 seq_printf(seq, "%8d", ddm_table.code_size);
37189
37190 @@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
37191
37192 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
37193 seq_printf(seq, "%-#8x", dst->module_id);
37194 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
37195 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
37196 + seq_printf(seq, "%-.28s", dst->module_name_version);
37197 + seq_printf(seq, "%-.8s", dst->date);
37198 seq_printf(seq, "%8d ", dst->module_size);
37199 seq_printf(seq, "%8d ", dst->mpb_size);
37200 seq_printf(seq, "0x%04x", dst->module_flags);
37201 @@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
37202 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
37203 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
37204 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
37205 - seq_printf(seq, "Vendor info : %s\n",
37206 - chtostr((u8 *) (work32 + 2), 16));
37207 - seq_printf(seq, "Product info : %s\n",
37208 - chtostr((u8 *) (work32 + 6), 16));
37209 - seq_printf(seq, "Description : %s\n",
37210 - chtostr((u8 *) (work32 + 10), 16));
37211 - seq_printf(seq, "Product rev. : %s\n",
37212 - chtostr((u8 *) (work32 + 14), 8));
37213 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
37214 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
37215 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
37216 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
37217
37218 seq_printf(seq, "Serial number : ");
37219 print_serial_number(seq, (u8 *) (work32 + 16),
37220 @@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
37221 }
37222
37223 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
37224 - seq_printf(seq, "Module name : %s\n",
37225 - chtostr(result.module_name, 24));
37226 - seq_printf(seq, "Module revision : %s\n",
37227 - chtostr(result.module_rev, 8));
37228 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
37229 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
37230
37231 seq_printf(seq, "Serial number : ");
37232 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
37233 @@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
37234 return 0;
37235 }
37236
37237 - seq_printf(seq, "Device name : %s\n",
37238 - chtostr(result.device_name, 64));
37239 - seq_printf(seq, "Service name : %s\n",
37240 - chtostr(result.service_name, 64));
37241 - seq_printf(seq, "Physical name : %s\n",
37242 - chtostr(result.physical_location, 64));
37243 - seq_printf(seq, "Instance number : %s\n",
37244 - chtostr(result.instance_number, 4));
37245 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
37246 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
37247 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
37248 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
37249
37250 return 0;
37251 }
37252 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
37253 index 27cf4af..b1205b8 100644
37254 --- a/drivers/message/i2o/iop.c
37255 +++ b/drivers/message/i2o/iop.c
37256 @@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
37257
37258 spin_lock_irqsave(&c->context_list_lock, flags);
37259
37260 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
37261 - atomic_inc(&c->context_list_counter);
37262 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
37263 + atomic_inc_unchecked(&c->context_list_counter);
37264
37265 - entry->context = atomic_read(&c->context_list_counter);
37266 + entry->context = atomic_read_unchecked(&c->context_list_counter);
37267
37268 list_add(&entry->list, &c->context_list);
37269
37270 @@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
37271
37272 #if BITS_PER_LONG == 64
37273 spin_lock_init(&c->context_list_lock);
37274 - atomic_set(&c->context_list_counter, 0);
37275 + atomic_set_unchecked(&c->context_list_counter, 0);
37276 INIT_LIST_HEAD(&c->context_list);
37277 #endif
37278
37279 diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
37280 index 78e3e85..66c9a0d 100644
37281 --- a/drivers/mfd/ab3100-core.c
37282 +++ b/drivers/mfd/ab3100-core.c
37283 @@ -777,7 +777,7 @@ struct ab_family_id {
37284 char *name;
37285 };
37286
37287 -static const struct ab_family_id ids[] __initdata = {
37288 +static const struct ab_family_id ids[] __initconst = {
37289 /* AB3100 */
37290 {
37291 .id = 0xc0,
37292 diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
37293 index 8d8c932..8104515 100644
37294 --- a/drivers/mfd/wm8350-i2c.c
37295 +++ b/drivers/mfd/wm8350-i2c.c
37296 @@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg,
37297 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
37298 int ret;
37299
37300 + pax_track_stack();
37301 +
37302 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
37303 return -EINVAL;
37304
37305 diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
37306 index e4ff50b..4cc3f04 100644
37307 --- a/drivers/misc/kgdbts.c
37308 +++ b/drivers/misc/kgdbts.c
37309 @@ -118,7 +118,7 @@
37310 } while (0)
37311 #define MAX_CONFIG_LEN 40
37312
37313 -static struct kgdb_io kgdbts_io_ops;
37314 +static const struct kgdb_io kgdbts_io_ops;
37315 static char get_buf[BUFMAX];
37316 static int get_buf_cnt;
37317 static char put_buf[BUFMAX];
37318 @@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void)
37319 module_put(THIS_MODULE);
37320 }
37321
37322 -static struct kgdb_io kgdbts_io_ops = {
37323 +static const struct kgdb_io kgdbts_io_ops = {
37324 .name = "kgdbts",
37325 .read_char = kgdbts_get_char,
37326 .write_char = kgdbts_put_char,
37327 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
37328 index 37e7cfc..67cfb76 100644
37329 --- a/drivers/misc/sgi-gru/gruhandles.c
37330 +++ b/drivers/misc/sgi-gru/gruhandles.c
37331 @@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistics[mcsop_last];
37332
37333 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
37334 {
37335 - atomic_long_inc(&mcs_op_statistics[op].count);
37336 - atomic_long_add(clks, &mcs_op_statistics[op].total);
37337 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
37338 + atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
37339 if (mcs_op_statistics[op].max < clks)
37340 mcs_op_statistics[op].max = clks;
37341 }
37342 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
37343 index 3f2375c..467c6e6 100644
37344 --- a/drivers/misc/sgi-gru/gruprocfs.c
37345 +++ b/drivers/misc/sgi-gru/gruprocfs.c
37346 @@ -32,9 +32,9 @@
37347
37348 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
37349
37350 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
37351 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
37352 {
37353 - unsigned long val = atomic_long_read(v);
37354 + unsigned long val = atomic_long_read_unchecked(v);
37355
37356 if (val)
37357 seq_printf(s, "%16lu %s\n", val, id);
37358 @@ -136,8 +136,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
37359 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
37360
37361 for (op = 0; op < mcsop_last; op++) {
37362 - count = atomic_long_read(&mcs_op_statistics[op].count);
37363 - total = atomic_long_read(&mcs_op_statistics[op].total);
37364 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
37365 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
37366 max = mcs_op_statistics[op].max;
37367 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
37368 count ? total / count : 0, max);
37369 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
37370 index 46990bc..4a251b5 100644
37371 --- a/drivers/misc/sgi-gru/grutables.h
37372 +++ b/drivers/misc/sgi-gru/grutables.h
37373 @@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
37374 * GRU statistics.
37375 */
37376 struct gru_stats_s {
37377 - atomic_long_t vdata_alloc;
37378 - atomic_long_t vdata_free;
37379 - atomic_long_t gts_alloc;
37380 - atomic_long_t gts_free;
37381 - atomic_long_t vdata_double_alloc;
37382 - atomic_long_t gts_double_allocate;
37383 - atomic_long_t assign_context;
37384 - atomic_long_t assign_context_failed;
37385 - atomic_long_t free_context;
37386 - atomic_long_t load_user_context;
37387 - atomic_long_t load_kernel_context;
37388 - atomic_long_t lock_kernel_context;
37389 - atomic_long_t unlock_kernel_context;
37390 - atomic_long_t steal_user_context;
37391 - atomic_long_t steal_kernel_context;
37392 - atomic_long_t steal_context_failed;
37393 - atomic_long_t nopfn;
37394 - atomic_long_t break_cow;
37395 - atomic_long_t asid_new;
37396 - atomic_long_t asid_next;
37397 - atomic_long_t asid_wrap;
37398 - atomic_long_t asid_reuse;
37399 - atomic_long_t intr;
37400 - atomic_long_t intr_mm_lock_failed;
37401 - atomic_long_t call_os;
37402 - atomic_long_t call_os_offnode_reference;
37403 - atomic_long_t call_os_check_for_bug;
37404 - atomic_long_t call_os_wait_queue;
37405 - atomic_long_t user_flush_tlb;
37406 - atomic_long_t user_unload_context;
37407 - atomic_long_t user_exception;
37408 - atomic_long_t set_context_option;
37409 - atomic_long_t migrate_check;
37410 - atomic_long_t migrated_retarget;
37411 - atomic_long_t migrated_unload;
37412 - atomic_long_t migrated_unload_delay;
37413 - atomic_long_t migrated_nopfn_retarget;
37414 - atomic_long_t migrated_nopfn_unload;
37415 - atomic_long_t tlb_dropin;
37416 - atomic_long_t tlb_dropin_fail_no_asid;
37417 - atomic_long_t tlb_dropin_fail_upm;
37418 - atomic_long_t tlb_dropin_fail_invalid;
37419 - atomic_long_t tlb_dropin_fail_range_active;
37420 - atomic_long_t tlb_dropin_fail_idle;
37421 - atomic_long_t tlb_dropin_fail_fmm;
37422 - atomic_long_t tlb_dropin_fail_no_exception;
37423 - atomic_long_t tlb_dropin_fail_no_exception_war;
37424 - atomic_long_t tfh_stale_on_fault;
37425 - atomic_long_t mmu_invalidate_range;
37426 - atomic_long_t mmu_invalidate_page;
37427 - atomic_long_t mmu_clear_flush_young;
37428 - atomic_long_t flush_tlb;
37429 - atomic_long_t flush_tlb_gru;
37430 - atomic_long_t flush_tlb_gru_tgh;
37431 - atomic_long_t flush_tlb_gru_zero_asid;
37432 + atomic_long_unchecked_t vdata_alloc;
37433 + atomic_long_unchecked_t vdata_free;
37434 + atomic_long_unchecked_t gts_alloc;
37435 + atomic_long_unchecked_t gts_free;
37436 + atomic_long_unchecked_t vdata_double_alloc;
37437 + atomic_long_unchecked_t gts_double_allocate;
37438 + atomic_long_unchecked_t assign_context;
37439 + atomic_long_unchecked_t assign_context_failed;
37440 + atomic_long_unchecked_t free_context;
37441 + atomic_long_unchecked_t load_user_context;
37442 + atomic_long_unchecked_t load_kernel_context;
37443 + atomic_long_unchecked_t lock_kernel_context;
37444 + atomic_long_unchecked_t unlock_kernel_context;
37445 + atomic_long_unchecked_t steal_user_context;
37446 + atomic_long_unchecked_t steal_kernel_context;
37447 + atomic_long_unchecked_t steal_context_failed;
37448 + atomic_long_unchecked_t nopfn;
37449 + atomic_long_unchecked_t break_cow;
37450 + atomic_long_unchecked_t asid_new;
37451 + atomic_long_unchecked_t asid_next;
37452 + atomic_long_unchecked_t asid_wrap;
37453 + atomic_long_unchecked_t asid_reuse;
37454 + atomic_long_unchecked_t intr;
37455 + atomic_long_unchecked_t intr_mm_lock_failed;
37456 + atomic_long_unchecked_t call_os;
37457 + atomic_long_unchecked_t call_os_offnode_reference;
37458 + atomic_long_unchecked_t call_os_check_for_bug;
37459 + atomic_long_unchecked_t call_os_wait_queue;
37460 + atomic_long_unchecked_t user_flush_tlb;
37461 + atomic_long_unchecked_t user_unload_context;
37462 + atomic_long_unchecked_t user_exception;
37463 + atomic_long_unchecked_t set_context_option;
37464 + atomic_long_unchecked_t migrate_check;
37465 + atomic_long_unchecked_t migrated_retarget;
37466 + atomic_long_unchecked_t migrated_unload;
37467 + atomic_long_unchecked_t migrated_unload_delay;
37468 + atomic_long_unchecked_t migrated_nopfn_retarget;
37469 + atomic_long_unchecked_t migrated_nopfn_unload;
37470 + atomic_long_unchecked_t tlb_dropin;
37471 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
37472 + atomic_long_unchecked_t tlb_dropin_fail_upm;
37473 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
37474 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
37475 + atomic_long_unchecked_t tlb_dropin_fail_idle;
37476 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
37477 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
37478 + atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
37479 + atomic_long_unchecked_t tfh_stale_on_fault;
37480 + atomic_long_unchecked_t mmu_invalidate_range;
37481 + atomic_long_unchecked_t mmu_invalidate_page;
37482 + atomic_long_unchecked_t mmu_clear_flush_young;
37483 + atomic_long_unchecked_t flush_tlb;
37484 + atomic_long_unchecked_t flush_tlb_gru;
37485 + atomic_long_unchecked_t flush_tlb_gru_tgh;
37486 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
37487
37488 - atomic_long_t copy_gpa;
37489 + atomic_long_unchecked_t copy_gpa;
37490
37491 - atomic_long_t mesq_receive;
37492 - atomic_long_t mesq_receive_none;
37493 - atomic_long_t mesq_send;
37494 - atomic_long_t mesq_send_failed;
37495 - atomic_long_t mesq_noop;
37496 - atomic_long_t mesq_send_unexpected_error;
37497 - atomic_long_t mesq_send_lb_overflow;
37498 - atomic_long_t mesq_send_qlimit_reached;
37499 - atomic_long_t mesq_send_amo_nacked;
37500 - atomic_long_t mesq_send_put_nacked;
37501 - atomic_long_t mesq_qf_not_full;
37502 - atomic_long_t mesq_qf_locked;
37503 - atomic_long_t mesq_qf_noop_not_full;
37504 - atomic_long_t mesq_qf_switch_head_failed;
37505 - atomic_long_t mesq_qf_unexpected_error;
37506 - atomic_long_t mesq_noop_unexpected_error;
37507 - atomic_long_t mesq_noop_lb_overflow;
37508 - atomic_long_t mesq_noop_qlimit_reached;
37509 - atomic_long_t mesq_noop_amo_nacked;
37510 - atomic_long_t mesq_noop_put_nacked;
37511 + atomic_long_unchecked_t mesq_receive;
37512 + atomic_long_unchecked_t mesq_receive_none;
37513 + atomic_long_unchecked_t mesq_send;
37514 + atomic_long_unchecked_t mesq_send_failed;
37515 + atomic_long_unchecked_t mesq_noop;
37516 + atomic_long_unchecked_t mesq_send_unexpected_error;
37517 + atomic_long_unchecked_t mesq_send_lb_overflow;
37518 + atomic_long_unchecked_t mesq_send_qlimit_reached;
37519 + atomic_long_unchecked_t mesq_send_amo_nacked;
37520 + atomic_long_unchecked_t mesq_send_put_nacked;
37521 + atomic_long_unchecked_t mesq_qf_not_full;
37522 + atomic_long_unchecked_t mesq_qf_locked;
37523 + atomic_long_unchecked_t mesq_qf_noop_not_full;
37524 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
37525 + atomic_long_unchecked_t mesq_qf_unexpected_error;
37526 + atomic_long_unchecked_t mesq_noop_unexpected_error;
37527 + atomic_long_unchecked_t mesq_noop_lb_overflow;
37528 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
37529 + atomic_long_unchecked_t mesq_noop_amo_nacked;
37530 + atomic_long_unchecked_t mesq_noop_put_nacked;
37531
37532 };
37533
37534 @@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
37535 cchop_deallocate, tghop_invalidate, mcsop_last};
37536
37537 struct mcs_op_statistic {
37538 - atomic_long_t count;
37539 - atomic_long_t total;
37540 + atomic_long_unchecked_t count;
37541 + atomic_long_unchecked_t total;
37542 unsigned long max;
37543 };
37544
37545 @@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
37546
37547 #define STAT(id) do { \
37548 if (gru_options & OPT_STATS) \
37549 - atomic_long_inc(&gru_stats.id); \
37550 + atomic_long_inc_unchecked(&gru_stats.id); \
37551 } while (0)
37552
37553 #ifdef CONFIG_SGI_GRU_DEBUG
37554 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
37555 index 2275126..12a9dbfb 100644
37556 --- a/drivers/misc/sgi-xp/xp.h
37557 +++ b/drivers/misc/sgi-xp/xp.h
37558 @@ -289,7 +289,7 @@ struct xpc_interface {
37559 xpc_notify_func, void *);
37560 void (*received) (short, int, void *);
37561 enum xp_retval (*partid_to_nasids) (short, void *);
37562 -};
37563 +} __no_const;
37564
37565 extern struct xpc_interface xpc_interface;
37566
37567 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
37568 index b94d5f7..7f494c5 100644
37569 --- a/drivers/misc/sgi-xp/xpc.h
37570 +++ b/drivers/misc/sgi-xp/xpc.h
37571 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
37572 void (*received_payload) (struct xpc_channel *, void *);
37573 void (*notify_senders_of_disconnect) (struct xpc_channel *);
37574 };
37575 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
37576
37577 /* struct xpc_partition act_state values (for XPC HB) */
37578
37579 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
37580 /* found in xpc_main.c */
37581 extern struct device *xpc_part;
37582 extern struct device *xpc_chan;
37583 -extern struct xpc_arch_operations xpc_arch_ops;
37584 +extern xpc_arch_operations_no_const xpc_arch_ops;
37585 extern int xpc_disengage_timelimit;
37586 extern int xpc_disengage_timedout;
37587 extern int xpc_activate_IRQ_rcvd;
37588 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
37589 index fd3688a..7e211a4 100644
37590 --- a/drivers/misc/sgi-xp/xpc_main.c
37591 +++ b/drivers/misc/sgi-xp/xpc_main.c
37592 @@ -169,7 +169,7 @@ static struct notifier_block xpc_die_notifier = {
37593 .notifier_call = xpc_system_die,
37594 };
37595
37596 -struct xpc_arch_operations xpc_arch_ops;
37597 +xpc_arch_operations_no_const xpc_arch_ops;
37598
37599 /*
37600 * Timer function to enforce the timelimit on the partition disengage.
37601 diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
37602 index 8b70e03..700bda6 100644
37603 --- a/drivers/misc/sgi-xp/xpc_sn2.c
37604 +++ b/drivers/misc/sgi-xp/xpc_sn2.c
37605 @@ -2350,7 +2350,7 @@ xpc_received_payload_sn2(struct xpc_channel *ch, void *payload)
37606 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
37607 }
37608
37609 -static struct xpc_arch_operations xpc_arch_ops_sn2 = {
37610 +static const struct xpc_arch_operations xpc_arch_ops_sn2 = {
37611 .setup_partitions = xpc_setup_partitions_sn2,
37612 .teardown_partitions = xpc_teardown_partitions_sn2,
37613 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
37614 @@ -2413,7 +2413,9 @@ xpc_init_sn2(void)
37615 int ret;
37616 size_t buf_size;
37617
37618 - xpc_arch_ops = xpc_arch_ops_sn2;
37619 + pax_open_kernel();
37620 + memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_sn2, sizeof(xpc_arch_ops_sn2));
37621 + pax_close_kernel();
37622
37623 if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
37624 dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
37625 diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
37626 index 8e08d71..7cb8c9b 100644
37627 --- a/drivers/misc/sgi-xp/xpc_uv.c
37628 +++ b/drivers/misc/sgi-xp/xpc_uv.c
37629 @@ -1669,7 +1669,7 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
37630 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
37631 }
37632
37633 -static struct xpc_arch_operations xpc_arch_ops_uv = {
37634 +static const struct xpc_arch_operations xpc_arch_ops_uv = {
37635 .setup_partitions = xpc_setup_partitions_uv,
37636 .teardown_partitions = xpc_teardown_partitions_uv,
37637 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
37638 @@ -1729,7 +1729,9 @@ static struct xpc_arch_operations xpc_arch_ops_uv = {
37639 int
37640 xpc_init_uv(void)
37641 {
37642 - xpc_arch_ops = xpc_arch_ops_uv;
37643 + pax_open_kernel();
37644 + memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_uv, sizeof(xpc_arch_ops_uv));
37645 + pax_close_kernel();
37646
37647 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
37648 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
37649 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
37650 index 6fd20b42..650efe3 100644
37651 --- a/drivers/mmc/host/sdhci-pci.c
37652 +++ b/drivers/mmc/host/sdhci-pci.c
37653 @@ -297,7 +297,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
37654 .probe = via_probe,
37655 };
37656
37657 -static const struct pci_device_id pci_ids[] __devinitdata = {
37658 +static const struct pci_device_id pci_ids[] __devinitconst = {
37659 {
37660 .vendor = PCI_VENDOR_ID_RICOH,
37661 .device = PCI_DEVICE_ID_RICOH_R5C822,
37662 diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
37663 index e7563a9..5f90ce5 100644
37664 --- a/drivers/mtd/chips/cfi_cmdset_0001.c
37665 +++ b/drivers/mtd/chips/cfi_cmdset_0001.c
37666 @@ -743,6 +743,8 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
37667 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
37668 unsigned long timeo = jiffies + HZ;
37669
37670 + pax_track_stack();
37671 +
37672 /* Prevent setting state FL_SYNCING for chip in suspended state. */
37673 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
37674 goto sleep;
37675 @@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
37676 unsigned long initial_adr;
37677 int initial_len = len;
37678
37679 + pax_track_stack();
37680 +
37681 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
37682 adr += chip->start;
37683 initial_adr = adr;
37684 @@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
37685 int retries = 3;
37686 int ret;
37687
37688 + pax_track_stack();
37689 +
37690 adr += chip->start;
37691
37692 retry:
37693 diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
37694 index 0667a67..3ab97ed 100644
37695 --- a/drivers/mtd/chips/cfi_cmdset_0020.c
37696 +++ b/drivers/mtd/chips/cfi_cmdset_0020.c
37697 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
37698 unsigned long cmd_addr;
37699 struct cfi_private *cfi = map->fldrv_priv;
37700
37701 + pax_track_stack();
37702 +
37703 adr += chip->start;
37704
37705 /* Ensure cmd read/writes are aligned. */
37706 @@ -428,6 +430,8 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
37707 DECLARE_WAITQUEUE(wait, current);
37708 int wbufsize, z;
37709
37710 + pax_track_stack();
37711 +
37712 /* M58LW064A requires bus alignment for buffer wriets -- saw */
37713 if (adr & (map_bankwidth(map)-1))
37714 return -EINVAL;
37715 @@ -742,6 +746,8 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
37716 DECLARE_WAITQUEUE(wait, current);
37717 int ret = 0;
37718
37719 + pax_track_stack();
37720 +
37721 adr += chip->start;
37722
37723 /* Let's determine this according to the interleave only once */
37724 @@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un
37725 unsigned long timeo = jiffies + HZ;
37726 DECLARE_WAITQUEUE(wait, current);
37727
37728 + pax_track_stack();
37729 +
37730 adr += chip->start;
37731
37732 /* Let's determine this according to the interleave only once */
37733 @@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip,
37734 unsigned long timeo = jiffies + HZ;
37735 DECLARE_WAITQUEUE(wait, current);
37736
37737 + pax_track_stack();
37738 +
37739 adr += chip->start;
37740
37741 /* Let's determine this according to the interleave only once */
37742 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
37743 index 5bf5f46..c5de373 100644
37744 --- a/drivers/mtd/devices/doc2000.c
37745 +++ b/drivers/mtd/devices/doc2000.c
37746 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
37747
37748 /* The ECC will not be calculated correctly if less than 512 is written */
37749 /* DBB-
37750 - if (len != 0x200 && eccbuf)
37751 + if (len != 0x200)
37752 printk(KERN_WARNING
37753 "ECC needs a full sector write (adr: %lx size %lx)\n",
37754 (long) to, (long) len);
37755 diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
37756 index 0990f78..bb4e8a4 100644
37757 --- a/drivers/mtd/devices/doc2001.c
37758 +++ b/drivers/mtd/devices/doc2001.c
37759 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
37760 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
37761
37762 /* Don't allow read past end of device */
37763 - if (from >= this->totlen)
37764 + if (from >= this->totlen || !len)
37765 return -EINVAL;
37766
37767 /* Don't allow a single read to cross a 512-byte block boundary */
37768 diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
37769 index e56d6b4..f07e6cf 100644
37770 --- a/drivers/mtd/ftl.c
37771 +++ b/drivers/mtd/ftl.c
37772 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
37773 loff_t offset;
37774 uint16_t srcunitswap = cpu_to_le16(srcunit);
37775
37776 + pax_track_stack();
37777 +
37778 eun = &part->EUNInfo[srcunit];
37779 xfer = &part->XferInfo[xferunit];
37780 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
37781 diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
37782 index 8aca552..146446e 100755
37783 --- a/drivers/mtd/inftlcore.c
37784 +++ b/drivers/mtd/inftlcore.c
37785 @@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
37786 struct inftl_oob oob;
37787 size_t retlen;
37788
37789 + pax_track_stack();
37790 +
37791 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
37792 "pending=%d)\n", inftl, thisVUC, pendingblock);
37793
37794 diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
37795 index 32e82ae..ed50953 100644
37796 --- a/drivers/mtd/inftlmount.c
37797 +++ b/drivers/mtd/inftlmount.c
37798 @@ -54,6 +54,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
37799 struct INFTLPartition *ip;
37800 size_t retlen;
37801
37802 + pax_track_stack();
37803 +
37804 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
37805
37806 /*
37807 diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
37808 index 79bf40f..fe5f8fd 100644
37809 --- a/drivers/mtd/lpddr/qinfo_probe.c
37810 +++ b/drivers/mtd/lpddr/qinfo_probe.c
37811 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
37812 {
37813 map_word pfow_val[4];
37814
37815 + pax_track_stack();
37816 +
37817 /* Check identification string */
37818 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
37819 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
37820 diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
37821 index 726a1b8..f46b460 100644
37822 --- a/drivers/mtd/mtdchar.c
37823 +++ b/drivers/mtd/mtdchar.c
37824 @@ -461,6 +461,8 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
37825 u_long size;
37826 struct mtd_info_user info;
37827
37828 + pax_track_stack();
37829 +
37830 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
37831
37832 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
37833 diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
37834 index 1002e18..26d82d5 100644
37835 --- a/drivers/mtd/nftlcore.c
37836 +++ b/drivers/mtd/nftlcore.c
37837 @@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
37838 int inplace = 1;
37839 size_t retlen;
37840
37841 + pax_track_stack();
37842 +
37843 memset(BlockMap, 0xff, sizeof(BlockMap));
37844 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
37845
37846 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
37847 index 8b22b18..6fada85 100644
37848 --- a/drivers/mtd/nftlmount.c
37849 +++ b/drivers/mtd/nftlmount.c
37850 @@ -23,6 +23,7 @@
37851 #include <asm/errno.h>
37852 #include <linux/delay.h>
37853 #include <linux/slab.h>
37854 +#include <linux/sched.h>
37855 #include <linux/mtd/mtd.h>
37856 #include <linux/mtd/nand.h>
37857 #include <linux/mtd/nftl.h>
37858 @@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLrecord *nftl)
37859 struct mtd_info *mtd = nftl->mbd.mtd;
37860 unsigned int i;
37861
37862 + pax_track_stack();
37863 +
37864 /* Assume logical EraseSize == physical erasesize for starting the scan.
37865 We'll sort it out later if we find a MediaHeader which says otherwise */
37866 /* Actually, we won't. The new DiskOnChip driver has already scanned
37867 diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
37868 index 14cec04..d775b87 100644
37869 --- a/drivers/mtd/ubi/build.c
37870 +++ b/drivers/mtd/ubi/build.c
37871 @@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
37872 static int __init bytes_str_to_int(const char *str)
37873 {
37874 char *endp;
37875 - unsigned long result;
37876 + unsigned long result, scale = 1;
37877
37878 result = simple_strtoul(str, &endp, 0);
37879 if (str == endp || result >= INT_MAX) {
37880 @@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const char *str)
37881
37882 switch (*endp) {
37883 case 'G':
37884 - result *= 1024;
37885 + scale *= 1024;
37886 case 'M':
37887 - result *= 1024;
37888 + scale *= 1024;
37889 case 'K':
37890 - result *= 1024;
37891 + scale *= 1024;
37892 if (endp[1] == 'i' && endp[2] == 'B')
37893 endp += 2;
37894 case '\0':
37895 @@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const char *str)
37896 return -EINVAL;
37897 }
37898
37899 - return result;
37900 + if ((intoverflow_t)result*scale >= INT_MAX) {
37901 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
37902 + str);
37903 + return -EINVAL;
37904 + }
37905 +
37906 + return result*scale;
37907 }
37908
37909 /**
37910 diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
37911 index ab68886..ca405e8 100644
37912 --- a/drivers/net/atlx/atl2.c
37913 +++ b/drivers/net/atlx/atl2.c
37914 @@ -2845,7 +2845,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
37915 */
37916
37917 #define ATL2_PARAM(X, desc) \
37918 - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
37919 + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
37920 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
37921 MODULE_PARM_DESC(X, desc);
37922 #else
37923 diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
37924 index 4874b2b..67f8526 100644
37925 --- a/drivers/net/bnx2.c
37926 +++ b/drivers/net/bnx2.c
37927 @@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
37928 int rc = 0;
37929 u32 magic, csum;
37930
37931 + pax_track_stack();
37932 +
37933 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
37934 goto test_nvram_done;
37935
37936 diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
37937 index fd3eb07..8a6978d 100644
37938 --- a/drivers/net/cxgb3/l2t.h
37939 +++ b/drivers/net/cxgb3/l2t.h
37940 @@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
37941 */
37942 struct l2t_skb_cb {
37943 arp_failure_handler_func arp_failure_handler;
37944 -};
37945 +} __no_const;
37946
37947 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
37948
37949 diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
37950 index 032cfe0..411af379 100644
37951 --- a/drivers/net/cxgb3/t3_hw.c
37952 +++ b/drivers/net/cxgb3/t3_hw.c
37953 @@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
37954 int i, addr, ret;
37955 struct t3_vpd vpd;
37956
37957 + pax_track_stack();
37958 +
37959 /*
37960 * Card information is normally at VPD_BASE but some early cards had
37961 * it at 0.
37962 diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
37963 index d1e0563..b9e129c 100644
37964 --- a/drivers/net/e1000e/82571.c
37965 +++ b/drivers/net/e1000e/82571.c
37966 @@ -212,7 +212,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
37967 {
37968 struct e1000_hw *hw = &adapter->hw;
37969 struct e1000_mac_info *mac = &hw->mac;
37970 - struct e1000_mac_operations *func = &mac->ops;
37971 + e1000_mac_operations_no_const *func = &mac->ops;
37972 u32 swsm = 0;
37973 u32 swsm2 = 0;
37974 bool force_clear_smbi = false;
37975 @@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
37976 temp = er32(ICRXDMTC);
37977 }
37978
37979 -static struct e1000_mac_operations e82571_mac_ops = {
37980 +static const struct e1000_mac_operations e82571_mac_ops = {
37981 /* .check_mng_mode: mac type dependent */
37982 /* .check_for_link: media type dependent */
37983 .id_led_init = e1000e_id_led_init,
37984 @@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
37985 .setup_led = e1000e_setup_led_generic,
37986 };
37987
37988 -static struct e1000_phy_operations e82_phy_ops_igp = {
37989 +static const struct e1000_phy_operations e82_phy_ops_igp = {
37990 .acquire_phy = e1000_get_hw_semaphore_82571,
37991 .check_reset_block = e1000e_check_reset_block_generic,
37992 .commit_phy = NULL,
37993 @@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_phy_ops_igp = {
37994 .cfg_on_link_up = NULL,
37995 };
37996
37997 -static struct e1000_phy_operations e82_phy_ops_m88 = {
37998 +static const struct e1000_phy_operations e82_phy_ops_m88 = {
37999 .acquire_phy = e1000_get_hw_semaphore_82571,
38000 .check_reset_block = e1000e_check_reset_block_generic,
38001 .commit_phy = e1000e_phy_sw_reset,
38002 @@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_phy_ops_m88 = {
38003 .cfg_on_link_up = NULL,
38004 };
38005
38006 -static struct e1000_phy_operations e82_phy_ops_bm = {
38007 +static const struct e1000_phy_operations e82_phy_ops_bm = {
38008 .acquire_phy = e1000_get_hw_semaphore_82571,
38009 .check_reset_block = e1000e_check_reset_block_generic,
38010 .commit_phy = e1000e_phy_sw_reset,
38011 @@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_phy_ops_bm = {
38012 .cfg_on_link_up = NULL,
38013 };
38014
38015 -static struct e1000_nvm_operations e82571_nvm_ops = {
38016 +static const struct e1000_nvm_operations e82571_nvm_ops = {
38017 .acquire_nvm = e1000_acquire_nvm_82571,
38018 .read_nvm = e1000e_read_nvm_eerd,
38019 .release_nvm = e1000_release_nvm_82571,
38020 diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
38021 index 47db9bd..fa58ccd 100644
38022 --- a/drivers/net/e1000e/e1000.h
38023 +++ b/drivers/net/e1000e/e1000.h
38024 @@ -375,9 +375,9 @@ struct e1000_info {
38025 u32 pba;
38026 u32 max_hw_frame_size;
38027 s32 (*get_variants)(struct e1000_adapter *);
38028 - struct e1000_mac_operations *mac_ops;
38029 - struct e1000_phy_operations *phy_ops;
38030 - struct e1000_nvm_operations *nvm_ops;
38031 + const struct e1000_mac_operations *mac_ops;
38032 + const struct e1000_phy_operations *phy_ops;
38033 + const struct e1000_nvm_operations *nvm_ops;
38034 };
38035
38036 /* hardware capability, feature, and workaround flags */
38037 diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
38038 index ae5d736..e9a93a1 100644
38039 --- a/drivers/net/e1000e/es2lan.c
38040 +++ b/drivers/net/e1000e/es2lan.c
38041 @@ -207,7 +207,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
38042 {
38043 struct e1000_hw *hw = &adapter->hw;
38044 struct e1000_mac_info *mac = &hw->mac;
38045 - struct e1000_mac_operations *func = &mac->ops;
38046 + e1000_mac_operations_no_const *func = &mac->ops;
38047
38048 /* Set media type */
38049 switch (adapter->pdev->device) {
38050 @@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
38051 temp = er32(ICRXDMTC);
38052 }
38053
38054 -static struct e1000_mac_operations es2_mac_ops = {
38055 +static const struct e1000_mac_operations es2_mac_ops = {
38056 .id_led_init = e1000e_id_led_init,
38057 .check_mng_mode = e1000e_check_mng_mode_generic,
38058 /* check_for_link dependent on media type */
38059 @@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_mac_ops = {
38060 .setup_led = e1000e_setup_led_generic,
38061 };
38062
38063 -static struct e1000_phy_operations es2_phy_ops = {
38064 +static const struct e1000_phy_operations es2_phy_ops = {
38065 .acquire_phy = e1000_acquire_phy_80003es2lan,
38066 .check_reset_block = e1000e_check_reset_block_generic,
38067 .commit_phy = e1000e_phy_sw_reset,
38068 @@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_phy_ops = {
38069 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
38070 };
38071
38072 -static struct e1000_nvm_operations es2_nvm_ops = {
38073 +static const struct e1000_nvm_operations es2_nvm_ops = {
38074 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
38075 .read_nvm = e1000e_read_nvm_eerd,
38076 .release_nvm = e1000_release_nvm_80003es2lan,
38077 diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
38078 index 11f3b7c..6381887 100644
38079 --- a/drivers/net/e1000e/hw.h
38080 +++ b/drivers/net/e1000e/hw.h
38081 @@ -753,6 +753,7 @@ struct e1000_mac_operations {
38082 s32 (*setup_physical_interface)(struct e1000_hw *);
38083 s32 (*setup_led)(struct e1000_hw *);
38084 };
38085 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
38086
38087 /* Function pointers for the PHY. */
38088 struct e1000_phy_operations {
38089 @@ -774,6 +775,7 @@ struct e1000_phy_operations {
38090 s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
38091 s32 (*cfg_on_link_up)(struct e1000_hw *);
38092 };
38093 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
38094
38095 /* Function pointers for the NVM. */
38096 struct e1000_nvm_operations {
38097 @@ -785,9 +787,10 @@ struct e1000_nvm_operations {
38098 s32 (*validate_nvm)(struct e1000_hw *);
38099 s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
38100 };
38101 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
38102
38103 struct e1000_mac_info {
38104 - struct e1000_mac_operations ops;
38105 + e1000_mac_operations_no_const ops;
38106
38107 u8 addr[6];
38108 u8 perm_addr[6];
38109 @@ -823,7 +826,7 @@ struct e1000_mac_info {
38110 };
38111
38112 struct e1000_phy_info {
38113 - struct e1000_phy_operations ops;
38114 + e1000_phy_operations_no_const ops;
38115
38116 enum e1000_phy_type type;
38117
38118 @@ -857,7 +860,7 @@ struct e1000_phy_info {
38119 };
38120
38121 struct e1000_nvm_info {
38122 - struct e1000_nvm_operations ops;
38123 + e1000_nvm_operations_no_const ops;
38124
38125 enum e1000_nvm_type type;
38126 enum e1000_nvm_override override;
38127 diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
38128 index de39f9a..e28d3e0 100644
38129 --- a/drivers/net/e1000e/ich8lan.c
38130 +++ b/drivers/net/e1000e/ich8lan.c
38131 @@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
38132 }
38133 }
38134
38135 -static struct e1000_mac_operations ich8_mac_ops = {
38136 +static const struct e1000_mac_operations ich8_mac_ops = {
38137 .id_led_init = e1000e_id_led_init,
38138 .check_mng_mode = e1000_check_mng_mode_ich8lan,
38139 .check_for_link = e1000_check_for_copper_link_ich8lan,
38140 @@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_mac_ops = {
38141 /* id_led_init dependent on mac type */
38142 };
38143
38144 -static struct e1000_phy_operations ich8_phy_ops = {
38145 +static const struct e1000_phy_operations ich8_phy_ops = {
38146 .acquire_phy = e1000_acquire_swflag_ich8lan,
38147 .check_reset_block = e1000_check_reset_block_ich8lan,
38148 .commit_phy = NULL,
38149 @@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_phy_ops = {
38150 .write_phy_reg = e1000e_write_phy_reg_igp,
38151 };
38152
38153 -static struct e1000_nvm_operations ich8_nvm_ops = {
38154 +static const struct e1000_nvm_operations ich8_nvm_ops = {
38155 .acquire_nvm = e1000_acquire_nvm_ich8lan,
38156 .read_nvm = e1000_read_nvm_ich8lan,
38157 .release_nvm = e1000_release_nvm_ich8lan,
38158 diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
38159 index 18d5fbb..542d96d 100644
38160 --- a/drivers/net/fealnx.c
38161 +++ b/drivers/net/fealnx.c
38162 @@ -151,7 +151,7 @@ struct chip_info {
38163 int flags;
38164 };
38165
38166 -static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
38167 +static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
38168 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
38169 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
38170 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
38171 diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
38172 index 0e5b54b..b503f82 100644
38173 --- a/drivers/net/hamradio/6pack.c
38174 +++ b/drivers/net/hamradio/6pack.c
38175 @@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct tty_struct *tty,
38176 unsigned char buf[512];
38177 int count1;
38178
38179 + pax_track_stack();
38180 +
38181 if (!count)
38182 return;
38183
38184 diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
38185 index 5862282..7cce8cb 100644
38186 --- a/drivers/net/ibmveth.c
38187 +++ b/drivers/net/ibmveth.c
38188 @@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attrs[] = {
38189 NULL,
38190 };
38191
38192 -static struct sysfs_ops veth_pool_ops = {
38193 +static const struct sysfs_ops veth_pool_ops = {
38194 .show = veth_pool_show,
38195 .store = veth_pool_store,
38196 };
38197 diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
38198 index d617f2d..57b5309 100644
38199 --- a/drivers/net/igb/e1000_82575.c
38200 +++ b/drivers/net/igb/e1000_82575.c
38201 @@ -1411,7 +1411,7 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
38202 wr32(E1000_VT_CTL, vt_ctl);
38203 }
38204
38205 -static struct e1000_mac_operations e1000_mac_ops_82575 = {
38206 +static const struct e1000_mac_operations e1000_mac_ops_82575 = {
38207 .reset_hw = igb_reset_hw_82575,
38208 .init_hw = igb_init_hw_82575,
38209 .check_for_link = igb_check_for_link_82575,
38210 @@ -1420,13 +1420,13 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = {
38211 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
38212 };
38213
38214 -static struct e1000_phy_operations e1000_phy_ops_82575 = {
38215 +static const struct e1000_phy_operations e1000_phy_ops_82575 = {
38216 .acquire = igb_acquire_phy_82575,
38217 .get_cfg_done = igb_get_cfg_done_82575,
38218 .release = igb_release_phy_82575,
38219 };
38220
38221 -static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
38222 +static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
38223 .acquire = igb_acquire_nvm_82575,
38224 .read = igb_read_nvm_eerd,
38225 .release = igb_release_nvm_82575,
38226 diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
38227 index 72081df..d855cf5 100644
38228 --- a/drivers/net/igb/e1000_hw.h
38229 +++ b/drivers/net/igb/e1000_hw.h
38230 @@ -288,6 +288,7 @@ struct e1000_mac_operations {
38231 s32 (*read_mac_addr)(struct e1000_hw *);
38232 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
38233 };
38234 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
38235
38236 struct e1000_phy_operations {
38237 s32 (*acquire)(struct e1000_hw *);
38238 @@ -303,6 +304,7 @@ struct e1000_phy_operations {
38239 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
38240 s32 (*write_reg)(struct e1000_hw *, u32, u16);
38241 };
38242 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
38243
38244 struct e1000_nvm_operations {
38245 s32 (*acquire)(struct e1000_hw *);
38246 @@ -310,6 +312,7 @@ struct e1000_nvm_operations {
38247 void (*release)(struct e1000_hw *);
38248 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
38249 };
38250 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
38251
38252 struct e1000_info {
38253 s32 (*get_invariants)(struct e1000_hw *);
38254 @@ -321,7 +324,7 @@ struct e1000_info {
38255 extern const struct e1000_info e1000_82575_info;
38256
38257 struct e1000_mac_info {
38258 - struct e1000_mac_operations ops;
38259 + e1000_mac_operations_no_const ops;
38260
38261 u8 addr[6];
38262 u8 perm_addr[6];
38263 @@ -365,7 +368,7 @@ struct e1000_mac_info {
38264 };
38265
38266 struct e1000_phy_info {
38267 - struct e1000_phy_operations ops;
38268 + e1000_phy_operations_no_const ops;
38269
38270 enum e1000_phy_type type;
38271
38272 @@ -400,7 +403,7 @@ struct e1000_phy_info {
38273 };
38274
38275 struct e1000_nvm_info {
38276 - struct e1000_nvm_operations ops;
38277 + e1000_nvm_operations_no_const ops;
38278
38279 enum e1000_nvm_type type;
38280 enum e1000_nvm_override override;
38281 @@ -446,6 +449,7 @@ struct e1000_mbx_operations {
38282 s32 (*check_for_ack)(struct e1000_hw *, u16);
38283 s32 (*check_for_rst)(struct e1000_hw *, u16);
38284 };
38285 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
38286
38287 struct e1000_mbx_stats {
38288 u32 msgs_tx;
38289 @@ -457,7 +461,7 @@ struct e1000_mbx_stats {
38290 };
38291
38292 struct e1000_mbx_info {
38293 - struct e1000_mbx_operations ops;
38294 + e1000_mbx_operations_no_const ops;
38295 struct e1000_mbx_stats stats;
38296 u32 timeout;
38297 u32 usec_delay;
38298 diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
38299 index 1e8ce37..549c453 100644
38300 --- a/drivers/net/igbvf/vf.h
38301 +++ b/drivers/net/igbvf/vf.h
38302 @@ -187,9 +187,10 @@ struct e1000_mac_operations {
38303 s32 (*read_mac_addr)(struct e1000_hw *);
38304 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
38305 };
38306 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
38307
38308 struct e1000_mac_info {
38309 - struct e1000_mac_operations ops;
38310 + e1000_mac_operations_no_const ops;
38311 u8 addr[6];
38312 u8 perm_addr[6];
38313
38314 @@ -211,6 +212,7 @@ struct e1000_mbx_operations {
38315 s32 (*check_for_ack)(struct e1000_hw *);
38316 s32 (*check_for_rst)(struct e1000_hw *);
38317 };
38318 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
38319
38320 struct e1000_mbx_stats {
38321 u32 msgs_tx;
38322 @@ -222,7 +224,7 @@ struct e1000_mbx_stats {
38323 };
38324
38325 struct e1000_mbx_info {
38326 - struct e1000_mbx_operations ops;
38327 + e1000_mbx_operations_no_const ops;
38328 struct e1000_mbx_stats stats;
38329 u32 timeout;
38330 u32 usec_delay;
38331 diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
38332 index aa7286b..a61394f 100644
38333 --- a/drivers/net/iseries_veth.c
38334 +++ b/drivers/net/iseries_veth.c
38335 @@ -384,7 +384,7 @@ static struct attribute *veth_cnx_default_attrs[] = {
38336 NULL
38337 };
38338
38339 -static struct sysfs_ops veth_cnx_sysfs_ops = {
38340 +static const struct sysfs_ops veth_cnx_sysfs_ops = {
38341 .show = veth_cnx_attribute_show
38342 };
38343
38344 @@ -441,7 +441,7 @@ static struct attribute *veth_port_default_attrs[] = {
38345 NULL
38346 };
38347
38348 -static struct sysfs_ops veth_port_sysfs_ops = {
38349 +static const struct sysfs_ops veth_port_sysfs_ops = {
38350 .show = veth_port_attribute_show
38351 };
38352
38353 diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
38354 index 8aa44dc..fa1e797 100644
38355 --- a/drivers/net/ixgb/ixgb_main.c
38356 +++ b/drivers/net/ixgb/ixgb_main.c
38357 @@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev)
38358 u32 rctl;
38359 int i;
38360
38361 + pax_track_stack();
38362 +
38363 /* Check for Promiscuous and All Multicast modes */
38364
38365 rctl = IXGB_READ_REG(hw, RCTL);
38366 diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
38367 index af35e1d..8781785 100644
38368 --- a/drivers/net/ixgb/ixgb_param.c
38369 +++ b/drivers/net/ixgb/ixgb_param.c
38370 @@ -260,6 +260,9 @@ void __devinit
38371 ixgb_check_options(struct ixgb_adapter *adapter)
38372 {
38373 int bd = adapter->bd_number;
38374 +
38375 + pax_track_stack();
38376 +
38377 if (bd >= IXGB_MAX_NIC) {
38378 printk(KERN_NOTICE
38379 "Warning: no configuration for board #%i\n", bd);
38380 diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
38381 index b17aa73..ed74540 100644
38382 --- a/drivers/net/ixgbe/ixgbe_type.h
38383 +++ b/drivers/net/ixgbe/ixgbe_type.h
38384 @@ -2327,6 +2327,7 @@ struct ixgbe_eeprom_operations {
38385 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
38386 s32 (*update_checksum)(struct ixgbe_hw *);
38387 };
38388 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
38389
38390 struct ixgbe_mac_operations {
38391 s32 (*init_hw)(struct ixgbe_hw *);
38392 @@ -2376,6 +2377,7 @@ struct ixgbe_mac_operations {
38393 /* Flow Control */
38394 s32 (*fc_enable)(struct ixgbe_hw *, s32);
38395 };
38396 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
38397
38398 struct ixgbe_phy_operations {
38399 s32 (*identify)(struct ixgbe_hw *);
38400 @@ -2394,9 +2396,10 @@ struct ixgbe_phy_operations {
38401 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
38402 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
38403 };
38404 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
38405
38406 struct ixgbe_eeprom_info {
38407 - struct ixgbe_eeprom_operations ops;
38408 + ixgbe_eeprom_operations_no_const ops;
38409 enum ixgbe_eeprom_type type;
38410 u32 semaphore_delay;
38411 u16 word_size;
38412 @@ -2404,7 +2407,7 @@ struct ixgbe_eeprom_info {
38413 };
38414
38415 struct ixgbe_mac_info {
38416 - struct ixgbe_mac_operations ops;
38417 + ixgbe_mac_operations_no_const ops;
38418 enum ixgbe_mac_type type;
38419 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
38420 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
38421 @@ -2423,7 +2426,7 @@ struct ixgbe_mac_info {
38422 };
38423
38424 struct ixgbe_phy_info {
38425 - struct ixgbe_phy_operations ops;
38426 + ixgbe_phy_operations_no_const ops;
38427 struct mdio_if_info mdio;
38428 enum ixgbe_phy_type type;
38429 u32 id;
38430 diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
38431 index 291a505..2543756 100644
38432 --- a/drivers/net/mlx4/main.c
38433 +++ b/drivers/net/mlx4/main.c
38434 @@ -38,6 +38,7 @@
38435 #include <linux/errno.h>
38436 #include <linux/pci.h>
38437 #include <linux/dma-mapping.h>
38438 +#include <linux/sched.h>
38439
38440 #include <linux/mlx4/device.h>
38441 #include <linux/mlx4/doorbell.h>
38442 @@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
38443 u64 icm_size;
38444 int err;
38445
38446 + pax_track_stack();
38447 +
38448 err = mlx4_QUERY_FW(dev);
38449 if (err) {
38450 if (err == -EACCES)
38451 diff --git a/drivers/net/niu.c b/drivers/net/niu.c
38452 index 2dce134..fa5ce75 100644
38453 --- a/drivers/net/niu.c
38454 +++ b/drivers/net/niu.c
38455 @@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
38456 int i, num_irqs, err;
38457 u8 first_ldg;
38458
38459 + pax_track_stack();
38460 +
38461 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
38462 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
38463 ldg_num_map[i] = first_ldg + i;
38464 diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
38465 index c1b3f09..97cd8c4 100644
38466 --- a/drivers/net/pcnet32.c
38467 +++ b/drivers/net/pcnet32.c
38468 @@ -79,7 +79,7 @@ static int cards_found;
38469 /*
38470 * VLB I/O addresses
38471 */
38472 -static unsigned int pcnet32_portlist[] __initdata =
38473 +static unsigned int pcnet32_portlist[] __devinitdata =
38474 { 0x300, 0x320, 0x340, 0x360, 0 };
38475
38476 static int pcnet32_debug = 0;
38477 @@ -267,7 +267,7 @@ struct pcnet32_private {
38478 struct sk_buff **rx_skbuff;
38479 dma_addr_t *tx_dma_addr;
38480 dma_addr_t *rx_dma_addr;
38481 - struct pcnet32_access a;
38482 + struct pcnet32_access *a;
38483 spinlock_t lock; /* Guard lock */
38484 unsigned int cur_rx, cur_tx; /* The next free ring entry */
38485 unsigned int rx_ring_size; /* current rx ring size */
38486 @@ -457,9 +457,9 @@ static void pcnet32_netif_start(struct net_device *dev)
38487 u16 val;
38488
38489 netif_wake_queue(dev);
38490 - val = lp->a.read_csr(ioaddr, CSR3);
38491 + val = lp->a->read_csr(ioaddr, CSR3);
38492 val &= 0x00ff;
38493 - lp->a.write_csr(ioaddr, CSR3, val);
38494 + lp->a->write_csr(ioaddr, CSR3, val);
38495 napi_enable(&lp->napi);
38496 }
38497
38498 @@ -744,7 +744,7 @@ static u32 pcnet32_get_link(struct net_device *dev)
38499 r = mii_link_ok(&lp->mii_if);
38500 } else if (lp->chip_version >= PCNET32_79C970A) {
38501 ulong ioaddr = dev->base_addr; /* card base I/O address */
38502 - r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
38503 + r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
38504 } else { /* can not detect link on really old chips */
38505 r = 1;
38506 }
38507 @@ -806,7 +806,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
38508 pcnet32_netif_stop(dev);
38509
38510 spin_lock_irqsave(&lp->lock, flags);
38511 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38512 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38513
38514 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
38515
38516 @@ -886,7 +886,7 @@ static void pcnet32_ethtool_test(struct net_device *dev,
38517 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38518 {
38519 struct pcnet32_private *lp = netdev_priv(dev);
38520 - struct pcnet32_access *a = &lp->a; /* access to registers */
38521 + struct pcnet32_access *a = lp->a; /* access to registers */
38522 ulong ioaddr = dev->base_addr; /* card base I/O address */
38523 struct sk_buff *skb; /* sk buff */
38524 int x, i; /* counters */
38525 @@ -906,21 +906,21 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38526 pcnet32_netif_stop(dev);
38527
38528 spin_lock_irqsave(&lp->lock, flags);
38529 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38530 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38531
38532 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
38533
38534 /* Reset the PCNET32 */
38535 - lp->a.reset(ioaddr);
38536 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38537 + lp->a->reset(ioaddr);
38538 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38539
38540 /* switch pcnet32 to 32bit mode */
38541 - lp->a.write_bcr(ioaddr, 20, 2);
38542 + lp->a->write_bcr(ioaddr, 20, 2);
38543
38544 /* purge & init rings but don't actually restart */
38545 pcnet32_restart(dev, 0x0000);
38546
38547 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38548 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38549
38550 /* Initialize Transmit buffers. */
38551 size = data_len + 15;
38552 @@ -966,10 +966,10 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38553
38554 /* set int loopback in CSR15 */
38555 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
38556 - lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
38557 + lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
38558
38559 teststatus = cpu_to_le16(0x8000);
38560 - lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
38561 + lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
38562
38563 /* Check status of descriptors */
38564 for (x = 0; x < numbuffs; x++) {
38565 @@ -990,7 +990,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38566 }
38567 }
38568
38569 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38570 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38571 wmb();
38572 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
38573 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
38574 @@ -1039,7 +1039,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38575 pcnet32_restart(dev, CSR0_NORMAL);
38576 } else {
38577 pcnet32_purge_rx_ring(dev);
38578 - lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
38579 + lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
38580 }
38581 spin_unlock_irqrestore(&lp->lock, flags);
38582
38583 @@ -1049,7 +1049,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38584 static void pcnet32_led_blink_callback(struct net_device *dev)
38585 {
38586 struct pcnet32_private *lp = netdev_priv(dev);
38587 - struct pcnet32_access *a = &lp->a;
38588 + struct pcnet32_access *a = lp->a;
38589 ulong ioaddr = dev->base_addr;
38590 unsigned long flags;
38591 int i;
38592 @@ -1066,7 +1066,7 @@ static void pcnet32_led_blink_callback(struct net_device *dev)
38593 static int pcnet32_phys_id(struct net_device *dev, u32 data)
38594 {
38595 struct pcnet32_private *lp = netdev_priv(dev);
38596 - struct pcnet32_access *a = &lp->a;
38597 + struct pcnet32_access *a = lp->a;
38598 ulong ioaddr = dev->base_addr;
38599 unsigned long flags;
38600 int i, regs[4];
38601 @@ -1112,7 +1112,7 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
38602 {
38603 int csr5;
38604 struct pcnet32_private *lp = netdev_priv(dev);
38605 - struct pcnet32_access *a = &lp->a;
38606 + struct pcnet32_access *a = lp->a;
38607 ulong ioaddr = dev->base_addr;
38608 int ticks;
38609
38610 @@ -1388,8 +1388,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
38611 spin_lock_irqsave(&lp->lock, flags);
38612 if (pcnet32_tx(dev)) {
38613 /* reset the chip to clear the error condition, then restart */
38614 - lp->a.reset(ioaddr);
38615 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38616 + lp->a->reset(ioaddr);
38617 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38618 pcnet32_restart(dev, CSR0_START);
38619 netif_wake_queue(dev);
38620 }
38621 @@ -1401,12 +1401,12 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
38622 __napi_complete(napi);
38623
38624 /* clear interrupt masks */
38625 - val = lp->a.read_csr(ioaddr, CSR3);
38626 + val = lp->a->read_csr(ioaddr, CSR3);
38627 val &= 0x00ff;
38628 - lp->a.write_csr(ioaddr, CSR3, val);
38629 + lp->a->write_csr(ioaddr, CSR3, val);
38630
38631 /* Set interrupt enable. */
38632 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
38633 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
38634
38635 spin_unlock_irqrestore(&lp->lock, flags);
38636 }
38637 @@ -1429,7 +1429,7 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
38638 int i, csr0;
38639 u16 *buff = ptr;
38640 struct pcnet32_private *lp = netdev_priv(dev);
38641 - struct pcnet32_access *a = &lp->a;
38642 + struct pcnet32_access *a = lp->a;
38643 ulong ioaddr = dev->base_addr;
38644 unsigned long flags;
38645
38646 @@ -1466,9 +1466,9 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
38647 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
38648 if (lp->phymask & (1 << j)) {
38649 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
38650 - lp->a.write_bcr(ioaddr, 33,
38651 + lp->a->write_bcr(ioaddr, 33,
38652 (j << 5) | i);
38653 - *buff++ = lp->a.read_bcr(ioaddr, 34);
38654 + *buff++ = lp->a->read_bcr(ioaddr, 34);
38655 }
38656 }
38657 }
38658 @@ -1858,7 +1858,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38659 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
38660 lp->options |= PCNET32_PORT_FD;
38661
38662 - lp->a = *a;
38663 + lp->a = a;
38664
38665 /* prior to register_netdev, dev->name is not yet correct */
38666 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
38667 @@ -1917,7 +1917,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38668 if (lp->mii) {
38669 /* lp->phycount and lp->phymask are set to 0 by memset above */
38670
38671 - lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
38672 + lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
38673 /* scan for PHYs */
38674 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
38675 unsigned short id1, id2;
38676 @@ -1938,7 +1938,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38677 "Found PHY %04x:%04x at address %d.\n",
38678 id1, id2, i);
38679 }
38680 - lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
38681 + lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
38682 if (lp->phycount > 1) {
38683 lp->options |= PCNET32_PORT_MII;
38684 }
38685 @@ -2109,10 +2109,10 @@ static int pcnet32_open(struct net_device *dev)
38686 }
38687
38688 /* Reset the PCNET32 */
38689 - lp->a.reset(ioaddr);
38690 + lp->a->reset(ioaddr);
38691
38692 /* switch pcnet32 to 32bit mode */
38693 - lp->a.write_bcr(ioaddr, 20, 2);
38694 + lp->a->write_bcr(ioaddr, 20, 2);
38695
38696 if (netif_msg_ifup(lp))
38697 printk(KERN_DEBUG
38698 @@ -2122,14 +2122,14 @@ static int pcnet32_open(struct net_device *dev)
38699 (u32) (lp->init_dma_addr));
38700
38701 /* set/reset autoselect bit */
38702 - val = lp->a.read_bcr(ioaddr, 2) & ~2;
38703 + val = lp->a->read_bcr(ioaddr, 2) & ~2;
38704 if (lp->options & PCNET32_PORT_ASEL)
38705 val |= 2;
38706 - lp->a.write_bcr(ioaddr, 2, val);
38707 + lp->a->write_bcr(ioaddr, 2, val);
38708
38709 /* handle full duplex setting */
38710 if (lp->mii_if.full_duplex) {
38711 - val = lp->a.read_bcr(ioaddr, 9) & ~3;
38712 + val = lp->a->read_bcr(ioaddr, 9) & ~3;
38713 if (lp->options & PCNET32_PORT_FD) {
38714 val |= 1;
38715 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
38716 @@ -2139,14 +2139,14 @@ static int pcnet32_open(struct net_device *dev)
38717 if (lp->chip_version == 0x2627)
38718 val |= 3;
38719 }
38720 - lp->a.write_bcr(ioaddr, 9, val);
38721 + lp->a->write_bcr(ioaddr, 9, val);
38722 }
38723
38724 /* set/reset GPSI bit in test register */
38725 - val = lp->a.read_csr(ioaddr, 124) & ~0x10;
38726 + val = lp->a->read_csr(ioaddr, 124) & ~0x10;
38727 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
38728 val |= 0x10;
38729 - lp->a.write_csr(ioaddr, 124, val);
38730 + lp->a->write_csr(ioaddr, 124, val);
38731
38732 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
38733 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
38734 @@ -2167,24 +2167,24 @@ static int pcnet32_open(struct net_device *dev)
38735 * duplex, and/or enable auto negotiation, and clear DANAS
38736 */
38737 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
38738 - lp->a.write_bcr(ioaddr, 32,
38739 - lp->a.read_bcr(ioaddr, 32) | 0x0080);
38740 + lp->a->write_bcr(ioaddr, 32,
38741 + lp->a->read_bcr(ioaddr, 32) | 0x0080);
38742 /* disable Auto Negotiation, set 10Mpbs, HD */
38743 - val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
38744 + val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
38745 if (lp->options & PCNET32_PORT_FD)
38746 val |= 0x10;
38747 if (lp->options & PCNET32_PORT_100)
38748 val |= 0x08;
38749 - lp->a.write_bcr(ioaddr, 32, val);
38750 + lp->a->write_bcr(ioaddr, 32, val);
38751 } else {
38752 if (lp->options & PCNET32_PORT_ASEL) {
38753 - lp->a.write_bcr(ioaddr, 32,
38754 - lp->a.read_bcr(ioaddr,
38755 + lp->a->write_bcr(ioaddr, 32,
38756 + lp->a->read_bcr(ioaddr,
38757 32) | 0x0080);
38758 /* enable auto negotiate, setup, disable fd */
38759 - val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
38760 + val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
38761 val |= 0x20;
38762 - lp->a.write_bcr(ioaddr, 32, val);
38763 + lp->a->write_bcr(ioaddr, 32, val);
38764 }
38765 }
38766 } else {
38767 @@ -2197,10 +2197,10 @@ static int pcnet32_open(struct net_device *dev)
38768 * There is really no good other way to handle multiple PHYs
38769 * other than turning off all automatics
38770 */
38771 - val = lp->a.read_bcr(ioaddr, 2);
38772 - lp->a.write_bcr(ioaddr, 2, val & ~2);
38773 - val = lp->a.read_bcr(ioaddr, 32);
38774 - lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
38775 + val = lp->a->read_bcr(ioaddr, 2);
38776 + lp->a->write_bcr(ioaddr, 2, val & ~2);
38777 + val = lp->a->read_bcr(ioaddr, 32);
38778 + lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
38779
38780 if (!(lp->options & PCNET32_PORT_ASEL)) {
38781 /* setup ecmd */
38782 @@ -2210,7 +2210,7 @@ static int pcnet32_open(struct net_device *dev)
38783 ecmd.speed =
38784 lp->
38785 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
38786 - bcr9 = lp->a.read_bcr(ioaddr, 9);
38787 + bcr9 = lp->a->read_bcr(ioaddr, 9);
38788
38789 if (lp->options & PCNET32_PORT_FD) {
38790 ecmd.duplex = DUPLEX_FULL;
38791 @@ -2219,7 +2219,7 @@ static int pcnet32_open(struct net_device *dev)
38792 ecmd.duplex = DUPLEX_HALF;
38793 bcr9 |= ~(1 << 0);
38794 }
38795 - lp->a.write_bcr(ioaddr, 9, bcr9);
38796 + lp->a->write_bcr(ioaddr, 9, bcr9);
38797 }
38798
38799 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
38800 @@ -2252,9 +2252,9 @@ static int pcnet32_open(struct net_device *dev)
38801
38802 #ifdef DO_DXSUFLO
38803 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
38804 - val = lp->a.read_csr(ioaddr, CSR3);
38805 + val = lp->a->read_csr(ioaddr, CSR3);
38806 val |= 0x40;
38807 - lp->a.write_csr(ioaddr, CSR3, val);
38808 + lp->a->write_csr(ioaddr, CSR3, val);
38809 }
38810 #endif
38811
38812 @@ -2270,11 +2270,11 @@ static int pcnet32_open(struct net_device *dev)
38813 napi_enable(&lp->napi);
38814
38815 /* Re-initialize the PCNET32, and start it when done. */
38816 - lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
38817 - lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
38818 + lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
38819 + lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
38820
38821 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38822 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
38823 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38824 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
38825
38826 netif_start_queue(dev);
38827
38828 @@ -2286,20 +2286,20 @@ static int pcnet32_open(struct net_device *dev)
38829
38830 i = 0;
38831 while (i++ < 100)
38832 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
38833 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
38834 break;
38835 /*
38836 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
38837 * reports that doing so triggers a bug in the '974.
38838 */
38839 - lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
38840 + lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
38841
38842 if (netif_msg_ifup(lp))
38843 printk(KERN_DEBUG
38844 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
38845 dev->name, i,
38846 (u32) (lp->init_dma_addr),
38847 - lp->a.read_csr(ioaddr, CSR0));
38848 + lp->a->read_csr(ioaddr, CSR0));
38849
38850 spin_unlock_irqrestore(&lp->lock, flags);
38851
38852 @@ -2313,7 +2313,7 @@ static int pcnet32_open(struct net_device *dev)
38853 * Switch back to 16bit mode to avoid problems with dumb
38854 * DOS packet driver after a warm reboot
38855 */
38856 - lp->a.write_bcr(ioaddr, 20, 4);
38857 + lp->a->write_bcr(ioaddr, 20, 4);
38858
38859 err_free_irq:
38860 spin_unlock_irqrestore(&lp->lock, flags);
38861 @@ -2420,7 +2420,7 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
38862
38863 /* wait for stop */
38864 for (i = 0; i < 100; i++)
38865 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
38866 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
38867 break;
38868
38869 if (i >= 100 && netif_msg_drv(lp))
38870 @@ -2433,13 +2433,13 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
38871 return;
38872
38873 /* ReInit Ring */
38874 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
38875 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
38876 i = 0;
38877 while (i++ < 1000)
38878 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
38879 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
38880 break;
38881
38882 - lp->a.write_csr(ioaddr, CSR0, csr0_bits);
38883 + lp->a->write_csr(ioaddr, CSR0, csr0_bits);
38884 }
38885
38886 static void pcnet32_tx_timeout(struct net_device *dev)
38887 @@ -2452,8 +2452,8 @@ static void pcnet32_tx_timeout(struct net_device *dev)
38888 if (pcnet32_debug & NETIF_MSG_DRV)
38889 printk(KERN_ERR
38890 "%s: transmit timed out, status %4.4x, resetting.\n",
38891 - dev->name, lp->a.read_csr(ioaddr, CSR0));
38892 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
38893 + dev->name, lp->a->read_csr(ioaddr, CSR0));
38894 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
38895 dev->stats.tx_errors++;
38896 if (netif_msg_tx_err(lp)) {
38897 int i;
38898 @@ -2497,7 +2497,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
38899 if (netif_msg_tx_queued(lp)) {
38900 printk(KERN_DEBUG
38901 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
38902 - dev->name, lp->a.read_csr(ioaddr, CSR0));
38903 + dev->name, lp->a->read_csr(ioaddr, CSR0));
38904 }
38905
38906 /* Default status -- will not enable Successful-TxDone
38907 @@ -2528,7 +2528,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
38908 dev->stats.tx_bytes += skb->len;
38909
38910 /* Trigger an immediate send poll. */
38911 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
38912 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
38913
38914 dev->trans_start = jiffies;
38915
38916 @@ -2555,18 +2555,18 @@ pcnet32_interrupt(int irq, void *dev_id)
38917
38918 spin_lock(&lp->lock);
38919
38920 - csr0 = lp->a.read_csr(ioaddr, CSR0);
38921 + csr0 = lp->a->read_csr(ioaddr, CSR0);
38922 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
38923 if (csr0 == 0xffff) {
38924 break; /* PCMCIA remove happened */
38925 }
38926 /* Acknowledge all of the current interrupt sources ASAP. */
38927 - lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
38928 + lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
38929
38930 if (netif_msg_intr(lp))
38931 printk(KERN_DEBUG
38932 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
38933 - dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
38934 + dev->name, csr0, lp->a->read_csr(ioaddr, CSR0));
38935
38936 /* Log misc errors. */
38937 if (csr0 & 0x4000)
38938 @@ -2595,19 +2595,19 @@ pcnet32_interrupt(int irq, void *dev_id)
38939 if (napi_schedule_prep(&lp->napi)) {
38940 u16 val;
38941 /* set interrupt masks */
38942 - val = lp->a.read_csr(ioaddr, CSR3);
38943 + val = lp->a->read_csr(ioaddr, CSR3);
38944 val |= 0x5f00;
38945 - lp->a.write_csr(ioaddr, CSR3, val);
38946 + lp->a->write_csr(ioaddr, CSR3, val);
38947
38948 __napi_schedule(&lp->napi);
38949 break;
38950 }
38951 - csr0 = lp->a.read_csr(ioaddr, CSR0);
38952 + csr0 = lp->a->read_csr(ioaddr, CSR0);
38953 }
38954
38955 if (netif_msg_intr(lp))
38956 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
38957 - dev->name, lp->a.read_csr(ioaddr, CSR0));
38958 + dev->name, lp->a->read_csr(ioaddr, CSR0));
38959
38960 spin_unlock(&lp->lock);
38961
38962 @@ -2627,21 +2627,21 @@ static int pcnet32_close(struct net_device *dev)
38963
38964 spin_lock_irqsave(&lp->lock, flags);
38965
38966 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
38967 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
38968
38969 if (netif_msg_ifdown(lp))
38970 printk(KERN_DEBUG
38971 "%s: Shutting down ethercard, status was %2.2x.\n",
38972 - dev->name, lp->a.read_csr(ioaddr, CSR0));
38973 + dev->name, lp->a->read_csr(ioaddr, CSR0));
38974
38975 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
38976 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
38977 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
38978
38979 /*
38980 * Switch back to 16bit mode to avoid problems with dumb
38981 * DOS packet driver after a warm reboot
38982 */
38983 - lp->a.write_bcr(ioaddr, 20, 4);
38984 + lp->a->write_bcr(ioaddr, 20, 4);
38985
38986 spin_unlock_irqrestore(&lp->lock, flags);
38987
38988 @@ -2664,7 +2664,7 @@ static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
38989 unsigned long flags;
38990
38991 spin_lock_irqsave(&lp->lock, flags);
38992 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
38993 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
38994 spin_unlock_irqrestore(&lp->lock, flags);
38995
38996 return &dev->stats;
38997 @@ -2686,10 +2686,10 @@ static void pcnet32_load_multicast(struct net_device *dev)
38998 if (dev->flags & IFF_ALLMULTI) {
38999 ib->filter[0] = cpu_to_le32(~0U);
39000 ib->filter[1] = cpu_to_le32(~0U);
39001 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
39002 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
39003 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
39004 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
39005 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
39006 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
39007 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
39008 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
39009 return;
39010 }
39011 /* clear the multicast filter */
39012 @@ -2710,7 +2710,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
39013 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
39014 }
39015 for (i = 0; i < 4; i++)
39016 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
39017 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
39018 le16_to_cpu(mcast_table[i]));
39019 return;
39020 }
39021 @@ -2726,7 +2726,7 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
39022
39023 spin_lock_irqsave(&lp->lock, flags);
39024 suspended = pcnet32_suspend(dev, &flags, 0);
39025 - csr15 = lp->a.read_csr(ioaddr, CSR15);
39026 + csr15 = lp->a->read_csr(ioaddr, CSR15);
39027 if (dev->flags & IFF_PROMISC) {
39028 /* Log any net taps. */
39029 if (netif_msg_hw(lp))
39030 @@ -2735,21 +2735,21 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
39031 lp->init_block->mode =
39032 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
39033 7);
39034 - lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
39035 + lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
39036 } else {
39037 lp->init_block->mode =
39038 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
39039 - lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
39040 + lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
39041 pcnet32_load_multicast(dev);
39042 }
39043
39044 if (suspended) {
39045 int csr5;
39046 /* clear SUSPEND (SPND) - CSR5 bit 0 */
39047 - csr5 = lp->a.read_csr(ioaddr, CSR5);
39048 - lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
39049 + csr5 = lp->a->read_csr(ioaddr, CSR5);
39050 + lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
39051 } else {
39052 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
39053 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
39054 pcnet32_restart(dev, CSR0_NORMAL);
39055 netif_wake_queue(dev);
39056 }
39057 @@ -2767,8 +2767,8 @@ static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
39058 if (!lp->mii)
39059 return 0;
39060
39061 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39062 - val_out = lp->a.read_bcr(ioaddr, 34);
39063 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39064 + val_out = lp->a->read_bcr(ioaddr, 34);
39065
39066 return val_out;
39067 }
39068 @@ -2782,8 +2782,8 @@ static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
39069 if (!lp->mii)
39070 return;
39071
39072 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39073 - lp->a.write_bcr(ioaddr, 34, val);
39074 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39075 + lp->a->write_bcr(ioaddr, 34, val);
39076 }
39077
39078 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39079 @@ -2862,7 +2862,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
39080 curr_link = mii_link_ok(&lp->mii_if);
39081 } else {
39082 ulong ioaddr = dev->base_addr; /* card base I/O address */
39083 - curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
39084 + curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
39085 }
39086 if (!curr_link) {
39087 if (prev_link || verbose) {
39088 @@ -2887,13 +2887,13 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
39089 (ecmd.duplex ==
39090 DUPLEX_FULL) ? "full" : "half");
39091 }
39092 - bcr9 = lp->a.read_bcr(dev->base_addr, 9);
39093 + bcr9 = lp->a->read_bcr(dev->base_addr, 9);
39094 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
39095 if (lp->mii_if.full_duplex)
39096 bcr9 |= (1 << 0);
39097 else
39098 bcr9 &= ~(1 << 0);
39099 - lp->a.write_bcr(dev->base_addr, 9, bcr9);
39100 + lp->a->write_bcr(dev->base_addr, 9, bcr9);
39101 }
39102 } else {
39103 if (netif_msg_link(lp))
39104 diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
39105 index 7cc9898..6eb50d3 100644
39106 --- a/drivers/net/sis190.c
39107 +++ b/drivers/net/sis190.c
39108 @@ -1598,7 +1598,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
39109 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
39110 struct net_device *dev)
39111 {
39112 - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
39113 + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
39114 struct sis190_private *tp = netdev_priv(dev);
39115 struct pci_dev *isa_bridge;
39116 u8 reg, tmp8;
39117 diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
39118 index e13685a..60c948c 100644
39119 --- a/drivers/net/sundance.c
39120 +++ b/drivers/net/sundance.c
39121 @@ -225,7 +225,7 @@ enum {
39122 struct pci_id_info {
39123 const char *name;
39124 };
39125 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
39126 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
39127 {"D-Link DFE-550TX FAST Ethernet Adapter"},
39128 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
39129 {"D-Link DFE-580TX 4 port Server Adapter"},
39130 diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
39131 index 529f55a..cccaa18 100644
39132 --- a/drivers/net/tg3.h
39133 +++ b/drivers/net/tg3.h
39134 @@ -95,6 +95,7 @@
39135 #define CHIPREV_ID_5750_A0 0x4000
39136 #define CHIPREV_ID_5750_A1 0x4001
39137 #define CHIPREV_ID_5750_A3 0x4003
39138 +#define CHIPREV_ID_5750_C1 0x4201
39139 #define CHIPREV_ID_5750_C2 0x4202
39140 #define CHIPREV_ID_5752_A0_HW 0x5000
39141 #define CHIPREV_ID_5752_A0 0x6000
39142 diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
39143 index b9db1b5..720f9ce 100644
39144 --- a/drivers/net/tokenring/abyss.c
39145 +++ b/drivers/net/tokenring/abyss.c
39146 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
39147
39148 static int __init abyss_init (void)
39149 {
39150 - abyss_netdev_ops = tms380tr_netdev_ops;
39151 + pax_open_kernel();
39152 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39153
39154 - abyss_netdev_ops.ndo_open = abyss_open;
39155 - abyss_netdev_ops.ndo_stop = abyss_close;
39156 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
39157 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
39158 + pax_close_kernel();
39159
39160 return pci_register_driver(&abyss_driver);
39161 }
39162 diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
39163 index 456f8bf..373e56d 100644
39164 --- a/drivers/net/tokenring/madgemc.c
39165 +++ b/drivers/net/tokenring/madgemc.c
39166 @@ -755,9 +755,11 @@ static struct mca_driver madgemc_driver = {
39167
39168 static int __init madgemc_init (void)
39169 {
39170 - madgemc_netdev_ops = tms380tr_netdev_ops;
39171 - madgemc_netdev_ops.ndo_open = madgemc_open;
39172 - madgemc_netdev_ops.ndo_stop = madgemc_close;
39173 + pax_open_kernel();
39174 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39175 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
39176 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
39177 + pax_close_kernel();
39178
39179 return mca_register_driver (&madgemc_driver);
39180 }
39181 diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
39182 index 16e8783..925bd49 100644
39183 --- a/drivers/net/tokenring/proteon.c
39184 +++ b/drivers/net/tokenring/proteon.c
39185 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
39186 struct platform_device *pdev;
39187 int i, num = 0, err = 0;
39188
39189 - proteon_netdev_ops = tms380tr_netdev_ops;
39190 - proteon_netdev_ops.ndo_open = proteon_open;
39191 - proteon_netdev_ops.ndo_stop = tms380tr_close;
39192 + pax_open_kernel();
39193 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39194 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
39195 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
39196 + pax_close_kernel();
39197
39198 err = platform_driver_register(&proteon_driver);
39199 if (err)
39200 diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
39201 index 46db5c5..37c1536 100644
39202 --- a/drivers/net/tokenring/skisa.c
39203 +++ b/drivers/net/tokenring/skisa.c
39204 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
39205 struct platform_device *pdev;
39206 int i, num = 0, err = 0;
39207
39208 - sk_isa_netdev_ops = tms380tr_netdev_ops;
39209 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
39210 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
39211 + pax_open_kernel();
39212 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39213 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
39214 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
39215 + pax_close_kernel();
39216
39217 err = platform_driver_register(&sk_isa_driver);
39218 if (err)
39219 diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
39220 index 74e5ba4..5cf6bc9 100644
39221 --- a/drivers/net/tulip/de2104x.c
39222 +++ b/drivers/net/tulip/de2104x.c
39223 @@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
39224 struct de_srom_info_leaf *il;
39225 void *bufp;
39226
39227 + pax_track_stack();
39228 +
39229 /* download entire eeprom */
39230 for (i = 0; i < DE_EEPROM_WORDS; i++)
39231 ((__le16 *)ee_data)[i] =
39232 diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
39233 index a8349b7..90f9dfe 100644
39234 --- a/drivers/net/tulip/de4x5.c
39235 +++ b/drivers/net/tulip/de4x5.c
39236 @@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39237 for (i=0; i<ETH_ALEN; i++) {
39238 tmp.addr[i] = dev->dev_addr[i];
39239 }
39240 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
39241 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
39242 break;
39243
39244 case DE4X5_SET_HWADDR: /* Set the hardware address */
39245 @@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39246 spin_lock_irqsave(&lp->lock, flags);
39247 memcpy(&statbuf, &lp->pktStats, ioc->len);
39248 spin_unlock_irqrestore(&lp->lock, flags);
39249 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
39250 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
39251 return -EFAULT;
39252 break;
39253 }
39254 diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
39255 index 391acd3..56d11cd 100644
39256 --- a/drivers/net/tulip/eeprom.c
39257 +++ b/drivers/net/tulip/eeprom.c
39258 @@ -80,7 +80,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
39259 {NULL}};
39260
39261
39262 -static const char *block_name[] __devinitdata = {
39263 +static const char *block_name[] __devinitconst = {
39264 "21140 non-MII",
39265 "21140 MII PHY",
39266 "21142 Serial PHY",
39267 diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
39268 index b38d3b7..b1cff23 100644
39269 --- a/drivers/net/tulip/winbond-840.c
39270 +++ b/drivers/net/tulip/winbond-840.c
39271 @@ -235,7 +235,7 @@ struct pci_id_info {
39272 int drv_flags; /* Driver use, intended as capability flags. */
39273 };
39274
39275 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
39276 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
39277 { /* Sometime a Level-One switch card. */
39278 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
39279 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
39280 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
39281 index f450bc9..2b747c8 100644
39282 --- a/drivers/net/usb/hso.c
39283 +++ b/drivers/net/usb/hso.c
39284 @@ -71,7 +71,7 @@
39285 #include <asm/byteorder.h>
39286 #include <linux/serial_core.h>
39287 #include <linux/serial.h>
39288 -
39289 +#include <asm/local.h>
39290
39291 #define DRIVER_VERSION "1.2"
39292 #define MOD_AUTHOR "Option Wireless"
39293 @@ -258,7 +258,7 @@ struct hso_serial {
39294
39295 /* from usb_serial_port */
39296 struct tty_struct *tty;
39297 - int open_count;
39298 + local_t open_count;
39299 spinlock_t serial_lock;
39300
39301 int (*write_data) (struct hso_serial *serial);
39302 @@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
39303 struct urb *urb;
39304
39305 urb = serial->rx_urb[0];
39306 - if (serial->open_count > 0) {
39307 + if (local_read(&serial->open_count) > 0) {
39308 count = put_rxbuf_data(urb, serial);
39309 if (count == -1)
39310 return;
39311 @@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
39312 DUMP1(urb->transfer_buffer, urb->actual_length);
39313
39314 /* Anyone listening? */
39315 - if (serial->open_count == 0)
39316 + if (local_read(&serial->open_count) == 0)
39317 return;
39318
39319 if (status == 0) {
39320 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
39321 spin_unlock_irq(&serial->serial_lock);
39322
39323 /* check for port already opened, if not set the termios */
39324 - serial->open_count++;
39325 - if (serial->open_count == 1) {
39326 + if (local_inc_return(&serial->open_count) == 1) {
39327 tty->low_latency = 1;
39328 serial->rx_state = RX_IDLE;
39329 /* Force default termio settings */
39330 @@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
39331 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
39332 if (result) {
39333 hso_stop_serial_device(serial->parent);
39334 - serial->open_count--;
39335 + local_dec(&serial->open_count);
39336 kref_put(&serial->parent->ref, hso_serial_ref_free);
39337 }
39338 } else {
39339 @@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
39340
39341 /* reset the rts and dtr */
39342 /* do the actual close */
39343 - serial->open_count--;
39344 + local_dec(&serial->open_count);
39345
39346 - if (serial->open_count <= 0) {
39347 - serial->open_count = 0;
39348 + if (local_read(&serial->open_count) <= 0) {
39349 + local_set(&serial->open_count, 0);
39350 spin_lock_irq(&serial->serial_lock);
39351 if (serial->tty == tty) {
39352 serial->tty->driver_data = NULL;
39353 @@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
39354
39355 /* the actual setup */
39356 spin_lock_irqsave(&serial->serial_lock, flags);
39357 - if (serial->open_count)
39358 + if (local_read(&serial->open_count))
39359 _hso_serial_set_termios(tty, old);
39360 else
39361 tty->termios = old;
39362 @@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interface *iface)
39363 /* Start all serial ports */
39364 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
39365 if (serial_table[i] && (serial_table[i]->interface == iface)) {
39366 - if (dev2ser(serial_table[i])->open_count) {
39367 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
39368 result =
39369 hso_start_serial_device(serial_table[i], GFP_NOIO);
39370 hso_kick_transmit(dev2ser(serial_table[i]));
39371 diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
39372 index 3e94f0c..ffdd926 100644
39373 --- a/drivers/net/vxge/vxge-config.h
39374 +++ b/drivers/net/vxge/vxge-config.h
39375 @@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs {
39376 void (*link_down)(struct __vxge_hw_device *devh);
39377 void (*crit_err)(struct __vxge_hw_device *devh,
39378 enum vxge_hw_event type, u64 ext_data);
39379 -};
39380 +} __no_const;
39381
39382 /*
39383 * struct __vxge_hw_blockpool_entry - Block private data structure
39384 diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
39385 index 068d7a9..35293de 100644
39386 --- a/drivers/net/vxge/vxge-main.c
39387 +++ b/drivers/net/vxge/vxge-main.c
39388 @@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
39389 struct sk_buff *completed[NR_SKB_COMPLETED];
39390 int more;
39391
39392 + pax_track_stack();
39393 +
39394 do {
39395 more = 0;
39396 skb_ptr = completed;
39397 @@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
39398 u8 mtable[256] = {0}; /* CPU to vpath mapping */
39399 int index;
39400
39401 + pax_track_stack();
39402 +
39403 /*
39404 * Filling
39405 * - itable with bucket numbers
39406 diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
39407 index 461742b..81be42e 100644
39408 --- a/drivers/net/vxge/vxge-traffic.h
39409 +++ b/drivers/net/vxge/vxge-traffic.h
39410 @@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs {
39411 struct vxge_hw_mempool_dma *dma_object,
39412 u32 index,
39413 u32 is_last);
39414 -};
39415 +} __no_const;
39416
39417 void
39418 __vxge_hw_mempool_destroy(
39419 diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
39420 index cd8cb95..4153b79 100644
39421 --- a/drivers/net/wan/cycx_x25.c
39422 +++ b/drivers/net/wan/cycx_x25.c
39423 @@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned char *p, int len)
39424 unsigned char hex[1024],
39425 * phex = hex;
39426
39427 + pax_track_stack();
39428 +
39429 if (len >= (sizeof(hex) / 2))
39430 len = (sizeof(hex) / 2) - 1;
39431
39432 diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
39433 index aa9248f..a4e3c3b 100644
39434 --- a/drivers/net/wan/hdlc_x25.c
39435 +++ b/drivers/net/wan/hdlc_x25.c
39436 @@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
39437
39438 static int x25_open(struct net_device *dev)
39439 {
39440 - struct lapb_register_struct cb;
39441 + static struct lapb_register_struct cb = {
39442 + .connect_confirmation = x25_connected,
39443 + .connect_indication = x25_connected,
39444 + .disconnect_confirmation = x25_disconnected,
39445 + .disconnect_indication = x25_disconnected,
39446 + .data_indication = x25_data_indication,
39447 + .data_transmit = x25_data_transmit
39448 + };
39449 int result;
39450
39451 - cb.connect_confirmation = x25_connected;
39452 - cb.connect_indication = x25_connected;
39453 - cb.disconnect_confirmation = x25_disconnected;
39454 - cb.disconnect_indication = x25_disconnected;
39455 - cb.data_indication = x25_data_indication;
39456 - cb.data_transmit = x25_data_transmit;
39457 -
39458 result = lapb_register(dev, &cb);
39459 if (result != LAPB_OK)
39460 return result;
39461 diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
39462 index 5ad287c..783b020 100644
39463 --- a/drivers/net/wimax/i2400m/usb-fw.c
39464 +++ b/drivers/net/wimax/i2400m/usb-fw.c
39465 @@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *i2400m,
39466 int do_autopm = 1;
39467 DECLARE_COMPLETION_ONSTACK(notif_completion);
39468
39469 + pax_track_stack();
39470 +
39471 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
39472 i2400m, ack, ack_size);
39473 BUG_ON(_ack == i2400m->bm_ack_buf);
39474 diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
39475 index 6c26840..62c97c3 100644
39476 --- a/drivers/net/wireless/airo.c
39477 +++ b/drivers/net/wireless/airo.c
39478 @@ -3003,6 +3003,8 @@ static void airo_process_scan_results (struct airo_info *ai) {
39479 BSSListElement * loop_net;
39480 BSSListElement * tmp_net;
39481
39482 + pax_track_stack();
39483 +
39484 /* Blow away current list of scan results */
39485 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
39486 list_move_tail (&loop_net->list, &ai->network_free_list);
39487 @@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
39488 WepKeyRid wkr;
39489 int rc;
39490
39491 + pax_track_stack();
39492 +
39493 memset( &mySsid, 0, sizeof( mySsid ) );
39494 kfree (ai->flash);
39495 ai->flash = NULL;
39496 @@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct inode *inode,
39497 __le32 *vals = stats.vals;
39498 int len;
39499
39500 + pax_track_stack();
39501 +
39502 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
39503 return -ENOMEM;
39504 data = (struct proc_data *)file->private_data;
39505 @@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
39506 /* If doLoseSync is not 1, we won't do a Lose Sync */
39507 int doLoseSync = -1;
39508
39509 + pax_track_stack();
39510 +
39511 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
39512 return -ENOMEM;
39513 data = (struct proc_data *)file->private_data;
39514 @@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_device *dev,
39515 int i;
39516 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
39517
39518 + pax_track_stack();
39519 +
39520 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
39521 if (!qual)
39522 return -ENOMEM;
39523 @@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(struct airo_info *local)
39524 CapabilityRid cap_rid;
39525 __le32 *vals = stats_rid.vals;
39526
39527 + pax_track_stack();
39528 +
39529 /* Get stats out of the card */
39530 clear_bit(JOB_WSTATS, &local->jobs);
39531 if (local->power.event) {
39532 diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
39533 index 747508c..82e965d 100644
39534 --- a/drivers/net/wireless/ath/ath5k/debug.c
39535 +++ b/drivers/net/wireless/ath/ath5k/debug.c
39536 @@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
39537 unsigned int v;
39538 u64 tsf;
39539
39540 + pax_track_stack();
39541 +
39542 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
39543 len += snprintf(buf+len, sizeof(buf)-len,
39544 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
39545 @@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
39546 unsigned int len = 0;
39547 unsigned int i;
39548
39549 + pax_track_stack();
39550 +
39551 len += snprintf(buf+len, sizeof(buf)-len,
39552 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
39553
39554 diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
39555 index 2be4c22..593b1eb 100644
39556 --- a/drivers/net/wireless/ath/ath9k/debug.c
39557 +++ b/drivers/net/wireless/ath/ath9k/debug.c
39558 @@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
39559 char buf[512];
39560 unsigned int len = 0;
39561
39562 + pax_track_stack();
39563 +
39564 len += snprintf(buf + len, sizeof(buf) - len,
39565 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
39566 len += snprintf(buf + len, sizeof(buf) - len,
39567 @@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
39568 int i;
39569 u8 addr[ETH_ALEN];
39570
39571 + pax_track_stack();
39572 +
39573 len += snprintf(buf + len, sizeof(buf) - len,
39574 "primary: %s (%s chan=%d ht=%d)\n",
39575 wiphy_name(sc->pri_wiphy->hw->wiphy),
39576 diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c
39577 index 80b19a4..dab3a45 100644
39578 --- a/drivers/net/wireless/b43/debugfs.c
39579 +++ b/drivers/net/wireless/b43/debugfs.c
39580 @@ -43,7 +43,7 @@ static struct dentry *rootdir;
39581 struct b43_debugfs_fops {
39582 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
39583 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
39584 - struct file_operations fops;
39585 + const struct file_operations fops;
39586 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
39587 size_t file_struct_offset;
39588 };
39589 diff --git a/drivers/net/wireless/b43legacy/debugfs.c b/drivers/net/wireless/b43legacy/debugfs.c
39590 index 1f85ac5..c99b4b4 100644
39591 --- a/drivers/net/wireless/b43legacy/debugfs.c
39592 +++ b/drivers/net/wireless/b43legacy/debugfs.c
39593 @@ -44,7 +44,7 @@ static struct dentry *rootdir;
39594 struct b43legacy_debugfs_fops {
39595 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
39596 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
39597 - struct file_operations fops;
39598 + const struct file_operations fops;
39599 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
39600 size_t file_struct_offset;
39601 /* Take wl->irq_lock before calling read/write? */
39602 diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
39603 index 43102bf..3b569c3 100644
39604 --- a/drivers/net/wireless/ipw2x00/ipw2100.c
39605 +++ b/drivers/net/wireless/ipw2x00/ipw2100.c
39606 @@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
39607 int err;
39608 DECLARE_SSID_BUF(ssid);
39609
39610 + pax_track_stack();
39611 +
39612 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
39613
39614 if (ssid_len)
39615 @@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw2100_priv *priv,
39616 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
39617 int err;
39618
39619 + pax_track_stack();
39620 +
39621 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
39622 idx, keylen, len);
39623
39624 diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
39625 index 282b1f7..169f0cf 100644
39626 --- a/drivers/net/wireless/ipw2x00/libipw_rx.c
39627 +++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
39628 @@ -1566,6 +1566,8 @@ static void libipw_process_probe_response(struct libipw_device
39629 unsigned long flags;
39630 DECLARE_SSID_BUF(ssid);
39631
39632 + pax_track_stack();
39633 +
39634 LIBIPW_DEBUG_SCAN("'%s' (%pM"
39635 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
39636 print_ssid(ssid, info_element->data, info_element->len),
39637 diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
39638 index 950267a..80d5fd2 100644
39639 --- a/drivers/net/wireless/iwlwifi/iwl-1000.c
39640 +++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
39641 @@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib = {
39642 },
39643 };
39644
39645 -static struct iwl_ops iwl1000_ops = {
39646 +static const struct iwl_ops iwl1000_ops = {
39647 .ucode = &iwl5000_ucode,
39648 .lib = &iwl1000_lib,
39649 .hcmd = &iwl5000_hcmd,
39650 diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
39651 index 56bfcc3..b348020 100644
39652 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c
39653 +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
39654 @@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
39655 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
39656 };
39657
39658 -static struct iwl_ops iwl3945_ops = {
39659 +static const struct iwl_ops iwl3945_ops = {
39660 .ucode = &iwl3945_ucode,
39661 .lib = &iwl3945_lib,
39662 .hcmd = &iwl3945_hcmd,
39663 diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
39664 index 585b8d4..e142963 100644
39665 --- a/drivers/net/wireless/iwlwifi/iwl-4965.c
39666 +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
39667 @@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib = {
39668 },
39669 };
39670
39671 -static struct iwl_ops iwl4965_ops = {
39672 +static const struct iwl_ops iwl4965_ops = {
39673 .ucode = &iwl4965_ucode,
39674 .lib = &iwl4965_lib,
39675 .hcmd = &iwl4965_hcmd,
39676 diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
39677 index 1f423f2..e37c192 100644
39678 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c
39679 +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
39680 @@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib = {
39681 },
39682 };
39683
39684 -struct iwl_ops iwl5000_ops = {
39685 +const struct iwl_ops iwl5000_ops = {
39686 .ucode = &iwl5000_ucode,
39687 .lib = &iwl5000_lib,
39688 .hcmd = &iwl5000_hcmd,
39689 .utils = &iwl5000_hcmd_utils,
39690 };
39691
39692 -static struct iwl_ops iwl5150_ops = {
39693 +static const struct iwl_ops iwl5150_ops = {
39694 .ucode = &iwl5000_ucode,
39695 .lib = &iwl5150_lib,
39696 .hcmd = &iwl5000_hcmd,
39697 diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
39698 index 1473452..f07d5e1 100644
39699 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c
39700 +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
39701 @@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000_hcmd_utils = {
39702 .calc_rssi = iwl5000_calc_rssi,
39703 };
39704
39705 -static struct iwl_ops iwl6000_ops = {
39706 +static const struct iwl_ops iwl6000_ops = {
39707 .ucode = &iwl5000_ucode,
39708 .lib = &iwl6000_lib,
39709 .hcmd = &iwl5000_hcmd,
39710 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39711 index 1a3dfa2..b3e0a61 100644
39712 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39713 +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39714 @@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
39715 u8 active_index = 0;
39716 s32 tpt = 0;
39717
39718 + pax_track_stack();
39719 +
39720 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
39721
39722 if (!ieee80211_is_data(hdr->frame_control) ||
39723 @@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
39724 u8 valid_tx_ant = 0;
39725 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
39726
39727 + pax_track_stack();
39728 +
39729 /* Override starting rate (index 0) if needed for debug purposes */
39730 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
39731
39732 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
39733 index 0e56d78..6a3c107 100644
39734 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c
39735 +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
39736 @@ -2911,7 +2911,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
39737 if (iwl_debug_level & IWL_DL_INFO)
39738 dev_printk(KERN_DEBUG, &(pdev->dev),
39739 "Disabling hw_scan\n");
39740 - iwl_hw_ops.hw_scan = NULL;
39741 + pax_open_kernel();
39742 + *(void **)&iwl_hw_ops.hw_scan = NULL;
39743 + pax_close_kernel();
39744 }
39745
39746 hw = iwl_alloc_all(cfg, &iwl_hw_ops);
39747 diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
39748 index cbc6290..eb323d7 100644
39749 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h
39750 +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
39751 @@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv);
39752 #endif
39753
39754 #else
39755 -#define IWL_DEBUG(__priv, level, fmt, args...)
39756 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
39757 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
39758 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
39759 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
39760 void *p, u32 len)
39761 {}
39762 diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39763 index a198bcf..8e68233 100644
39764 --- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39765 +++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39766 @@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
39767 int pos = 0;
39768 const size_t bufsz = sizeof(buf);
39769
39770 + pax_track_stack();
39771 +
39772 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
39773 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
39774 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
39775 @@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
39776 const size_t bufsz = sizeof(buf);
39777 ssize_t ret;
39778
39779 + pax_track_stack();
39780 +
39781 for (i = 0; i < AC_NUM; i++) {
39782 pos += scnprintf(buf + pos, bufsz - pos,
39783 "\tcw_min\tcw_max\taifsn\ttxop\n");
39784 diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
39785 index 3539ea4..b174bfa 100644
39786 --- a/drivers/net/wireless/iwlwifi/iwl-dev.h
39787 +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
39788 @@ -68,7 +68,7 @@ struct iwl_tx_queue;
39789
39790 /* shared structures from iwl-5000.c */
39791 extern struct iwl_mod_params iwl50_mod_params;
39792 -extern struct iwl_ops iwl5000_ops;
39793 +extern const struct iwl_ops iwl5000_ops;
39794 extern struct iwl_ucode_ops iwl5000_ucode;
39795 extern struct iwl_lib_ops iwl5000_lib;
39796 extern struct iwl_hcmd_ops iwl5000_hcmd;
39797 diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
39798 index 619590d..69235ee 100644
39799 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
39800 +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
39801 @@ -3927,7 +3927,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
39802 */
39803 if (iwl3945_mod_params.disable_hw_scan) {
39804 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
39805 - iwl3945_hw_ops.hw_scan = NULL;
39806 + pax_open_kernel();
39807 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
39808 + pax_close_kernel();
39809 }
39810
39811
39812 diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
39813 index 1465379..fe4d78b 100644
39814 --- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
39815 +++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
39816 @@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(struct file *filp,
39817 int buf_len = 512;
39818 size_t len = 0;
39819
39820 + pax_track_stack();
39821 +
39822 if (*ppos != 0)
39823 return 0;
39824 if (count < sizeof(buf))
39825 diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
39826 index 893a55c..7f66a50 100644
39827 --- a/drivers/net/wireless/libertas/debugfs.c
39828 +++ b/drivers/net/wireless/libertas/debugfs.c
39829 @@ -708,7 +708,7 @@ out_unlock:
39830 struct lbs_debugfs_files {
39831 const char *name;
39832 int perm;
39833 - struct file_operations fops;
39834 + const struct file_operations fops;
39835 };
39836
39837 static const struct lbs_debugfs_files debugfs_files[] = {
39838 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
39839 index 2ecbedb..42704f0 100644
39840 --- a/drivers/net/wireless/rndis_wlan.c
39841 +++ b/drivers/net/wireless/rndis_wlan.c
39842 @@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
39843
39844 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
39845
39846 - if (rts_threshold < 0 || rts_threshold > 2347)
39847 + if (rts_threshold > 2347)
39848 rts_threshold = 2347;
39849
39850 tmp = cpu_to_le32(rts_threshold);
39851 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
39852 index 334ccd6..47f8944 100644
39853 --- a/drivers/oprofile/buffer_sync.c
39854 +++ b/drivers/oprofile/buffer_sync.c
39855 @@ -342,7 +342,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
39856 if (cookie == NO_COOKIE)
39857 offset = pc;
39858 if (cookie == INVALID_COOKIE) {
39859 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
39860 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
39861 offset = pc;
39862 }
39863 if (cookie != last_cookie) {
39864 @@ -386,14 +386,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
39865 /* add userspace sample */
39866
39867 if (!mm) {
39868 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
39869 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
39870 return 0;
39871 }
39872
39873 cookie = lookup_dcookie(mm, s->eip, &offset);
39874
39875 if (cookie == INVALID_COOKIE) {
39876 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
39877 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
39878 return 0;
39879 }
39880
39881 @@ -562,7 +562,7 @@ void sync_buffer(int cpu)
39882 /* ignore backtraces if failed to add a sample */
39883 if (state == sb_bt_start) {
39884 state = sb_bt_ignore;
39885 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
39886 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
39887 }
39888 }
39889 release_mm(mm);
39890 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
39891 index 5df60a6..72f5c1c 100644
39892 --- a/drivers/oprofile/event_buffer.c
39893 +++ b/drivers/oprofile/event_buffer.c
39894 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
39895 }
39896
39897 if (buffer_pos == buffer_size) {
39898 - atomic_inc(&oprofile_stats.event_lost_overflow);
39899 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
39900 return;
39901 }
39902
39903 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
39904 index dc8a042..fe5f315 100644
39905 --- a/drivers/oprofile/oprof.c
39906 +++ b/drivers/oprofile/oprof.c
39907 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
39908 if (oprofile_ops.switch_events())
39909 return;
39910
39911 - atomic_inc(&oprofile_stats.multiplex_counter);
39912 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
39913 start_switch_worker();
39914 }
39915
39916 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
39917 index 61689e8..387f7f8 100644
39918 --- a/drivers/oprofile/oprofile_stats.c
39919 +++ b/drivers/oprofile/oprofile_stats.c
39920 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
39921 cpu_buf->sample_invalid_eip = 0;
39922 }
39923
39924 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
39925 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
39926 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
39927 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
39928 - atomic_set(&oprofile_stats.multiplex_counter, 0);
39929 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
39930 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
39931 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
39932 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
39933 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
39934 }
39935
39936
39937 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
39938 index 0b54e46..a37c527 100644
39939 --- a/drivers/oprofile/oprofile_stats.h
39940 +++ b/drivers/oprofile/oprofile_stats.h
39941 @@ -13,11 +13,11 @@
39942 #include <asm/atomic.h>
39943
39944 struct oprofile_stat_struct {
39945 - atomic_t sample_lost_no_mm;
39946 - atomic_t sample_lost_no_mapping;
39947 - atomic_t bt_lost_no_mapping;
39948 - atomic_t event_lost_overflow;
39949 - atomic_t multiplex_counter;
39950 + atomic_unchecked_t sample_lost_no_mm;
39951 + atomic_unchecked_t sample_lost_no_mapping;
39952 + atomic_unchecked_t bt_lost_no_mapping;
39953 + atomic_unchecked_t event_lost_overflow;
39954 + atomic_unchecked_t multiplex_counter;
39955 };
39956
39957 extern struct oprofile_stat_struct oprofile_stats;
39958 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
39959 index 2766a6d..80c77e2 100644
39960 --- a/drivers/oprofile/oprofilefs.c
39961 +++ b/drivers/oprofile/oprofilefs.c
39962 @@ -187,7 +187,7 @@ static const struct file_operations atomic_ro_fops = {
39963
39964
39965 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
39966 - char const *name, atomic_t *val)
39967 + char const *name, atomic_unchecked_t *val)
39968 {
39969 struct dentry *d = __oprofilefs_create_file(sb, root, name,
39970 &atomic_ro_fops, 0444);
39971 diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
39972 index 13a64bc..ad62835 100644
39973 --- a/drivers/parisc/pdc_stable.c
39974 +++ b/drivers/parisc/pdc_stable.c
39975 @@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj, struct attribute *attr,
39976 return ret;
39977 }
39978
39979 -static struct sysfs_ops pdcspath_attr_ops = {
39980 +static const struct sysfs_ops pdcspath_attr_ops = {
39981 .show = pdcspath_attr_show,
39982 .store = pdcspath_attr_store,
39983 };
39984 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
39985 index 8eefe56..40751a7 100644
39986 --- a/drivers/parport/procfs.c
39987 +++ b/drivers/parport/procfs.c
39988 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
39989
39990 *ppos += len;
39991
39992 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
39993 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
39994 }
39995
39996 #ifdef CONFIG_PARPORT_1284
39997 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
39998
39999 *ppos += len;
40000
40001 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
40002 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
40003 }
40004 #endif /* IEEE1284.3 support. */
40005
40006 diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
40007 index 73e7d8e..c80f3d2 100644
40008 --- a/drivers/pci/hotplug/acpiphp_glue.c
40009 +++ b/drivers/pci/hotplug/acpiphp_glue.c
40010 @@ -111,7 +111,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
40011 }
40012
40013
40014 -static struct acpi_dock_ops acpiphp_dock_ops = {
40015 +static const struct acpi_dock_ops acpiphp_dock_ops = {
40016 .handler = handle_hotplug_event_func,
40017 };
40018
40019 diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
40020 index 9fff878..ad0ad53 100644
40021 --- a/drivers/pci/hotplug/cpci_hotplug.h
40022 +++ b/drivers/pci/hotplug/cpci_hotplug.h
40023 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
40024 int (*hardware_test) (struct slot* slot, u32 value);
40025 u8 (*get_power) (struct slot* slot);
40026 int (*set_power) (struct slot* slot, int value);
40027 -};
40028 +} __no_const;
40029
40030 struct cpci_hp_controller {
40031 unsigned int irq;
40032 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
40033 index 76ba8a1..20ca857 100644
40034 --- a/drivers/pci/hotplug/cpqphp_nvram.c
40035 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
40036 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
40037
40038 void compaq_nvram_init (void __iomem *rom_start)
40039 {
40040 +
40041 +#ifndef CONFIG_PAX_KERNEXEC
40042 if (rom_start) {
40043 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
40044 }
40045 +#endif
40046 +
40047 dbg("int15 entry = %p\n", compaq_int15_entry_point);
40048
40049 /* initialize our int15 lock */
40050 diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
40051 index 6151389..0a894ef 100644
40052 --- a/drivers/pci/hotplug/fakephp.c
40053 +++ b/drivers/pci/hotplug/fakephp.c
40054 @@ -73,7 +73,7 @@ static void legacy_release(struct kobject *kobj)
40055 }
40056
40057 static struct kobj_type legacy_ktype = {
40058 - .sysfs_ops = &(struct sysfs_ops){
40059 + .sysfs_ops = &(const struct sysfs_ops){
40060 .store = legacy_store, .show = legacy_show
40061 },
40062 .release = &legacy_release,
40063 diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
40064 index 5b680df..fe05b7e 100644
40065 --- a/drivers/pci/intel-iommu.c
40066 +++ b/drivers/pci/intel-iommu.c
40067 @@ -2643,7 +2643,7 @@ error:
40068 return 0;
40069 }
40070
40071 -static dma_addr_t intel_map_page(struct device *dev, struct page *page,
40072 +dma_addr_t intel_map_page(struct device *dev, struct page *page,
40073 unsigned long offset, size_t size,
40074 enum dma_data_direction dir,
40075 struct dma_attrs *attrs)
40076 @@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
40077 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
40078 }
40079
40080 -static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
40081 +void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
40082 size_t size, enum dma_data_direction dir,
40083 struct dma_attrs *attrs)
40084 {
40085 @@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
40086 }
40087 }
40088
40089 -static void *intel_alloc_coherent(struct device *hwdev, size_t size,
40090 +void *intel_alloc_coherent(struct device *hwdev, size_t size,
40091 dma_addr_t *dma_handle, gfp_t flags)
40092 {
40093 void *vaddr;
40094 @@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size,
40095 return NULL;
40096 }
40097
40098 -static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
40099 +void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
40100 dma_addr_t dma_handle)
40101 {
40102 int order;
40103 @@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
40104 free_pages((unsigned long)vaddr, order);
40105 }
40106
40107 -static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
40108 +void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
40109 int nelems, enum dma_data_direction dir,
40110 struct dma_attrs *attrs)
40111 {
40112 @@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
40113 return nelems;
40114 }
40115
40116 -static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
40117 +int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
40118 enum dma_data_direction dir, struct dma_attrs *attrs)
40119 {
40120 int i;
40121 @@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
40122 return nelems;
40123 }
40124
40125 -static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
40126 +int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
40127 {
40128 return !dma_addr;
40129 }
40130
40131 -struct dma_map_ops intel_dma_ops = {
40132 +const struct dma_map_ops intel_dma_ops = {
40133 .alloc_coherent = intel_alloc_coherent,
40134 .free_coherent = intel_free_coherent,
40135 .map_sg = intel_map_sg,
40136 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
40137 index 5b7056c..607bc94 100644
40138 --- a/drivers/pci/pcie/aspm.c
40139 +++ b/drivers/pci/pcie/aspm.c
40140 @@ -27,9 +27,9 @@
40141 #define MODULE_PARAM_PREFIX "pcie_aspm."
40142
40143 /* Note: those are not register definitions */
40144 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
40145 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
40146 -#define ASPM_STATE_L1 (4) /* L1 state */
40147 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
40148 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
40149 +#define ASPM_STATE_L1 (4U) /* L1 state */
40150 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
40151 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
40152
40153 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
40154 index 8105e32..ca10419 100644
40155 --- a/drivers/pci/probe.c
40156 +++ b/drivers/pci/probe.c
40157 @@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
40158 return ret;
40159 }
40160
40161 -static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
40162 +static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
40163 struct device_attribute *attr,
40164 char *buf)
40165 {
40166 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
40167 }
40168
40169 -static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
40170 +static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
40171 struct device_attribute *attr,
40172 char *buf)
40173 {
40174 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
40175 index a03ad8c..024b0da 100644
40176 --- a/drivers/pci/proc.c
40177 +++ b/drivers/pci/proc.c
40178 @@ -480,7 +480,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
40179 static int __init pci_proc_init(void)
40180 {
40181 struct pci_dev *dev = NULL;
40182 +
40183 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
40184 +#ifdef CONFIG_GRKERNSEC_PROC_USER
40185 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
40186 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40187 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
40188 +#endif
40189 +#else
40190 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
40191 +#endif
40192 proc_create("devices", 0, proc_bus_pci_dir,
40193 &proc_bus_pci_dev_operations);
40194 proc_initialized = 1;
40195 diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
40196 index 8c02b6c..5584d8e 100644
40197 --- a/drivers/pci/slot.c
40198 +++ b/drivers/pci/slot.c
40199 @@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struct kobject *kobj,
40200 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
40201 }
40202
40203 -static struct sysfs_ops pci_slot_sysfs_ops = {
40204 +static const struct sysfs_ops pci_slot_sysfs_ops = {
40205 .show = pci_slot_attr_show,
40206 .store = pci_slot_attr_store,
40207 };
40208 diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
40209 index 30cf71d2..50938f1 100644
40210 --- a/drivers/pcmcia/pcmcia_ioctl.c
40211 +++ b/drivers/pcmcia/pcmcia_ioctl.c
40212 @@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode, struct file * file,
40213 return -EFAULT;
40214 }
40215 }
40216 - buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
40217 + buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
40218 if (!buf)
40219 return -ENOMEM;
40220
40221 diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
40222 index 52183c4..b224c69 100644
40223 --- a/drivers/platform/x86/acer-wmi.c
40224 +++ b/drivers/platform/x86/acer-wmi.c
40225 @@ -918,7 +918,7 @@ static int update_bl_status(struct backlight_device *bd)
40226 return 0;
40227 }
40228
40229 -static struct backlight_ops acer_bl_ops = {
40230 +static const struct backlight_ops acer_bl_ops = {
40231 .get_brightness = read_brightness,
40232 .update_status = update_bl_status,
40233 };
40234 diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
40235 index 767cb61..a87380b 100644
40236 --- a/drivers/platform/x86/asus-laptop.c
40237 +++ b/drivers/platform/x86/asus-laptop.c
40238 @@ -250,7 +250,7 @@ static struct backlight_device *asus_backlight_device;
40239 */
40240 static int read_brightness(struct backlight_device *bd);
40241 static int update_bl_status(struct backlight_device *bd);
40242 -static struct backlight_ops asusbl_ops = {
40243 +static const struct backlight_ops asusbl_ops = {
40244 .get_brightness = read_brightness,
40245 .update_status = update_bl_status,
40246 };
40247 diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
40248 index d66c07a..a4abaac 100644
40249 --- a/drivers/platform/x86/asus_acpi.c
40250 +++ b/drivers/platform/x86/asus_acpi.c
40251 @@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_device *device, int type)
40252 return 0;
40253 }
40254
40255 -static struct backlight_ops asus_backlight_data = {
40256 +static const struct backlight_ops asus_backlight_data = {
40257 .get_brightness = read_brightness,
40258 .update_status = set_brightness_status,
40259 };
40260 diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
40261 index 11003bb..550ff1b 100644
40262 --- a/drivers/platform/x86/compal-laptop.c
40263 +++ b/drivers/platform/x86/compal-laptop.c
40264 @@ -163,7 +163,7 @@ static int bl_update_status(struct backlight_device *b)
40265 return set_lcd_level(b->props.brightness);
40266 }
40267
40268 -static struct backlight_ops compalbl_ops = {
40269 +static const struct backlight_ops compalbl_ops = {
40270 .get_brightness = bl_get_brightness,
40271 .update_status = bl_update_status,
40272 };
40273 diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
40274 index 07a74da..9dc99fa 100644
40275 --- a/drivers/platform/x86/dell-laptop.c
40276 +++ b/drivers/platform/x86/dell-laptop.c
40277 @@ -318,7 +318,7 @@ static int dell_get_intensity(struct backlight_device *bd)
40278 return buffer.output[1];
40279 }
40280
40281 -static struct backlight_ops dell_ops = {
40282 +static const struct backlight_ops dell_ops = {
40283 .get_brightness = dell_get_intensity,
40284 .update_status = dell_send_intensity,
40285 };
40286 diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
40287 index c533b1c..5c81f22 100644
40288 --- a/drivers/platform/x86/eeepc-laptop.c
40289 +++ b/drivers/platform/x86/eeepc-laptop.c
40290 @@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device;
40291 */
40292 static int read_brightness(struct backlight_device *bd);
40293 static int update_bl_status(struct backlight_device *bd);
40294 -static struct backlight_ops eeepcbl_ops = {
40295 +static const struct backlight_ops eeepcbl_ops = {
40296 .get_brightness = read_brightness,
40297 .update_status = update_bl_status,
40298 };
40299 diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
40300 index bcd4ba8..a249b35 100644
40301 --- a/drivers/platform/x86/fujitsu-laptop.c
40302 +++ b/drivers/platform/x86/fujitsu-laptop.c
40303 @@ -436,7 +436,7 @@ static int bl_update_status(struct backlight_device *b)
40304 return ret;
40305 }
40306
40307 -static struct backlight_ops fujitsubl_ops = {
40308 +static const struct backlight_ops fujitsubl_ops = {
40309 .get_brightness = bl_get_brightness,
40310 .update_status = bl_update_status,
40311 };
40312 diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
40313 index 759763d..1093ba2 100644
40314 --- a/drivers/platform/x86/msi-laptop.c
40315 +++ b/drivers/platform/x86/msi-laptop.c
40316 @@ -161,7 +161,7 @@ static int bl_update_status(struct backlight_device *b)
40317 return set_lcd_level(b->props.brightness);
40318 }
40319
40320 -static struct backlight_ops msibl_ops = {
40321 +static const struct backlight_ops msibl_ops = {
40322 .get_brightness = bl_get_brightness,
40323 .update_status = bl_update_status,
40324 };
40325 diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
40326 index fe7cf01..9012d8d 100644
40327 --- a/drivers/platform/x86/panasonic-laptop.c
40328 +++ b/drivers/platform/x86/panasonic-laptop.c
40329 @@ -352,7 +352,7 @@ static int bl_set_status(struct backlight_device *bd)
40330 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
40331 }
40332
40333 -static struct backlight_ops pcc_backlight_ops = {
40334 +static const struct backlight_ops pcc_backlight_ops = {
40335 .get_brightness = bl_get,
40336 .update_status = bl_set_status,
40337 };
40338 diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
40339 index a2a742c..b37e25e 100644
40340 --- a/drivers/platform/x86/sony-laptop.c
40341 +++ b/drivers/platform/x86/sony-laptop.c
40342 @@ -850,7 +850,7 @@ static int sony_backlight_get_brightness(struct backlight_device *bd)
40343 }
40344
40345 static struct backlight_device *sony_backlight_device;
40346 -static struct backlight_ops sony_backlight_ops = {
40347 +static const struct backlight_ops sony_backlight_ops = {
40348 .update_status = sony_backlight_update_status,
40349 .get_brightness = sony_backlight_get_brightness,
40350 };
40351 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
40352 index 68271ae..5e8fb10 100644
40353 --- a/drivers/platform/x86/thinkpad_acpi.c
40354 +++ b/drivers/platform/x86/thinkpad_acpi.c
40355 @@ -2139,7 +2139,7 @@ static int hotkey_mask_get(void)
40356 return 0;
40357 }
40358
40359 -void static hotkey_mask_warn_incomplete_mask(void)
40360 +static void hotkey_mask_warn_incomplete_mask(void)
40361 {
40362 /* log only what the user can fix... */
40363 const u32 wantedmask = hotkey_driver_mask &
40364 @@ -6125,7 +6125,7 @@ static void tpacpi_brightness_notify_change(void)
40365 BACKLIGHT_UPDATE_HOTKEY);
40366 }
40367
40368 -static struct backlight_ops ibm_backlight_data = {
40369 +static const struct backlight_ops ibm_backlight_data = {
40370 .get_brightness = brightness_get,
40371 .update_status = brightness_update_status,
40372 };
40373 diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
40374 index 51c0a8b..0786629 100644
40375 --- a/drivers/platform/x86/toshiba_acpi.c
40376 +++ b/drivers/platform/x86/toshiba_acpi.c
40377 @@ -671,7 +671,7 @@ static acpi_status remove_device(void)
40378 return AE_OK;
40379 }
40380
40381 -static struct backlight_ops toshiba_backlight_data = {
40382 +static const struct backlight_ops toshiba_backlight_data = {
40383 .get_brightness = get_lcd,
40384 .update_status = set_lcd_status,
40385 };
40386 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
40387 index fc83783c..cf370d7 100644
40388 --- a/drivers/pnp/pnpbios/bioscalls.c
40389 +++ b/drivers/pnp/pnpbios/bioscalls.c
40390 @@ -60,7 +60,7 @@ do { \
40391 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
40392 } while(0)
40393
40394 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
40395 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
40396 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
40397
40398 /*
40399 @@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40400
40401 cpu = get_cpu();
40402 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
40403 +
40404 + pax_open_kernel();
40405 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
40406 + pax_close_kernel();
40407
40408 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
40409 spin_lock_irqsave(&pnp_bios_lock, flags);
40410 @@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40411 :"memory");
40412 spin_unlock_irqrestore(&pnp_bios_lock, flags);
40413
40414 + pax_open_kernel();
40415 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
40416 + pax_close_kernel();
40417 +
40418 put_cpu();
40419
40420 /* If we get here and this is set then the PnP BIOS faulted on us. */
40421 @@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
40422 return status;
40423 }
40424
40425 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
40426 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
40427 {
40428 int i;
40429
40430 @@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40431 pnp_bios_callpoint.offset = header->fields.pm16offset;
40432 pnp_bios_callpoint.segment = PNP_CS16;
40433
40434 + pax_open_kernel();
40435 +
40436 for_each_possible_cpu(i) {
40437 struct desc_struct *gdt = get_cpu_gdt_table(i);
40438 if (!gdt)
40439 @@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40440 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
40441 (unsigned long)__va(header->fields.pm16dseg));
40442 }
40443 +
40444 + pax_close_kernel();
40445 }
40446 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
40447 index ba97654..66b99d4 100644
40448 --- a/drivers/pnp/resource.c
40449 +++ b/drivers/pnp/resource.c
40450 @@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
40451 return 1;
40452
40453 /* check if the resource is valid */
40454 - if (*irq < 0 || *irq > 15)
40455 + if (*irq > 15)
40456 return 0;
40457
40458 /* check if the resource is reserved */
40459 @@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
40460 return 1;
40461
40462 /* check if the resource is valid */
40463 - if (*dma < 0 || *dma == 4 || *dma > 7)
40464 + if (*dma == 4 || *dma > 7)
40465 return 0;
40466
40467 /* check if the resource is reserved */
40468 diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
40469 index 62bb981..24a2dc9 100644
40470 --- a/drivers/power/bq27x00_battery.c
40471 +++ b/drivers/power/bq27x00_battery.c
40472 @@ -44,7 +44,7 @@ struct bq27x00_device_info;
40473 struct bq27x00_access_methods {
40474 int (*read)(u8 reg, int *rt_value, int b_single,
40475 struct bq27x00_device_info *di);
40476 -};
40477 +} __no_const;
40478
40479 struct bq27x00_device_info {
40480 struct device *dev;
40481 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
40482 index 62227cd..b5b538b 100644
40483 --- a/drivers/rtc/rtc-dev.c
40484 +++ b/drivers/rtc/rtc-dev.c
40485 @@ -14,6 +14,7 @@
40486 #include <linux/module.h>
40487 #include <linux/rtc.h>
40488 #include <linux/sched.h>
40489 +#include <linux/grsecurity.h>
40490 #include "rtc-core.h"
40491
40492 static dev_t rtc_devt;
40493 @@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *file,
40494 if (copy_from_user(&tm, uarg, sizeof(tm)))
40495 return -EFAULT;
40496
40497 + gr_log_timechange();
40498 +
40499 return rtc_set_time(rtc, &tm);
40500
40501 case RTC_PIE_ON:
40502 diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
40503 index 968e3c7..fbc637a 100644
40504 --- a/drivers/s390/cio/qdio_perf.c
40505 +++ b/drivers/s390/cio/qdio_perf.c
40506 @@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_pde;
40507 static int qdio_perf_proc_show(struct seq_file *m, void *v)
40508 {
40509 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
40510 - (long)atomic_long_read(&perf_stats.qdio_int));
40511 + (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
40512 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
40513 - (long)atomic_long_read(&perf_stats.pci_int));
40514 + (long)atomic_long_read_unchecked(&perf_stats.pci_int));
40515 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
40516 - (long)atomic_long_read(&perf_stats.thin_int));
40517 + (long)atomic_long_read_unchecked(&perf_stats.thin_int));
40518 seq_printf(m, "\n");
40519 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
40520 - (long)atomic_long_read(&perf_stats.tasklet_inbound));
40521 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
40522 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
40523 - (long)atomic_long_read(&perf_stats.tasklet_outbound));
40524 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
40525 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
40526 - (long)atomic_long_read(&perf_stats.tasklet_thinint),
40527 - (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
40528 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
40529 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
40530 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
40531 - (long)atomic_long_read(&perf_stats.thinint_inbound),
40532 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
40533 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
40534 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
40535 seq_printf(m, "\n");
40536 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
40537 - (long)atomic_long_read(&perf_stats.siga_in));
40538 + (long)atomic_long_read_unchecked(&perf_stats.siga_in));
40539 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
40540 - (long)atomic_long_read(&perf_stats.siga_out));
40541 + (long)atomic_long_read_unchecked(&perf_stats.siga_out));
40542 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
40543 - (long)atomic_long_read(&perf_stats.siga_sync));
40544 + (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
40545 seq_printf(m, "\n");
40546 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
40547 - (long)atomic_long_read(&perf_stats.inbound_handler));
40548 + (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
40549 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
40550 - (long)atomic_long_read(&perf_stats.outbound_handler));
40551 + (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
40552 seq_printf(m, "\n");
40553 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
40554 - (long)atomic_long_read(&perf_stats.fast_requeue));
40555 + (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
40556 seq_printf(m, "Number of outbound target full condition\t: %li\n",
40557 - (long)atomic_long_read(&perf_stats.outbound_target_full));
40558 + (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
40559 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
40560 - (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
40561 + (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
40562 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
40563 - (long)atomic_long_read(&perf_stats.debug_stop_polling));
40564 + (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
40565 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
40566 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
40567 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
40568 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
40569 - (long)atomic_long_read(&perf_stats.debug_eqbs_all),
40570 - (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
40571 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
40572 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
40573 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
40574 - (long)atomic_long_read(&perf_stats.debug_sqbs_all),
40575 - (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
40576 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
40577 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
40578 seq_printf(m, "\n");
40579 return 0;
40580 }
40581 diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
40582 index ff4504c..b3604c3 100644
40583 --- a/drivers/s390/cio/qdio_perf.h
40584 +++ b/drivers/s390/cio/qdio_perf.h
40585 @@ -13,46 +13,46 @@
40586
40587 struct qdio_perf_stats {
40588 /* interrupt handler calls */
40589 - atomic_long_t qdio_int;
40590 - atomic_long_t pci_int;
40591 - atomic_long_t thin_int;
40592 + atomic_long_unchecked_t qdio_int;
40593 + atomic_long_unchecked_t pci_int;
40594 + atomic_long_unchecked_t thin_int;
40595
40596 /* tasklet runs */
40597 - atomic_long_t tasklet_inbound;
40598 - atomic_long_t tasklet_outbound;
40599 - atomic_long_t tasklet_thinint;
40600 - atomic_long_t tasklet_thinint_loop;
40601 - atomic_long_t thinint_inbound;
40602 - atomic_long_t thinint_inbound_loop;
40603 - atomic_long_t thinint_inbound_loop2;
40604 + atomic_long_unchecked_t tasklet_inbound;
40605 + atomic_long_unchecked_t tasklet_outbound;
40606 + atomic_long_unchecked_t tasklet_thinint;
40607 + atomic_long_unchecked_t tasklet_thinint_loop;
40608 + atomic_long_unchecked_t thinint_inbound;
40609 + atomic_long_unchecked_t thinint_inbound_loop;
40610 + atomic_long_unchecked_t thinint_inbound_loop2;
40611
40612 /* signal adapter calls */
40613 - atomic_long_t siga_out;
40614 - atomic_long_t siga_in;
40615 - atomic_long_t siga_sync;
40616 + atomic_long_unchecked_t siga_out;
40617 + atomic_long_unchecked_t siga_in;
40618 + atomic_long_unchecked_t siga_sync;
40619
40620 /* misc */
40621 - atomic_long_t inbound_handler;
40622 - atomic_long_t outbound_handler;
40623 - atomic_long_t fast_requeue;
40624 - atomic_long_t outbound_target_full;
40625 + atomic_long_unchecked_t inbound_handler;
40626 + atomic_long_unchecked_t outbound_handler;
40627 + atomic_long_unchecked_t fast_requeue;
40628 + atomic_long_unchecked_t outbound_target_full;
40629
40630 /* for debugging */
40631 - atomic_long_t debug_tl_out_timer;
40632 - atomic_long_t debug_stop_polling;
40633 - atomic_long_t debug_eqbs_all;
40634 - atomic_long_t debug_eqbs_incomplete;
40635 - atomic_long_t debug_sqbs_all;
40636 - atomic_long_t debug_sqbs_incomplete;
40637 + atomic_long_unchecked_t debug_tl_out_timer;
40638 + atomic_long_unchecked_t debug_stop_polling;
40639 + atomic_long_unchecked_t debug_eqbs_all;
40640 + atomic_long_unchecked_t debug_eqbs_incomplete;
40641 + atomic_long_unchecked_t debug_sqbs_all;
40642 + atomic_long_unchecked_t debug_sqbs_incomplete;
40643 };
40644
40645 extern struct qdio_perf_stats perf_stats;
40646 extern int qdio_performance_stats;
40647
40648 -static inline void qdio_perf_stat_inc(atomic_long_t *count)
40649 +static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
40650 {
40651 if (qdio_performance_stats)
40652 - atomic_long_inc(count);
40653 + atomic_long_inc_unchecked(count);
40654 }
40655
40656 int qdio_setup_perf_stats(void);
40657 diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
40658 index 1ddcf40..a85f062 100644
40659 --- a/drivers/scsi/BusLogic.c
40660 +++ b/drivers/scsi/BusLogic.c
40661 @@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda
40662 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
40663 *PrototypeHostAdapter)
40664 {
40665 + pax_track_stack();
40666 +
40667 /*
40668 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
40669 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
40670 diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
40671 index cdbdec9..b7d560b 100644
40672 --- a/drivers/scsi/aacraid/aacraid.h
40673 +++ b/drivers/scsi/aacraid/aacraid.h
40674 @@ -471,7 +471,7 @@ struct adapter_ops
40675 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
40676 /* Administrative operations */
40677 int (*adapter_comm)(struct aac_dev * dev, int comm);
40678 -};
40679 +} __no_const;
40680
40681 /*
40682 * Define which interrupt handler needs to be installed
40683 diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
40684 index a5b8e7b..a6a0e43 100644
40685 --- a/drivers/scsi/aacraid/commctrl.c
40686 +++ b/drivers/scsi/aacraid/commctrl.c
40687 @@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
40688 u32 actual_fibsize64, actual_fibsize = 0;
40689 int i;
40690
40691 + pax_track_stack();
40692
40693 if (dev->in_reset) {
40694 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
40695 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
40696 index 9b97c3e..f099725 100644
40697 --- a/drivers/scsi/aacraid/linit.c
40698 +++ b/drivers/scsi/aacraid/linit.c
40699 @@ -91,7 +91,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
40700 #elif defined(__devinitconst)
40701 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
40702 #else
40703 -static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
40704 +static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
40705 #endif
40706 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
40707 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
40708 diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
40709 index 996f722..9127845 100644
40710 --- a/drivers/scsi/aic94xx/aic94xx_init.c
40711 +++ b/drivers/scsi/aic94xx/aic94xx_init.c
40712 @@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(struct device *dev,
40713 flash_error_table[i].reason);
40714 }
40715
40716 -static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
40717 +static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
40718 asd_show_update_bios, asd_store_update_bios);
40719
40720 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
40721 @@ -1011,7 +1011,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
40722 .lldd_control_phy = asd_control_phy,
40723 };
40724
40725 -static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
40726 +static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
40727 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
40728 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
40729 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
40730 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
40731 index 58efd4b..cb48dc7 100644
40732 --- a/drivers/scsi/bfa/bfa_ioc.h
40733 +++ b/drivers/scsi/bfa/bfa_ioc.h
40734 @@ -127,7 +127,7 @@ struct bfa_ioc_cbfn_s {
40735 bfa_ioc_disable_cbfn_t disable_cbfn;
40736 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
40737 bfa_ioc_reset_cbfn_t reset_cbfn;
40738 -};
40739 +} __no_const;
40740
40741 /**
40742 * Heartbeat failure notification queue element.
40743 diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h
40744 index 7ad177e..5503586 100644
40745 --- a/drivers/scsi/bfa/bfa_iocfc.h
40746 +++ b/drivers/scsi/bfa/bfa_iocfc.h
40747 @@ -61,7 +61,7 @@ struct bfa_hwif_s {
40748 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
40749 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
40750 u32 *nvecs, u32 *maxvec);
40751 -};
40752 +} __no_const;
40753 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
40754
40755 struct bfa_iocfc_s {
40756 diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
40757 index 4967643..cbec06b 100644
40758 --- a/drivers/scsi/dpt_i2o.c
40759 +++ b/drivers/scsi/dpt_i2o.c
40760 @@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
40761 dma_addr_t addr;
40762 ulong flags = 0;
40763
40764 + pax_track_stack();
40765 +
40766 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
40767 // get user msg size in u32s
40768 if(get_user(size, &user_msg[0])){
40769 @@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
40770 s32 rcode;
40771 dma_addr_t addr;
40772
40773 + pax_track_stack();
40774 +
40775 memset(msg, 0 , sizeof(msg));
40776 len = scsi_bufflen(cmd);
40777 direction = 0x00000000;
40778 diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
40779 index c7076ce..e20c67c 100644
40780 --- a/drivers/scsi/eata.c
40781 +++ b/drivers/scsi/eata.c
40782 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long port_base, unsigned int j,
40783 struct hostdata *ha;
40784 char name[16];
40785
40786 + pax_track_stack();
40787 +
40788 sprintf(name, "%s%d", driver_name, j);
40789
40790 if (!request_region(port_base, REGION_SIZE, driver_name)) {
40791 diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
40792 index 11ae5c9..891daec 100644
40793 --- a/drivers/scsi/fcoe/libfcoe.c
40794 +++ b/drivers/scsi/fcoe/libfcoe.c
40795 @@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
40796 size_t rlen;
40797 size_t dlen;
40798
40799 + pax_track_stack();
40800 +
40801 fiph = (struct fip_header *)skb->data;
40802 sub = fiph->fip_subcode;
40803 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
40804 diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
40805 index 71c7bbe..e93088a 100644
40806 --- a/drivers/scsi/fnic/fnic_main.c
40807 +++ b/drivers/scsi/fnic/fnic_main.c
40808 @@ -669,7 +669,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
40809 /* Start local port initiatialization */
40810
40811 lp->link_up = 0;
40812 - lp->tt = fnic_transport_template;
40813 + memcpy((void *)&lp->tt, &fnic_transport_template, sizeof(fnic_transport_template));
40814
40815 lp->max_retry_count = fnic->config.flogi_retries;
40816 lp->max_rport_retry_count = fnic->config.plogi_retries;
40817 diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
40818 index bb96d74..9ec3ce4 100644
40819 --- a/drivers/scsi/gdth.c
40820 +++ b/drivers/scsi/gdth.c
40821 @@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
40822 ulong flags;
40823 gdth_ha_str *ha;
40824
40825 + pax_track_stack();
40826 +
40827 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
40828 return -EFAULT;
40829 ha = gdth_find_ha(ldrv.ionode);
40830 @@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg, char *cmnd)
40831 gdth_ha_str *ha;
40832 int rval;
40833
40834 + pax_track_stack();
40835 +
40836 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
40837 res.number >= MAX_HDRIVES)
40838 return -EFAULT;
40839 @@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg, char *cmnd)
40840 gdth_ha_str *ha;
40841 int rval;
40842
40843 + pax_track_stack();
40844 +
40845 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
40846 return -EFAULT;
40847 ha = gdth_find_ha(gen.ionode);
40848 @@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
40849 int i;
40850 gdth_cmd_str gdtcmd;
40851 char cmnd[MAX_COMMAND_SIZE];
40852 +
40853 + pax_track_stack();
40854 +
40855 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
40856
40857 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
40858 diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
40859 index 1258da3..20d8ae6 100644
40860 --- a/drivers/scsi/gdth_proc.c
40861 +++ b/drivers/scsi/gdth_proc.c
40862 @@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
40863 ulong64 paddr;
40864
40865 char cmnd[MAX_COMMAND_SIZE];
40866 +
40867 + pax_track_stack();
40868 +
40869 memset(cmnd, 0xff, 12);
40870 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
40871
40872 @@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
40873 gdth_hget_str *phg;
40874 char cmnd[MAX_COMMAND_SIZE];
40875
40876 + pax_track_stack();
40877 +
40878 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
40879 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
40880 if (!gdtcmd || !estr)
40881 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
40882 index d03a926..f324286 100644
40883 --- a/drivers/scsi/hosts.c
40884 +++ b/drivers/scsi/hosts.c
40885 @@ -40,7 +40,7 @@
40886 #include "scsi_logging.h"
40887
40888
40889 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
40890 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
40891
40892
40893 static void scsi_host_cls_release(struct device *dev)
40894 @@ -347,7 +347,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
40895 * subtract one because we increment first then return, but we need to
40896 * know what the next host number was before increment
40897 */
40898 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
40899 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
40900 shost->dma_channel = 0xff;
40901
40902 /* These three are default values which can be overridden */
40903 diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
40904 index a601159..55e19d2 100644
40905 --- a/drivers/scsi/ipr.c
40906 +++ b/drivers/scsi/ipr.c
40907 @@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
40908 return true;
40909 }
40910
40911 -static struct ata_port_operations ipr_sata_ops = {
40912 +static const struct ata_port_operations ipr_sata_ops = {
40913 .phy_reset = ipr_ata_phy_reset,
40914 .hardreset = ipr_sata_reset,
40915 .post_internal_cmd = ipr_ata_post_internal,
40916 diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
40917 index 4e49fbc..97907ff 100644
40918 --- a/drivers/scsi/ips.h
40919 +++ b/drivers/scsi/ips.h
40920 @@ -1027,7 +1027,7 @@ typedef struct {
40921 int (*intr)(struct ips_ha *);
40922 void (*enableint)(struct ips_ha *);
40923 uint32_t (*statupd)(struct ips_ha *);
40924 -} ips_hw_func_t;
40925 +} __no_const ips_hw_func_t;
40926
40927 typedef struct ips_ha {
40928 uint8_t ha_id[IPS_MAX_CHANNELS+1];
40929 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
40930 index c1c1574..a9c9348 100644
40931 --- a/drivers/scsi/libfc/fc_exch.c
40932 +++ b/drivers/scsi/libfc/fc_exch.c
40933 @@ -86,12 +86,12 @@ struct fc_exch_mgr {
40934 * all together if not used XXX
40935 */
40936 struct {
40937 - atomic_t no_free_exch;
40938 - atomic_t no_free_exch_xid;
40939 - atomic_t xid_not_found;
40940 - atomic_t xid_busy;
40941 - atomic_t seq_not_found;
40942 - atomic_t non_bls_resp;
40943 + atomic_unchecked_t no_free_exch;
40944 + atomic_unchecked_t no_free_exch_xid;
40945 + atomic_unchecked_t xid_not_found;
40946 + atomic_unchecked_t xid_busy;
40947 + atomic_unchecked_t seq_not_found;
40948 + atomic_unchecked_t non_bls_resp;
40949 } stats;
40950 };
40951 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
40952 @@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
40953 /* allocate memory for exchange */
40954 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
40955 if (!ep) {
40956 - atomic_inc(&mp->stats.no_free_exch);
40957 + atomic_inc_unchecked(&mp->stats.no_free_exch);
40958 goto out;
40959 }
40960 memset(ep, 0, sizeof(*ep));
40961 @@ -557,7 +557,7 @@ out:
40962 return ep;
40963 err:
40964 spin_unlock_bh(&pool->lock);
40965 - atomic_inc(&mp->stats.no_free_exch_xid);
40966 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
40967 mempool_free(ep, mp->ep_pool);
40968 return NULL;
40969 }
40970 @@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40971 xid = ntohs(fh->fh_ox_id); /* we originated exch */
40972 ep = fc_exch_find(mp, xid);
40973 if (!ep) {
40974 - atomic_inc(&mp->stats.xid_not_found);
40975 + atomic_inc_unchecked(&mp->stats.xid_not_found);
40976 reject = FC_RJT_OX_ID;
40977 goto out;
40978 }
40979 @@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40980 ep = fc_exch_find(mp, xid);
40981 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
40982 if (ep) {
40983 - atomic_inc(&mp->stats.xid_busy);
40984 + atomic_inc_unchecked(&mp->stats.xid_busy);
40985 reject = FC_RJT_RX_ID;
40986 goto rel;
40987 }
40988 @@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40989 }
40990 xid = ep->xid; /* get our XID */
40991 } else if (!ep) {
40992 - atomic_inc(&mp->stats.xid_not_found);
40993 + atomic_inc_unchecked(&mp->stats.xid_not_found);
40994 reject = FC_RJT_RX_ID; /* XID not found */
40995 goto out;
40996 }
40997 @@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40998 } else {
40999 sp = &ep->seq;
41000 if (sp->id != fh->fh_seq_id) {
41001 - atomic_inc(&mp->stats.seq_not_found);
41002 + atomic_inc_unchecked(&mp->stats.seq_not_found);
41003 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
41004 goto rel;
41005 }
41006 @@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41007
41008 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
41009 if (!ep) {
41010 - atomic_inc(&mp->stats.xid_not_found);
41011 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41012 goto out;
41013 }
41014 if (ep->esb_stat & ESB_ST_COMPLETE) {
41015 - atomic_inc(&mp->stats.xid_not_found);
41016 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41017 goto out;
41018 }
41019 if (ep->rxid == FC_XID_UNKNOWN)
41020 ep->rxid = ntohs(fh->fh_rx_id);
41021 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
41022 - atomic_inc(&mp->stats.xid_not_found);
41023 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41024 goto rel;
41025 }
41026 if (ep->did != ntoh24(fh->fh_s_id) &&
41027 ep->did != FC_FID_FLOGI) {
41028 - atomic_inc(&mp->stats.xid_not_found);
41029 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41030 goto rel;
41031 }
41032 sof = fr_sof(fp);
41033 @@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41034 } else {
41035 sp = &ep->seq;
41036 if (sp->id != fh->fh_seq_id) {
41037 - atomic_inc(&mp->stats.seq_not_found);
41038 + atomic_inc_unchecked(&mp->stats.seq_not_found);
41039 goto rel;
41040 }
41041 }
41042 @@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41043 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
41044
41045 if (!sp)
41046 - atomic_inc(&mp->stats.xid_not_found);
41047 + atomic_inc_unchecked(&mp->stats.xid_not_found);
41048 else
41049 - atomic_inc(&mp->stats.non_bls_resp);
41050 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
41051
41052 fc_frame_free(fp);
41053 }
41054 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
41055 index 0ee989f..a582241 100644
41056 --- a/drivers/scsi/libsas/sas_ata.c
41057 +++ b/drivers/scsi/libsas/sas_ata.c
41058 @@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
41059 }
41060 }
41061
41062 -static struct ata_port_operations sas_sata_ops = {
41063 +static const struct ata_port_operations sas_sata_ops = {
41064 .phy_reset = sas_ata_phy_reset,
41065 .post_internal_cmd = sas_ata_post_internal,
41066 .qc_defer = ata_std_qc_defer,
41067 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
41068 index aa10f79..5cc79e4 100644
41069 --- a/drivers/scsi/lpfc/lpfc.h
41070 +++ b/drivers/scsi/lpfc/lpfc.h
41071 @@ -400,7 +400,7 @@ struct lpfc_vport {
41072 struct dentry *debug_nodelist;
41073 struct dentry *vport_debugfs_root;
41074 struct lpfc_debugfs_trc *disc_trc;
41075 - atomic_t disc_trc_cnt;
41076 + atomic_unchecked_t disc_trc_cnt;
41077 #endif
41078 uint8_t stat_data_enabled;
41079 uint8_t stat_data_blocked;
41080 @@ -725,8 +725,8 @@ struct lpfc_hba {
41081 struct timer_list fabric_block_timer;
41082 unsigned long bit_flags;
41083 #define FABRIC_COMANDS_BLOCKED 0
41084 - atomic_t num_rsrc_err;
41085 - atomic_t num_cmd_success;
41086 + atomic_unchecked_t num_rsrc_err;
41087 + atomic_unchecked_t num_cmd_success;
41088 unsigned long last_rsrc_error_time;
41089 unsigned long last_ramp_down_time;
41090 unsigned long last_ramp_up_time;
41091 @@ -740,7 +740,7 @@ struct lpfc_hba {
41092 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
41093 struct dentry *debug_slow_ring_trc;
41094 struct lpfc_debugfs_trc *slow_ring_trc;
41095 - atomic_t slow_ring_trc_cnt;
41096 + atomic_unchecked_t slow_ring_trc_cnt;
41097 #endif
41098
41099 /* Used for deferred freeing of ELS data buffers */
41100 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
41101 index 8d0f0de..7c77a62 100644
41102 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
41103 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
41104 @@ -124,7 +124,7 @@ struct lpfc_debug {
41105 int len;
41106 };
41107
41108 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
41109 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
41110 static unsigned long lpfc_debugfs_start_time = 0L;
41111
41112 /**
41113 @@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
41114 lpfc_debugfs_enable = 0;
41115
41116 len = 0;
41117 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
41118 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
41119 (lpfc_debugfs_max_disc_trc - 1);
41120 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
41121 dtp = vport->disc_trc + i;
41122 @@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
41123 lpfc_debugfs_enable = 0;
41124
41125 len = 0;
41126 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
41127 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
41128 (lpfc_debugfs_max_slow_ring_trc - 1);
41129 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
41130 dtp = phba->slow_ring_trc + i;
41131 @@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
41132 uint32_t *ptr;
41133 char buffer[1024];
41134
41135 + pax_track_stack();
41136 +
41137 off = 0;
41138 spin_lock_irq(&phba->hbalock);
41139
41140 @@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
41141 !vport || !vport->disc_trc)
41142 return;
41143
41144 - index = atomic_inc_return(&vport->disc_trc_cnt) &
41145 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
41146 (lpfc_debugfs_max_disc_trc - 1);
41147 dtp = vport->disc_trc + index;
41148 dtp->fmt = fmt;
41149 dtp->data1 = data1;
41150 dtp->data2 = data2;
41151 dtp->data3 = data3;
41152 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
41153 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
41154 dtp->jif = jiffies;
41155 #endif
41156 return;
41157 @@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
41158 !phba || !phba->slow_ring_trc)
41159 return;
41160
41161 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
41162 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
41163 (lpfc_debugfs_max_slow_ring_trc - 1);
41164 dtp = phba->slow_ring_trc + index;
41165 dtp->fmt = fmt;
41166 dtp->data1 = data1;
41167 dtp->data2 = data2;
41168 dtp->data3 = data3;
41169 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
41170 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
41171 dtp->jif = jiffies;
41172 #endif
41173 return;
41174 @@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
41175 "slow_ring buffer\n");
41176 goto debug_failed;
41177 }
41178 - atomic_set(&phba->slow_ring_trc_cnt, 0);
41179 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
41180 memset(phba->slow_ring_trc, 0,
41181 (sizeof(struct lpfc_debugfs_trc) *
41182 lpfc_debugfs_max_slow_ring_trc));
41183 @@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
41184 "buffer\n");
41185 goto debug_failed;
41186 }
41187 - atomic_set(&vport->disc_trc_cnt, 0);
41188 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
41189
41190 snprintf(name, sizeof(name), "discovery_trace");
41191 vport->debug_disc_trc =
41192 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
41193 index 549bc7d..8189dbb 100644
41194 --- a/drivers/scsi/lpfc/lpfc_init.c
41195 +++ b/drivers/scsi/lpfc/lpfc_init.c
41196 @@ -8021,8 +8021,10 @@ lpfc_init(void)
41197 printk(LPFC_COPYRIGHT "\n");
41198
41199 if (lpfc_enable_npiv) {
41200 - lpfc_transport_functions.vport_create = lpfc_vport_create;
41201 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
41202 + pax_open_kernel();
41203 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
41204 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
41205 + pax_close_kernel();
41206 }
41207 lpfc_transport_template =
41208 fc_attach_transport(&lpfc_transport_functions);
41209 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
41210 index c88f59f..ff2a42f 100644
41211 --- a/drivers/scsi/lpfc/lpfc_scsi.c
41212 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
41213 @@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
41214 uint32_t evt_posted;
41215
41216 spin_lock_irqsave(&phba->hbalock, flags);
41217 - atomic_inc(&phba->num_rsrc_err);
41218 + atomic_inc_unchecked(&phba->num_rsrc_err);
41219 phba->last_rsrc_error_time = jiffies;
41220
41221 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
41222 @@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
41223 unsigned long flags;
41224 struct lpfc_hba *phba = vport->phba;
41225 uint32_t evt_posted;
41226 - atomic_inc(&phba->num_cmd_success);
41227 + atomic_inc_unchecked(&phba->num_cmd_success);
41228
41229 if (vport->cfg_lun_queue_depth <= queue_depth)
41230 return;
41231 @@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
41232 int i;
41233 struct lpfc_rport_data *rdata;
41234
41235 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
41236 - num_cmd_success = atomic_read(&phba->num_cmd_success);
41237 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
41238 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
41239
41240 vports = lpfc_create_vport_work_array(phba);
41241 if (vports != NULL)
41242 @@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
41243 }
41244 }
41245 lpfc_destroy_vport_work_array(phba, vports);
41246 - atomic_set(&phba->num_rsrc_err, 0);
41247 - atomic_set(&phba->num_cmd_success, 0);
41248 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
41249 + atomic_set_unchecked(&phba->num_cmd_success, 0);
41250 }
41251
41252 /**
41253 @@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
41254 }
41255 }
41256 lpfc_destroy_vport_work_array(phba, vports);
41257 - atomic_set(&phba->num_rsrc_err, 0);
41258 - atomic_set(&phba->num_cmd_success, 0);
41259 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
41260 + atomic_set_unchecked(&phba->num_cmd_success, 0);
41261 }
41262
41263 /**
41264 diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
41265 index 234f0b7..3020aea 100644
41266 --- a/drivers/scsi/megaraid/megaraid_mbox.c
41267 +++ b/drivers/scsi/megaraid/megaraid_mbox.c
41268 @@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter)
41269 int rval;
41270 int i;
41271
41272 + pax_track_stack();
41273 +
41274 // Allocate memory for the base list of scb for management module.
41275 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
41276
41277 diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
41278 index 7a117c1..ee01e9e 100644
41279 --- a/drivers/scsi/osd/osd_initiator.c
41280 +++ b/drivers/scsi/osd/osd_initiator.c
41281 @@ -94,6 +94,8 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
41282 int nelem = ARRAY_SIZE(get_attrs), a = 0;
41283 int ret;
41284
41285 + pax_track_stack();
41286 +
41287 or = osd_start_request(od, GFP_KERNEL);
41288 if (!or)
41289 return -ENOMEM;
41290 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
41291 index 9ab8c86..9425ad3 100644
41292 --- a/drivers/scsi/pmcraid.c
41293 +++ b/drivers/scsi/pmcraid.c
41294 @@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
41295 res->scsi_dev = scsi_dev;
41296 scsi_dev->hostdata = res;
41297 res->change_detected = 0;
41298 - atomic_set(&res->read_failures, 0);
41299 - atomic_set(&res->write_failures, 0);
41300 + atomic_set_unchecked(&res->read_failures, 0);
41301 + atomic_set_unchecked(&res->write_failures, 0);
41302 rc = 0;
41303 }
41304 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
41305 @@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
41306
41307 /* If this was a SCSI read/write command keep count of errors */
41308 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
41309 - atomic_inc(&res->read_failures);
41310 + atomic_inc_unchecked(&res->read_failures);
41311 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
41312 - atomic_inc(&res->write_failures);
41313 + atomic_inc_unchecked(&res->write_failures);
41314
41315 if (!RES_IS_GSCSI(res->cfg_entry) &&
41316 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
41317 @@ -4116,7 +4116,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
41318
41319 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
41320 /* add resources only after host is added into system */
41321 - if (!atomic_read(&pinstance->expose_resources))
41322 + if (!atomic_read_unchecked(&pinstance->expose_resources))
41323 return;
41324
41325 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
41326 @@ -4850,7 +4850,7 @@ static int __devinit pmcraid_init_instance(
41327 init_waitqueue_head(&pinstance->reset_wait_q);
41328
41329 atomic_set(&pinstance->outstanding_cmds, 0);
41330 - atomic_set(&pinstance->expose_resources, 0);
41331 + atomic_set_unchecked(&pinstance->expose_resources, 0);
41332
41333 INIT_LIST_HEAD(&pinstance->free_res_q);
41334 INIT_LIST_HEAD(&pinstance->used_res_q);
41335 @@ -5502,7 +5502,7 @@ static int __devinit pmcraid_probe(
41336 /* Schedule worker thread to handle CCN and take care of adding and
41337 * removing devices to OS
41338 */
41339 - atomic_set(&pinstance->expose_resources, 1);
41340 + atomic_set_unchecked(&pinstance->expose_resources, 1);
41341 schedule_work(&pinstance->worker_q);
41342 return rc;
41343
41344 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
41345 index 3441b3f..6cbe8f7 100644
41346 --- a/drivers/scsi/pmcraid.h
41347 +++ b/drivers/scsi/pmcraid.h
41348 @@ -690,7 +690,7 @@ struct pmcraid_instance {
41349 atomic_t outstanding_cmds;
41350
41351 /* should add/delete resources to mid-layer now ?*/
41352 - atomic_t expose_resources;
41353 + atomic_unchecked_t expose_resources;
41354
41355 /* Tasklet to handle deferred processing */
41356 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
41357 @@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
41358 struct list_head queue; /* link to "to be exposed" resources */
41359 struct pmcraid_config_table_entry cfg_entry;
41360 struct scsi_device *scsi_dev; /* Link scsi_device structure */
41361 - atomic_t read_failures; /* count of failed READ commands */
41362 - atomic_t write_failures; /* count of failed WRITE commands */
41363 + atomic_unchecked_t read_failures; /* count of failed READ commands */
41364 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
41365
41366 /* To indicate add/delete/modify during CCN */
41367 u8 change_detected;
41368 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
41369 index 2150618..7034215 100644
41370 --- a/drivers/scsi/qla2xxx/qla_def.h
41371 +++ b/drivers/scsi/qla2xxx/qla_def.h
41372 @@ -2089,7 +2089,7 @@ struct isp_operations {
41373
41374 int (*get_flash_version) (struct scsi_qla_host *, void *);
41375 int (*start_scsi) (srb_t *);
41376 -};
41377 +} __no_const;
41378
41379 /* MSI-X Support *************************************************************/
41380
41381 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
41382 index 81b5f29..2ae1fad 100644
41383 --- a/drivers/scsi/qla4xxx/ql4_def.h
41384 +++ b/drivers/scsi/qla4xxx/ql4_def.h
41385 @@ -240,7 +240,7 @@ struct ddb_entry {
41386 atomic_t retry_relogin_timer; /* Min Time between relogins
41387 * (4000 only) */
41388 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
41389 - atomic_t relogin_retry_count; /* Num of times relogin has been
41390 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
41391 * retried */
41392
41393 uint16_t port;
41394 diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
41395 index af8c323..515dd51 100644
41396 --- a/drivers/scsi/qla4xxx/ql4_init.c
41397 +++ b/drivers/scsi/qla4xxx/ql4_init.c
41398 @@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
41399 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
41400 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
41401 atomic_set(&ddb_entry->relogin_timer, 0);
41402 - atomic_set(&ddb_entry->relogin_retry_count, 0);
41403 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
41404 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
41405 list_add_tail(&ddb_entry->list, &ha->ddb_list);
41406 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
41407 @@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
41408 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
41409 atomic_set(&ddb_entry->port_down_timer,
41410 ha->port_down_retry_count);
41411 - atomic_set(&ddb_entry->relogin_retry_count, 0);
41412 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
41413 atomic_set(&ddb_entry->relogin_timer, 0);
41414 clear_bit(DF_RELOGIN, &ddb_entry->flags);
41415 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
41416 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
41417 index 83c8b5e..a82b348 100644
41418 --- a/drivers/scsi/qla4xxx/ql4_os.c
41419 +++ b/drivers/scsi/qla4xxx/ql4_os.c
41420 @@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
41421 ddb_entry->fw_ddb_device_state ==
41422 DDB_DS_SESSION_FAILED) {
41423 /* Reset retry relogin timer */
41424 - atomic_inc(&ddb_entry->relogin_retry_count);
41425 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
41426 DEBUG2(printk("scsi%ld: index[%d] relogin"
41427 " timed out-retrying"
41428 " relogin (%d)\n",
41429 ha->host_no,
41430 ddb_entry->fw_ddb_index,
41431 - atomic_read(&ddb_entry->
41432 + atomic_read_unchecked(&ddb_entry->
41433 relogin_retry_count))
41434 );
41435 start_dpc++;
41436 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
41437 index dd098ca..686ce01 100644
41438 --- a/drivers/scsi/scsi.c
41439 +++ b/drivers/scsi/scsi.c
41440 @@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
41441 unsigned long timeout;
41442 int rtn = 0;
41443
41444 - atomic_inc(&cmd->device->iorequest_cnt);
41445 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41446
41447 /* check if the device is still usable */
41448 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
41449 diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
41450 index bc3e363..e1a8e50 100644
41451 --- a/drivers/scsi/scsi_debug.c
41452 +++ b/drivers/scsi/scsi_debug.c
41453 @@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
41454 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
41455 unsigned char *cmd = (unsigned char *)scp->cmnd;
41456
41457 + pax_track_stack();
41458 +
41459 if ((errsts = check_readiness(scp, 1, devip)))
41460 return errsts;
41461 memset(arr, 0, sizeof(arr));
41462 @@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cmnd * scp,
41463 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
41464 unsigned char *cmd = (unsigned char *)scp->cmnd;
41465
41466 + pax_track_stack();
41467 +
41468 if ((errsts = check_readiness(scp, 1, devip)))
41469 return errsts;
41470 memset(arr, 0, sizeof(arr));
41471 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
41472 index 8df12522..c4c1472 100644
41473 --- a/drivers/scsi/scsi_lib.c
41474 +++ b/drivers/scsi/scsi_lib.c
41475 @@ -1389,7 +1389,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
41476 shost = sdev->host;
41477 scsi_init_cmd_errh(cmd);
41478 cmd->result = DID_NO_CONNECT << 16;
41479 - atomic_inc(&cmd->device->iorequest_cnt);
41480 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41481
41482 /*
41483 * SCSI request completion path will do scsi_device_unbusy(),
41484 @@ -1420,9 +1420,9 @@ static void scsi_softirq_done(struct request *rq)
41485 */
41486 cmd->serial_number = 0;
41487
41488 - atomic_inc(&cmd->device->iodone_cnt);
41489 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
41490 if (cmd->result)
41491 - atomic_inc(&cmd->device->ioerr_cnt);
41492 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
41493
41494 disposition = scsi_decide_disposition(cmd);
41495 if (disposition != SUCCESS &&
41496 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
41497 index 91a93e0..eae0fe3 100644
41498 --- a/drivers/scsi/scsi_sysfs.c
41499 +++ b/drivers/scsi/scsi_sysfs.c
41500 @@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
41501 char *buf) \
41502 { \
41503 struct scsi_device *sdev = to_scsi_device(dev); \
41504 - unsigned long long count = atomic_read(&sdev->field); \
41505 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
41506 return snprintf(buf, 20, "0x%llx\n", count); \
41507 } \
41508 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
41509 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
41510 index 1030327..f91fd30 100644
41511 --- a/drivers/scsi/scsi_tgt_lib.c
41512 +++ b/drivers/scsi/scsi_tgt_lib.c
41513 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
41514 int err;
41515
41516 dprintk("%lx %u\n", uaddr, len);
41517 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
41518 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
41519 if (err) {
41520 /*
41521 * TODO: need to fixup sg_tablesize, max_segment_size,
41522 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
41523 index db02e31..1b42ea9 100644
41524 --- a/drivers/scsi/scsi_transport_fc.c
41525 +++ b/drivers/scsi/scsi_transport_fc.c
41526 @@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
41527 * Netlink Infrastructure
41528 */
41529
41530 -static atomic_t fc_event_seq;
41531 +static atomic_unchecked_t fc_event_seq;
41532
41533 /**
41534 * fc_get_event_number - Obtain the next sequential FC event number
41535 @@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
41536 u32
41537 fc_get_event_number(void)
41538 {
41539 - return atomic_add_return(1, &fc_event_seq);
41540 + return atomic_add_return_unchecked(1, &fc_event_seq);
41541 }
41542 EXPORT_SYMBOL(fc_get_event_number);
41543
41544 @@ -641,7 +641,7 @@ static __init int fc_transport_init(void)
41545 {
41546 int error;
41547
41548 - atomic_set(&fc_event_seq, 0);
41549 + atomic_set_unchecked(&fc_event_seq, 0);
41550
41551 error = transport_class_register(&fc_host_class);
41552 if (error)
41553 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
41554 index de2f8c4..63c5278 100644
41555 --- a/drivers/scsi/scsi_transport_iscsi.c
41556 +++ b/drivers/scsi/scsi_transport_iscsi.c
41557 @@ -81,7 +81,7 @@ struct iscsi_internal {
41558 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
41559 };
41560
41561 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
41562 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
41563 static struct workqueue_struct *iscsi_eh_timer_workq;
41564
41565 /*
41566 @@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
41567 int err;
41568
41569 ihost = shost->shost_data;
41570 - session->sid = atomic_add_return(1, &iscsi_session_nr);
41571 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
41572
41573 if (id == ISCSI_MAX_TARGET) {
41574 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
41575 @@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(void)
41576 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
41577 ISCSI_TRANSPORT_VERSION);
41578
41579 - atomic_set(&iscsi_session_nr, 0);
41580 + atomic_set_unchecked(&iscsi_session_nr, 0);
41581
41582 err = class_register(&iscsi_transport_class);
41583 if (err)
41584 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
41585 index 21a045e..ec89e03 100644
41586 --- a/drivers/scsi/scsi_transport_srp.c
41587 +++ b/drivers/scsi/scsi_transport_srp.c
41588 @@ -33,7 +33,7 @@
41589 #include "scsi_transport_srp_internal.h"
41590
41591 struct srp_host_attrs {
41592 - atomic_t next_port_id;
41593 + atomic_unchecked_t next_port_id;
41594 };
41595 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
41596
41597 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
41598 struct Scsi_Host *shost = dev_to_shost(dev);
41599 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
41600
41601 - atomic_set(&srp_host->next_port_id, 0);
41602 + atomic_set_unchecked(&srp_host->next_port_id, 0);
41603 return 0;
41604 }
41605
41606 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
41607 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
41608 rport->roles = ids->roles;
41609
41610 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
41611 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
41612 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
41613
41614 transport_setup_device(&rport->dev);
41615 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
41616 index 568d363..fd8145f 100644
41617 --- a/drivers/scsi/sd.c
41618 +++ b/drivers/scsi/sd.c
41619 @@ -817,6 +817,10 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
41620 SCSI_LOG_IOCTL(1, printk("sd_ioctl: disk=%s, cmd=0x%x\n",
41621 disk->disk_name, cmd));
41622
41623 + error = scsi_verify_blk_ioctl(bdev, cmd);
41624 + if (error < 0)
41625 + return error;
41626 +
41627 /*
41628 * If we are in the middle of error recovery, don't let anyone
41629 * else try and use this device. Also, if error recovery fails, it
41630 @@ -838,7 +842,7 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
41631 case SCSI_IOCTL_GET_BUS_NUMBER:
41632 return scsi_ioctl(sdp, cmd, p);
41633 default:
41634 - error = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, p);
41635 + error = scsi_cmd_blk_ioctl(bdev, mode, cmd, p);
41636 if (error != -ENOTTY)
41637 return error;
41638 }
41639 @@ -996,6 +1000,11 @@ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
41640 unsigned int cmd, unsigned long arg)
41641 {
41642 struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;
41643 + int ret;
41644 +
41645 + ret = scsi_verify_blk_ioctl(bdev, cmd);
41646 + if (ret < 0)
41647 + return ret;
41648
41649 /*
41650 * If we are in the middle of error recovery, don't let anyone
41651 @@ -1007,8 +1016,6 @@ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
41652 return -ENODEV;
41653
41654 if (sdev->host->hostt->compat_ioctl) {
41655 - int ret;
41656 -
41657 ret = sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg);
41658
41659 return ret;
41660 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
41661 index 040f751..98a5ed2 100644
41662 --- a/drivers/scsi/sg.c
41663 +++ b/drivers/scsi/sg.c
41664 @@ -1064,7 +1064,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
41665 sdp->disk->disk_name,
41666 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
41667 NULL,
41668 - (char *)arg);
41669 + (char __user *)arg);
41670 case BLKTRACESTART:
41671 return blk_trace_startstop(sdp->device->request_queue, 1);
41672 case BLKTRACESTOP:
41673 @@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
41674 const struct file_operations * fops;
41675 };
41676
41677 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
41678 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
41679 {"allow_dio", &adio_fops},
41680 {"debug", &debug_fops},
41681 {"def_reserved_size", &dressz_fops},
41682 @@ -2307,7 +2307,7 @@ sg_proc_init(void)
41683 {
41684 int k, mask;
41685 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
41686 - struct sg_proc_leaf * leaf;
41687 + const struct sg_proc_leaf * leaf;
41688
41689 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
41690 if (!sg_proc_sgp)
41691 diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
41692 index 45374d6..61ee484 100644
41693 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c
41694 +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
41695 @@ -1754,6 +1754,8 @@ static int __devinit sym2_probe(struct pci_dev *pdev,
41696 int do_iounmap = 0;
41697 int do_disable_device = 1;
41698
41699 + pax_track_stack();
41700 +
41701 memset(&sym_dev, 0, sizeof(sym_dev));
41702 memset(&nvram, 0, sizeof(nvram));
41703 sym_dev.pdev = pdev;
41704 diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c
41705 index eadc1ab..2d81457 100644
41706 --- a/drivers/serial/kgdboc.c
41707 +++ b/drivers/serial/kgdboc.c
41708 @@ -18,7 +18,7 @@
41709
41710 #define MAX_CONFIG_LEN 40
41711
41712 -static struct kgdb_io kgdboc_io_ops;
41713 +static const struct kgdb_io kgdboc_io_ops;
41714
41715 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
41716 static int configured = -1;
41717 @@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void)
41718 module_put(THIS_MODULE);
41719 }
41720
41721 -static struct kgdb_io kgdboc_io_ops = {
41722 +static const struct kgdb_io kgdboc_io_ops = {
41723 .name = "kgdboc",
41724 .read_char = kgdboc_get_char,
41725 .write_char = kgdboc_put_char,
41726 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
41727 index b76f246..7f41af7 100644
41728 --- a/drivers/spi/spi.c
41729 +++ b/drivers/spi/spi.c
41730 @@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, struct spi_message *message)
41731 EXPORT_SYMBOL_GPL(spi_sync);
41732
41733 /* portable code must never pass more than 32 bytes */
41734 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
41735 +#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
41736
41737 static u8 *buf;
41738
41739 diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
41740 index 99010d4..6bad87b 100644
41741 --- a/drivers/staging/android/binder.c
41742 +++ b/drivers/staging/android/binder.c
41743 @@ -2756,7 +2756,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
41744 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
41745 }
41746
41747 -static struct vm_operations_struct binder_vm_ops = {
41748 +static const struct vm_operations_struct binder_vm_ops = {
41749 .open = binder_vma_open,
41750 .close = binder_vma_close,
41751 };
41752 diff --git a/drivers/staging/b3dfg/b3dfg.c b/drivers/staging/b3dfg/b3dfg.c
41753 index cda26bb..39fed3f 100644
41754 --- a/drivers/staging/b3dfg/b3dfg.c
41755 +++ b/drivers/staging/b3dfg/b3dfg.c
41756 @@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_area_struct *vma,
41757 return VM_FAULT_NOPAGE;
41758 }
41759
41760 -static struct vm_operations_struct b3dfg_vm_ops = {
41761 +static const struct vm_operations_struct b3dfg_vm_ops = {
41762 .fault = b3dfg_vma_fault,
41763 };
41764
41765 @@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp, struct vm_area_struct *vma)
41766 return r;
41767 }
41768
41769 -static struct file_operations b3dfg_fops = {
41770 +static const struct file_operations b3dfg_fops = {
41771 .owner = THIS_MODULE,
41772 .open = b3dfg_open,
41773 .release = b3dfg_release,
41774 diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
41775 index 908f25a..c9a579b 100644
41776 --- a/drivers/staging/comedi/comedi_fops.c
41777 +++ b/drivers/staging/comedi/comedi_fops.c
41778 @@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct *area)
41779 mutex_unlock(&dev->mutex);
41780 }
41781
41782 -static struct vm_operations_struct comedi_vm_ops = {
41783 +static const struct vm_operations_struct comedi_vm_ops = {
41784 .close = comedi_unmap,
41785 };
41786
41787 diff --git a/drivers/staging/dream/qdsp5/adsp_driver.c b/drivers/staging/dream/qdsp5/adsp_driver.c
41788 index e55a0db..577b776 100644
41789 --- a/drivers/staging/dream/qdsp5/adsp_driver.c
41790 +++ b/drivers/staging/dream/qdsp5/adsp_driver.c
41791 @@ -576,7 +576,7 @@ static struct adsp_device *inode_to_device(struct inode *inode)
41792 static dev_t adsp_devno;
41793 static struct class *adsp_class;
41794
41795 -static struct file_operations adsp_fops = {
41796 +static const struct file_operations adsp_fops = {
41797 .owner = THIS_MODULE,
41798 .open = adsp_open,
41799 .unlocked_ioctl = adsp_ioctl,
41800 diff --git a/drivers/staging/dream/qdsp5/audio_aac.c b/drivers/staging/dream/qdsp5/audio_aac.c
41801 index ad2390f..4116ee8 100644
41802 --- a/drivers/staging/dream/qdsp5/audio_aac.c
41803 +++ b/drivers/staging/dream/qdsp5/audio_aac.c
41804 @@ -1022,7 +1022,7 @@ done:
41805 return rc;
41806 }
41807
41808 -static struct file_operations audio_aac_fops = {
41809 +static const struct file_operations audio_aac_fops = {
41810 .owner = THIS_MODULE,
41811 .open = audio_open,
41812 .release = audio_release,
41813 diff --git a/drivers/staging/dream/qdsp5/audio_amrnb.c b/drivers/staging/dream/qdsp5/audio_amrnb.c
41814 index cd818a5..870b37b 100644
41815 --- a/drivers/staging/dream/qdsp5/audio_amrnb.c
41816 +++ b/drivers/staging/dream/qdsp5/audio_amrnb.c
41817 @@ -833,7 +833,7 @@ done:
41818 return rc;
41819 }
41820
41821 -static struct file_operations audio_amrnb_fops = {
41822 +static const struct file_operations audio_amrnb_fops = {
41823 .owner = THIS_MODULE,
41824 .open = audamrnb_open,
41825 .release = audamrnb_release,
41826 diff --git a/drivers/staging/dream/qdsp5/audio_evrc.c b/drivers/staging/dream/qdsp5/audio_evrc.c
41827 index 4b43e18..cedafda 100644
41828 --- a/drivers/staging/dream/qdsp5/audio_evrc.c
41829 +++ b/drivers/staging/dream/qdsp5/audio_evrc.c
41830 @@ -805,7 +805,7 @@ dma_fail:
41831 return rc;
41832 }
41833
41834 -static struct file_operations audio_evrc_fops = {
41835 +static const struct file_operations audio_evrc_fops = {
41836 .owner = THIS_MODULE,
41837 .open = audevrc_open,
41838 .release = audevrc_release,
41839 diff --git a/drivers/staging/dream/qdsp5/audio_in.c b/drivers/staging/dream/qdsp5/audio_in.c
41840 index 3d950a2..9431118 100644
41841 --- a/drivers/staging/dream/qdsp5/audio_in.c
41842 +++ b/drivers/staging/dream/qdsp5/audio_in.c
41843 @@ -913,7 +913,7 @@ static int audpre_open(struct inode *inode, struct file *file)
41844 return 0;
41845 }
41846
41847 -static struct file_operations audio_fops = {
41848 +static const struct file_operations audio_fops = {
41849 .owner = THIS_MODULE,
41850 .open = audio_in_open,
41851 .release = audio_in_release,
41852 @@ -922,7 +922,7 @@ static struct file_operations audio_fops = {
41853 .unlocked_ioctl = audio_in_ioctl,
41854 };
41855
41856 -static struct file_operations audpre_fops = {
41857 +static const struct file_operations audpre_fops = {
41858 .owner = THIS_MODULE,
41859 .open = audpre_open,
41860 .unlocked_ioctl = audpre_ioctl,
41861 diff --git a/drivers/staging/dream/qdsp5/audio_mp3.c b/drivers/staging/dream/qdsp5/audio_mp3.c
41862 index b95574f..286c2f4 100644
41863 --- a/drivers/staging/dream/qdsp5/audio_mp3.c
41864 +++ b/drivers/staging/dream/qdsp5/audio_mp3.c
41865 @@ -941,7 +941,7 @@ done:
41866 return rc;
41867 }
41868
41869 -static struct file_operations audio_mp3_fops = {
41870 +static const struct file_operations audio_mp3_fops = {
41871 .owner = THIS_MODULE,
41872 .open = audio_open,
41873 .release = audio_release,
41874 diff --git a/drivers/staging/dream/qdsp5/audio_out.c b/drivers/staging/dream/qdsp5/audio_out.c
41875 index d1adcf6..f8f9833 100644
41876 --- a/drivers/staging/dream/qdsp5/audio_out.c
41877 +++ b/drivers/staging/dream/qdsp5/audio_out.c
41878 @@ -810,7 +810,7 @@ static int audpp_open(struct inode *inode, struct file *file)
41879 return 0;
41880 }
41881
41882 -static struct file_operations audio_fops = {
41883 +static const struct file_operations audio_fops = {
41884 .owner = THIS_MODULE,
41885 .open = audio_open,
41886 .release = audio_release,
41887 @@ -819,7 +819,7 @@ static struct file_operations audio_fops = {
41888 .unlocked_ioctl = audio_ioctl,
41889 };
41890
41891 -static struct file_operations audpp_fops = {
41892 +static const struct file_operations audpp_fops = {
41893 .owner = THIS_MODULE,
41894 .open = audpp_open,
41895 .unlocked_ioctl = audpp_ioctl,
41896 diff --git a/drivers/staging/dream/qdsp5/audio_qcelp.c b/drivers/staging/dream/qdsp5/audio_qcelp.c
41897 index f0f50e3..f6b9dbc 100644
41898 --- a/drivers/staging/dream/qdsp5/audio_qcelp.c
41899 +++ b/drivers/staging/dream/qdsp5/audio_qcelp.c
41900 @@ -816,7 +816,7 @@ err:
41901 return rc;
41902 }
41903
41904 -static struct file_operations audio_qcelp_fops = {
41905 +static const struct file_operations audio_qcelp_fops = {
41906 .owner = THIS_MODULE,
41907 .open = audqcelp_open,
41908 .release = audqcelp_release,
41909 diff --git a/drivers/staging/dream/qdsp5/snd.c b/drivers/staging/dream/qdsp5/snd.c
41910 index 037d7ff..5469ec3 100644
41911 --- a/drivers/staging/dream/qdsp5/snd.c
41912 +++ b/drivers/staging/dream/qdsp5/snd.c
41913 @@ -242,7 +242,7 @@ err:
41914 return rc;
41915 }
41916
41917 -static struct file_operations snd_fops = {
41918 +static const struct file_operations snd_fops = {
41919 .owner = THIS_MODULE,
41920 .open = snd_open,
41921 .release = snd_release,
41922 diff --git a/drivers/staging/dream/smd/smd_qmi.c b/drivers/staging/dream/smd/smd_qmi.c
41923 index d4e7d88..0ea632a 100644
41924 --- a/drivers/staging/dream/smd/smd_qmi.c
41925 +++ b/drivers/staging/dream/smd/smd_qmi.c
41926 @@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip, struct file *fp)
41927 return 0;
41928 }
41929
41930 -static struct file_operations qmi_fops = {
41931 +static const struct file_operations qmi_fops = {
41932 .owner = THIS_MODULE,
41933 .read = qmi_read,
41934 .write = qmi_write,
41935 diff --git a/drivers/staging/dream/smd/smd_rpcrouter_device.c b/drivers/staging/dream/smd/smd_rpcrouter_device.c
41936 index cd3910b..ff053d3 100644
41937 --- a/drivers/staging/dream/smd/smd_rpcrouter_device.c
41938 +++ b/drivers/staging/dream/smd/smd_rpcrouter_device.c
41939 @@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file *filp, unsigned int cmd,
41940 return rc;
41941 }
41942
41943 -static struct file_operations rpcrouter_server_fops = {
41944 +static const struct file_operations rpcrouter_server_fops = {
41945 .owner = THIS_MODULE,
41946 .open = rpcrouter_open,
41947 .release = rpcrouter_release,
41948 @@ -224,7 +224,7 @@ static struct file_operations rpcrouter_server_fops = {
41949 .unlocked_ioctl = rpcrouter_ioctl,
41950 };
41951
41952 -static struct file_operations rpcrouter_router_fops = {
41953 +static const struct file_operations rpcrouter_router_fops = {
41954 .owner = THIS_MODULE,
41955 .open = rpcrouter_open,
41956 .release = rpcrouter_release,
41957 diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c
41958 index c24e4e0..07665be 100644
41959 --- a/drivers/staging/dst/dcore.c
41960 +++ b/drivers/staging/dst/dcore.c
41961 @@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendisk *disk, fmode_t mode)
41962 return 0;
41963 }
41964
41965 -static struct block_device_operations dst_blk_ops = {
41966 +static const struct block_device_operations dst_blk_ops = {
41967 .open = dst_bdev_open,
41968 .release = dst_bdev_release,
41969 .owner = THIS_MODULE,
41970 @@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(struct dst_ctl *ctl,
41971 n->size = ctl->size;
41972
41973 atomic_set(&n->refcnt, 1);
41974 - atomic_long_set(&n->gen, 0);
41975 + atomic_long_set_unchecked(&n->gen, 0);
41976 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
41977
41978 err = dst_node_sysfs_init(n);
41979 diff --git a/drivers/staging/dst/trans.c b/drivers/staging/dst/trans.c
41980 index 557d372..8d84422 100644
41981 --- a/drivers/staging/dst/trans.c
41982 +++ b/drivers/staging/dst/trans.c
41983 @@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n, struct bio *bio)
41984 t->error = 0;
41985 t->retries = 0;
41986 atomic_set(&t->refcnt, 1);
41987 - t->gen = atomic_long_inc_return(&n->gen);
41988 + t->gen = atomic_long_inc_return_unchecked(&n->gen);
41989
41990 t->enc = bio_data_dir(bio);
41991 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
41992 diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c
41993 index 94f7752..d051514 100644
41994 --- a/drivers/staging/et131x/et1310_tx.c
41995 +++ b/drivers/staging/et131x/et1310_tx.c
41996 @@ -710,11 +710,11 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
41997 struct net_device_stats *stats = &etdev->net_stats;
41998
41999 if (pMpTcb->Flags & fMP_DEST_BROAD)
42000 - atomic_inc(&etdev->Stats.brdcstxmt);
42001 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
42002 else if (pMpTcb->Flags & fMP_DEST_MULTI)
42003 - atomic_inc(&etdev->Stats.multixmt);
42004 + atomic_inc_unchecked(&etdev->Stats.multixmt);
42005 else
42006 - atomic_inc(&etdev->Stats.unixmt);
42007 + atomic_inc_unchecked(&etdev->Stats.unixmt);
42008
42009 if (pMpTcb->Packet) {
42010 stats->tx_bytes += pMpTcb->Packet->len;
42011 diff --git a/drivers/staging/et131x/et131x_adapter.h b/drivers/staging/et131x/et131x_adapter.h
42012 index 1dfe06f..f469b4d 100644
42013 --- a/drivers/staging/et131x/et131x_adapter.h
42014 +++ b/drivers/staging/et131x/et131x_adapter.h
42015 @@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
42016 * operations
42017 */
42018 u32 unircv; /* # multicast packets received */
42019 - atomic_t unixmt; /* # multicast packets for Tx */
42020 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
42021 u32 multircv; /* # multicast packets received */
42022 - atomic_t multixmt; /* # multicast packets for Tx */
42023 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
42024 u32 brdcstrcv; /* # broadcast packets received */
42025 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
42026 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
42027 u32 norcvbuf; /* # Rx packets discarded */
42028 u32 noxmtbuf; /* # Tx packets discarded */
42029
42030 diff --git a/drivers/staging/go7007/go7007-v4l2.c b/drivers/staging/go7007/go7007-v4l2.c
42031 index 4bd353a..e28f455 100644
42032 --- a/drivers/staging/go7007/go7007-v4l2.c
42033 +++ b/drivers/staging/go7007/go7007-v4l2.c
42034 @@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42035 return 0;
42036 }
42037
42038 -static struct vm_operations_struct go7007_vm_ops = {
42039 +static const struct vm_operations_struct go7007_vm_ops = {
42040 .open = go7007_vm_open,
42041 .close = go7007_vm_close,
42042 .fault = go7007_vm_fault,
42043 diff --git a/drivers/staging/hv/Channel.c b/drivers/staging/hv/Channel.c
42044 index 366dc95..b974d87 100644
42045 --- a/drivers/staging/hv/Channel.c
42046 +++ b/drivers/staging/hv/Channel.c
42047 @@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vmbus_channel *Channel, void *Kbuffer,
42048
42049 DPRINT_ENTER(VMBUS);
42050
42051 - nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
42052 - atomic_inc(&gVmbusConnection.NextGpadlHandle);
42053 + nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
42054 + atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
42055
42056 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
42057 ASSERT(msgInfo != NULL);
42058 diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c
42059 index b12237f..01ae28a 100644
42060 --- a/drivers/staging/hv/Hv.c
42061 +++ b/drivers/staging/hv/Hv.c
42062 @@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, void *Input, void *Output)
42063 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
42064 u32 outputAddressHi = outputAddress >> 32;
42065 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
42066 - volatile void *hypercallPage = gHvContext.HypercallPage;
42067 + volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
42068
42069 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
42070 Control, Input, Output);
42071 diff --git a/drivers/staging/hv/VmbusApi.h b/drivers/staging/hv/VmbusApi.h
42072 index d089bb1..2ebc158 100644
42073 --- a/drivers/staging/hv/VmbusApi.h
42074 +++ b/drivers/staging/hv/VmbusApi.h
42075 @@ -109,7 +109,7 @@ struct vmbus_channel_interface {
42076 u32 *GpadlHandle);
42077 int (*TeardownGpadl)(struct hv_device *device, u32 GpadlHandle);
42078 void (*GetInfo)(struct hv_device *dev, struct hv_device_info *devinfo);
42079 -};
42080 +} __no_const;
42081
42082 /* Base driver object */
42083 struct hv_driver {
42084 diff --git a/drivers/staging/hv/VmbusPrivate.h b/drivers/staging/hv/VmbusPrivate.h
42085 index 5a37cce..6ecc88c 100644
42086 --- a/drivers/staging/hv/VmbusPrivate.h
42087 +++ b/drivers/staging/hv/VmbusPrivate.h
42088 @@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
42089 struct VMBUS_CONNECTION {
42090 enum VMBUS_CONNECT_STATE ConnectState;
42091
42092 - atomic_t NextGpadlHandle;
42093 + atomic_unchecked_t NextGpadlHandle;
42094
42095 /*
42096 * Represents channel interrupts. Each bit position represents a
42097 diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
42098 index 871a202..ca50ddf 100644
42099 --- a/drivers/staging/hv/blkvsc_drv.c
42100 +++ b/drivers/staging/hv/blkvsc_drv.c
42101 @@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;
42102 /* The one and only one */
42103 static struct blkvsc_driver_context g_blkvsc_drv;
42104
42105 -static struct block_device_operations block_ops = {
42106 +static const struct block_device_operations block_ops = {
42107 .owner = THIS_MODULE,
42108 .open = blkvsc_open,
42109 .release = blkvsc_release,
42110 diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c
42111 index 6acc49a..fbc8d46 100644
42112 --- a/drivers/staging/hv/vmbus_drv.c
42113 +++ b/drivers/staging/hv/vmbus_drv.c
42114 @@ -532,7 +532,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
42115 to_device_context(root_device_obj);
42116 struct device_context *child_device_ctx =
42117 to_device_context(child_device_obj);
42118 - static atomic_t device_num = ATOMIC_INIT(0);
42119 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
42120
42121 DPRINT_ENTER(VMBUS_DRV);
42122
42123 @@ -541,7 +541,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
42124
42125 /* Set the device name. Otherwise, device_register() will fail. */
42126 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
42127 - atomic_inc_return(&device_num));
42128 + atomic_inc_return_unchecked(&device_num));
42129
42130 /* The new device belongs to this bus */
42131 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
42132 diff --git a/drivers/staging/iio/ring_generic.h b/drivers/staging/iio/ring_generic.h
42133 index d926189..17b19fd 100644
42134 --- a/drivers/staging/iio/ring_generic.h
42135 +++ b/drivers/staging/iio/ring_generic.h
42136 @@ -87,7 +87,7 @@ struct iio_ring_access_funcs {
42137
42138 int (*is_enabled)(struct iio_ring_buffer *ring);
42139 int (*enable)(struct iio_ring_buffer *ring);
42140 -};
42141 +} __no_const;
42142
42143 /**
42144 * struct iio_ring_buffer - general ring buffer structure
42145 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
42146 index 1b237b7..88c624e 100644
42147 --- a/drivers/staging/octeon/ethernet-rx.c
42148 +++ b/drivers/staging/octeon/ethernet-rx.c
42149 @@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long unused)
42150 /* Increment RX stats for virtual ports */
42151 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
42152 #ifdef CONFIG_64BIT
42153 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
42154 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
42155 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
42156 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
42157 #else
42158 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
42159 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
42160 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
42161 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
42162 #endif
42163 }
42164 netif_receive_skb(skb);
42165 @@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long unused)
42166 dev->name);
42167 */
42168 #ifdef CONFIG_64BIT
42169 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
42170 + atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
42171 #else
42172 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
42173 + atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
42174 #endif
42175 dev_kfree_skb_irq(skb);
42176 }
42177 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
42178 index 492c502..d9909f1 100644
42179 --- a/drivers/staging/octeon/ethernet.c
42180 +++ b/drivers/staging/octeon/ethernet.c
42181 @@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
42182 * since the RX tasklet also increments it.
42183 */
42184 #ifdef CONFIG_64BIT
42185 - atomic64_add(rx_status.dropped_packets,
42186 - (atomic64_t *)&priv->stats.rx_dropped);
42187 + atomic64_add_unchecked(rx_status.dropped_packets,
42188 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
42189 #else
42190 - atomic_add(rx_status.dropped_packets,
42191 - (atomic_t *)&priv->stats.rx_dropped);
42192 + atomic_add_unchecked(rx_status.dropped_packets,
42193 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
42194 #endif
42195 }
42196
42197 diff --git a/drivers/staging/otus/80211core/pub_zfi.h b/drivers/staging/otus/80211core/pub_zfi.h
42198 index a35bd5d..28fff45 100644
42199 --- a/drivers/staging/otus/80211core/pub_zfi.h
42200 +++ b/drivers/staging/otus/80211core/pub_zfi.h
42201 @@ -531,7 +531,7 @@ struct zsCbFuncTbl
42202 u8_t (*zfcbClassifyTxPacket)(zdev_t* dev, zbuf_t* buf);
42203
42204 void (*zfcbHwWatchDogNotify)(zdev_t* dev);
42205 -};
42206 +} __no_const;
42207
42208 extern void zfZeroMemory(u8_t* va, u16_t length);
42209 #define ZM_INIT_CB_FUNC_TABLE(p) zfZeroMemory((u8_t *)p, sizeof(struct zsCbFuncTbl));
42210 diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
42211 index c39a25f..696f5aa 100644
42212 --- a/drivers/staging/panel/panel.c
42213 +++ b/drivers/staging/panel/panel.c
42214 @@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *inode, struct file *file)
42215 return 0;
42216 }
42217
42218 -static struct file_operations lcd_fops = {
42219 +static const struct file_operations lcd_fops = {
42220 .write = lcd_write,
42221 .open = lcd_open,
42222 .release = lcd_release,
42223 @@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *inode, struct file *file)
42224 return 0;
42225 }
42226
42227 -static struct file_operations keypad_fops = {
42228 +static const struct file_operations keypad_fops = {
42229 .read = keypad_read, /* read */
42230 .open = keypad_open, /* open */
42231 .release = keypad_release, /* close */
42232 diff --git a/drivers/staging/phison/phison.c b/drivers/staging/phison/phison.c
42233 index 270ebcb..37e46af 100644
42234 --- a/drivers/staging/phison/phison.c
42235 +++ b/drivers/staging/phison/phison.c
42236 @@ -43,7 +43,7 @@ static struct scsi_host_template phison_sht = {
42237 ATA_BMDMA_SHT(DRV_NAME),
42238 };
42239
42240 -static struct ata_port_operations phison_ops = {
42241 +static const struct ata_port_operations phison_ops = {
42242 .inherits = &ata_bmdma_port_ops,
42243 .prereset = phison_pre_reset,
42244 };
42245 diff --git a/drivers/staging/poch/poch.c b/drivers/staging/poch/poch.c
42246 index 2eb8e3d..57616a7 100644
42247 --- a/drivers/staging/poch/poch.c
42248 +++ b/drivers/staging/poch/poch.c
42249 @@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inode, struct file *filp,
42250 return 0;
42251 }
42252
42253 -static struct file_operations poch_fops = {
42254 +static const struct file_operations poch_fops = {
42255 .owner = THIS_MODULE,
42256 .open = poch_open,
42257 .release = poch_release,
42258 diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
42259 index c94de31..19402bc 100644
42260 --- a/drivers/staging/pohmelfs/inode.c
42261 +++ b/drivers/staging/pohmelfs/inode.c
42262 @@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
42263 mutex_init(&psb->mcache_lock);
42264 psb->mcache_root = RB_ROOT;
42265 psb->mcache_timeout = msecs_to_jiffies(5000);
42266 - atomic_long_set(&psb->mcache_gen, 0);
42267 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
42268
42269 psb->trans_max_pages = 100;
42270
42271 @@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
42272 INIT_LIST_HEAD(&psb->crypto_ready_list);
42273 INIT_LIST_HEAD(&psb->crypto_active_list);
42274
42275 - atomic_set(&psb->trans_gen, 1);
42276 + atomic_set_unchecked(&psb->trans_gen, 1);
42277 atomic_long_set(&psb->total_inodes, 0);
42278
42279 mutex_init(&psb->state_lock);
42280 diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
42281 index e22665c..a2a9390 100644
42282 --- a/drivers/staging/pohmelfs/mcache.c
42283 +++ b/drivers/staging/pohmelfs/mcache.c
42284 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
42285 m->data = data;
42286 m->start = start;
42287 m->size = size;
42288 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
42289 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
42290
42291 mutex_lock(&psb->mcache_lock);
42292 err = pohmelfs_mcache_insert(psb, m);
42293 diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
42294 index 623a07d..4035c19 100644
42295 --- a/drivers/staging/pohmelfs/netfs.h
42296 +++ b/drivers/staging/pohmelfs/netfs.h
42297 @@ -570,14 +570,14 @@ struct pohmelfs_config;
42298 struct pohmelfs_sb {
42299 struct rb_root mcache_root;
42300 struct mutex mcache_lock;
42301 - atomic_long_t mcache_gen;
42302 + atomic_long_unchecked_t mcache_gen;
42303 unsigned long mcache_timeout;
42304
42305 unsigned int idx;
42306
42307 unsigned int trans_retries;
42308
42309 - atomic_t trans_gen;
42310 + atomic_unchecked_t trans_gen;
42311
42312 unsigned int crypto_attached_size;
42313 unsigned int crypto_align_size;
42314 diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
42315 index 36a2535..0591bf4 100644
42316 --- a/drivers/staging/pohmelfs/trans.c
42317 +++ b/drivers/staging/pohmelfs/trans.c
42318 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
42319 int err;
42320 struct netfs_cmd *cmd = t->iovec.iov_base;
42321
42322 - t->gen = atomic_inc_return(&psb->trans_gen);
42323 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
42324
42325 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
42326 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
42327 diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
42328 index f890a16..509ece8 100644
42329 --- a/drivers/staging/sep/sep_driver.c
42330 +++ b/drivers/staging/sep/sep_driver.c
42331 @@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver = {
42332 static dev_t sep_devno;
42333
42334 /* the files operations structure of the driver */
42335 -static struct file_operations sep_file_operations = {
42336 +static const struct file_operations sep_file_operations = {
42337 .owner = THIS_MODULE,
42338 .ioctl = sep_ioctl,
42339 .poll = sep_poll,
42340 diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
42341 index 5e16bc3..7655b10 100644
42342 --- a/drivers/staging/usbip/usbip_common.h
42343 +++ b/drivers/staging/usbip/usbip_common.h
42344 @@ -374,7 +374,7 @@ struct usbip_device {
42345 void (*shutdown)(struct usbip_device *);
42346 void (*reset)(struct usbip_device *);
42347 void (*unusable)(struct usbip_device *);
42348 - } eh_ops;
42349 + } __no_const eh_ops;
42350 };
42351
42352
42353 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
42354 index 57f7946..d9df23d 100644
42355 --- a/drivers/staging/usbip/vhci.h
42356 +++ b/drivers/staging/usbip/vhci.h
42357 @@ -92,7 +92,7 @@ struct vhci_hcd {
42358 unsigned resuming:1;
42359 unsigned long re_timeout;
42360
42361 - atomic_t seqnum;
42362 + atomic_unchecked_t seqnum;
42363
42364 /*
42365 * NOTE:
42366 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
42367 index 20cd7db..c2693ff 100644
42368 --- a/drivers/staging/usbip/vhci_hcd.c
42369 +++ b/drivers/staging/usbip/vhci_hcd.c
42370 @@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
42371 return;
42372 }
42373
42374 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
42375 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42376 if (priv->seqnum == 0xffff)
42377 usbip_uinfo("seqnum max\n");
42378
42379 @@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
42380 return -ENOMEM;
42381 }
42382
42383 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
42384 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42385 if (unlink->seqnum == 0xffff)
42386 usbip_uinfo("seqnum max\n");
42387
42388 @@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hcd)
42389 vdev->rhport = rhport;
42390 }
42391
42392 - atomic_set(&vhci->seqnum, 0);
42393 + atomic_set_unchecked(&vhci->seqnum, 0);
42394 spin_lock_init(&vhci->lock);
42395
42396
42397 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
42398 index 7fd76fe..673695a 100644
42399 --- a/drivers/staging/usbip/vhci_rx.c
42400 +++ b/drivers/staging/usbip/vhci_rx.c
42401 @@ -79,7 +79,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
42402 usbip_uerr("cannot find a urb of seqnum %u\n",
42403 pdu->base.seqnum);
42404 usbip_uinfo("max seqnum %d\n",
42405 - atomic_read(&the_controller->seqnum));
42406 + atomic_read_unchecked(&the_controller->seqnum));
42407 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
42408 return;
42409 }
42410 diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
42411 index 7891288..8e31300 100644
42412 --- a/drivers/staging/vme/devices/vme_user.c
42413 +++ b/drivers/staging/vme/devices/vme_user.c
42414 @@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *, struct file *, unsigned int,
42415 static int __init vme_user_probe(struct device *, int, int);
42416 static int __exit vme_user_remove(struct device *, int, int);
42417
42418 -static struct file_operations vme_user_fops = {
42419 +static const struct file_operations vme_user_fops = {
42420 .open = vme_user_open,
42421 .release = vme_user_release,
42422 .read = vme_user_read,
42423 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
42424 index 58abf44..00c1fc8 100644
42425 --- a/drivers/staging/vt6655/hostap.c
42426 +++ b/drivers/staging/vt6655/hostap.c
42427 @@ -84,7 +84,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42428 PSDevice apdev_priv;
42429 struct net_device *dev = pDevice->dev;
42430 int ret;
42431 - const struct net_device_ops apdev_netdev_ops = {
42432 + net_device_ops_no_const apdev_netdev_ops = {
42433 .ndo_start_xmit = pDevice->tx_80211,
42434 };
42435
42436 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
42437 index 0c8267a..db1f363 100644
42438 --- a/drivers/staging/vt6656/hostap.c
42439 +++ b/drivers/staging/vt6656/hostap.c
42440 @@ -86,7 +86,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42441 PSDevice apdev_priv;
42442 struct net_device *dev = pDevice->dev;
42443 int ret;
42444 - const struct net_device_ops apdev_netdev_ops = {
42445 + net_device_ops_no_const apdev_netdev_ops = {
42446 .ndo_start_xmit = pDevice->tx_80211,
42447 };
42448
42449 diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
42450 index 925678b..da7f5ed 100644
42451 --- a/drivers/staging/wlan-ng/hfa384x_usb.c
42452 +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
42453 @@ -205,7 +205,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
42454
42455 struct usbctlx_completor {
42456 int (*complete) (struct usbctlx_completor *);
42457 -};
42458 +} __no_const;
42459 typedef struct usbctlx_completor usbctlx_completor_t;
42460
42461 static int
42462 diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
42463 index 40de151..924f268 100644
42464 --- a/drivers/telephony/ixj.c
42465 +++ b/drivers/telephony/ixj.c
42466 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
42467 bool mContinue;
42468 char *pIn, *pOut;
42469
42470 + pax_track_stack();
42471 +
42472 if (!SCI_Prepare(j))
42473 return 0;
42474
42475 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
42476 index e941367..b631f5a 100644
42477 --- a/drivers/uio/uio.c
42478 +++ b/drivers/uio/uio.c
42479 @@ -23,6 +23,7 @@
42480 #include <linux/string.h>
42481 #include <linux/kobject.h>
42482 #include <linux/uio_driver.h>
42483 +#include <asm/local.h>
42484
42485 #define UIO_MAX_DEVICES 255
42486
42487 @@ -30,10 +31,10 @@ struct uio_device {
42488 struct module *owner;
42489 struct device *dev;
42490 int minor;
42491 - atomic_t event;
42492 + atomic_unchecked_t event;
42493 struct fasync_struct *async_queue;
42494 wait_queue_head_t wait;
42495 - int vma_count;
42496 + local_t vma_count;
42497 struct uio_info *info;
42498 struct kobject *map_dir;
42499 struct kobject *portio_dir;
42500 @@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr,
42501 return entry->show(mem, buf);
42502 }
42503
42504 -static struct sysfs_ops map_sysfs_ops = {
42505 +static const struct sysfs_ops map_sysfs_ops = {
42506 .show = map_type_show,
42507 };
42508
42509 @@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct kobject *kobj, struct attribute *attr,
42510 return entry->show(port, buf);
42511 }
42512
42513 -static struct sysfs_ops portio_sysfs_ops = {
42514 +static const struct sysfs_ops portio_sysfs_ops = {
42515 .show = portio_type_show,
42516 };
42517
42518 @@ -255,7 +256,7 @@ static ssize_t show_event(struct device *dev,
42519 struct uio_device *idev = dev_get_drvdata(dev);
42520 if (idev)
42521 return sprintf(buf, "%u\n",
42522 - (unsigned int)atomic_read(&idev->event));
42523 + (unsigned int)atomic_read_unchecked(&idev->event));
42524 else
42525 return -ENODEV;
42526 }
42527 @@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *info)
42528 {
42529 struct uio_device *idev = info->uio_dev;
42530
42531 - atomic_inc(&idev->event);
42532 + atomic_inc_unchecked(&idev->event);
42533 wake_up_interruptible(&idev->wait);
42534 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
42535 }
42536 @@ -477,7 +478,7 @@ static int uio_open(struct inode *inode, struct file *filep)
42537 }
42538
42539 listener->dev = idev;
42540 - listener->event_count = atomic_read(&idev->event);
42541 + listener->event_count = atomic_read_unchecked(&idev->event);
42542 filep->private_data = listener;
42543
42544 if (idev->info->open) {
42545 @@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
42546 return -EIO;
42547
42548 poll_wait(filep, &idev->wait, wait);
42549 - if (listener->event_count != atomic_read(&idev->event))
42550 + if (listener->event_count != atomic_read_unchecked(&idev->event))
42551 return POLLIN | POLLRDNORM;
42552 return 0;
42553 }
42554 @@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
42555 do {
42556 set_current_state(TASK_INTERRUPTIBLE);
42557
42558 - event_count = atomic_read(&idev->event);
42559 + event_count = atomic_read_unchecked(&idev->event);
42560 if (event_count != listener->event_count) {
42561 if (copy_to_user(buf, &event_count, count))
42562 retval = -EFAULT;
42563 @@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
42564 static void uio_vma_open(struct vm_area_struct *vma)
42565 {
42566 struct uio_device *idev = vma->vm_private_data;
42567 - idev->vma_count++;
42568 + local_inc(&idev->vma_count);
42569 }
42570
42571 static void uio_vma_close(struct vm_area_struct *vma)
42572 {
42573 struct uio_device *idev = vma->vm_private_data;
42574 - idev->vma_count--;
42575 + local_dec(&idev->vma_count);
42576 }
42577
42578 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42579 @@ -840,7 +841,7 @@ int __uio_register_device(struct module *owner,
42580 idev->owner = owner;
42581 idev->info = info;
42582 init_waitqueue_head(&idev->wait);
42583 - atomic_set(&idev->event, 0);
42584 + atomic_set_unchecked(&idev->event, 0);
42585
42586 ret = uio_get_minor(idev);
42587 if (ret)
42588 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
42589 index fbea856..06efea6 100644
42590 --- a/drivers/usb/atm/usbatm.c
42591 +++ b/drivers/usb/atm/usbatm.c
42592 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42593 if (printk_ratelimit())
42594 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
42595 __func__, vpi, vci);
42596 - atomic_inc(&vcc->stats->rx_err);
42597 + atomic_inc_unchecked(&vcc->stats->rx_err);
42598 return;
42599 }
42600
42601 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42602 if (length > ATM_MAX_AAL5_PDU) {
42603 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
42604 __func__, length, vcc);
42605 - atomic_inc(&vcc->stats->rx_err);
42606 + atomic_inc_unchecked(&vcc->stats->rx_err);
42607 goto out;
42608 }
42609
42610 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42611 if (sarb->len < pdu_length) {
42612 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
42613 __func__, pdu_length, sarb->len, vcc);
42614 - atomic_inc(&vcc->stats->rx_err);
42615 + atomic_inc_unchecked(&vcc->stats->rx_err);
42616 goto out;
42617 }
42618
42619 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
42620 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
42621 __func__, vcc);
42622 - atomic_inc(&vcc->stats->rx_err);
42623 + atomic_inc_unchecked(&vcc->stats->rx_err);
42624 goto out;
42625 }
42626
42627 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42628 if (printk_ratelimit())
42629 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
42630 __func__, length);
42631 - atomic_inc(&vcc->stats->rx_drop);
42632 + atomic_inc_unchecked(&vcc->stats->rx_drop);
42633 goto out;
42634 }
42635
42636 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42637
42638 vcc->push(vcc, skb);
42639
42640 - atomic_inc(&vcc->stats->rx);
42641 + atomic_inc_unchecked(&vcc->stats->rx);
42642 out:
42643 skb_trim(sarb, 0);
42644 }
42645 @@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned long data)
42646 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
42647
42648 usbatm_pop(vcc, skb);
42649 - atomic_inc(&vcc->stats->tx);
42650 + atomic_inc_unchecked(&vcc->stats->tx);
42651
42652 skb = skb_dequeue(&instance->sndqueue);
42653 }
42654 @@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
42655 if (!left--)
42656 return sprintf(page,
42657 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
42658 - atomic_read(&atm_dev->stats.aal5.tx),
42659 - atomic_read(&atm_dev->stats.aal5.tx_err),
42660 - atomic_read(&atm_dev->stats.aal5.rx),
42661 - atomic_read(&atm_dev->stats.aal5.rx_err),
42662 - atomic_read(&atm_dev->stats.aal5.rx_drop));
42663 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
42664 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
42665 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
42666 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
42667 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
42668
42669 if (!left--) {
42670 if (instance->disconnected)
42671 diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
42672 index 3e564bf..949b448 100644
42673 --- a/drivers/usb/class/cdc-wdm.c
42674 +++ b/drivers/usb/class/cdc-wdm.c
42675 @@ -314,7 +314,7 @@ static ssize_t wdm_write
42676 if (r < 0)
42677 goto outnp;
42678
42679 - if (!file->f_flags && O_NONBLOCK)
42680 + if (!(file->f_flags & O_NONBLOCK))
42681 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
42682 &desc->flags));
42683 else
42684 diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
42685 index 24e6205..fe5a5d4 100644
42686 --- a/drivers/usb/core/hcd.c
42687 +++ b/drivers/usb/core/hcd.c
42688 @@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutdown);
42689
42690 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
42691
42692 -struct usb_mon_operations *mon_ops;
42693 +const struct usb_mon_operations *mon_ops;
42694
42695 /*
42696 * The registration is unlocked.
42697 @@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
42698 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
42699 */
42700
42701 -int usb_mon_register (struct usb_mon_operations *ops)
42702 +int usb_mon_register (const struct usb_mon_operations *ops)
42703 {
42704
42705 if (mon_ops)
42706 diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
42707 index bcbe104..9cfd1c6 100644
42708 --- a/drivers/usb/core/hcd.h
42709 +++ b/drivers/usb/core/hcd.h
42710 @@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) { }
42711 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
42712
42713 struct usb_mon_operations {
42714 - void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
42715 - void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
42716 - void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
42717 + void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
42718 + void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
42719 + void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
42720 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
42721 };
42722
42723 -extern struct usb_mon_operations *mon_ops;
42724 +extern const struct usb_mon_operations *mon_ops;
42725
42726 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
42727 {
42728 @@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb,
42729 (*mon_ops->urb_complete)(bus, urb, status);
42730 }
42731
42732 -int usb_mon_register(struct usb_mon_operations *ops);
42733 +int usb_mon_register(const struct usb_mon_operations *ops);
42734 void usb_mon_deregister(void);
42735
42736 #else
42737 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
42738 index 409cc94..a673bad 100644
42739 --- a/drivers/usb/core/message.c
42740 +++ b/drivers/usb/core/message.c
42741 @@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
42742 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
42743 if (buf) {
42744 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
42745 - if (len > 0) {
42746 - smallbuf = kmalloc(++len, GFP_NOIO);
42747 + if (len++ > 0) {
42748 + smallbuf = kmalloc(len, GFP_NOIO);
42749 if (!smallbuf)
42750 return buf;
42751 memcpy(smallbuf, buf, len);
42752 diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
42753 index 62ff5e7..530b74e 100644
42754 --- a/drivers/usb/misc/appledisplay.c
42755 +++ b/drivers/usb/misc/appledisplay.c
42756 @@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
42757 return pdata->msgdata[1];
42758 }
42759
42760 -static struct backlight_ops appledisplay_bl_data = {
42761 +static const struct backlight_ops appledisplay_bl_data = {
42762 .get_brightness = appledisplay_bl_get_brightness,
42763 .update_status = appledisplay_bl_update_status,
42764 };
42765 diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
42766 index e0c2db3..bd8cb66 100644
42767 --- a/drivers/usb/mon/mon_main.c
42768 +++ b/drivers/usb/mon/mon_main.c
42769 @@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
42770 /*
42771 * Ops
42772 */
42773 -static struct usb_mon_operations mon_ops_0 = {
42774 +static const struct usb_mon_operations mon_ops_0 = {
42775 .urb_submit = mon_submit,
42776 .urb_submit_error = mon_submit_error,
42777 .urb_complete = mon_complete,
42778 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
42779 index d6bea3e..60b250e 100644
42780 --- a/drivers/usb/wusbcore/wa-hc.h
42781 +++ b/drivers/usb/wusbcore/wa-hc.h
42782 @@ -192,7 +192,7 @@ struct wahc {
42783 struct list_head xfer_delayed_list;
42784 spinlock_t xfer_list_lock;
42785 struct work_struct xfer_work;
42786 - atomic_t xfer_id_count;
42787 + atomic_unchecked_t xfer_id_count;
42788 };
42789
42790
42791 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
42792 INIT_LIST_HEAD(&wa->xfer_delayed_list);
42793 spin_lock_init(&wa->xfer_list_lock);
42794 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
42795 - atomic_set(&wa->xfer_id_count, 1);
42796 + atomic_set_unchecked(&wa->xfer_id_count, 1);
42797 }
42798
42799 /**
42800 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
42801 index 613a5fc..3174865 100644
42802 --- a/drivers/usb/wusbcore/wa-xfer.c
42803 +++ b/drivers/usb/wusbcore/wa-xfer.c
42804 @@ -293,7 +293,7 @@ out:
42805 */
42806 static void wa_xfer_id_init(struct wa_xfer *xfer)
42807 {
42808 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
42809 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
42810 }
42811
42812 /*
42813 diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c
42814 index aa42fce..f8a828c 100644
42815 --- a/drivers/uwb/wlp/messages.c
42816 +++ b/drivers/uwb/wlp/messages.c
42817 @@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct sk_buff *skb)
42818 size_t len = skb->len;
42819 size_t used;
42820 ssize_t result;
42821 - struct wlp_nonce enonce, rnonce;
42822 + struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
42823 enum wlp_assc_error assc_err;
42824 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
42825 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
42826 diff --git a/drivers/uwb/wlp/sysfs.c b/drivers/uwb/wlp/sysfs.c
42827 index 0370399..6627c94 100644
42828 --- a/drivers/uwb/wlp/sysfs.c
42829 +++ b/drivers/uwb/wlp/sysfs.c
42830 @@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobject *kobj, struct attribute *attr,
42831 return ret;
42832 }
42833
42834 -static
42835 -struct sysfs_ops wss_sysfs_ops = {
42836 +static const struct sysfs_ops wss_sysfs_ops = {
42837 .show = wlp_wss_attr_show,
42838 .store = wlp_wss_attr_store,
42839 };
42840 diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
42841 index d5e8010..5687b56 100644
42842 --- a/drivers/video/atmel_lcdfb.c
42843 +++ b/drivers/video/atmel_lcdfb.c
42844 @@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struct backlight_device *bl)
42845 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
42846 }
42847
42848 -static struct backlight_ops atmel_lcdc_bl_ops = {
42849 +static const struct backlight_ops atmel_lcdc_bl_ops = {
42850 .update_status = atmel_bl_update_status,
42851 .get_brightness = atmel_bl_get_brightness,
42852 };
42853 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
42854 index e4e4d43..66bcbcc 100644
42855 --- a/drivers/video/aty/aty128fb.c
42856 +++ b/drivers/video/aty/aty128fb.c
42857 @@ -149,7 +149,7 @@ enum {
42858 };
42859
42860 /* Must match above enum */
42861 -static const char *r128_family[] __devinitdata = {
42862 +static const char *r128_family[] __devinitconst = {
42863 "AGP",
42864 "PCI",
42865 "PRO AGP",
42866 @@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(struct backlight_device *bd)
42867 return bd->props.brightness;
42868 }
42869
42870 -static struct backlight_ops aty128_bl_data = {
42871 +static const struct backlight_ops aty128_bl_data = {
42872 .get_brightness = aty128_bl_get_brightness,
42873 .update_status = aty128_bl_update_status,
42874 };
42875 diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
42876 index 913b4a4..9295a38 100644
42877 --- a/drivers/video/aty/atyfb_base.c
42878 +++ b/drivers/video/aty/atyfb_base.c
42879 @@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct backlight_device *bd)
42880 return bd->props.brightness;
42881 }
42882
42883 -static struct backlight_ops aty_bl_data = {
42884 +static const struct backlight_ops aty_bl_data = {
42885 .get_brightness = aty_bl_get_brightness,
42886 .update_status = aty_bl_update_status,
42887 };
42888 diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
42889 index 1a056ad..221bd6a 100644
42890 --- a/drivers/video/aty/radeon_backlight.c
42891 +++ b/drivers/video/aty/radeon_backlight.c
42892 @@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(struct backlight_device *bd)
42893 return bd->props.brightness;
42894 }
42895
42896 -static struct backlight_ops radeon_bl_data = {
42897 +static const struct backlight_ops radeon_bl_data = {
42898 .get_brightness = radeon_bl_get_brightness,
42899 .update_status = radeon_bl_update_status,
42900 };
42901 diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
42902 index ad05da5..3cb2cb9 100644
42903 --- a/drivers/video/backlight/adp5520_bl.c
42904 +++ b/drivers/video/backlight/adp5520_bl.c
42905 @@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(struct backlight_device *bl)
42906 return error ? data->current_brightness : reg_val;
42907 }
42908
42909 -static struct backlight_ops adp5520_bl_ops = {
42910 +static const struct backlight_ops adp5520_bl_ops = {
42911 .update_status = adp5520_bl_update_status,
42912 .get_brightness = adp5520_bl_get_brightness,
42913 };
42914 diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
42915 index 2c3bdfc..d769b0b 100644
42916 --- a/drivers/video/backlight/adx_bl.c
42917 +++ b/drivers/video/backlight/adx_bl.c
42918 @@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct fb_info *fb)
42919 return 1;
42920 }
42921
42922 -static struct backlight_ops adx_backlight_ops = {
42923 +static const struct backlight_ops adx_backlight_ops = {
42924 .options = 0,
42925 .update_status = adx_backlight_update_status,
42926 .get_brightness = adx_backlight_get_brightness,
42927 diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
42928 index 505c082..6b6b3cc 100644
42929 --- a/drivers/video/backlight/atmel-pwm-bl.c
42930 +++ b/drivers/video/backlight/atmel-pwm-bl.c
42931 @@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
42932 return pwm_channel_enable(&pwmbl->pwmc);
42933 }
42934
42935 -static struct backlight_ops atmel_pwm_bl_ops = {
42936 +static const struct backlight_ops atmel_pwm_bl_ops = {
42937 .get_brightness = atmel_pwm_bl_get_intensity,
42938 .update_status = atmel_pwm_bl_set_intensity,
42939 };
42940 diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
42941 index 5e20e6e..89025e6 100644
42942 --- a/drivers/video/backlight/backlight.c
42943 +++ b/drivers/video/backlight/backlight.c
42944 @@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
42945 * ERR_PTR() or a pointer to the newly allocated device.
42946 */
42947 struct backlight_device *backlight_device_register(const char *name,
42948 - struct device *parent, void *devdata, struct backlight_ops *ops)
42949 + struct device *parent, void *devdata, const struct backlight_ops *ops)
42950 {
42951 struct backlight_device *new_bd;
42952 int rc;
42953 diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
42954 index 9677494..b4bcf80 100644
42955 --- a/drivers/video/backlight/corgi_lcd.c
42956 +++ b/drivers/video/backlight/corgi_lcd.c
42957 @@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit)
42958 }
42959 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
42960
42961 -static struct backlight_ops corgi_bl_ops = {
42962 +static const struct backlight_ops corgi_bl_ops = {
42963 .get_brightness = corgi_bl_get_intensity,
42964 .update_status = corgi_bl_update_status,
42965 };
42966 diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
42967 index b9fe62b..2914bf1 100644
42968 --- a/drivers/video/backlight/cr_bllcd.c
42969 +++ b/drivers/video/backlight/cr_bllcd.c
42970 @@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(struct backlight_device *bd)
42971 return intensity;
42972 }
42973
42974 -static struct backlight_ops cr_backlight_ops = {
42975 +static const struct backlight_ops cr_backlight_ops = {
42976 .get_brightness = cr_backlight_get_intensity,
42977 .update_status = cr_backlight_set_intensity,
42978 };
42979 diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
42980 index 701a108..feacfd5 100644
42981 --- a/drivers/video/backlight/da903x_bl.c
42982 +++ b/drivers/video/backlight/da903x_bl.c
42983 @@ -94,7 +94,7 @@ static int da903x_backlight_get_brightness(struct backlight_device *bl)
42984 return data->current_brightness;
42985 }
42986
42987 -static struct backlight_ops da903x_backlight_ops = {
42988 +static const struct backlight_ops da903x_backlight_ops = {
42989 .update_status = da903x_backlight_update_status,
42990 .get_brightness = da903x_backlight_get_brightness,
42991 };
42992 diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
42993 index 6d27f62..e6d348e 100644
42994 --- a/drivers/video/backlight/generic_bl.c
42995 +++ b/drivers/video/backlight/generic_bl.c
42996 @@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
42997 }
42998 EXPORT_SYMBOL(corgibl_limit_intensity);
42999
43000 -static struct backlight_ops genericbl_ops = {
43001 +static const struct backlight_ops genericbl_ops = {
43002 .options = BL_CORE_SUSPENDRESUME,
43003 .get_brightness = genericbl_get_intensity,
43004 .update_status = genericbl_send_intensity,
43005 diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
43006 index 7fb4eef..f7cc528 100644
43007 --- a/drivers/video/backlight/hp680_bl.c
43008 +++ b/drivers/video/backlight/hp680_bl.c
43009 @@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct backlight_device *bd)
43010 return current_intensity;
43011 }
43012
43013 -static struct backlight_ops hp680bl_ops = {
43014 +static const struct backlight_ops hp680bl_ops = {
43015 .get_brightness = hp680bl_get_intensity,
43016 .update_status = hp680bl_set_intensity,
43017 };
43018 diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
43019 index 7aed256..db9071f 100644
43020 --- a/drivers/video/backlight/jornada720_bl.c
43021 +++ b/drivers/video/backlight/jornada720_bl.c
43022 @@ -93,7 +93,7 @@ out:
43023 return ret;
43024 }
43025
43026 -static struct backlight_ops jornada_bl_ops = {
43027 +static const struct backlight_ops jornada_bl_ops = {
43028 .get_brightness = jornada_bl_get_brightness,
43029 .update_status = jornada_bl_update_status,
43030 .options = BL_CORE_SUSPENDRESUME,
43031 diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
43032 index a38fda1..939e7b8 100644
43033 --- a/drivers/video/backlight/kb3886_bl.c
43034 +++ b/drivers/video/backlight/kb3886_bl.c
43035 @@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct backlight_device *bd)
43036 return kb3886bl_intensity;
43037 }
43038
43039 -static struct backlight_ops kb3886bl_ops = {
43040 +static const struct backlight_ops kb3886bl_ops = {
43041 .get_brightness = kb3886bl_get_intensity,
43042 .update_status = kb3886bl_send_intensity,
43043 };
43044 diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
43045 index 6b488b8..00a9591 100644
43046 --- a/drivers/video/backlight/locomolcd.c
43047 +++ b/drivers/video/backlight/locomolcd.c
43048 @@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struct backlight_device *bd)
43049 return current_intensity;
43050 }
43051
43052 -static struct backlight_ops locomobl_data = {
43053 +static const struct backlight_ops locomobl_data = {
43054 .get_brightness = locomolcd_get_intensity,
43055 .update_status = locomolcd_set_intensity,
43056 };
43057 diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
43058 index 99bdfa8..3dac448 100644
43059 --- a/drivers/video/backlight/mbp_nvidia_bl.c
43060 +++ b/drivers/video/backlight/mbp_nvidia_bl.c
43061 @@ -33,7 +33,7 @@ struct dmi_match_data {
43062 unsigned long iostart;
43063 unsigned long iolen;
43064 /* Backlight operations structure. */
43065 - struct backlight_ops backlight_ops;
43066 + const struct backlight_ops backlight_ops;
43067 };
43068
43069 /* Module parameters. */
43070 diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
43071 index cbad67e..3cf900e 100644
43072 --- a/drivers/video/backlight/omap1_bl.c
43073 +++ b/drivers/video/backlight/omap1_bl.c
43074 @@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct backlight_device *dev)
43075 return bl->current_intensity;
43076 }
43077
43078 -static struct backlight_ops omapbl_ops = {
43079 +static const struct backlight_ops omapbl_ops = {
43080 .get_brightness = omapbl_get_intensity,
43081 .update_status = omapbl_update_status,
43082 };
43083 diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
43084 index 9edaf24..075786e 100644
43085 --- a/drivers/video/backlight/progear_bl.c
43086 +++ b/drivers/video/backlight/progear_bl.c
43087 @@ -54,7 +54,7 @@ static int progearbl_get_intensity(struct backlight_device *bd)
43088 return intensity - HW_LEVEL_MIN;
43089 }
43090
43091 -static struct backlight_ops progearbl_ops = {
43092 +static const struct backlight_ops progearbl_ops = {
43093 .get_brightness = progearbl_get_intensity,
43094 .update_status = progearbl_set_intensity,
43095 };
43096 diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
43097 index 8871662..df9e0b3 100644
43098 --- a/drivers/video/backlight/pwm_bl.c
43099 +++ b/drivers/video/backlight/pwm_bl.c
43100 @@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(struct backlight_device *bl)
43101 return bl->props.brightness;
43102 }
43103
43104 -static struct backlight_ops pwm_backlight_ops = {
43105 +static const struct backlight_ops pwm_backlight_ops = {
43106 .update_status = pwm_backlight_update_status,
43107 .get_brightness = pwm_backlight_get_brightness,
43108 };
43109 diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
43110 index 43edbad..e14ce4d 100644
43111 --- a/drivers/video/backlight/tosa_bl.c
43112 +++ b/drivers/video/backlight/tosa_bl.c
43113 @@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct backlight_device *dev)
43114 return props->brightness;
43115 }
43116
43117 -static struct backlight_ops bl_ops = {
43118 +static const struct backlight_ops bl_ops = {
43119 .get_brightness = tosa_bl_get_brightness,
43120 .update_status = tosa_bl_update_status,
43121 };
43122 diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
43123 index 467bdb7..e32add3 100644
43124 --- a/drivers/video/backlight/wm831x_bl.c
43125 +++ b/drivers/video/backlight/wm831x_bl.c
43126 @@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightness(struct backlight_device *bl)
43127 return data->current_brightness;
43128 }
43129
43130 -static struct backlight_ops wm831x_backlight_ops = {
43131 +static const struct backlight_ops wm831x_backlight_ops = {
43132 .options = BL_CORE_SUSPENDRESUME,
43133 .update_status = wm831x_backlight_update_status,
43134 .get_brightness = wm831x_backlight_get_brightness,
43135 diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
43136 index e49ae5e..db4e6f7 100644
43137 --- a/drivers/video/bf54x-lq043fb.c
43138 +++ b/drivers/video/bf54x-lq043fb.c
43139 @@ -463,7 +463,7 @@ static int bl_get_brightness(struct backlight_device *bd)
43140 return 0;
43141 }
43142
43143 -static struct backlight_ops bfin_lq043fb_bl_ops = {
43144 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
43145 .get_brightness = bl_get_brightness,
43146 };
43147
43148 diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
43149 index 2c72a7c..d523e52 100644
43150 --- a/drivers/video/bfin-t350mcqb-fb.c
43151 +++ b/drivers/video/bfin-t350mcqb-fb.c
43152 @@ -381,7 +381,7 @@ static int bl_get_brightness(struct backlight_device *bd)
43153 return 0;
43154 }
43155
43156 -static struct backlight_ops bfin_lq043fb_bl_ops = {
43157 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
43158 .get_brightness = bl_get_brightness,
43159 };
43160
43161 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
43162 index f53b9f1..958bf4e 100644
43163 --- a/drivers/video/fbcmap.c
43164 +++ b/drivers/video/fbcmap.c
43165 @@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
43166 rc = -ENODEV;
43167 goto out;
43168 }
43169 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
43170 - !info->fbops->fb_setcmap)) {
43171 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
43172 rc = -EINVAL;
43173 goto out1;
43174 }
43175 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
43176 index 99bbd28..ad3829e 100644
43177 --- a/drivers/video/fbmem.c
43178 +++ b/drivers/video/fbmem.c
43179 @@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
43180 image->dx += image->width + 8;
43181 }
43182 } else if (rotate == FB_ROTATE_UD) {
43183 - for (x = 0; x < num && image->dx >= 0; x++) {
43184 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
43185 info->fbops->fb_imageblit(info, image);
43186 image->dx -= image->width + 8;
43187 }
43188 @@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
43189 image->dy += image->height + 8;
43190 }
43191 } else if (rotate == FB_ROTATE_CCW) {
43192 - for (x = 0; x < num && image->dy >= 0; x++) {
43193 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
43194 info->fbops->fb_imageblit(info, image);
43195 image->dy -= image->height + 8;
43196 }
43197 @@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
43198 int flags = info->flags;
43199 int ret = 0;
43200
43201 + pax_track_stack();
43202 +
43203 if (var->activate & FB_ACTIVATE_INV_MODE) {
43204 struct fb_videomode mode1, mode2;
43205
43206 @@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
43207 void __user *argp = (void __user *)arg;
43208 long ret = 0;
43209
43210 + pax_track_stack();
43211 +
43212 switch (cmd) {
43213 case FBIOGET_VSCREENINFO:
43214 if (!lock_fb_info(info))
43215 @@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
43216 return -EFAULT;
43217 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
43218 return -EINVAL;
43219 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
43220 + if (con2fb.framebuffer >= FB_MAX)
43221 return -EINVAL;
43222 if (!registered_fb[con2fb.framebuffer])
43223 request_module("fb%d", con2fb.framebuffer);
43224 diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
43225 index f20eff8..3e4f622 100644
43226 --- a/drivers/video/geode/gx1fb_core.c
43227 +++ b/drivers/video/geode/gx1fb_core.c
43228 @@ -30,7 +30,7 @@ static int crt_option = 1;
43229 static char panel_option[32] = "";
43230
43231 /* Modes relevant to the GX1 (taken from modedb.c) */
43232 -static const struct fb_videomode __initdata gx1_modedb[] = {
43233 +static const struct fb_videomode __initconst gx1_modedb[] = {
43234 /* 640x480-60 VESA */
43235 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
43236 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
43237 diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
43238 index 896e53d..4d87d0b 100644
43239 --- a/drivers/video/gxt4500.c
43240 +++ b/drivers/video/gxt4500.c
43241 @@ -156,7 +156,7 @@ struct gxt4500_par {
43242 static char *mode_option;
43243
43244 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
43245 -static const struct fb_videomode defaultmode __devinitdata = {
43246 +static const struct fb_videomode defaultmode __devinitconst = {
43247 .refresh = 60,
43248 .xres = 1280,
43249 .yres = 1024,
43250 @@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
43251 return 0;
43252 }
43253
43254 -static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
43255 +static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
43256 .id = "IBM GXT4500P",
43257 .type = FB_TYPE_PACKED_PIXELS,
43258 .visual = FB_VISUAL_PSEUDOCOLOR,
43259 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
43260 index f5bedee..28c6028 100644
43261 --- a/drivers/video/i810/i810_accel.c
43262 +++ b/drivers/video/i810/i810_accel.c
43263 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
43264 }
43265 }
43266 printk("ringbuffer lockup!!!\n");
43267 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
43268 i810_report_error(mmio);
43269 par->dev_flags |= LOCKUP;
43270 info->pixmap.scan_align = 1;
43271 diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
43272 index 5743ea2..457f82c 100644
43273 --- a/drivers/video/i810/i810_main.c
43274 +++ b/drivers/video/i810/i810_main.c
43275 @@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
43276 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
43277
43278 /* PCI */
43279 -static const char *i810_pci_list[] __devinitdata = {
43280 +static const char *i810_pci_list[] __devinitconst = {
43281 "Intel(R) 810 Framebuffer Device" ,
43282 "Intel(R) 810-DC100 Framebuffer Device" ,
43283 "Intel(R) 810E Framebuffer Device" ,
43284 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
43285 index 3c14e43..eafa544 100644
43286 --- a/drivers/video/logo/logo_linux_clut224.ppm
43287 +++ b/drivers/video/logo/logo_linux_clut224.ppm
43288 @@ -1,1604 +1,1123 @@
43289 P3
43290 -# Standard 224-color Linux logo
43291 80 80
43292 255
43293 - 0 0 0 0 0 0 0 0 0 0 0 0
43294 - 0 0 0 0 0 0 0 0 0 0 0 0
43295 - 0 0 0 0 0 0 0 0 0 0 0 0
43296 - 0 0 0 0 0 0 0 0 0 0 0 0
43297 - 0 0 0 0 0 0 0 0 0 0 0 0
43298 - 0 0 0 0 0 0 0 0 0 0 0 0
43299 - 0 0 0 0 0 0 0 0 0 0 0 0
43300 - 0 0 0 0 0 0 0 0 0 0 0 0
43301 - 0 0 0 0 0 0 0 0 0 0 0 0
43302 - 6 6 6 6 6 6 10 10 10 10 10 10
43303 - 10 10 10 6 6 6 6 6 6 6 6 6
43304 - 0 0 0 0 0 0 0 0 0 0 0 0
43305 - 0 0 0 0 0 0 0 0 0 0 0 0
43306 - 0 0 0 0 0 0 0 0 0 0 0 0
43307 - 0 0 0 0 0 0 0 0 0 0 0 0
43308 - 0 0 0 0 0 0 0 0 0 0 0 0
43309 - 0 0 0 0 0 0 0 0 0 0 0 0
43310 - 0 0 0 0 0 0 0 0 0 0 0 0
43311 - 0 0 0 0 0 0 0 0 0 0 0 0
43312 - 0 0 0 0 0 0 0 0 0 0 0 0
43313 - 0 0 0 0 0 0 0 0 0 0 0 0
43314 - 0 0 0 0 0 0 0 0 0 0 0 0
43315 - 0 0 0 0 0 0 0 0 0 0 0 0
43316 - 0 0 0 0 0 0 0 0 0 0 0 0
43317 - 0 0 0 0 0 0 0 0 0 0 0 0
43318 - 0 0 0 0 0 0 0 0 0 0 0 0
43319 - 0 0 0 0 0 0 0 0 0 0 0 0
43320 - 0 0 0 0 0 0 0 0 0 0 0 0
43321 - 0 0 0 6 6 6 10 10 10 14 14 14
43322 - 22 22 22 26 26 26 30 30 30 34 34 34
43323 - 30 30 30 30 30 30 26 26 26 18 18 18
43324 - 14 14 14 10 10 10 6 6 6 0 0 0
43325 - 0 0 0 0 0 0 0 0 0 0 0 0
43326 - 0 0 0 0 0 0 0 0 0 0 0 0
43327 - 0 0 0 0 0 0 0 0 0 0 0 0
43328 - 0 0 0 0 0 0 0 0 0 0 0 0
43329 - 0 0 0 0 0 0 0 0 0 0 0 0
43330 - 0 0 0 0 0 0 0 0 0 0 0 0
43331 - 0 0 0 0 0 0 0 0 0 0 0 0
43332 - 0 0 0 0 0 0 0 0 0 0 0 0
43333 - 0 0 0 0 0 0 0 0 0 0 0 0
43334 - 0 0 0 0 0 1 0 0 1 0 0 0
43335 - 0 0 0 0 0 0 0 0 0 0 0 0
43336 - 0 0 0 0 0 0 0 0 0 0 0 0
43337 - 0 0 0 0 0 0 0 0 0 0 0 0
43338 - 0 0 0 0 0 0 0 0 0 0 0 0
43339 - 0 0 0 0 0 0 0 0 0 0 0 0
43340 - 0 0 0 0 0 0 0 0 0 0 0 0
43341 - 6 6 6 14 14 14 26 26 26 42 42 42
43342 - 54 54 54 66 66 66 78 78 78 78 78 78
43343 - 78 78 78 74 74 74 66 66 66 54 54 54
43344 - 42 42 42 26 26 26 18 18 18 10 10 10
43345 - 6 6 6 0 0 0 0 0 0 0 0 0
43346 - 0 0 0 0 0 0 0 0 0 0 0 0
43347 - 0 0 0 0 0 0 0 0 0 0 0 0
43348 - 0 0 0 0 0 0 0 0 0 0 0 0
43349 - 0 0 0 0 0 0 0 0 0 0 0 0
43350 - 0 0 0 0 0 0 0 0 0 0 0 0
43351 - 0 0 0 0 0 0 0 0 0 0 0 0
43352 - 0 0 0 0 0 0 0 0 0 0 0 0
43353 - 0 0 0 0 0 0 0 0 0 0 0 0
43354 - 0 0 1 0 0 0 0 0 0 0 0 0
43355 - 0 0 0 0 0 0 0 0 0 0 0 0
43356 - 0 0 0 0 0 0 0 0 0 0 0 0
43357 - 0 0 0 0 0 0 0 0 0 0 0 0
43358 - 0 0 0 0 0 0 0 0 0 0 0 0
43359 - 0 0 0 0 0 0 0 0 0 0 0 0
43360 - 0 0 0 0 0 0 0 0 0 10 10 10
43361 - 22 22 22 42 42 42 66 66 66 86 86 86
43362 - 66 66 66 38 38 38 38 38 38 22 22 22
43363 - 26 26 26 34 34 34 54 54 54 66 66 66
43364 - 86 86 86 70 70 70 46 46 46 26 26 26
43365 - 14 14 14 6 6 6 0 0 0 0 0 0
43366 - 0 0 0 0 0 0 0 0 0 0 0 0
43367 - 0 0 0 0 0 0 0 0 0 0 0 0
43368 - 0 0 0 0 0 0 0 0 0 0 0 0
43369 - 0 0 0 0 0 0 0 0 0 0 0 0
43370 - 0 0 0 0 0 0 0 0 0 0 0 0
43371 - 0 0 0 0 0 0 0 0 0 0 0 0
43372 - 0 0 0 0 0 0 0 0 0 0 0 0
43373 - 0 0 0 0 0 0 0 0 0 0 0 0
43374 - 0 0 1 0 0 1 0 0 1 0 0 0
43375 - 0 0 0 0 0 0 0 0 0 0 0 0
43376 - 0 0 0 0 0 0 0 0 0 0 0 0
43377 - 0 0 0 0 0 0 0 0 0 0 0 0
43378 - 0 0 0 0 0 0 0 0 0 0 0 0
43379 - 0 0 0 0 0 0 0 0 0 0 0 0
43380 - 0 0 0 0 0 0 10 10 10 26 26 26
43381 - 50 50 50 82 82 82 58 58 58 6 6 6
43382 - 2 2 6 2 2 6 2 2 6 2 2 6
43383 - 2 2 6 2 2 6 2 2 6 2 2 6
43384 - 6 6 6 54 54 54 86 86 86 66 66 66
43385 - 38 38 38 18 18 18 6 6 6 0 0 0
43386 - 0 0 0 0 0 0 0 0 0 0 0 0
43387 - 0 0 0 0 0 0 0 0 0 0 0 0
43388 - 0 0 0 0 0 0 0 0 0 0 0 0
43389 - 0 0 0 0 0 0 0 0 0 0 0 0
43390 - 0 0 0 0 0 0 0 0 0 0 0 0
43391 - 0 0 0 0 0 0 0 0 0 0 0 0
43392 - 0 0 0 0 0 0 0 0 0 0 0 0
43393 - 0 0 0 0 0 0 0 0 0 0 0 0
43394 - 0 0 0 0 0 0 0 0 0 0 0 0
43395 - 0 0 0 0 0 0 0 0 0 0 0 0
43396 - 0 0 0 0 0 0 0 0 0 0 0 0
43397 - 0 0 0 0 0 0 0 0 0 0 0 0
43398 - 0 0 0 0 0 0 0 0 0 0 0 0
43399 - 0 0 0 0 0 0 0 0 0 0 0 0
43400 - 0 0 0 6 6 6 22 22 22 50 50 50
43401 - 78 78 78 34 34 34 2 2 6 2 2 6
43402 - 2 2 6 2 2 6 2 2 6 2 2 6
43403 - 2 2 6 2 2 6 2 2 6 2 2 6
43404 - 2 2 6 2 2 6 6 6 6 70 70 70
43405 - 78 78 78 46 46 46 22 22 22 6 6 6
43406 - 0 0 0 0 0 0 0 0 0 0 0 0
43407 - 0 0 0 0 0 0 0 0 0 0 0 0
43408 - 0 0 0 0 0 0 0 0 0 0 0 0
43409 - 0 0 0 0 0 0 0 0 0 0 0 0
43410 - 0 0 0 0 0 0 0 0 0 0 0 0
43411 - 0 0 0 0 0 0 0 0 0 0 0 0
43412 - 0 0 0 0 0 0 0 0 0 0 0 0
43413 - 0 0 0 0 0 0 0 0 0 0 0 0
43414 - 0 0 1 0 0 1 0 0 1 0 0 0
43415 - 0 0 0 0 0 0 0 0 0 0 0 0
43416 - 0 0 0 0 0 0 0 0 0 0 0 0
43417 - 0 0 0 0 0 0 0 0 0 0 0 0
43418 - 0 0 0 0 0 0 0 0 0 0 0 0
43419 - 0 0 0 0 0 0 0 0 0 0 0 0
43420 - 6 6 6 18 18 18 42 42 42 82 82 82
43421 - 26 26 26 2 2 6 2 2 6 2 2 6
43422 - 2 2 6 2 2 6 2 2 6 2 2 6
43423 - 2 2 6 2 2 6 2 2 6 14 14 14
43424 - 46 46 46 34 34 34 6 6 6 2 2 6
43425 - 42 42 42 78 78 78 42 42 42 18 18 18
43426 - 6 6 6 0 0 0 0 0 0 0 0 0
43427 - 0 0 0 0 0 0 0 0 0 0 0 0
43428 - 0 0 0 0 0 0 0 0 0 0 0 0
43429 - 0 0 0 0 0 0 0 0 0 0 0 0
43430 - 0 0 0 0 0 0 0 0 0 0 0 0
43431 - 0 0 0 0 0 0 0 0 0 0 0 0
43432 - 0 0 0 0 0 0 0 0 0 0 0 0
43433 - 0 0 0 0 0 0 0 0 0 0 0 0
43434 - 0 0 1 0 0 0 0 0 1 0 0 0
43435 - 0 0 0 0 0 0 0 0 0 0 0 0
43436 - 0 0 0 0 0 0 0 0 0 0 0 0
43437 - 0 0 0 0 0 0 0 0 0 0 0 0
43438 - 0 0 0 0 0 0 0 0 0 0 0 0
43439 - 0 0 0 0 0 0 0 0 0 0 0 0
43440 - 10 10 10 30 30 30 66 66 66 58 58 58
43441 - 2 2 6 2 2 6 2 2 6 2 2 6
43442 - 2 2 6 2 2 6 2 2 6 2 2 6
43443 - 2 2 6 2 2 6 2 2 6 26 26 26
43444 - 86 86 86 101 101 101 46 46 46 10 10 10
43445 - 2 2 6 58 58 58 70 70 70 34 34 34
43446 - 10 10 10 0 0 0 0 0 0 0 0 0
43447 - 0 0 0 0 0 0 0 0 0 0 0 0
43448 - 0 0 0 0 0 0 0 0 0 0 0 0
43449 - 0 0 0 0 0 0 0 0 0 0 0 0
43450 - 0 0 0 0 0 0 0 0 0 0 0 0
43451 - 0 0 0 0 0 0 0 0 0 0 0 0
43452 - 0 0 0 0 0 0 0 0 0 0 0 0
43453 - 0 0 0 0 0 0 0 0 0 0 0 0
43454 - 0 0 1 0 0 1 0 0 1 0 0 0
43455 - 0 0 0 0 0 0 0 0 0 0 0 0
43456 - 0 0 0 0 0 0 0 0 0 0 0 0
43457 - 0 0 0 0 0 0 0 0 0 0 0 0
43458 - 0 0 0 0 0 0 0 0 0 0 0 0
43459 - 0 0 0 0 0 0 0 0 0 0 0 0
43460 - 14 14 14 42 42 42 86 86 86 10 10 10
43461 - 2 2 6 2 2 6 2 2 6 2 2 6
43462 - 2 2 6 2 2 6 2 2 6 2 2 6
43463 - 2 2 6 2 2 6 2 2 6 30 30 30
43464 - 94 94 94 94 94 94 58 58 58 26 26 26
43465 - 2 2 6 6 6 6 78 78 78 54 54 54
43466 - 22 22 22 6 6 6 0 0 0 0 0 0
43467 - 0 0 0 0 0 0 0 0 0 0 0 0
43468 - 0 0 0 0 0 0 0 0 0 0 0 0
43469 - 0 0 0 0 0 0 0 0 0 0 0 0
43470 - 0 0 0 0 0 0 0 0 0 0 0 0
43471 - 0 0 0 0 0 0 0 0 0 0 0 0
43472 - 0 0 0 0 0 0 0 0 0 0 0 0
43473 - 0 0 0 0 0 0 0 0 0 0 0 0
43474 - 0 0 0 0 0 0 0 0 0 0 0 0
43475 - 0 0 0 0 0 0 0 0 0 0 0 0
43476 - 0 0 0 0 0 0 0 0 0 0 0 0
43477 - 0 0 0 0 0 0 0 0 0 0 0 0
43478 - 0 0 0 0 0 0 0 0 0 0 0 0
43479 - 0 0 0 0 0 0 0 0 0 6 6 6
43480 - 22 22 22 62 62 62 62 62 62 2 2 6
43481 - 2 2 6 2 2 6 2 2 6 2 2 6
43482 - 2 2 6 2 2 6 2 2 6 2 2 6
43483 - 2 2 6 2 2 6 2 2 6 26 26 26
43484 - 54 54 54 38 38 38 18 18 18 10 10 10
43485 - 2 2 6 2 2 6 34 34 34 82 82 82
43486 - 38 38 38 14 14 14 0 0 0 0 0 0
43487 - 0 0 0 0 0 0 0 0 0 0 0 0
43488 - 0 0 0 0 0 0 0 0 0 0 0 0
43489 - 0 0 0 0 0 0 0 0 0 0 0 0
43490 - 0 0 0 0 0 0 0 0 0 0 0 0
43491 - 0 0 0 0 0 0 0 0 0 0 0 0
43492 - 0 0 0 0 0 0 0 0 0 0 0 0
43493 - 0 0 0 0 0 0 0 0 0 0 0 0
43494 - 0 0 0 0 0 1 0 0 1 0 0 0
43495 - 0 0 0 0 0 0 0 0 0 0 0 0
43496 - 0 0 0 0 0 0 0 0 0 0 0 0
43497 - 0 0 0 0 0 0 0 0 0 0 0 0
43498 - 0 0 0 0 0 0 0 0 0 0 0 0
43499 - 0 0 0 0 0 0 0 0 0 6 6 6
43500 - 30 30 30 78 78 78 30 30 30 2 2 6
43501 - 2 2 6 2 2 6 2 2 6 2 2 6
43502 - 2 2 6 2 2 6 2 2 6 2 2 6
43503 - 2 2 6 2 2 6 2 2 6 10 10 10
43504 - 10 10 10 2 2 6 2 2 6 2 2 6
43505 - 2 2 6 2 2 6 2 2 6 78 78 78
43506 - 50 50 50 18 18 18 6 6 6 0 0 0
43507 - 0 0 0 0 0 0 0 0 0 0 0 0
43508 - 0 0 0 0 0 0 0 0 0 0 0 0
43509 - 0 0 0 0 0 0 0 0 0 0 0 0
43510 - 0 0 0 0 0 0 0 0 0 0 0 0
43511 - 0 0 0 0 0 0 0 0 0 0 0 0
43512 - 0 0 0 0 0 0 0 0 0 0 0 0
43513 - 0 0 0 0 0 0 0 0 0 0 0 0
43514 - 0 0 1 0 0 0 0 0 0 0 0 0
43515 - 0 0 0 0 0 0 0 0 0 0 0 0
43516 - 0 0 0 0 0 0 0 0 0 0 0 0
43517 - 0 0 0 0 0 0 0 0 0 0 0 0
43518 - 0 0 0 0 0 0 0 0 0 0 0 0
43519 - 0 0 0 0 0 0 0 0 0 10 10 10
43520 - 38 38 38 86 86 86 14 14 14 2 2 6
43521 - 2 2 6 2 2 6 2 2 6 2 2 6
43522 - 2 2 6 2 2 6 2 2 6 2 2 6
43523 - 2 2 6 2 2 6 2 2 6 2 2 6
43524 - 2 2 6 2 2 6 2 2 6 2 2 6
43525 - 2 2 6 2 2 6 2 2 6 54 54 54
43526 - 66 66 66 26 26 26 6 6 6 0 0 0
43527 - 0 0 0 0 0 0 0 0 0 0 0 0
43528 - 0 0 0 0 0 0 0 0 0 0 0 0
43529 - 0 0 0 0 0 0 0 0 0 0 0 0
43530 - 0 0 0 0 0 0 0 0 0 0 0 0
43531 - 0 0 0 0 0 0 0 0 0 0 0 0
43532 - 0 0 0 0 0 0 0 0 0 0 0 0
43533 - 0 0 0 0 0 0 0 0 0 0 0 0
43534 - 0 0 0 0 0 1 0 0 1 0 0 0
43535 - 0 0 0 0 0 0 0 0 0 0 0 0
43536 - 0 0 0 0 0 0 0 0 0 0 0 0
43537 - 0 0 0 0 0 0 0 0 0 0 0 0
43538 - 0 0 0 0 0 0 0 0 0 0 0 0
43539 - 0 0 0 0 0 0 0 0 0 14 14 14
43540 - 42 42 42 82 82 82 2 2 6 2 2 6
43541 - 2 2 6 6 6 6 10 10 10 2 2 6
43542 - 2 2 6 2 2 6 2 2 6 2 2 6
43543 - 2 2 6 2 2 6 2 2 6 6 6 6
43544 - 14 14 14 10 10 10 2 2 6 2 2 6
43545 - 2 2 6 2 2 6 2 2 6 18 18 18
43546 - 82 82 82 34 34 34 10 10 10 0 0 0
43547 - 0 0 0 0 0 0 0 0 0 0 0 0
43548 - 0 0 0 0 0 0 0 0 0 0 0 0
43549 - 0 0 0 0 0 0 0 0 0 0 0 0
43550 - 0 0 0 0 0 0 0 0 0 0 0 0
43551 - 0 0 0 0 0 0 0 0 0 0 0 0
43552 - 0 0 0 0 0 0 0 0 0 0 0 0
43553 - 0 0 0 0 0 0 0 0 0 0 0 0
43554 - 0 0 1 0 0 0 0 0 0 0 0 0
43555 - 0 0 0 0 0 0 0 0 0 0 0 0
43556 - 0 0 0 0 0 0 0 0 0 0 0 0
43557 - 0 0 0 0 0 0 0 0 0 0 0 0
43558 - 0 0 0 0 0 0 0 0 0 0 0 0
43559 - 0 0 0 0 0 0 0 0 0 14 14 14
43560 - 46 46 46 86 86 86 2 2 6 2 2 6
43561 - 6 6 6 6 6 6 22 22 22 34 34 34
43562 - 6 6 6 2 2 6 2 2 6 2 2 6
43563 - 2 2 6 2 2 6 18 18 18 34 34 34
43564 - 10 10 10 50 50 50 22 22 22 2 2 6
43565 - 2 2 6 2 2 6 2 2 6 10 10 10
43566 - 86 86 86 42 42 42 14 14 14 0 0 0
43567 - 0 0 0 0 0 0 0 0 0 0 0 0
43568 - 0 0 0 0 0 0 0 0 0 0 0 0
43569 - 0 0 0 0 0 0 0 0 0 0 0 0
43570 - 0 0 0 0 0 0 0 0 0 0 0 0
43571 - 0 0 0 0 0 0 0 0 0 0 0 0
43572 - 0 0 0 0 0 0 0 0 0 0 0 0
43573 - 0 0 0 0 0 0 0 0 0 0 0 0
43574 - 0 0 1 0 0 1 0 0 1 0 0 0
43575 - 0 0 0 0 0 0 0 0 0 0 0 0
43576 - 0 0 0 0 0 0 0 0 0 0 0 0
43577 - 0 0 0 0 0 0 0 0 0 0 0 0
43578 - 0 0 0 0 0 0 0 0 0 0 0 0
43579 - 0 0 0 0 0 0 0 0 0 14 14 14
43580 - 46 46 46 86 86 86 2 2 6 2 2 6
43581 - 38 38 38 116 116 116 94 94 94 22 22 22
43582 - 22 22 22 2 2 6 2 2 6 2 2 6
43583 - 14 14 14 86 86 86 138 138 138 162 162 162
43584 -154 154 154 38 38 38 26 26 26 6 6 6
43585 - 2 2 6 2 2 6 2 2 6 2 2 6
43586 - 86 86 86 46 46 46 14 14 14 0 0 0
43587 - 0 0 0 0 0 0 0 0 0 0 0 0
43588 - 0 0 0 0 0 0 0 0 0 0 0 0
43589 - 0 0 0 0 0 0 0 0 0 0 0 0
43590 - 0 0 0 0 0 0 0 0 0 0 0 0
43591 - 0 0 0 0 0 0 0 0 0 0 0 0
43592 - 0 0 0 0 0 0 0 0 0 0 0 0
43593 - 0 0 0 0 0 0 0 0 0 0 0 0
43594 - 0 0 0 0 0 0 0 0 0 0 0 0
43595 - 0 0 0 0 0 0 0 0 0 0 0 0
43596 - 0 0 0 0 0 0 0 0 0 0 0 0
43597 - 0 0 0 0 0 0 0 0 0 0 0 0
43598 - 0 0 0 0 0 0 0 0 0 0 0 0
43599 - 0 0 0 0 0 0 0 0 0 14 14 14
43600 - 46 46 46 86 86 86 2 2 6 14 14 14
43601 -134 134 134 198 198 198 195 195 195 116 116 116
43602 - 10 10 10 2 2 6 2 2 6 6 6 6
43603 -101 98 89 187 187 187 210 210 210 218 218 218
43604 -214 214 214 134 134 134 14 14 14 6 6 6
43605 - 2 2 6 2 2 6 2 2 6 2 2 6
43606 - 86 86 86 50 50 50 18 18 18 6 6 6
43607 - 0 0 0 0 0 0 0 0 0 0 0 0
43608 - 0 0 0 0 0 0 0 0 0 0 0 0
43609 - 0 0 0 0 0 0 0 0 0 0 0 0
43610 - 0 0 0 0 0 0 0 0 0 0 0 0
43611 - 0 0 0 0 0 0 0 0 0 0 0 0
43612 - 0 0 0 0 0 0 0 0 0 0 0 0
43613 - 0 0 0 0 0 0 0 0 1 0 0 0
43614 - 0 0 1 0 0 1 0 0 1 0 0 0
43615 - 0 0 0 0 0 0 0 0 0 0 0 0
43616 - 0 0 0 0 0 0 0 0 0 0 0 0
43617 - 0 0 0 0 0 0 0 0 0 0 0 0
43618 - 0 0 0 0 0 0 0 0 0 0 0 0
43619 - 0 0 0 0 0 0 0 0 0 14 14 14
43620 - 46 46 46 86 86 86 2 2 6 54 54 54
43621 -218 218 218 195 195 195 226 226 226 246 246 246
43622 - 58 58 58 2 2 6 2 2 6 30 30 30
43623 -210 210 210 253 253 253 174 174 174 123 123 123
43624 -221 221 221 234 234 234 74 74 74 2 2 6
43625 - 2 2 6 2 2 6 2 2 6 2 2 6
43626 - 70 70 70 58 58 58 22 22 22 6 6 6
43627 - 0 0 0 0 0 0 0 0 0 0 0 0
43628 - 0 0 0 0 0 0 0 0 0 0 0 0
43629 - 0 0 0 0 0 0 0 0 0 0 0 0
43630 - 0 0 0 0 0 0 0 0 0 0 0 0
43631 - 0 0 0 0 0 0 0 0 0 0 0 0
43632 - 0 0 0 0 0 0 0 0 0 0 0 0
43633 - 0 0 0 0 0 0 0 0 0 0 0 0
43634 - 0 0 0 0 0 0 0 0 0 0 0 0
43635 - 0 0 0 0 0 0 0 0 0 0 0 0
43636 - 0 0 0 0 0 0 0 0 0 0 0 0
43637 - 0 0 0 0 0 0 0 0 0 0 0 0
43638 - 0 0 0 0 0 0 0 0 0 0 0 0
43639 - 0 0 0 0 0 0 0 0 0 14 14 14
43640 - 46 46 46 82 82 82 2 2 6 106 106 106
43641 -170 170 170 26 26 26 86 86 86 226 226 226
43642 -123 123 123 10 10 10 14 14 14 46 46 46
43643 -231 231 231 190 190 190 6 6 6 70 70 70
43644 - 90 90 90 238 238 238 158 158 158 2 2 6
43645 - 2 2 6 2 2 6 2 2 6 2 2 6
43646 - 70 70 70 58 58 58 22 22 22 6 6 6
43647 - 0 0 0 0 0 0 0 0 0 0 0 0
43648 - 0 0 0 0 0 0 0 0 0 0 0 0
43649 - 0 0 0 0 0 0 0 0 0 0 0 0
43650 - 0 0 0 0 0 0 0 0 0 0 0 0
43651 - 0 0 0 0 0 0 0 0 0 0 0 0
43652 - 0 0 0 0 0 0 0 0 0 0 0 0
43653 - 0 0 0 0 0 0 0 0 1 0 0 0
43654 - 0 0 1 0 0 1 0 0 1 0 0 0
43655 - 0 0 0 0 0 0 0 0 0 0 0 0
43656 - 0 0 0 0 0 0 0 0 0 0 0 0
43657 - 0 0 0 0 0 0 0 0 0 0 0 0
43658 - 0 0 0 0 0 0 0 0 0 0 0 0
43659 - 0 0 0 0 0 0 0 0 0 14 14 14
43660 - 42 42 42 86 86 86 6 6 6 116 116 116
43661 -106 106 106 6 6 6 70 70 70 149 149 149
43662 -128 128 128 18 18 18 38 38 38 54 54 54
43663 -221 221 221 106 106 106 2 2 6 14 14 14
43664 - 46 46 46 190 190 190 198 198 198 2 2 6
43665 - 2 2 6 2 2 6 2 2 6 2 2 6
43666 - 74 74 74 62 62 62 22 22 22 6 6 6
43667 - 0 0 0 0 0 0 0 0 0 0 0 0
43668 - 0 0 0 0 0 0 0 0 0 0 0 0
43669 - 0 0 0 0 0 0 0 0 0 0 0 0
43670 - 0 0 0 0 0 0 0 0 0 0 0 0
43671 - 0 0 0 0 0 0 0 0 0 0 0 0
43672 - 0 0 0 0 0 0 0 0 0 0 0 0
43673 - 0 0 0 0 0 0 0 0 1 0 0 0
43674 - 0 0 1 0 0 0 0 0 1 0 0 0
43675 - 0 0 0 0 0 0 0 0 0 0 0 0
43676 - 0 0 0 0 0 0 0 0 0 0 0 0
43677 - 0 0 0 0 0 0 0 0 0 0 0 0
43678 - 0 0 0 0 0 0 0 0 0 0 0 0
43679 - 0 0 0 0 0 0 0 0 0 14 14 14
43680 - 42 42 42 94 94 94 14 14 14 101 101 101
43681 -128 128 128 2 2 6 18 18 18 116 116 116
43682 -118 98 46 121 92 8 121 92 8 98 78 10
43683 -162 162 162 106 106 106 2 2 6 2 2 6
43684 - 2 2 6 195 195 195 195 195 195 6 6 6
43685 - 2 2 6 2 2 6 2 2 6 2 2 6
43686 - 74 74 74 62 62 62 22 22 22 6 6 6
43687 - 0 0 0 0 0 0 0 0 0 0 0 0
43688 - 0 0 0 0 0 0 0 0 0 0 0 0
43689 - 0 0 0 0 0 0 0 0 0 0 0 0
43690 - 0 0 0 0 0 0 0 0 0 0 0 0
43691 - 0 0 0 0 0 0 0 0 0 0 0 0
43692 - 0 0 0 0 0 0 0 0 0 0 0 0
43693 - 0 0 0 0 0 0 0 0 1 0 0 1
43694 - 0 0 1 0 0 0 0 0 1 0 0 0
43695 - 0 0 0 0 0 0 0 0 0 0 0 0
43696 - 0 0 0 0 0 0 0 0 0 0 0 0
43697 - 0 0 0 0 0 0 0 0 0 0 0 0
43698 - 0 0 0 0 0 0 0 0 0 0 0 0
43699 - 0 0 0 0 0 0 0 0 0 10 10 10
43700 - 38 38 38 90 90 90 14 14 14 58 58 58
43701 -210 210 210 26 26 26 54 38 6 154 114 10
43702 -226 170 11 236 186 11 225 175 15 184 144 12
43703 -215 174 15 175 146 61 37 26 9 2 2 6
43704 - 70 70 70 246 246 246 138 138 138 2 2 6
43705 - 2 2 6 2 2 6 2 2 6 2 2 6
43706 - 70 70 70 66 66 66 26 26 26 6 6 6
43707 - 0 0 0 0 0 0 0 0 0 0 0 0
43708 - 0 0 0 0 0 0 0 0 0 0 0 0
43709 - 0 0 0 0 0 0 0 0 0 0 0 0
43710 - 0 0 0 0 0 0 0 0 0 0 0 0
43711 - 0 0 0 0 0 0 0 0 0 0 0 0
43712 - 0 0 0 0 0 0 0 0 0 0 0 0
43713 - 0 0 0 0 0 0 0 0 0 0 0 0
43714 - 0 0 0 0 0 0 0 0 0 0 0 0
43715 - 0 0 0 0 0 0 0 0 0 0 0 0
43716 - 0 0 0 0 0 0 0 0 0 0 0 0
43717 - 0 0 0 0 0 0 0 0 0 0 0 0
43718 - 0 0 0 0 0 0 0 0 0 0 0 0
43719 - 0 0 0 0 0 0 0 0 0 10 10 10
43720 - 38 38 38 86 86 86 14 14 14 10 10 10
43721 -195 195 195 188 164 115 192 133 9 225 175 15
43722 -239 182 13 234 190 10 232 195 16 232 200 30
43723 -245 207 45 241 208 19 232 195 16 184 144 12
43724 -218 194 134 211 206 186 42 42 42 2 2 6
43725 - 2 2 6 2 2 6 2 2 6 2 2 6
43726 - 50 50 50 74 74 74 30 30 30 6 6 6
43727 - 0 0 0 0 0 0 0 0 0 0 0 0
43728 - 0 0 0 0 0 0 0 0 0 0 0 0
43729 - 0 0 0 0 0 0 0 0 0 0 0 0
43730 - 0 0 0 0 0 0 0 0 0 0 0 0
43731 - 0 0 0 0 0 0 0 0 0 0 0 0
43732 - 0 0 0 0 0 0 0 0 0 0 0 0
43733 - 0 0 0 0 0 0 0 0 0 0 0 0
43734 - 0 0 0 0 0 0 0 0 0 0 0 0
43735 - 0 0 0 0 0 0 0 0 0 0 0 0
43736 - 0 0 0 0 0 0 0 0 0 0 0 0
43737 - 0 0 0 0 0 0 0 0 0 0 0 0
43738 - 0 0 0 0 0 0 0 0 0 0 0 0
43739 - 0 0 0 0 0 0 0 0 0 10 10 10
43740 - 34 34 34 86 86 86 14 14 14 2 2 6
43741 -121 87 25 192 133 9 219 162 10 239 182 13
43742 -236 186 11 232 195 16 241 208 19 244 214 54
43743 -246 218 60 246 218 38 246 215 20 241 208 19
43744 -241 208 19 226 184 13 121 87 25 2 2 6
43745 - 2 2 6 2 2 6 2 2 6 2 2 6
43746 - 50 50 50 82 82 82 34 34 34 10 10 10
43747 - 0 0 0 0 0 0 0 0 0 0 0 0
43748 - 0 0 0 0 0 0 0 0 0 0 0 0
43749 - 0 0 0 0 0 0 0 0 0 0 0 0
43750 - 0 0 0 0 0 0 0 0 0 0 0 0
43751 - 0 0 0 0 0 0 0 0 0 0 0 0
43752 - 0 0 0 0 0 0 0 0 0 0 0 0
43753 - 0 0 0 0 0 0 0 0 0 0 0 0
43754 - 0 0 0 0 0 0 0 0 0 0 0 0
43755 - 0 0 0 0 0 0 0 0 0 0 0 0
43756 - 0 0 0 0 0 0 0 0 0 0 0 0
43757 - 0 0 0 0 0 0 0 0 0 0 0 0
43758 - 0 0 0 0 0 0 0 0 0 0 0 0
43759 - 0 0 0 0 0 0 0 0 0 10 10 10
43760 - 34 34 34 82 82 82 30 30 30 61 42 6
43761 -180 123 7 206 145 10 230 174 11 239 182 13
43762 -234 190 10 238 202 15 241 208 19 246 218 74
43763 -246 218 38 246 215 20 246 215 20 246 215 20
43764 -226 184 13 215 174 15 184 144 12 6 6 6
43765 - 2 2 6 2 2 6 2 2 6 2 2 6
43766 - 26 26 26 94 94 94 42 42 42 14 14 14
43767 - 0 0 0 0 0 0 0 0 0 0 0 0
43768 - 0 0 0 0 0 0 0 0 0 0 0 0
43769 - 0 0 0 0 0 0 0 0 0 0 0 0
43770 - 0 0 0 0 0 0 0 0 0 0 0 0
43771 - 0 0 0 0 0 0 0 0 0 0 0 0
43772 - 0 0 0 0 0 0 0 0 0 0 0 0
43773 - 0 0 0 0 0 0 0 0 0 0 0 0
43774 - 0 0 0 0 0 0 0 0 0 0 0 0
43775 - 0 0 0 0 0 0 0 0 0 0 0 0
43776 - 0 0 0 0 0 0 0 0 0 0 0 0
43777 - 0 0 0 0 0 0 0 0 0 0 0 0
43778 - 0 0 0 0 0 0 0 0 0 0 0 0
43779 - 0 0 0 0 0 0 0 0 0 10 10 10
43780 - 30 30 30 78 78 78 50 50 50 104 69 6
43781 -192 133 9 216 158 10 236 178 12 236 186 11
43782 -232 195 16 241 208 19 244 214 54 245 215 43
43783 -246 215 20 246 215 20 241 208 19 198 155 10
43784 -200 144 11 216 158 10 156 118 10 2 2 6
43785 - 2 2 6 2 2 6 2 2 6 2 2 6
43786 - 6 6 6 90 90 90 54 54 54 18 18 18
43787 - 6 6 6 0 0 0 0 0 0 0 0 0
43788 - 0 0 0 0 0 0 0 0 0 0 0 0
43789 - 0 0 0 0 0 0 0 0 0 0 0 0
43790 - 0 0 0 0 0 0 0 0 0 0 0 0
43791 - 0 0 0 0 0 0 0 0 0 0 0 0
43792 - 0 0 0 0 0 0 0 0 0 0 0 0
43793 - 0 0 0 0 0 0 0 0 0 0 0 0
43794 - 0 0 0 0 0 0 0 0 0 0 0 0
43795 - 0 0 0 0 0 0 0 0 0 0 0 0
43796 - 0 0 0 0 0 0 0 0 0 0 0 0
43797 - 0 0 0 0 0 0 0 0 0 0 0 0
43798 - 0 0 0 0 0 0 0 0 0 0 0 0
43799 - 0 0 0 0 0 0 0 0 0 10 10 10
43800 - 30 30 30 78 78 78 46 46 46 22 22 22
43801 -137 92 6 210 162 10 239 182 13 238 190 10
43802 -238 202 15 241 208 19 246 215 20 246 215 20
43803 -241 208 19 203 166 17 185 133 11 210 150 10
43804 -216 158 10 210 150 10 102 78 10 2 2 6
43805 - 6 6 6 54 54 54 14 14 14 2 2 6
43806 - 2 2 6 62 62 62 74 74 74 30 30 30
43807 - 10 10 10 0 0 0 0 0 0 0 0 0
43808 - 0 0 0 0 0 0 0 0 0 0 0 0
43809 - 0 0 0 0 0 0 0 0 0 0 0 0
43810 - 0 0 0 0 0 0 0 0 0 0 0 0
43811 - 0 0 0 0 0 0 0 0 0 0 0 0
43812 - 0 0 0 0 0 0 0 0 0 0 0 0
43813 - 0 0 0 0 0 0 0 0 0 0 0 0
43814 - 0 0 0 0 0 0 0 0 0 0 0 0
43815 - 0 0 0 0 0 0 0 0 0 0 0 0
43816 - 0 0 0 0 0 0 0 0 0 0 0 0
43817 - 0 0 0 0 0 0 0 0 0 0 0 0
43818 - 0 0 0 0 0 0 0 0 0 0 0 0
43819 - 0 0 0 0 0 0 0 0 0 10 10 10
43820 - 34 34 34 78 78 78 50 50 50 6 6 6
43821 - 94 70 30 139 102 15 190 146 13 226 184 13
43822 -232 200 30 232 195 16 215 174 15 190 146 13
43823 -168 122 10 192 133 9 210 150 10 213 154 11
43824 -202 150 34 182 157 106 101 98 89 2 2 6
43825 - 2 2 6 78 78 78 116 116 116 58 58 58
43826 - 2 2 6 22 22 22 90 90 90 46 46 46
43827 - 18 18 18 6 6 6 0 0 0 0 0 0
43828 - 0 0 0 0 0 0 0 0 0 0 0 0
43829 - 0 0 0 0 0 0 0 0 0 0 0 0
43830 - 0 0 0 0 0 0 0 0 0 0 0 0
43831 - 0 0 0 0 0 0 0 0 0 0 0 0
43832 - 0 0 0 0 0 0 0 0 0 0 0 0
43833 - 0 0 0 0 0 0 0 0 0 0 0 0
43834 - 0 0 0 0 0 0 0 0 0 0 0 0
43835 - 0 0 0 0 0 0 0 0 0 0 0 0
43836 - 0 0 0 0 0 0 0 0 0 0 0 0
43837 - 0 0 0 0 0 0 0 0 0 0 0 0
43838 - 0 0 0 0 0 0 0 0 0 0 0 0
43839 - 0 0 0 0 0 0 0 0 0 10 10 10
43840 - 38 38 38 86 86 86 50 50 50 6 6 6
43841 -128 128 128 174 154 114 156 107 11 168 122 10
43842 -198 155 10 184 144 12 197 138 11 200 144 11
43843 -206 145 10 206 145 10 197 138 11 188 164 115
43844 -195 195 195 198 198 198 174 174 174 14 14 14
43845 - 2 2 6 22 22 22 116 116 116 116 116 116
43846 - 22 22 22 2 2 6 74 74 74 70 70 70
43847 - 30 30 30 10 10 10 0 0 0 0 0 0
43848 - 0 0 0 0 0 0 0 0 0 0 0 0
43849 - 0 0 0 0 0 0 0 0 0 0 0 0
43850 - 0 0 0 0 0 0 0 0 0 0 0 0
43851 - 0 0 0 0 0 0 0 0 0 0 0 0
43852 - 0 0 0 0 0 0 0 0 0 0 0 0
43853 - 0 0 0 0 0 0 0 0 0 0 0 0
43854 - 0 0 0 0 0 0 0 0 0 0 0 0
43855 - 0 0 0 0 0 0 0 0 0 0 0 0
43856 - 0 0 0 0 0 0 0 0 0 0 0 0
43857 - 0 0 0 0 0 0 0 0 0 0 0 0
43858 - 0 0 0 0 0 0 0 0 0 0 0 0
43859 - 0 0 0 0 0 0 6 6 6 18 18 18
43860 - 50 50 50 101 101 101 26 26 26 10 10 10
43861 -138 138 138 190 190 190 174 154 114 156 107 11
43862 -197 138 11 200 144 11 197 138 11 192 133 9
43863 -180 123 7 190 142 34 190 178 144 187 187 187
43864 -202 202 202 221 221 221 214 214 214 66 66 66
43865 - 2 2 6 2 2 6 50 50 50 62 62 62
43866 - 6 6 6 2 2 6 10 10 10 90 90 90
43867 - 50 50 50 18 18 18 6 6 6 0 0 0
43868 - 0 0 0 0 0 0 0 0 0 0 0 0
43869 - 0 0 0 0 0 0 0 0 0 0 0 0
43870 - 0 0 0 0 0 0 0 0 0 0 0 0
43871 - 0 0 0 0 0 0 0 0 0 0 0 0
43872 - 0 0 0 0 0 0 0 0 0 0 0 0
43873 - 0 0 0 0 0 0 0 0 0 0 0 0
43874 - 0 0 0 0 0 0 0 0 0 0 0 0
43875 - 0 0 0 0 0 0 0 0 0 0 0 0
43876 - 0 0 0 0 0 0 0 0 0 0 0 0
43877 - 0 0 0 0 0 0 0 0 0 0 0 0
43878 - 0 0 0 0 0 0 0 0 0 0 0 0
43879 - 0 0 0 0 0 0 10 10 10 34 34 34
43880 - 74 74 74 74 74 74 2 2 6 6 6 6
43881 -144 144 144 198 198 198 190 190 190 178 166 146
43882 -154 121 60 156 107 11 156 107 11 168 124 44
43883 -174 154 114 187 187 187 190 190 190 210 210 210
43884 -246 246 246 253 253 253 253 253 253 182 182 182
43885 - 6 6 6 2 2 6 2 2 6 2 2 6
43886 - 2 2 6 2 2 6 2 2 6 62 62 62
43887 - 74 74 74 34 34 34 14 14 14 0 0 0
43888 - 0 0 0 0 0 0 0 0 0 0 0 0
43889 - 0 0 0 0 0 0 0 0 0 0 0 0
43890 - 0 0 0 0 0 0 0 0 0 0 0 0
43891 - 0 0 0 0 0 0 0 0 0 0 0 0
43892 - 0 0 0 0 0 0 0 0 0 0 0 0
43893 - 0 0 0 0 0 0 0 0 0 0 0 0
43894 - 0 0 0 0 0 0 0 0 0 0 0 0
43895 - 0 0 0 0 0 0 0 0 0 0 0 0
43896 - 0 0 0 0 0 0 0 0 0 0 0 0
43897 - 0 0 0 0 0 0 0 0 0 0 0 0
43898 - 0 0 0 0 0 0 0 0 0 0 0 0
43899 - 0 0 0 10 10 10 22 22 22 54 54 54
43900 - 94 94 94 18 18 18 2 2 6 46 46 46
43901 -234 234 234 221 221 221 190 190 190 190 190 190
43902 -190 190 190 187 187 187 187 187 187 190 190 190
43903 -190 190 190 195 195 195 214 214 214 242 242 242
43904 -253 253 253 253 253 253 253 253 253 253 253 253
43905 - 82 82 82 2 2 6 2 2 6 2 2 6
43906 - 2 2 6 2 2 6 2 2 6 14 14 14
43907 - 86 86 86 54 54 54 22 22 22 6 6 6
43908 - 0 0 0 0 0 0 0 0 0 0 0 0
43909 - 0 0 0 0 0 0 0 0 0 0 0 0
43910 - 0 0 0 0 0 0 0 0 0 0 0 0
43911 - 0 0 0 0 0 0 0 0 0 0 0 0
43912 - 0 0 0 0 0 0 0 0 0 0 0 0
43913 - 0 0 0 0 0 0 0 0 0 0 0 0
43914 - 0 0 0 0 0 0 0 0 0 0 0 0
43915 - 0 0 0 0 0 0 0 0 0 0 0 0
43916 - 0 0 0 0 0 0 0 0 0 0 0 0
43917 - 0 0 0 0 0 0 0 0 0 0 0 0
43918 - 0 0 0 0 0 0 0 0 0 0 0 0
43919 - 6 6 6 18 18 18 46 46 46 90 90 90
43920 - 46 46 46 18 18 18 6 6 6 182 182 182
43921 -253 253 253 246 246 246 206 206 206 190 190 190
43922 -190 190 190 190 190 190 190 190 190 190 190 190
43923 -206 206 206 231 231 231 250 250 250 253 253 253
43924 -253 253 253 253 253 253 253 253 253 253 253 253
43925 -202 202 202 14 14 14 2 2 6 2 2 6
43926 - 2 2 6 2 2 6 2 2 6 2 2 6
43927 - 42 42 42 86 86 86 42 42 42 18 18 18
43928 - 6 6 6 0 0 0 0 0 0 0 0 0
43929 - 0 0 0 0 0 0 0 0 0 0 0 0
43930 - 0 0 0 0 0 0 0 0 0 0 0 0
43931 - 0 0 0 0 0 0 0 0 0 0 0 0
43932 - 0 0 0 0 0 0 0 0 0 0 0 0
43933 - 0 0 0 0 0 0 0 0 0 0 0 0
43934 - 0 0 0 0 0 0 0 0 0 0 0 0
43935 - 0 0 0 0 0 0 0 0 0 0 0 0
43936 - 0 0 0 0 0 0 0 0 0 0 0 0
43937 - 0 0 0 0 0 0 0 0 0 0 0 0
43938 - 0 0 0 0 0 0 0 0 0 6 6 6
43939 - 14 14 14 38 38 38 74 74 74 66 66 66
43940 - 2 2 6 6 6 6 90 90 90 250 250 250
43941 -253 253 253 253 253 253 238 238 238 198 198 198
43942 -190 190 190 190 190 190 195 195 195 221 221 221
43943 -246 246 246 253 253 253 253 253 253 253 253 253
43944 -253 253 253 253 253 253 253 253 253 253 253 253
43945 -253 253 253 82 82 82 2 2 6 2 2 6
43946 - 2 2 6 2 2 6 2 2 6 2 2 6
43947 - 2 2 6 78 78 78 70 70 70 34 34 34
43948 - 14 14 14 6 6 6 0 0 0 0 0 0
43949 - 0 0 0 0 0 0 0 0 0 0 0 0
43950 - 0 0 0 0 0 0 0 0 0 0 0 0
43951 - 0 0 0 0 0 0 0 0 0 0 0 0
43952 - 0 0 0 0 0 0 0 0 0 0 0 0
43953 - 0 0 0 0 0 0 0 0 0 0 0 0
43954 - 0 0 0 0 0 0 0 0 0 0 0 0
43955 - 0 0 0 0 0 0 0 0 0 0 0 0
43956 - 0 0 0 0 0 0 0 0 0 0 0 0
43957 - 0 0 0 0 0 0 0 0 0 0 0 0
43958 - 0 0 0 0 0 0 0 0 0 14 14 14
43959 - 34 34 34 66 66 66 78 78 78 6 6 6
43960 - 2 2 6 18 18 18 218 218 218 253 253 253
43961 -253 253 253 253 253 253 253 253 253 246 246 246
43962 -226 226 226 231 231 231 246 246 246 253 253 253
43963 -253 253 253 253 253 253 253 253 253 253 253 253
43964 -253 253 253 253 253 253 253 253 253 253 253 253
43965 -253 253 253 178 178 178 2 2 6 2 2 6
43966 - 2 2 6 2 2 6 2 2 6 2 2 6
43967 - 2 2 6 18 18 18 90 90 90 62 62 62
43968 - 30 30 30 10 10 10 0 0 0 0 0 0
43969 - 0 0 0 0 0 0 0 0 0 0 0 0
43970 - 0 0 0 0 0 0 0 0 0 0 0 0
43971 - 0 0 0 0 0 0 0 0 0 0 0 0
43972 - 0 0 0 0 0 0 0 0 0 0 0 0
43973 - 0 0 0 0 0 0 0 0 0 0 0 0
43974 - 0 0 0 0 0 0 0 0 0 0 0 0
43975 - 0 0 0 0 0 0 0 0 0 0 0 0
43976 - 0 0 0 0 0 0 0 0 0 0 0 0
43977 - 0 0 0 0 0 0 0 0 0 0 0 0
43978 - 0 0 0 0 0 0 10 10 10 26 26 26
43979 - 58 58 58 90 90 90 18 18 18 2 2 6
43980 - 2 2 6 110 110 110 253 253 253 253 253 253
43981 -253 253 253 253 253 253 253 253 253 253 253 253
43982 -250 250 250 253 253 253 253 253 253 253 253 253
43983 -253 253 253 253 253 253 253 253 253 253 253 253
43984 -253 253 253 253 253 253 253 253 253 253 253 253
43985 -253 253 253 231 231 231 18 18 18 2 2 6
43986 - 2 2 6 2 2 6 2 2 6 2 2 6
43987 - 2 2 6 2 2 6 18 18 18 94 94 94
43988 - 54 54 54 26 26 26 10 10 10 0 0 0
43989 - 0 0 0 0 0 0 0 0 0 0 0 0
43990 - 0 0 0 0 0 0 0 0 0 0 0 0
43991 - 0 0 0 0 0 0 0 0 0 0 0 0
43992 - 0 0 0 0 0 0 0 0 0 0 0 0
43993 - 0 0 0 0 0 0 0 0 0 0 0 0
43994 - 0 0 0 0 0 0 0 0 0 0 0 0
43995 - 0 0 0 0 0 0 0 0 0 0 0 0
43996 - 0 0 0 0 0 0 0 0 0 0 0 0
43997 - 0 0 0 0 0 0 0 0 0 0 0 0
43998 - 0 0 0 6 6 6 22 22 22 50 50 50
43999 - 90 90 90 26 26 26 2 2 6 2 2 6
44000 - 14 14 14 195 195 195 250 250 250 253 253 253
44001 -253 253 253 253 253 253 253 253 253 253 253 253
44002 -253 253 253 253 253 253 253 253 253 253 253 253
44003 -253 253 253 253 253 253 253 253 253 253 253 253
44004 -253 253 253 253 253 253 253 253 253 253 253 253
44005 -250 250 250 242 242 242 54 54 54 2 2 6
44006 - 2 2 6 2 2 6 2 2 6 2 2 6
44007 - 2 2 6 2 2 6 2 2 6 38 38 38
44008 - 86 86 86 50 50 50 22 22 22 6 6 6
44009 - 0 0 0 0 0 0 0 0 0 0 0 0
44010 - 0 0 0 0 0 0 0 0 0 0 0 0
44011 - 0 0 0 0 0 0 0 0 0 0 0 0
44012 - 0 0 0 0 0 0 0 0 0 0 0 0
44013 - 0 0 0 0 0 0 0 0 0 0 0 0
44014 - 0 0 0 0 0 0 0 0 0 0 0 0
44015 - 0 0 0 0 0 0 0 0 0 0 0 0
44016 - 0 0 0 0 0 0 0 0 0 0 0 0
44017 - 0 0 0 0 0 0 0 0 0 0 0 0
44018 - 6 6 6 14 14 14 38 38 38 82 82 82
44019 - 34 34 34 2 2 6 2 2 6 2 2 6
44020 - 42 42 42 195 195 195 246 246 246 253 253 253
44021 -253 253 253 253 253 253 253 253 253 250 250 250
44022 -242 242 242 242 242 242 250 250 250 253 253 253
44023 -253 253 253 253 253 253 253 253 253 253 253 253
44024 -253 253 253 250 250 250 246 246 246 238 238 238
44025 -226 226 226 231 231 231 101 101 101 6 6 6
44026 - 2 2 6 2 2 6 2 2 6 2 2 6
44027 - 2 2 6 2 2 6 2 2 6 2 2 6
44028 - 38 38 38 82 82 82 42 42 42 14 14 14
44029 - 6 6 6 0 0 0 0 0 0 0 0 0
44030 - 0 0 0 0 0 0 0 0 0 0 0 0
44031 - 0 0 0 0 0 0 0 0 0 0 0 0
44032 - 0 0 0 0 0 0 0 0 0 0 0 0
44033 - 0 0 0 0 0 0 0 0 0 0 0 0
44034 - 0 0 0 0 0 0 0 0 0 0 0 0
44035 - 0 0 0 0 0 0 0 0 0 0 0 0
44036 - 0 0 0 0 0 0 0 0 0 0 0 0
44037 - 0 0 0 0 0 0 0 0 0 0 0 0
44038 - 10 10 10 26 26 26 62 62 62 66 66 66
44039 - 2 2 6 2 2 6 2 2 6 6 6 6
44040 - 70 70 70 170 170 170 206 206 206 234 234 234
44041 -246 246 246 250 250 250 250 250 250 238 238 238
44042 -226 226 226 231 231 231 238 238 238 250 250 250
44043 -250 250 250 250 250 250 246 246 246 231 231 231
44044 -214 214 214 206 206 206 202 202 202 202 202 202
44045 -198 198 198 202 202 202 182 182 182 18 18 18
44046 - 2 2 6 2 2 6 2 2 6 2 2 6
44047 - 2 2 6 2 2 6 2 2 6 2 2 6
44048 - 2 2 6 62 62 62 66 66 66 30 30 30
44049 - 10 10 10 0 0 0 0 0 0 0 0 0
44050 - 0 0 0 0 0 0 0 0 0 0 0 0
44051 - 0 0 0 0 0 0 0 0 0 0 0 0
44052 - 0 0 0 0 0 0 0 0 0 0 0 0
44053 - 0 0 0 0 0 0 0 0 0 0 0 0
44054 - 0 0 0 0 0 0 0 0 0 0 0 0
44055 - 0 0 0 0 0 0 0 0 0 0 0 0
44056 - 0 0 0 0 0 0 0 0 0 0 0 0
44057 - 0 0 0 0 0 0 0 0 0 0 0 0
44058 - 14 14 14 42 42 42 82 82 82 18 18 18
44059 - 2 2 6 2 2 6 2 2 6 10 10 10
44060 - 94 94 94 182 182 182 218 218 218 242 242 242
44061 -250 250 250 253 253 253 253 253 253 250 250 250
44062 -234 234 234 253 253 253 253 253 253 253 253 253
44063 -253 253 253 253 253 253 253 253 253 246 246 246
44064 -238 238 238 226 226 226 210 210 210 202 202 202
44065 -195 195 195 195 195 195 210 210 210 158 158 158
44066 - 6 6 6 14 14 14 50 50 50 14 14 14
44067 - 2 2 6 2 2 6 2 2 6 2 2 6
44068 - 2 2 6 6 6 6 86 86 86 46 46 46
44069 - 18 18 18 6 6 6 0 0 0 0 0 0
44070 - 0 0 0 0 0 0 0 0 0 0 0 0
44071 - 0 0 0 0 0 0 0 0 0 0 0 0
44072 - 0 0 0 0 0 0 0 0 0 0 0 0
44073 - 0 0 0 0 0 0 0 0 0 0 0 0
44074 - 0 0 0 0 0 0 0 0 0 0 0 0
44075 - 0 0 0 0 0 0 0 0 0 0 0 0
44076 - 0 0 0 0 0 0 0 0 0 0 0 0
44077 - 0 0 0 0 0 0 0 0 0 6 6 6
44078 - 22 22 22 54 54 54 70 70 70 2 2 6
44079 - 2 2 6 10 10 10 2 2 6 22 22 22
44080 -166 166 166 231 231 231 250 250 250 253 253 253
44081 -253 253 253 253 253 253 253 253 253 250 250 250
44082 -242 242 242 253 253 253 253 253 253 253 253 253
44083 -253 253 253 253 253 253 253 253 253 253 253 253
44084 -253 253 253 253 253 253 253 253 253 246 246 246
44085 -231 231 231 206 206 206 198 198 198 226 226 226
44086 - 94 94 94 2 2 6 6 6 6 38 38 38
44087 - 30 30 30 2 2 6 2 2 6 2 2 6
44088 - 2 2 6 2 2 6 62 62 62 66 66 66
44089 - 26 26 26 10 10 10 0 0 0 0 0 0
44090 - 0 0 0 0 0 0 0 0 0 0 0 0
44091 - 0 0 0 0 0 0 0 0 0 0 0 0
44092 - 0 0 0 0 0 0 0 0 0 0 0 0
44093 - 0 0 0 0 0 0 0 0 0 0 0 0
44094 - 0 0 0 0 0 0 0 0 0 0 0 0
44095 - 0 0 0 0 0 0 0 0 0 0 0 0
44096 - 0 0 0 0 0 0 0 0 0 0 0 0
44097 - 0 0 0 0 0 0 0 0 0 10 10 10
44098 - 30 30 30 74 74 74 50 50 50 2 2 6
44099 - 26 26 26 26 26 26 2 2 6 106 106 106
44100 -238 238 238 253 253 253 253 253 253 253 253 253
44101 -253 253 253 253 253 253 253 253 253 253 253 253
44102 -253 253 253 253 253 253 253 253 253 253 253 253
44103 -253 253 253 253 253 253 253 253 253 253 253 253
44104 -253 253 253 253 253 253 253 253 253 253 253 253
44105 -253 253 253 246 246 246 218 218 218 202 202 202
44106 -210 210 210 14 14 14 2 2 6 2 2 6
44107 - 30 30 30 22 22 22 2 2 6 2 2 6
44108 - 2 2 6 2 2 6 18 18 18 86 86 86
44109 - 42 42 42 14 14 14 0 0 0 0 0 0
44110 - 0 0 0 0 0 0 0 0 0 0 0 0
44111 - 0 0 0 0 0 0 0 0 0 0 0 0
44112 - 0 0 0 0 0 0 0 0 0 0 0 0
44113 - 0 0 0 0 0 0 0 0 0 0 0 0
44114 - 0 0 0 0 0 0 0 0 0 0 0 0
44115 - 0 0 0 0 0 0 0 0 0 0 0 0
44116 - 0 0 0 0 0 0 0 0 0 0 0 0
44117 - 0 0 0 0 0 0 0 0 0 14 14 14
44118 - 42 42 42 90 90 90 22 22 22 2 2 6
44119 - 42 42 42 2 2 6 18 18 18 218 218 218
44120 -253 253 253 253 253 253 253 253 253 253 253 253
44121 -253 253 253 253 253 253 253 253 253 253 253 253
44122 -253 253 253 253 253 253 253 253 253 253 253 253
44123 -253 253 253 253 253 253 253 253 253 253 253 253
44124 -253 253 253 253 253 253 253 253 253 253 253 253
44125 -253 253 253 253 253 253 250 250 250 221 221 221
44126 -218 218 218 101 101 101 2 2 6 14 14 14
44127 - 18 18 18 38 38 38 10 10 10 2 2 6
44128 - 2 2 6 2 2 6 2 2 6 78 78 78
44129 - 58 58 58 22 22 22 6 6 6 0 0 0
44130 - 0 0 0 0 0 0 0 0 0 0 0 0
44131 - 0 0 0 0 0 0 0 0 0 0 0 0
44132 - 0 0 0 0 0 0 0 0 0 0 0 0
44133 - 0 0 0 0 0 0 0 0 0 0 0 0
44134 - 0 0 0 0 0 0 0 0 0 0 0 0
44135 - 0 0 0 0 0 0 0 0 0 0 0 0
44136 - 0 0 0 0 0 0 0 0 0 0 0 0
44137 - 0 0 0 0 0 0 6 6 6 18 18 18
44138 - 54 54 54 82 82 82 2 2 6 26 26 26
44139 - 22 22 22 2 2 6 123 123 123 253 253 253
44140 -253 253 253 253 253 253 253 253 253 253 253 253
44141 -253 253 253 253 253 253 253 253 253 253 253 253
44142 -253 253 253 253 253 253 253 253 253 253 253 253
44143 -253 253 253 253 253 253 253 253 253 253 253 253
44144 -253 253 253 253 253 253 253 253 253 253 253 253
44145 -253 253 253 253 253 253 253 253 253 250 250 250
44146 -238 238 238 198 198 198 6 6 6 38 38 38
44147 - 58 58 58 26 26 26 38 38 38 2 2 6
44148 - 2 2 6 2 2 6 2 2 6 46 46 46
44149 - 78 78 78 30 30 30 10 10 10 0 0 0
44150 - 0 0 0 0 0 0 0 0 0 0 0 0
44151 - 0 0 0 0 0 0 0 0 0 0 0 0
44152 - 0 0 0 0 0 0 0 0 0 0 0 0
44153 - 0 0 0 0 0 0 0 0 0 0 0 0
44154 - 0 0 0 0 0 0 0 0 0 0 0 0
44155 - 0 0 0 0 0 0 0 0 0 0 0 0
44156 - 0 0 0 0 0 0 0 0 0 0 0 0
44157 - 0 0 0 0 0 0 10 10 10 30 30 30
44158 - 74 74 74 58 58 58 2 2 6 42 42 42
44159 - 2 2 6 22 22 22 231 231 231 253 253 253
44160 -253 253 253 253 253 253 253 253 253 253 253 253
44161 -253 253 253 253 253 253 253 253 253 250 250 250
44162 -253 253 253 253 253 253 253 253 253 253 253 253
44163 -253 253 253 253 253 253 253 253 253 253 253 253
44164 -253 253 253 253 253 253 253 253 253 253 253 253
44165 -253 253 253 253 253 253 253 253 253 253 253 253
44166 -253 253 253 246 246 246 46 46 46 38 38 38
44167 - 42 42 42 14 14 14 38 38 38 14 14 14
44168 - 2 2 6 2 2 6 2 2 6 6 6 6
44169 - 86 86 86 46 46 46 14 14 14 0 0 0
44170 - 0 0 0 0 0 0 0 0 0 0 0 0
44171 - 0 0 0 0 0 0 0 0 0 0 0 0
44172 - 0 0 0 0 0 0 0 0 0 0 0 0
44173 - 0 0 0 0 0 0 0 0 0 0 0 0
44174 - 0 0 0 0 0 0 0 0 0 0 0 0
44175 - 0 0 0 0 0 0 0 0 0 0 0 0
44176 - 0 0 0 0 0 0 0 0 0 0 0 0
44177 - 0 0 0 6 6 6 14 14 14 42 42 42
44178 - 90 90 90 18 18 18 18 18 18 26 26 26
44179 - 2 2 6 116 116 116 253 253 253 253 253 253
44180 -253 253 253 253 253 253 253 253 253 253 253 253
44181 -253 253 253 253 253 253 250 250 250 238 238 238
44182 -253 253 253 253 253 253 253 253 253 253 253 253
44183 -253 253 253 253 253 253 253 253 253 253 253 253
44184 -253 253 253 253 253 253 253 253 253 253 253 253
44185 -253 253 253 253 253 253 253 253 253 253 253 253
44186 -253 253 253 253 253 253 94 94 94 6 6 6
44187 - 2 2 6 2 2 6 10 10 10 34 34 34
44188 - 2 2 6 2 2 6 2 2 6 2 2 6
44189 - 74 74 74 58 58 58 22 22 22 6 6 6
44190 - 0 0 0 0 0 0 0 0 0 0 0 0
44191 - 0 0 0 0 0 0 0 0 0 0 0 0
44192 - 0 0 0 0 0 0 0 0 0 0 0 0
44193 - 0 0 0 0 0 0 0 0 0 0 0 0
44194 - 0 0 0 0 0 0 0 0 0 0 0 0
44195 - 0 0 0 0 0 0 0 0 0 0 0 0
44196 - 0 0 0 0 0 0 0 0 0 0 0 0
44197 - 0 0 0 10 10 10 26 26 26 66 66 66
44198 - 82 82 82 2 2 6 38 38 38 6 6 6
44199 - 14 14 14 210 210 210 253 253 253 253 253 253
44200 -253 253 253 253 253 253 253 253 253 253 253 253
44201 -253 253 253 253 253 253 246 246 246 242 242 242
44202 -253 253 253 253 253 253 253 253 253 253 253 253
44203 -253 253 253 253 253 253 253 253 253 253 253 253
44204 -253 253 253 253 253 253 253 253 253 253 253 253
44205 -253 253 253 253 253 253 253 253 253 253 253 253
44206 -253 253 253 253 253 253 144 144 144 2 2 6
44207 - 2 2 6 2 2 6 2 2 6 46 46 46
44208 - 2 2 6 2 2 6 2 2 6 2 2 6
44209 - 42 42 42 74 74 74 30 30 30 10 10 10
44210 - 0 0 0 0 0 0 0 0 0 0 0 0
44211 - 0 0 0 0 0 0 0 0 0 0 0 0
44212 - 0 0 0 0 0 0 0 0 0 0 0 0
44213 - 0 0 0 0 0 0 0 0 0 0 0 0
44214 - 0 0 0 0 0 0 0 0 0 0 0 0
44215 - 0 0 0 0 0 0 0 0 0 0 0 0
44216 - 0 0 0 0 0 0 0 0 0 0 0 0
44217 - 6 6 6 14 14 14 42 42 42 90 90 90
44218 - 26 26 26 6 6 6 42 42 42 2 2 6
44219 - 74 74 74 250 250 250 253 253 253 253 253 253
44220 -253 253 253 253 253 253 253 253 253 253 253 253
44221 -253 253 253 253 253 253 242 242 242 242 242 242
44222 -253 253 253 253 253 253 253 253 253 253 253 253
44223 -253 253 253 253 253 253 253 253 253 253 253 253
44224 -253 253 253 253 253 253 253 253 253 253 253 253
44225 -253 253 253 253 253 253 253 253 253 253 253 253
44226 -253 253 253 253 253 253 182 182 182 2 2 6
44227 - 2 2 6 2 2 6 2 2 6 46 46 46
44228 - 2 2 6 2 2 6 2 2 6 2 2 6
44229 - 10 10 10 86 86 86 38 38 38 10 10 10
44230 - 0 0 0 0 0 0 0 0 0 0 0 0
44231 - 0 0 0 0 0 0 0 0 0 0 0 0
44232 - 0 0 0 0 0 0 0 0 0 0 0 0
44233 - 0 0 0 0 0 0 0 0 0 0 0 0
44234 - 0 0 0 0 0 0 0 0 0 0 0 0
44235 - 0 0 0 0 0 0 0 0 0 0 0 0
44236 - 0 0 0 0 0 0 0 0 0 0 0 0
44237 - 10 10 10 26 26 26 66 66 66 82 82 82
44238 - 2 2 6 22 22 22 18 18 18 2 2 6
44239 -149 149 149 253 253 253 253 253 253 253 253 253
44240 -253 253 253 253 253 253 253 253 253 253 253 253
44241 -253 253 253 253 253 253 234 234 234 242 242 242
44242 -253 253 253 253 253 253 253 253 253 253 253 253
44243 -253 253 253 253 253 253 253 253 253 253 253 253
44244 -253 253 253 253 253 253 253 253 253 253 253 253
44245 -253 253 253 253 253 253 253 253 253 253 253 253
44246 -253 253 253 253 253 253 206 206 206 2 2 6
44247 - 2 2 6 2 2 6 2 2 6 38 38 38
44248 - 2 2 6 2 2 6 2 2 6 2 2 6
44249 - 6 6 6 86 86 86 46 46 46 14 14 14
44250 - 0 0 0 0 0 0 0 0 0 0 0 0
44251 - 0 0 0 0 0 0 0 0 0 0 0 0
44252 - 0 0 0 0 0 0 0 0 0 0 0 0
44253 - 0 0 0 0 0 0 0 0 0 0 0 0
44254 - 0 0 0 0 0 0 0 0 0 0 0 0
44255 - 0 0 0 0 0 0 0 0 0 0 0 0
44256 - 0 0 0 0 0 0 0 0 0 6 6 6
44257 - 18 18 18 46 46 46 86 86 86 18 18 18
44258 - 2 2 6 34 34 34 10 10 10 6 6 6
44259 -210 210 210 253 253 253 253 253 253 253 253 253
44260 -253 253 253 253 253 253 253 253 253 253 253 253
44261 -253 253 253 253 253 253 234 234 234 242 242 242
44262 -253 253 253 253 253 253 253 253 253 253 253 253
44263 -253 253 253 253 253 253 253 253 253 253 253 253
44264 -253 253 253 253 253 253 253 253 253 253 253 253
44265 -253 253 253 253 253 253 253 253 253 253 253 253
44266 -253 253 253 253 253 253 221 221 221 6 6 6
44267 - 2 2 6 2 2 6 6 6 6 30 30 30
44268 - 2 2 6 2 2 6 2 2 6 2 2 6
44269 - 2 2 6 82 82 82 54 54 54 18 18 18
44270 - 6 6 6 0 0 0 0 0 0 0 0 0
44271 - 0 0 0 0 0 0 0 0 0 0 0 0
44272 - 0 0 0 0 0 0 0 0 0 0 0 0
44273 - 0 0 0 0 0 0 0 0 0 0 0 0
44274 - 0 0 0 0 0 0 0 0 0 0 0 0
44275 - 0 0 0 0 0 0 0 0 0 0 0 0
44276 - 0 0 0 0 0 0 0 0 0 10 10 10
44277 - 26 26 26 66 66 66 62 62 62 2 2 6
44278 - 2 2 6 38 38 38 10 10 10 26 26 26
44279 -238 238 238 253 253 253 253 253 253 253 253 253
44280 -253 253 253 253 253 253 253 253 253 253 253 253
44281 -253 253 253 253 253 253 231 231 231 238 238 238
44282 -253 253 253 253 253 253 253 253 253 253 253 253
44283 -253 253 253 253 253 253 253 253 253 253 253 253
44284 -253 253 253 253 253 253 253 253 253 253 253 253
44285 -253 253 253 253 253 253 253 253 253 253 253 253
44286 -253 253 253 253 253 253 231 231 231 6 6 6
44287 - 2 2 6 2 2 6 10 10 10 30 30 30
44288 - 2 2 6 2 2 6 2 2 6 2 2 6
44289 - 2 2 6 66 66 66 58 58 58 22 22 22
44290 - 6 6 6 0 0 0 0 0 0 0 0 0
44291 - 0 0 0 0 0 0 0 0 0 0 0 0
44292 - 0 0 0 0 0 0 0 0 0 0 0 0
44293 - 0 0 0 0 0 0 0 0 0 0 0 0
44294 - 0 0 0 0 0 0 0 0 0 0 0 0
44295 - 0 0 0 0 0 0 0 0 0 0 0 0
44296 - 0 0 0 0 0 0 0 0 0 10 10 10
44297 - 38 38 38 78 78 78 6 6 6 2 2 6
44298 - 2 2 6 46 46 46 14 14 14 42 42 42
44299 -246 246 246 253 253 253 253 253 253 253 253 253
44300 -253 253 253 253 253 253 253 253 253 253 253 253
44301 -253 253 253 253 253 253 231 231 231 242 242 242
44302 -253 253 253 253 253 253 253 253 253 253 253 253
44303 -253 253 253 253 253 253 253 253 253 253 253 253
44304 -253 253 253 253 253 253 253 253 253 253 253 253
44305 -253 253 253 253 253 253 253 253 253 253 253 253
44306 -253 253 253 253 253 253 234 234 234 10 10 10
44307 - 2 2 6 2 2 6 22 22 22 14 14 14
44308 - 2 2 6 2 2 6 2 2 6 2 2 6
44309 - 2 2 6 66 66 66 62 62 62 22 22 22
44310 - 6 6 6 0 0 0 0 0 0 0 0 0
44311 - 0 0 0 0 0 0 0 0 0 0 0 0
44312 - 0 0 0 0 0 0 0 0 0 0 0 0
44313 - 0 0 0 0 0 0 0 0 0 0 0 0
44314 - 0 0 0 0 0 0 0 0 0 0 0 0
44315 - 0 0 0 0 0 0 0 0 0 0 0 0
44316 - 0 0 0 0 0 0 6 6 6 18 18 18
44317 - 50 50 50 74 74 74 2 2 6 2 2 6
44318 - 14 14 14 70 70 70 34 34 34 62 62 62
44319 -250 250 250 253 253 253 253 253 253 253 253 253
44320 -253 253 253 253 253 253 253 253 253 253 253 253
44321 -253 253 253 253 253 253 231 231 231 246 246 246
44322 -253 253 253 253 253 253 253 253 253 253 253 253
44323 -253 253 253 253 253 253 253 253 253 253 253 253
44324 -253 253 253 253 253 253 253 253 253 253 253 253
44325 -253 253 253 253 253 253 253 253 253 253 253 253
44326 -253 253 253 253 253 253 234 234 234 14 14 14
44327 - 2 2 6 2 2 6 30 30 30 2 2 6
44328 - 2 2 6 2 2 6 2 2 6 2 2 6
44329 - 2 2 6 66 66 66 62 62 62 22 22 22
44330 - 6 6 6 0 0 0 0 0 0 0 0 0
44331 - 0 0 0 0 0 0 0 0 0 0 0 0
44332 - 0 0 0 0 0 0 0 0 0 0 0 0
44333 - 0 0 0 0 0 0 0 0 0 0 0 0
44334 - 0 0 0 0 0 0 0 0 0 0 0 0
44335 - 0 0 0 0 0 0 0 0 0 0 0 0
44336 - 0 0 0 0 0 0 6 6 6 18 18 18
44337 - 54 54 54 62 62 62 2 2 6 2 2 6
44338 - 2 2 6 30 30 30 46 46 46 70 70 70
44339 -250 250 250 253 253 253 253 253 253 253 253 253
44340 -253 253 253 253 253 253 253 253 253 253 253 253
44341 -253 253 253 253 253 253 231 231 231 246 246 246
44342 -253 253 253 253 253 253 253 253 253 253 253 253
44343 -253 253 253 253 253 253 253 253 253 253 253 253
44344 -253 253 253 253 253 253 253 253 253 253 253 253
44345 -253 253 253 253 253 253 253 253 253 253 253 253
44346 -253 253 253 253 253 253 226 226 226 10 10 10
44347 - 2 2 6 6 6 6 30 30 30 2 2 6
44348 - 2 2 6 2 2 6 2 2 6 2 2 6
44349 - 2 2 6 66 66 66 58 58 58 22 22 22
44350 - 6 6 6 0 0 0 0 0 0 0 0 0
44351 - 0 0 0 0 0 0 0 0 0 0 0 0
44352 - 0 0 0 0 0 0 0 0 0 0 0 0
44353 - 0 0 0 0 0 0 0 0 0 0 0 0
44354 - 0 0 0 0 0 0 0 0 0 0 0 0
44355 - 0 0 0 0 0 0 0 0 0 0 0 0
44356 - 0 0 0 0 0 0 6 6 6 22 22 22
44357 - 58 58 58 62 62 62 2 2 6 2 2 6
44358 - 2 2 6 2 2 6 30 30 30 78 78 78
44359 -250 250 250 253 253 253 253 253 253 253 253 253
44360 -253 253 253 253 253 253 253 253 253 253 253 253
44361 -253 253 253 253 253 253 231 231 231 246 246 246
44362 -253 253 253 253 253 253 253 253 253 253 253 253
44363 -253 253 253 253 253 253 253 253 253 253 253 253
44364 -253 253 253 253 253 253 253 253 253 253 253 253
44365 -253 253 253 253 253 253 253 253 253 253 253 253
44366 -253 253 253 253 253 253 206 206 206 2 2 6
44367 - 22 22 22 34 34 34 18 14 6 22 22 22
44368 - 26 26 26 18 18 18 6 6 6 2 2 6
44369 - 2 2 6 82 82 82 54 54 54 18 18 18
44370 - 6 6 6 0 0 0 0 0 0 0 0 0
44371 - 0 0 0 0 0 0 0 0 0 0 0 0
44372 - 0 0 0 0 0 0 0 0 0 0 0 0
44373 - 0 0 0 0 0 0 0 0 0 0 0 0
44374 - 0 0 0 0 0 0 0 0 0 0 0 0
44375 - 0 0 0 0 0 0 0 0 0 0 0 0
44376 - 0 0 0 0 0 0 6 6 6 26 26 26
44377 - 62 62 62 106 106 106 74 54 14 185 133 11
44378 -210 162 10 121 92 8 6 6 6 62 62 62
44379 -238 238 238 253 253 253 253 253 253 253 253 253
44380 -253 253 253 253 253 253 253 253 253 253 253 253
44381 -253 253 253 253 253 253 231 231 231 246 246 246
44382 -253 253 253 253 253 253 253 253 253 253 253 253
44383 -253 253 253 253 253 253 253 253 253 253 253 253
44384 -253 253 253 253 253 253 253 253 253 253 253 253
44385 -253 253 253 253 253 253 253 253 253 253 253 253
44386 -253 253 253 253 253 253 158 158 158 18 18 18
44387 - 14 14 14 2 2 6 2 2 6 2 2 6
44388 - 6 6 6 18 18 18 66 66 66 38 38 38
44389 - 6 6 6 94 94 94 50 50 50 18 18 18
44390 - 6 6 6 0 0 0 0 0 0 0 0 0
44391 - 0 0 0 0 0 0 0 0 0 0 0 0
44392 - 0 0 0 0 0 0 0 0 0 0 0 0
44393 - 0 0 0 0 0 0 0 0 0 0 0 0
44394 - 0 0 0 0 0 0 0 0 0 0 0 0
44395 - 0 0 0 0 0 0 0 0 0 6 6 6
44396 - 10 10 10 10 10 10 18 18 18 38 38 38
44397 - 78 78 78 142 134 106 216 158 10 242 186 14
44398 -246 190 14 246 190 14 156 118 10 10 10 10
44399 - 90 90 90 238 238 238 253 253 253 253 253 253
44400 -253 253 253 253 253 253 253 253 253 253 253 253
44401 -253 253 253 253 253 253 231 231 231 250 250 250
44402 -253 253 253 253 253 253 253 253 253 253 253 253
44403 -253 253 253 253 253 253 253 253 253 253 253 253
44404 -253 253 253 253 253 253 253 253 253 253 253 253
44405 -253 253 253 253 253 253 253 253 253 246 230 190
44406 -238 204 91 238 204 91 181 142 44 37 26 9
44407 - 2 2 6 2 2 6 2 2 6 2 2 6
44408 - 2 2 6 2 2 6 38 38 38 46 46 46
44409 - 26 26 26 106 106 106 54 54 54 18 18 18
44410 - 6 6 6 0 0 0 0 0 0 0 0 0
44411 - 0 0 0 0 0 0 0 0 0 0 0 0
44412 - 0 0 0 0 0 0 0 0 0 0 0 0
44413 - 0 0 0 0 0 0 0 0 0 0 0 0
44414 - 0 0 0 0 0 0 0 0 0 0 0 0
44415 - 0 0 0 6 6 6 14 14 14 22 22 22
44416 - 30 30 30 38 38 38 50 50 50 70 70 70
44417 -106 106 106 190 142 34 226 170 11 242 186 14
44418 -246 190 14 246 190 14 246 190 14 154 114 10
44419 - 6 6 6 74 74 74 226 226 226 253 253 253
44420 -253 253 253 253 253 253 253 253 253 253 253 253
44421 -253 253 253 253 253 253 231 231 231 250 250 250
44422 -253 253 253 253 253 253 253 253 253 253 253 253
44423 -253 253 253 253 253 253 253 253 253 253 253 253
44424 -253 253 253 253 253 253 253 253 253 253 253 253
44425 -253 253 253 253 253 253 253 253 253 228 184 62
44426 -241 196 14 241 208 19 232 195 16 38 30 10
44427 - 2 2 6 2 2 6 2 2 6 2 2 6
44428 - 2 2 6 6 6 6 30 30 30 26 26 26
44429 -203 166 17 154 142 90 66 66 66 26 26 26
44430 - 6 6 6 0 0 0 0 0 0 0 0 0
44431 - 0 0 0 0 0 0 0 0 0 0 0 0
44432 - 0 0 0 0 0 0 0 0 0 0 0 0
44433 - 0 0 0 0 0 0 0 0 0 0 0 0
44434 - 0 0 0 0 0 0 0 0 0 0 0 0
44435 - 6 6 6 18 18 18 38 38 38 58 58 58
44436 - 78 78 78 86 86 86 101 101 101 123 123 123
44437 -175 146 61 210 150 10 234 174 13 246 186 14
44438 -246 190 14 246 190 14 246 190 14 238 190 10
44439 -102 78 10 2 2 6 46 46 46 198 198 198
44440 -253 253 253 253 253 253 253 253 253 253 253 253
44441 -253 253 253 253 253 253 234 234 234 242 242 242
44442 -253 253 253 253 253 253 253 253 253 253 253 253
44443 -253 253 253 253 253 253 253 253 253 253 253 253
44444 -253 253 253 253 253 253 253 253 253 253 253 253
44445 -253 253 253 253 253 253 253 253 253 224 178 62
44446 -242 186 14 241 196 14 210 166 10 22 18 6
44447 - 2 2 6 2 2 6 2 2 6 2 2 6
44448 - 2 2 6 2 2 6 6 6 6 121 92 8
44449 -238 202 15 232 195 16 82 82 82 34 34 34
44450 - 10 10 10 0 0 0 0 0 0 0 0 0
44451 - 0 0 0 0 0 0 0 0 0 0 0 0
44452 - 0 0 0 0 0 0 0 0 0 0 0 0
44453 - 0 0 0 0 0 0 0 0 0 0 0 0
44454 - 0 0 0 0 0 0 0 0 0 0 0 0
44455 - 14 14 14 38 38 38 70 70 70 154 122 46
44456 -190 142 34 200 144 11 197 138 11 197 138 11
44457 -213 154 11 226 170 11 242 186 14 246 190 14
44458 -246 190 14 246 190 14 246 190 14 246 190 14
44459 -225 175 15 46 32 6 2 2 6 22 22 22
44460 -158 158 158 250 250 250 253 253 253 253 253 253
44461 -253 253 253 253 253 253 253 253 253 253 253 253
44462 -253 253 253 253 253 253 253 253 253 253 253 253
44463 -253 253 253 253 253 253 253 253 253 253 253 253
44464 -253 253 253 253 253 253 253 253 253 253 253 253
44465 -253 253 253 250 250 250 242 242 242 224 178 62
44466 -239 182 13 236 186 11 213 154 11 46 32 6
44467 - 2 2 6 2 2 6 2 2 6 2 2 6
44468 - 2 2 6 2 2 6 61 42 6 225 175 15
44469 -238 190 10 236 186 11 112 100 78 42 42 42
44470 - 14 14 14 0 0 0 0 0 0 0 0 0
44471 - 0 0 0 0 0 0 0 0 0 0 0 0
44472 - 0 0 0 0 0 0 0 0 0 0 0 0
44473 - 0 0 0 0 0 0 0 0 0 0 0 0
44474 - 0 0 0 0 0 0 0 0 0 6 6 6
44475 - 22 22 22 54 54 54 154 122 46 213 154 11
44476 -226 170 11 230 174 11 226 170 11 226 170 11
44477 -236 178 12 242 186 14 246 190 14 246 190 14
44478 -246 190 14 246 190 14 246 190 14 246 190 14
44479 -241 196 14 184 144 12 10 10 10 2 2 6
44480 - 6 6 6 116 116 116 242 242 242 253 253 253
44481 -253 253 253 253 253 253 253 253 253 253 253 253
44482 -253 253 253 253 253 253 253 253 253 253 253 253
44483 -253 253 253 253 253 253 253 253 253 253 253 253
44484 -253 253 253 253 253 253 253 253 253 253 253 253
44485 -253 253 253 231 231 231 198 198 198 214 170 54
44486 -236 178 12 236 178 12 210 150 10 137 92 6
44487 - 18 14 6 2 2 6 2 2 6 2 2 6
44488 - 6 6 6 70 47 6 200 144 11 236 178 12
44489 -239 182 13 239 182 13 124 112 88 58 58 58
44490 - 22 22 22 6 6 6 0 0 0 0 0 0
44491 - 0 0 0 0 0 0 0 0 0 0 0 0
44492 - 0 0 0 0 0 0 0 0 0 0 0 0
44493 - 0 0 0 0 0 0 0 0 0 0 0 0
44494 - 0 0 0 0 0 0 0 0 0 10 10 10
44495 - 30 30 30 70 70 70 180 133 36 226 170 11
44496 -239 182 13 242 186 14 242 186 14 246 186 14
44497 -246 190 14 246 190 14 246 190 14 246 190 14
44498 -246 190 14 246 190 14 246 190 14 246 190 14
44499 -246 190 14 232 195 16 98 70 6 2 2 6
44500 - 2 2 6 2 2 6 66 66 66 221 221 221
44501 -253 253 253 253 253 253 253 253 253 253 253 253
44502 -253 253 253 253 253 253 253 253 253 253 253 253
44503 -253 253 253 253 253 253 253 253 253 253 253 253
44504 -253 253 253 253 253 253 253 253 253 253 253 253
44505 -253 253 253 206 206 206 198 198 198 214 166 58
44506 -230 174 11 230 174 11 216 158 10 192 133 9
44507 -163 110 8 116 81 8 102 78 10 116 81 8
44508 -167 114 7 197 138 11 226 170 11 239 182 13
44509 -242 186 14 242 186 14 162 146 94 78 78 78
44510 - 34 34 34 14 14 14 6 6 6 0 0 0
44511 - 0 0 0 0 0 0 0 0 0 0 0 0
44512 - 0 0 0 0 0 0 0 0 0 0 0 0
44513 - 0 0 0 0 0 0 0 0 0 0 0 0
44514 - 0 0 0 0 0 0 0 0 0 6 6 6
44515 - 30 30 30 78 78 78 190 142 34 226 170 11
44516 -239 182 13 246 190 14 246 190 14 246 190 14
44517 -246 190 14 246 190 14 246 190 14 246 190 14
44518 -246 190 14 246 190 14 246 190 14 246 190 14
44519 -246 190 14 241 196 14 203 166 17 22 18 6
44520 - 2 2 6 2 2 6 2 2 6 38 38 38
44521 -218 218 218 253 253 253 253 253 253 253 253 253
44522 -253 253 253 253 253 253 253 253 253 253 253 253
44523 -253 253 253 253 253 253 253 253 253 253 253 253
44524 -253 253 253 253 253 253 253 253 253 253 253 253
44525 -250 250 250 206 206 206 198 198 198 202 162 69
44526 -226 170 11 236 178 12 224 166 10 210 150 10
44527 -200 144 11 197 138 11 192 133 9 197 138 11
44528 -210 150 10 226 170 11 242 186 14 246 190 14
44529 -246 190 14 246 186 14 225 175 15 124 112 88
44530 - 62 62 62 30 30 30 14 14 14 6 6 6
44531 - 0 0 0 0 0 0 0 0 0 0 0 0
44532 - 0 0 0 0 0 0 0 0 0 0 0 0
44533 - 0 0 0 0 0 0 0 0 0 0 0 0
44534 - 0 0 0 0 0 0 0 0 0 10 10 10
44535 - 30 30 30 78 78 78 174 135 50 224 166 10
44536 -239 182 13 246 190 14 246 190 14 246 190 14
44537 -246 190 14 246 190 14 246 190 14 246 190 14
44538 -246 190 14 246 190 14 246 190 14 246 190 14
44539 -246 190 14 246 190 14 241 196 14 139 102 15
44540 - 2 2 6 2 2 6 2 2 6 2 2 6
44541 - 78 78 78 250 250 250 253 253 253 253 253 253
44542 -253 253 253 253 253 253 253 253 253 253 253 253
44543 -253 253 253 253 253 253 253 253 253 253 253 253
44544 -253 253 253 253 253 253 253 253 253 253 253 253
44545 -250 250 250 214 214 214 198 198 198 190 150 46
44546 -219 162 10 236 178 12 234 174 13 224 166 10
44547 -216 158 10 213 154 11 213 154 11 216 158 10
44548 -226 170 11 239 182 13 246 190 14 246 190 14
44549 -246 190 14 246 190 14 242 186 14 206 162 42
44550 -101 101 101 58 58 58 30 30 30 14 14 14
44551 - 6 6 6 0 0 0 0 0 0 0 0 0
44552 - 0 0 0 0 0 0 0 0 0 0 0 0
44553 - 0 0 0 0 0 0 0 0 0 0 0 0
44554 - 0 0 0 0 0 0 0 0 0 10 10 10
44555 - 30 30 30 74 74 74 174 135 50 216 158 10
44556 -236 178 12 246 190 14 246 190 14 246 190 14
44557 -246 190 14 246 190 14 246 190 14 246 190 14
44558 -246 190 14 246 190 14 246 190 14 246 190 14
44559 -246 190 14 246 190 14 241 196 14 226 184 13
44560 - 61 42 6 2 2 6 2 2 6 2 2 6
44561 - 22 22 22 238 238 238 253 253 253 253 253 253
44562 -253 253 253 253 253 253 253 253 253 253 253 253
44563 -253 253 253 253 253 253 253 253 253 253 253 253
44564 -253 253 253 253 253 253 253 253 253 253 253 253
44565 -253 253 253 226 226 226 187 187 187 180 133 36
44566 -216 158 10 236 178 12 239 182 13 236 178 12
44567 -230 174 11 226 170 11 226 170 11 230 174 11
44568 -236 178 12 242 186 14 246 190 14 246 190 14
44569 -246 190 14 246 190 14 246 186 14 239 182 13
44570 -206 162 42 106 106 106 66 66 66 34 34 34
44571 - 14 14 14 6 6 6 0 0 0 0 0 0
44572 - 0 0 0 0 0 0 0 0 0 0 0 0
44573 - 0 0 0 0 0 0 0 0 0 0 0 0
44574 - 0 0 0 0 0 0 0 0 0 6 6 6
44575 - 26 26 26 70 70 70 163 133 67 213 154 11
44576 -236 178 12 246 190 14 246 190 14 246 190 14
44577 -246 190 14 246 190 14 246 190 14 246 190 14
44578 -246 190 14 246 190 14 246 190 14 246 190 14
44579 -246 190 14 246 190 14 246 190 14 241 196 14
44580 -190 146 13 18 14 6 2 2 6 2 2 6
44581 - 46 46 46 246 246 246 253 253 253 253 253 253
44582 -253 253 253 253 253 253 253 253 253 253 253 253
44583 -253 253 253 253 253 253 253 253 253 253 253 253
44584 -253 253 253 253 253 253 253 253 253 253 253 253
44585 -253 253 253 221 221 221 86 86 86 156 107 11
44586 -216 158 10 236 178 12 242 186 14 246 186 14
44587 -242 186 14 239 182 13 239 182 13 242 186 14
44588 -242 186 14 246 186 14 246 190 14 246 190 14
44589 -246 190 14 246 190 14 246 190 14 246 190 14
44590 -242 186 14 225 175 15 142 122 72 66 66 66
44591 - 30 30 30 10 10 10 0 0 0 0 0 0
44592 - 0 0 0 0 0 0 0 0 0 0 0 0
44593 - 0 0 0 0 0 0 0 0 0 0 0 0
44594 - 0 0 0 0 0 0 0 0 0 6 6 6
44595 - 26 26 26 70 70 70 163 133 67 210 150 10
44596 -236 178 12 246 190 14 246 190 14 246 190 14
44597 -246 190 14 246 190 14 246 190 14 246 190 14
44598 -246 190 14 246 190 14 246 190 14 246 190 14
44599 -246 190 14 246 190 14 246 190 14 246 190 14
44600 -232 195 16 121 92 8 34 34 34 106 106 106
44601 -221 221 221 253 253 253 253 253 253 253 253 253
44602 -253 253 253 253 253 253 253 253 253 253 253 253
44603 -253 253 253 253 253 253 253 253 253 253 253 253
44604 -253 253 253 253 253 253 253 253 253 253 253 253
44605 -242 242 242 82 82 82 18 14 6 163 110 8
44606 -216 158 10 236 178 12 242 186 14 246 190 14
44607 -246 190 14 246 190 14 246 190 14 246 190 14
44608 -246 190 14 246 190 14 246 190 14 246 190 14
44609 -246 190 14 246 190 14 246 190 14 246 190 14
44610 -246 190 14 246 190 14 242 186 14 163 133 67
44611 - 46 46 46 18 18 18 6 6 6 0 0 0
44612 - 0 0 0 0 0 0 0 0 0 0 0 0
44613 - 0 0 0 0 0 0 0 0 0 0 0 0
44614 - 0 0 0 0 0 0 0 0 0 10 10 10
44615 - 30 30 30 78 78 78 163 133 67 210 150 10
44616 -236 178 12 246 186 14 246 190 14 246 190 14
44617 -246 190 14 246 190 14 246 190 14 246 190 14
44618 -246 190 14 246 190 14 246 190 14 246 190 14
44619 -246 190 14 246 190 14 246 190 14 246 190 14
44620 -241 196 14 215 174 15 190 178 144 253 253 253
44621 -253 253 253 253 253 253 253 253 253 253 253 253
44622 -253 253 253 253 253 253 253 253 253 253 253 253
44623 -253 253 253 253 253 253 253 253 253 253 253 253
44624 -253 253 253 253 253 253 253 253 253 218 218 218
44625 - 58 58 58 2 2 6 22 18 6 167 114 7
44626 -216 158 10 236 178 12 246 186 14 246 190 14
44627 -246 190 14 246 190 14 246 190 14 246 190 14
44628 -246 190 14 246 190 14 246 190 14 246 190 14
44629 -246 190 14 246 190 14 246 190 14 246 190 14
44630 -246 190 14 246 186 14 242 186 14 190 150 46
44631 - 54 54 54 22 22 22 6 6 6 0 0 0
44632 - 0 0 0 0 0 0 0 0 0 0 0 0
44633 - 0 0 0 0 0 0 0 0 0 0 0 0
44634 - 0 0 0 0 0 0 0 0 0 14 14 14
44635 - 38 38 38 86 86 86 180 133 36 213 154 11
44636 -236 178 12 246 186 14 246 190 14 246 190 14
44637 -246 190 14 246 190 14 246 190 14 246 190 14
44638 -246 190 14 246 190 14 246 190 14 246 190 14
44639 -246 190 14 246 190 14 246 190 14 246 190 14
44640 -246 190 14 232 195 16 190 146 13 214 214 214
44641 -253 253 253 253 253 253 253 253 253 253 253 253
44642 -253 253 253 253 253 253 253 253 253 253 253 253
44643 -253 253 253 253 253 253 253 253 253 253 253 253
44644 -253 253 253 250 250 250 170 170 170 26 26 26
44645 - 2 2 6 2 2 6 37 26 9 163 110 8
44646 -219 162 10 239 182 13 246 186 14 246 190 14
44647 -246 190 14 246 190 14 246 190 14 246 190 14
44648 -246 190 14 246 190 14 246 190 14 246 190 14
44649 -246 190 14 246 190 14 246 190 14 246 190 14
44650 -246 186 14 236 178 12 224 166 10 142 122 72
44651 - 46 46 46 18 18 18 6 6 6 0 0 0
44652 - 0 0 0 0 0 0 0 0 0 0 0 0
44653 - 0 0 0 0 0 0 0 0 0 0 0 0
44654 - 0 0 0 0 0 0 6 6 6 18 18 18
44655 - 50 50 50 109 106 95 192 133 9 224 166 10
44656 -242 186 14 246 190 14 246 190 14 246 190 14
44657 -246 190 14 246 190 14 246 190 14 246 190 14
44658 -246 190 14 246 190 14 246 190 14 246 190 14
44659 -246 190 14 246 190 14 246 190 14 246 190 14
44660 -242 186 14 226 184 13 210 162 10 142 110 46
44661 -226 226 226 253 253 253 253 253 253 253 253 253
44662 -253 253 253 253 253 253 253 253 253 253 253 253
44663 -253 253 253 253 253 253 253 253 253 253 253 253
44664 -198 198 198 66 66 66 2 2 6 2 2 6
44665 - 2 2 6 2 2 6 50 34 6 156 107 11
44666 -219 162 10 239 182 13 246 186 14 246 190 14
44667 -246 190 14 246 190 14 246 190 14 246 190 14
44668 -246 190 14 246 190 14 246 190 14 246 190 14
44669 -246 190 14 246 190 14 246 190 14 242 186 14
44670 -234 174 13 213 154 11 154 122 46 66 66 66
44671 - 30 30 30 10 10 10 0 0 0 0 0 0
44672 - 0 0 0 0 0 0 0 0 0 0 0 0
44673 - 0 0 0 0 0 0 0 0 0 0 0 0
44674 - 0 0 0 0 0 0 6 6 6 22 22 22
44675 - 58 58 58 154 121 60 206 145 10 234 174 13
44676 -242 186 14 246 186 14 246 190 14 246 190 14
44677 -246 190 14 246 190 14 246 190 14 246 190 14
44678 -246 190 14 246 190 14 246 190 14 246 190 14
44679 -246 190 14 246 190 14 246 190 14 246 190 14
44680 -246 186 14 236 178 12 210 162 10 163 110 8
44681 - 61 42 6 138 138 138 218 218 218 250 250 250
44682 -253 253 253 253 253 253 253 253 253 250 250 250
44683 -242 242 242 210 210 210 144 144 144 66 66 66
44684 - 6 6 6 2 2 6 2 2 6 2 2 6
44685 - 2 2 6 2 2 6 61 42 6 163 110 8
44686 -216 158 10 236 178 12 246 190 14 246 190 14
44687 -246 190 14 246 190 14 246 190 14 246 190 14
44688 -246 190 14 246 190 14 246 190 14 246 190 14
44689 -246 190 14 239 182 13 230 174 11 216 158 10
44690 -190 142 34 124 112 88 70 70 70 38 38 38
44691 - 18 18 18 6 6 6 0 0 0 0 0 0
44692 - 0 0 0 0 0 0 0 0 0 0 0 0
44693 - 0 0 0 0 0 0 0 0 0 0 0 0
44694 - 0 0 0 0 0 0 6 6 6 22 22 22
44695 - 62 62 62 168 124 44 206 145 10 224 166 10
44696 -236 178 12 239 182 13 242 186 14 242 186 14
44697 -246 186 14 246 190 14 246 190 14 246 190 14
44698 -246 190 14 246 190 14 246 190 14 246 190 14
44699 -246 190 14 246 190 14 246 190 14 246 190 14
44700 -246 190 14 236 178 12 216 158 10 175 118 6
44701 - 80 54 7 2 2 6 6 6 6 30 30 30
44702 - 54 54 54 62 62 62 50 50 50 38 38 38
44703 - 14 14 14 2 2 6 2 2 6 2 2 6
44704 - 2 2 6 2 2 6 2 2 6 2 2 6
44705 - 2 2 6 6 6 6 80 54 7 167 114 7
44706 -213 154 11 236 178 12 246 190 14 246 190 14
44707 -246 190 14 246 190 14 246 190 14 246 190 14
44708 -246 190 14 242 186 14 239 182 13 239 182 13
44709 -230 174 11 210 150 10 174 135 50 124 112 88
44710 - 82 82 82 54 54 54 34 34 34 18 18 18
44711 - 6 6 6 0 0 0 0 0 0 0 0 0
44712 - 0 0 0 0 0 0 0 0 0 0 0 0
44713 - 0 0 0 0 0 0 0 0 0 0 0 0
44714 - 0 0 0 0 0 0 6 6 6 18 18 18
44715 - 50 50 50 158 118 36 192 133 9 200 144 11
44716 -216 158 10 219 162 10 224 166 10 226 170 11
44717 -230 174 11 236 178 12 239 182 13 239 182 13
44718 -242 186 14 246 186 14 246 190 14 246 190 14
44719 -246 190 14 246 190 14 246 190 14 246 190 14
44720 -246 186 14 230 174 11 210 150 10 163 110 8
44721 -104 69 6 10 10 10 2 2 6 2 2 6
44722 - 2 2 6 2 2 6 2 2 6 2 2 6
44723 - 2 2 6 2 2 6 2 2 6 2 2 6
44724 - 2 2 6 2 2 6 2 2 6 2 2 6
44725 - 2 2 6 6 6 6 91 60 6 167 114 7
44726 -206 145 10 230 174 11 242 186 14 246 190 14
44727 -246 190 14 246 190 14 246 186 14 242 186 14
44728 -239 182 13 230 174 11 224 166 10 213 154 11
44729 -180 133 36 124 112 88 86 86 86 58 58 58
44730 - 38 38 38 22 22 22 10 10 10 6 6 6
44731 - 0 0 0 0 0 0 0 0 0 0 0 0
44732 - 0 0 0 0 0 0 0 0 0 0 0 0
44733 - 0 0 0 0 0 0 0 0 0 0 0 0
44734 - 0 0 0 0 0 0 0 0 0 14 14 14
44735 - 34 34 34 70 70 70 138 110 50 158 118 36
44736 -167 114 7 180 123 7 192 133 9 197 138 11
44737 -200 144 11 206 145 10 213 154 11 219 162 10
44738 -224 166 10 230 174 11 239 182 13 242 186 14
44739 -246 186 14 246 186 14 246 186 14 246 186 14
44740 -239 182 13 216 158 10 185 133 11 152 99 6
44741 -104 69 6 18 14 6 2 2 6 2 2 6
44742 - 2 2 6 2 2 6 2 2 6 2 2 6
44743 - 2 2 6 2 2 6 2 2 6 2 2 6
44744 - 2 2 6 2 2 6 2 2 6 2 2 6
44745 - 2 2 6 6 6 6 80 54 7 152 99 6
44746 -192 133 9 219 162 10 236 178 12 239 182 13
44747 -246 186 14 242 186 14 239 182 13 236 178 12
44748 -224 166 10 206 145 10 192 133 9 154 121 60
44749 - 94 94 94 62 62 62 42 42 42 22 22 22
44750 - 14 14 14 6 6 6 0 0 0 0 0 0
44751 - 0 0 0 0 0 0 0 0 0 0 0 0
44752 - 0 0 0 0 0 0 0 0 0 0 0 0
44753 - 0 0 0 0 0 0 0 0 0 0 0 0
44754 - 0 0 0 0 0 0 0 0 0 6 6 6
44755 - 18 18 18 34 34 34 58 58 58 78 78 78
44756 -101 98 89 124 112 88 142 110 46 156 107 11
44757 -163 110 8 167 114 7 175 118 6 180 123 7
44758 -185 133 11 197 138 11 210 150 10 219 162 10
44759 -226 170 11 236 178 12 236 178 12 234 174 13
44760 -219 162 10 197 138 11 163 110 8 130 83 6
44761 - 91 60 6 10 10 10 2 2 6 2 2 6
44762 - 18 18 18 38 38 38 38 38 38 38 38 38
44763 - 38 38 38 38 38 38 38 38 38 38 38 38
44764 - 38 38 38 38 38 38 26 26 26 2 2 6
44765 - 2 2 6 6 6 6 70 47 6 137 92 6
44766 -175 118 6 200 144 11 219 162 10 230 174 11
44767 -234 174 13 230 174 11 219 162 10 210 150 10
44768 -192 133 9 163 110 8 124 112 88 82 82 82
44769 - 50 50 50 30 30 30 14 14 14 6 6 6
44770 - 0 0 0 0 0 0 0 0 0 0 0 0
44771 - 0 0 0 0 0 0 0 0 0 0 0 0
44772 - 0 0 0 0 0 0 0 0 0 0 0 0
44773 - 0 0 0 0 0 0 0 0 0 0 0 0
44774 - 0 0 0 0 0 0 0 0 0 0 0 0
44775 - 6 6 6 14 14 14 22 22 22 34 34 34
44776 - 42 42 42 58 58 58 74 74 74 86 86 86
44777 -101 98 89 122 102 70 130 98 46 121 87 25
44778 -137 92 6 152 99 6 163 110 8 180 123 7
44779 -185 133 11 197 138 11 206 145 10 200 144 11
44780 -180 123 7 156 107 11 130 83 6 104 69 6
44781 - 50 34 6 54 54 54 110 110 110 101 98 89
44782 - 86 86 86 82 82 82 78 78 78 78 78 78
44783 - 78 78 78 78 78 78 78 78 78 78 78 78
44784 - 78 78 78 82 82 82 86 86 86 94 94 94
44785 -106 106 106 101 101 101 86 66 34 124 80 6
44786 -156 107 11 180 123 7 192 133 9 200 144 11
44787 -206 145 10 200 144 11 192 133 9 175 118 6
44788 -139 102 15 109 106 95 70 70 70 42 42 42
44789 - 22 22 22 10 10 10 0 0 0 0 0 0
44790 - 0 0 0 0 0 0 0 0 0 0 0 0
44791 - 0 0 0 0 0 0 0 0 0 0 0 0
44792 - 0 0 0 0 0 0 0 0 0 0 0 0
44793 - 0 0 0 0 0 0 0 0 0 0 0 0
44794 - 0 0 0 0 0 0 0 0 0 0 0 0
44795 - 0 0 0 0 0 0 6 6 6 10 10 10
44796 - 14 14 14 22 22 22 30 30 30 38 38 38
44797 - 50 50 50 62 62 62 74 74 74 90 90 90
44798 -101 98 89 112 100 78 121 87 25 124 80 6
44799 -137 92 6 152 99 6 152 99 6 152 99 6
44800 -138 86 6 124 80 6 98 70 6 86 66 30
44801 -101 98 89 82 82 82 58 58 58 46 46 46
44802 - 38 38 38 34 34 34 34 34 34 34 34 34
44803 - 34 34 34 34 34 34 34 34 34 34 34 34
44804 - 34 34 34 34 34 34 38 38 38 42 42 42
44805 - 54 54 54 82 82 82 94 86 76 91 60 6
44806 -134 86 6 156 107 11 167 114 7 175 118 6
44807 -175 118 6 167 114 7 152 99 6 121 87 25
44808 -101 98 89 62 62 62 34 34 34 18 18 18
44809 - 6 6 6 0 0 0 0 0 0 0 0 0
44810 - 0 0 0 0 0 0 0 0 0 0 0 0
44811 - 0 0 0 0 0 0 0 0 0 0 0 0
44812 - 0 0 0 0 0 0 0 0 0 0 0 0
44813 - 0 0 0 0 0 0 0 0 0 0 0 0
44814 - 0 0 0 0 0 0 0 0 0 0 0 0
44815 - 0 0 0 0 0 0 0 0 0 0 0 0
44816 - 0 0 0 6 6 6 6 6 6 10 10 10
44817 - 18 18 18 22 22 22 30 30 30 42 42 42
44818 - 50 50 50 66 66 66 86 86 86 101 98 89
44819 -106 86 58 98 70 6 104 69 6 104 69 6
44820 -104 69 6 91 60 6 82 62 34 90 90 90
44821 - 62 62 62 38 38 38 22 22 22 14 14 14
44822 - 10 10 10 10 10 10 10 10 10 10 10 10
44823 - 10 10 10 10 10 10 6 6 6 10 10 10
44824 - 10 10 10 10 10 10 10 10 10 14 14 14
44825 - 22 22 22 42 42 42 70 70 70 89 81 66
44826 - 80 54 7 104 69 6 124 80 6 137 92 6
44827 -134 86 6 116 81 8 100 82 52 86 86 86
44828 - 58 58 58 30 30 30 14 14 14 6 6 6
44829 - 0 0 0 0 0 0 0 0 0 0 0 0
44830 - 0 0 0 0 0 0 0 0 0 0 0 0
44831 - 0 0 0 0 0 0 0 0 0 0 0 0
44832 - 0 0 0 0 0 0 0 0 0 0 0 0
44833 - 0 0 0 0 0 0 0 0 0 0 0 0
44834 - 0 0 0 0 0 0 0 0 0 0 0 0
44835 - 0 0 0 0 0 0 0 0 0 0 0 0
44836 - 0 0 0 0 0 0 0 0 0 0 0 0
44837 - 0 0 0 6 6 6 10 10 10 14 14 14
44838 - 18 18 18 26 26 26 38 38 38 54 54 54
44839 - 70 70 70 86 86 86 94 86 76 89 81 66
44840 - 89 81 66 86 86 86 74 74 74 50 50 50
44841 - 30 30 30 14 14 14 6 6 6 0 0 0
44842 - 0 0 0 0 0 0 0 0 0 0 0 0
44843 - 0 0 0 0 0 0 0 0 0 0 0 0
44844 - 0 0 0 0 0 0 0 0 0 0 0 0
44845 - 6 6 6 18 18 18 34 34 34 58 58 58
44846 - 82 82 82 89 81 66 89 81 66 89 81 66
44847 - 94 86 66 94 86 76 74 74 74 50 50 50
44848 - 26 26 26 14 14 14 6 6 6 0 0 0
44849 - 0 0 0 0 0 0 0 0 0 0 0 0
44850 - 0 0 0 0 0 0 0 0 0 0 0 0
44851 - 0 0 0 0 0 0 0 0 0 0 0 0
44852 - 0 0 0 0 0 0 0 0 0 0 0 0
44853 - 0 0 0 0 0 0 0 0 0 0 0 0
44854 - 0 0 0 0 0 0 0 0 0 0 0 0
44855 - 0 0 0 0 0 0 0 0 0 0 0 0
44856 - 0 0 0 0 0 0 0 0 0 0 0 0
44857 - 0 0 0 0 0 0 0 0 0 0 0 0
44858 - 6 6 6 6 6 6 14 14 14 18 18 18
44859 - 30 30 30 38 38 38 46 46 46 54 54 54
44860 - 50 50 50 42 42 42 30 30 30 18 18 18
44861 - 10 10 10 0 0 0 0 0 0 0 0 0
44862 - 0 0 0 0 0 0 0 0 0 0 0 0
44863 - 0 0 0 0 0 0 0 0 0 0 0 0
44864 - 0 0 0 0 0 0 0 0 0 0 0 0
44865 - 0 0 0 6 6 6 14 14 14 26 26 26
44866 - 38 38 38 50 50 50 58 58 58 58 58 58
44867 - 54 54 54 42 42 42 30 30 30 18 18 18
44868 - 10 10 10 0 0 0 0 0 0 0 0 0
44869 - 0 0 0 0 0 0 0 0 0 0 0 0
44870 - 0 0 0 0 0 0 0 0 0 0 0 0
44871 - 0 0 0 0 0 0 0 0 0 0 0 0
44872 - 0 0 0 0 0 0 0 0 0 0 0 0
44873 - 0 0 0 0 0 0 0 0 0 0 0 0
44874 - 0 0 0 0 0 0 0 0 0 0 0 0
44875 - 0 0 0 0 0 0 0 0 0 0 0 0
44876 - 0 0 0 0 0 0 0 0 0 0 0 0
44877 - 0 0 0 0 0 0 0 0 0 0 0 0
44878 - 0 0 0 0 0 0 0 0 0 6 6 6
44879 - 6 6 6 10 10 10 14 14 14 18 18 18
44880 - 18 18 18 14 14 14 10 10 10 6 6 6
44881 - 0 0 0 0 0 0 0 0 0 0 0 0
44882 - 0 0 0 0 0 0 0 0 0 0 0 0
44883 - 0 0 0 0 0 0 0 0 0 0 0 0
44884 - 0 0 0 0 0 0 0 0 0 0 0 0
44885 - 0 0 0 0 0 0 0 0 0 6 6 6
44886 - 14 14 14 18 18 18 22 22 22 22 22 22
44887 - 18 18 18 14 14 14 10 10 10 6 6 6
44888 - 0 0 0 0 0 0 0 0 0 0 0 0
44889 - 0 0 0 0 0 0 0 0 0 0 0 0
44890 - 0 0 0 0 0 0 0 0 0 0 0 0
44891 - 0 0 0 0 0 0 0 0 0 0 0 0
44892 - 0 0 0 0 0 0 0 0 0 0 0 0
44893 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44894 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44895 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44896 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44897 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44898 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44899 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44900 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44901 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44902 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44903 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44904 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44905 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44906 +4 4 4 4 4 4
44907 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44908 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44909 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44910 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44911 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44912 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44913 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44914 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44915 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44916 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44917 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44918 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44919 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44920 +4 4 4 4 4 4
44921 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44922 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44923 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44924 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44925 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44926 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44927 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44928 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44929 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44930 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44931 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44932 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44933 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44934 +4 4 4 4 4 4
44935 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44936 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44937 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44938 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44939 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44940 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44941 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44942 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44943 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44944 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44945 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44946 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44947 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44948 +4 4 4 4 4 4
44949 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44950 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44951 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44952 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44953 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44954 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44955 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44956 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44957 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44958 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44959 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44960 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44961 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44962 +4 4 4 4 4 4
44963 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44964 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44965 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44966 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44967 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44968 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44969 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44970 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44971 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44972 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44973 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44974 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44975 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44976 +4 4 4 4 4 4
44977 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44978 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44979 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44980 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44981 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
44982 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
44983 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44984 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44985 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44986 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
44987 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
44988 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
44989 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44990 +4 4 4 4 4 4
44991 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44992 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44993 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44994 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44995 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
44996 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
44997 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44998 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44999 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45000 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
45001 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
45002 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
45003 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45004 +4 4 4 4 4 4
45005 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45006 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45007 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45008 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45009 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
45010 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
45011 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
45012 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45013 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45014 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
45015 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
45016 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
45017 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
45018 +4 4 4 4 4 4
45019 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45020 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45021 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45022 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
45023 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
45024 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
45025 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
45026 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45027 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
45028 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
45029 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
45030 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
45031 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
45032 +4 4 4 4 4 4
45033 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45034 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45035 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45036 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
45037 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
45038 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
45039 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
45040 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45041 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
45042 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
45043 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
45044 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
45045 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
45046 +4 4 4 4 4 4
45047 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45048 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45049 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45050 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
45051 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
45052 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
45053 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
45054 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
45055 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
45056 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
45057 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
45058 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
45059 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
45060 +4 4 4 4 4 4
45061 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45062 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45063 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
45064 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
45065 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
45066 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
45067 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
45068 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
45069 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
45070 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
45071 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
45072 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
45073 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
45074 +4 4 4 4 4 4
45075 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45076 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45077 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
45078 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
45079 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
45080 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
45081 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
45082 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
45083 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
45084 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
45085 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
45086 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
45087 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
45088 +4 4 4 4 4 4
45089 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45090 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45091 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
45092 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
45093 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
45094 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
45095 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
45096 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
45097 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
45098 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
45099 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
45100 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
45101 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
45102 +4 4 4 4 4 4
45103 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45104 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45105 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
45106 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
45107 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
45108 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
45109 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
45110 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
45111 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
45112 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
45113 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
45114 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
45115 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
45116 +4 4 4 4 4 4
45117 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45118 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
45119 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
45120 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
45121 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
45122 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
45123 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
45124 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
45125 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
45126 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
45127 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
45128 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
45129 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
45130 +4 4 4 4 4 4
45131 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45132 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
45133 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
45134 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
45135 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
45136 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
45137 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
45138 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
45139 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
45140 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
45141 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
45142 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
45143 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
45144 +0 0 0 4 4 4
45145 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
45146 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
45147 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
45148 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
45149 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
45150 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
45151 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
45152 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
45153 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
45154 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
45155 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
45156 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
45157 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
45158 +2 0 0 0 0 0
45159 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
45160 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
45161 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
45162 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
45163 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
45164 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
45165 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
45166 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
45167 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
45168 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
45169 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
45170 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
45171 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
45172 +37 38 37 0 0 0
45173 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
45174 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
45175 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
45176 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
45177 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
45178 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
45179 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
45180 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
45181 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
45182 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
45183 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
45184 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
45185 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
45186 +85 115 134 4 0 0
45187 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
45188 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
45189 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
45190 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
45191 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
45192 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
45193 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
45194 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
45195 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
45196 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
45197 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
45198 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
45199 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
45200 +60 73 81 4 0 0
45201 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
45202 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
45203 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
45204 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
45205 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
45206 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
45207 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
45208 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
45209 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
45210 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
45211 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
45212 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
45213 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
45214 +16 19 21 4 0 0
45215 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
45216 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
45217 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
45218 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
45219 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
45220 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
45221 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
45222 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
45223 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
45224 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
45225 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
45226 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
45227 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
45228 +4 0 0 4 3 3
45229 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
45230 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
45231 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
45232 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
45233 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
45234 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
45235 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
45236 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
45237 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
45238 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
45239 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
45240 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
45241 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
45242 +3 2 2 4 4 4
45243 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
45244 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
45245 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
45246 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
45247 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
45248 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
45249 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
45250 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
45251 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
45252 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
45253 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
45254 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
45255 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
45256 +4 4 4 4 4 4
45257 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
45258 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
45259 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
45260 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
45261 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
45262 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
45263 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
45264 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
45265 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
45266 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
45267 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
45268 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
45269 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
45270 +4 4 4 4 4 4
45271 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
45272 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
45273 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
45274 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
45275 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
45276 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
45277 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
45278 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
45279 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
45280 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
45281 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
45282 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
45283 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
45284 +5 5 5 5 5 5
45285 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
45286 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
45287 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
45288 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
45289 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
45290 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45291 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
45292 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
45293 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
45294 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
45295 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
45296 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
45297 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
45298 +5 5 5 4 4 4
45299 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
45300 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
45301 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
45302 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
45303 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45304 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
45305 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
45306 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
45307 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
45308 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
45309 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
45310 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
45311 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45312 +4 4 4 4 4 4
45313 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
45314 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
45315 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
45316 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
45317 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
45318 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45319 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45320 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
45321 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
45322 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
45323 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
45324 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
45325 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45326 +4 4 4 4 4 4
45327 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
45328 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
45329 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
45330 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
45331 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45332 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
45333 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
45334 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
45335 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
45336 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
45337 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
45338 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45339 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45340 +4 4 4 4 4 4
45341 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
45342 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
45343 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
45344 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
45345 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45346 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45347 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45348 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
45349 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
45350 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
45351 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
45352 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45353 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45354 +4 4 4 4 4 4
45355 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
45356 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
45357 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
45358 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
45359 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45360 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
45361 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
45362 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
45363 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
45364 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
45365 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45366 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45367 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45368 +4 4 4 4 4 4
45369 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
45370 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
45371 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
45372 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
45373 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45374 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
45375 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
45376 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
45377 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
45378 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
45379 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
45380 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45381 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45382 +4 4 4 4 4 4
45383 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
45384 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
45385 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
45386 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
45387 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45388 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
45389 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
45390 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
45391 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
45392 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
45393 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
45394 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45395 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45396 +4 4 4 4 4 4
45397 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
45398 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
45399 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
45400 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
45401 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
45402 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
45403 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
45404 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
45405 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
45406 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
45407 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45408 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45409 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45410 +4 4 4 4 4 4
45411 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
45412 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
45413 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
45414 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
45415 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45416 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
45417 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
45418 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
45419 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
45420 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
45421 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45422 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45423 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45424 +4 4 4 4 4 4
45425 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
45426 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
45427 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
45428 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
45429 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45430 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
45431 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
45432 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
45433 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
45434 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
45435 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45436 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45437 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45438 +4 4 4 4 4 4
45439 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
45440 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
45441 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
45442 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
45443 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45444 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
45445 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
45446 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
45447 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
45448 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45449 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45450 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45451 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45452 +4 4 4 4 4 4
45453 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
45454 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
45455 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
45456 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
45457 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
45458 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
45459 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
45460 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
45461 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45462 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45463 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45464 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45465 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45466 +4 4 4 4 4 4
45467 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
45468 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
45469 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
45470 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
45471 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45472 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
45473 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
45474 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
45475 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45476 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45477 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45478 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45479 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45480 +4 4 4 4 4 4
45481 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
45482 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
45483 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
45484 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
45485 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
45486 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
45487 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
45488 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
45489 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45490 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45491 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45492 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45493 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45494 +4 4 4 4 4 4
45495 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
45496 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
45497 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45498 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
45499 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
45500 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
45501 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
45502 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
45503 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
45504 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45505 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45506 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45507 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45508 +4 4 4 4 4 4
45509 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
45510 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
45511 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
45512 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
45513 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
45514 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
45515 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
45516 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
45517 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45518 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45519 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45520 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45521 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45522 +4 4 4 4 4 4
45523 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
45524 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
45525 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45526 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
45527 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
45528 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
45529 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
45530 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
45531 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
45532 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45533 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45534 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45535 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45536 +4 4 4 4 4 4
45537 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
45538 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
45539 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
45540 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
45541 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
45542 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
45543 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
45544 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
45545 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45546 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45547 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45548 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45549 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45550 +4 4 4 4 4 4
45551 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45552 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
45553 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45554 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
45555 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
45556 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
45557 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
45558 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
45559 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45560 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45561 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45562 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45563 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45564 +4 4 4 4 4 4
45565 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
45566 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
45567 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
45568 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
45569 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
45570 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
45571 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45572 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
45573 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45574 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45575 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45576 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45577 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45578 +4 4 4 4 4 4
45579 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45580 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
45581 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
45582 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
45583 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
45584 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
45585 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45586 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
45587 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45588 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45589 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45590 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45591 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45592 +4 4 4 4 4 4
45593 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
45594 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
45595 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
45596 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
45597 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
45598 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
45599 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
45600 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
45601 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
45602 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45603 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45604 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45605 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45606 +4 4 4 4 4 4
45607 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45608 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
45609 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
45610 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
45611 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
45612 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
45613 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
45614 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
45615 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
45616 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45617 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45618 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45619 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45620 +4 4 4 4 4 4
45621 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
45622 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
45623 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
45624 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
45625 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
45626 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
45627 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
45628 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
45629 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
45630 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45631 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45632 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45633 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45634 +4 4 4 4 4 4
45635 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45636 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
45637 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
45638 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
45639 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
45640 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
45641 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
45642 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
45643 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
45644 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45645 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45646 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45647 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45648 +4 4 4 4 4 4
45649 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
45650 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
45651 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
45652 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
45653 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
45654 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
45655 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
45656 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
45657 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
45658 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
45659 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45660 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45661 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45662 +4 4 4 4 4 4
45663 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
45664 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
45665 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
45666 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
45667 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
45668 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
45669 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
45670 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
45671 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
45672 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
45673 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45674 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45675 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45676 +4 4 4 4 4 4
45677 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
45678 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
45679 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
45680 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
45681 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
45682 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
45683 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45684 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
45685 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
45686 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
45687 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45688 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45689 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45690 +4 4 4 4 4 4
45691 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
45692 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
45693 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
45694 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
45695 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
45696 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
45697 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
45698 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
45699 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
45700 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
45701 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45702 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45703 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45704 +4 4 4 4 4 4
45705 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
45706 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
45707 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
45708 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
45709 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
45710 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
45711 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
45712 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
45713 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
45714 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
45715 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45716 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45717 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45718 +4 4 4 4 4 4
45719 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45720 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
45721 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
45722 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
45723 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
45724 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
45725 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
45726 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
45727 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
45728 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
45729 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45730 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45731 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45732 +4 4 4 4 4 4
45733 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
45734 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
45735 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
45736 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
45737 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
45738 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
45739 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
45740 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
45741 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
45742 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
45743 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45744 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45745 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45746 +4 4 4 4 4 4
45747 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
45748 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
45749 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
45750 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
45751 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
45752 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
45753 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
45754 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
45755 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
45756 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45757 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45758 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45759 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45760 +4 4 4 4 4 4
45761 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
45762 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45763 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
45764 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
45765 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
45766 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
45767 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
45768 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
45769 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
45770 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45771 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45772 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45773 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45774 +4 4 4 4 4 4
45775 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
45776 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
45777 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
45778 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
45779 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
45780 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
45781 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
45782 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
45783 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
45784 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45785 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45786 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45787 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45788 +4 4 4 4 4 4
45789 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
45790 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
45791 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
45792 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
45793 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
45794 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
45795 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
45796 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
45797 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45798 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45799 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45800 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45801 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45802 +4 4 4 4 4 4
45803 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
45804 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
45805 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
45806 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
45807 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
45808 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
45809 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
45810 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
45811 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45812 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45813 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45814 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45815 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45816 +4 4 4 4 4 4
45817 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
45818 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
45819 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
45820 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
45821 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
45822 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
45823 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
45824 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
45825 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45826 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45827 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45828 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45829 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45830 +4 4 4 4 4 4
45831 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45832 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
45833 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
45834 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
45835 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
45836 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
45837 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
45838 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
45839 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45840 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45841 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45842 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45843 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45844 +4 4 4 4 4 4
45845 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45846 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
45847 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
45848 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
45849 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
45850 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
45851 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
45852 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
45853 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45854 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45855 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45856 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45857 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45858 +4 4 4 4 4 4
45859 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45860 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
45861 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
45862 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
45863 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
45864 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
45865 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
45866 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45867 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45868 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45869 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45870 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45871 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45872 +4 4 4 4 4 4
45873 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45874 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45875 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
45876 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
45877 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
45878 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
45879 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
45880 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45881 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45882 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45883 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45884 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45885 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45886 +4 4 4 4 4 4
45887 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45888 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45889 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45890 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
45891 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
45892 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
45893 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
45894 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45895 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45896 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45897 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45898 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45899 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45900 +4 4 4 4 4 4
45901 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45902 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45903 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45904 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
45905 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
45906 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
45907 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
45908 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45909 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45910 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45911 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45912 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45913 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45914 +4 4 4 4 4 4
45915 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45916 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45917 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45918 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
45919 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45920 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
45921 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
45922 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45923 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45924 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45925 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45926 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45927 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45928 +4 4 4 4 4 4
45929 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45930 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45931 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45932 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
45933 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
45934 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
45935 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
45936 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45937 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45938 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45939 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45940 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45941 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45942 +4 4 4 4 4 4
45943 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45944 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45945 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45946 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45947 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
45948 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
45949 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45950 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45951 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45952 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45953 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45954 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45955 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45956 +4 4 4 4 4 4
45957 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45958 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45959 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45960 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45961 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
45962 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
45963 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
45964 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45965 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45966 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45967 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45968 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45969 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45970 +4 4 4 4 4 4
45971 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45972 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45973 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45974 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45975 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
45976 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
45977 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45978 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45979 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45980 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45981 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45982 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45983 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45984 +4 4 4 4 4 4
45985 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45986 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45987 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45988 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45989 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
45990 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
45991 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45992 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45993 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45994 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45995 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45996 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45997 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45998 +4 4 4 4 4 4
45999 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46000 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46001 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46002 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46003 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
46004 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
46005 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46006 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46007 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46008 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46009 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46010 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46011 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46012 +4 4 4 4 4 4
46013 diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
46014 index 443e3c8..c443d6a 100644
46015 --- a/drivers/video/nvidia/nv_backlight.c
46016 +++ b/drivers/video/nvidia/nv_backlight.c
46017 @@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(struct backlight_device *bd)
46018 return bd->props.brightness;
46019 }
46020
46021 -static struct backlight_ops nvidia_bl_ops = {
46022 +static const struct backlight_ops nvidia_bl_ops = {
46023 .get_brightness = nvidia_bl_get_brightness,
46024 .update_status = nvidia_bl_update_status,
46025 };
46026 diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
46027 index d94c57f..912984c 100644
46028 --- a/drivers/video/riva/fbdev.c
46029 +++ b/drivers/video/riva/fbdev.c
46030 @@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct backlight_device *bd)
46031 return bd->props.brightness;
46032 }
46033
46034 -static struct backlight_ops riva_bl_ops = {
46035 +static const struct backlight_ops riva_bl_ops = {
46036 .get_brightness = riva_bl_get_brightness,
46037 .update_status = riva_bl_update_status,
46038 };
46039 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
46040 index 54fbb29..2c108fc 100644
46041 --- a/drivers/video/uvesafb.c
46042 +++ b/drivers/video/uvesafb.c
46043 @@ -18,6 +18,7 @@
46044 #include <linux/fb.h>
46045 #include <linux/io.h>
46046 #include <linux/mutex.h>
46047 +#include <linux/moduleloader.h>
46048 #include <video/edid.h>
46049 #include <video/uvesafb.h>
46050 #ifdef CONFIG_X86
46051 @@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
46052 NULL,
46053 };
46054
46055 - return call_usermodehelper(v86d_path, argv, envp, 1);
46056 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
46057 }
46058
46059 /*
46060 @@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
46061 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
46062 par->pmi_setpal = par->ypan = 0;
46063 } else {
46064 +
46065 +#ifdef CONFIG_PAX_KERNEXEC
46066 +#ifdef CONFIG_MODULES
46067 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
46068 +#endif
46069 + if (!par->pmi_code) {
46070 + par->pmi_setpal = par->ypan = 0;
46071 + return 0;
46072 + }
46073 +#endif
46074 +
46075 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
46076 + task->t.regs.edi);
46077 +
46078 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46079 + pax_open_kernel();
46080 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
46081 + pax_close_kernel();
46082 +
46083 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
46084 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
46085 +#else
46086 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
46087 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
46088 +#endif
46089 +
46090 printk(KERN_INFO "uvesafb: protected mode interface info at "
46091 "%04x:%04x\n",
46092 (u16)task->t.regs.es, (u16)task->t.regs.edi);
46093 @@ -1799,6 +1822,11 @@ out:
46094 if (par->vbe_modes)
46095 kfree(par->vbe_modes);
46096
46097 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46098 + if (par->pmi_code)
46099 + module_free_exec(NULL, par->pmi_code);
46100 +#endif
46101 +
46102 framebuffer_release(info);
46103 return err;
46104 }
46105 @@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platform_device *dev)
46106 kfree(par->vbe_state_orig);
46107 if (par->vbe_state_saved)
46108 kfree(par->vbe_state_saved);
46109 +
46110 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46111 + if (par->pmi_code)
46112 + module_free_exec(NULL, par->pmi_code);
46113 +#endif
46114 +
46115 }
46116
46117 framebuffer_release(info);
46118 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
46119 index bd37ee1..cb827e8 100644
46120 --- a/drivers/video/vesafb.c
46121 +++ b/drivers/video/vesafb.c
46122 @@ -9,6 +9,7 @@
46123 */
46124
46125 #include <linux/module.h>
46126 +#include <linux/moduleloader.h>
46127 #include <linux/kernel.h>
46128 #include <linux/errno.h>
46129 #include <linux/string.h>
46130 @@ -53,8 +54,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
46131 static int vram_total __initdata; /* Set total amount of memory */
46132 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
46133 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
46134 -static void (*pmi_start)(void) __read_mostly;
46135 -static void (*pmi_pal) (void) __read_mostly;
46136 +static void (*pmi_start)(void) __read_only;
46137 +static void (*pmi_pal) (void) __read_only;
46138 static int depth __read_mostly;
46139 static int vga_compat __read_mostly;
46140 /* --------------------------------------------------------------------- */
46141 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
46142 unsigned int size_vmode;
46143 unsigned int size_remap;
46144 unsigned int size_total;
46145 + void *pmi_code = NULL;
46146
46147 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
46148 return -ENODEV;
46149 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
46150 size_remap = size_total;
46151 vesafb_fix.smem_len = size_remap;
46152
46153 -#ifndef __i386__
46154 - screen_info.vesapm_seg = 0;
46155 -#endif
46156 -
46157 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
46158 printk(KERN_WARNING
46159 "vesafb: cannot reserve video memory at 0x%lx\n",
46160 @@ -315,9 +313,21 @@ static int __init vesafb_probe(struct platform_device *dev)
46161 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
46162 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
46163
46164 +#ifdef __i386__
46165 +
46166 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46167 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
46168 + if (!pmi_code)
46169 +#elif !defined(CONFIG_PAX_KERNEXEC)
46170 + if (0)
46171 +#endif
46172 +
46173 +#endif
46174 + screen_info.vesapm_seg = 0;
46175 +
46176 if (screen_info.vesapm_seg) {
46177 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
46178 - screen_info.vesapm_seg,screen_info.vesapm_off);
46179 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
46180 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
46181 }
46182
46183 if (screen_info.vesapm_seg < 0xc000)
46184 @@ -325,9 +335,25 @@ static int __init vesafb_probe(struct platform_device *dev)
46185
46186 if (ypan || pmi_setpal) {
46187 unsigned short *pmi_base;
46188 +
46189 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
46190 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
46191 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
46192 +
46193 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46194 + pax_open_kernel();
46195 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
46196 +#else
46197 + pmi_code = pmi_base;
46198 +#endif
46199 +
46200 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
46201 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
46202 +
46203 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46204 + pmi_start = ktva_ktla(pmi_start);
46205 + pmi_pal = ktva_ktla(pmi_pal);
46206 + pax_close_kernel();
46207 +#endif
46208 +
46209 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
46210 if (pmi_base[3]) {
46211 printk(KERN_INFO "vesafb: pmi: ports = ");
46212 @@ -469,6 +495,11 @@ static int __init vesafb_probe(struct platform_device *dev)
46213 info->node, info->fix.id);
46214 return 0;
46215 err:
46216 +
46217 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46218 + module_free_exec(NULL, pmi_code);
46219 +#endif
46220 +
46221 if (info->screen_base)
46222 iounmap(info->screen_base);
46223 framebuffer_release(info);
46224 diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
46225 index 88a60e0..6783cc2 100644
46226 --- a/drivers/xen/sys-hypervisor.c
46227 +++ b/drivers/xen/sys-hypervisor.c
46228 @@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct kobject *kobj,
46229 return 0;
46230 }
46231
46232 -static struct sysfs_ops hyp_sysfs_ops = {
46233 +static const struct sysfs_ops hyp_sysfs_ops = {
46234 .show = hyp_sysfs_show,
46235 .store = hyp_sysfs_store,
46236 };
46237 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
46238 index 18f74ec..3227009 100644
46239 --- a/fs/9p/vfs_inode.c
46240 +++ b/fs/9p/vfs_inode.c
46241 @@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
46242 static void
46243 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46244 {
46245 - char *s = nd_get_link(nd);
46246 + const char *s = nd_get_link(nd);
46247
46248 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
46249 IS_ERR(s) ? "<error>" : s);
46250 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
46251 index bb4cc5b..df5eaa0 100644
46252 --- a/fs/Kconfig.binfmt
46253 +++ b/fs/Kconfig.binfmt
46254 @@ -86,7 +86,7 @@ config HAVE_AOUT
46255
46256 config BINFMT_AOUT
46257 tristate "Kernel support for a.out and ECOFF binaries"
46258 - depends on HAVE_AOUT
46259 + depends on HAVE_AOUT && BROKEN
46260 ---help---
46261 A.out (Assembler.OUTput) is a set of formats for libraries and
46262 executables used in the earliest versions of UNIX. Linux used
46263 diff --git a/fs/aio.c b/fs/aio.c
46264 index 22a19ad..d484e5b 100644
46265 --- a/fs/aio.c
46266 +++ b/fs/aio.c
46267 @@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx *ctx)
46268 size += sizeof(struct io_event) * nr_events;
46269 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
46270
46271 - if (nr_pages < 0)
46272 + if (nr_pages <= 0)
46273 return -EINVAL;
46274
46275 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
46276 @@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ctx,
46277 struct aio_timeout to;
46278 int retry = 0;
46279
46280 + pax_track_stack();
46281 +
46282 /* needed to zero any padding within an entry (there shouldn't be
46283 * any, but C is fun!
46284 */
46285 @@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *iocb)
46286 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
46287 {
46288 ssize_t ret;
46289 + struct iovec iovstack;
46290
46291 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
46292 kiocb->ki_nbytes, 1,
46293 - &kiocb->ki_inline_vec, &kiocb->ki_iovec);
46294 + &iovstack, &kiocb->ki_iovec);
46295 if (ret < 0)
46296 goto out;
46297
46298 + if (kiocb->ki_iovec == &iovstack) {
46299 + kiocb->ki_inline_vec = iovstack;
46300 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
46301 + }
46302 kiocb->ki_nr_segs = kiocb->ki_nbytes;
46303 kiocb->ki_cur_seg = 0;
46304 /* ki_nbytes/left now reflect bytes instead of segs */
46305 diff --git a/fs/attr.c b/fs/attr.c
46306 index 96d394b..33cf5b4 100644
46307 --- a/fs/attr.c
46308 +++ b/fs/attr.c
46309 @@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
46310 unsigned long limit;
46311
46312 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
46313 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
46314 if (limit != RLIM_INFINITY && offset > limit)
46315 goto out_sig;
46316 if (offset > inode->i_sb->s_maxbytes)
46317 diff --git a/fs/autofs/root.c b/fs/autofs/root.c
46318 index 4a1401c..05eb5ca 100644
46319 --- a/fs/autofs/root.c
46320 +++ b/fs/autofs/root.c
46321 @@ -299,7 +299,8 @@ static int autofs_root_symlink(struct inode *dir, struct dentry *dentry, const c
46322 set_bit(n,sbi->symlink_bitmap);
46323 sl = &sbi->symlink[n];
46324 sl->len = strlen(symname);
46325 - sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
46326 + slsize = sl->len+1;
46327 + sl->data = kmalloc(slsize, GFP_KERNEL);
46328 if (!sl->data) {
46329 clear_bit(n,sbi->symlink_bitmap);
46330 unlock_kernel();
46331 diff --git a/fs/autofs4/symlink.c b/fs/autofs4/symlink.c
46332 index b4ea829..e63ef18 100644
46333 --- a/fs/autofs4/symlink.c
46334 +++ b/fs/autofs4/symlink.c
46335 @@ -15,7 +15,7 @@
46336 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
46337 {
46338 struct autofs_info *ino = autofs4_dentry_ino(dentry);
46339 - nd_set_link(nd, (char *)ino->u.symlink);
46340 + nd_set_link(nd, ino->u.symlink);
46341 return NULL;
46342 }
46343
46344 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
46345 index 2341375..df9d1c2 100644
46346 --- a/fs/autofs4/waitq.c
46347 +++ b/fs/autofs4/waitq.c
46348 @@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
46349 {
46350 unsigned long sigpipe, flags;
46351 mm_segment_t fs;
46352 - const char *data = (const char *)addr;
46353 + const char __user *data = (const char __force_user *)addr;
46354 ssize_t wr = 0;
46355
46356 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
46357 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
46358 index 9158c07..3f06659 100644
46359 --- a/fs/befs/linuxvfs.c
46360 +++ b/fs/befs/linuxvfs.c
46361 @@ -498,7 +498,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46362 {
46363 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
46364 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
46365 - char *link = nd_get_link(nd);
46366 + const char *link = nd_get_link(nd);
46367 if (!IS_ERR(link))
46368 kfree(link);
46369 }
46370 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
46371 index 0133b5a..b3baa9f 100644
46372 --- a/fs/binfmt_aout.c
46373 +++ b/fs/binfmt_aout.c
46374 @@ -16,6 +16,7 @@
46375 #include <linux/string.h>
46376 #include <linux/fs.h>
46377 #include <linux/file.h>
46378 +#include <linux/security.h>
46379 #include <linux/stat.h>
46380 #include <linux/fcntl.h>
46381 #include <linux/ptrace.h>
46382 @@ -102,6 +103,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46383 #endif
46384 # define START_STACK(u) (u.start_stack)
46385
46386 + memset(&dump, 0, sizeof(dump));
46387 +
46388 fs = get_fs();
46389 set_fs(KERNEL_DS);
46390 has_dumped = 1;
46391 @@ -113,10 +116,12 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46392
46393 /* If the size of the dump file exceeds the rlimit, then see what would happen
46394 if we wrote the stack, but not the data area. */
46395 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
46396 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
46397 dump.u_dsize = 0;
46398
46399 /* Make sure we have enough room to write the stack and data areas. */
46400 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
46401 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
46402 dump.u_ssize = 0;
46403
46404 @@ -146,9 +151,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46405 dump_size = dump.u_ssize << PAGE_SHIFT;
46406 DUMP_WRITE(dump_start,dump_size);
46407 }
46408 -/* Finally dump the task struct. Not be used by gdb, but could be useful */
46409 - set_fs(KERNEL_DS);
46410 - DUMP_WRITE(current,sizeof(*current));
46411 +/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
46412 end_coredump:
46413 set_fs(fs);
46414 return has_dumped;
46415 @@ -249,6 +252,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46416 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
46417 if (rlim >= RLIM_INFINITY)
46418 rlim = ~0;
46419 +
46420 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
46421 if (ex.a_data + ex.a_bss > rlim)
46422 return -ENOMEM;
46423
46424 @@ -277,6 +282,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46425 install_exec_creds(bprm);
46426 current->flags &= ~PF_FORKNOEXEC;
46427
46428 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46429 + current->mm->pax_flags = 0UL;
46430 +#endif
46431 +
46432 +#ifdef CONFIG_PAX_PAGEEXEC
46433 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
46434 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
46435 +
46436 +#ifdef CONFIG_PAX_EMUTRAMP
46437 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
46438 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
46439 +#endif
46440 +
46441 +#ifdef CONFIG_PAX_MPROTECT
46442 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
46443 + current->mm->pax_flags |= MF_PAX_MPROTECT;
46444 +#endif
46445 +
46446 + }
46447 +#endif
46448 +
46449 if (N_MAGIC(ex) == OMAGIC) {
46450 unsigned long text_addr, map_size;
46451 loff_t pos;
46452 @@ -349,7 +375,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46453
46454 down_write(&current->mm->mmap_sem);
46455 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
46456 - PROT_READ | PROT_WRITE | PROT_EXEC,
46457 + PROT_READ | PROT_WRITE,
46458 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
46459 fd_offset + ex.a_text);
46460 up_write(&current->mm->mmap_sem);
46461 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
46462 index 1ed37ba..b9c035f 100644
46463 --- a/fs/binfmt_elf.c
46464 +++ b/fs/binfmt_elf.c
46465 @@ -31,6 +31,7 @@
46466 #include <linux/random.h>
46467 #include <linux/elf.h>
46468 #include <linux/utsname.h>
46469 +#include <linux/xattr.h>
46470 #include <asm/uaccess.h>
46471 #include <asm/param.h>
46472 #include <asm/page.h>
46473 @@ -50,6 +51,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
46474 #define elf_core_dump NULL
46475 #endif
46476
46477 +#ifdef CONFIG_PAX_MPROTECT
46478 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
46479 +#endif
46480 +
46481 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
46482 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
46483 #else
46484 @@ -69,6 +74,11 @@ static struct linux_binfmt elf_format = {
46485 .load_binary = load_elf_binary,
46486 .load_shlib = load_elf_library,
46487 .core_dump = elf_core_dump,
46488 +
46489 +#ifdef CONFIG_PAX_MPROTECT
46490 + .handle_mprotect= elf_handle_mprotect,
46491 +#endif
46492 +
46493 .min_coredump = ELF_EXEC_PAGESIZE,
46494 .hasvdso = 1
46495 };
46496 @@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
46497
46498 static int set_brk(unsigned long start, unsigned long end)
46499 {
46500 + unsigned long e = end;
46501 +
46502 start = ELF_PAGEALIGN(start);
46503 end = ELF_PAGEALIGN(end);
46504 if (end > start) {
46505 @@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
46506 if (BAD_ADDR(addr))
46507 return addr;
46508 }
46509 - current->mm->start_brk = current->mm->brk = end;
46510 + current->mm->start_brk = current->mm->brk = e;
46511 return 0;
46512 }
46513
46514 @@ -148,12 +160,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46515 elf_addr_t __user *u_rand_bytes;
46516 const char *k_platform = ELF_PLATFORM;
46517 const char *k_base_platform = ELF_BASE_PLATFORM;
46518 - unsigned char k_rand_bytes[16];
46519 + u32 k_rand_bytes[4];
46520 int items;
46521 elf_addr_t *elf_info;
46522 int ei_index = 0;
46523 const struct cred *cred = current_cred();
46524 struct vm_area_struct *vma;
46525 + unsigned long saved_auxv[AT_VECTOR_SIZE];
46526 +
46527 + pax_track_stack();
46528
46529 /*
46530 * In some cases (e.g. Hyper-Threading), we want to avoid L1
46531 @@ -195,8 +210,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46532 * Generate 16 random bytes for userspace PRNG seeding.
46533 */
46534 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
46535 - u_rand_bytes = (elf_addr_t __user *)
46536 - STACK_ALLOC(p, sizeof(k_rand_bytes));
46537 + srandom32(k_rand_bytes[0] ^ random32());
46538 + srandom32(k_rand_bytes[1] ^ random32());
46539 + srandom32(k_rand_bytes[2] ^ random32());
46540 + srandom32(k_rand_bytes[3] ^ random32());
46541 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
46542 + u_rand_bytes = (elf_addr_t __user *) p;
46543 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
46544 return -EFAULT;
46545
46546 @@ -308,9 +327,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46547 return -EFAULT;
46548 current->mm->env_end = p;
46549
46550 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
46551 +
46552 /* Put the elf_info on the stack in the right place. */
46553 sp = (elf_addr_t __user *)envp + 1;
46554 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
46555 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
46556 return -EFAULT;
46557 return 0;
46558 }
46559 @@ -385,10 +406,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46560 {
46561 struct elf_phdr *elf_phdata;
46562 struct elf_phdr *eppnt;
46563 - unsigned long load_addr = 0;
46564 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
46565 int load_addr_set = 0;
46566 unsigned long last_bss = 0, elf_bss = 0;
46567 - unsigned long error = ~0UL;
46568 + unsigned long error = -EINVAL;
46569 unsigned long total_size;
46570 int retval, i, size;
46571
46572 @@ -434,6 +455,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46573 goto out_close;
46574 }
46575
46576 +#ifdef CONFIG_PAX_SEGMEXEC
46577 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
46578 + pax_task_size = SEGMEXEC_TASK_SIZE;
46579 +#endif
46580 +
46581 eppnt = elf_phdata;
46582 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
46583 if (eppnt->p_type == PT_LOAD) {
46584 @@ -477,8 +503,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46585 k = load_addr + eppnt->p_vaddr;
46586 if (BAD_ADDR(k) ||
46587 eppnt->p_filesz > eppnt->p_memsz ||
46588 - eppnt->p_memsz > TASK_SIZE ||
46589 - TASK_SIZE - eppnt->p_memsz < k) {
46590 + eppnt->p_memsz > pax_task_size ||
46591 + pax_task_size - eppnt->p_memsz < k) {
46592 error = -ENOMEM;
46593 goto out_close;
46594 }
46595 @@ -532,6 +558,348 @@ out:
46596 return error;
46597 }
46598
46599 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
46600 +{
46601 + unsigned long pax_flags = 0UL;
46602 +
46603 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
46604 +
46605 +#ifdef CONFIG_PAX_PAGEEXEC
46606 + if (elf_phdata->p_flags & PF_PAGEEXEC)
46607 + pax_flags |= MF_PAX_PAGEEXEC;
46608 +#endif
46609 +
46610 +#ifdef CONFIG_PAX_SEGMEXEC
46611 + if (elf_phdata->p_flags & PF_SEGMEXEC)
46612 + pax_flags |= MF_PAX_SEGMEXEC;
46613 +#endif
46614 +
46615 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46616 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46617 + if (nx_enabled)
46618 + pax_flags &= ~MF_PAX_SEGMEXEC;
46619 + else
46620 + pax_flags &= ~MF_PAX_PAGEEXEC;
46621 + }
46622 +#endif
46623 +
46624 +#ifdef CONFIG_PAX_EMUTRAMP
46625 + if (elf_phdata->p_flags & PF_EMUTRAMP)
46626 + pax_flags |= MF_PAX_EMUTRAMP;
46627 +#endif
46628 +
46629 +#ifdef CONFIG_PAX_MPROTECT
46630 + if (elf_phdata->p_flags & PF_MPROTECT)
46631 + pax_flags |= MF_PAX_MPROTECT;
46632 +#endif
46633 +
46634 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46635 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
46636 + pax_flags |= MF_PAX_RANDMMAP;
46637 +#endif
46638 +
46639 +#endif
46640 +
46641 + return pax_flags;
46642 +}
46643 +
46644 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
46645 +{
46646 + unsigned long pax_flags = 0UL;
46647 +
46648 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
46649 +
46650 +#ifdef CONFIG_PAX_PAGEEXEC
46651 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
46652 + pax_flags |= MF_PAX_PAGEEXEC;
46653 +#endif
46654 +
46655 +#ifdef CONFIG_PAX_SEGMEXEC
46656 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
46657 + pax_flags |= MF_PAX_SEGMEXEC;
46658 +#endif
46659 +
46660 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46661 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46662 + if (nx_enabled)
46663 + pax_flags &= ~MF_PAX_SEGMEXEC;
46664 + else
46665 + pax_flags &= ~MF_PAX_PAGEEXEC;
46666 + }
46667 +#endif
46668 +
46669 +#ifdef CONFIG_PAX_EMUTRAMP
46670 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
46671 + pax_flags |= MF_PAX_EMUTRAMP;
46672 +#endif
46673 +
46674 +#ifdef CONFIG_PAX_MPROTECT
46675 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
46676 + pax_flags |= MF_PAX_MPROTECT;
46677 +#endif
46678 +
46679 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46680 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
46681 + pax_flags |= MF_PAX_RANDMMAP;
46682 +#endif
46683 +
46684 +#endif
46685 +
46686 + return pax_flags;
46687 +}
46688 +
46689 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
46690 +{
46691 + unsigned long pax_flags = 0UL;
46692 +
46693 +#ifdef CONFIG_PAX_EI_PAX
46694 +
46695 +#ifdef CONFIG_PAX_PAGEEXEC
46696 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
46697 + pax_flags |= MF_PAX_PAGEEXEC;
46698 +#endif
46699 +
46700 +#ifdef CONFIG_PAX_SEGMEXEC
46701 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
46702 + pax_flags |= MF_PAX_SEGMEXEC;
46703 +#endif
46704 +
46705 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46706 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46707 + if (nx_enabled)
46708 + pax_flags &= ~MF_PAX_SEGMEXEC;
46709 + else
46710 + pax_flags &= ~MF_PAX_PAGEEXEC;
46711 + }
46712 +#endif
46713 +
46714 +#ifdef CONFIG_PAX_EMUTRAMP
46715 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
46716 + pax_flags |= MF_PAX_EMUTRAMP;
46717 +#endif
46718 +
46719 +#ifdef CONFIG_PAX_MPROTECT
46720 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
46721 + pax_flags |= MF_PAX_MPROTECT;
46722 +#endif
46723 +
46724 +#ifdef CONFIG_PAX_ASLR
46725 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
46726 + pax_flags |= MF_PAX_RANDMMAP;
46727 +#endif
46728 +
46729 +#else
46730 +
46731 +#ifdef CONFIG_PAX_PAGEEXEC
46732 + pax_flags |= MF_PAX_PAGEEXEC;
46733 +#endif
46734 +
46735 +#ifdef CONFIG_PAX_MPROTECT
46736 + pax_flags |= MF_PAX_MPROTECT;
46737 +#endif
46738 +
46739 +#ifdef CONFIG_PAX_RANDMMAP
46740 + pax_flags |= MF_PAX_RANDMMAP;
46741 +#endif
46742 +
46743 +#ifdef CONFIG_PAX_SEGMEXEC
46744 + if (!(__supported_pte_mask & _PAGE_NX)) {
46745 + pax_flags &= ~MF_PAX_PAGEEXEC;
46746 + pax_flags |= MF_PAX_SEGMEXEC;
46747 + }
46748 +#endif
46749 +
46750 +#endif
46751 +
46752 + return pax_flags;
46753 +}
46754 +
46755 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
46756 +{
46757 +
46758 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
46759 + unsigned long i;
46760 +
46761 + for (i = 0UL; i < elf_ex->e_phnum; i++)
46762 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
46763 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
46764 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
46765 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
46766 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
46767 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
46768 + return ~0UL;
46769 +
46770 +#ifdef CONFIG_PAX_SOFTMODE
46771 + if (pax_softmode)
46772 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
46773 + else
46774 +#endif
46775 +
46776 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
46777 + break;
46778 + }
46779 +#endif
46780 +
46781 + return ~0UL;
46782 +}
46783 +
46784 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
46785 +{
46786 + unsigned long pax_flags = 0UL;
46787 +
46788 +#ifdef CONFIG_PAX_PAGEEXEC
46789 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
46790 + pax_flags |= MF_PAX_PAGEEXEC;
46791 +#endif
46792 +
46793 +#ifdef CONFIG_PAX_SEGMEXEC
46794 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
46795 + pax_flags |= MF_PAX_SEGMEXEC;
46796 +#endif
46797 +
46798 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46799 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46800 + if ((__supported_pte_mask & _PAGE_NX))
46801 + pax_flags &= ~MF_PAX_SEGMEXEC;
46802 + else
46803 + pax_flags &= ~MF_PAX_PAGEEXEC;
46804 + }
46805 +#endif
46806 +
46807 +#ifdef CONFIG_PAX_EMUTRAMP
46808 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
46809 + pax_flags |= MF_PAX_EMUTRAMP;
46810 +#endif
46811 +
46812 +#ifdef CONFIG_PAX_MPROTECT
46813 + if (pax_flags_softmode & MF_PAX_MPROTECT)
46814 + pax_flags |= MF_PAX_MPROTECT;
46815 +#endif
46816 +
46817 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46818 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
46819 + pax_flags |= MF_PAX_RANDMMAP;
46820 +#endif
46821 +
46822 + return pax_flags;
46823 +}
46824 +
46825 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
46826 +{
46827 + unsigned long pax_flags = 0UL;
46828 +
46829 +#ifdef CONFIG_PAX_PAGEEXEC
46830 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
46831 + pax_flags |= MF_PAX_PAGEEXEC;
46832 +#endif
46833 +
46834 +#ifdef CONFIG_PAX_SEGMEXEC
46835 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
46836 + pax_flags |= MF_PAX_SEGMEXEC;
46837 +#endif
46838 +
46839 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46840 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46841 + if ((__supported_pte_mask & _PAGE_NX))
46842 + pax_flags &= ~MF_PAX_SEGMEXEC;
46843 + else
46844 + pax_flags &= ~MF_PAX_PAGEEXEC;
46845 + }
46846 +#endif
46847 +
46848 +#ifdef CONFIG_PAX_EMUTRAMP
46849 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
46850 + pax_flags |= MF_PAX_EMUTRAMP;
46851 +#endif
46852 +
46853 +#ifdef CONFIG_PAX_MPROTECT
46854 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
46855 + pax_flags |= MF_PAX_MPROTECT;
46856 +#endif
46857 +
46858 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46859 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
46860 + pax_flags |= MF_PAX_RANDMMAP;
46861 +#endif
46862 +
46863 + return pax_flags;
46864 +}
46865 +
46866 +static unsigned long pax_parse_xattr_pax(struct file * const file)
46867 +{
46868 +
46869 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
46870 + ssize_t xattr_size, i;
46871 + unsigned char xattr_value[5];
46872 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
46873 +
46874 + xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
46875 + if (xattr_size <= 0)
46876 + return ~0UL;
46877 +
46878 + for (i = 0; i < xattr_size; i++)
46879 + switch (xattr_value[i]) {
46880 + default:
46881 + return ~0UL;
46882 +
46883 +#define parse_flag(option1, option2, flag) \
46884 + case option1: \
46885 + pax_flags_hardmode |= MF_PAX_##flag; \
46886 + break; \
46887 + case option2: \
46888 + pax_flags_softmode |= MF_PAX_##flag; \
46889 + break;
46890 +
46891 + parse_flag('p', 'P', PAGEEXEC);
46892 + parse_flag('e', 'E', EMUTRAMP);
46893 + parse_flag('m', 'M', MPROTECT);
46894 + parse_flag('r', 'R', RANDMMAP);
46895 + parse_flag('s', 'S', SEGMEXEC);
46896 +
46897 +#undef parse_flag
46898 + }
46899 +
46900 + if (pax_flags_hardmode & pax_flags_softmode)
46901 + return ~0UL;
46902 +
46903 +#ifdef CONFIG_PAX_SOFTMODE
46904 + if (pax_softmode)
46905 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
46906 + else
46907 +#endif
46908 +
46909 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
46910 +#else
46911 + return ~0UL;
46912 +#endif
46913 +}
46914 +
46915 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
46916 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
46917 +{
46918 + unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
46919 +
46920 + pax_flags = pax_parse_ei_pax(elf_ex);
46921 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
46922 + xattr_pax_flags = pax_parse_xattr_pax(file);
46923 +
46924 + if (pt_pax_flags == ~0UL)
46925 + pt_pax_flags = xattr_pax_flags;
46926 + else if (xattr_pax_flags == ~0UL)
46927 + xattr_pax_flags = pt_pax_flags;
46928 + if (pt_pax_flags != xattr_pax_flags)
46929 + return -EINVAL;
46930 + if (pt_pax_flags != ~0UL)
46931 + pax_flags = pt_pax_flags;
46932 +
46933 + if (0 > pax_check_flags(&pax_flags))
46934 + return -EINVAL;
46935 +
46936 + current->mm->pax_flags = pax_flags;
46937 + return 0;
46938 +}
46939 +#endif
46940 +
46941 /*
46942 * These are the functions used to load ELF style executables and shared
46943 * libraries. There is no binary dependent code anywhere else.
46944 @@ -548,6 +916,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
46945 {
46946 unsigned int random_variable = 0;
46947
46948 +#ifdef CONFIG_PAX_RANDUSTACK
46949 + if (randomize_va_space)
46950 + return stack_top - current->mm->delta_stack;
46951 +#endif
46952 +
46953 if ((current->flags & PF_RANDOMIZE) &&
46954 !(current->personality & ADDR_NO_RANDOMIZE)) {
46955 random_variable = get_random_int() & STACK_RND_MASK;
46956 @@ -566,7 +939,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46957 unsigned long load_addr = 0, load_bias = 0;
46958 int load_addr_set = 0;
46959 char * elf_interpreter = NULL;
46960 - unsigned long error;
46961 + unsigned long error = 0;
46962 struct elf_phdr *elf_ppnt, *elf_phdata;
46963 unsigned long elf_bss, elf_brk;
46964 int retval, i;
46965 @@ -576,11 +949,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46966 unsigned long start_code, end_code, start_data, end_data;
46967 unsigned long reloc_func_desc = 0;
46968 int executable_stack = EXSTACK_DEFAULT;
46969 - unsigned long def_flags = 0;
46970 struct {
46971 struct elfhdr elf_ex;
46972 struct elfhdr interp_elf_ex;
46973 } *loc;
46974 + unsigned long pax_task_size = TASK_SIZE;
46975
46976 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
46977 if (!loc) {
46978 @@ -718,11 +1091,80 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46979
46980 /* OK, This is the point of no return */
46981 current->flags &= ~PF_FORKNOEXEC;
46982 - current->mm->def_flags = def_flags;
46983 +
46984 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46985 + current->mm->pax_flags = 0UL;
46986 +#endif
46987 +
46988 +#ifdef CONFIG_PAX_DLRESOLVE
46989 + current->mm->call_dl_resolve = 0UL;
46990 +#endif
46991 +
46992 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
46993 + current->mm->call_syscall = 0UL;
46994 +#endif
46995 +
46996 +#ifdef CONFIG_PAX_ASLR
46997 + current->mm->delta_mmap = 0UL;
46998 + current->mm->delta_stack = 0UL;
46999 +#endif
47000 +
47001 + current->mm->def_flags = 0;
47002 +
47003 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
47004 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
47005 + send_sig(SIGKILL, current, 0);
47006 + goto out_free_dentry;
47007 + }
47008 +#endif
47009 +
47010 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
47011 + pax_set_initial_flags(bprm);
47012 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
47013 + if (pax_set_initial_flags_func)
47014 + (pax_set_initial_flags_func)(bprm);
47015 +#endif
47016 +
47017 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
47018 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
47019 + current->mm->context.user_cs_limit = PAGE_SIZE;
47020 + current->mm->def_flags |= VM_PAGEEXEC;
47021 + }
47022 +#endif
47023 +
47024 +#ifdef CONFIG_PAX_SEGMEXEC
47025 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
47026 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
47027 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
47028 + pax_task_size = SEGMEXEC_TASK_SIZE;
47029 + }
47030 +#endif
47031 +
47032 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
47033 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
47034 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
47035 + put_cpu();
47036 + }
47037 +#endif
47038
47039 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
47040 may depend on the personality. */
47041 SET_PERSONALITY(loc->elf_ex);
47042 +
47043 +#ifdef CONFIG_PAX_ASLR
47044 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
47045 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
47046 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
47047 + }
47048 +#endif
47049 +
47050 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
47051 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
47052 + executable_stack = EXSTACK_DISABLE_X;
47053 + current->personality &= ~READ_IMPLIES_EXEC;
47054 + } else
47055 +#endif
47056 +
47057 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
47058 current->personality |= READ_IMPLIES_EXEC;
47059
47060 @@ -800,10 +1242,27 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47061 * might try to exec. This is because the brk will
47062 * follow the loader, and is not movable. */
47063 #ifdef CONFIG_X86
47064 - load_bias = 0;
47065 + if (current->flags & PF_RANDOMIZE)
47066 + load_bias = 0;
47067 + else
47068 + load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
47069 #else
47070 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
47071 #endif
47072 +
47073 +#ifdef CONFIG_PAX_RANDMMAP
47074 + /* PaX: randomize base address at the default exe base if requested */
47075 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
47076 +#ifdef CONFIG_SPARC64
47077 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
47078 +#else
47079 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
47080 +#endif
47081 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
47082 + elf_flags |= MAP_FIXED;
47083 + }
47084 +#endif
47085 +
47086 }
47087
47088 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
47089 @@ -836,9 +1295,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47090 * allowed task size. Note that p_filesz must always be
47091 * <= p_memsz so it is only necessary to check p_memsz.
47092 */
47093 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
47094 - elf_ppnt->p_memsz > TASK_SIZE ||
47095 - TASK_SIZE - elf_ppnt->p_memsz < k) {
47096 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
47097 + elf_ppnt->p_memsz > pax_task_size ||
47098 + pax_task_size - elf_ppnt->p_memsz < k) {
47099 /* set_brk can never work. Avoid overflows. */
47100 send_sig(SIGKILL, current, 0);
47101 retval = -EINVAL;
47102 @@ -866,6 +1325,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47103 start_data += load_bias;
47104 end_data += load_bias;
47105
47106 +#ifdef CONFIG_PAX_RANDMMAP
47107 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
47108 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
47109 +#endif
47110 +
47111 /* Calling set_brk effectively mmaps the pages that we need
47112 * for the bss and break sections. We must do this before
47113 * mapping in the interpreter, to make sure it doesn't wind
47114 @@ -877,9 +1341,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47115 goto out_free_dentry;
47116 }
47117 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
47118 - send_sig(SIGSEGV, current, 0);
47119 - retval = -EFAULT; /* Nobody gets to see this, but.. */
47120 - goto out_free_dentry;
47121 + /*
47122 + * This bss-zeroing can fail if the ELF
47123 + * file specifies odd protections. So
47124 + * we don't check the return value
47125 + */
47126 }
47127
47128 if (elf_interpreter) {
47129 @@ -1112,8 +1578,10 @@ static int dump_seek(struct file *file, loff_t off)
47130 unsigned long n = off;
47131 if (n > PAGE_SIZE)
47132 n = PAGE_SIZE;
47133 - if (!dump_write(file, buf, n))
47134 + if (!dump_write(file, buf, n)) {
47135 + free_page((unsigned long)buf);
47136 return 0;
47137 + }
47138 off -= n;
47139 }
47140 free_page((unsigned long)buf);
47141 @@ -1125,7 +1593,7 @@ static int dump_seek(struct file *file, loff_t off)
47142 * Decide what to dump of a segment, part, all or none.
47143 */
47144 static unsigned long vma_dump_size(struct vm_area_struct *vma,
47145 - unsigned long mm_flags)
47146 + unsigned long mm_flags, long signr)
47147 {
47148 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
47149
47150 @@ -1159,7 +1627,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
47151 if (vma->vm_file == NULL)
47152 return 0;
47153
47154 - if (FILTER(MAPPED_PRIVATE))
47155 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
47156 goto whole;
47157
47158 /*
47159 @@ -1255,8 +1723,11 @@ static int writenote(struct memelfnote *men, struct file *file,
47160 #undef DUMP_WRITE
47161
47162 #define DUMP_WRITE(addr, nr) \
47163 + do { \
47164 + gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
47165 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
47166 - goto end_coredump;
47167 + goto end_coredump; \
47168 + } while (0);
47169
47170 static void fill_elf_header(struct elfhdr *elf, int segs,
47171 u16 machine, u32 flags, u8 osabi)
47172 @@ -1385,9 +1856,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
47173 {
47174 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
47175 int i = 0;
47176 - do
47177 + do {
47178 i += 2;
47179 - while (auxv[i - 2] != AT_NULL);
47180 + } while (auxv[i - 2] != AT_NULL);
47181 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
47182 }
47183
47184 @@ -1973,7 +2444,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47185 phdr.p_offset = offset;
47186 phdr.p_vaddr = vma->vm_start;
47187 phdr.p_paddr = 0;
47188 - phdr.p_filesz = vma_dump_size(vma, mm_flags);
47189 + phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
47190 phdr.p_memsz = vma->vm_end - vma->vm_start;
47191 offset += phdr.p_filesz;
47192 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
47193 @@ -2006,7 +2477,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47194 unsigned long addr;
47195 unsigned long end;
47196
47197 - end = vma->vm_start + vma_dump_size(vma, mm_flags);
47198 + end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
47199
47200 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
47201 struct page *page;
47202 @@ -2015,6 +2486,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47203 page = get_dump_page(addr);
47204 if (page) {
47205 void *kaddr = kmap(page);
47206 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
47207 stop = ((size += PAGE_SIZE) > limit) ||
47208 !dump_write(file, kaddr, PAGE_SIZE);
47209 kunmap(page);
47210 @@ -2042,6 +2514,97 @@ out:
47211
47212 #endif /* USE_ELF_CORE_DUMP */
47213
47214 +#ifdef CONFIG_PAX_MPROTECT
47215 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
47216 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
47217 + * we'll remove VM_MAYWRITE for good on RELRO segments.
47218 + *
47219 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
47220 + * basis because we want to allow the common case and not the special ones.
47221 + */
47222 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
47223 +{
47224 + struct elfhdr elf_h;
47225 + struct elf_phdr elf_p;
47226 + unsigned long i;
47227 + unsigned long oldflags;
47228 + bool is_textrel_rw, is_textrel_rx, is_relro;
47229 +
47230 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
47231 + return;
47232 +
47233 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
47234 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
47235 +
47236 +#ifdef CONFIG_PAX_ELFRELOCS
47237 + /* possible TEXTREL */
47238 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
47239 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
47240 +#else
47241 + is_textrel_rw = false;
47242 + is_textrel_rx = false;
47243 +#endif
47244 +
47245 + /* possible RELRO */
47246 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
47247 +
47248 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
47249 + return;
47250 +
47251 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
47252 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
47253 +
47254 +#ifdef CONFIG_PAX_ETEXECRELOCS
47255 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
47256 +#else
47257 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
47258 +#endif
47259 +
47260 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
47261 + !elf_check_arch(&elf_h) ||
47262 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
47263 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
47264 + return;
47265 +
47266 + for (i = 0UL; i < elf_h.e_phnum; i++) {
47267 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
47268 + return;
47269 + switch (elf_p.p_type) {
47270 + case PT_DYNAMIC:
47271 + if (!is_textrel_rw && !is_textrel_rx)
47272 + continue;
47273 + i = 0UL;
47274 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
47275 + elf_dyn dyn;
47276 +
47277 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
47278 + return;
47279 + if (dyn.d_tag == DT_NULL)
47280 + return;
47281 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
47282 + gr_log_textrel(vma);
47283 + if (is_textrel_rw)
47284 + vma->vm_flags |= VM_MAYWRITE;
47285 + else
47286 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
47287 + vma->vm_flags &= ~VM_MAYWRITE;
47288 + return;
47289 + }
47290 + i++;
47291 + }
47292 + return;
47293 +
47294 + case PT_GNU_RELRO:
47295 + if (!is_relro)
47296 + continue;
47297 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
47298 + vma->vm_flags &= ~VM_MAYWRITE;
47299 + return;
47300 + }
47301 + }
47302 +}
47303 +#endif
47304 +
47305 static int __init init_elf_binfmt(void)
47306 {
47307 return register_binfmt(&elf_format);
47308 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
47309 index ca88c46..f155a60 100644
47310 --- a/fs/binfmt_flat.c
47311 +++ b/fs/binfmt_flat.c
47312 @@ -564,7 +564,9 @@ static int load_flat_file(struct linux_binprm * bprm,
47313 realdatastart = (unsigned long) -ENOMEM;
47314 printk("Unable to allocate RAM for process data, errno %d\n",
47315 (int)-realdatastart);
47316 + down_write(&current->mm->mmap_sem);
47317 do_munmap(current->mm, textpos, text_len);
47318 + up_write(&current->mm->mmap_sem);
47319 ret = realdatastart;
47320 goto err;
47321 }
47322 @@ -588,8 +590,10 @@ static int load_flat_file(struct linux_binprm * bprm,
47323 }
47324 if (IS_ERR_VALUE(result)) {
47325 printk("Unable to read data+bss, errno %d\n", (int)-result);
47326 + down_write(&current->mm->mmap_sem);
47327 do_munmap(current->mm, textpos, text_len);
47328 do_munmap(current->mm, realdatastart, data_len + extra);
47329 + up_write(&current->mm->mmap_sem);
47330 ret = result;
47331 goto err;
47332 }
47333 @@ -658,8 +662,10 @@ static int load_flat_file(struct linux_binprm * bprm,
47334 }
47335 if (IS_ERR_VALUE(result)) {
47336 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
47337 + down_write(&current->mm->mmap_sem);
47338 do_munmap(current->mm, textpos, text_len + data_len + extra +
47339 MAX_SHARED_LIBS * sizeof(unsigned long));
47340 + up_write(&current->mm->mmap_sem);
47341 ret = result;
47342 goto err;
47343 }
47344 diff --git a/fs/bio.c b/fs/bio.c
47345 index e696713..83de133 100644
47346 --- a/fs/bio.c
47347 +++ b/fs/bio.c
47348 @@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
47349
47350 i = 0;
47351 while (i < bio_slab_nr) {
47352 - struct bio_slab *bslab = &bio_slabs[i];
47353 + bslab = &bio_slabs[i];
47354
47355 if (!bslab->slab && entry == -1)
47356 entry = i;
47357 @@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
47358 const int read = bio_data_dir(bio) == READ;
47359 struct bio_map_data *bmd = bio->bi_private;
47360 int i;
47361 - char *p = bmd->sgvecs[0].iov_base;
47362 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
47363
47364 __bio_for_each_segment(bvec, bio, i, 0) {
47365 char *addr = page_address(bvec->bv_page);
47366 diff --git a/fs/block_dev.c b/fs/block_dev.c
47367 index e65efa2..04fae57 100644
47368 --- a/fs/block_dev.c
47369 +++ b/fs/block_dev.c
47370 @@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev, void *holder)
47371 else if (bdev->bd_contains == bdev)
47372 res = 0; /* is a whole device which isn't held */
47373
47374 - else if (bdev->bd_contains->bd_holder == bd_claim)
47375 + else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
47376 res = 0; /* is a partition of a device that is being partitioned */
47377 else if (bdev->bd_contains->bd_holder != NULL)
47378 res = -EBUSY; /* is a partition of a held device */
47379 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
47380 index c4bc570..42acd8d 100644
47381 --- a/fs/btrfs/ctree.c
47382 +++ b/fs/btrfs/ctree.c
47383 @@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
47384 free_extent_buffer(buf);
47385 add_root_to_dirty_list(root);
47386 } else {
47387 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
47388 - parent_start = parent->start;
47389 - else
47390 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
47391 + if (parent)
47392 + parent_start = parent->start;
47393 + else
47394 + parent_start = 0;
47395 + } else
47396 parent_start = 0;
47397
47398 WARN_ON(trans->transid != btrfs_header_generation(parent));
47399 @@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_trans_handle *trans,
47400
47401 ret = 0;
47402 if (slot == 0) {
47403 - struct btrfs_disk_key disk_key;
47404 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
47405 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
47406 }
47407 diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
47408 index f447188..59c17c5 100644
47409 --- a/fs/btrfs/disk-io.c
47410 +++ b/fs/btrfs/disk-io.c
47411 @@ -39,7 +39,7 @@
47412 #include "tree-log.h"
47413 #include "free-space-cache.h"
47414
47415 -static struct extent_io_ops btree_extent_io_ops;
47416 +static const struct extent_io_ops btree_extent_io_ops;
47417 static void end_workqueue_fn(struct btrfs_work *work);
47418 static void free_fs_root(struct btrfs_root *root);
47419
47420 @@ -2607,7 +2607,7 @@ out:
47421 return 0;
47422 }
47423
47424 -static struct extent_io_ops btree_extent_io_ops = {
47425 +static const struct extent_io_ops btree_extent_io_ops = {
47426 .write_cache_pages_lock_hook = btree_lock_page_hook,
47427 .readpage_end_io_hook = btree_readpage_end_io_hook,
47428 .submit_bio_hook = btree_submit_bio_hook,
47429 diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
47430 index 559f724..a026171 100644
47431 --- a/fs/btrfs/extent-tree.c
47432 +++ b/fs/btrfs/extent-tree.c
47433 @@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,
47434 u64 group_start = group->key.objectid;
47435 new_extents = kmalloc(sizeof(*new_extents),
47436 GFP_NOFS);
47437 + if (!new_extents) {
47438 + ret = -ENOMEM;
47439 + goto out;
47440 + }
47441 nr_extents = 1;
47442 ret = get_new_locations(reloc_inode,
47443 extent_key,
47444 diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
47445 index 36de250..7ec75c7 100644
47446 --- a/fs/btrfs/extent_io.h
47447 +++ b/fs/btrfs/extent_io.h
47448 @@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw,
47449 struct bio *bio, int mirror_num,
47450 unsigned long bio_flags);
47451 struct extent_io_ops {
47452 - int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
47453 + int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
47454 u64 start, u64 end, int *page_started,
47455 unsigned long *nr_written);
47456 - int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
47457 - int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
47458 + int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
47459 + int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
47460 extent_submit_bio_hook_t *submit_bio_hook;
47461 - int (*merge_bio_hook)(struct page *page, unsigned long offset,
47462 + int (* const merge_bio_hook)(struct page *page, unsigned long offset,
47463 size_t size, struct bio *bio,
47464 unsigned long bio_flags);
47465 - int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
47466 - int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
47467 + int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
47468 + int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
47469 u64 start, u64 end,
47470 struct extent_state *state);
47471 - int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
47472 + int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
47473 u64 start, u64 end,
47474 struct extent_state *state);
47475 - int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
47476 + int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
47477 struct extent_state *state);
47478 - int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
47479 + int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
47480 struct extent_state *state, int uptodate);
47481 - int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
47482 + int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
47483 unsigned long old, unsigned long bits);
47484 - int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
47485 + int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
47486 unsigned long bits);
47487 - int (*merge_extent_hook)(struct inode *inode,
47488 + int (* const merge_extent_hook)(struct inode *inode,
47489 struct extent_state *new,
47490 struct extent_state *other);
47491 - int (*split_extent_hook)(struct inode *inode,
47492 + int (* const split_extent_hook)(struct inode *inode,
47493 struct extent_state *orig, u64 split);
47494 - int (*write_cache_pages_lock_hook)(struct page *page);
47495 + int (* const write_cache_pages_lock_hook)(struct page *page);
47496 };
47497
47498 struct extent_io_tree {
47499 @@ -88,7 +88,7 @@ struct extent_io_tree {
47500 u64 dirty_bytes;
47501 spinlock_t lock;
47502 spinlock_t buffer_lock;
47503 - struct extent_io_ops *ops;
47504 + const struct extent_io_ops *ops;
47505 };
47506
47507 struct extent_state {
47508 diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
47509 index cb2849f..3718fb4 100644
47510 --- a/fs/btrfs/free-space-cache.c
47511 +++ b/fs/btrfs/free-space-cache.c
47512 @@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
47513
47514 while(1) {
47515 if (entry->bytes < bytes || entry->offset < min_start) {
47516 - struct rb_node *node;
47517 -
47518 node = rb_next(&entry->offset_index);
47519 if (!node)
47520 break;
47521 @@ -1226,7 +1224,7 @@ again:
47522 */
47523 while (entry->bitmap || found_bitmap ||
47524 (!entry->bitmap && entry->bytes < min_bytes)) {
47525 - struct rb_node *node = rb_next(&entry->offset_index);
47526 + node = rb_next(&entry->offset_index);
47527
47528 if (entry->bitmap && entry->bytes > bytes + empty_size) {
47529 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
47530 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
47531 index e03a836..323837e 100644
47532 --- a/fs/btrfs/inode.c
47533 +++ b/fs/btrfs/inode.c
47534 @@ -63,7 +63,7 @@ static const struct inode_operations btrfs_file_inode_operations;
47535 static const struct address_space_operations btrfs_aops;
47536 static const struct address_space_operations btrfs_symlink_aops;
47537 static const struct file_operations btrfs_dir_file_operations;
47538 -static struct extent_io_ops btrfs_extent_io_ops;
47539 +static const struct extent_io_ops btrfs_extent_io_ops;
47540
47541 static struct kmem_cache *btrfs_inode_cachep;
47542 struct kmem_cache *btrfs_trans_handle_cachep;
47543 @@ -925,6 +925,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
47544 1, 0, NULL, GFP_NOFS);
47545 while (start < end) {
47546 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
47547 + BUG_ON(!async_cow);
47548 async_cow->inode = inode;
47549 async_cow->root = root;
47550 async_cow->locked_page = locked_page;
47551 @@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(struct btrfs_path *path,
47552 inline_size = btrfs_file_extent_inline_item_len(leaf,
47553 btrfs_item_nr(leaf, path->slots[0]));
47554 tmp = kmalloc(inline_size, GFP_NOFS);
47555 + if (!tmp)
47556 + return -ENOMEM;
47557 ptr = btrfs_file_extent_inline_start(item);
47558
47559 read_extent_buffer(leaf, tmp, ptr, inline_size);
47560 @@ -5410,7 +5413,7 @@ fail:
47561 return -ENOMEM;
47562 }
47563
47564 -static int btrfs_getattr(struct vfsmount *mnt,
47565 +int btrfs_getattr(struct vfsmount *mnt,
47566 struct dentry *dentry, struct kstat *stat)
47567 {
47568 struct inode *inode = dentry->d_inode;
47569 @@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
47570 return 0;
47571 }
47572
47573 +EXPORT_SYMBOL(btrfs_getattr);
47574 +
47575 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
47576 +{
47577 + return BTRFS_I(inode)->root->anon_super.s_dev;
47578 +}
47579 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
47580 +
47581 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
47582 struct inode *new_dir, struct dentry *new_dentry)
47583 {
47584 @@ -5972,7 +5983,7 @@ static const struct file_operations btrfs_dir_file_operations = {
47585 .fsync = btrfs_sync_file,
47586 };
47587
47588 -static struct extent_io_ops btrfs_extent_io_ops = {
47589 +static const struct extent_io_ops btrfs_extent_io_ops = {
47590 .fill_delalloc = run_delalloc_range,
47591 .submit_bio_hook = btrfs_submit_bio_hook,
47592 .merge_bio_hook = btrfs_merge_bio_hook,
47593 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
47594 index ab7ab53..94e0781 100644
47595 --- a/fs/btrfs/relocation.c
47596 +++ b/fs/btrfs/relocation.c
47597 @@ -884,7 +884,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
47598 }
47599 spin_unlock(&rc->reloc_root_tree.lock);
47600
47601 - BUG_ON((struct btrfs_root *)node->data != root);
47602 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
47603
47604 if (!del) {
47605 spin_lock(&rc->reloc_root_tree.lock);
47606 diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
47607 index a240b6f..4ce16ef 100644
47608 --- a/fs/btrfs/sysfs.c
47609 +++ b/fs/btrfs/sysfs.c
47610 @@ -164,12 +164,12 @@ static void btrfs_root_release(struct kobject *kobj)
47611 complete(&root->kobj_unregister);
47612 }
47613
47614 -static struct sysfs_ops btrfs_super_attr_ops = {
47615 +static const struct sysfs_ops btrfs_super_attr_ops = {
47616 .show = btrfs_super_attr_show,
47617 .store = btrfs_super_attr_store,
47618 };
47619
47620 -static struct sysfs_ops btrfs_root_attr_ops = {
47621 +static const struct sysfs_ops btrfs_root_attr_ops = {
47622 .show = btrfs_root_attr_show,
47623 .store = btrfs_root_attr_store,
47624 };
47625 diff --git a/fs/buffer.c b/fs/buffer.c
47626 index 6fa5302..395d9f6 100644
47627 --- a/fs/buffer.c
47628 +++ b/fs/buffer.c
47629 @@ -25,6 +25,7 @@
47630 #include <linux/percpu.h>
47631 #include <linux/slab.h>
47632 #include <linux/capability.h>
47633 +#include <linux/security.h>
47634 #include <linux/blkdev.h>
47635 #include <linux/file.h>
47636 #include <linux/quotaops.h>
47637 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
47638 index 3797e00..ce776f6 100644
47639 --- a/fs/cachefiles/bind.c
47640 +++ b/fs/cachefiles/bind.c
47641 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
47642 args);
47643
47644 /* start by checking things over */
47645 - ASSERT(cache->fstop_percent >= 0 &&
47646 - cache->fstop_percent < cache->fcull_percent &&
47647 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
47648 cache->fcull_percent < cache->frun_percent &&
47649 cache->frun_percent < 100);
47650
47651 - ASSERT(cache->bstop_percent >= 0 &&
47652 - cache->bstop_percent < cache->bcull_percent &&
47653 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
47654 cache->bcull_percent < cache->brun_percent &&
47655 cache->brun_percent < 100);
47656
47657 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
47658 index 4618516..bb30d01 100644
47659 --- a/fs/cachefiles/daemon.c
47660 +++ b/fs/cachefiles/daemon.c
47661 @@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
47662 if (test_bit(CACHEFILES_DEAD, &cache->flags))
47663 return -EIO;
47664
47665 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
47666 + if (datalen > PAGE_SIZE - 1)
47667 return -EOPNOTSUPP;
47668
47669 /* drag the command string into the kernel so we can parse it */
47670 @@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
47671 if (args[0] != '%' || args[1] != '\0')
47672 return -EINVAL;
47673
47674 - if (fstop < 0 || fstop >= cache->fcull_percent)
47675 + if (fstop >= cache->fcull_percent)
47676 return cachefiles_daemon_range_error(cache, args);
47677
47678 cache->fstop_percent = fstop;
47679 @@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
47680 if (args[0] != '%' || args[1] != '\0')
47681 return -EINVAL;
47682
47683 - if (bstop < 0 || bstop >= cache->bcull_percent)
47684 + if (bstop >= cache->bcull_percent)
47685 return cachefiles_daemon_range_error(cache, args);
47686
47687 cache->bstop_percent = bstop;
47688 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
47689 index f7c255f..fcd61de 100644
47690 --- a/fs/cachefiles/internal.h
47691 +++ b/fs/cachefiles/internal.h
47692 @@ -56,7 +56,7 @@ struct cachefiles_cache {
47693 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
47694 struct rb_root active_nodes; /* active nodes (can't be culled) */
47695 rwlock_t active_lock; /* lock for active_nodes */
47696 - atomic_t gravecounter; /* graveyard uniquifier */
47697 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
47698 unsigned frun_percent; /* when to stop culling (% files) */
47699 unsigned fcull_percent; /* when to start culling (% files) */
47700 unsigned fstop_percent; /* when to stop allocating (% files) */
47701 @@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
47702 * proc.c
47703 */
47704 #ifdef CONFIG_CACHEFILES_HISTOGRAM
47705 -extern atomic_t cachefiles_lookup_histogram[HZ];
47706 -extern atomic_t cachefiles_mkdir_histogram[HZ];
47707 -extern atomic_t cachefiles_create_histogram[HZ];
47708 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
47709 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
47710 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
47711
47712 extern int __init cachefiles_proc_init(void);
47713 extern void cachefiles_proc_cleanup(void);
47714 static inline
47715 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
47716 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
47717 {
47718 unsigned long jif = jiffies - start_jif;
47719 if (jif >= HZ)
47720 jif = HZ - 1;
47721 - atomic_inc(&histogram[jif]);
47722 + atomic_inc_unchecked(&histogram[jif]);
47723 }
47724
47725 #else
47726 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
47727 index 14ac480..a62766c 100644
47728 --- a/fs/cachefiles/namei.c
47729 +++ b/fs/cachefiles/namei.c
47730 @@ -250,7 +250,7 @@ try_again:
47731 /* first step is to make up a grave dentry in the graveyard */
47732 sprintf(nbuffer, "%08x%08x",
47733 (uint32_t) get_seconds(),
47734 - (uint32_t) atomic_inc_return(&cache->gravecounter));
47735 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
47736
47737 /* do the multiway lock magic */
47738 trap = lock_rename(cache->graveyard, dir);
47739 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
47740 index eccd339..4c1d995 100644
47741 --- a/fs/cachefiles/proc.c
47742 +++ b/fs/cachefiles/proc.c
47743 @@ -14,9 +14,9 @@
47744 #include <linux/seq_file.h>
47745 #include "internal.h"
47746
47747 -atomic_t cachefiles_lookup_histogram[HZ];
47748 -atomic_t cachefiles_mkdir_histogram[HZ];
47749 -atomic_t cachefiles_create_histogram[HZ];
47750 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
47751 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
47752 +atomic_unchecked_t cachefiles_create_histogram[HZ];
47753
47754 /*
47755 * display the latency histogram
47756 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
47757 return 0;
47758 default:
47759 index = (unsigned long) v - 3;
47760 - x = atomic_read(&cachefiles_lookup_histogram[index]);
47761 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
47762 - z = atomic_read(&cachefiles_create_histogram[index]);
47763 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
47764 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
47765 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
47766 if (x == 0 && y == 0 && z == 0)
47767 return 0;
47768
47769 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
47770 index a6c8c6f..5cf8517 100644
47771 --- a/fs/cachefiles/rdwr.c
47772 +++ b/fs/cachefiles/rdwr.c
47773 @@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
47774 old_fs = get_fs();
47775 set_fs(KERNEL_DS);
47776 ret = file->f_op->write(
47777 - file, (const void __user *) data, len, &pos);
47778 + file, (const void __force_user *) data, len, &pos);
47779 set_fs(old_fs);
47780 kunmap(page);
47781 if (ret != len)
47782 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
47783 index 42cec2a..2aba466 100644
47784 --- a/fs/cifs/cifs_debug.c
47785 +++ b/fs/cifs/cifs_debug.c
47786 @@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
47787 tcon = list_entry(tmp3,
47788 struct cifsTconInfo,
47789 tcon_list);
47790 - atomic_set(&tcon->num_smbs_sent, 0);
47791 - atomic_set(&tcon->num_writes, 0);
47792 - atomic_set(&tcon->num_reads, 0);
47793 - atomic_set(&tcon->num_oplock_brks, 0);
47794 - atomic_set(&tcon->num_opens, 0);
47795 - atomic_set(&tcon->num_posixopens, 0);
47796 - atomic_set(&tcon->num_posixmkdirs, 0);
47797 - atomic_set(&tcon->num_closes, 0);
47798 - atomic_set(&tcon->num_deletes, 0);
47799 - atomic_set(&tcon->num_mkdirs, 0);
47800 - atomic_set(&tcon->num_rmdirs, 0);
47801 - atomic_set(&tcon->num_renames, 0);
47802 - atomic_set(&tcon->num_t2renames, 0);
47803 - atomic_set(&tcon->num_ffirst, 0);
47804 - atomic_set(&tcon->num_fnext, 0);
47805 - atomic_set(&tcon->num_fclose, 0);
47806 - atomic_set(&tcon->num_hardlinks, 0);
47807 - atomic_set(&tcon->num_symlinks, 0);
47808 - atomic_set(&tcon->num_locks, 0);
47809 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
47810 + atomic_set_unchecked(&tcon->num_writes, 0);
47811 + atomic_set_unchecked(&tcon->num_reads, 0);
47812 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
47813 + atomic_set_unchecked(&tcon->num_opens, 0);
47814 + atomic_set_unchecked(&tcon->num_posixopens, 0);
47815 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
47816 + atomic_set_unchecked(&tcon->num_closes, 0);
47817 + atomic_set_unchecked(&tcon->num_deletes, 0);
47818 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
47819 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
47820 + atomic_set_unchecked(&tcon->num_renames, 0);
47821 + atomic_set_unchecked(&tcon->num_t2renames, 0);
47822 + atomic_set_unchecked(&tcon->num_ffirst, 0);
47823 + atomic_set_unchecked(&tcon->num_fnext, 0);
47824 + atomic_set_unchecked(&tcon->num_fclose, 0);
47825 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
47826 + atomic_set_unchecked(&tcon->num_symlinks, 0);
47827 + atomic_set_unchecked(&tcon->num_locks, 0);
47828 }
47829 }
47830 }
47831 @@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
47832 if (tcon->need_reconnect)
47833 seq_puts(m, "\tDISCONNECTED ");
47834 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
47835 - atomic_read(&tcon->num_smbs_sent),
47836 - atomic_read(&tcon->num_oplock_brks));
47837 + atomic_read_unchecked(&tcon->num_smbs_sent),
47838 + atomic_read_unchecked(&tcon->num_oplock_brks));
47839 seq_printf(m, "\nReads: %d Bytes: %lld",
47840 - atomic_read(&tcon->num_reads),
47841 + atomic_read_unchecked(&tcon->num_reads),
47842 (long long)(tcon->bytes_read));
47843 seq_printf(m, "\nWrites: %d Bytes: %lld",
47844 - atomic_read(&tcon->num_writes),
47845 + atomic_read_unchecked(&tcon->num_writes),
47846 (long long)(tcon->bytes_written));
47847 seq_printf(m, "\nFlushes: %d",
47848 - atomic_read(&tcon->num_flushes));
47849 + atomic_read_unchecked(&tcon->num_flushes));
47850 seq_printf(m, "\nLocks: %d HardLinks: %d "
47851 "Symlinks: %d",
47852 - atomic_read(&tcon->num_locks),
47853 - atomic_read(&tcon->num_hardlinks),
47854 - atomic_read(&tcon->num_symlinks));
47855 + atomic_read_unchecked(&tcon->num_locks),
47856 + atomic_read_unchecked(&tcon->num_hardlinks),
47857 + atomic_read_unchecked(&tcon->num_symlinks));
47858 seq_printf(m, "\nOpens: %d Closes: %d "
47859 "Deletes: %d",
47860 - atomic_read(&tcon->num_opens),
47861 - atomic_read(&tcon->num_closes),
47862 - atomic_read(&tcon->num_deletes));
47863 + atomic_read_unchecked(&tcon->num_opens),
47864 + atomic_read_unchecked(&tcon->num_closes),
47865 + atomic_read_unchecked(&tcon->num_deletes));
47866 seq_printf(m, "\nPosix Opens: %d "
47867 "Posix Mkdirs: %d",
47868 - atomic_read(&tcon->num_posixopens),
47869 - atomic_read(&tcon->num_posixmkdirs));
47870 + atomic_read_unchecked(&tcon->num_posixopens),
47871 + atomic_read_unchecked(&tcon->num_posixmkdirs));
47872 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
47873 - atomic_read(&tcon->num_mkdirs),
47874 - atomic_read(&tcon->num_rmdirs));
47875 + atomic_read_unchecked(&tcon->num_mkdirs),
47876 + atomic_read_unchecked(&tcon->num_rmdirs));
47877 seq_printf(m, "\nRenames: %d T2 Renames %d",
47878 - atomic_read(&tcon->num_renames),
47879 - atomic_read(&tcon->num_t2renames));
47880 + atomic_read_unchecked(&tcon->num_renames),
47881 + atomic_read_unchecked(&tcon->num_t2renames));
47882 seq_printf(m, "\nFindFirst: %d FNext %d "
47883 "FClose %d",
47884 - atomic_read(&tcon->num_ffirst),
47885 - atomic_read(&tcon->num_fnext),
47886 - atomic_read(&tcon->num_fclose));
47887 + atomic_read_unchecked(&tcon->num_ffirst),
47888 + atomic_read_unchecked(&tcon->num_fnext),
47889 + atomic_read_unchecked(&tcon->num_fclose));
47890 }
47891 }
47892 }
47893 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
47894 index 1445407..68cb0dc 100644
47895 --- a/fs/cifs/cifsfs.c
47896 +++ b/fs/cifs/cifsfs.c
47897 @@ -869,7 +869,7 @@ cifs_init_request_bufs(void)
47898 cifs_req_cachep = kmem_cache_create("cifs_request",
47899 CIFSMaxBufSize +
47900 MAX_CIFS_HDR_SIZE, 0,
47901 - SLAB_HWCACHE_ALIGN, NULL);
47902 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
47903 if (cifs_req_cachep == NULL)
47904 return -ENOMEM;
47905
47906 @@ -896,7 +896,7 @@ cifs_init_request_bufs(void)
47907 efficient to alloc 1 per page off the slab compared to 17K (5page)
47908 alloc of large cifs buffers even when page debugging is on */
47909 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
47910 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
47911 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
47912 NULL);
47913 if (cifs_sm_req_cachep == NULL) {
47914 mempool_destroy(cifs_req_poolp);
47915 @@ -991,8 +991,8 @@ init_cifs(void)
47916 atomic_set(&bufAllocCount, 0);
47917 atomic_set(&smBufAllocCount, 0);
47918 #ifdef CONFIG_CIFS_STATS2
47919 - atomic_set(&totBufAllocCount, 0);
47920 - atomic_set(&totSmBufAllocCount, 0);
47921 + atomic_set_unchecked(&totBufAllocCount, 0);
47922 + atomic_set_unchecked(&totSmBufAllocCount, 0);
47923 #endif /* CONFIG_CIFS_STATS2 */
47924
47925 atomic_set(&midCount, 0);
47926 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
47927 index e29581e..1c22bab 100644
47928 --- a/fs/cifs/cifsglob.h
47929 +++ b/fs/cifs/cifsglob.h
47930 @@ -252,28 +252,28 @@ struct cifsTconInfo {
47931 __u16 Flags; /* optional support bits */
47932 enum statusEnum tidStatus;
47933 #ifdef CONFIG_CIFS_STATS
47934 - atomic_t num_smbs_sent;
47935 - atomic_t num_writes;
47936 - atomic_t num_reads;
47937 - atomic_t num_flushes;
47938 - atomic_t num_oplock_brks;
47939 - atomic_t num_opens;
47940 - atomic_t num_closes;
47941 - atomic_t num_deletes;
47942 - atomic_t num_mkdirs;
47943 - atomic_t num_posixopens;
47944 - atomic_t num_posixmkdirs;
47945 - atomic_t num_rmdirs;
47946 - atomic_t num_renames;
47947 - atomic_t num_t2renames;
47948 - atomic_t num_ffirst;
47949 - atomic_t num_fnext;
47950 - atomic_t num_fclose;
47951 - atomic_t num_hardlinks;
47952 - atomic_t num_symlinks;
47953 - atomic_t num_locks;
47954 - atomic_t num_acl_get;
47955 - atomic_t num_acl_set;
47956 + atomic_unchecked_t num_smbs_sent;
47957 + atomic_unchecked_t num_writes;
47958 + atomic_unchecked_t num_reads;
47959 + atomic_unchecked_t num_flushes;
47960 + atomic_unchecked_t num_oplock_brks;
47961 + atomic_unchecked_t num_opens;
47962 + atomic_unchecked_t num_closes;
47963 + atomic_unchecked_t num_deletes;
47964 + atomic_unchecked_t num_mkdirs;
47965 + atomic_unchecked_t num_posixopens;
47966 + atomic_unchecked_t num_posixmkdirs;
47967 + atomic_unchecked_t num_rmdirs;
47968 + atomic_unchecked_t num_renames;
47969 + atomic_unchecked_t num_t2renames;
47970 + atomic_unchecked_t num_ffirst;
47971 + atomic_unchecked_t num_fnext;
47972 + atomic_unchecked_t num_fclose;
47973 + atomic_unchecked_t num_hardlinks;
47974 + atomic_unchecked_t num_symlinks;
47975 + atomic_unchecked_t num_locks;
47976 + atomic_unchecked_t num_acl_get;
47977 + atomic_unchecked_t num_acl_set;
47978 #ifdef CONFIG_CIFS_STATS2
47979 unsigned long long time_writes;
47980 unsigned long long time_reads;
47981 @@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const struct cifs_sb_info *cifs_sb)
47982 }
47983
47984 #ifdef CONFIG_CIFS_STATS
47985 -#define cifs_stats_inc atomic_inc
47986 +#define cifs_stats_inc atomic_inc_unchecked
47987
47988 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
47989 unsigned int bytes)
47990 @@ -701,8 +701,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
47991 /* Various Debug counters */
47992 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
47993 #ifdef CONFIG_CIFS_STATS2
47994 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
47995 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
47996 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
47997 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
47998 #endif
47999 GLOBAL_EXTERN atomic_t smBufAllocCount;
48000 GLOBAL_EXTERN atomic_t midCount;
48001 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
48002 index fc1e048..28b3441 100644
48003 --- a/fs/cifs/link.c
48004 +++ b/fs/cifs/link.c
48005 @@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
48006
48007 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
48008 {
48009 - char *p = nd_get_link(nd);
48010 + const char *p = nd_get_link(nd);
48011 if (!IS_ERR(p))
48012 kfree(p);
48013 }
48014 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
48015 index 95b82e8..12a538d 100644
48016 --- a/fs/cifs/misc.c
48017 +++ b/fs/cifs/misc.c
48018 @@ -155,7 +155,7 @@ cifs_buf_get(void)
48019 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
48020 atomic_inc(&bufAllocCount);
48021 #ifdef CONFIG_CIFS_STATS2
48022 - atomic_inc(&totBufAllocCount);
48023 + atomic_inc_unchecked(&totBufAllocCount);
48024 #endif /* CONFIG_CIFS_STATS2 */
48025 }
48026
48027 @@ -190,7 +190,7 @@ cifs_small_buf_get(void)
48028 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
48029 atomic_inc(&smBufAllocCount);
48030 #ifdef CONFIG_CIFS_STATS2
48031 - atomic_inc(&totSmBufAllocCount);
48032 + atomic_inc_unchecked(&totSmBufAllocCount);
48033 #endif /* CONFIG_CIFS_STATS2 */
48034
48035 }
48036 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
48037 index a5bf577..6d19845 100644
48038 --- a/fs/coda/cache.c
48039 +++ b/fs/coda/cache.c
48040 @@ -24,14 +24,14 @@
48041 #include <linux/coda_fs_i.h>
48042 #include <linux/coda_cache.h>
48043
48044 -static atomic_t permission_epoch = ATOMIC_INIT(0);
48045 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
48046
48047 /* replace or extend an acl cache hit */
48048 void coda_cache_enter(struct inode *inode, int mask)
48049 {
48050 struct coda_inode_info *cii = ITOC(inode);
48051
48052 - cii->c_cached_epoch = atomic_read(&permission_epoch);
48053 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
48054 if (cii->c_uid != current_fsuid()) {
48055 cii->c_uid = current_fsuid();
48056 cii->c_cached_perm = mask;
48057 @@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inode, int mask)
48058 void coda_cache_clear_inode(struct inode *inode)
48059 {
48060 struct coda_inode_info *cii = ITOC(inode);
48061 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
48062 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
48063 }
48064
48065 /* remove all acl caches */
48066 void coda_cache_clear_all(struct super_block *sb)
48067 {
48068 - atomic_inc(&permission_epoch);
48069 + atomic_inc_unchecked(&permission_epoch);
48070 }
48071
48072
48073 @@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode, int mask)
48074
48075 hit = (mask & cii->c_cached_perm) == mask &&
48076 cii->c_uid == current_fsuid() &&
48077 - cii->c_cached_epoch == atomic_read(&permission_epoch);
48078 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
48079
48080 return hit;
48081 }
48082 diff --git a/fs/compat.c b/fs/compat.c
48083 index d1e2411..27064e4 100644
48084 --- a/fs/compat.c
48085 +++ b/fs/compat.c
48086 @@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(char __user *filename, struct compat_timeval _
48087 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
48088 {
48089 compat_ino_t ino = stat->ino;
48090 - typeof(ubuf->st_uid) uid = 0;
48091 - typeof(ubuf->st_gid) gid = 0;
48092 + typeof(((struct compat_stat *)0)->st_uid) uid = 0;
48093 + typeof(((struct compat_stat *)0)->st_gid) gid = 0;
48094 int err;
48095
48096 SET_UID(uid, stat->uid);
48097 @@ -533,7 +533,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
48098
48099 set_fs(KERNEL_DS);
48100 /* The __user pointer cast is valid because of the set_fs() */
48101 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
48102 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
48103 set_fs(oldfs);
48104 /* truncating is ok because it's a user address */
48105 if (!ret)
48106 @@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
48107
48108 struct compat_readdir_callback {
48109 struct compat_old_linux_dirent __user *dirent;
48110 + struct file * file;
48111 int result;
48112 };
48113
48114 @@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
48115 buf->result = -EOVERFLOW;
48116 return -EOVERFLOW;
48117 }
48118 +
48119 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48120 + return 0;
48121 +
48122 buf->result++;
48123 dirent = buf->dirent;
48124 if (!access_ok(VERIFY_WRITE, dirent,
48125 @@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
48126
48127 buf.result = 0;
48128 buf.dirent = dirent;
48129 + buf.file = file;
48130
48131 error = vfs_readdir(file, compat_fillonedir, &buf);
48132 if (buf.result)
48133 @@ -899,6 +905,7 @@ struct compat_linux_dirent {
48134 struct compat_getdents_callback {
48135 struct compat_linux_dirent __user *current_dir;
48136 struct compat_linux_dirent __user *previous;
48137 + struct file * file;
48138 int count;
48139 int error;
48140 };
48141 @@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
48142 buf->error = -EOVERFLOW;
48143 return -EOVERFLOW;
48144 }
48145 +
48146 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48147 + return 0;
48148 +
48149 dirent = buf->previous;
48150 if (dirent) {
48151 if (__put_user(offset, &dirent->d_off))
48152 @@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
48153 buf.previous = NULL;
48154 buf.count = count;
48155 buf.error = 0;
48156 + buf.file = file;
48157
48158 error = vfs_readdir(file, compat_filldir, &buf);
48159 if (error >= 0)
48160 @@ -987,6 +999,7 @@ out:
48161 struct compat_getdents_callback64 {
48162 struct linux_dirent64 __user *current_dir;
48163 struct linux_dirent64 __user *previous;
48164 + struct file * file;
48165 int count;
48166 int error;
48167 };
48168 @@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
48169 buf->error = -EINVAL; /* only used if we fail.. */
48170 if (reclen > buf->count)
48171 return -EINVAL;
48172 +
48173 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48174 + return 0;
48175 +
48176 dirent = buf->previous;
48177
48178 if (dirent) {
48179 @@ -1054,13 +1071,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
48180 buf.previous = NULL;
48181 buf.count = count;
48182 buf.error = 0;
48183 + buf.file = file;
48184
48185 error = vfs_readdir(file, compat_filldir64, &buf);
48186 if (error >= 0)
48187 error = buf.error;
48188 lastdirent = buf.previous;
48189 if (lastdirent) {
48190 - typeof(lastdirent->d_off) d_off = file->f_pos;
48191 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
48192 if (__put_user_unaligned(d_off, &lastdirent->d_off))
48193 error = -EFAULT;
48194 else
48195 @@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
48196 * verify all the pointers
48197 */
48198 ret = -EINVAL;
48199 - if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
48200 + if (nr_segs > UIO_MAXIOV)
48201 goto out;
48202 if (!file->f_op)
48203 goto out;
48204 @@ -1463,11 +1481,35 @@ int compat_do_execve(char * filename,
48205 compat_uptr_t __user *envp,
48206 struct pt_regs * regs)
48207 {
48208 +#ifdef CONFIG_GRKERNSEC
48209 + struct file *old_exec_file;
48210 + struct acl_subject_label *old_acl;
48211 + struct rlimit old_rlim[RLIM_NLIMITS];
48212 +#endif
48213 struct linux_binprm *bprm;
48214 struct file *file;
48215 struct files_struct *displaced;
48216 bool clear_in_exec;
48217 int retval;
48218 + const struct cred *cred = current_cred();
48219 +
48220 + /*
48221 + * We move the actual failure in case of RLIMIT_NPROC excess from
48222 + * set*uid() to execve() because too many poorly written programs
48223 + * don't check setuid() return code. Here we additionally recheck
48224 + * whether NPROC limit is still exceeded.
48225 + */
48226 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
48227 +
48228 + if ((current->flags & PF_NPROC_EXCEEDED) &&
48229 + atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
48230 + retval = -EAGAIN;
48231 + goto out_ret;
48232 + }
48233 +
48234 + /* We're below the limit (still or again), so we don't want to make
48235 + * further execve() calls fail. */
48236 + current->flags &= ~PF_NPROC_EXCEEDED;
48237
48238 retval = unshare_files(&displaced);
48239 if (retval)
48240 @@ -1499,6 +1541,15 @@ int compat_do_execve(char * filename,
48241 bprm->filename = filename;
48242 bprm->interp = filename;
48243
48244 + if (gr_process_user_ban()) {
48245 + retval = -EPERM;
48246 + goto out_file;
48247 + }
48248 +
48249 + retval = -EACCES;
48250 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
48251 + goto out_file;
48252 +
48253 retval = bprm_mm_init(bprm);
48254 if (retval)
48255 goto out_file;
48256 @@ -1528,9 +1579,40 @@ int compat_do_execve(char * filename,
48257 if (retval < 0)
48258 goto out;
48259
48260 + if (!gr_tpe_allow(file)) {
48261 + retval = -EACCES;
48262 + goto out;
48263 + }
48264 +
48265 + if (gr_check_crash_exec(file)) {
48266 + retval = -EACCES;
48267 + goto out;
48268 + }
48269 +
48270 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
48271 +
48272 + gr_handle_exec_args_compat(bprm, argv);
48273 +
48274 +#ifdef CONFIG_GRKERNSEC
48275 + old_acl = current->acl;
48276 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
48277 + old_exec_file = current->exec_file;
48278 + get_file(file);
48279 + current->exec_file = file;
48280 +#endif
48281 +
48282 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
48283 + bprm->unsafe & LSM_UNSAFE_SHARE);
48284 + if (retval < 0)
48285 + goto out_fail;
48286 +
48287 retval = search_binary_handler(bprm, regs);
48288 if (retval < 0)
48289 - goto out;
48290 + goto out_fail;
48291 +#ifdef CONFIG_GRKERNSEC
48292 + if (old_exec_file)
48293 + fput(old_exec_file);
48294 +#endif
48295
48296 /* execve succeeded */
48297 current->fs->in_exec = 0;
48298 @@ -1541,6 +1623,14 @@ int compat_do_execve(char * filename,
48299 put_files_struct(displaced);
48300 return retval;
48301
48302 +out_fail:
48303 +#ifdef CONFIG_GRKERNSEC
48304 + current->acl = old_acl;
48305 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
48306 + fput(current->exec_file);
48307 + current->exec_file = old_exec_file;
48308 +#endif
48309 +
48310 out:
48311 if (bprm->mm) {
48312 acct_arg_size(bprm, 0);
48313 @@ -1711,6 +1801,8 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp,
48314 struct fdtable *fdt;
48315 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
48316
48317 + pax_track_stack();
48318 +
48319 if (n < 0)
48320 goto out_nofds;
48321
48322 @@ -2151,7 +2243,7 @@ asmlinkage long compat_sys_nfsservctl(int cmd,
48323 oldfs = get_fs();
48324 set_fs(KERNEL_DS);
48325 /* The __user pointer casts are valid because of the set_fs() */
48326 - err = sys_nfsservctl(cmd, (void __user *) karg, (void __user *) kres);
48327 + err = sys_nfsservctl(cmd, (void __force_user *) karg, (void __force_user *) kres);
48328 set_fs(oldfs);
48329
48330 if (err)
48331 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
48332 index 0adced2..bbb1b0d 100644
48333 --- a/fs/compat_binfmt_elf.c
48334 +++ b/fs/compat_binfmt_elf.c
48335 @@ -29,10 +29,12 @@
48336 #undef elfhdr
48337 #undef elf_phdr
48338 #undef elf_note
48339 +#undef elf_dyn
48340 #undef elf_addr_t
48341 #define elfhdr elf32_hdr
48342 #define elf_phdr elf32_phdr
48343 #define elf_note elf32_note
48344 +#define elf_dyn Elf32_Dyn
48345 #define elf_addr_t Elf32_Addr
48346
48347 /*
48348 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
48349 index d84e705..d8c364c 100644
48350 --- a/fs/compat_ioctl.c
48351 +++ b/fs/compat_ioctl.c
48352 @@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd, unsigned
48353 up = (struct compat_video_spu_palette __user *) arg;
48354 err = get_user(palp, &up->palette);
48355 err |= get_user(length, &up->length);
48356 + if (err)
48357 + return -EFAULT;
48358
48359 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
48360 err = put_user(compat_ptr(palp), &up_native->palette);
48361 @@ -1513,7 +1515,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
48362 return -EFAULT;
48363 if (__get_user(udata, &ss32->iomem_base))
48364 return -EFAULT;
48365 - ss.iomem_base = compat_ptr(udata);
48366 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
48367 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
48368 __get_user(ss.port_high, &ss32->port_high))
48369 return -EFAULT;
48370 @@ -1809,7 +1811,7 @@ static int compat_ioctl_preallocate(struct file *file, unsigned long arg)
48371 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
48372 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
48373 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
48374 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
48375 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
48376 return -EFAULT;
48377
48378 return ioctl_preallocate(file, p);
48379 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
48380 index 8e48b52..f01ed91 100644
48381 --- a/fs/configfs/dir.c
48382 +++ b/fs/configfs/dir.c
48383 @@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
48384 }
48385 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
48386 struct configfs_dirent *next;
48387 - const char * name;
48388 + const unsigned char * name;
48389 + char d_name[sizeof(next->s_dentry->d_iname)];
48390 int len;
48391
48392 next = list_entry(p, struct configfs_dirent,
48393 @@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
48394 continue;
48395
48396 name = configfs_get_name(next);
48397 - len = strlen(name);
48398 + if (next->s_dentry && name == next->s_dentry->d_iname) {
48399 + len = next->s_dentry->d_name.len;
48400 + memcpy(d_name, name, len);
48401 + name = d_name;
48402 + } else
48403 + len = strlen(name);
48404 if (next->s_dentry)
48405 ino = next->s_dentry->d_inode->i_ino;
48406 else
48407 diff --git a/fs/dcache.c b/fs/dcache.c
48408 index 44c0aea..2529092 100644
48409 --- a/fs/dcache.c
48410 +++ b/fs/dcache.c
48411 @@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
48412
48413 static struct kmem_cache *dentry_cache __read_mostly;
48414
48415 -#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
48416 -
48417 /*
48418 * This is the single most critical data structure when it comes
48419 * to the dcache: the hashtable for lookups. Somebody should try
48420 @@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned long mempages)
48421 mempages -= reserve;
48422
48423 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
48424 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
48425 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
48426
48427 dcache_init();
48428 inode_init();
48429 diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
48430 index c010ecf..a8d8c59 100644
48431 --- a/fs/dlm/lockspace.c
48432 +++ b/fs/dlm/lockspace.c
48433 @@ -148,7 +148,7 @@ static void lockspace_kobj_release(struct kobject *k)
48434 kfree(ls);
48435 }
48436
48437 -static struct sysfs_ops dlm_attr_ops = {
48438 +static const struct sysfs_ops dlm_attr_ops = {
48439 .show = dlm_attr_show,
48440 .store = dlm_attr_store,
48441 };
48442 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
48443 index 88ba4d4..073f003 100644
48444 --- a/fs/ecryptfs/inode.c
48445 +++ b/fs/ecryptfs/inode.c
48446 @@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
48447 old_fs = get_fs();
48448 set_fs(get_ds());
48449 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
48450 - (char __user *)lower_buf,
48451 + (char __force_user *)lower_buf,
48452 lower_bufsiz);
48453 set_fs(old_fs);
48454 if (rc < 0)
48455 @@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
48456 }
48457 old_fs = get_fs();
48458 set_fs(get_ds());
48459 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
48460 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
48461 set_fs(old_fs);
48462 if (rc < 0)
48463 goto out_free;
48464 diff --git a/fs/exec.c b/fs/exec.c
48465 index 86fafc6..6d33cbb 100644
48466 --- a/fs/exec.c
48467 +++ b/fs/exec.c
48468 @@ -56,12 +56,28 @@
48469 #include <linux/fsnotify.h>
48470 #include <linux/fs_struct.h>
48471 #include <linux/pipe_fs_i.h>
48472 +#include <linux/random.h>
48473 +#include <linux/seq_file.h>
48474 +
48475 +#ifdef CONFIG_PAX_REFCOUNT
48476 +#include <linux/kallsyms.h>
48477 +#include <linux/kdebug.h>
48478 +#endif
48479
48480 #include <asm/uaccess.h>
48481 #include <asm/mmu_context.h>
48482 #include <asm/tlb.h>
48483 #include "internal.h"
48484
48485 +#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
48486 +void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
48487 +#endif
48488 +
48489 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
48490 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
48491 +EXPORT_SYMBOL(pax_set_initial_flags_func);
48492 +#endif
48493 +
48494 int core_uses_pid;
48495 char core_pattern[CORENAME_MAX_SIZE] = "core";
48496 unsigned int core_pipe_limit;
48497 @@ -178,18 +194,10 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
48498 int write)
48499 {
48500 struct page *page;
48501 - int ret;
48502
48503 -#ifdef CONFIG_STACK_GROWSUP
48504 - if (write) {
48505 - ret = expand_stack_downwards(bprm->vma, pos);
48506 - if (ret < 0)
48507 - return NULL;
48508 - }
48509 -#endif
48510 - ret = get_user_pages(current, bprm->mm, pos,
48511 - 1, write, 1, &page, NULL);
48512 - if (ret <= 0)
48513 + if (0 > expand_stack_downwards(bprm->vma, pos))
48514 + return NULL;
48515 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
48516 return NULL;
48517
48518 if (write) {
48519 @@ -263,6 +271,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
48520 vma->vm_end = STACK_TOP_MAX;
48521 vma->vm_start = vma->vm_end - PAGE_SIZE;
48522 vma->vm_flags = VM_STACK_FLAGS;
48523 +
48524 +#ifdef CONFIG_PAX_SEGMEXEC
48525 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
48526 +#endif
48527 +
48528 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
48529
48530 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
48531 @@ -276,6 +289,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
48532 mm->stack_vm = mm->total_vm = 1;
48533 up_write(&mm->mmap_sem);
48534 bprm->p = vma->vm_end - sizeof(void *);
48535 +
48536 +#ifdef CONFIG_PAX_RANDUSTACK
48537 + if (randomize_va_space)
48538 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
48539 +#endif
48540 +
48541 return 0;
48542 err:
48543 up_write(&mm->mmap_sem);
48544 @@ -510,7 +529,7 @@ int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
48545 int r;
48546 mm_segment_t oldfs = get_fs();
48547 set_fs(KERNEL_DS);
48548 - r = copy_strings(argc, (char __user * __user *)argv, bprm);
48549 + r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
48550 set_fs(oldfs);
48551 return r;
48552 }
48553 @@ -540,7 +559,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
48554 unsigned long new_end = old_end - shift;
48555 struct mmu_gather *tlb;
48556
48557 - BUG_ON(new_start > new_end);
48558 + if (new_start >= new_end || new_start < mmap_min_addr)
48559 + return -ENOMEM;
48560
48561 /*
48562 * ensure there are no vmas between where we want to go
48563 @@ -549,6 +569,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
48564 if (vma != find_vma(mm, new_start))
48565 return -EFAULT;
48566
48567 +#ifdef CONFIG_PAX_SEGMEXEC
48568 + BUG_ON(pax_find_mirror_vma(vma));
48569 +#endif
48570 +
48571 /*
48572 * cover the whole range: [new_start, old_end)
48573 */
48574 @@ -630,10 +654,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
48575 stack_top = arch_align_stack(stack_top);
48576 stack_top = PAGE_ALIGN(stack_top);
48577
48578 - if (unlikely(stack_top < mmap_min_addr) ||
48579 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
48580 - return -ENOMEM;
48581 -
48582 stack_shift = vma->vm_end - stack_top;
48583
48584 bprm->p -= stack_shift;
48585 @@ -645,6 +665,14 @@ int setup_arg_pages(struct linux_binprm *bprm,
48586 bprm->exec -= stack_shift;
48587
48588 down_write(&mm->mmap_sem);
48589 +
48590 + /* Move stack pages down in memory. */
48591 + if (stack_shift) {
48592 + ret = shift_arg_pages(vma, stack_shift);
48593 + if (ret)
48594 + goto out_unlock;
48595 + }
48596 +
48597 vm_flags = VM_STACK_FLAGS;
48598
48599 /*
48600 @@ -658,19 +686,24 @@ int setup_arg_pages(struct linux_binprm *bprm,
48601 vm_flags &= ~VM_EXEC;
48602 vm_flags |= mm->def_flags;
48603
48604 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
48605 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
48606 + vm_flags &= ~VM_EXEC;
48607 +
48608 +#ifdef CONFIG_PAX_MPROTECT
48609 + if (mm->pax_flags & MF_PAX_MPROTECT)
48610 + vm_flags &= ~VM_MAYEXEC;
48611 +#endif
48612 +
48613 + }
48614 +#endif
48615 +
48616 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
48617 vm_flags);
48618 if (ret)
48619 goto out_unlock;
48620 BUG_ON(prev != vma);
48621
48622 - /* Move stack pages down in memory. */
48623 - if (stack_shift) {
48624 - ret = shift_arg_pages(vma, stack_shift);
48625 - if (ret)
48626 - goto out_unlock;
48627 - }
48628 -
48629 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
48630 stack_size = vma->vm_end - vma->vm_start;
48631 /*
48632 @@ -744,7 +777,7 @@ int kernel_read(struct file *file, loff_t offset,
48633 old_fs = get_fs();
48634 set_fs(get_ds());
48635 /* The cast to a user pointer is valid due to the set_fs() */
48636 - result = vfs_read(file, (void __user *)addr, count, &pos);
48637 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
48638 set_fs(old_fs);
48639 return result;
48640 }
48641 @@ -1152,7 +1185,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
48642 }
48643 rcu_read_unlock();
48644
48645 - if (p->fs->users > n_fs) {
48646 + if (atomic_read(&p->fs->users) > n_fs) {
48647 bprm->unsafe |= LSM_UNSAFE_SHARE;
48648 } else {
48649 res = -EAGAIN;
48650 @@ -1347,11 +1380,35 @@ int do_execve(char * filename,
48651 char __user *__user *envp,
48652 struct pt_regs * regs)
48653 {
48654 +#ifdef CONFIG_GRKERNSEC
48655 + struct file *old_exec_file;
48656 + struct acl_subject_label *old_acl;
48657 + struct rlimit old_rlim[RLIM_NLIMITS];
48658 +#endif
48659 struct linux_binprm *bprm;
48660 struct file *file;
48661 struct files_struct *displaced;
48662 bool clear_in_exec;
48663 int retval;
48664 + const struct cred *cred = current_cred();
48665 +
48666 + /*
48667 + * We move the actual failure in case of RLIMIT_NPROC excess from
48668 + * set*uid() to execve() because too many poorly written programs
48669 + * don't check setuid() return code. Here we additionally recheck
48670 + * whether NPROC limit is still exceeded.
48671 + */
48672 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
48673 +
48674 + if ((current->flags & PF_NPROC_EXCEEDED) &&
48675 + atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
48676 + retval = -EAGAIN;
48677 + goto out_ret;
48678 + }
48679 +
48680 + /* We're below the limit (still or again), so we don't want to make
48681 + * further execve() calls fail. */
48682 + current->flags &= ~PF_NPROC_EXCEEDED;
48683
48684 retval = unshare_files(&displaced);
48685 if (retval)
48686 @@ -1383,6 +1440,16 @@ int do_execve(char * filename,
48687 bprm->filename = filename;
48688 bprm->interp = filename;
48689
48690 + if (gr_process_user_ban()) {
48691 + retval = -EPERM;
48692 + goto out_file;
48693 + }
48694 +
48695 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
48696 + retval = -EACCES;
48697 + goto out_file;
48698 + }
48699 +
48700 retval = bprm_mm_init(bprm);
48701 if (retval)
48702 goto out_file;
48703 @@ -1412,10 +1479,41 @@ int do_execve(char * filename,
48704 if (retval < 0)
48705 goto out;
48706
48707 + if (!gr_tpe_allow(file)) {
48708 + retval = -EACCES;
48709 + goto out;
48710 + }
48711 +
48712 + if (gr_check_crash_exec(file)) {
48713 + retval = -EACCES;
48714 + goto out;
48715 + }
48716 +
48717 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
48718 +
48719 + gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
48720 +
48721 +#ifdef CONFIG_GRKERNSEC
48722 + old_acl = current->acl;
48723 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
48724 + old_exec_file = current->exec_file;
48725 + get_file(file);
48726 + current->exec_file = file;
48727 +#endif
48728 +
48729 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
48730 + bprm->unsafe & LSM_UNSAFE_SHARE);
48731 + if (retval < 0)
48732 + goto out_fail;
48733 +
48734 current->flags &= ~PF_KTHREAD;
48735 retval = search_binary_handler(bprm,regs);
48736 if (retval < 0)
48737 - goto out;
48738 + goto out_fail;
48739 +#ifdef CONFIG_GRKERNSEC
48740 + if (old_exec_file)
48741 + fput(old_exec_file);
48742 +#endif
48743
48744 /* execve succeeded */
48745 current->fs->in_exec = 0;
48746 @@ -1426,6 +1524,14 @@ int do_execve(char * filename,
48747 put_files_struct(displaced);
48748 return retval;
48749
48750 +out_fail:
48751 +#ifdef CONFIG_GRKERNSEC
48752 + current->acl = old_acl;
48753 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
48754 + fput(current->exec_file);
48755 + current->exec_file = old_exec_file;
48756 +#endif
48757 +
48758 out:
48759 if (bprm->mm) {
48760 acct_arg_size(bprm, 0);
48761 @@ -1591,6 +1697,220 @@ out:
48762 return ispipe;
48763 }
48764
48765 +int pax_check_flags(unsigned long *flags)
48766 +{
48767 + int retval = 0;
48768 +
48769 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
48770 + if (*flags & MF_PAX_SEGMEXEC)
48771 + {
48772 + *flags &= ~MF_PAX_SEGMEXEC;
48773 + retval = -EINVAL;
48774 + }
48775 +#endif
48776 +
48777 + if ((*flags & MF_PAX_PAGEEXEC)
48778 +
48779 +#ifdef CONFIG_PAX_PAGEEXEC
48780 + && (*flags & MF_PAX_SEGMEXEC)
48781 +#endif
48782 +
48783 + )
48784 + {
48785 + *flags &= ~MF_PAX_PAGEEXEC;
48786 + retval = -EINVAL;
48787 + }
48788 +
48789 + if ((*flags & MF_PAX_MPROTECT)
48790 +
48791 +#ifdef CONFIG_PAX_MPROTECT
48792 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
48793 +#endif
48794 +
48795 + )
48796 + {
48797 + *flags &= ~MF_PAX_MPROTECT;
48798 + retval = -EINVAL;
48799 + }
48800 +
48801 + if ((*flags & MF_PAX_EMUTRAMP)
48802 +
48803 +#ifdef CONFIG_PAX_EMUTRAMP
48804 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
48805 +#endif
48806 +
48807 + )
48808 + {
48809 + *flags &= ~MF_PAX_EMUTRAMP;
48810 + retval = -EINVAL;
48811 + }
48812 +
48813 + return retval;
48814 +}
48815 +
48816 +EXPORT_SYMBOL(pax_check_flags);
48817 +
48818 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
48819 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
48820 +{
48821 + struct task_struct *tsk = current;
48822 + struct mm_struct *mm = current->mm;
48823 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
48824 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
48825 + char *path_exec = NULL;
48826 + char *path_fault = NULL;
48827 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
48828 +
48829 + if (buffer_exec && buffer_fault) {
48830 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
48831 +
48832 + down_read(&mm->mmap_sem);
48833 + vma = mm->mmap;
48834 + while (vma && (!vma_exec || !vma_fault)) {
48835 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
48836 + vma_exec = vma;
48837 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
48838 + vma_fault = vma;
48839 + vma = vma->vm_next;
48840 + }
48841 + if (vma_exec) {
48842 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
48843 + if (IS_ERR(path_exec))
48844 + path_exec = "<path too long>";
48845 + else {
48846 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
48847 + if (path_exec) {
48848 + *path_exec = 0;
48849 + path_exec = buffer_exec;
48850 + } else
48851 + path_exec = "<path too long>";
48852 + }
48853 + }
48854 + if (vma_fault) {
48855 + start = vma_fault->vm_start;
48856 + end = vma_fault->vm_end;
48857 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
48858 + if (vma_fault->vm_file) {
48859 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
48860 + if (IS_ERR(path_fault))
48861 + path_fault = "<path too long>";
48862 + else {
48863 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
48864 + if (path_fault) {
48865 + *path_fault = 0;
48866 + path_fault = buffer_fault;
48867 + } else
48868 + path_fault = "<path too long>";
48869 + }
48870 + } else
48871 + path_fault = "<anonymous mapping>";
48872 + }
48873 + up_read(&mm->mmap_sem);
48874 + }
48875 + if (tsk->signal->curr_ip)
48876 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
48877 + else
48878 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
48879 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
48880 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
48881 + task_uid(tsk), task_euid(tsk), pc, sp);
48882 + free_page((unsigned long)buffer_exec);
48883 + free_page((unsigned long)buffer_fault);
48884 + pax_report_insns(regs, pc, sp);
48885 + do_coredump(SIGKILL, SIGKILL, regs);
48886 +}
48887 +#endif
48888 +
48889 +#ifdef CONFIG_PAX_REFCOUNT
48890 +void pax_report_refcount_overflow(struct pt_regs *regs)
48891 +{
48892 + if (current->signal->curr_ip)
48893 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
48894 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
48895 + else
48896 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
48897 + current->comm, task_pid_nr(current), current_uid(), current_euid());
48898 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
48899 + show_regs(regs);
48900 + force_sig_specific(SIGKILL, current);
48901 +}
48902 +#endif
48903 +
48904 +#ifdef CONFIG_PAX_USERCOPY
48905 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
48906 +int object_is_on_stack(const void *obj, unsigned long len)
48907 +{
48908 + const void * const stack = task_stack_page(current);
48909 + const void * const stackend = stack + THREAD_SIZE;
48910 +
48911 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
48912 + const void *frame = NULL;
48913 + const void *oldframe;
48914 +#endif
48915 +
48916 + if (obj + len < obj)
48917 + return -1;
48918 +
48919 + if (obj + len <= stack || stackend <= obj)
48920 + return 0;
48921 +
48922 + if (obj < stack || stackend < obj + len)
48923 + return -1;
48924 +
48925 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
48926 + oldframe = __builtin_frame_address(1);
48927 + if (oldframe)
48928 + frame = __builtin_frame_address(2);
48929 + /*
48930 + low ----------------------------------------------> high
48931 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
48932 + ^----------------^
48933 + allow copies only within here
48934 + */
48935 + while (stack <= frame && frame < stackend) {
48936 + /* if obj + len extends past the last frame, this
48937 + check won't pass and the next frame will be 0,
48938 + causing us to bail out and correctly report
48939 + the copy as invalid
48940 + */
48941 + if (obj + len <= frame)
48942 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
48943 + oldframe = frame;
48944 + frame = *(const void * const *)frame;
48945 + }
48946 + return -1;
48947 +#else
48948 + return 1;
48949 +#endif
48950 +}
48951 +
48952 +
48953 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
48954 +{
48955 + if (current->signal->curr_ip)
48956 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
48957 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
48958 + else
48959 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
48960 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
48961 +
48962 + dump_stack();
48963 + gr_handle_kernel_exploit();
48964 + do_group_exit(SIGKILL);
48965 +}
48966 +#endif
48967 +
48968 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
48969 +void pax_track_stack(void)
48970 +{
48971 + unsigned long sp = (unsigned long)&sp;
48972 + if (sp < current_thread_info()->lowest_stack &&
48973 + sp > (unsigned long)task_stack_page(current))
48974 + current_thread_info()->lowest_stack = sp;
48975 +}
48976 +EXPORT_SYMBOL(pax_track_stack);
48977 +#endif
48978 +
48979 static int zap_process(struct task_struct *start)
48980 {
48981 struct task_struct *t;
48982 @@ -1793,17 +2113,17 @@ static void wait_for_dump_helpers(struct file *file)
48983 pipe = file->f_path.dentry->d_inode->i_pipe;
48984
48985 pipe_lock(pipe);
48986 - pipe->readers++;
48987 - pipe->writers--;
48988 + atomic_inc(&pipe->readers);
48989 + atomic_dec(&pipe->writers);
48990
48991 - while ((pipe->readers > 1) && (!signal_pending(current))) {
48992 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
48993 wake_up_interruptible_sync(&pipe->wait);
48994 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
48995 pipe_wait(pipe);
48996 }
48997
48998 - pipe->readers--;
48999 - pipe->writers++;
49000 + atomic_dec(&pipe->readers);
49001 + atomic_inc(&pipe->writers);
49002 pipe_unlock(pipe);
49003
49004 }
49005 @@ -1826,10 +2146,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
49006 char **helper_argv = NULL;
49007 int helper_argc = 0;
49008 int dump_count = 0;
49009 - static atomic_t core_dump_count = ATOMIC_INIT(0);
49010 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
49011
49012 audit_core_dumps(signr);
49013
49014 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
49015 + gr_handle_brute_attach(current, mm->flags);
49016 +
49017 binfmt = mm->binfmt;
49018 if (!binfmt || !binfmt->core_dump)
49019 goto fail;
49020 @@ -1874,6 +2197,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
49021 */
49022 clear_thread_flag(TIF_SIGPENDING);
49023
49024 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
49025 +
49026 /*
49027 * lock_kernel() because format_corename() is controlled by sysctl, which
49028 * uses lock_kernel()
49029 @@ -1908,7 +2233,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
49030 goto fail_unlock;
49031 }
49032
49033 - dump_count = atomic_inc_return(&core_dump_count);
49034 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
49035 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
49036 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
49037 task_tgid_vnr(current), current->comm);
49038 @@ -1972,7 +2297,7 @@ close_fail:
49039 filp_close(file, NULL);
49040 fail_dropcount:
49041 if (dump_count)
49042 - atomic_dec(&core_dump_count);
49043 + atomic_dec_unchecked(&core_dump_count);
49044 fail_unlock:
49045 if (helper_argv)
49046 argv_free(helper_argv);
49047 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
49048 index 7f8d2e5..a1abdbb 100644
49049 --- a/fs/ext2/balloc.c
49050 +++ b/fs/ext2/balloc.c
49051 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
49052
49053 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
49054 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
49055 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
49056 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
49057 sbi->s_resuid != current_fsuid() &&
49058 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
49059 return 0;
49060 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
49061 index 27967f9..9f2a5fb 100644
49062 --- a/fs/ext3/balloc.c
49063 +++ b/fs/ext3/balloc.c
49064 @@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
49065
49066 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
49067 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
49068 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
49069 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
49070 sbi->s_resuid != current_fsuid() &&
49071 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
49072 return 0;
49073 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
49074 index e85b63c..80398e6 100644
49075 --- a/fs/ext4/balloc.c
49076 +++ b/fs/ext4/balloc.c
49077 @@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
49078 /* Hm, nope. Are (enough) root reserved blocks available? */
49079 if (sbi->s_resuid == current_fsuid() ||
49080 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
49081 - capable(CAP_SYS_RESOURCE)) {
49082 + capable_nolog(CAP_SYS_RESOURCE)) {
49083 if (free_blocks >= (nblocks + dirty_blocks))
49084 return 1;
49085 }
49086 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
49087 index 67c46ed..1f237e5 100644
49088 --- a/fs/ext4/ext4.h
49089 +++ b/fs/ext4/ext4.h
49090 @@ -1077,19 +1077,19 @@ struct ext4_sb_info {
49091
49092 /* stats for buddy allocator */
49093 spinlock_t s_mb_pa_lock;
49094 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
49095 - atomic_t s_bal_success; /* we found long enough chunks */
49096 - atomic_t s_bal_allocated; /* in blocks */
49097 - atomic_t s_bal_ex_scanned; /* total extents scanned */
49098 - atomic_t s_bal_goals; /* goal hits */
49099 - atomic_t s_bal_breaks; /* too long searches */
49100 - atomic_t s_bal_2orders; /* 2^order hits */
49101 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
49102 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
49103 + atomic_unchecked_t s_bal_allocated; /* in blocks */
49104 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
49105 + atomic_unchecked_t s_bal_goals; /* goal hits */
49106 + atomic_unchecked_t s_bal_breaks; /* too long searches */
49107 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
49108 spinlock_t s_bal_lock;
49109 unsigned long s_mb_buddies_generated;
49110 unsigned long long s_mb_generation_time;
49111 - atomic_t s_mb_lost_chunks;
49112 - atomic_t s_mb_preallocated;
49113 - atomic_t s_mb_discarded;
49114 + atomic_unchecked_t s_mb_lost_chunks;
49115 + atomic_unchecked_t s_mb_preallocated;
49116 + atomic_unchecked_t s_mb_discarded;
49117 atomic_t s_lock_busy;
49118
49119 /* locality groups */
49120 diff --git a/fs/ext4/file.c b/fs/ext4/file.c
49121 index 2a60541..7439d61 100644
49122 --- a/fs/ext4/file.c
49123 +++ b/fs/ext4/file.c
49124 @@ -122,8 +122,8 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
49125 cp = d_path(&path, buf, sizeof(buf));
49126 path_put(&path);
49127 if (!IS_ERR(cp)) {
49128 - memcpy(sbi->s_es->s_last_mounted, cp,
49129 - sizeof(sbi->s_es->s_last_mounted));
49130 + strlcpy(sbi->s_es->s_last_mounted, cp,
49131 + sizeof(sbi->s_es->s_last_mounted));
49132 sb->s_dirt = 1;
49133 }
49134 }
49135 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
49136 index 42bac1b..0aab9d8 100644
49137 --- a/fs/ext4/mballoc.c
49138 +++ b/fs/ext4/mballoc.c
49139 @@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
49140 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
49141
49142 if (EXT4_SB(sb)->s_mb_stats)
49143 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
49144 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
49145
49146 break;
49147 }
49148 @@ -2131,7 +2131,7 @@ repeat:
49149 ac->ac_status = AC_STATUS_CONTINUE;
49150 ac->ac_flags |= EXT4_MB_HINT_FIRST;
49151 cr = 3;
49152 - atomic_inc(&sbi->s_mb_lost_chunks);
49153 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
49154 goto repeat;
49155 }
49156 }
49157 @@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
49158 ext4_grpblk_t counters[16];
49159 } sg;
49160
49161 + pax_track_stack();
49162 +
49163 group--;
49164 if (group == 0)
49165 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
49166 @@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *sb)
49167 if (sbi->s_mb_stats) {
49168 printk(KERN_INFO
49169 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
49170 - atomic_read(&sbi->s_bal_allocated),
49171 - atomic_read(&sbi->s_bal_reqs),
49172 - atomic_read(&sbi->s_bal_success));
49173 + atomic_read_unchecked(&sbi->s_bal_allocated),
49174 + atomic_read_unchecked(&sbi->s_bal_reqs),
49175 + atomic_read_unchecked(&sbi->s_bal_success));
49176 printk(KERN_INFO
49177 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
49178 "%u 2^N hits, %u breaks, %u lost\n",
49179 - atomic_read(&sbi->s_bal_ex_scanned),
49180 - atomic_read(&sbi->s_bal_goals),
49181 - atomic_read(&sbi->s_bal_2orders),
49182 - atomic_read(&sbi->s_bal_breaks),
49183 - atomic_read(&sbi->s_mb_lost_chunks));
49184 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
49185 + atomic_read_unchecked(&sbi->s_bal_goals),
49186 + atomic_read_unchecked(&sbi->s_bal_2orders),
49187 + atomic_read_unchecked(&sbi->s_bal_breaks),
49188 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
49189 printk(KERN_INFO
49190 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
49191 sbi->s_mb_buddies_generated++,
49192 sbi->s_mb_generation_time);
49193 printk(KERN_INFO
49194 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
49195 - atomic_read(&sbi->s_mb_preallocated),
49196 - atomic_read(&sbi->s_mb_discarded));
49197 + atomic_read_unchecked(&sbi->s_mb_preallocated),
49198 + atomic_read_unchecked(&sbi->s_mb_discarded));
49199 }
49200
49201 free_percpu(sbi->s_locality_groups);
49202 @@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
49203 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
49204
49205 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
49206 - atomic_inc(&sbi->s_bal_reqs);
49207 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
49208 + atomic_inc_unchecked(&sbi->s_bal_reqs);
49209 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
49210 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
49211 - atomic_inc(&sbi->s_bal_success);
49212 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
49213 + atomic_inc_unchecked(&sbi->s_bal_success);
49214 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
49215 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
49216 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
49217 - atomic_inc(&sbi->s_bal_goals);
49218 + atomic_inc_unchecked(&sbi->s_bal_goals);
49219 if (ac->ac_found > sbi->s_mb_max_to_scan)
49220 - atomic_inc(&sbi->s_bal_breaks);
49221 + atomic_inc_unchecked(&sbi->s_bal_breaks);
49222 }
49223
49224 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
49225 @@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
49226 trace_ext4_mb_new_inode_pa(ac, pa);
49227
49228 ext4_mb_use_inode_pa(ac, pa);
49229 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49230 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49231
49232 ei = EXT4_I(ac->ac_inode);
49233 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
49234 @@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
49235 trace_ext4_mb_new_group_pa(ac, pa);
49236
49237 ext4_mb_use_group_pa(ac, pa);
49238 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49239 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49240
49241 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
49242 lg = ac->ac_lg;
49243 @@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
49244 * from the bitmap and continue.
49245 */
49246 }
49247 - atomic_add(free, &sbi->s_mb_discarded);
49248 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
49249
49250 return err;
49251 }
49252 @@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
49253 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
49254 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
49255 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
49256 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
49257 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
49258
49259 if (ac) {
49260 ac->ac_sb = sb;
49261 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
49262 index f27e045..be5a1c3 100644
49263 --- a/fs/ext4/super.c
49264 +++ b/fs/ext4/super.c
49265 @@ -2287,7 +2287,7 @@ static void ext4_sb_release(struct kobject *kobj)
49266 }
49267
49268
49269 -static struct sysfs_ops ext4_attr_ops = {
49270 +static const struct sysfs_ops ext4_attr_ops = {
49271 .show = ext4_attr_show,
49272 .store = ext4_attr_store,
49273 };
49274 diff --git a/fs/fcntl.c b/fs/fcntl.c
49275 index 97e01dc..e9aab2d 100644
49276 --- a/fs/fcntl.c
49277 +++ b/fs/fcntl.c
49278 @@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
49279 if (err)
49280 return err;
49281
49282 + if (gr_handle_chroot_fowner(pid, type))
49283 + return -ENOENT;
49284 + if (gr_check_protected_task_fowner(pid, type))
49285 + return -EACCES;
49286 +
49287 f_modown(filp, pid, type, force);
49288 return 0;
49289 }
49290 @@ -265,7 +270,7 @@ pid_t f_getown(struct file *filp)
49291
49292 static int f_setown_ex(struct file *filp, unsigned long arg)
49293 {
49294 - struct f_owner_ex * __user owner_p = (void * __user)arg;
49295 + struct f_owner_ex __user *owner_p = (void __user *)arg;
49296 struct f_owner_ex owner;
49297 struct pid *pid;
49298 int type;
49299 @@ -305,7 +310,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
49300
49301 static int f_getown_ex(struct file *filp, unsigned long arg)
49302 {
49303 - struct f_owner_ex * __user owner_p = (void * __user)arg;
49304 + struct f_owner_ex __user *owner_p = (void __user *)arg;
49305 struct f_owner_ex owner;
49306 int ret = 0;
49307
49308 @@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
49309 switch (cmd) {
49310 case F_DUPFD:
49311 case F_DUPFD_CLOEXEC:
49312 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
49313 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
49314 break;
49315 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
49316 diff --git a/fs/fifo.c b/fs/fifo.c
49317 index f8f97b8..b1f2259 100644
49318 --- a/fs/fifo.c
49319 +++ b/fs/fifo.c
49320 @@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
49321 */
49322 filp->f_op = &read_pipefifo_fops;
49323 pipe->r_counter++;
49324 - if (pipe->readers++ == 0)
49325 + if (atomic_inc_return(&pipe->readers) == 1)
49326 wake_up_partner(inode);
49327
49328 - if (!pipe->writers) {
49329 + if (!atomic_read(&pipe->writers)) {
49330 if ((filp->f_flags & O_NONBLOCK)) {
49331 /* suppress POLLHUP until we have
49332 * seen a writer */
49333 @@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
49334 * errno=ENXIO when there is no process reading the FIFO.
49335 */
49336 ret = -ENXIO;
49337 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
49338 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
49339 goto err;
49340
49341 filp->f_op = &write_pipefifo_fops;
49342 pipe->w_counter++;
49343 - if (!pipe->writers++)
49344 + if (atomic_inc_return(&pipe->writers) == 1)
49345 wake_up_partner(inode);
49346
49347 - if (!pipe->readers) {
49348 + if (!atomic_read(&pipe->readers)) {
49349 wait_for_partner(inode, &pipe->r_counter);
49350 if (signal_pending(current))
49351 goto err_wr;
49352 @@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
49353 */
49354 filp->f_op = &rdwr_pipefifo_fops;
49355
49356 - pipe->readers++;
49357 - pipe->writers++;
49358 + atomic_inc(&pipe->readers);
49359 + atomic_inc(&pipe->writers);
49360 pipe->r_counter++;
49361 pipe->w_counter++;
49362 - if (pipe->readers == 1 || pipe->writers == 1)
49363 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
49364 wake_up_partner(inode);
49365 break;
49366
49367 @@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
49368 return 0;
49369
49370 err_rd:
49371 - if (!--pipe->readers)
49372 + if (atomic_dec_and_test(&pipe->readers))
49373 wake_up_interruptible(&pipe->wait);
49374 ret = -ERESTARTSYS;
49375 goto err;
49376
49377 err_wr:
49378 - if (!--pipe->writers)
49379 + if (atomic_dec_and_test(&pipe->writers))
49380 wake_up_interruptible(&pipe->wait);
49381 ret = -ERESTARTSYS;
49382 goto err;
49383
49384 err:
49385 - if (!pipe->readers && !pipe->writers)
49386 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
49387 free_pipe_info(inode);
49388
49389 err_nocleanup:
49390 diff --git a/fs/file.c b/fs/file.c
49391 index 87e1290..a930cc4 100644
49392 --- a/fs/file.c
49393 +++ b/fs/file.c
49394 @@ -14,6 +14,7 @@
49395 #include <linux/slab.h>
49396 #include <linux/vmalloc.h>
49397 #include <linux/file.h>
49398 +#include <linux/security.h>
49399 #include <linux/fdtable.h>
49400 #include <linux/bitops.h>
49401 #include <linux/interrupt.h>
49402 @@ -257,6 +258,8 @@ int expand_files(struct files_struct *files, int nr)
49403 * N.B. For clone tasks sharing a files structure, this test
49404 * will limit the total number of files that can be opened.
49405 */
49406 +
49407 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
49408 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
49409 return -EMFILE;
49410
49411 diff --git a/fs/filesystems.c b/fs/filesystems.c
49412 index a24c58e..53f91ee 100644
49413 --- a/fs/filesystems.c
49414 +++ b/fs/filesystems.c
49415 @@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(const char *name)
49416 int len = dot ? dot - name : strlen(name);
49417
49418 fs = __get_fs_type(name, len);
49419 +
49420 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
49421 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
49422 +#else
49423 if (!fs && (request_module("%.*s", len, name) == 0))
49424 +#endif
49425 fs = __get_fs_type(name, len);
49426
49427 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
49428 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
49429 index eee0590..ef5bc0e 100644
49430 --- a/fs/fs_struct.c
49431 +++ b/fs/fs_struct.c
49432 @@ -4,6 +4,7 @@
49433 #include <linux/path.h>
49434 #include <linux/slab.h>
49435 #include <linux/fs_struct.h>
49436 +#include <linux/grsecurity.h>
49437
49438 /*
49439 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
49440 @@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
49441 old_root = fs->root;
49442 fs->root = *path;
49443 path_get(path);
49444 + gr_set_chroot_entries(current, path);
49445 write_unlock(&fs->lock);
49446 if (old_root.dentry)
49447 path_put(&old_root);
49448 @@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
49449 && fs->root.mnt == old_root->mnt) {
49450 path_get(new_root);
49451 fs->root = *new_root;
49452 + gr_set_chroot_entries(p, new_root);
49453 count++;
49454 }
49455 if (fs->pwd.dentry == old_root->dentry
49456 @@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
49457 task_lock(tsk);
49458 write_lock(&fs->lock);
49459 tsk->fs = NULL;
49460 - kill = !--fs->users;
49461 + gr_clear_chroot_entries(tsk);
49462 + kill = !atomic_dec_return(&fs->users);
49463 write_unlock(&fs->lock);
49464 task_unlock(tsk);
49465 if (kill)
49466 @@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
49467 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
49468 /* We don't need to lock fs - think why ;-) */
49469 if (fs) {
49470 - fs->users = 1;
49471 + atomic_set(&fs->users, 1);
49472 fs->in_exec = 0;
49473 rwlock_init(&fs->lock);
49474 fs->umask = old->umask;
49475 @@ -127,8 +131,9 @@ int unshare_fs_struct(void)
49476
49477 task_lock(current);
49478 write_lock(&fs->lock);
49479 - kill = !--fs->users;
49480 + kill = !atomic_dec_return(&fs->users);
49481 current->fs = new_fs;
49482 + gr_set_chroot_entries(current, &new_fs->root);
49483 write_unlock(&fs->lock);
49484 task_unlock(current);
49485
49486 @@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask);
49487
49488 /* to be mentioned only in INIT_TASK */
49489 struct fs_struct init_fs = {
49490 - .users = 1,
49491 + .users = ATOMIC_INIT(1),
49492 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
49493 .umask = 0022,
49494 };
49495 @@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
49496 task_lock(current);
49497
49498 write_lock(&init_fs.lock);
49499 - init_fs.users++;
49500 + atomic_inc(&init_fs.users);
49501 write_unlock(&init_fs.lock);
49502
49503 write_lock(&fs->lock);
49504 current->fs = &init_fs;
49505 - kill = !--fs->users;
49506 + gr_set_chroot_entries(current, &current->fs->root);
49507 + kill = !atomic_dec_return(&fs->users);
49508 write_unlock(&fs->lock);
49509
49510 task_unlock(current);
49511 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
49512 index 9905350..02eaec4 100644
49513 --- a/fs/fscache/cookie.c
49514 +++ b/fs/fscache/cookie.c
49515 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
49516 parent ? (char *) parent->def->name : "<no-parent>",
49517 def->name, netfs_data);
49518
49519 - fscache_stat(&fscache_n_acquires);
49520 + fscache_stat_unchecked(&fscache_n_acquires);
49521
49522 /* if there's no parent cookie, then we don't create one here either */
49523 if (!parent) {
49524 - fscache_stat(&fscache_n_acquires_null);
49525 + fscache_stat_unchecked(&fscache_n_acquires_null);
49526 _leave(" [no parent]");
49527 return NULL;
49528 }
49529 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
49530 /* allocate and initialise a cookie */
49531 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
49532 if (!cookie) {
49533 - fscache_stat(&fscache_n_acquires_oom);
49534 + fscache_stat_unchecked(&fscache_n_acquires_oom);
49535 _leave(" [ENOMEM]");
49536 return NULL;
49537 }
49538 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
49539
49540 switch (cookie->def->type) {
49541 case FSCACHE_COOKIE_TYPE_INDEX:
49542 - fscache_stat(&fscache_n_cookie_index);
49543 + fscache_stat_unchecked(&fscache_n_cookie_index);
49544 break;
49545 case FSCACHE_COOKIE_TYPE_DATAFILE:
49546 - fscache_stat(&fscache_n_cookie_data);
49547 + fscache_stat_unchecked(&fscache_n_cookie_data);
49548 break;
49549 default:
49550 - fscache_stat(&fscache_n_cookie_special);
49551 + fscache_stat_unchecked(&fscache_n_cookie_special);
49552 break;
49553 }
49554
49555 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
49556 if (fscache_acquire_non_index_cookie(cookie) < 0) {
49557 atomic_dec(&parent->n_children);
49558 __fscache_cookie_put(cookie);
49559 - fscache_stat(&fscache_n_acquires_nobufs);
49560 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
49561 _leave(" = NULL");
49562 return NULL;
49563 }
49564 }
49565
49566 - fscache_stat(&fscache_n_acquires_ok);
49567 + fscache_stat_unchecked(&fscache_n_acquires_ok);
49568 _leave(" = %p", cookie);
49569 return cookie;
49570 }
49571 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
49572 cache = fscache_select_cache_for_object(cookie->parent);
49573 if (!cache) {
49574 up_read(&fscache_addremove_sem);
49575 - fscache_stat(&fscache_n_acquires_no_cache);
49576 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
49577 _leave(" = -ENOMEDIUM [no cache]");
49578 return -ENOMEDIUM;
49579 }
49580 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
49581 object = cache->ops->alloc_object(cache, cookie);
49582 fscache_stat_d(&fscache_n_cop_alloc_object);
49583 if (IS_ERR(object)) {
49584 - fscache_stat(&fscache_n_object_no_alloc);
49585 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
49586 ret = PTR_ERR(object);
49587 goto error;
49588 }
49589
49590 - fscache_stat(&fscache_n_object_alloc);
49591 + fscache_stat_unchecked(&fscache_n_object_alloc);
49592
49593 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
49594
49595 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
49596 struct fscache_object *object;
49597 struct hlist_node *_p;
49598
49599 - fscache_stat(&fscache_n_updates);
49600 + fscache_stat_unchecked(&fscache_n_updates);
49601
49602 if (!cookie) {
49603 - fscache_stat(&fscache_n_updates_null);
49604 + fscache_stat_unchecked(&fscache_n_updates_null);
49605 _leave(" [no cookie]");
49606 return;
49607 }
49608 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
49609 struct fscache_object *object;
49610 unsigned long event;
49611
49612 - fscache_stat(&fscache_n_relinquishes);
49613 + fscache_stat_unchecked(&fscache_n_relinquishes);
49614 if (retire)
49615 - fscache_stat(&fscache_n_relinquishes_retire);
49616 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
49617
49618 if (!cookie) {
49619 - fscache_stat(&fscache_n_relinquishes_null);
49620 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
49621 _leave(" [no cookie]");
49622 return;
49623 }
49624 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
49625
49626 /* wait for the cookie to finish being instantiated (or to fail) */
49627 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
49628 - fscache_stat(&fscache_n_relinquishes_waitcrt);
49629 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
49630 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
49631 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
49632 }
49633 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
49634 index edd7434..0725e66 100644
49635 --- a/fs/fscache/internal.h
49636 +++ b/fs/fscache/internal.h
49637 @@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
49638 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
49639 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
49640
49641 -extern atomic_t fscache_n_op_pend;
49642 -extern atomic_t fscache_n_op_run;
49643 -extern atomic_t fscache_n_op_enqueue;
49644 -extern atomic_t fscache_n_op_deferred_release;
49645 -extern atomic_t fscache_n_op_release;
49646 -extern atomic_t fscache_n_op_gc;
49647 -extern atomic_t fscache_n_op_cancelled;
49648 -extern atomic_t fscache_n_op_rejected;
49649 +extern atomic_unchecked_t fscache_n_op_pend;
49650 +extern atomic_unchecked_t fscache_n_op_run;
49651 +extern atomic_unchecked_t fscache_n_op_enqueue;
49652 +extern atomic_unchecked_t fscache_n_op_deferred_release;
49653 +extern atomic_unchecked_t fscache_n_op_release;
49654 +extern atomic_unchecked_t fscache_n_op_gc;
49655 +extern atomic_unchecked_t fscache_n_op_cancelled;
49656 +extern atomic_unchecked_t fscache_n_op_rejected;
49657
49658 -extern atomic_t fscache_n_attr_changed;
49659 -extern atomic_t fscache_n_attr_changed_ok;
49660 -extern atomic_t fscache_n_attr_changed_nobufs;
49661 -extern atomic_t fscache_n_attr_changed_nomem;
49662 -extern atomic_t fscache_n_attr_changed_calls;
49663 +extern atomic_unchecked_t fscache_n_attr_changed;
49664 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
49665 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
49666 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
49667 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
49668
49669 -extern atomic_t fscache_n_allocs;
49670 -extern atomic_t fscache_n_allocs_ok;
49671 -extern atomic_t fscache_n_allocs_wait;
49672 -extern atomic_t fscache_n_allocs_nobufs;
49673 -extern atomic_t fscache_n_allocs_intr;
49674 -extern atomic_t fscache_n_allocs_object_dead;
49675 -extern atomic_t fscache_n_alloc_ops;
49676 -extern atomic_t fscache_n_alloc_op_waits;
49677 +extern atomic_unchecked_t fscache_n_allocs;
49678 +extern atomic_unchecked_t fscache_n_allocs_ok;
49679 +extern atomic_unchecked_t fscache_n_allocs_wait;
49680 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
49681 +extern atomic_unchecked_t fscache_n_allocs_intr;
49682 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
49683 +extern atomic_unchecked_t fscache_n_alloc_ops;
49684 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
49685
49686 -extern atomic_t fscache_n_retrievals;
49687 -extern atomic_t fscache_n_retrievals_ok;
49688 -extern atomic_t fscache_n_retrievals_wait;
49689 -extern atomic_t fscache_n_retrievals_nodata;
49690 -extern atomic_t fscache_n_retrievals_nobufs;
49691 -extern atomic_t fscache_n_retrievals_intr;
49692 -extern atomic_t fscache_n_retrievals_nomem;
49693 -extern atomic_t fscache_n_retrievals_object_dead;
49694 -extern atomic_t fscache_n_retrieval_ops;
49695 -extern atomic_t fscache_n_retrieval_op_waits;
49696 +extern atomic_unchecked_t fscache_n_retrievals;
49697 +extern atomic_unchecked_t fscache_n_retrievals_ok;
49698 +extern atomic_unchecked_t fscache_n_retrievals_wait;
49699 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
49700 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
49701 +extern atomic_unchecked_t fscache_n_retrievals_intr;
49702 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
49703 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
49704 +extern atomic_unchecked_t fscache_n_retrieval_ops;
49705 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
49706
49707 -extern atomic_t fscache_n_stores;
49708 -extern atomic_t fscache_n_stores_ok;
49709 -extern atomic_t fscache_n_stores_again;
49710 -extern atomic_t fscache_n_stores_nobufs;
49711 -extern atomic_t fscache_n_stores_oom;
49712 -extern atomic_t fscache_n_store_ops;
49713 -extern atomic_t fscache_n_store_calls;
49714 -extern atomic_t fscache_n_store_pages;
49715 -extern atomic_t fscache_n_store_radix_deletes;
49716 -extern atomic_t fscache_n_store_pages_over_limit;
49717 +extern atomic_unchecked_t fscache_n_stores;
49718 +extern atomic_unchecked_t fscache_n_stores_ok;
49719 +extern atomic_unchecked_t fscache_n_stores_again;
49720 +extern atomic_unchecked_t fscache_n_stores_nobufs;
49721 +extern atomic_unchecked_t fscache_n_stores_oom;
49722 +extern atomic_unchecked_t fscache_n_store_ops;
49723 +extern atomic_unchecked_t fscache_n_store_calls;
49724 +extern atomic_unchecked_t fscache_n_store_pages;
49725 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
49726 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
49727
49728 -extern atomic_t fscache_n_store_vmscan_not_storing;
49729 -extern atomic_t fscache_n_store_vmscan_gone;
49730 -extern atomic_t fscache_n_store_vmscan_busy;
49731 -extern atomic_t fscache_n_store_vmscan_cancelled;
49732 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
49733 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
49734 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
49735 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
49736
49737 -extern atomic_t fscache_n_marks;
49738 -extern atomic_t fscache_n_uncaches;
49739 +extern atomic_unchecked_t fscache_n_marks;
49740 +extern atomic_unchecked_t fscache_n_uncaches;
49741
49742 -extern atomic_t fscache_n_acquires;
49743 -extern atomic_t fscache_n_acquires_null;
49744 -extern atomic_t fscache_n_acquires_no_cache;
49745 -extern atomic_t fscache_n_acquires_ok;
49746 -extern atomic_t fscache_n_acquires_nobufs;
49747 -extern atomic_t fscache_n_acquires_oom;
49748 +extern atomic_unchecked_t fscache_n_acquires;
49749 +extern atomic_unchecked_t fscache_n_acquires_null;
49750 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
49751 +extern atomic_unchecked_t fscache_n_acquires_ok;
49752 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
49753 +extern atomic_unchecked_t fscache_n_acquires_oom;
49754
49755 -extern atomic_t fscache_n_updates;
49756 -extern atomic_t fscache_n_updates_null;
49757 -extern atomic_t fscache_n_updates_run;
49758 +extern atomic_unchecked_t fscache_n_updates;
49759 +extern atomic_unchecked_t fscache_n_updates_null;
49760 +extern atomic_unchecked_t fscache_n_updates_run;
49761
49762 -extern atomic_t fscache_n_relinquishes;
49763 -extern atomic_t fscache_n_relinquishes_null;
49764 -extern atomic_t fscache_n_relinquishes_waitcrt;
49765 -extern atomic_t fscache_n_relinquishes_retire;
49766 +extern atomic_unchecked_t fscache_n_relinquishes;
49767 +extern atomic_unchecked_t fscache_n_relinquishes_null;
49768 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
49769 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
49770
49771 -extern atomic_t fscache_n_cookie_index;
49772 -extern atomic_t fscache_n_cookie_data;
49773 -extern atomic_t fscache_n_cookie_special;
49774 +extern atomic_unchecked_t fscache_n_cookie_index;
49775 +extern atomic_unchecked_t fscache_n_cookie_data;
49776 +extern atomic_unchecked_t fscache_n_cookie_special;
49777
49778 -extern atomic_t fscache_n_object_alloc;
49779 -extern atomic_t fscache_n_object_no_alloc;
49780 -extern atomic_t fscache_n_object_lookups;
49781 -extern atomic_t fscache_n_object_lookups_negative;
49782 -extern atomic_t fscache_n_object_lookups_positive;
49783 -extern atomic_t fscache_n_object_lookups_timed_out;
49784 -extern atomic_t fscache_n_object_created;
49785 -extern atomic_t fscache_n_object_avail;
49786 -extern atomic_t fscache_n_object_dead;
49787 +extern atomic_unchecked_t fscache_n_object_alloc;
49788 +extern atomic_unchecked_t fscache_n_object_no_alloc;
49789 +extern atomic_unchecked_t fscache_n_object_lookups;
49790 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
49791 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
49792 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
49793 +extern atomic_unchecked_t fscache_n_object_created;
49794 +extern atomic_unchecked_t fscache_n_object_avail;
49795 +extern atomic_unchecked_t fscache_n_object_dead;
49796
49797 -extern atomic_t fscache_n_checkaux_none;
49798 -extern atomic_t fscache_n_checkaux_okay;
49799 -extern atomic_t fscache_n_checkaux_update;
49800 -extern atomic_t fscache_n_checkaux_obsolete;
49801 +extern atomic_unchecked_t fscache_n_checkaux_none;
49802 +extern atomic_unchecked_t fscache_n_checkaux_okay;
49803 +extern atomic_unchecked_t fscache_n_checkaux_update;
49804 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
49805
49806 extern atomic_t fscache_n_cop_alloc_object;
49807 extern atomic_t fscache_n_cop_lookup_object;
49808 @@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t *stat)
49809 atomic_inc(stat);
49810 }
49811
49812 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
49813 +{
49814 + atomic_inc_unchecked(stat);
49815 +}
49816 +
49817 static inline void fscache_stat_d(atomic_t *stat)
49818 {
49819 atomic_dec(stat);
49820 @@ -259,6 +264,7 @@ extern const struct file_operations fscache_stats_fops;
49821
49822 #define __fscache_stat(stat) (NULL)
49823 #define fscache_stat(stat) do {} while (0)
49824 +#define fscache_stat_unchecked(stat) do {} while (0)
49825 #define fscache_stat_d(stat) do {} while (0)
49826 #endif
49827
49828 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
49829 index e513ac5..e888d34 100644
49830 --- a/fs/fscache/object.c
49831 +++ b/fs/fscache/object.c
49832 @@ -144,7 +144,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
49833 /* update the object metadata on disk */
49834 case FSCACHE_OBJECT_UPDATING:
49835 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
49836 - fscache_stat(&fscache_n_updates_run);
49837 + fscache_stat_unchecked(&fscache_n_updates_run);
49838 fscache_stat(&fscache_n_cop_update_object);
49839 object->cache->ops->update_object(object);
49840 fscache_stat_d(&fscache_n_cop_update_object);
49841 @@ -233,7 +233,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
49842 spin_lock(&object->lock);
49843 object->state = FSCACHE_OBJECT_DEAD;
49844 spin_unlock(&object->lock);
49845 - fscache_stat(&fscache_n_object_dead);
49846 + fscache_stat_unchecked(&fscache_n_object_dead);
49847 goto terminal_transit;
49848
49849 /* handle the parent cache of this object being withdrawn from
49850 @@ -248,7 +248,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
49851 spin_lock(&object->lock);
49852 object->state = FSCACHE_OBJECT_DEAD;
49853 spin_unlock(&object->lock);
49854 - fscache_stat(&fscache_n_object_dead);
49855 + fscache_stat_unchecked(&fscache_n_object_dead);
49856 goto terminal_transit;
49857
49858 /* complain about the object being woken up once it is
49859 @@ -492,7 +492,7 @@ static void fscache_lookup_object(struct fscache_object *object)
49860 parent->cookie->def->name, cookie->def->name,
49861 object->cache->tag->name);
49862
49863 - fscache_stat(&fscache_n_object_lookups);
49864 + fscache_stat_unchecked(&fscache_n_object_lookups);
49865 fscache_stat(&fscache_n_cop_lookup_object);
49866 ret = object->cache->ops->lookup_object(object);
49867 fscache_stat_d(&fscache_n_cop_lookup_object);
49868 @@ -503,7 +503,7 @@ static void fscache_lookup_object(struct fscache_object *object)
49869 if (ret == -ETIMEDOUT) {
49870 /* probably stuck behind another object, so move this one to
49871 * the back of the queue */
49872 - fscache_stat(&fscache_n_object_lookups_timed_out);
49873 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
49874 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
49875 }
49876
49877 @@ -526,7 +526,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
49878
49879 spin_lock(&object->lock);
49880 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
49881 - fscache_stat(&fscache_n_object_lookups_negative);
49882 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
49883
49884 /* transit here to allow write requests to begin stacking up
49885 * and read requests to begin returning ENODATA */
49886 @@ -572,7 +572,7 @@ void fscache_obtained_object(struct fscache_object *object)
49887 * result, in which case there may be data available */
49888 spin_lock(&object->lock);
49889 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
49890 - fscache_stat(&fscache_n_object_lookups_positive);
49891 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
49892
49893 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
49894
49895 @@ -586,7 +586,7 @@ void fscache_obtained_object(struct fscache_object *object)
49896 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
49897 } else {
49898 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
49899 - fscache_stat(&fscache_n_object_created);
49900 + fscache_stat_unchecked(&fscache_n_object_created);
49901
49902 object->state = FSCACHE_OBJECT_AVAILABLE;
49903 spin_unlock(&object->lock);
49904 @@ -633,7 +633,7 @@ static void fscache_object_available(struct fscache_object *object)
49905 fscache_enqueue_dependents(object);
49906
49907 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
49908 - fscache_stat(&fscache_n_object_avail);
49909 + fscache_stat_unchecked(&fscache_n_object_avail);
49910
49911 _leave("");
49912 }
49913 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
49914 enum fscache_checkaux result;
49915
49916 if (!object->cookie->def->check_aux) {
49917 - fscache_stat(&fscache_n_checkaux_none);
49918 + fscache_stat_unchecked(&fscache_n_checkaux_none);
49919 return FSCACHE_CHECKAUX_OKAY;
49920 }
49921
49922 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
49923 switch (result) {
49924 /* entry okay as is */
49925 case FSCACHE_CHECKAUX_OKAY:
49926 - fscache_stat(&fscache_n_checkaux_okay);
49927 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
49928 break;
49929
49930 /* entry requires update */
49931 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
49932 - fscache_stat(&fscache_n_checkaux_update);
49933 + fscache_stat_unchecked(&fscache_n_checkaux_update);
49934 break;
49935
49936 /* entry requires deletion */
49937 case FSCACHE_CHECKAUX_OBSOLETE:
49938 - fscache_stat(&fscache_n_checkaux_obsolete);
49939 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
49940 break;
49941
49942 default:
49943 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
49944 index 313e79a..775240f 100644
49945 --- a/fs/fscache/operation.c
49946 +++ b/fs/fscache/operation.c
49947 @@ -16,7 +16,7 @@
49948 #include <linux/seq_file.h>
49949 #include "internal.h"
49950
49951 -atomic_t fscache_op_debug_id;
49952 +atomic_unchecked_t fscache_op_debug_id;
49953 EXPORT_SYMBOL(fscache_op_debug_id);
49954
49955 /**
49956 @@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
49957 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
49958 ASSERTCMP(atomic_read(&op->usage), >, 0);
49959
49960 - fscache_stat(&fscache_n_op_enqueue);
49961 + fscache_stat_unchecked(&fscache_n_op_enqueue);
49962 switch (op->flags & FSCACHE_OP_TYPE) {
49963 case FSCACHE_OP_FAST:
49964 _debug("queue fast");
49965 @@ -76,7 +76,7 @@ static void fscache_run_op(struct fscache_object *object,
49966 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
49967 if (op->processor)
49968 fscache_enqueue_operation(op);
49969 - fscache_stat(&fscache_n_op_run);
49970 + fscache_stat_unchecked(&fscache_n_op_run);
49971 }
49972
49973 /*
49974 @@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
49975 if (object->n_ops > 0) {
49976 atomic_inc(&op->usage);
49977 list_add_tail(&op->pend_link, &object->pending_ops);
49978 - fscache_stat(&fscache_n_op_pend);
49979 + fscache_stat_unchecked(&fscache_n_op_pend);
49980 } else if (!list_empty(&object->pending_ops)) {
49981 atomic_inc(&op->usage);
49982 list_add_tail(&op->pend_link, &object->pending_ops);
49983 - fscache_stat(&fscache_n_op_pend);
49984 + fscache_stat_unchecked(&fscache_n_op_pend);
49985 fscache_start_operations(object);
49986 } else {
49987 ASSERTCMP(object->n_in_progress, ==, 0);
49988 @@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
49989 object->n_exclusive++; /* reads and writes must wait */
49990 atomic_inc(&op->usage);
49991 list_add_tail(&op->pend_link, &object->pending_ops);
49992 - fscache_stat(&fscache_n_op_pend);
49993 + fscache_stat_unchecked(&fscache_n_op_pend);
49994 ret = 0;
49995 } else {
49996 /* not allowed to submit ops in any other state */
49997 @@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_object *object,
49998 if (object->n_exclusive > 0) {
49999 atomic_inc(&op->usage);
50000 list_add_tail(&op->pend_link, &object->pending_ops);
50001 - fscache_stat(&fscache_n_op_pend);
50002 + fscache_stat_unchecked(&fscache_n_op_pend);
50003 } else if (!list_empty(&object->pending_ops)) {
50004 atomic_inc(&op->usage);
50005 list_add_tail(&op->pend_link, &object->pending_ops);
50006 - fscache_stat(&fscache_n_op_pend);
50007 + fscache_stat_unchecked(&fscache_n_op_pend);
50008 fscache_start_operations(object);
50009 } else {
50010 ASSERTCMP(object->n_exclusive, ==, 0);
50011 @@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_object *object,
50012 object->n_ops++;
50013 atomic_inc(&op->usage);
50014 list_add_tail(&op->pend_link, &object->pending_ops);
50015 - fscache_stat(&fscache_n_op_pend);
50016 + fscache_stat_unchecked(&fscache_n_op_pend);
50017 ret = 0;
50018 } else if (object->state == FSCACHE_OBJECT_DYING ||
50019 object->state == FSCACHE_OBJECT_LC_DYING ||
50020 object->state == FSCACHE_OBJECT_WITHDRAWING) {
50021 - fscache_stat(&fscache_n_op_rejected);
50022 + fscache_stat_unchecked(&fscache_n_op_rejected);
50023 ret = -ENOBUFS;
50024 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
50025 fscache_report_unexpected_submission(object, op, ostate);
50026 @@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_operation *op)
50027
50028 ret = -EBUSY;
50029 if (!list_empty(&op->pend_link)) {
50030 - fscache_stat(&fscache_n_op_cancelled);
50031 + fscache_stat_unchecked(&fscache_n_op_cancelled);
50032 list_del_init(&op->pend_link);
50033 object->n_ops--;
50034 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
50035 @@ -344,7 +344,7 @@ void fscache_put_operation(struct fscache_operation *op)
50036 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
50037 BUG();
50038
50039 - fscache_stat(&fscache_n_op_release);
50040 + fscache_stat_unchecked(&fscache_n_op_release);
50041
50042 if (op->release) {
50043 op->release(op);
50044 @@ -361,7 +361,7 @@ void fscache_put_operation(struct fscache_operation *op)
50045 * lock, and defer it otherwise */
50046 if (!spin_trylock(&object->lock)) {
50047 _debug("defer put");
50048 - fscache_stat(&fscache_n_op_deferred_release);
50049 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
50050
50051 cache = object->cache;
50052 spin_lock(&cache->op_gc_list_lock);
50053 @@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_struct *work)
50054
50055 _debug("GC DEFERRED REL OBJ%x OP%x",
50056 object->debug_id, op->debug_id);
50057 - fscache_stat(&fscache_n_op_gc);
50058 + fscache_stat_unchecked(&fscache_n_op_gc);
50059
50060 ASSERTCMP(atomic_read(&op->usage), ==, 0);
50061
50062 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
50063 index c598ea4..6aac13e 100644
50064 --- a/fs/fscache/page.c
50065 +++ b/fs/fscache/page.c
50066 @@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
50067 val = radix_tree_lookup(&cookie->stores, page->index);
50068 if (!val) {
50069 rcu_read_unlock();
50070 - fscache_stat(&fscache_n_store_vmscan_not_storing);
50071 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
50072 __fscache_uncache_page(cookie, page);
50073 return true;
50074 }
50075 @@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
50076 spin_unlock(&cookie->stores_lock);
50077
50078 if (xpage) {
50079 - fscache_stat(&fscache_n_store_vmscan_cancelled);
50080 - fscache_stat(&fscache_n_store_radix_deletes);
50081 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
50082 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
50083 ASSERTCMP(xpage, ==, page);
50084 } else {
50085 - fscache_stat(&fscache_n_store_vmscan_gone);
50086 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
50087 }
50088
50089 wake_up_bit(&cookie->flags, 0);
50090 @@ -106,7 +106,7 @@ page_busy:
50091 /* we might want to wait here, but that could deadlock the allocator as
50092 * the slow-work threads writing to the cache may all end up sleeping
50093 * on memory allocation */
50094 - fscache_stat(&fscache_n_store_vmscan_busy);
50095 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
50096 return false;
50097 }
50098 EXPORT_SYMBOL(__fscache_maybe_release_page);
50099 @@ -130,7 +130,7 @@ static void fscache_end_page_write(struct fscache_object *object,
50100 FSCACHE_COOKIE_STORING_TAG);
50101 if (!radix_tree_tag_get(&cookie->stores, page->index,
50102 FSCACHE_COOKIE_PENDING_TAG)) {
50103 - fscache_stat(&fscache_n_store_radix_deletes);
50104 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
50105 xpage = radix_tree_delete(&cookie->stores, page->index);
50106 }
50107 spin_unlock(&cookie->stores_lock);
50108 @@ -151,7 +151,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
50109
50110 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
50111
50112 - fscache_stat(&fscache_n_attr_changed_calls);
50113 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
50114
50115 if (fscache_object_is_active(object)) {
50116 fscache_set_op_state(op, "CallFS");
50117 @@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
50118
50119 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
50120
50121 - fscache_stat(&fscache_n_attr_changed);
50122 + fscache_stat_unchecked(&fscache_n_attr_changed);
50123
50124 op = kzalloc(sizeof(*op), GFP_KERNEL);
50125 if (!op) {
50126 - fscache_stat(&fscache_n_attr_changed_nomem);
50127 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
50128 _leave(" = -ENOMEM");
50129 return -ENOMEM;
50130 }
50131 @@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
50132 if (fscache_submit_exclusive_op(object, op) < 0)
50133 goto nobufs;
50134 spin_unlock(&cookie->lock);
50135 - fscache_stat(&fscache_n_attr_changed_ok);
50136 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
50137 fscache_put_operation(op);
50138 _leave(" = 0");
50139 return 0;
50140 @@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
50141 nobufs:
50142 spin_unlock(&cookie->lock);
50143 kfree(op);
50144 - fscache_stat(&fscache_n_attr_changed_nobufs);
50145 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
50146 _leave(" = %d", -ENOBUFS);
50147 return -ENOBUFS;
50148 }
50149 @@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
50150 /* allocate a retrieval operation and attempt to submit it */
50151 op = kzalloc(sizeof(*op), GFP_NOIO);
50152 if (!op) {
50153 - fscache_stat(&fscache_n_retrievals_nomem);
50154 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
50155 return NULL;
50156 }
50157
50158 @@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
50159 return 0;
50160 }
50161
50162 - fscache_stat(&fscache_n_retrievals_wait);
50163 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
50164
50165 jif = jiffies;
50166 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
50167 fscache_wait_bit_interruptible,
50168 TASK_INTERRUPTIBLE) != 0) {
50169 - fscache_stat(&fscache_n_retrievals_intr);
50170 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
50171 _leave(" = -ERESTARTSYS");
50172 return -ERESTARTSYS;
50173 }
50174 @@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
50175 */
50176 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
50177 struct fscache_retrieval *op,
50178 - atomic_t *stat_op_waits,
50179 - atomic_t *stat_object_dead)
50180 + atomic_unchecked_t *stat_op_waits,
50181 + atomic_unchecked_t *stat_object_dead)
50182 {
50183 int ret;
50184
50185 @@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
50186 goto check_if_dead;
50187
50188 _debug(">>> WT");
50189 - fscache_stat(stat_op_waits);
50190 + fscache_stat_unchecked(stat_op_waits);
50191 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
50192 fscache_wait_bit_interruptible,
50193 TASK_INTERRUPTIBLE) < 0) {
50194 @@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
50195
50196 check_if_dead:
50197 if (unlikely(fscache_object_is_dead(object))) {
50198 - fscache_stat(stat_object_dead);
50199 + fscache_stat_unchecked(stat_object_dead);
50200 return -ENOBUFS;
50201 }
50202 return 0;
50203 @@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
50204
50205 _enter("%p,%p,,,", cookie, page);
50206
50207 - fscache_stat(&fscache_n_retrievals);
50208 + fscache_stat_unchecked(&fscache_n_retrievals);
50209
50210 if (hlist_empty(&cookie->backing_objects))
50211 goto nobufs;
50212 @@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
50213 goto nobufs_unlock;
50214 spin_unlock(&cookie->lock);
50215
50216 - fscache_stat(&fscache_n_retrieval_ops);
50217 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
50218
50219 /* pin the netfs read context in case we need to do the actual netfs
50220 * read because we've encountered a cache read failure */
50221 @@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
50222
50223 error:
50224 if (ret == -ENOMEM)
50225 - fscache_stat(&fscache_n_retrievals_nomem);
50226 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
50227 else if (ret == -ERESTARTSYS)
50228 - fscache_stat(&fscache_n_retrievals_intr);
50229 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
50230 else if (ret == -ENODATA)
50231 - fscache_stat(&fscache_n_retrievals_nodata);
50232 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
50233 else if (ret < 0)
50234 - fscache_stat(&fscache_n_retrievals_nobufs);
50235 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50236 else
50237 - fscache_stat(&fscache_n_retrievals_ok);
50238 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
50239
50240 fscache_put_retrieval(op);
50241 _leave(" = %d", ret);
50242 @@ -453,7 +453,7 @@ nobufs_unlock:
50243 spin_unlock(&cookie->lock);
50244 kfree(op);
50245 nobufs:
50246 - fscache_stat(&fscache_n_retrievals_nobufs);
50247 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50248 _leave(" = -ENOBUFS");
50249 return -ENOBUFS;
50250 }
50251 @@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
50252
50253 _enter("%p,,%d,,,", cookie, *nr_pages);
50254
50255 - fscache_stat(&fscache_n_retrievals);
50256 + fscache_stat_unchecked(&fscache_n_retrievals);
50257
50258 if (hlist_empty(&cookie->backing_objects))
50259 goto nobufs;
50260 @@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
50261 goto nobufs_unlock;
50262 spin_unlock(&cookie->lock);
50263
50264 - fscache_stat(&fscache_n_retrieval_ops);
50265 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
50266
50267 /* pin the netfs read context in case we need to do the actual netfs
50268 * read because we've encountered a cache read failure */
50269 @@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
50270
50271 error:
50272 if (ret == -ENOMEM)
50273 - fscache_stat(&fscache_n_retrievals_nomem);
50274 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
50275 else if (ret == -ERESTARTSYS)
50276 - fscache_stat(&fscache_n_retrievals_intr);
50277 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
50278 else if (ret == -ENODATA)
50279 - fscache_stat(&fscache_n_retrievals_nodata);
50280 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
50281 else if (ret < 0)
50282 - fscache_stat(&fscache_n_retrievals_nobufs);
50283 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50284 else
50285 - fscache_stat(&fscache_n_retrievals_ok);
50286 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
50287
50288 fscache_put_retrieval(op);
50289 _leave(" = %d", ret);
50290 @@ -570,7 +570,7 @@ nobufs_unlock:
50291 spin_unlock(&cookie->lock);
50292 kfree(op);
50293 nobufs:
50294 - fscache_stat(&fscache_n_retrievals_nobufs);
50295 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50296 _leave(" = -ENOBUFS");
50297 return -ENOBUFS;
50298 }
50299 @@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50300
50301 _enter("%p,%p,,,", cookie, page);
50302
50303 - fscache_stat(&fscache_n_allocs);
50304 + fscache_stat_unchecked(&fscache_n_allocs);
50305
50306 if (hlist_empty(&cookie->backing_objects))
50307 goto nobufs;
50308 @@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50309 goto nobufs_unlock;
50310 spin_unlock(&cookie->lock);
50311
50312 - fscache_stat(&fscache_n_alloc_ops);
50313 + fscache_stat_unchecked(&fscache_n_alloc_ops);
50314
50315 ret = fscache_wait_for_retrieval_activation(
50316 object, op,
50317 @@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50318
50319 error:
50320 if (ret == -ERESTARTSYS)
50321 - fscache_stat(&fscache_n_allocs_intr);
50322 + fscache_stat_unchecked(&fscache_n_allocs_intr);
50323 else if (ret < 0)
50324 - fscache_stat(&fscache_n_allocs_nobufs);
50325 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
50326 else
50327 - fscache_stat(&fscache_n_allocs_ok);
50328 + fscache_stat_unchecked(&fscache_n_allocs_ok);
50329
50330 fscache_put_retrieval(op);
50331 _leave(" = %d", ret);
50332 @@ -651,7 +651,7 @@ nobufs_unlock:
50333 spin_unlock(&cookie->lock);
50334 kfree(op);
50335 nobufs:
50336 - fscache_stat(&fscache_n_allocs_nobufs);
50337 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
50338 _leave(" = -ENOBUFS");
50339 return -ENOBUFS;
50340 }
50341 @@ -694,7 +694,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50342
50343 spin_lock(&cookie->stores_lock);
50344
50345 - fscache_stat(&fscache_n_store_calls);
50346 + fscache_stat_unchecked(&fscache_n_store_calls);
50347
50348 /* find a page to store */
50349 page = NULL;
50350 @@ -705,7 +705,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50351 page = results[0];
50352 _debug("gang %d [%lx]", n, page->index);
50353 if (page->index > op->store_limit) {
50354 - fscache_stat(&fscache_n_store_pages_over_limit);
50355 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
50356 goto superseded;
50357 }
50358
50359 @@ -721,7 +721,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50360
50361 if (page) {
50362 fscache_set_op_state(&op->op, "Store");
50363 - fscache_stat(&fscache_n_store_pages);
50364 + fscache_stat_unchecked(&fscache_n_store_pages);
50365 fscache_stat(&fscache_n_cop_write_page);
50366 ret = object->cache->ops->write_page(op, page);
50367 fscache_stat_d(&fscache_n_cop_write_page);
50368 @@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50369 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
50370 ASSERT(PageFsCache(page));
50371
50372 - fscache_stat(&fscache_n_stores);
50373 + fscache_stat_unchecked(&fscache_n_stores);
50374
50375 op = kzalloc(sizeof(*op), GFP_NOIO);
50376 if (!op)
50377 @@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50378 spin_unlock(&cookie->stores_lock);
50379 spin_unlock(&object->lock);
50380
50381 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
50382 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
50383 op->store_limit = object->store_limit;
50384
50385 if (fscache_submit_op(object, &op->op) < 0)
50386 @@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50387
50388 spin_unlock(&cookie->lock);
50389 radix_tree_preload_end();
50390 - fscache_stat(&fscache_n_store_ops);
50391 - fscache_stat(&fscache_n_stores_ok);
50392 + fscache_stat_unchecked(&fscache_n_store_ops);
50393 + fscache_stat_unchecked(&fscache_n_stores_ok);
50394
50395 /* the slow work queue now carries its own ref on the object */
50396 fscache_put_operation(&op->op);
50397 @@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50398 return 0;
50399
50400 already_queued:
50401 - fscache_stat(&fscache_n_stores_again);
50402 + fscache_stat_unchecked(&fscache_n_stores_again);
50403 already_pending:
50404 spin_unlock(&cookie->stores_lock);
50405 spin_unlock(&object->lock);
50406 spin_unlock(&cookie->lock);
50407 radix_tree_preload_end();
50408 kfree(op);
50409 - fscache_stat(&fscache_n_stores_ok);
50410 + fscache_stat_unchecked(&fscache_n_stores_ok);
50411 _leave(" = 0");
50412 return 0;
50413
50414 @@ -886,14 +886,14 @@ nobufs:
50415 spin_unlock(&cookie->lock);
50416 radix_tree_preload_end();
50417 kfree(op);
50418 - fscache_stat(&fscache_n_stores_nobufs);
50419 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
50420 _leave(" = -ENOBUFS");
50421 return -ENOBUFS;
50422
50423 nomem_free:
50424 kfree(op);
50425 nomem:
50426 - fscache_stat(&fscache_n_stores_oom);
50427 + fscache_stat_unchecked(&fscache_n_stores_oom);
50428 _leave(" = -ENOMEM");
50429 return -ENOMEM;
50430 }
50431 @@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
50432 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
50433 ASSERTCMP(page, !=, NULL);
50434
50435 - fscache_stat(&fscache_n_uncaches);
50436 + fscache_stat_unchecked(&fscache_n_uncaches);
50437
50438 /* cache withdrawal may beat us to it */
50439 if (!PageFsCache(page))
50440 @@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
50441 unsigned long loop;
50442
50443 #ifdef CONFIG_FSCACHE_STATS
50444 - atomic_add(pagevec->nr, &fscache_n_marks);
50445 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
50446 #endif
50447
50448 for (loop = 0; loop < pagevec->nr; loop++) {
50449 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
50450 index 46435f3..8cddf18 100644
50451 --- a/fs/fscache/stats.c
50452 +++ b/fs/fscache/stats.c
50453 @@ -18,95 +18,95 @@
50454 /*
50455 * operation counters
50456 */
50457 -atomic_t fscache_n_op_pend;
50458 -atomic_t fscache_n_op_run;
50459 -atomic_t fscache_n_op_enqueue;
50460 -atomic_t fscache_n_op_requeue;
50461 -atomic_t fscache_n_op_deferred_release;
50462 -atomic_t fscache_n_op_release;
50463 -atomic_t fscache_n_op_gc;
50464 -atomic_t fscache_n_op_cancelled;
50465 -atomic_t fscache_n_op_rejected;
50466 +atomic_unchecked_t fscache_n_op_pend;
50467 +atomic_unchecked_t fscache_n_op_run;
50468 +atomic_unchecked_t fscache_n_op_enqueue;
50469 +atomic_unchecked_t fscache_n_op_requeue;
50470 +atomic_unchecked_t fscache_n_op_deferred_release;
50471 +atomic_unchecked_t fscache_n_op_release;
50472 +atomic_unchecked_t fscache_n_op_gc;
50473 +atomic_unchecked_t fscache_n_op_cancelled;
50474 +atomic_unchecked_t fscache_n_op_rejected;
50475
50476 -atomic_t fscache_n_attr_changed;
50477 -atomic_t fscache_n_attr_changed_ok;
50478 -atomic_t fscache_n_attr_changed_nobufs;
50479 -atomic_t fscache_n_attr_changed_nomem;
50480 -atomic_t fscache_n_attr_changed_calls;
50481 +atomic_unchecked_t fscache_n_attr_changed;
50482 +atomic_unchecked_t fscache_n_attr_changed_ok;
50483 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
50484 +atomic_unchecked_t fscache_n_attr_changed_nomem;
50485 +atomic_unchecked_t fscache_n_attr_changed_calls;
50486
50487 -atomic_t fscache_n_allocs;
50488 -atomic_t fscache_n_allocs_ok;
50489 -atomic_t fscache_n_allocs_wait;
50490 -atomic_t fscache_n_allocs_nobufs;
50491 -atomic_t fscache_n_allocs_intr;
50492 -atomic_t fscache_n_allocs_object_dead;
50493 -atomic_t fscache_n_alloc_ops;
50494 -atomic_t fscache_n_alloc_op_waits;
50495 +atomic_unchecked_t fscache_n_allocs;
50496 +atomic_unchecked_t fscache_n_allocs_ok;
50497 +atomic_unchecked_t fscache_n_allocs_wait;
50498 +atomic_unchecked_t fscache_n_allocs_nobufs;
50499 +atomic_unchecked_t fscache_n_allocs_intr;
50500 +atomic_unchecked_t fscache_n_allocs_object_dead;
50501 +atomic_unchecked_t fscache_n_alloc_ops;
50502 +atomic_unchecked_t fscache_n_alloc_op_waits;
50503
50504 -atomic_t fscache_n_retrievals;
50505 -atomic_t fscache_n_retrievals_ok;
50506 -atomic_t fscache_n_retrievals_wait;
50507 -atomic_t fscache_n_retrievals_nodata;
50508 -atomic_t fscache_n_retrievals_nobufs;
50509 -atomic_t fscache_n_retrievals_intr;
50510 -atomic_t fscache_n_retrievals_nomem;
50511 -atomic_t fscache_n_retrievals_object_dead;
50512 -atomic_t fscache_n_retrieval_ops;
50513 -atomic_t fscache_n_retrieval_op_waits;
50514 +atomic_unchecked_t fscache_n_retrievals;
50515 +atomic_unchecked_t fscache_n_retrievals_ok;
50516 +atomic_unchecked_t fscache_n_retrievals_wait;
50517 +atomic_unchecked_t fscache_n_retrievals_nodata;
50518 +atomic_unchecked_t fscache_n_retrievals_nobufs;
50519 +atomic_unchecked_t fscache_n_retrievals_intr;
50520 +atomic_unchecked_t fscache_n_retrievals_nomem;
50521 +atomic_unchecked_t fscache_n_retrievals_object_dead;
50522 +atomic_unchecked_t fscache_n_retrieval_ops;
50523 +atomic_unchecked_t fscache_n_retrieval_op_waits;
50524
50525 -atomic_t fscache_n_stores;
50526 -atomic_t fscache_n_stores_ok;
50527 -atomic_t fscache_n_stores_again;
50528 -atomic_t fscache_n_stores_nobufs;
50529 -atomic_t fscache_n_stores_oom;
50530 -atomic_t fscache_n_store_ops;
50531 -atomic_t fscache_n_store_calls;
50532 -atomic_t fscache_n_store_pages;
50533 -atomic_t fscache_n_store_radix_deletes;
50534 -atomic_t fscache_n_store_pages_over_limit;
50535 +atomic_unchecked_t fscache_n_stores;
50536 +atomic_unchecked_t fscache_n_stores_ok;
50537 +atomic_unchecked_t fscache_n_stores_again;
50538 +atomic_unchecked_t fscache_n_stores_nobufs;
50539 +atomic_unchecked_t fscache_n_stores_oom;
50540 +atomic_unchecked_t fscache_n_store_ops;
50541 +atomic_unchecked_t fscache_n_store_calls;
50542 +atomic_unchecked_t fscache_n_store_pages;
50543 +atomic_unchecked_t fscache_n_store_radix_deletes;
50544 +atomic_unchecked_t fscache_n_store_pages_over_limit;
50545
50546 -atomic_t fscache_n_store_vmscan_not_storing;
50547 -atomic_t fscache_n_store_vmscan_gone;
50548 -atomic_t fscache_n_store_vmscan_busy;
50549 -atomic_t fscache_n_store_vmscan_cancelled;
50550 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
50551 +atomic_unchecked_t fscache_n_store_vmscan_gone;
50552 +atomic_unchecked_t fscache_n_store_vmscan_busy;
50553 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
50554
50555 -atomic_t fscache_n_marks;
50556 -atomic_t fscache_n_uncaches;
50557 +atomic_unchecked_t fscache_n_marks;
50558 +atomic_unchecked_t fscache_n_uncaches;
50559
50560 -atomic_t fscache_n_acquires;
50561 -atomic_t fscache_n_acquires_null;
50562 -atomic_t fscache_n_acquires_no_cache;
50563 -atomic_t fscache_n_acquires_ok;
50564 -atomic_t fscache_n_acquires_nobufs;
50565 -atomic_t fscache_n_acquires_oom;
50566 +atomic_unchecked_t fscache_n_acquires;
50567 +atomic_unchecked_t fscache_n_acquires_null;
50568 +atomic_unchecked_t fscache_n_acquires_no_cache;
50569 +atomic_unchecked_t fscache_n_acquires_ok;
50570 +atomic_unchecked_t fscache_n_acquires_nobufs;
50571 +atomic_unchecked_t fscache_n_acquires_oom;
50572
50573 -atomic_t fscache_n_updates;
50574 -atomic_t fscache_n_updates_null;
50575 -atomic_t fscache_n_updates_run;
50576 +atomic_unchecked_t fscache_n_updates;
50577 +atomic_unchecked_t fscache_n_updates_null;
50578 +atomic_unchecked_t fscache_n_updates_run;
50579
50580 -atomic_t fscache_n_relinquishes;
50581 -atomic_t fscache_n_relinquishes_null;
50582 -atomic_t fscache_n_relinquishes_waitcrt;
50583 -atomic_t fscache_n_relinquishes_retire;
50584 +atomic_unchecked_t fscache_n_relinquishes;
50585 +atomic_unchecked_t fscache_n_relinquishes_null;
50586 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
50587 +atomic_unchecked_t fscache_n_relinquishes_retire;
50588
50589 -atomic_t fscache_n_cookie_index;
50590 -atomic_t fscache_n_cookie_data;
50591 -atomic_t fscache_n_cookie_special;
50592 +atomic_unchecked_t fscache_n_cookie_index;
50593 +atomic_unchecked_t fscache_n_cookie_data;
50594 +atomic_unchecked_t fscache_n_cookie_special;
50595
50596 -atomic_t fscache_n_object_alloc;
50597 -atomic_t fscache_n_object_no_alloc;
50598 -atomic_t fscache_n_object_lookups;
50599 -atomic_t fscache_n_object_lookups_negative;
50600 -atomic_t fscache_n_object_lookups_positive;
50601 -atomic_t fscache_n_object_lookups_timed_out;
50602 -atomic_t fscache_n_object_created;
50603 -atomic_t fscache_n_object_avail;
50604 -atomic_t fscache_n_object_dead;
50605 +atomic_unchecked_t fscache_n_object_alloc;
50606 +atomic_unchecked_t fscache_n_object_no_alloc;
50607 +atomic_unchecked_t fscache_n_object_lookups;
50608 +atomic_unchecked_t fscache_n_object_lookups_negative;
50609 +atomic_unchecked_t fscache_n_object_lookups_positive;
50610 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
50611 +atomic_unchecked_t fscache_n_object_created;
50612 +atomic_unchecked_t fscache_n_object_avail;
50613 +atomic_unchecked_t fscache_n_object_dead;
50614
50615 -atomic_t fscache_n_checkaux_none;
50616 -atomic_t fscache_n_checkaux_okay;
50617 -atomic_t fscache_n_checkaux_update;
50618 -atomic_t fscache_n_checkaux_obsolete;
50619 +atomic_unchecked_t fscache_n_checkaux_none;
50620 +atomic_unchecked_t fscache_n_checkaux_okay;
50621 +atomic_unchecked_t fscache_n_checkaux_update;
50622 +atomic_unchecked_t fscache_n_checkaux_obsolete;
50623
50624 atomic_t fscache_n_cop_alloc_object;
50625 atomic_t fscache_n_cop_lookup_object;
50626 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
50627 seq_puts(m, "FS-Cache statistics\n");
50628
50629 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
50630 - atomic_read(&fscache_n_cookie_index),
50631 - atomic_read(&fscache_n_cookie_data),
50632 - atomic_read(&fscache_n_cookie_special));
50633 + atomic_read_unchecked(&fscache_n_cookie_index),
50634 + atomic_read_unchecked(&fscache_n_cookie_data),
50635 + atomic_read_unchecked(&fscache_n_cookie_special));
50636
50637 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
50638 - atomic_read(&fscache_n_object_alloc),
50639 - atomic_read(&fscache_n_object_no_alloc),
50640 - atomic_read(&fscache_n_object_avail),
50641 - atomic_read(&fscache_n_object_dead));
50642 + atomic_read_unchecked(&fscache_n_object_alloc),
50643 + atomic_read_unchecked(&fscache_n_object_no_alloc),
50644 + atomic_read_unchecked(&fscache_n_object_avail),
50645 + atomic_read_unchecked(&fscache_n_object_dead));
50646 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
50647 - atomic_read(&fscache_n_checkaux_none),
50648 - atomic_read(&fscache_n_checkaux_okay),
50649 - atomic_read(&fscache_n_checkaux_update),
50650 - atomic_read(&fscache_n_checkaux_obsolete));
50651 + atomic_read_unchecked(&fscache_n_checkaux_none),
50652 + atomic_read_unchecked(&fscache_n_checkaux_okay),
50653 + atomic_read_unchecked(&fscache_n_checkaux_update),
50654 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
50655
50656 seq_printf(m, "Pages : mrk=%u unc=%u\n",
50657 - atomic_read(&fscache_n_marks),
50658 - atomic_read(&fscache_n_uncaches));
50659 + atomic_read_unchecked(&fscache_n_marks),
50660 + atomic_read_unchecked(&fscache_n_uncaches));
50661
50662 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
50663 " oom=%u\n",
50664 - atomic_read(&fscache_n_acquires),
50665 - atomic_read(&fscache_n_acquires_null),
50666 - atomic_read(&fscache_n_acquires_no_cache),
50667 - atomic_read(&fscache_n_acquires_ok),
50668 - atomic_read(&fscache_n_acquires_nobufs),
50669 - atomic_read(&fscache_n_acquires_oom));
50670 + atomic_read_unchecked(&fscache_n_acquires),
50671 + atomic_read_unchecked(&fscache_n_acquires_null),
50672 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
50673 + atomic_read_unchecked(&fscache_n_acquires_ok),
50674 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
50675 + atomic_read_unchecked(&fscache_n_acquires_oom));
50676
50677 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
50678 - atomic_read(&fscache_n_object_lookups),
50679 - atomic_read(&fscache_n_object_lookups_negative),
50680 - atomic_read(&fscache_n_object_lookups_positive),
50681 - atomic_read(&fscache_n_object_lookups_timed_out),
50682 - atomic_read(&fscache_n_object_created));
50683 + atomic_read_unchecked(&fscache_n_object_lookups),
50684 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
50685 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
50686 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
50687 + atomic_read_unchecked(&fscache_n_object_created));
50688
50689 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
50690 - atomic_read(&fscache_n_updates),
50691 - atomic_read(&fscache_n_updates_null),
50692 - atomic_read(&fscache_n_updates_run));
50693 + atomic_read_unchecked(&fscache_n_updates),
50694 + atomic_read_unchecked(&fscache_n_updates_null),
50695 + atomic_read_unchecked(&fscache_n_updates_run));
50696
50697 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
50698 - atomic_read(&fscache_n_relinquishes),
50699 - atomic_read(&fscache_n_relinquishes_null),
50700 - atomic_read(&fscache_n_relinquishes_waitcrt),
50701 - atomic_read(&fscache_n_relinquishes_retire));
50702 + atomic_read_unchecked(&fscache_n_relinquishes),
50703 + atomic_read_unchecked(&fscache_n_relinquishes_null),
50704 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
50705 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
50706
50707 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
50708 - atomic_read(&fscache_n_attr_changed),
50709 - atomic_read(&fscache_n_attr_changed_ok),
50710 - atomic_read(&fscache_n_attr_changed_nobufs),
50711 - atomic_read(&fscache_n_attr_changed_nomem),
50712 - atomic_read(&fscache_n_attr_changed_calls));
50713 + atomic_read_unchecked(&fscache_n_attr_changed),
50714 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
50715 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
50716 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
50717 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
50718
50719 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
50720 - atomic_read(&fscache_n_allocs),
50721 - atomic_read(&fscache_n_allocs_ok),
50722 - atomic_read(&fscache_n_allocs_wait),
50723 - atomic_read(&fscache_n_allocs_nobufs),
50724 - atomic_read(&fscache_n_allocs_intr));
50725 + atomic_read_unchecked(&fscache_n_allocs),
50726 + atomic_read_unchecked(&fscache_n_allocs_ok),
50727 + atomic_read_unchecked(&fscache_n_allocs_wait),
50728 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
50729 + atomic_read_unchecked(&fscache_n_allocs_intr));
50730 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
50731 - atomic_read(&fscache_n_alloc_ops),
50732 - atomic_read(&fscache_n_alloc_op_waits),
50733 - atomic_read(&fscache_n_allocs_object_dead));
50734 + atomic_read_unchecked(&fscache_n_alloc_ops),
50735 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
50736 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
50737
50738 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
50739 " int=%u oom=%u\n",
50740 - atomic_read(&fscache_n_retrievals),
50741 - atomic_read(&fscache_n_retrievals_ok),
50742 - atomic_read(&fscache_n_retrievals_wait),
50743 - atomic_read(&fscache_n_retrievals_nodata),
50744 - atomic_read(&fscache_n_retrievals_nobufs),
50745 - atomic_read(&fscache_n_retrievals_intr),
50746 - atomic_read(&fscache_n_retrievals_nomem));
50747 + atomic_read_unchecked(&fscache_n_retrievals),
50748 + atomic_read_unchecked(&fscache_n_retrievals_ok),
50749 + atomic_read_unchecked(&fscache_n_retrievals_wait),
50750 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
50751 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
50752 + atomic_read_unchecked(&fscache_n_retrievals_intr),
50753 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
50754 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
50755 - atomic_read(&fscache_n_retrieval_ops),
50756 - atomic_read(&fscache_n_retrieval_op_waits),
50757 - atomic_read(&fscache_n_retrievals_object_dead));
50758 + atomic_read_unchecked(&fscache_n_retrieval_ops),
50759 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
50760 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
50761
50762 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
50763 - atomic_read(&fscache_n_stores),
50764 - atomic_read(&fscache_n_stores_ok),
50765 - atomic_read(&fscache_n_stores_again),
50766 - atomic_read(&fscache_n_stores_nobufs),
50767 - atomic_read(&fscache_n_stores_oom));
50768 + atomic_read_unchecked(&fscache_n_stores),
50769 + atomic_read_unchecked(&fscache_n_stores_ok),
50770 + atomic_read_unchecked(&fscache_n_stores_again),
50771 + atomic_read_unchecked(&fscache_n_stores_nobufs),
50772 + atomic_read_unchecked(&fscache_n_stores_oom));
50773 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
50774 - atomic_read(&fscache_n_store_ops),
50775 - atomic_read(&fscache_n_store_calls),
50776 - atomic_read(&fscache_n_store_pages),
50777 - atomic_read(&fscache_n_store_radix_deletes),
50778 - atomic_read(&fscache_n_store_pages_over_limit));
50779 + atomic_read_unchecked(&fscache_n_store_ops),
50780 + atomic_read_unchecked(&fscache_n_store_calls),
50781 + atomic_read_unchecked(&fscache_n_store_pages),
50782 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
50783 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
50784
50785 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
50786 - atomic_read(&fscache_n_store_vmscan_not_storing),
50787 - atomic_read(&fscache_n_store_vmscan_gone),
50788 - atomic_read(&fscache_n_store_vmscan_busy),
50789 - atomic_read(&fscache_n_store_vmscan_cancelled));
50790 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
50791 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
50792 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
50793 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
50794
50795 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
50796 - atomic_read(&fscache_n_op_pend),
50797 - atomic_read(&fscache_n_op_run),
50798 - atomic_read(&fscache_n_op_enqueue),
50799 - atomic_read(&fscache_n_op_cancelled),
50800 - atomic_read(&fscache_n_op_rejected));
50801 + atomic_read_unchecked(&fscache_n_op_pend),
50802 + atomic_read_unchecked(&fscache_n_op_run),
50803 + atomic_read_unchecked(&fscache_n_op_enqueue),
50804 + atomic_read_unchecked(&fscache_n_op_cancelled),
50805 + atomic_read_unchecked(&fscache_n_op_rejected));
50806 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
50807 - atomic_read(&fscache_n_op_deferred_release),
50808 - atomic_read(&fscache_n_op_release),
50809 - atomic_read(&fscache_n_op_gc));
50810 + atomic_read_unchecked(&fscache_n_op_deferred_release),
50811 + atomic_read_unchecked(&fscache_n_op_release),
50812 + atomic_read_unchecked(&fscache_n_op_gc));
50813
50814 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
50815 atomic_read(&fscache_n_cop_alloc_object),
50816 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
50817 index de792dc..448b532 100644
50818 --- a/fs/fuse/cuse.c
50819 +++ b/fs/fuse/cuse.c
50820 @@ -576,10 +576,12 @@ static int __init cuse_init(void)
50821 INIT_LIST_HEAD(&cuse_conntbl[i]);
50822
50823 /* inherit and extend fuse_dev_operations */
50824 - cuse_channel_fops = fuse_dev_operations;
50825 - cuse_channel_fops.owner = THIS_MODULE;
50826 - cuse_channel_fops.open = cuse_channel_open;
50827 - cuse_channel_fops.release = cuse_channel_release;
50828 + pax_open_kernel();
50829 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
50830 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
50831 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
50832 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
50833 + pax_close_kernel();
50834
50835 cuse_class = class_create(THIS_MODULE, "cuse");
50836 if (IS_ERR(cuse_class))
50837 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
50838 index 1facb39..7f48557 100644
50839 --- a/fs/fuse/dev.c
50840 +++ b/fs/fuse/dev.c
50841 @@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
50842 {
50843 struct fuse_notify_inval_entry_out outarg;
50844 int err = -EINVAL;
50845 - char buf[FUSE_NAME_MAX+1];
50846 + char *buf = NULL;
50847 struct qstr name;
50848
50849 if (size < sizeof(outarg))
50850 @@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
50851 if (outarg.namelen > FUSE_NAME_MAX)
50852 goto err;
50853
50854 + err = -ENOMEM;
50855 + buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
50856 + if (!buf)
50857 + goto err;
50858 +
50859 err = -EINVAL;
50860 if (size != sizeof(outarg) + outarg.namelen + 1)
50861 goto err;
50862 @@ -914,17 +919,15 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
50863
50864 down_read(&fc->killsb);
50865 err = -ENOENT;
50866 - if (!fc->sb)
50867 - goto err_unlock;
50868 -
50869 - err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
50870 -
50871 -err_unlock:
50872 + if (fc->sb)
50873 + err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
50874 up_read(&fc->killsb);
50875 + kfree(buf);
50876 return err;
50877
50878 err:
50879 fuse_copy_finish(cs);
50880 + kfree(buf);
50881 return err;
50882 }
50883
50884 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
50885 index 4787ae6..73efff7 100644
50886 --- a/fs/fuse/dir.c
50887 +++ b/fs/fuse/dir.c
50888 @@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *dentry)
50889 return link;
50890 }
50891
50892 -static void free_link(char *link)
50893 +static void free_link(const char *link)
50894 {
50895 if (!IS_ERR(link))
50896 free_page((unsigned long) link);
50897 diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
50898 index 247436c..e650ccb 100644
50899 --- a/fs/gfs2/ops_inode.c
50900 +++ b/fs/gfs2/ops_inode.c
50901 @@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
50902 unsigned int x;
50903 int error;
50904
50905 + pax_track_stack();
50906 +
50907 if (ndentry->d_inode) {
50908 nip = GFS2_I(ndentry->d_inode);
50909 if (ip == nip)
50910 diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
50911 index 4463297..4fed53b 100644
50912 --- a/fs/gfs2/sys.c
50913 +++ b/fs/gfs2/sys.c
50914 @@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct kobject *kobj, struct attribute *attr,
50915 return a->store ? a->store(sdp, buf, len) : len;
50916 }
50917
50918 -static struct sysfs_ops gfs2_attr_ops = {
50919 +static const struct sysfs_ops gfs2_attr_ops = {
50920 .show = gfs2_attr_show,
50921 .store = gfs2_attr_store,
50922 };
50923 @@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
50924 return 0;
50925 }
50926
50927 -static struct kset_uevent_ops gfs2_uevent_ops = {
50928 +static const struct kset_uevent_ops gfs2_uevent_ops = {
50929 .uevent = gfs2_uevent,
50930 };
50931
50932 diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
50933 index f6874ac..7cd98a8 100644
50934 --- a/fs/hfsplus/catalog.c
50935 +++ b/fs/hfsplus/catalog.c
50936 @@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
50937 int err;
50938 u16 type;
50939
50940 + pax_track_stack();
50941 +
50942 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
50943 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
50944 if (err)
50945 @@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir, struct qstr *str, struct ino
50946 int entry_size;
50947 int err;
50948
50949 + pax_track_stack();
50950 +
50951 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
50952 sb = dir->i_sb;
50953 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
50954 @@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
50955 int entry_size, type;
50956 int err = 0;
50957
50958 + pax_track_stack();
50959 +
50960 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
50961 dst_dir->i_ino, dst_name->name);
50962 sb = src_dir->i_sb;
50963 diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
50964 index 5f40236..dac3421 100644
50965 --- a/fs/hfsplus/dir.c
50966 +++ b/fs/hfsplus/dir.c
50967 @@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
50968 struct hfsplus_readdir_data *rd;
50969 u16 type;
50970
50971 + pax_track_stack();
50972 +
50973 if (filp->f_pos >= inode->i_size)
50974 return 0;
50975
50976 diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
50977 index 1bcf597..905a251 100644
50978 --- a/fs/hfsplus/inode.c
50979 +++ b/fs/hfsplus/inode.c
50980 @@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
50981 int res = 0;
50982 u16 type;
50983
50984 + pax_track_stack();
50985 +
50986 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
50987
50988 HFSPLUS_I(inode).dev = 0;
50989 @@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode *inode)
50990 struct hfs_find_data fd;
50991 hfsplus_cat_entry entry;
50992
50993 + pax_track_stack();
50994 +
50995 if (HFSPLUS_IS_RSRC(inode))
50996 main_inode = HFSPLUS_I(inode).rsrc_inode;
50997
50998 diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
50999 index f457d2c..7ef4ad5 100644
51000 --- a/fs/hfsplus/ioctl.c
51001 +++ b/fs/hfsplus/ioctl.c
51002 @@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dentry, const char *name,
51003 struct hfsplus_cat_file *file;
51004 int res;
51005
51006 + pax_track_stack();
51007 +
51008 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
51009 return -EOPNOTSUPP;
51010
51011 @@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
51012 struct hfsplus_cat_file *file;
51013 ssize_t res = 0;
51014
51015 + pax_track_stack();
51016 +
51017 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
51018 return -EOPNOTSUPP;
51019
51020 diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
51021 index 43022f3..7298079 100644
51022 --- a/fs/hfsplus/super.c
51023 +++ b/fs/hfsplus/super.c
51024 @@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
51025 struct nls_table *nls = NULL;
51026 int err = -EINVAL;
51027
51028 + pax_track_stack();
51029 +
51030 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
51031 if (!sbi)
51032 return -ENOMEM;
51033 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
51034 index 87a1258..5694d91 100644
51035 --- a/fs/hugetlbfs/inode.c
51036 +++ b/fs/hugetlbfs/inode.c
51037 @@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs_fs_type = {
51038 .kill_sb = kill_litter_super,
51039 };
51040
51041 -static struct vfsmount *hugetlbfs_vfsmount;
51042 +struct vfsmount *hugetlbfs_vfsmount;
51043
51044 static int can_do_hugetlb_shm(void)
51045 {
51046 diff --git a/fs/ioctl.c b/fs/ioctl.c
51047 index 6c75110..19d2c3c 100644
51048 --- a/fs/ioctl.c
51049 +++ b/fs/ioctl.c
51050 @@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
51051 u64 phys, u64 len, u32 flags)
51052 {
51053 struct fiemap_extent extent;
51054 - struct fiemap_extent *dest = fieinfo->fi_extents_start;
51055 + struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
51056
51057 /* only count the extents */
51058 if (fieinfo->fi_extents_max == 0) {
51059 @@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
51060
51061 fieinfo.fi_flags = fiemap.fm_flags;
51062 fieinfo.fi_extents_max = fiemap.fm_extent_count;
51063 - fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
51064 + fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
51065
51066 if (fiemap.fm_extent_count != 0 &&
51067 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
51068 @@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
51069 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
51070 fiemap.fm_flags = fieinfo.fi_flags;
51071 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
51072 - if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
51073 + if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
51074 error = -EFAULT;
51075
51076 return error;
51077 diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
51078 index b0435dd..81ee0be 100644
51079 --- a/fs/jbd/checkpoint.c
51080 +++ b/fs/jbd/checkpoint.c
51081 @@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal)
51082 tid_t this_tid;
51083 int result;
51084
51085 + pax_track_stack();
51086 +
51087 jbd_debug(1, "Start checkpoint\n");
51088
51089 /*
51090 diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
51091 index 546d153..736896c 100644
51092 --- a/fs/jffs2/compr_rtime.c
51093 +++ b/fs/jffs2/compr_rtime.c
51094 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned char *data_in,
51095 int outpos = 0;
51096 int pos=0;
51097
51098 + pax_track_stack();
51099 +
51100 memset(positions,0,sizeof(positions));
51101
51102 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
51103 @@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
51104 int outpos = 0;
51105 int pos=0;
51106
51107 + pax_track_stack();
51108 +
51109 memset(positions,0,sizeof(positions));
51110
51111 while (outpos<destlen) {
51112 diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
51113 index 170d289..3254b98 100644
51114 --- a/fs/jffs2/compr_rubin.c
51115 +++ b/fs/jffs2/compr_rubin.c
51116 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsigned char *data_in,
51117 int ret;
51118 uint32_t mysrclen, mydstlen;
51119
51120 + pax_track_stack();
51121 +
51122 mysrclen = *sourcelen;
51123 mydstlen = *dstlen - 8;
51124
51125 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
51126 index b47679b..00d65d3 100644
51127 --- a/fs/jffs2/erase.c
51128 +++ b/fs/jffs2/erase.c
51129 @@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
51130 struct jffs2_unknown_node marker = {
51131 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
51132 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
51133 - .totlen = cpu_to_je32(c->cleanmarker_size)
51134 + .totlen = cpu_to_je32(c->cleanmarker_size),
51135 + .hdr_crc = cpu_to_je32(0)
51136 };
51137
51138 jffs2_prealloc_raw_node_refs(c, jeb, 1);
51139 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
51140 index 5ef7bac..4fd1e3c 100644
51141 --- a/fs/jffs2/wbuf.c
51142 +++ b/fs/jffs2/wbuf.c
51143 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
51144 {
51145 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
51146 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
51147 - .totlen = constant_cpu_to_je32(8)
51148 + .totlen = constant_cpu_to_je32(8),
51149 + .hdr_crc = constant_cpu_to_je32(0)
51150 };
51151
51152 /*
51153 diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
51154 index 082e844..52012a1 100644
51155 --- a/fs/jffs2/xattr.c
51156 +++ b/fs/jffs2/xattr.c
51157 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
51158
51159 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
51160
51161 + pax_track_stack();
51162 +
51163 /* Phase.1 : Merge same xref */
51164 for (i=0; i < XREF_TMPHASH_SIZE; i++)
51165 xref_tmphash[i] = NULL;
51166 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
51167 index 2234c73..f6e6e6b 100644
51168 --- a/fs/jfs/super.c
51169 +++ b/fs/jfs/super.c
51170 @@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
51171
51172 jfs_inode_cachep =
51173 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
51174 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
51175 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
51176 init_once);
51177 if (jfs_inode_cachep == NULL)
51178 return -ENOMEM;
51179 diff --git a/fs/libfs.c b/fs/libfs.c
51180 index ba36e93..3153fce 100644
51181 --- a/fs/libfs.c
51182 +++ b/fs/libfs.c
51183 @@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
51184
51185 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
51186 struct dentry *next;
51187 + char d_name[sizeof(next->d_iname)];
51188 + const unsigned char *name;
51189 +
51190 next = list_entry(p, struct dentry, d_u.d_child);
51191 if (d_unhashed(next) || !next->d_inode)
51192 continue;
51193
51194 spin_unlock(&dcache_lock);
51195 - if (filldir(dirent, next->d_name.name,
51196 + name = next->d_name.name;
51197 + if (name == next->d_iname) {
51198 + memcpy(d_name, name, next->d_name.len);
51199 + name = d_name;
51200 + }
51201 + if (filldir(dirent, name,
51202 next->d_name.len, filp->f_pos,
51203 next->d_inode->i_ino,
51204 dt_type(next->d_inode)) < 0)
51205 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
51206 index c325a83..d15b07b 100644
51207 --- a/fs/lockd/clntproc.c
51208 +++ b/fs/lockd/clntproc.c
51209 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
51210 /*
51211 * Cookie counter for NLM requests
51212 */
51213 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
51214 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
51215
51216 void nlmclnt_next_cookie(struct nlm_cookie *c)
51217 {
51218 - u32 cookie = atomic_inc_return(&nlm_cookie);
51219 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
51220
51221 memcpy(c->data, &cookie, 4);
51222 c->len=4;
51223 @@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
51224 struct nlm_rqst reqst, *req;
51225 int status;
51226
51227 + pax_track_stack();
51228 +
51229 req = &reqst;
51230 memset(req, 0, sizeof(*req));
51231 locks_init_lock(&req->a_args.lock.fl);
51232 diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
51233 index 1a54ae1..6a16c27 100644
51234 --- a/fs/lockd/svc.c
51235 +++ b/fs/lockd/svc.c
51236 @@ -43,7 +43,7 @@
51237
51238 static struct svc_program nlmsvc_program;
51239
51240 -struct nlmsvc_binding * nlmsvc_ops;
51241 +const struct nlmsvc_binding * nlmsvc_ops;
51242 EXPORT_SYMBOL_GPL(nlmsvc_ops);
51243
51244 static DEFINE_MUTEX(nlmsvc_mutex);
51245 diff --git a/fs/locks.c b/fs/locks.c
51246 index a8794f2..4041e55 100644
51247 --- a/fs/locks.c
51248 +++ b/fs/locks.c
51249 @@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
51250
51251 static struct kmem_cache *filelock_cache __read_mostly;
51252
51253 +static void locks_init_lock_always(struct file_lock *fl)
51254 +{
51255 + fl->fl_next = NULL;
51256 + fl->fl_fasync = NULL;
51257 + fl->fl_owner = NULL;
51258 + fl->fl_pid = 0;
51259 + fl->fl_nspid = NULL;
51260 + fl->fl_file = NULL;
51261 + fl->fl_flags = 0;
51262 + fl->fl_type = 0;
51263 + fl->fl_start = fl->fl_end = 0;
51264 +}
51265 +
51266 /* Allocate an empty lock structure. */
51267 static struct file_lock *locks_alloc_lock(void)
51268 {
51269 - return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
51270 + struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
51271 +
51272 + if (fl)
51273 + locks_init_lock_always(fl);
51274 +
51275 + return fl;
51276 }
51277
51278 void locks_release_private(struct file_lock *fl)
51279 @@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *fl)
51280 INIT_LIST_HEAD(&fl->fl_link);
51281 INIT_LIST_HEAD(&fl->fl_block);
51282 init_waitqueue_head(&fl->fl_wait);
51283 - fl->fl_next = NULL;
51284 - fl->fl_fasync = NULL;
51285 - fl->fl_owner = NULL;
51286 - fl->fl_pid = 0;
51287 - fl->fl_nspid = NULL;
51288 - fl->fl_file = NULL;
51289 - fl->fl_flags = 0;
51290 - fl->fl_type = 0;
51291 - fl->fl_start = fl->fl_end = 0;
51292 fl->fl_ops = NULL;
51293 fl->fl_lmops = NULL;
51294 + locks_init_lock_always(fl);
51295 }
51296
51297 EXPORT_SYMBOL(locks_init_lock);
51298 @@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *filp)
51299 return;
51300
51301 if (filp->f_op && filp->f_op->flock) {
51302 - struct file_lock fl = {
51303 + struct file_lock flock = {
51304 .fl_pid = current->tgid,
51305 .fl_file = filp,
51306 .fl_flags = FL_FLOCK,
51307 .fl_type = F_UNLCK,
51308 .fl_end = OFFSET_MAX,
51309 };
51310 - filp->f_op->flock(filp, F_SETLKW, &fl);
51311 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
51312 - fl.fl_ops->fl_release_private(&fl);
51313 + filp->f_op->flock(filp, F_SETLKW, &flock);
51314 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
51315 + flock.fl_ops->fl_release_private(&flock);
51316 }
51317
51318 lock_kernel();
51319 diff --git a/fs/mbcache.c b/fs/mbcache.c
51320 index ec88ff3..b843a82 100644
51321 --- a/fs/mbcache.c
51322 +++ b/fs/mbcache.c
51323 @@ -266,9 +266,9 @@ mb_cache_create(const char *name, struct mb_cache_op *cache_op,
51324 if (!cache)
51325 goto fail;
51326 cache->c_name = name;
51327 - cache->c_op.free = NULL;
51328 + *(void **)&cache->c_op.free = NULL;
51329 if (cache_op)
51330 - cache->c_op.free = cache_op->free;
51331 + *(void **)&cache->c_op.free = cache_op->free;
51332 atomic_set(&cache->c_entry_count, 0);
51333 cache->c_bucket_bits = bucket_bits;
51334 #ifdef MB_CACHE_INDEXES_COUNT
51335 diff --git a/fs/namei.c b/fs/namei.c
51336 index b0afbd4..8d065a1 100644
51337 --- a/fs/namei.c
51338 +++ b/fs/namei.c
51339 @@ -224,6 +224,14 @@ int generic_permission(struct inode *inode, int mask,
51340 return ret;
51341
51342 /*
51343 + * Searching includes executable on directories, else just read.
51344 + */
51345 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
51346 + if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
51347 + if (capable(CAP_DAC_READ_SEARCH))
51348 + return 0;
51349 +
51350 + /*
51351 * Read/write DACs are always overridable.
51352 * Executable DACs are overridable if at least one exec bit is set.
51353 */
51354 @@ -231,14 +239,6 @@ int generic_permission(struct inode *inode, int mask,
51355 if (capable(CAP_DAC_OVERRIDE))
51356 return 0;
51357
51358 - /*
51359 - * Searching includes executable on directories, else just read.
51360 - */
51361 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
51362 - if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
51363 - if (capable(CAP_DAC_READ_SEARCH))
51364 - return 0;
51365 -
51366 return -EACCES;
51367 }
51368
51369 @@ -458,7 +458,8 @@ static int exec_permission_lite(struct inode *inode)
51370 if (!ret)
51371 goto ok;
51372
51373 - if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
51374 + if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
51375 + capable(CAP_DAC_OVERRIDE))
51376 goto ok;
51377
51378 return ret;
51379 @@ -638,7 +639,7 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
51380 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
51381 error = PTR_ERR(cookie);
51382 if (!IS_ERR(cookie)) {
51383 - char *s = nd_get_link(nd);
51384 + const char *s = nd_get_link(nd);
51385 error = 0;
51386 if (s)
51387 error = __vfs_follow_link(nd, s);
51388 @@ -669,6 +670,13 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd)
51389 err = security_inode_follow_link(path->dentry, nd);
51390 if (err)
51391 goto loop;
51392 +
51393 + if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
51394 + path->dentry->d_inode, path->dentry, nd->path.mnt)) {
51395 + err = -EACCES;
51396 + goto loop;
51397 + }
51398 +
51399 current->link_count++;
51400 current->total_link_count++;
51401 nd->depth++;
51402 @@ -1016,11 +1024,19 @@ return_reval:
51403 break;
51404 }
51405 return_base:
51406 + if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT)) &&
51407 + !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
51408 + path_put(&nd->path);
51409 + return -ENOENT;
51410 + }
51411 return 0;
51412 out_dput:
51413 path_put_conditional(&next, nd);
51414 break;
51415 }
51416 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
51417 + err = -ENOENT;
51418 +
51419 path_put(&nd->path);
51420 return_err:
51421 return err;
51422 @@ -1091,13 +1107,20 @@ static int do_path_lookup(int dfd, const char *name,
51423 int retval = path_init(dfd, name, flags, nd);
51424 if (!retval)
51425 retval = path_walk(name, nd);
51426 - if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
51427 - nd->path.dentry->d_inode))
51428 - audit_inode(name, nd->path.dentry);
51429 +
51430 + if (likely(!retval)) {
51431 + if (nd->path.dentry && nd->path.dentry->d_inode) {
51432 + if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
51433 + retval = -ENOENT;
51434 + if (!audit_dummy_context())
51435 + audit_inode(name, nd->path.dentry);
51436 + }
51437 + }
51438 if (nd->root.mnt) {
51439 path_put(&nd->root);
51440 nd->root.mnt = NULL;
51441 }
51442 +
51443 return retval;
51444 }
51445
51446 @@ -1576,6 +1599,20 @@ int may_open(struct path *path, int acc_mode, int flag)
51447 if (error)
51448 goto err_out;
51449
51450 +
51451 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
51452 + error = -EPERM;
51453 + goto err_out;
51454 + }
51455 + if (gr_handle_rawio(inode)) {
51456 + error = -EPERM;
51457 + goto err_out;
51458 + }
51459 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode)) {
51460 + error = -EACCES;
51461 + goto err_out;
51462 + }
51463 +
51464 if (flag & O_TRUNC) {
51465 error = get_write_access(inode);
51466 if (error)
51467 @@ -1620,6 +1657,17 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
51468 {
51469 int error;
51470 struct dentry *dir = nd->path.dentry;
51471 + int acc_mode = ACC_MODE(flag);
51472 +
51473 + if (flag & O_TRUNC)
51474 + acc_mode |= MAY_WRITE;
51475 + if (flag & O_APPEND)
51476 + acc_mode |= MAY_APPEND;
51477 +
51478 + if (!gr_acl_handle_creat(path->dentry, dir, nd->path.mnt, flag, acc_mode, mode)) {
51479 + error = -EACCES;
51480 + goto out_unlock;
51481 + }
51482
51483 if (!IS_POSIXACL(dir->d_inode))
51484 mode &= ~current_umask();
51485 @@ -1627,6 +1675,8 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
51486 if (error)
51487 goto out_unlock;
51488 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
51489 + if (!error)
51490 + gr_handle_create(path->dentry, nd->path.mnt);
51491 out_unlock:
51492 mutex_unlock(&dir->d_inode->i_mutex);
51493 dput(nd->path.dentry);
51494 @@ -1709,6 +1759,22 @@ struct file *do_filp_open(int dfd, const char *pathname,
51495 &nd, flag);
51496 if (error)
51497 return ERR_PTR(error);
51498 +
51499 + if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
51500 + error = -EPERM;
51501 + goto exit;
51502 + }
51503 +
51504 + if (gr_handle_rawio(nd.path.dentry->d_inode)) {
51505 + error = -EPERM;
51506 + goto exit;
51507 + }
51508 +
51509 + if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, acc_mode)) {
51510 + error = -EACCES;
51511 + goto exit;
51512 + }
51513 +
51514 goto ok;
51515 }
51516
51517 @@ -1795,6 +1861,19 @@ do_last:
51518 /*
51519 * It already exists.
51520 */
51521 +
51522 + if (!gr_acl_handle_hidden_file(path.dentry, path.mnt)) {
51523 + error = -ENOENT;
51524 + goto exit_mutex_unlock;
51525 + }
51526 +
51527 + /* only check if O_CREAT is specified, all other checks need
51528 + to go into may_open */
51529 + if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
51530 + error = -EACCES;
51531 + goto exit_mutex_unlock;
51532 + }
51533 +
51534 mutex_unlock(&dir->d_inode->i_mutex);
51535 audit_inode(pathname, path.dentry);
51536
51537 @@ -1887,6 +1966,13 @@ do_link:
51538 error = security_inode_follow_link(path.dentry, &nd);
51539 if (error)
51540 goto exit_dput;
51541 +
51542 + if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
51543 + path.dentry, nd.path.mnt)) {
51544 + error = -EACCES;
51545 + goto exit_dput;
51546 + }
51547 +
51548 error = __do_follow_link(&path, &nd);
51549 if (error) {
51550 /* Does someone understand code flow here? Or it is only
51551 @@ -1984,6 +2070,10 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir)
51552 }
51553 return dentry;
51554 eexist:
51555 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
51556 + dput(dentry);
51557 + return ERR_PTR(-ENOENT);
51558 + }
51559 dput(dentry);
51560 dentry = ERR_PTR(-EEXIST);
51561 fail:
51562 @@ -2061,6 +2151,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
51563 error = may_mknod(mode);
51564 if (error)
51565 goto out_dput;
51566 +
51567 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
51568 + error = -EPERM;
51569 + goto out_dput;
51570 + }
51571 +
51572 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
51573 + error = -EACCES;
51574 + goto out_dput;
51575 + }
51576 +
51577 error = mnt_want_write(nd.path.mnt);
51578 if (error)
51579 goto out_dput;
51580 @@ -2081,6 +2182,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
51581 }
51582 out_drop_write:
51583 mnt_drop_write(nd.path.mnt);
51584 +
51585 + if (!error)
51586 + gr_handle_create(dentry, nd.path.mnt);
51587 out_dput:
51588 dput(dentry);
51589 out_unlock:
51590 @@ -2134,6 +2238,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
51591 if (IS_ERR(dentry))
51592 goto out_unlock;
51593
51594 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
51595 + error = -EACCES;
51596 + goto out_dput;
51597 + }
51598 +
51599 if (!IS_POSIXACL(nd.path.dentry->d_inode))
51600 mode &= ~current_umask();
51601 error = mnt_want_write(nd.path.mnt);
51602 @@ -2145,6 +2254,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
51603 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
51604 out_drop_write:
51605 mnt_drop_write(nd.path.mnt);
51606 +
51607 + if (!error)
51608 + gr_handle_create(dentry, nd.path.mnt);
51609 +
51610 out_dput:
51611 dput(dentry);
51612 out_unlock:
51613 @@ -2226,6 +2339,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
51614 char * name;
51615 struct dentry *dentry;
51616 struct nameidata nd;
51617 + ino_t saved_ino = 0;
51618 + dev_t saved_dev = 0;
51619
51620 error = user_path_parent(dfd, pathname, &nd, &name);
51621 if (error)
51622 @@ -2250,6 +2365,17 @@ static long do_rmdir(int dfd, const char __user *pathname)
51623 error = PTR_ERR(dentry);
51624 if (IS_ERR(dentry))
51625 goto exit2;
51626 +
51627 + if (dentry->d_inode != NULL) {
51628 + saved_ino = dentry->d_inode->i_ino;
51629 + saved_dev = gr_get_dev_from_dentry(dentry);
51630 +
51631 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
51632 + error = -EACCES;
51633 + goto exit3;
51634 + }
51635 + }
51636 +
51637 error = mnt_want_write(nd.path.mnt);
51638 if (error)
51639 goto exit3;
51640 @@ -2257,6 +2383,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
51641 if (error)
51642 goto exit4;
51643 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
51644 + if (!error && (saved_dev || saved_ino))
51645 + gr_handle_delete(saved_ino, saved_dev);
51646 exit4:
51647 mnt_drop_write(nd.path.mnt);
51648 exit3:
51649 @@ -2318,6 +2446,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
51650 struct dentry *dentry;
51651 struct nameidata nd;
51652 struct inode *inode = NULL;
51653 + ino_t saved_ino = 0;
51654 + dev_t saved_dev = 0;
51655
51656 error = user_path_parent(dfd, pathname, &nd, &name);
51657 if (error)
51658 @@ -2337,8 +2467,19 @@ static long do_unlinkat(int dfd, const char __user *pathname)
51659 if (nd.last.name[nd.last.len])
51660 goto slashes;
51661 inode = dentry->d_inode;
51662 - if (inode)
51663 + if (inode) {
51664 + if (inode->i_nlink <= 1) {
51665 + saved_ino = inode->i_ino;
51666 + saved_dev = gr_get_dev_from_dentry(dentry);
51667 + }
51668 +
51669 atomic_inc(&inode->i_count);
51670 +
51671 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
51672 + error = -EACCES;
51673 + goto exit2;
51674 + }
51675 + }
51676 error = mnt_want_write(nd.path.mnt);
51677 if (error)
51678 goto exit2;
51679 @@ -2346,6 +2487,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
51680 if (error)
51681 goto exit3;
51682 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
51683 + if (!error && (saved_ino || saved_dev))
51684 + gr_handle_delete(saved_ino, saved_dev);
51685 exit3:
51686 mnt_drop_write(nd.path.mnt);
51687 exit2:
51688 @@ -2424,6 +2567,11 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
51689 if (IS_ERR(dentry))
51690 goto out_unlock;
51691
51692 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
51693 + error = -EACCES;
51694 + goto out_dput;
51695 + }
51696 +
51697 error = mnt_want_write(nd.path.mnt);
51698 if (error)
51699 goto out_dput;
51700 @@ -2431,6 +2579,8 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
51701 if (error)
51702 goto out_drop_write;
51703 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
51704 + if (!error)
51705 + gr_handle_create(dentry, nd.path.mnt);
51706 out_drop_write:
51707 mnt_drop_write(nd.path.mnt);
51708 out_dput:
51709 @@ -2524,6 +2674,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
51710 error = PTR_ERR(new_dentry);
51711 if (IS_ERR(new_dentry))
51712 goto out_unlock;
51713 +
51714 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
51715 + old_path.dentry->d_inode,
51716 + old_path.dentry->d_inode->i_mode, to)) {
51717 + error = -EACCES;
51718 + goto out_dput;
51719 + }
51720 +
51721 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
51722 + old_path.dentry, old_path.mnt, to)) {
51723 + error = -EACCES;
51724 + goto out_dput;
51725 + }
51726 +
51727 error = mnt_want_write(nd.path.mnt);
51728 if (error)
51729 goto out_dput;
51730 @@ -2531,6 +2695,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
51731 if (error)
51732 goto out_drop_write;
51733 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
51734 + if (!error)
51735 + gr_handle_create(new_dentry, nd.path.mnt);
51736 out_drop_write:
51737 mnt_drop_write(nd.path.mnt);
51738 out_dput:
51739 @@ -2708,6 +2874,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
51740 char *to;
51741 int error;
51742
51743 + pax_track_stack();
51744 +
51745 error = user_path_parent(olddfd, oldname, &oldnd, &from);
51746 if (error)
51747 goto exit;
51748 @@ -2764,6 +2932,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
51749 if (new_dentry == trap)
51750 goto exit5;
51751
51752 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
51753 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
51754 + to);
51755 + if (error)
51756 + goto exit5;
51757 +
51758 error = mnt_want_write(oldnd.path.mnt);
51759 if (error)
51760 goto exit5;
51761 @@ -2773,6 +2947,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
51762 goto exit6;
51763 error = vfs_rename(old_dir->d_inode, old_dentry,
51764 new_dir->d_inode, new_dentry);
51765 + if (!error)
51766 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
51767 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
51768 exit6:
51769 mnt_drop_write(oldnd.path.mnt);
51770 exit5:
51771 @@ -2798,6 +2975,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
51772
51773 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
51774 {
51775 + char tmpbuf[64];
51776 + const char *newlink;
51777 int len;
51778
51779 len = PTR_ERR(link);
51780 @@ -2807,7 +2986,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
51781 len = strlen(link);
51782 if (len > (unsigned) buflen)
51783 len = buflen;
51784 - if (copy_to_user(buffer, link, len))
51785 +
51786 + if (len < sizeof(tmpbuf)) {
51787 + memcpy(tmpbuf, link, len);
51788 + newlink = tmpbuf;
51789 + } else
51790 + newlink = link;
51791 +
51792 + if (copy_to_user(buffer, newlink, len))
51793 len = -EFAULT;
51794 out:
51795 return len;
51796 diff --git a/fs/namespace.c b/fs/namespace.c
51797 index 2beb0fb..11a95a5 100644
51798 --- a/fs/namespace.c
51799 +++ b/fs/namespace.c
51800 @@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
51801 if (!(sb->s_flags & MS_RDONLY))
51802 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
51803 up_write(&sb->s_umount);
51804 +
51805 + gr_log_remount(mnt->mnt_devname, retval);
51806 +
51807 return retval;
51808 }
51809
51810 @@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
51811 security_sb_umount_busy(mnt);
51812 up_write(&namespace_sem);
51813 release_mounts(&umount_list);
51814 +
51815 + gr_log_unmount(mnt->mnt_devname, retval);
51816 +
51817 return retval;
51818 }
51819
51820 @@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
51821 if (retval)
51822 goto dput_out;
51823
51824 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
51825 + retval = -EPERM;
51826 + goto dput_out;
51827 + }
51828 +
51829 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
51830 + retval = -EPERM;
51831 + goto dput_out;
51832 + }
51833 +
51834 if (flags & MS_REMOUNT)
51835 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
51836 data_page);
51837 @@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
51838 dev_name, data_page);
51839 dput_out:
51840 path_put(&path);
51841 +
51842 + gr_log_mount(dev_name, dir_name, retval);
51843 +
51844 return retval;
51845 }
51846
51847 @@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
51848 goto out1;
51849 }
51850
51851 + if (gr_handle_chroot_pivot()) {
51852 + error = -EPERM;
51853 + path_put(&old);
51854 + goto out1;
51855 + }
51856 +
51857 read_lock(&current->fs->lock);
51858 root = current->fs->root;
51859 path_get(&current->fs->root);
51860 diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
51861 index b8b5b30..2bd9ccb 100644
51862 --- a/fs/ncpfs/dir.c
51863 +++ b/fs/ncpfs/dir.c
51864 @@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *dentry)
51865 int res, val = 0, len;
51866 __u8 __name[NCP_MAXPATHLEN + 1];
51867
51868 + pax_track_stack();
51869 +
51870 parent = dget_parent(dentry);
51871 dir = parent->d_inode;
51872
51873 @@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struc
51874 int error, res, len;
51875 __u8 __name[NCP_MAXPATHLEN + 1];
51876
51877 + pax_track_stack();
51878 +
51879 lock_kernel();
51880 error = -EIO;
51881 if (!ncp_conn_valid(server))
51882 @@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, int mode,
51883 int error, result, len;
51884 int opmode;
51885 __u8 __name[NCP_MAXPATHLEN + 1];
51886 -
51887 +
51888 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
51889 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
51890
51891 + pax_track_stack();
51892 +
51893 error = -EIO;
51894 lock_kernel();
51895 if (!ncp_conn_valid(server))
51896 @@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
51897 int error, len;
51898 __u8 __name[NCP_MAXPATHLEN + 1];
51899
51900 + pax_track_stack();
51901 +
51902 DPRINTK("ncp_mkdir: making %s/%s\n",
51903 dentry->d_parent->d_name.name, dentry->d_name.name);
51904
51905 @@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
51906 if (!ncp_conn_valid(server))
51907 goto out;
51908
51909 + pax_track_stack();
51910 +
51911 ncp_age_dentry(server, dentry);
51912 len = sizeof(__name);
51913 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
51914 @@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
51915 int old_len, new_len;
51916 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
51917
51918 + pax_track_stack();
51919 +
51920 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
51921 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
51922 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
51923 diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
51924 index cf98da1..da890a9 100644
51925 --- a/fs/ncpfs/inode.c
51926 +++ b/fs/ncpfs/inode.c
51927 @@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
51928 #endif
51929 struct ncp_entry_info finfo;
51930
51931 + pax_track_stack();
51932 +
51933 data.wdog_pid = NULL;
51934 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
51935 if (!server)
51936 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
51937 index bfaef7b..e9d03ca 100644
51938 --- a/fs/nfs/inode.c
51939 +++ b/fs/nfs/inode.c
51940 @@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
51941 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
51942 nfsi->attrtimeo_timestamp = jiffies;
51943
51944 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
51945 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
51946 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
51947 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
51948 else
51949 @@ -973,16 +973,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
51950 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
51951 }
51952
51953 -static atomic_long_t nfs_attr_generation_counter;
51954 +static atomic_long_unchecked_t nfs_attr_generation_counter;
51955
51956 static unsigned long nfs_read_attr_generation_counter(void)
51957 {
51958 - return atomic_long_read(&nfs_attr_generation_counter);
51959 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
51960 }
51961
51962 unsigned long nfs_inc_attr_generation_counter(void)
51963 {
51964 - return atomic_long_inc_return(&nfs_attr_generation_counter);
51965 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
51966 }
51967
51968 void nfs_fattr_init(struct nfs_fattr *fattr)
51969 diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
51970 index cc2f505..f6a236f 100644
51971 --- a/fs/nfsd/lockd.c
51972 +++ b/fs/nfsd/lockd.c
51973 @@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
51974 fput(filp);
51975 }
51976
51977 -static struct nlmsvc_binding nfsd_nlm_ops = {
51978 +static const struct nlmsvc_binding nfsd_nlm_ops = {
51979 .fopen = nlm_fopen, /* open file for locking */
51980 .fclose = nlm_fclose, /* close file */
51981 };
51982 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
51983 index cfc3391..dcc083a 100644
51984 --- a/fs/nfsd/nfs4state.c
51985 +++ b/fs/nfsd/nfs4state.c
51986 @@ -3459,6 +3459,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
51987 unsigned int cmd;
51988 int err;
51989
51990 + pax_track_stack();
51991 +
51992 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
51993 (long long) lock->lk_offset,
51994 (long long) lock->lk_length);
51995 diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
51996 index 4a82a96..0d5fb49 100644
51997 --- a/fs/nfsd/nfs4xdr.c
51998 +++ b/fs/nfsd/nfs4xdr.c
51999 @@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
52000 struct nfsd4_compoundres *resp = rqstp->rq_resp;
52001 u32 minorversion = resp->cstate.minorversion;
52002
52003 + pax_track_stack();
52004 +
52005 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
52006 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
52007 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
52008 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
52009 index 2e09588..596421d 100644
52010 --- a/fs/nfsd/vfs.c
52011 +++ b/fs/nfsd/vfs.c
52012 @@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
52013 } else {
52014 oldfs = get_fs();
52015 set_fs(KERNEL_DS);
52016 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
52017 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
52018 set_fs(oldfs);
52019 }
52020
52021 @@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
52022
52023 /* Write the data. */
52024 oldfs = get_fs(); set_fs(KERNEL_DS);
52025 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
52026 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
52027 set_fs(oldfs);
52028 if (host_err < 0)
52029 goto out_nfserr;
52030 @@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
52031 */
52032
52033 oldfs = get_fs(); set_fs(KERNEL_DS);
52034 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
52035 + host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
52036 set_fs(oldfs);
52037
52038 if (host_err < 0)
52039 diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
52040 index f6af760..d0adf34 100644
52041 --- a/fs/nilfs2/ioctl.c
52042 +++ b/fs/nilfs2/ioctl.c
52043 @@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
52044 unsigned int cmd, void __user *argp)
52045 {
52046 struct nilfs_argv argv[5];
52047 - const static size_t argsz[5] = {
52048 + static const size_t argsz[5] = {
52049 sizeof(struct nilfs_vdesc),
52050 sizeof(struct nilfs_period),
52051 sizeof(__u64),
52052 @@ -522,6 +522,9 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
52053 if (argv[n].v_nmembs > nsegs * nilfs->ns_blocks_per_segment)
52054 goto out_free;
52055
52056 + if (argv[n].v_nmembs >= UINT_MAX / argv[n].v_size)
52057 + goto out_free;
52058 +
52059 len = argv[n].v_size * argv[n].v_nmembs;
52060 base = (void __user *)(unsigned long)argv[n].v_base;
52061 if (len == 0) {
52062 diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
52063 index 7e54e52..9337248 100644
52064 --- a/fs/notify/dnotify/dnotify.c
52065 +++ b/fs/notify/dnotify/dnotify.c
52066 @@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsnotify_mark_entry *entry)
52067 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
52068 }
52069
52070 -static struct fsnotify_ops dnotify_fsnotify_ops = {
52071 +static const struct fsnotify_ops dnotify_fsnotify_ops = {
52072 .handle_event = dnotify_handle_event,
52073 .should_send_event = dnotify_should_send_event,
52074 .free_group_priv = NULL,
52075 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
52076 index b8bf53b..c518688 100644
52077 --- a/fs/notify/notification.c
52078 +++ b/fs/notify/notification.c
52079 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
52080 * get set to 0 so it will never get 'freed'
52081 */
52082 static struct fsnotify_event q_overflow_event;
52083 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
52084 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
52085
52086 /**
52087 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
52088 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
52089 */
52090 u32 fsnotify_get_cookie(void)
52091 {
52092 - return atomic_inc_return(&fsnotify_sync_cookie);
52093 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
52094 }
52095 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
52096
52097 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
52098 index 5a9e344..0f8cd28 100644
52099 --- a/fs/ntfs/dir.c
52100 +++ b/fs/ntfs/dir.c
52101 @@ -1328,7 +1328,7 @@ find_next_index_buffer:
52102 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
52103 ~(s64)(ndir->itype.index.block_size - 1)));
52104 /* Bounds checks. */
52105 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
52106 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
52107 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
52108 "inode 0x%lx or driver bug.", vdir->i_ino);
52109 goto err_out;
52110 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
52111 index 663c0e3..b6868e9 100644
52112 --- a/fs/ntfs/file.c
52113 +++ b/fs/ntfs/file.c
52114 @@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_inode_ops = {
52115 #endif /* NTFS_RW */
52116 };
52117
52118 -const struct file_operations ntfs_empty_file_ops = {};
52119 +const struct file_operations ntfs_empty_file_ops __read_only;
52120
52121 -const struct inode_operations ntfs_empty_inode_ops = {};
52122 +const struct inode_operations ntfs_empty_inode_ops __read_only;
52123 diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
52124 index 1cd2934..880b5d2 100644
52125 --- a/fs/ocfs2/cluster/masklog.c
52126 +++ b/fs/ocfs2/cluster/masklog.c
52127 @@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject *obj, struct attribute *attr,
52128 return mlog_mask_store(mlog_attr->mask, buf, count);
52129 }
52130
52131 -static struct sysfs_ops mlog_attr_ops = {
52132 +static const struct sysfs_ops mlog_attr_ops = {
52133 .show = mlog_show,
52134 .store = mlog_store,
52135 };
52136 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
52137 index ac10f83..2cd2607 100644
52138 --- a/fs/ocfs2/localalloc.c
52139 +++ b/fs/ocfs2/localalloc.c
52140 @@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
52141 goto bail;
52142 }
52143
52144 - atomic_inc(&osb->alloc_stats.moves);
52145 + atomic_inc_unchecked(&osb->alloc_stats.moves);
52146
52147 status = 0;
52148 bail:
52149 diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
52150 index f010b22..9f9ed34 100644
52151 --- a/fs/ocfs2/namei.c
52152 +++ b/fs/ocfs2/namei.c
52153 @@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *old_dir,
52154 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
52155 struct ocfs2_dir_lookup_result target_insert = { NULL, };
52156
52157 + pax_track_stack();
52158 +
52159 /* At some point it might be nice to break this function up a
52160 * bit. */
52161
52162 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
52163 index d963d86..914cfbd 100644
52164 --- a/fs/ocfs2/ocfs2.h
52165 +++ b/fs/ocfs2/ocfs2.h
52166 @@ -217,11 +217,11 @@ enum ocfs2_vol_state
52167
52168 struct ocfs2_alloc_stats
52169 {
52170 - atomic_t moves;
52171 - atomic_t local_data;
52172 - atomic_t bitmap_data;
52173 - atomic_t bg_allocs;
52174 - atomic_t bg_extends;
52175 + atomic_unchecked_t moves;
52176 + atomic_unchecked_t local_data;
52177 + atomic_unchecked_t bitmap_data;
52178 + atomic_unchecked_t bg_allocs;
52179 + atomic_unchecked_t bg_extends;
52180 };
52181
52182 enum ocfs2_local_alloc_state
52183 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
52184 index 79b5dac..d322952 100644
52185 --- a/fs/ocfs2/suballoc.c
52186 +++ b/fs/ocfs2/suballoc.c
52187 @@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
52188 mlog_errno(status);
52189 goto bail;
52190 }
52191 - atomic_inc(&osb->alloc_stats.bg_extends);
52192 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
52193
52194 /* You should never ask for this much metadata */
52195 BUG_ON(bits_wanted >
52196 @@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_super *osb,
52197 mlog_errno(status);
52198 goto bail;
52199 }
52200 - atomic_inc(&osb->alloc_stats.bg_allocs);
52201 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
52202
52203 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
52204 ac->ac_bits_given += (*num_bits);
52205 @@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_super *osb,
52206 mlog_errno(status);
52207 goto bail;
52208 }
52209 - atomic_inc(&osb->alloc_stats.bg_allocs);
52210 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
52211
52212 BUG_ON(num_bits != 1);
52213
52214 @@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
52215 cluster_start,
52216 num_clusters);
52217 if (!status)
52218 - atomic_inc(&osb->alloc_stats.local_data);
52219 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
52220 } else {
52221 if (min_clusters > (osb->bitmap_cpg - 1)) {
52222 /* The only paths asking for contiguousness
52223 @@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
52224 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
52225 bg_blkno,
52226 bg_bit_off);
52227 - atomic_inc(&osb->alloc_stats.bitmap_data);
52228 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
52229 }
52230 }
52231 if (status < 0) {
52232 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
52233 index 9f55be4..a3f8048 100644
52234 --- a/fs/ocfs2/super.c
52235 +++ b/fs/ocfs2/super.c
52236 @@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
52237 "%10s => GlobalAllocs: %d LocalAllocs: %d "
52238 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
52239 "Stats",
52240 - atomic_read(&osb->alloc_stats.bitmap_data),
52241 - atomic_read(&osb->alloc_stats.local_data),
52242 - atomic_read(&osb->alloc_stats.bg_allocs),
52243 - atomic_read(&osb->alloc_stats.moves),
52244 - atomic_read(&osb->alloc_stats.bg_extends));
52245 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
52246 + atomic_read_unchecked(&osb->alloc_stats.local_data),
52247 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
52248 + atomic_read_unchecked(&osb->alloc_stats.moves),
52249 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
52250
52251 out += snprintf(buf + out, len - out,
52252 "%10s => State: %u Descriptor: %llu Size: %u bits "
52253 @@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
52254 spin_lock_init(&osb->osb_xattr_lock);
52255 ocfs2_init_inode_steal_slot(osb);
52256
52257 - atomic_set(&osb->alloc_stats.moves, 0);
52258 - atomic_set(&osb->alloc_stats.local_data, 0);
52259 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
52260 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
52261 - atomic_set(&osb->alloc_stats.bg_extends, 0);
52262 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
52263 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
52264 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
52265 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
52266 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
52267
52268 /* Copy the blockcheck stats from the superblock probe */
52269 osb->osb_ecc_stats = *stats;
52270 diff --git a/fs/open.c b/fs/open.c
52271 index 4f01e06..091f6c3 100644
52272 --- a/fs/open.c
52273 +++ b/fs/open.c
52274 @@ -275,6 +275,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
52275 error = locks_verify_truncate(inode, NULL, length);
52276 if (!error)
52277 error = security_path_truncate(&path, length, 0);
52278 +
52279 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
52280 + error = -EACCES;
52281 +
52282 if (!error) {
52283 vfs_dq_init(inode);
52284 error = do_truncate(path.dentry, length, 0, NULL);
52285 @@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
52286 if (__mnt_is_readonly(path.mnt))
52287 res = -EROFS;
52288
52289 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
52290 + res = -EACCES;
52291 +
52292 out_path_release:
52293 path_put(&path);
52294 out:
52295 @@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
52296 if (error)
52297 goto dput_and_out;
52298
52299 + gr_log_chdir(path.dentry, path.mnt);
52300 +
52301 set_fs_pwd(current->fs, &path);
52302
52303 dput_and_out:
52304 @@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
52305 goto out_putf;
52306
52307 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
52308 +
52309 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
52310 + error = -EPERM;
52311 +
52312 + if (!error)
52313 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
52314 +
52315 if (!error)
52316 set_fs_pwd(current->fs, &file->f_path);
52317 out_putf:
52318 @@ -588,7 +604,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
52319 if (!capable(CAP_SYS_CHROOT))
52320 goto dput_and_out;
52321
52322 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
52323 + goto dput_and_out;
52324 +
52325 set_fs_root(current->fs, &path);
52326 +
52327 + gr_handle_chroot_chdir(&path);
52328 +
52329 error = 0;
52330 dput_and_out:
52331 path_put(&path);
52332 @@ -616,12 +638,27 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd, mode_t, mode)
52333 err = mnt_want_write_file(file);
52334 if (err)
52335 goto out_putf;
52336 +
52337 mutex_lock(&inode->i_mutex);
52338 +
52339 + if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
52340 + err = -EACCES;
52341 + goto out_unlock;
52342 + }
52343 +
52344 if (mode == (mode_t) -1)
52345 mode = inode->i_mode;
52346 +
52347 + if (gr_handle_chroot_chmod(dentry, file->f_path.mnt, mode)) {
52348 + err = -EPERM;
52349 + goto out_unlock;
52350 + }
52351 +
52352 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
52353 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
52354 err = notify_change(dentry, &newattrs);
52355 +
52356 +out_unlock:
52357 mutex_unlock(&inode->i_mutex);
52358 mnt_drop_write(file->f_path.mnt);
52359 out_putf:
52360 @@ -645,12 +682,27 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, mode_t, mode)
52361 error = mnt_want_write(path.mnt);
52362 if (error)
52363 goto dput_and_out;
52364 +
52365 mutex_lock(&inode->i_mutex);
52366 +
52367 + if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
52368 + error = -EACCES;
52369 + goto out_unlock;
52370 + }
52371 +
52372 if (mode == (mode_t) -1)
52373 mode = inode->i_mode;
52374 +
52375 + if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
52376 + error = -EACCES;
52377 + goto out_unlock;
52378 + }
52379 +
52380 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
52381 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
52382 error = notify_change(path.dentry, &newattrs);
52383 +
52384 +out_unlock:
52385 mutex_unlock(&inode->i_mutex);
52386 mnt_drop_write(path.mnt);
52387 dput_and_out:
52388 @@ -664,12 +716,15 @@ SYSCALL_DEFINE2(chmod, const char __user *, filename, mode_t, mode)
52389 return sys_fchmodat(AT_FDCWD, filename, mode);
52390 }
52391
52392 -static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
52393 +static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
52394 {
52395 struct inode *inode = dentry->d_inode;
52396 int error;
52397 struct iattr newattrs;
52398
52399 + if (!gr_acl_handle_chown(dentry, mnt))
52400 + return -EACCES;
52401 +
52402 newattrs.ia_valid = ATTR_CTIME;
52403 if (user != (uid_t) -1) {
52404 newattrs.ia_valid |= ATTR_UID;
52405 @@ -700,7 +755,7 @@ SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group)
52406 error = mnt_want_write(path.mnt);
52407 if (error)
52408 goto out_release;
52409 - error = chown_common(path.dentry, user, group);
52410 + error = chown_common(path.dentry, user, group, path.mnt);
52411 mnt_drop_write(path.mnt);
52412 out_release:
52413 path_put(&path);
52414 @@ -725,7 +780,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user,
52415 error = mnt_want_write(path.mnt);
52416 if (error)
52417 goto out_release;
52418 - error = chown_common(path.dentry, user, group);
52419 + error = chown_common(path.dentry, user, group, path.mnt);
52420 mnt_drop_write(path.mnt);
52421 out_release:
52422 path_put(&path);
52423 @@ -744,7 +799,7 @@ SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group
52424 error = mnt_want_write(path.mnt);
52425 if (error)
52426 goto out_release;
52427 - error = chown_common(path.dentry, user, group);
52428 + error = chown_common(path.dentry, user, group, path.mnt);
52429 mnt_drop_write(path.mnt);
52430 out_release:
52431 path_put(&path);
52432 @@ -767,7 +822,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
52433 goto out_fput;
52434 dentry = file->f_path.dentry;
52435 audit_inode(NULL, dentry);
52436 - error = chown_common(dentry, user, group);
52437 + error = chown_common(dentry, user, group, file->f_path.mnt);
52438 mnt_drop_write(file->f_path.mnt);
52439 out_fput:
52440 fput(file);
52441 @@ -1036,7 +1091,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
52442 if (!IS_ERR(tmp)) {
52443 fd = get_unused_fd_flags(flags);
52444 if (fd >= 0) {
52445 - struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
52446 + struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
52447 if (IS_ERR(f)) {
52448 put_unused_fd(fd);
52449 fd = PTR_ERR(f);
52450 diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
52451 index 6ab70f4..f4103d1 100644
52452 --- a/fs/partitions/efi.c
52453 +++ b/fs/partitions/efi.c
52454 @@ -231,14 +231,14 @@ alloc_read_gpt_entries(struct block_device *bdev, gpt_header *gpt)
52455 if (!bdev || !gpt)
52456 return NULL;
52457
52458 + if (!le32_to_cpu(gpt->num_partition_entries))
52459 + return NULL;
52460 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
52461 + if (!pte)
52462 + return NULL;
52463 +
52464 count = le32_to_cpu(gpt->num_partition_entries) *
52465 le32_to_cpu(gpt->sizeof_partition_entry);
52466 - if (!count)
52467 - return NULL;
52468 - pte = kzalloc(count, GFP_KERNEL);
52469 - if (!pte)
52470 - return NULL;
52471 -
52472 if (read_lba(bdev, le64_to_cpu(gpt->partition_entry_lba),
52473 (u8 *) pte,
52474 count) < count) {
52475 diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
52476 index dd6efdb..3babc6c 100644
52477 --- a/fs/partitions/ldm.c
52478 +++ b/fs/partitions/ldm.c
52479 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
52480 ldm_error ("A VBLK claims to have %d parts.", num);
52481 return false;
52482 }
52483 +
52484 if (rec >= num) {
52485 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
52486 return false;
52487 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
52488 goto found;
52489 }
52490
52491 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
52492 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
52493 if (!f) {
52494 ldm_crit ("Out of memory.");
52495 return false;
52496 diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c
52497 index 5765198..7f8e9e0 100644
52498 --- a/fs/partitions/mac.c
52499 +++ b/fs/partitions/mac.c
52500 @@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitions *state, struct block_device *bdev)
52501 return 0; /* not a MacOS disk */
52502 }
52503 blocks_in_map = be32_to_cpu(part->map_count);
52504 - if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
52505 - put_dev_sector(sect);
52506 - return 0;
52507 - }
52508 printk(" [mac]");
52509 + if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
52510 + put_dev_sector(sect);
52511 + return 0;
52512 + }
52513 for (slot = 1; slot <= blocks_in_map; ++slot) {
52514 int pos = slot * secsize;
52515 put_dev_sector(sect);
52516 diff --git a/fs/pipe.c b/fs/pipe.c
52517 index d0cc080..8a6f211 100644
52518 --- a/fs/pipe.c
52519 +++ b/fs/pipe.c
52520 @@ -401,9 +401,9 @@ redo:
52521 }
52522 if (bufs) /* More to do? */
52523 continue;
52524 - if (!pipe->writers)
52525 + if (!atomic_read(&pipe->writers))
52526 break;
52527 - if (!pipe->waiting_writers) {
52528 + if (!atomic_read(&pipe->waiting_writers)) {
52529 /* syscall merging: Usually we must not sleep
52530 * if O_NONBLOCK is set, or if we got some data.
52531 * But if a writer sleeps in kernel space, then
52532 @@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
52533 mutex_lock(&inode->i_mutex);
52534 pipe = inode->i_pipe;
52535
52536 - if (!pipe->readers) {
52537 + if (!atomic_read(&pipe->readers)) {
52538 send_sig(SIGPIPE, current, 0);
52539 ret = -EPIPE;
52540 goto out;
52541 @@ -511,7 +511,7 @@ redo1:
52542 for (;;) {
52543 int bufs;
52544
52545 - if (!pipe->readers) {
52546 + if (!atomic_read(&pipe->readers)) {
52547 send_sig(SIGPIPE, current, 0);
52548 if (!ret)
52549 ret = -EPIPE;
52550 @@ -597,9 +597,9 @@ redo2:
52551 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
52552 do_wakeup = 0;
52553 }
52554 - pipe->waiting_writers++;
52555 + atomic_inc(&pipe->waiting_writers);
52556 pipe_wait(pipe);
52557 - pipe->waiting_writers--;
52558 + atomic_dec(&pipe->waiting_writers);
52559 }
52560 out:
52561 mutex_unlock(&inode->i_mutex);
52562 @@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table *wait)
52563 mask = 0;
52564 if (filp->f_mode & FMODE_READ) {
52565 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
52566 - if (!pipe->writers && filp->f_version != pipe->w_counter)
52567 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
52568 mask |= POLLHUP;
52569 }
52570
52571 @@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table *wait)
52572 * Most Unices do not set POLLERR for FIFOs but on Linux they
52573 * behave exactly like pipes for poll().
52574 */
52575 - if (!pipe->readers)
52576 + if (!atomic_read(&pipe->readers))
52577 mask |= POLLERR;
52578 }
52579
52580 @@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int decr, int decw)
52581
52582 mutex_lock(&inode->i_mutex);
52583 pipe = inode->i_pipe;
52584 - pipe->readers -= decr;
52585 - pipe->writers -= decw;
52586 + atomic_sub(decr, &pipe->readers);
52587 + atomic_sub(decw, &pipe->writers);
52588
52589 - if (!pipe->readers && !pipe->writers) {
52590 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
52591 free_pipe_info(inode);
52592 } else {
52593 wake_up_interruptible_sync(&pipe->wait);
52594 @@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
52595
52596 if (inode->i_pipe) {
52597 ret = 0;
52598 - inode->i_pipe->readers++;
52599 + atomic_inc(&inode->i_pipe->readers);
52600 }
52601
52602 mutex_unlock(&inode->i_mutex);
52603 @@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
52604
52605 if (inode->i_pipe) {
52606 ret = 0;
52607 - inode->i_pipe->writers++;
52608 + atomic_inc(&inode->i_pipe->writers);
52609 }
52610
52611 mutex_unlock(&inode->i_mutex);
52612 @@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
52613 if (inode->i_pipe) {
52614 ret = 0;
52615 if (filp->f_mode & FMODE_READ)
52616 - inode->i_pipe->readers++;
52617 + atomic_inc(&inode->i_pipe->readers);
52618 if (filp->f_mode & FMODE_WRITE)
52619 - inode->i_pipe->writers++;
52620 + atomic_inc(&inode->i_pipe->writers);
52621 }
52622
52623 mutex_unlock(&inode->i_mutex);
52624 @@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
52625 inode->i_pipe = NULL;
52626 }
52627
52628 -static struct vfsmount *pipe_mnt __read_mostly;
52629 +struct vfsmount *pipe_mnt __read_mostly;
52630 static int pipefs_delete_dentry(struct dentry *dentry)
52631 {
52632 /*
52633 @@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(void)
52634 goto fail_iput;
52635 inode->i_pipe = pipe;
52636
52637 - pipe->readers = pipe->writers = 1;
52638 + atomic_set(&pipe->readers, 1);
52639 + atomic_set(&pipe->writers, 1);
52640 inode->i_fop = &rdwr_pipefifo_fops;
52641
52642 /*
52643 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
52644 index 50f8f06..c5755df 100644
52645 --- a/fs/proc/Kconfig
52646 +++ b/fs/proc/Kconfig
52647 @@ -30,12 +30,12 @@ config PROC_FS
52648
52649 config PROC_KCORE
52650 bool "/proc/kcore support" if !ARM
52651 - depends on PROC_FS && MMU
52652 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
52653
52654 config PROC_VMCORE
52655 bool "/proc/vmcore support (EXPERIMENTAL)"
52656 - depends on PROC_FS && CRASH_DUMP
52657 - default y
52658 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
52659 + default n
52660 help
52661 Exports the dump image of crashed kernel in ELF format.
52662
52663 @@ -59,8 +59,8 @@ config PROC_SYSCTL
52664 limited in memory.
52665
52666 config PROC_PAGE_MONITOR
52667 - default y
52668 - depends on PROC_FS && MMU
52669 + default n
52670 + depends on PROC_FS && MMU && !GRKERNSEC
52671 bool "Enable /proc page monitoring" if EMBEDDED
52672 help
52673 Various /proc files exist to monitor process memory utilization:
52674 diff --git a/fs/proc/array.c b/fs/proc/array.c
52675 index c5ef152..1363194 100644
52676 --- a/fs/proc/array.c
52677 +++ b/fs/proc/array.c
52678 @@ -60,6 +60,7 @@
52679 #include <linux/tty.h>
52680 #include <linux/string.h>
52681 #include <linux/mman.h>
52682 +#include <linux/grsecurity.h>
52683 #include <linux/proc_fs.h>
52684 #include <linux/ioport.h>
52685 #include <linux/uaccess.h>
52686 @@ -321,6 +322,21 @@ static inline void task_context_switch_counts(struct seq_file *m,
52687 p->nivcsw);
52688 }
52689
52690 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
52691 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
52692 +{
52693 + if (p->mm)
52694 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
52695 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
52696 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
52697 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
52698 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
52699 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
52700 + else
52701 + seq_printf(m, "PaX:\t-----\n");
52702 +}
52703 +#endif
52704 +
52705 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
52706 struct pid *pid, struct task_struct *task)
52707 {
52708 @@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
52709 task_cap(m, task);
52710 cpuset_task_status_allowed(m, task);
52711 task_context_switch_counts(m, task);
52712 +
52713 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
52714 + task_pax(m, task);
52715 +#endif
52716 +
52717 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
52718 + task_grsec_rbac(m, task);
52719 +#endif
52720 +
52721 return 0;
52722 }
52723
52724 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52725 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
52726 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
52727 + _mm->pax_flags & MF_PAX_SEGMEXEC))
52728 +#endif
52729 +
52730 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52731 struct pid *pid, struct task_struct *task, int whole)
52732 {
52733 @@ -358,9 +389,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52734 cputime_t cutime, cstime, utime, stime;
52735 cputime_t cgtime, gtime;
52736 unsigned long rsslim = 0;
52737 - char tcomm[sizeof(task->comm)];
52738 + char tcomm[sizeof(task->comm)] = { 0 };
52739 unsigned long flags;
52740
52741 + pax_track_stack();
52742 +
52743 state = *get_task_state(task);
52744 vsize = eip = esp = 0;
52745 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
52746 @@ -433,6 +466,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52747 gtime = task_gtime(task);
52748 }
52749
52750 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52751 + if (PAX_RAND_FLAGS(mm)) {
52752 + eip = 0;
52753 + esp = 0;
52754 + wchan = 0;
52755 + }
52756 +#endif
52757 +#ifdef CONFIG_GRKERNSEC_HIDESYM
52758 + wchan = 0;
52759 + eip =0;
52760 + esp =0;
52761 +#endif
52762 +
52763 /* scale priority and nice values from timeslices to -20..20 */
52764 /* to make it look like a "normal" Unix priority/nice value */
52765 priority = task_prio(task);
52766 @@ -473,9 +519,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52767 vsize,
52768 mm ? get_mm_rss(mm) : 0,
52769 rsslim,
52770 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52771 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
52772 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
52773 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
52774 +#else
52775 mm ? (permitted ? mm->start_code : 1) : 0,
52776 mm ? (permitted ? mm->end_code : 1) : 0,
52777 (permitted && mm) ? mm->start_stack : 0,
52778 +#endif
52779 esp,
52780 eip,
52781 /* The signal information here is obsolete.
52782 @@ -528,3 +580,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
52783
52784 return 0;
52785 }
52786 +
52787 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
52788 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
52789 +{
52790 + u32 curr_ip = 0;
52791 + unsigned long flags;
52792 +
52793 + if (lock_task_sighand(task, &flags)) {
52794 + curr_ip = task->signal->curr_ip;
52795 + unlock_task_sighand(task, &flags);
52796 + }
52797 +
52798 + return sprintf(buffer, "%pI4\n", &curr_ip);
52799 +}
52800 +#endif
52801 diff --git a/fs/proc/base.c b/fs/proc/base.c
52802 index 67f7dc0..e95ea4f 100644
52803 --- a/fs/proc/base.c
52804 +++ b/fs/proc/base.c
52805 @@ -102,6 +102,22 @@ struct pid_entry {
52806 union proc_op op;
52807 };
52808
52809 +struct getdents_callback {
52810 + struct linux_dirent __user * current_dir;
52811 + struct linux_dirent __user * previous;
52812 + struct file * file;
52813 + int count;
52814 + int error;
52815 +};
52816 +
52817 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
52818 + loff_t offset, u64 ino, unsigned int d_type)
52819 +{
52820 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
52821 + buf->error = -EINVAL;
52822 + return 0;
52823 +}
52824 +
52825 #define NOD(NAME, MODE, IOP, FOP, OP) { \
52826 .name = (NAME), \
52827 .len = sizeof(NAME) - 1, \
52828 @@ -213,6 +229,9 @@ static int check_mem_permission(struct task_struct *task)
52829 if (task == current)
52830 return 0;
52831
52832 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
52833 + return -EPERM;
52834 +
52835 /*
52836 * If current is actively ptrace'ing, and would also be
52837 * permitted to freshly attach with ptrace now, permit it.
52838 @@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
52839 if (!mm->arg_end)
52840 goto out_mm; /* Shh! No looking before we're done */
52841
52842 + if (gr_acl_handle_procpidmem(task))
52843 + goto out_mm;
52844 +
52845 len = mm->arg_end - mm->arg_start;
52846
52847 if (len > PAGE_SIZE)
52848 @@ -287,12 +309,28 @@ out:
52849 return res;
52850 }
52851
52852 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52853 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
52854 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
52855 + _mm->pax_flags & MF_PAX_SEGMEXEC))
52856 +#endif
52857 +
52858 static int proc_pid_auxv(struct task_struct *task, char *buffer)
52859 {
52860 int res = 0;
52861 struct mm_struct *mm = get_task_mm(task);
52862 if (mm) {
52863 unsigned int nwords = 0;
52864 +
52865 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52866 + /* allow if we're currently ptracing this task */
52867 + if (PAX_RAND_FLAGS(mm) &&
52868 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
52869 + mmput(mm);
52870 + return 0;
52871 + }
52872 +#endif
52873 +
52874 do {
52875 nwords += 2;
52876 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
52877 @@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
52878 }
52879
52880
52881 -#ifdef CONFIG_KALLSYMS
52882 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
52883 /*
52884 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
52885 * Returns the resolved symbol. If that fails, simply return the address.
52886 @@ -345,7 +383,7 @@ static void unlock_trace(struct task_struct *task)
52887 mutex_unlock(&task->cred_guard_mutex);
52888 }
52889
52890 -#ifdef CONFIG_STACKTRACE
52891 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
52892
52893 #define MAX_STACK_TRACE_DEPTH 64
52894
52895 @@ -545,7 +583,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
52896 return count;
52897 }
52898
52899 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
52900 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
52901 static int proc_pid_syscall(struct task_struct *task, char *buffer)
52902 {
52903 long nr;
52904 @@ -574,7 +612,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
52905 /************************************************************************/
52906
52907 /* permission checks */
52908 -static int proc_fd_access_allowed(struct inode *inode)
52909 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
52910 {
52911 struct task_struct *task;
52912 int allowed = 0;
52913 @@ -584,7 +622,10 @@ static int proc_fd_access_allowed(struct inode *inode)
52914 */
52915 task = get_proc_task(inode);
52916 if (task) {
52917 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
52918 + if (log)
52919 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
52920 + else
52921 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
52922 put_task_struct(task);
52923 }
52924 return allowed;
52925 @@ -963,6 +1004,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
52926 if (!task)
52927 goto out_no_task;
52928
52929 + if (gr_acl_handle_procpidmem(task))
52930 + goto out;
52931 +
52932 if (!ptrace_may_access(task, PTRACE_MODE_READ))
52933 goto out;
52934
52935 @@ -1377,7 +1421,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
52936 path_put(&nd->path);
52937
52938 /* Are we allowed to snoop on the tasks file descriptors? */
52939 - if (!proc_fd_access_allowed(inode))
52940 + if (!proc_fd_access_allowed(inode,0))
52941 goto out;
52942
52943 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
52944 @@ -1417,8 +1461,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
52945 struct path path;
52946
52947 /* Are we allowed to snoop on the tasks file descriptors? */
52948 - if (!proc_fd_access_allowed(inode))
52949 - goto out;
52950 + /* logging this is needed for learning on chromium to work properly,
52951 + but we don't want to flood the logs from 'ps' which does a readlink
52952 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
52953 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
52954 + */
52955 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
52956 + if (!proc_fd_access_allowed(inode,0))
52957 + goto out;
52958 + } else {
52959 + if (!proc_fd_access_allowed(inode,1))
52960 + goto out;
52961 + }
52962
52963 error = PROC_I(inode)->op.proc_get_link(inode, &path);
52964 if (error)
52965 @@ -1483,7 +1537,11 @@ static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_st
52966 rcu_read_lock();
52967 cred = __task_cred(task);
52968 inode->i_uid = cred->euid;
52969 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
52970 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
52971 +#else
52972 inode->i_gid = cred->egid;
52973 +#endif
52974 rcu_read_unlock();
52975 }
52976 security_task_to_inode(task, inode);
52977 @@ -1501,6 +1559,9 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
52978 struct inode *inode = dentry->d_inode;
52979 struct task_struct *task;
52980 const struct cred *cred;
52981 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52982 + const struct cred *tmpcred = current_cred();
52983 +#endif
52984
52985 generic_fillattr(inode, stat);
52986
52987 @@ -1508,13 +1569,41 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
52988 stat->uid = 0;
52989 stat->gid = 0;
52990 task = pid_task(proc_pid(inode), PIDTYPE_PID);
52991 +
52992 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
52993 + rcu_read_unlock();
52994 + return -ENOENT;
52995 + }
52996 +
52997 if (task) {
52998 + cred = __task_cred(task);
52999 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53000 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
53001 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53002 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
53003 +#endif
53004 + ) {
53005 +#endif
53006 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
53007 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53008 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
53009 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53010 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
53011 +#endif
53012 task_dumpable(task)) {
53013 - cred = __task_cred(task);
53014 stat->uid = cred->euid;
53015 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53016 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
53017 +#else
53018 stat->gid = cred->egid;
53019 +#endif
53020 }
53021 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53022 + } else {
53023 + rcu_read_unlock();
53024 + return -ENOENT;
53025 + }
53026 +#endif
53027 }
53028 rcu_read_unlock();
53029 return 0;
53030 @@ -1545,11 +1634,20 @@ static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
53031
53032 if (task) {
53033 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
53034 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53035 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
53036 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53037 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
53038 +#endif
53039 task_dumpable(task)) {
53040 rcu_read_lock();
53041 cred = __task_cred(task);
53042 inode->i_uid = cred->euid;
53043 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53044 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53045 +#else
53046 inode->i_gid = cred->egid;
53047 +#endif
53048 rcu_read_unlock();
53049 } else {
53050 inode->i_uid = 0;
53051 @@ -1670,7 +1768,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
53052 int fd = proc_fd(inode);
53053
53054 if (task) {
53055 - files = get_files_struct(task);
53056 + if (!gr_acl_handle_procpidmem(task))
53057 + files = get_files_struct(task);
53058 put_task_struct(task);
53059 }
53060 if (files) {
53061 @@ -1922,12 +2021,22 @@ static const struct file_operations proc_fd_operations = {
53062 static int proc_fd_permission(struct inode *inode, int mask)
53063 {
53064 int rv;
53065 + struct task_struct *task;
53066
53067 rv = generic_permission(inode, mask, NULL);
53068 - if (rv == 0)
53069 - return 0;
53070 +
53071 if (task_pid(current) == proc_pid(inode))
53072 rv = 0;
53073 +
53074 + task = get_proc_task(inode);
53075 + if (task == NULL)
53076 + return rv;
53077 +
53078 + if (gr_acl_handle_procpidmem(task))
53079 + rv = -EACCES;
53080 +
53081 + put_task_struct(task);
53082 +
53083 return rv;
53084 }
53085
53086 @@ -2036,6 +2145,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
53087 if (!task)
53088 goto out_no_task;
53089
53090 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
53091 + goto out;
53092 +
53093 /*
53094 * Yes, it does not scale. And it should not. Don't add
53095 * new entries into /proc/<tgid>/ without very good reasons.
53096 @@ -2080,6 +2192,9 @@ static int proc_pident_readdir(struct file *filp,
53097 if (!task)
53098 goto out_no_task;
53099
53100 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
53101 + goto out;
53102 +
53103 ret = 0;
53104 i = filp->f_pos;
53105 switch (i) {
53106 @@ -2347,7 +2462,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
53107 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
53108 void *cookie)
53109 {
53110 - char *s = nd_get_link(nd);
53111 + const char *s = nd_get_link(nd);
53112 if (!IS_ERR(s))
53113 __putname(s);
53114 }
53115 @@ -2553,7 +2668,7 @@ static const struct pid_entry tgid_base_stuff[] = {
53116 #ifdef CONFIG_SCHED_DEBUG
53117 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
53118 #endif
53119 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
53120 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53121 INF("syscall", S_IRUGO, proc_pid_syscall),
53122 #endif
53123 INF("cmdline", S_IRUGO, proc_pid_cmdline),
53124 @@ -2578,10 +2693,10 @@ static const struct pid_entry tgid_base_stuff[] = {
53125 #ifdef CONFIG_SECURITY
53126 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
53127 #endif
53128 -#ifdef CONFIG_KALLSYMS
53129 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53130 INF("wchan", S_IRUGO, proc_pid_wchan),
53131 #endif
53132 -#ifdef CONFIG_STACKTRACE
53133 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53134 ONE("stack", S_IRUGO, proc_pid_stack),
53135 #endif
53136 #ifdef CONFIG_SCHEDSTATS
53137 @@ -2611,6 +2726,9 @@ static const struct pid_entry tgid_base_stuff[] = {
53138 #ifdef CONFIG_TASK_IO_ACCOUNTING
53139 INF("io", S_IRUSR, proc_tgid_io_accounting),
53140 #endif
53141 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
53142 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
53143 +#endif
53144 };
53145
53146 static int proc_tgid_base_readdir(struct file * filp,
53147 @@ -2735,7 +2853,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
53148 if (!inode)
53149 goto out;
53150
53151 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53152 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
53153 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53154 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53155 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
53156 +#else
53157 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
53158 +#endif
53159 inode->i_op = &proc_tgid_base_inode_operations;
53160 inode->i_fop = &proc_tgid_base_operations;
53161 inode->i_flags|=S_IMMUTABLE;
53162 @@ -2777,7 +2902,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
53163 if (!task)
53164 goto out;
53165
53166 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
53167 + goto out_put_task;
53168 +
53169 result = proc_pid_instantiate(dir, dentry, task, NULL);
53170 +out_put_task:
53171 put_task_struct(task);
53172 out:
53173 return result;
53174 @@ -2842,6 +2971,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
53175 {
53176 unsigned int nr;
53177 struct task_struct *reaper;
53178 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53179 + const struct cred *tmpcred = current_cred();
53180 + const struct cred *itercred;
53181 +#endif
53182 + filldir_t __filldir = filldir;
53183 struct tgid_iter iter;
53184 struct pid_namespace *ns;
53185
53186 @@ -2865,8 +2999,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
53187 for (iter = next_tgid(ns, iter);
53188 iter.task;
53189 iter.tgid += 1, iter = next_tgid(ns, iter)) {
53190 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53191 + rcu_read_lock();
53192 + itercred = __task_cred(iter.task);
53193 +#endif
53194 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
53195 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53196 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
53197 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53198 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
53199 +#endif
53200 + )
53201 +#endif
53202 + )
53203 + __filldir = &gr_fake_filldir;
53204 + else
53205 + __filldir = filldir;
53206 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53207 + rcu_read_unlock();
53208 +#endif
53209 filp->f_pos = iter.tgid + TGID_OFFSET;
53210 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
53211 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
53212 put_task_struct(iter.task);
53213 goto out;
53214 }
53215 @@ -2892,7 +3045,7 @@ static const struct pid_entry tid_base_stuff[] = {
53216 #ifdef CONFIG_SCHED_DEBUG
53217 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
53218 #endif
53219 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
53220 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53221 INF("syscall", S_IRUGO, proc_pid_syscall),
53222 #endif
53223 INF("cmdline", S_IRUGO, proc_pid_cmdline),
53224 @@ -2916,10 +3069,10 @@ static const struct pid_entry tid_base_stuff[] = {
53225 #ifdef CONFIG_SECURITY
53226 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
53227 #endif
53228 -#ifdef CONFIG_KALLSYMS
53229 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53230 INF("wchan", S_IRUGO, proc_pid_wchan),
53231 #endif
53232 -#ifdef CONFIG_STACKTRACE
53233 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53234 ONE("stack", S_IRUGO, proc_pid_stack),
53235 #endif
53236 #ifdef CONFIG_SCHEDSTATS
53237 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
53238 index 82676e3..5f8518a 100644
53239 --- a/fs/proc/cmdline.c
53240 +++ b/fs/proc/cmdline.c
53241 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
53242
53243 static int __init proc_cmdline_init(void)
53244 {
53245 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
53246 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
53247 +#else
53248 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
53249 +#endif
53250 return 0;
53251 }
53252 module_init(proc_cmdline_init);
53253 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
53254 index 59ee7da..469b4b6 100644
53255 --- a/fs/proc/devices.c
53256 +++ b/fs/proc/devices.c
53257 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
53258
53259 static int __init proc_devices_init(void)
53260 {
53261 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
53262 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
53263 +#else
53264 proc_create("devices", 0, NULL, &proc_devinfo_operations);
53265 +#endif
53266 return 0;
53267 }
53268 module_init(proc_devices_init);
53269 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
53270 index d78ade3..81767f9 100644
53271 --- a/fs/proc/inode.c
53272 +++ b/fs/proc/inode.c
53273 @@ -18,12 +18,19 @@
53274 #include <linux/module.h>
53275 #include <linux/smp_lock.h>
53276 #include <linux/sysctl.h>
53277 +#include <linux/grsecurity.h>
53278
53279 #include <asm/system.h>
53280 #include <asm/uaccess.h>
53281
53282 #include "internal.h"
53283
53284 +#ifdef CONFIG_PROC_SYSCTL
53285 +extern const struct inode_operations proc_sys_inode_operations;
53286 +extern const struct inode_operations proc_sys_dir_operations;
53287 +#endif
53288 +
53289 +
53290 struct proc_dir_entry *de_get(struct proc_dir_entry *de)
53291 {
53292 atomic_inc(&de->count);
53293 @@ -62,6 +69,13 @@ static void proc_delete_inode(struct inode *inode)
53294 de_put(de);
53295 if (PROC_I(inode)->sysctl)
53296 sysctl_head_put(PROC_I(inode)->sysctl);
53297 +
53298 +#ifdef CONFIG_PROC_SYSCTL
53299 + if (inode->i_op == &proc_sys_inode_operations ||
53300 + inode->i_op == &proc_sys_dir_operations)
53301 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
53302 +#endif
53303 +
53304 clear_inode(inode);
53305 }
53306
53307 @@ -457,7 +471,11 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
53308 if (de->mode) {
53309 inode->i_mode = de->mode;
53310 inode->i_uid = de->uid;
53311 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53312 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53313 +#else
53314 inode->i_gid = de->gid;
53315 +#endif
53316 }
53317 if (de->size)
53318 inode->i_size = de->size;
53319 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
53320 index 753ca37..26bcf3b 100644
53321 --- a/fs/proc/internal.h
53322 +++ b/fs/proc/internal.h
53323 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
53324 struct pid *pid, struct task_struct *task);
53325 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
53326 struct pid *pid, struct task_struct *task);
53327 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
53328 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
53329 +#endif
53330 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
53331
53332 extern const struct file_operations proc_maps_operations;
53333 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
53334 index b442dac..aab29cb 100644
53335 --- a/fs/proc/kcore.c
53336 +++ b/fs/proc/kcore.c
53337 @@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
53338 off_t offset = 0;
53339 struct kcore_list *m;
53340
53341 + pax_track_stack();
53342 +
53343 /* setup ELF header */
53344 elf = (struct elfhdr *) bufp;
53345 bufp += sizeof(struct elfhdr);
53346 @@ -477,9 +479,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
53347 * the addresses in the elf_phdr on our list.
53348 */
53349 start = kc_offset_to_vaddr(*fpos - elf_buflen);
53350 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
53351 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
53352 + if (tsz > buflen)
53353 tsz = buflen;
53354 -
53355 +
53356 while (buflen) {
53357 struct kcore_list *m;
53358
53359 @@ -508,20 +511,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
53360 kfree(elf_buf);
53361 } else {
53362 if (kern_addr_valid(start)) {
53363 - unsigned long n;
53364 + char *elf_buf;
53365 + mm_segment_t oldfs;
53366
53367 - n = copy_to_user(buffer, (char *)start, tsz);
53368 - /*
53369 - * We cannot distingush between fault on source
53370 - * and fault on destination. When this happens
53371 - * we clear too and hope it will trigger the
53372 - * EFAULT again.
53373 - */
53374 - if (n) {
53375 - if (clear_user(buffer + tsz - n,
53376 - n))
53377 + elf_buf = kmalloc(tsz, GFP_KERNEL);
53378 + if (!elf_buf)
53379 + return -ENOMEM;
53380 + oldfs = get_fs();
53381 + set_fs(KERNEL_DS);
53382 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
53383 + set_fs(oldfs);
53384 + if (copy_to_user(buffer, elf_buf, tsz)) {
53385 + kfree(elf_buf);
53386 return -EFAULT;
53387 + }
53388 }
53389 + set_fs(oldfs);
53390 + kfree(elf_buf);
53391 } else {
53392 if (clear_user(buffer, tsz))
53393 return -EFAULT;
53394 @@ -541,6 +547,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
53395
53396 static int open_kcore(struct inode *inode, struct file *filp)
53397 {
53398 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
53399 + return -EPERM;
53400 +#endif
53401 if (!capable(CAP_SYS_RAWIO))
53402 return -EPERM;
53403 if (kcore_need_update)
53404 diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
53405 index 7ca7834..cfe90a4 100644
53406 --- a/fs/proc/kmsg.c
53407 +++ b/fs/proc/kmsg.c
53408 @@ -12,37 +12,37 @@
53409 #include <linux/poll.h>
53410 #include <linux/proc_fs.h>
53411 #include <linux/fs.h>
53412 +#include <linux/syslog.h>
53413
53414 #include <asm/uaccess.h>
53415 #include <asm/io.h>
53416
53417 extern wait_queue_head_t log_wait;
53418
53419 -extern int do_syslog(int type, char __user *bug, int count);
53420 -
53421 static int kmsg_open(struct inode * inode, struct file * file)
53422 {
53423 - return do_syslog(1,NULL,0);
53424 + return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_FILE);
53425 }
53426
53427 static int kmsg_release(struct inode * inode, struct file * file)
53428 {
53429 - (void) do_syslog(0,NULL,0);
53430 + (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_FILE);
53431 return 0;
53432 }
53433
53434 static ssize_t kmsg_read(struct file *file, char __user *buf,
53435 size_t count, loff_t *ppos)
53436 {
53437 - if ((file->f_flags & O_NONBLOCK) && !do_syslog(9, NULL, 0))
53438 + if ((file->f_flags & O_NONBLOCK) &&
53439 + !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
53440 return -EAGAIN;
53441 - return do_syslog(2, buf, count);
53442 + return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_FILE);
53443 }
53444
53445 static unsigned int kmsg_poll(struct file *file, poll_table *wait)
53446 {
53447 poll_wait(file, &log_wait, wait);
53448 - if (do_syslog(9, NULL, 0))
53449 + if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
53450 return POLLIN | POLLRDNORM;
53451 return 0;
53452 }
53453 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
53454 index a65239c..ad1182a 100644
53455 --- a/fs/proc/meminfo.c
53456 +++ b/fs/proc/meminfo.c
53457 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
53458 unsigned long pages[NR_LRU_LISTS];
53459 int lru;
53460
53461 + pax_track_stack();
53462 +
53463 /*
53464 * display in kilobytes.
53465 */
53466 @@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
53467 vmi.used >> 10,
53468 vmi.largest_chunk >> 10
53469 #ifdef CONFIG_MEMORY_FAILURE
53470 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
53471 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
53472 #endif
53473 );
53474
53475 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
53476 index 9fe7d7e..cdb62c9 100644
53477 --- a/fs/proc/nommu.c
53478 +++ b/fs/proc/nommu.c
53479 @@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
53480 if (len < 1)
53481 len = 1;
53482 seq_printf(m, "%*c", len, ' ');
53483 - seq_path(m, &file->f_path, "");
53484 + seq_path(m, &file->f_path, "\n\\");
53485 }
53486
53487 seq_putc(m, '\n');
53488 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
53489 index 04d1270..25e1173 100644
53490 --- a/fs/proc/proc_net.c
53491 +++ b/fs/proc/proc_net.c
53492 @@ -104,6 +104,17 @@ static struct net *get_proc_task_net(struct inode *dir)
53493 struct task_struct *task;
53494 struct nsproxy *ns;
53495 struct net *net = NULL;
53496 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53497 + const struct cred *cred = current_cred();
53498 +#endif
53499 +
53500 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53501 + if (cred->fsuid)
53502 + return net;
53503 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53504 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
53505 + return net;
53506 +#endif
53507
53508 rcu_read_lock();
53509 task = pid_task(proc_pid(dir), PIDTYPE_PID);
53510 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
53511 index f667e8a..55f4d96 100644
53512 --- a/fs/proc/proc_sysctl.c
53513 +++ b/fs/proc/proc_sysctl.c
53514 @@ -7,11 +7,13 @@
53515 #include <linux/security.h>
53516 #include "internal.h"
53517
53518 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
53519 +
53520 static const struct dentry_operations proc_sys_dentry_operations;
53521 static const struct file_operations proc_sys_file_operations;
53522 -static const struct inode_operations proc_sys_inode_operations;
53523 +const struct inode_operations proc_sys_inode_operations;
53524 static const struct file_operations proc_sys_dir_file_operations;
53525 -static const struct inode_operations proc_sys_dir_operations;
53526 +const struct inode_operations proc_sys_dir_operations;
53527
53528 static struct inode *proc_sys_make_inode(struct super_block *sb,
53529 struct ctl_table_header *head, struct ctl_table *table)
53530 @@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
53531 if (!p)
53532 goto out;
53533
53534 + if (gr_handle_sysctl(p, MAY_EXEC))
53535 + goto out;
53536 +
53537 err = ERR_PTR(-ENOMEM);
53538 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
53539 if (h)
53540 @@ -119,6 +124,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
53541
53542 err = NULL;
53543 dentry->d_op = &proc_sys_dentry_operations;
53544 +
53545 + gr_handle_proc_create(dentry, inode);
53546 +
53547 d_add(dentry, inode);
53548
53549 out:
53550 @@ -200,6 +208,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
53551 return -ENOMEM;
53552 } else {
53553 child->d_op = &proc_sys_dentry_operations;
53554 +
53555 + gr_handle_proc_create(child, inode);
53556 +
53557 d_add(child, inode);
53558 }
53559 } else {
53560 @@ -228,6 +239,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
53561 if (*pos < file->f_pos)
53562 continue;
53563
53564 + if (gr_handle_sysctl(table, 0))
53565 + continue;
53566 +
53567 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
53568 if (res)
53569 return res;
53570 @@ -344,6 +358,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
53571 if (IS_ERR(head))
53572 return PTR_ERR(head);
53573
53574 + if (table && gr_handle_sysctl(table, MAY_EXEC))
53575 + return -ENOENT;
53576 +
53577 generic_fillattr(inode, stat);
53578 if (table)
53579 stat->mode = (stat->mode & S_IFMT) | table->mode;
53580 @@ -358,17 +375,18 @@ static const struct file_operations proc_sys_file_operations = {
53581 };
53582
53583 static const struct file_operations proc_sys_dir_file_operations = {
53584 + .read = generic_read_dir,
53585 .readdir = proc_sys_readdir,
53586 .llseek = generic_file_llseek,
53587 };
53588
53589 -static const struct inode_operations proc_sys_inode_operations = {
53590 +const struct inode_operations proc_sys_inode_operations = {
53591 .permission = proc_sys_permission,
53592 .setattr = proc_sys_setattr,
53593 .getattr = proc_sys_getattr,
53594 };
53595
53596 -static const struct inode_operations proc_sys_dir_operations = {
53597 +const struct inode_operations proc_sys_dir_operations = {
53598 .lookup = proc_sys_lookup,
53599 .permission = proc_sys_permission,
53600 .setattr = proc_sys_setattr,
53601 diff --git a/fs/proc/root.c b/fs/proc/root.c
53602 index b080b79..d957e63 100644
53603 --- a/fs/proc/root.c
53604 +++ b/fs/proc/root.c
53605 @@ -134,7 +134,15 @@ void __init proc_root_init(void)
53606 #ifdef CONFIG_PROC_DEVICETREE
53607 proc_device_tree_init();
53608 #endif
53609 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
53610 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53611 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
53612 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53613 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
53614 +#endif
53615 +#else
53616 proc_mkdir("bus", NULL);
53617 +#endif
53618 proc_sys_init();
53619 }
53620
53621 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
53622 index 3b7b82a..7dbb571 100644
53623 --- a/fs/proc/task_mmu.c
53624 +++ b/fs/proc/task_mmu.c
53625 @@ -46,15 +46,26 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
53626 "VmStk:\t%8lu kB\n"
53627 "VmExe:\t%8lu kB\n"
53628 "VmLib:\t%8lu kB\n"
53629 - "VmPTE:\t%8lu kB\n",
53630 - hiwater_vm << (PAGE_SHIFT-10),
53631 + "VmPTE:\t%8lu kB\n"
53632 +
53633 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
53634 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
53635 +#endif
53636 +
53637 + ,hiwater_vm << (PAGE_SHIFT-10),
53638 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
53639 mm->locked_vm << (PAGE_SHIFT-10),
53640 hiwater_rss << (PAGE_SHIFT-10),
53641 total_rss << (PAGE_SHIFT-10),
53642 data << (PAGE_SHIFT-10),
53643 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
53644 - (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
53645 + (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
53646 +
53647 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
53648 + , mm->context.user_cs_base, mm->context.user_cs_limit
53649 +#endif
53650 +
53651 + );
53652 }
53653
53654 unsigned long task_vsize(struct mm_struct *mm)
53655 @@ -175,7 +186,8 @@ static void m_stop(struct seq_file *m, void *v)
53656 struct proc_maps_private *priv = m->private;
53657 struct vm_area_struct *vma = v;
53658
53659 - vma_stop(priv, vma);
53660 + if (!IS_ERR(vma))
53661 + vma_stop(priv, vma);
53662 if (priv->task)
53663 put_task_struct(priv->task);
53664 }
53665 @@ -199,6 +211,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
53666 return ret;
53667 }
53668
53669 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53670 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
53671 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
53672 + _mm->pax_flags & MF_PAX_SEGMEXEC))
53673 +#endif
53674 +
53675 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53676 {
53677 struct mm_struct *mm = vma->vm_mm;
53678 @@ -206,7 +224,6 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53679 int flags = vma->vm_flags;
53680 unsigned long ino = 0;
53681 unsigned long long pgoff = 0;
53682 - unsigned long start;
53683 dev_t dev = 0;
53684 int len;
53685
53686 @@ -217,20 +234,23 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53687 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
53688 }
53689
53690 - /* We don't show the stack guard page in /proc/maps */
53691 - start = vma->vm_start;
53692 - if (vma->vm_flags & VM_GROWSDOWN)
53693 - if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
53694 - start += PAGE_SIZE;
53695 -
53696 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
53697 - start,
53698 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53699 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
53700 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
53701 +#else
53702 + vma->vm_start,
53703 vma->vm_end,
53704 +#endif
53705 flags & VM_READ ? 'r' : '-',
53706 flags & VM_WRITE ? 'w' : '-',
53707 flags & VM_EXEC ? 'x' : '-',
53708 flags & VM_MAYSHARE ? 's' : 'p',
53709 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53710 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
53711 +#else
53712 pgoff,
53713 +#endif
53714 MAJOR(dev), MINOR(dev), ino, &len);
53715
53716 /*
53717 @@ -239,7 +259,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53718 */
53719 if (file) {
53720 pad_len_spaces(m, len);
53721 - seq_path(m, &file->f_path, "\n");
53722 + seq_path(m, &file->f_path, "\n\\");
53723 } else {
53724 const char *name = arch_vma_name(vma);
53725 if (!name) {
53726 @@ -247,8 +267,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53727 if (vma->vm_start <= mm->brk &&
53728 vma->vm_end >= mm->start_brk) {
53729 name = "[heap]";
53730 - } else if (vma->vm_start <= mm->start_stack &&
53731 - vma->vm_end >= mm->start_stack) {
53732 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
53733 + (vma->vm_start <= mm->start_stack &&
53734 + vma->vm_end >= mm->start_stack)) {
53735 name = "[stack]";
53736 }
53737 } else {
53738 @@ -391,9 +412,16 @@ static int show_smap(struct seq_file *m, void *v)
53739 };
53740
53741 memset(&mss, 0, sizeof mss);
53742 - mss.vma = vma;
53743 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
53744 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
53745 +
53746 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53747 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
53748 +#endif
53749 + mss.vma = vma;
53750 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
53751 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
53752 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53753 + }
53754 +#endif
53755
53756 show_map_vma(m, vma);
53757
53758 @@ -409,7 +437,11 @@ static int show_smap(struct seq_file *m, void *v)
53759 "Swap: %8lu kB\n"
53760 "KernelPageSize: %8lu kB\n"
53761 "MMUPageSize: %8lu kB\n",
53762 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53763 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
53764 +#else
53765 (vma->vm_end - vma->vm_start) >> 10,
53766 +#endif
53767 mss.resident >> 10,
53768 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
53769 mss.shared_clean >> 10,
53770 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
53771 index 8f5c05d..c99c76d 100644
53772 --- a/fs/proc/task_nommu.c
53773 +++ b/fs/proc/task_nommu.c
53774 @@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
53775 else
53776 bytes += kobjsize(mm);
53777
53778 - if (current->fs && current->fs->users > 1)
53779 + if (current->fs && atomic_read(&current->fs->users) > 1)
53780 sbytes += kobjsize(current->fs);
53781 else
53782 bytes += kobjsize(current->fs);
53783 @@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
53784 if (len < 1)
53785 len = 1;
53786 seq_printf(m, "%*c", len, ' ');
53787 - seq_path(m, &file->f_path, "");
53788 + seq_path(m, &file->f_path, "\n\\");
53789 }
53790
53791 seq_putc(m, '\n');
53792 diff --git a/fs/readdir.c b/fs/readdir.c
53793 index 7723401..30059a6 100644
53794 --- a/fs/readdir.c
53795 +++ b/fs/readdir.c
53796 @@ -16,6 +16,7 @@
53797 #include <linux/security.h>
53798 #include <linux/syscalls.h>
53799 #include <linux/unistd.h>
53800 +#include <linux/namei.h>
53801
53802 #include <asm/uaccess.h>
53803
53804 @@ -67,6 +68,7 @@ struct old_linux_dirent {
53805
53806 struct readdir_callback {
53807 struct old_linux_dirent __user * dirent;
53808 + struct file * file;
53809 int result;
53810 };
53811
53812 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
53813 buf->result = -EOVERFLOW;
53814 return -EOVERFLOW;
53815 }
53816 +
53817 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
53818 + return 0;
53819 +
53820 buf->result++;
53821 dirent = buf->dirent;
53822 if (!access_ok(VERIFY_WRITE, dirent,
53823 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
53824
53825 buf.result = 0;
53826 buf.dirent = dirent;
53827 + buf.file = file;
53828
53829 error = vfs_readdir(file, fillonedir, &buf);
53830 if (buf.result)
53831 @@ -142,6 +149,7 @@ struct linux_dirent {
53832 struct getdents_callback {
53833 struct linux_dirent __user * current_dir;
53834 struct linux_dirent __user * previous;
53835 + struct file * file;
53836 int count;
53837 int error;
53838 };
53839 @@ -162,6 +170,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
53840 buf->error = -EOVERFLOW;
53841 return -EOVERFLOW;
53842 }
53843 +
53844 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
53845 + return 0;
53846 +
53847 dirent = buf->previous;
53848 if (dirent) {
53849 if (__put_user(offset, &dirent->d_off))
53850 @@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
53851 buf.previous = NULL;
53852 buf.count = count;
53853 buf.error = 0;
53854 + buf.file = file;
53855
53856 error = vfs_readdir(file, filldir, &buf);
53857 if (error >= 0)
53858 @@ -228,6 +241,7 @@ out:
53859 struct getdents_callback64 {
53860 struct linux_dirent64 __user * current_dir;
53861 struct linux_dirent64 __user * previous;
53862 + struct file *file;
53863 int count;
53864 int error;
53865 };
53866 @@ -242,6 +256,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
53867 buf->error = -EINVAL; /* only used if we fail.. */
53868 if (reclen > buf->count)
53869 return -EINVAL;
53870 +
53871 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
53872 + return 0;
53873 +
53874 dirent = buf->previous;
53875 if (dirent) {
53876 if (__put_user(offset, &dirent->d_off))
53877 @@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
53878
53879 buf.current_dir = dirent;
53880 buf.previous = NULL;
53881 + buf.file = file;
53882 buf.count = count;
53883 buf.error = 0;
53884
53885 @@ -297,7 +316,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
53886 error = buf.error;
53887 lastdirent = buf.previous;
53888 if (lastdirent) {
53889 - typeof(lastdirent->d_off) d_off = file->f_pos;
53890 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
53891 if (__put_user(d_off, &lastdirent->d_off))
53892 error = -EFAULT;
53893 else
53894 diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
53895 index d42c30c..4fd8718 100644
53896 --- a/fs/reiserfs/dir.c
53897 +++ b/fs/reiserfs/dir.c
53898 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
53899 struct reiserfs_dir_entry de;
53900 int ret = 0;
53901
53902 + pax_track_stack();
53903 +
53904 reiserfs_write_lock(inode->i_sb);
53905
53906 reiserfs_check_lock_depth(inode->i_sb, "readdir");
53907 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
53908 index 128d3f7..8840d44 100644
53909 --- a/fs/reiserfs/do_balan.c
53910 +++ b/fs/reiserfs/do_balan.c
53911 @@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
53912 return;
53913 }
53914
53915 - atomic_inc(&(fs_generation(tb->tb_sb)));
53916 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
53917 do_balance_starts(tb);
53918
53919 /* balance leaf returns 0 except if combining L R and S into
53920 diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
53921 index 72cb1cc..d0e3181 100644
53922 --- a/fs/reiserfs/item_ops.c
53923 +++ b/fs/reiserfs/item_ops.c
53924 @@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_item *vi)
53925 vi->vi_index, vi->vi_type, vi->vi_ih);
53926 }
53927
53928 -static struct item_operations stat_data_ops = {
53929 +static const struct item_operations stat_data_ops = {
53930 .bytes_number = sd_bytes_number,
53931 .decrement_key = sd_decrement_key,
53932 .is_left_mergeable = sd_is_left_mergeable,
53933 @@ -196,7 +196,7 @@ static void direct_print_vi(struct virtual_item *vi)
53934 vi->vi_index, vi->vi_type, vi->vi_ih);
53935 }
53936
53937 -static struct item_operations direct_ops = {
53938 +static const struct item_operations direct_ops = {
53939 .bytes_number = direct_bytes_number,
53940 .decrement_key = direct_decrement_key,
53941 .is_left_mergeable = direct_is_left_mergeable,
53942 @@ -341,7 +341,7 @@ static void indirect_print_vi(struct virtual_item *vi)
53943 vi->vi_index, vi->vi_type, vi->vi_ih);
53944 }
53945
53946 -static struct item_operations indirect_ops = {
53947 +static const struct item_operations indirect_ops = {
53948 .bytes_number = indirect_bytes_number,
53949 .decrement_key = indirect_decrement_key,
53950 .is_left_mergeable = indirect_is_left_mergeable,
53951 @@ -628,7 +628,7 @@ static void direntry_print_vi(struct virtual_item *vi)
53952 printk("\n");
53953 }
53954
53955 -static struct item_operations direntry_ops = {
53956 +static const struct item_operations direntry_ops = {
53957 .bytes_number = direntry_bytes_number,
53958 .decrement_key = direntry_decrement_key,
53959 .is_left_mergeable = direntry_is_left_mergeable,
53960 @@ -724,7 +724,7 @@ static void errcatch_print_vi(struct virtual_item *vi)
53961 "Invalid item type observed, run fsck ASAP");
53962 }
53963
53964 -static struct item_operations errcatch_ops = {
53965 +static const struct item_operations errcatch_ops = {
53966 errcatch_bytes_number,
53967 errcatch_decrement_key,
53968 errcatch_is_left_mergeable,
53969 @@ -746,7 +746,7 @@ static struct item_operations errcatch_ops = {
53970 #error Item types must use disk-format assigned values.
53971 #endif
53972
53973 -struct item_operations *item_ops[TYPE_ANY + 1] = {
53974 +const struct item_operations * const item_ops[TYPE_ANY + 1] = {
53975 &stat_data_ops,
53976 &indirect_ops,
53977 &direct_ops,
53978 diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
53979 index b5fe0aa..e0e25c4 100644
53980 --- a/fs/reiserfs/journal.c
53981 +++ b/fs/reiserfs/journal.c
53982 @@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
53983 struct buffer_head *bh;
53984 int i, j;
53985
53986 + pax_track_stack();
53987 +
53988 bh = __getblk(dev, block, bufsize);
53989 if (buffer_uptodate(bh))
53990 return (bh);
53991 diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
53992 index 2715791..b8996db 100644
53993 --- a/fs/reiserfs/namei.c
53994 +++ b/fs/reiserfs/namei.c
53995 @@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
53996 unsigned long savelink = 1;
53997 struct timespec ctime;
53998
53999 + pax_track_stack();
54000 +
54001 /* three balancings: (1) old name removal, (2) new name insertion
54002 and (3) maybe "save" link insertion
54003 stat data updates: (1) old directory,
54004 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
54005 index 9229e55..3d2e3b7 100644
54006 --- a/fs/reiserfs/procfs.c
54007 +++ b/fs/reiserfs/procfs.c
54008 @@ -123,7 +123,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
54009 "SMALL_TAILS " : "NO_TAILS ",
54010 replay_only(sb) ? "REPLAY_ONLY " : "",
54011 convert_reiserfs(sb) ? "CONV " : "",
54012 - atomic_read(&r->s_generation_counter),
54013 + atomic_read_unchecked(&r->s_generation_counter),
54014 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
54015 SF(s_do_balance), SF(s_unneeded_left_neighbor),
54016 SF(s_good_search_by_key_reada), SF(s_bmaps),
54017 @@ -309,6 +309,8 @@ static int show_journal(struct seq_file *m, struct super_block *sb)
54018 struct journal_params *jp = &rs->s_v1.s_journal;
54019 char b[BDEVNAME_SIZE];
54020
54021 + pax_track_stack();
54022 +
54023 seq_printf(m, /* on-disk fields */
54024 "jp_journal_1st_block: \t%i\n"
54025 "jp_journal_dev: \t%s[%x]\n"
54026 diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
54027 index d036ee5..4c7dca1 100644
54028 --- a/fs/reiserfs/stree.c
54029 +++ b/fs/reiserfs/stree.c
54030 @@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
54031 int iter = 0;
54032 #endif
54033
54034 + pax_track_stack();
54035 +
54036 BUG_ON(!th->t_trans_id);
54037
54038 init_tb_struct(th, &s_del_balance, sb, path,
54039 @@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
54040 int retval;
54041 int quota_cut_bytes = 0;
54042
54043 + pax_track_stack();
54044 +
54045 BUG_ON(!th->t_trans_id);
54046
54047 le_key2cpu_key(&cpu_key, key);
54048 @@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
54049 int quota_cut_bytes;
54050 loff_t tail_pos = 0;
54051
54052 + pax_track_stack();
54053 +
54054 BUG_ON(!th->t_trans_id);
54055
54056 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
54057 @@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
54058 int retval;
54059 int fs_gen;
54060
54061 + pax_track_stack();
54062 +
54063 BUG_ON(!th->t_trans_id);
54064
54065 fs_gen = get_generation(inode->i_sb);
54066 @@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
54067 int fs_gen = 0;
54068 int quota_bytes = 0;
54069
54070 + pax_track_stack();
54071 +
54072 BUG_ON(!th->t_trans_id);
54073
54074 if (inode) { /* Do we count quotas for item? */
54075 diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
54076 index 7cb1285..c726cd0 100644
54077 --- a/fs/reiserfs/super.c
54078 +++ b/fs/reiserfs/super.c
54079 @@ -916,6 +916,8 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
54080 {.option_name = NULL}
54081 };
54082
54083 + pax_track_stack();
54084 +
54085 *blocks = 0;
54086 if (!options || !*options)
54087 /* use default configuration: create tails, journaling on, no
54088 diff --git a/fs/select.c b/fs/select.c
54089 index fd38ce2..f5381b8 100644
54090 --- a/fs/select.c
54091 +++ b/fs/select.c
54092 @@ -20,6 +20,7 @@
54093 #include <linux/module.h>
54094 #include <linux/slab.h>
54095 #include <linux/poll.h>
54096 +#include <linux/security.h>
54097 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
54098 #include <linux/file.h>
54099 #include <linux/fdtable.h>
54100 @@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
54101 int retval, i, timed_out = 0;
54102 unsigned long slack = 0;
54103
54104 + pax_track_stack();
54105 +
54106 rcu_read_lock();
54107 retval = max_select_fd(n, fds);
54108 rcu_read_unlock();
54109 @@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
54110 /* Allocate small arguments on the stack to save memory and be faster */
54111 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
54112
54113 + pax_track_stack();
54114 +
54115 ret = -EINVAL;
54116 if (n < 0)
54117 goto out_nofds;
54118 @@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
54119 struct poll_list *walk = head;
54120 unsigned long todo = nfds;
54121
54122 + pax_track_stack();
54123 +
54124 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
54125 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
54126 return -EINVAL;
54127
54128 diff --git a/fs/seq_file.c b/fs/seq_file.c
54129 index eae7d9d..679f099 100644
54130 --- a/fs/seq_file.c
54131 +++ b/fs/seq_file.c
54132 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m, loff_t offset)
54133 return 0;
54134 }
54135 if (!m->buf) {
54136 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
54137 + m->size = PAGE_SIZE;
54138 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
54139 if (!m->buf)
54140 return -ENOMEM;
54141 }
54142 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m, loff_t offset)
54143 Eoverflow:
54144 m->op->stop(m, p);
54145 kfree(m->buf);
54146 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
54147 + m->size <<= 1;
54148 + m->buf = kmalloc(m->size, GFP_KERNEL);
54149 return !m->buf ? -ENOMEM : -EAGAIN;
54150 }
54151
54152 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
54153 m->version = file->f_version;
54154 /* grab buffer if we didn't have one */
54155 if (!m->buf) {
54156 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
54157 + m->size = PAGE_SIZE;
54158 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
54159 if (!m->buf)
54160 goto Enomem;
54161 }
54162 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
54163 goto Fill;
54164 m->op->stop(m, p);
54165 kfree(m->buf);
54166 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
54167 + m->size <<= 1;
54168 + m->buf = kmalloc(m->size, GFP_KERNEL);
54169 if (!m->buf)
54170 goto Enomem;
54171 m->count = 0;
54172 @@ -551,7 +555,7 @@ static void single_stop(struct seq_file *p, void *v)
54173 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
54174 void *data)
54175 {
54176 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
54177 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
54178 int res = -ENOMEM;
54179
54180 if (op) {
54181 diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c
54182 index 71c29b6..54694dd 100644
54183 --- a/fs/smbfs/proc.c
54184 +++ b/fs/smbfs/proc.c
54185 @@ -266,9 +266,9 @@ int smb_setcodepage(struct smb_sb_info *server, struct smb_nls_codepage *cp)
54186
54187 out:
54188 if (server->local_nls != NULL && server->remote_nls != NULL)
54189 - server->ops->convert = convert_cp;
54190 + *(void **)&server->ops->convert = convert_cp;
54191 else
54192 - server->ops->convert = convert_memcpy;
54193 + *(void **)&server->ops->convert = convert_memcpy;
54194
54195 smb_unlock_server(server);
54196 return n;
54197 @@ -933,9 +933,9 @@ smb_newconn(struct smb_sb_info *server, struct smb_conn_opt *opt)
54198
54199 /* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
54200 if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
54201 - server->ops->getattr = smb_proc_getattr_core;
54202 + *(void **)&server->ops->getattr = smb_proc_getattr_core;
54203 } else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
54204 - server->ops->getattr = smb_proc_getattr_ff;
54205 + *(void **)&server->ops->getattr = smb_proc_getattr_ff;
54206 }
54207
54208 /* Decode server capabilities */
54209 @@ -3439,7 +3439,7 @@ out:
54210 static void
54211 install_ops(struct smb_ops *dst, struct smb_ops *src)
54212 {
54213 - memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
54214 + memcpy((void *)dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
54215 }
54216
54217 /* < LANMAN2 */
54218 diff --git a/fs/smbfs/symlink.c b/fs/smbfs/symlink.c
54219 index 00b2909..2ace383 100644
54220 --- a/fs/smbfs/symlink.c
54221 +++ b/fs/smbfs/symlink.c
54222 @@ -55,7 +55,7 @@ static void *smb_follow_link(struct dentry *dentry, struct nameidata *nd)
54223
54224 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
54225 {
54226 - char *s = nd_get_link(nd);
54227 + const char *s = nd_get_link(nd);
54228 if (!IS_ERR(s))
54229 __putname(s);
54230 }
54231 diff --git a/fs/splice.c b/fs/splice.c
54232 index bb92b7c..5aa72b0 100644
54233 --- a/fs/splice.c
54234 +++ b/fs/splice.c
54235 @@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
54236 pipe_lock(pipe);
54237
54238 for (;;) {
54239 - if (!pipe->readers) {
54240 + if (!atomic_read(&pipe->readers)) {
54241 send_sig(SIGPIPE, current, 0);
54242 if (!ret)
54243 ret = -EPIPE;
54244 @@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
54245 do_wakeup = 0;
54246 }
54247
54248 - pipe->waiting_writers++;
54249 + atomic_inc(&pipe->waiting_writers);
54250 pipe_wait(pipe);
54251 - pipe->waiting_writers--;
54252 + atomic_dec(&pipe->waiting_writers);
54253 }
54254
54255 pipe_unlock(pipe);
54256 @@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
54257 .spd_release = spd_release_page,
54258 };
54259
54260 + pax_track_stack();
54261 +
54262 index = *ppos >> PAGE_CACHE_SHIFT;
54263 loff = *ppos & ~PAGE_CACHE_MASK;
54264 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
54265 @@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
54266 old_fs = get_fs();
54267 set_fs(get_ds());
54268 /* The cast to a user pointer is valid due to the set_fs() */
54269 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
54270 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
54271 set_fs(old_fs);
54272
54273 return res;
54274 @@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
54275 old_fs = get_fs();
54276 set_fs(get_ds());
54277 /* The cast to a user pointer is valid due to the set_fs() */
54278 - res = vfs_write(file, (const char __user *)buf, count, &pos);
54279 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
54280 set_fs(old_fs);
54281
54282 return res;
54283 @@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
54284 .spd_release = spd_release_page,
54285 };
54286
54287 + pax_track_stack();
54288 +
54289 index = *ppos >> PAGE_CACHE_SHIFT;
54290 offset = *ppos & ~PAGE_CACHE_MASK;
54291 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
54292 @@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
54293 goto err;
54294
54295 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
54296 - vec[i].iov_base = (void __user *) page_address(page);
54297 + vec[i].iov_base = (__force void __user *) page_address(page);
54298 vec[i].iov_len = this_len;
54299 pages[i] = page;
54300 spd.nr_pages++;
54301 @@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
54302 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
54303 {
54304 while (!pipe->nrbufs) {
54305 - if (!pipe->writers)
54306 + if (!atomic_read(&pipe->writers))
54307 return 0;
54308
54309 - if (!pipe->waiting_writers && sd->num_spliced)
54310 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
54311 return 0;
54312
54313 if (sd->flags & SPLICE_F_NONBLOCK)
54314 @@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
54315 * out of the pipe right after the splice_to_pipe(). So set
54316 * PIPE_READERS appropriately.
54317 */
54318 - pipe->readers = 1;
54319 + atomic_set(&pipe->readers, 1);
54320
54321 current->splice_pipe = pipe;
54322 }
54323 @@ -1593,6 +1597,8 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
54324 .spd_release = spd_release_page,
54325 };
54326
54327 + pax_track_stack();
54328 +
54329 pipe = pipe_info(file->f_path.dentry->d_inode);
54330 if (!pipe)
54331 return -EBADF;
54332 @@ -1701,9 +1707,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
54333 ret = -ERESTARTSYS;
54334 break;
54335 }
54336 - if (!pipe->writers)
54337 + if (!atomic_read(&pipe->writers))
54338 break;
54339 - if (!pipe->waiting_writers) {
54340 + if (!atomic_read(&pipe->waiting_writers)) {
54341 if (flags & SPLICE_F_NONBLOCK) {
54342 ret = -EAGAIN;
54343 break;
54344 @@ -1735,7 +1741,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
54345 pipe_lock(pipe);
54346
54347 while (pipe->nrbufs >= PIPE_BUFFERS) {
54348 - if (!pipe->readers) {
54349 + if (!atomic_read(&pipe->readers)) {
54350 send_sig(SIGPIPE, current, 0);
54351 ret = -EPIPE;
54352 break;
54353 @@ -1748,9 +1754,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
54354 ret = -ERESTARTSYS;
54355 break;
54356 }
54357 - pipe->waiting_writers++;
54358 + atomic_inc(&pipe->waiting_writers);
54359 pipe_wait(pipe);
54360 - pipe->waiting_writers--;
54361 + atomic_dec(&pipe->waiting_writers);
54362 }
54363
54364 pipe_unlock(pipe);
54365 @@ -1786,14 +1792,14 @@ retry:
54366 pipe_double_lock(ipipe, opipe);
54367
54368 do {
54369 - if (!opipe->readers) {
54370 + if (!atomic_read(&opipe->readers)) {
54371 send_sig(SIGPIPE, current, 0);
54372 if (!ret)
54373 ret = -EPIPE;
54374 break;
54375 }
54376
54377 - if (!ipipe->nrbufs && !ipipe->writers)
54378 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
54379 break;
54380
54381 /*
54382 @@ -1893,7 +1899,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
54383 pipe_double_lock(ipipe, opipe);
54384
54385 do {
54386 - if (!opipe->readers) {
54387 + if (!atomic_read(&opipe->readers)) {
54388 send_sig(SIGPIPE, current, 0);
54389 if (!ret)
54390 ret = -EPIPE;
54391 @@ -1938,7 +1944,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
54392 * return EAGAIN if we have the potential of some data in the
54393 * future, otherwise just return 0
54394 */
54395 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
54396 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
54397 ret = -EAGAIN;
54398
54399 pipe_unlock(ipipe);
54400 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
54401 index 7118a38..70af853 100644
54402 --- a/fs/sysfs/file.c
54403 +++ b/fs/sysfs/file.c
54404 @@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
54405
54406 struct sysfs_open_dirent {
54407 atomic_t refcnt;
54408 - atomic_t event;
54409 + atomic_unchecked_t event;
54410 wait_queue_head_t poll;
54411 struct list_head buffers; /* goes through sysfs_buffer.list */
54412 };
54413 @@ -53,7 +53,7 @@ struct sysfs_buffer {
54414 size_t count;
54415 loff_t pos;
54416 char * page;
54417 - struct sysfs_ops * ops;
54418 + const struct sysfs_ops * ops;
54419 struct mutex mutex;
54420 int needs_read_fill;
54421 int event;
54422 @@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
54423 {
54424 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
54425 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
54426 - struct sysfs_ops * ops = buffer->ops;
54427 + const struct sysfs_ops * ops = buffer->ops;
54428 int ret = 0;
54429 ssize_t count;
54430
54431 @@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
54432 if (!sysfs_get_active_two(attr_sd))
54433 return -ENODEV;
54434
54435 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
54436 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
54437 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
54438
54439 sysfs_put_active_two(attr_sd);
54440 @@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentry, struct sysfs_buffer * buffer, size_t
54441 {
54442 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
54443 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
54444 - struct sysfs_ops * ops = buffer->ops;
54445 + const struct sysfs_ops * ops = buffer->ops;
54446 int rc;
54447
54448 /* need attr_sd for attr and ops, its parent for kobj */
54449 @@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
54450 return -ENOMEM;
54451
54452 atomic_set(&new_od->refcnt, 0);
54453 - atomic_set(&new_od->event, 1);
54454 + atomic_set_unchecked(&new_od->event, 1);
54455 init_waitqueue_head(&new_od->poll);
54456 INIT_LIST_HEAD(&new_od->buffers);
54457 goto retry;
54458 @@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
54459 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
54460 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
54461 struct sysfs_buffer *buffer;
54462 - struct sysfs_ops *ops;
54463 + const struct sysfs_ops *ops;
54464 int error = -EACCES;
54465 char *p;
54466
54467 @@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
54468
54469 sysfs_put_active_two(attr_sd);
54470
54471 - if (buffer->event != atomic_read(&od->event))
54472 + if (buffer->event != atomic_read_unchecked(&od->event))
54473 goto trigger;
54474
54475 return DEFAULT_POLLMASK;
54476 @@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
54477
54478 od = sd->s_attr.open;
54479 if (od) {
54480 - atomic_inc(&od->event);
54481 + atomic_inc_unchecked(&od->event);
54482 wake_up_interruptible(&od->poll);
54483 }
54484
54485 diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
54486 index 4974995..c26609c 100644
54487 --- a/fs/sysfs/mount.c
54488 +++ b/fs/sysfs/mount.c
54489 @@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
54490 .s_name = "",
54491 .s_count = ATOMIC_INIT(1),
54492 .s_flags = SYSFS_DIR,
54493 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
54494 + .s_mode = S_IFDIR | S_IRWXU,
54495 +#else
54496 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
54497 +#endif
54498 .s_ino = 1,
54499 };
54500
54501 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
54502 index c5081ad..342ea86 100644
54503 --- a/fs/sysfs/symlink.c
54504 +++ b/fs/sysfs/symlink.c
54505 @@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
54506
54507 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
54508 {
54509 - char *page = nd_get_link(nd);
54510 + const char *page = nd_get_link(nd);
54511 if (!IS_ERR(page))
54512 free_page((unsigned long)page);
54513 }
54514 diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
54515 index 1e06853..b06d325 100644
54516 --- a/fs/udf/balloc.c
54517 +++ b/fs/udf/balloc.c
54518 @@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
54519
54520 mutex_lock(&sbi->s_alloc_mutex);
54521 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
54522 - if (bloc->logicalBlockNum < 0 ||
54523 - (bloc->logicalBlockNum + count) >
54524 - partmap->s_partition_len) {
54525 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
54526 udf_debug("%d < %d || %d + %d > %d\n",
54527 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
54528 count, partmap->s_partition_len);
54529 @@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct super_block *sb,
54530
54531 mutex_lock(&sbi->s_alloc_mutex);
54532 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
54533 - if (bloc->logicalBlockNum < 0 ||
54534 - (bloc->logicalBlockNum + count) >
54535 - partmap->s_partition_len) {
54536 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
54537 udf_debug("%d < %d || %d + %d > %d\n",
54538 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
54539 partmap->s_partition_len);
54540 diff --git a/fs/udf/inode.c b/fs/udf/inode.c
54541 index 6d24c2c..fff470f 100644
54542 --- a/fs/udf/inode.c
54543 +++ b/fs/udf/inode.c
54544 @@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
54545 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
54546 int lastblock = 0;
54547
54548 + pax_track_stack();
54549 +
54550 prev_epos.offset = udf_file_entry_alloc_offset(inode);
54551 prev_epos.block = iinfo->i_location;
54552 prev_epos.bh = NULL;
54553 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
54554 index 9215700..bf1f68e 100644
54555 --- a/fs/udf/misc.c
54556 +++ b/fs/udf/misc.c
54557 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
54558
54559 u8 udf_tag_checksum(const struct tag *t)
54560 {
54561 - u8 *data = (u8 *)t;
54562 + const u8 *data = (const u8 *)t;
54563 u8 checksum = 0;
54564 int i;
54565 for (i = 0; i < sizeof(struct tag); ++i)
54566 diff --git a/fs/utimes.c b/fs/utimes.c
54567 index e4c75db..b4df0e0 100644
54568 --- a/fs/utimes.c
54569 +++ b/fs/utimes.c
54570 @@ -1,6 +1,7 @@
54571 #include <linux/compiler.h>
54572 #include <linux/file.h>
54573 #include <linux/fs.h>
54574 +#include <linux/security.h>
54575 #include <linux/linkage.h>
54576 #include <linux/mount.h>
54577 #include <linux/namei.h>
54578 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
54579 goto mnt_drop_write_and_out;
54580 }
54581 }
54582 +
54583 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
54584 + error = -EACCES;
54585 + goto mnt_drop_write_and_out;
54586 + }
54587 +
54588 mutex_lock(&inode->i_mutex);
54589 error = notify_change(path->dentry, &newattrs);
54590 mutex_unlock(&inode->i_mutex);
54591 diff --git a/fs/xattr.c b/fs/xattr.c
54592 index 6d4f6d3..cda3958 100644
54593 --- a/fs/xattr.c
54594 +++ b/fs/xattr.c
54595 @@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
54596 * Extended attribute SET operations
54597 */
54598 static long
54599 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
54600 +setxattr(struct path *path, const char __user *name, const void __user *value,
54601 size_t size, int flags)
54602 {
54603 int error;
54604 @@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
54605 return PTR_ERR(kvalue);
54606 }
54607
54608 - error = vfs_setxattr(d, kname, kvalue, size, flags);
54609 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
54610 + error = -EACCES;
54611 + goto out;
54612 + }
54613 +
54614 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
54615 +out:
54616 kfree(kvalue);
54617 return error;
54618 }
54619 @@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
54620 return error;
54621 error = mnt_want_write(path.mnt);
54622 if (!error) {
54623 - error = setxattr(path.dentry, name, value, size, flags);
54624 + error = setxattr(&path, name, value, size, flags);
54625 mnt_drop_write(path.mnt);
54626 }
54627 path_put(&path);
54628 @@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
54629 return error;
54630 error = mnt_want_write(path.mnt);
54631 if (!error) {
54632 - error = setxattr(path.dentry, name, value, size, flags);
54633 + error = setxattr(&path, name, value, size, flags);
54634 mnt_drop_write(path.mnt);
54635 }
54636 path_put(&path);
54637 @@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
54638 const void __user *,value, size_t, size, int, flags)
54639 {
54640 struct file *f;
54641 - struct dentry *dentry;
54642 int error = -EBADF;
54643
54644 f = fget(fd);
54645 if (!f)
54646 return error;
54647 - dentry = f->f_path.dentry;
54648 - audit_inode(NULL, dentry);
54649 + audit_inode(NULL, f->f_path.dentry);
54650 error = mnt_want_write_file(f);
54651 if (!error) {
54652 - error = setxattr(dentry, name, value, size, flags);
54653 + error = setxattr(&f->f_path, name, value, size, flags);
54654 mnt_drop_write(f->f_path.mnt);
54655 }
54656 fput(f);
54657 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
54658 index c6ad7c7..f2847a7 100644
54659 --- a/fs/xattr_acl.c
54660 +++ b/fs/xattr_acl.c
54661 @@ -17,8 +17,8 @@
54662 struct posix_acl *
54663 posix_acl_from_xattr(const void *value, size_t size)
54664 {
54665 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
54666 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
54667 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
54668 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
54669 int count;
54670 struct posix_acl *acl;
54671 struct posix_acl_entry *acl_e;
54672 diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
54673 index 942362f..88f96f5 100644
54674 --- a/fs/xfs/linux-2.6/xfs_ioctl.c
54675 +++ b/fs/xfs/linux-2.6/xfs_ioctl.c
54676 @@ -134,7 +134,7 @@ xfs_find_handle(
54677 }
54678
54679 error = -EFAULT;
54680 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
54681 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
54682 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
54683 goto out_put;
54684
54685 @@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
54686 if (IS_ERR(dentry))
54687 return PTR_ERR(dentry);
54688
54689 - kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
54690 + kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
54691 if (!kbuf)
54692 goto out_dput;
54693
54694 @@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
54695 xfs_mount_t *mp,
54696 void __user *arg)
54697 {
54698 - xfs_fsop_geom_t fsgeo;
54699 + xfs_fsop_geom_t fsgeo;
54700 int error;
54701
54702 error = xfs_fs_geometry(mp, &fsgeo, 3);
54703 diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
54704 index bad485a..479bd32 100644
54705 --- a/fs/xfs/linux-2.6/xfs_ioctl32.c
54706 +++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
54707 @@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
54708 xfs_fsop_geom_t fsgeo;
54709 int error;
54710
54711 + memset(&fsgeo, 0, sizeof(fsgeo));
54712 error = xfs_fs_geometry(mp, &fsgeo, 3);
54713 if (error)
54714 return -error;
54715 diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
54716 index 1f3b4b8..6102f6d 100644
54717 --- a/fs/xfs/linux-2.6/xfs_iops.c
54718 +++ b/fs/xfs/linux-2.6/xfs_iops.c
54719 @@ -468,7 +468,7 @@ xfs_vn_put_link(
54720 struct nameidata *nd,
54721 void *p)
54722 {
54723 - char *s = nd_get_link(nd);
54724 + const char *s = nd_get_link(nd);
54725
54726 if (!IS_ERR(s))
54727 kfree(s);
54728 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
54729 index 8971fb0..5fc1eb2 100644
54730 --- a/fs/xfs/xfs_bmap.c
54731 +++ b/fs/xfs/xfs_bmap.c
54732 @@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
54733 int nmap,
54734 int ret_nmap);
54735 #else
54736 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
54737 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
54738 #endif /* DEBUG */
54739
54740 #if defined(XFS_RW_TRACE)
54741 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
54742 index e89734e..5e84d8d 100644
54743 --- a/fs/xfs/xfs_dir2_sf.c
54744 +++ b/fs/xfs/xfs_dir2_sf.c
54745 @@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
54746 }
54747
54748 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
54749 - if (filldir(dirent, sfep->name, sfep->namelen,
54750 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
54751 + char name[sfep->namelen];
54752 + memcpy(name, sfep->name, sfep->namelen);
54753 + if (filldir(dirent, name, sfep->namelen,
54754 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
54755 + *offset = off & 0x7fffffff;
54756 + return 0;
54757 + }
54758 + } else if (filldir(dirent, sfep->name, sfep->namelen,
54759 off & 0x7fffffff, ino, DT_UNKNOWN)) {
54760 *offset = off & 0x7fffffff;
54761 return 0;
54762 diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
54763 index 8f32f50..859e8a3 100644
54764 --- a/fs/xfs/xfs_vnodeops.c
54765 +++ b/fs/xfs/xfs_vnodeops.c
54766 @@ -564,13 +564,17 @@ xfs_readlink(
54767
54768 xfs_ilock(ip, XFS_ILOCK_SHARED);
54769
54770 - ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFLNK);
54771 - ASSERT(ip->i_d.di_size <= MAXPATHLEN);
54772 -
54773 pathlen = ip->i_d.di_size;
54774 if (!pathlen)
54775 goto out;
54776
54777 + if (pathlen > MAXPATHLEN) {
54778 + xfs_fs_cmn_err(CE_ALERT, mp, "%s: inode (%llu) symlink length (%d) too long",
54779 + __func__, (unsigned long long)ip->i_ino, pathlen);
54780 + ASSERT(0);
54781 + return XFS_ERROR(EFSCORRUPTED);
54782 + }
54783 +
54784 if (ip->i_df.if_flags & XFS_IFINLINE) {
54785 memcpy(link, ip->i_df.if_u1.if_data, pathlen);
54786 link[pathlen] = '\0';
54787 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
54788 new file mode 100644
54789 index 0000000..f27a8e8
54790 --- /dev/null
54791 +++ b/grsecurity/Kconfig
54792 @@ -0,0 +1,1036 @@
54793 +#
54794 +# grsecurity configuration
54795 +#
54796 +
54797 +menu "Grsecurity"
54798 +
54799 +config GRKERNSEC
54800 + bool "Grsecurity"
54801 + select CRYPTO
54802 + select CRYPTO_SHA256
54803 + help
54804 + If you say Y here, you will be able to configure many features
54805 + that will enhance the security of your system. It is highly
54806 + recommended that you say Y here and read through the help
54807 + for each option so that you fully understand the features and
54808 + can evaluate their usefulness for your machine.
54809 +
54810 +choice
54811 + prompt "Security Level"
54812 + depends on GRKERNSEC
54813 + default GRKERNSEC_CUSTOM
54814 +
54815 +config GRKERNSEC_LOW
54816 + bool "Low"
54817 + select GRKERNSEC_LINK
54818 + select GRKERNSEC_FIFO
54819 + select GRKERNSEC_RANDNET
54820 + select GRKERNSEC_DMESG
54821 + select GRKERNSEC_CHROOT
54822 + select GRKERNSEC_CHROOT_CHDIR
54823 +
54824 + help
54825 + If you choose this option, several of the grsecurity options will
54826 + be enabled that will give you greater protection against a number
54827 + of attacks, while assuring that none of your software will have any
54828 + conflicts with the additional security measures. If you run a lot
54829 + of unusual software, or you are having problems with the higher
54830 + security levels, you should say Y here. With this option, the
54831 + following features are enabled:
54832 +
54833 + - Linking restrictions
54834 + - FIFO restrictions
54835 + - Restricted dmesg
54836 + - Enforced chdir("/") on chroot
54837 + - Runtime module disabling
54838 +
54839 +config GRKERNSEC_MEDIUM
54840 + bool "Medium"
54841 + select PAX
54842 + select PAX_EI_PAX
54843 + select PAX_PT_PAX_FLAGS
54844 + select PAX_HAVE_ACL_FLAGS
54845 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
54846 + select GRKERNSEC_CHROOT
54847 + select GRKERNSEC_CHROOT_SYSCTL
54848 + select GRKERNSEC_LINK
54849 + select GRKERNSEC_FIFO
54850 + select GRKERNSEC_DMESG
54851 + select GRKERNSEC_RANDNET
54852 + select GRKERNSEC_FORKFAIL
54853 + select GRKERNSEC_TIME
54854 + select GRKERNSEC_SIGNAL
54855 + select GRKERNSEC_CHROOT
54856 + select GRKERNSEC_CHROOT_UNIX
54857 + select GRKERNSEC_CHROOT_MOUNT
54858 + select GRKERNSEC_CHROOT_PIVOT
54859 + select GRKERNSEC_CHROOT_DOUBLE
54860 + select GRKERNSEC_CHROOT_CHDIR
54861 + select GRKERNSEC_CHROOT_MKNOD
54862 + select GRKERNSEC_PROC
54863 + select GRKERNSEC_PROC_USERGROUP
54864 + select PAX_RANDUSTACK
54865 + select PAX_ASLR
54866 + select PAX_RANDMMAP
54867 + select PAX_REFCOUNT if (X86 || SPARC64)
54868 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
54869 +
54870 + help
54871 + If you say Y here, several features in addition to those included
54872 + in the low additional security level will be enabled. These
54873 + features provide even more security to your system, though in rare
54874 + cases they may be incompatible with very old or poorly written
54875 + software. If you enable this option, make sure that your auth
54876 + service (identd) is running as gid 1001. With this option,
54877 + the following features (in addition to those provided in the
54878 + low additional security level) will be enabled:
54879 +
54880 + - Failed fork logging
54881 + - Time change logging
54882 + - Signal logging
54883 + - Deny mounts in chroot
54884 + - Deny double chrooting
54885 + - Deny sysctl writes in chroot
54886 + - Deny mknod in chroot
54887 + - Deny access to abstract AF_UNIX sockets out of chroot
54888 + - Deny pivot_root in chroot
54889 + - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
54890 + - /proc restrictions with special GID set to 10 (usually wheel)
54891 + - Address Space Layout Randomization (ASLR)
54892 + - Prevent exploitation of most refcount overflows
54893 + - Bounds checking of copying between the kernel and userland
54894 +
54895 +config GRKERNSEC_HIGH
54896 + bool "High"
54897 + select GRKERNSEC_LINK
54898 + select GRKERNSEC_FIFO
54899 + select GRKERNSEC_DMESG
54900 + select GRKERNSEC_FORKFAIL
54901 + select GRKERNSEC_TIME
54902 + select GRKERNSEC_SIGNAL
54903 + select GRKERNSEC_CHROOT
54904 + select GRKERNSEC_CHROOT_SHMAT
54905 + select GRKERNSEC_CHROOT_UNIX
54906 + select GRKERNSEC_CHROOT_MOUNT
54907 + select GRKERNSEC_CHROOT_FCHDIR
54908 + select GRKERNSEC_CHROOT_PIVOT
54909 + select GRKERNSEC_CHROOT_DOUBLE
54910 + select GRKERNSEC_CHROOT_CHDIR
54911 + select GRKERNSEC_CHROOT_MKNOD
54912 + select GRKERNSEC_CHROOT_CAPS
54913 + select GRKERNSEC_CHROOT_SYSCTL
54914 + select GRKERNSEC_CHROOT_FINDTASK
54915 + select GRKERNSEC_SYSFS_RESTRICT
54916 + select GRKERNSEC_PROC
54917 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
54918 + select GRKERNSEC_HIDESYM
54919 + select GRKERNSEC_BRUTE
54920 + select GRKERNSEC_PROC_USERGROUP
54921 + select GRKERNSEC_KMEM
54922 + select GRKERNSEC_RESLOG
54923 + select GRKERNSEC_RANDNET
54924 + select GRKERNSEC_PROC_ADD
54925 + select GRKERNSEC_CHROOT_CHMOD
54926 + select GRKERNSEC_CHROOT_NICE
54927 + select GRKERNSEC_AUDIT_MOUNT
54928 + select GRKERNSEC_MODHARDEN if (MODULES)
54929 + select GRKERNSEC_HARDEN_PTRACE
54930 + select GRKERNSEC_VM86 if (X86_32)
54931 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
54932 + select PAX
54933 + select PAX_RANDUSTACK
54934 + select PAX_ASLR
54935 + select PAX_RANDMMAP
54936 + select PAX_NOEXEC
54937 + select PAX_MPROTECT
54938 + select PAX_EI_PAX
54939 + select PAX_PT_PAX_FLAGS
54940 + select PAX_HAVE_ACL_FLAGS
54941 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
54942 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
54943 + select PAX_RANDKSTACK if (X86_TSC && X86)
54944 + select PAX_SEGMEXEC if (X86_32)
54945 + select PAX_PAGEEXEC
54946 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
54947 + select PAX_EMUTRAMP if (PARISC)
54948 + select PAX_EMUSIGRT if (PARISC)
54949 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
54950 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
54951 + select PAX_REFCOUNT if (X86 || SPARC64)
54952 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
54953 + help
54954 + If you say Y here, many of the features of grsecurity will be
54955 + enabled, which will protect you against many kinds of attacks
54956 + against your system. The heightened security comes at a cost
54957 + of an increased chance of incompatibilities with rare software
54958 + on your machine. Since this security level enables PaX, you should
54959 + view <http://pax.grsecurity.net> and read about the PaX
54960 + project. While you are there, download chpax and run it on
54961 + binaries that cause problems with PaX. Also remember that
54962 + since the /proc restrictions are enabled, you must run your
54963 + identd as gid 1001. This security level enables the following
54964 + features in addition to those listed in the low and medium
54965 + security levels:
54966 +
54967 + - Additional /proc restrictions
54968 + - Chmod restrictions in chroot
54969 + - No signals, ptrace, or viewing of processes outside of chroot
54970 + - Capability restrictions in chroot
54971 + - Deny fchdir out of chroot
54972 + - Priority restrictions in chroot
54973 + - Segmentation-based implementation of PaX
54974 + - Mprotect restrictions
54975 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
54976 + - Kernel stack randomization
54977 + - Mount/unmount/remount logging
54978 + - Kernel symbol hiding
54979 + - Hardening of module auto-loading
54980 + - Ptrace restrictions
54981 + - Restricted vm86 mode
54982 + - Restricted sysfs/debugfs
54983 + - Active kernel exploit response
54984 +
54985 +config GRKERNSEC_CUSTOM
54986 + bool "Custom"
54987 + help
54988 + If you say Y here, you will be able to configure every grsecurity
54989 + option, which allows you to enable many more features that aren't
54990 + covered in the basic security levels. These additional features
54991 + include TPE, socket restrictions, and the sysctl system for
54992 + grsecurity. It is advised that you read through the help for
54993 + each option to determine its usefulness in your situation.
54994 +
54995 +endchoice
54996 +
54997 +menu "Address Space Protection"
54998 +depends on GRKERNSEC
54999 +
55000 +config GRKERNSEC_KMEM
55001 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
55002 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
55003 + help
55004 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
55005 + be written to or read from to modify or leak the contents of the running
55006 + kernel. /dev/port will also not be allowed to be opened. If you have module
55007 + support disabled, enabling this will close up four ways that are
55008 + currently used to insert malicious code into the running kernel.
55009 + Even with all these features enabled, we still highly recommend that
55010 + you use the RBAC system, as it is still possible for an attacker to
55011 + modify the running kernel through privileged I/O granted by ioperm/iopl.
55012 + If you are not using XFree86, you may be able to stop this additional
55013 + case by enabling the 'Disable privileged I/O' option. Though nothing
55014 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
55015 + but only to video memory, which is the only writing we allow in this
55016 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
55017 + not be allowed to mprotect it with PROT_WRITE later.
55018 + It is highly recommended that you say Y here if you meet all the
55019 + conditions above.
55020 +
55021 +config GRKERNSEC_VM86
55022 + bool "Restrict VM86 mode"
55023 + depends on X86_32
55024 +
55025 + help
55026 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
55027 + make use of a special execution mode on 32bit x86 processors called
55028 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
55029 + video cards and will still work with this option enabled. The purpose
55030 + of the option is to prevent exploitation of emulation errors in
55031 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
55032 + Nearly all users should be able to enable this option.
55033 +
55034 +config GRKERNSEC_IO
55035 + bool "Disable privileged I/O"
55036 + depends on X86
55037 + select RTC_CLASS
55038 + select RTC_INTF_DEV
55039 + select RTC_DRV_CMOS
55040 +
55041 + help
55042 + If you say Y here, all ioperm and iopl calls will return an error.
55043 + Ioperm and iopl can be used to modify the running kernel.
55044 + Unfortunately, some programs need this access to operate properly,
55045 + the most notable of which are XFree86 and hwclock. hwclock can be
55046 + remedied by having RTC support in the kernel, so real-time
55047 + clock support is enabled if this option is enabled, to ensure
55048 + that hwclock operates correctly. XFree86 still will not
55049 + operate correctly with this option enabled, so DO NOT CHOOSE Y
55050 + IF YOU USE XFree86. If you use XFree86 and you still want to
55051 + protect your kernel against modification, use the RBAC system.
55052 +
55053 +config GRKERNSEC_PROC_MEMMAP
55054 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
55055 + default y if (PAX_NOEXEC || PAX_ASLR)
55056 + depends on PAX_NOEXEC || PAX_ASLR
55057 + help
55058 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
55059 + give no information about the addresses of its mappings if
55060 + PaX features that rely on random addresses are enabled on the task.
55061 + If you use PaX it is greatly recommended that you say Y here as it
55062 + closes up a hole that makes the full ASLR useless for suid
55063 + binaries.
55064 +
55065 +config GRKERNSEC_BRUTE
55066 + bool "Deter exploit bruteforcing"
55067 + help
55068 + If you say Y here, attempts to bruteforce exploits against forking
55069 + daemons such as apache or sshd, as well as against suid/sgid binaries
55070 + will be deterred. When a child of a forking daemon is killed by PaX
55071 + or crashes due to an illegal instruction or other suspicious signal,
55072 + the parent process will be delayed 30 seconds upon every subsequent
55073 + fork until the administrator is able to assess the situation and
55074 + restart the daemon.
55075 + In the suid/sgid case, the attempt is logged, the user has all their
55076 + processes terminated, and they are prevented from executing any further
55077 + processes for 15 minutes.
55078 + It is recommended that you also enable signal logging in the auditing
55079 + section so that logs are generated when a process triggers a suspicious
55080 + signal.
55081 + If the sysctl option is enabled, a sysctl option with name
55082 + "deter_bruteforce" is created.
55083 +
55084 +config GRKERNSEC_MODHARDEN
55085 + bool "Harden module auto-loading"
55086 + depends on MODULES
55087 + help
55088 + If you say Y here, module auto-loading in response to use of some
55089 + feature implemented by an unloaded module will be restricted to
55090 + root users. Enabling this option helps defend against attacks
55091 + by unprivileged users who abuse the auto-loading behavior to
55092 + cause a vulnerable module to load that is then exploited.
55093 +
55094 + If this option prevents a legitimate use of auto-loading for a
55095 + non-root user, the administrator can execute modprobe manually
55096 + with the exact name of the module mentioned in the alert log.
55097 + Alternatively, the administrator can add the module to the list
55098 + of modules loaded at boot by modifying init scripts.
55099 +
55100 + Modification of init scripts will most likely be needed on
55101 + Ubuntu servers with encrypted home directory support enabled,
55102 + as the first non-root user logging in will cause the ecb(aes),
55103 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
55104 +
55105 +config GRKERNSEC_HIDESYM
55106 + bool "Hide kernel symbols"
55107 + help
55108 + If you say Y here, getting information on loaded modules, and
55109 + displaying all kernel symbols through a syscall will be restricted
55110 + to users with CAP_SYS_MODULE. For software compatibility reasons,
55111 + /proc/kallsyms will be restricted to the root user. The RBAC
55112 + system can hide that entry even from root.
55113 +
55114 + This option also prevents leaking of kernel addresses through
55115 + several /proc entries.
55116 +
55117 + Note that this option is only effective provided the following
55118 + conditions are met:
55119 + 1) The kernel using grsecurity is not precompiled by some distribution
55120 + 2) You have also enabled GRKERNSEC_DMESG
55121 + 3) You are using the RBAC system and hiding other files such as your
55122 + kernel image and System.map. Alternatively, enabling this option
55123 + causes the permissions on /boot, /lib/modules, and the kernel
55124 + source directory to change at compile time to prevent
55125 + reading by non-root users.
55126 + If the above conditions are met, this option will aid in providing a
55127 + useful protection against local kernel exploitation of overflows
55128 + and arbitrary read/write vulnerabilities.
55129 +
55130 +config GRKERNSEC_KERN_LOCKOUT
55131 + bool "Active kernel exploit response"
55132 + depends on X86 || ARM || PPC || SPARC
55133 + help
55134 + If you say Y here, when a PaX alert is triggered due to suspicious
55135 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
55136 + or an OOPs occurs due to bad memory accesses, instead of just
55137 + terminating the offending process (and potentially allowing
55138 + a subsequent exploit from the same user), we will take one of two
55139 + actions:
55140 + If the user was root, we will panic the system
55141 + If the user was non-root, we will log the attempt, terminate
55142 + all processes owned by the user, then prevent them from creating
55143 + any new processes until the system is restarted
55144 + This deters repeated kernel exploitation/bruteforcing attempts
55145 + and is useful for later forensics.
55146 +
55147 +endmenu
55148 +menu "Role Based Access Control Options"
55149 +depends on GRKERNSEC
55150 +
55151 +config GRKERNSEC_RBAC_DEBUG
55152 + bool
55153 +
55154 +config GRKERNSEC_NO_RBAC
55155 + bool "Disable RBAC system"
55156 + help
55157 + If you say Y here, the /dev/grsec device will be removed from the kernel,
55158 + preventing the RBAC system from being enabled. You should only say Y
55159 + here if you have no intention of using the RBAC system, so as to prevent
55160 + an attacker with root access from misusing the RBAC system to hide files
55161 + and processes when loadable module support and /dev/[k]mem have been
55162 + locked down.
55163 +
55164 +config GRKERNSEC_ACL_HIDEKERN
55165 + bool "Hide kernel processes"
55166 + help
55167 + If you say Y here, all kernel threads will be hidden to all
55168 + processes but those whose subject has the "view hidden processes"
55169 + flag.
55170 +
55171 +config GRKERNSEC_ACL_MAXTRIES
55172 + int "Maximum tries before password lockout"
55173 + default 3
55174 + help
55175 + This option enforces the maximum number of times a user can attempt
55176 + to authorize themselves with the grsecurity RBAC system before being
55177 + denied the ability to attempt authorization again for a specified time.
55178 + The lower the number, the harder it will be to brute-force a password.
55179 +
55180 +config GRKERNSEC_ACL_TIMEOUT
55181 + int "Time to wait after max password tries, in seconds"
55182 + default 30
55183 + help
55184 + This option specifies the time the user must wait after attempting to
55185 + authorize to the RBAC system with the maximum number of invalid
55186 + passwords. The higher the number, the harder it will be to brute-force
55187 + a password.
55188 +
55189 +endmenu
55190 +menu "Filesystem Protections"
55191 +depends on GRKERNSEC
55192 +
55193 +config GRKERNSEC_PROC
55194 + bool "Proc restrictions"
55195 + help
55196 + If you say Y here, the permissions of the /proc filesystem
55197 + will be altered to enhance system security and privacy. You MUST
55198 + choose either a user only restriction or a user and group restriction.
55199 + Depending upon the option you choose, you can either restrict users to
55200 + see only the processes they themselves run, or choose a group that can
55201 + view all processes and files normally restricted to root if you choose
55202 + the "restrict to user only" option. NOTE: If you're running identd as
55203 + a non-root user, you will have to run it as the group you specify here.
55204 +
55205 +config GRKERNSEC_PROC_USER
55206 + bool "Restrict /proc to user only"
55207 + depends on GRKERNSEC_PROC
55208 + help
55209 + If you say Y here, non-root users will only be able to view their own
55210 + processes, and restricts them from viewing network-related information,
55211 + and viewing kernel symbol and module information.
55212 +
55213 +config GRKERNSEC_PROC_USERGROUP
55214 + bool "Allow special group"
55215 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
55216 + help
55217 + If you say Y here, you will be able to select a group that will be
55218 + able to view all processes and network-related information. If you've
55219 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
55220 + remain hidden. This option is useful if you want to run identd as
55221 + a non-root user.
55222 +
55223 +config GRKERNSEC_PROC_GID
55224 + int "GID for special group"
55225 + depends on GRKERNSEC_PROC_USERGROUP
55226 + default 1001
55227 +
55228 +config GRKERNSEC_PROC_ADD
55229 + bool "Additional restrictions"
55230 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
55231 + help
55232 + If you say Y here, additional restrictions will be placed on
55233 + /proc that keep normal users from viewing device information and
55234 + slabinfo information that could be useful for exploits.
55235 +
55236 +config GRKERNSEC_LINK
55237 + bool "Linking restrictions"
55238 + help
55239 + If you say Y here, /tmp race exploits will be prevented, since users
55240 + will no longer be able to follow symlinks owned by other users in
55241 + world-writable +t directories (e.g. /tmp), unless the owner of the
55242 + symlink is the owner of the directory. users will also not be
55243 + able to hardlink to files they do not own. If the sysctl option is
55244 + enabled, a sysctl option with name "linking_restrictions" is created.
55245 +
55246 +config GRKERNSEC_FIFO
55247 + bool "FIFO restrictions"
55248 + help
55249 + If you say Y here, users will not be able to write to FIFOs they don't
55250 + own in world-writable +t directories (e.g. /tmp), unless the owner of
55251 + the FIFO is the same owner of the directory it's held in. If the sysctl
55252 + option is enabled, a sysctl option with name "fifo_restrictions" is
55253 + created.
55254 +
55255 +config GRKERNSEC_SYSFS_RESTRICT
55256 + bool "Sysfs/debugfs restriction"
55257 + depends on SYSFS
55258 + help
55259 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
55260 + any filesystem normally mounted under it (e.g. debugfs) will only
55261 + be accessible by root. These filesystems generally provide access
55262 + to hardware and debug information that isn't appropriate for unprivileged
55263 + users of the system. Sysfs and debugfs have also become a large source
55264 + of new vulnerabilities, ranging from infoleaks to local compromise.
55265 + There has been very little oversight with an eye toward security involved
55266 + in adding new exporters of information to these filesystems, so their
55267 + use is discouraged.
55268 + This option is equivalent to a chmod 0700 of the mount paths.
55269 +
55270 +config GRKERNSEC_ROFS
55271 + bool "Runtime read-only mount protection"
55272 + help
55273 + If you say Y here, a sysctl option with name "romount_protect" will
55274 + be created. By setting this option to 1 at runtime, filesystems
55275 + will be protected in the following ways:
55276 + * No new writable mounts will be allowed
55277 + * Existing read-only mounts won't be able to be remounted read/write
55278 + * Write operations will be denied on all block devices
55279 + This option acts independently of grsec_lock: once it is set to 1,
55280 + it cannot be turned off. Therefore, please be mindful of the resulting
55281 + behavior if this option is enabled in an init script on a read-only
55282 + filesystem. This feature is mainly intended for secure embedded systems.
55283 +
55284 +config GRKERNSEC_CHROOT
55285 + bool "Chroot jail restrictions"
55286 + help
55287 + If you say Y here, you will be able to choose several options that will
55288 + make breaking out of a chrooted jail much more difficult. If you
55289 + encounter no software incompatibilities with the following options, it
55290 + is recommended that you enable each one.
55291 +
55292 +config GRKERNSEC_CHROOT_MOUNT
55293 + bool "Deny mounts"
55294 + depends on GRKERNSEC_CHROOT
55295 + help
55296 + If you say Y here, processes inside a chroot will not be able to
55297 + mount or remount filesystems. If the sysctl option is enabled, a
55298 + sysctl option with name "chroot_deny_mount" is created.
55299 +
55300 +config GRKERNSEC_CHROOT_DOUBLE
55301 + bool "Deny double-chroots"
55302 + depends on GRKERNSEC_CHROOT
55303 + help
55304 + If you say Y here, processes inside a chroot will not be able to chroot
55305 + again outside the chroot. This is a widely used method of breaking
55306 + out of a chroot jail and should not be allowed. If the sysctl
55307 + option is enabled, a sysctl option with name
55308 + "chroot_deny_chroot" is created.
55309 +
55310 +config GRKERNSEC_CHROOT_PIVOT
55311 + bool "Deny pivot_root in chroot"
55312 + depends on GRKERNSEC_CHROOT
55313 + help
55314 + If you say Y here, processes inside a chroot will not be able to use
55315 + a function called pivot_root() that was introduced in Linux 2.3.41. It
55316 + works similar to chroot in that it changes the root filesystem. This
55317 + function could be misused in a chrooted process to attempt to break out
55318 + of the chroot, and therefore should not be allowed. If the sysctl
55319 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
55320 + created.
55321 +
55322 +config GRKERNSEC_CHROOT_CHDIR
55323 + bool "Enforce chdir(\"/\") on all chroots"
55324 + depends on GRKERNSEC_CHROOT
55325 + help
55326 + If you say Y here, the current working directory of all newly-chrooted
55327 + applications will be set to the root directory of the chroot.
55328 + The man page on chroot(2) states:
55329 + Note that this call does not change the current working
55330 + directory, so that `.' can be outside the tree rooted at
55331 + `/'. In particular, the super-user can escape from a
55332 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
55333 +
55334 + It is recommended that you say Y here, since it's not known to break
55335 + any software. If the sysctl option is enabled, a sysctl option with
55336 + name "chroot_enforce_chdir" is created.
55337 +
55338 +config GRKERNSEC_CHROOT_CHMOD
55339 + bool "Deny (f)chmod +s"
55340 + depends on GRKERNSEC_CHROOT
55341 + help
55342 + If you say Y here, processes inside a chroot will not be able to chmod
55343 + or fchmod files to make them have suid or sgid bits. This protects
55344 + against another published method of breaking a chroot. If the sysctl
55345 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
55346 + created.
55347 +
55348 +config GRKERNSEC_CHROOT_FCHDIR
55349 + bool "Deny fchdir out of chroot"
55350 + depends on GRKERNSEC_CHROOT
55351 + help
55352 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
55353 + to a file descriptor of the chrooting process that points to a directory
55354 + outside the filesystem will be stopped. If the sysctl option
55355 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
55356 +
55357 +config GRKERNSEC_CHROOT_MKNOD
55358 + bool "Deny mknod"
55359 + depends on GRKERNSEC_CHROOT
55360 + help
55361 + If you say Y here, processes inside a chroot will not be allowed to
55362 + mknod. The problem with using mknod inside a chroot is that it
55363 + would allow an attacker to create a device entry that is the same
55364 + as one on the physical root of your system, which could range from
55365 + anything from the console device to a device for your harddrive (which
55366 + they could then use to wipe the drive or steal data). It is recommended
55367 + that you say Y here, unless you run into software incompatibilities.
55368 + If the sysctl option is enabled, a sysctl option with name
55369 + "chroot_deny_mknod" is created.
55370 +
55371 +config GRKERNSEC_CHROOT_SHMAT
55372 + bool "Deny shmat() out of chroot"
55373 + depends on GRKERNSEC_CHROOT
55374 + help
55375 + If you say Y here, processes inside a chroot will not be able to attach
55376 + to shared memory segments that were created outside of the chroot jail.
55377 + It is recommended that you say Y here. If the sysctl option is enabled,
55378 + a sysctl option with name "chroot_deny_shmat" is created.
55379 +
55380 +config GRKERNSEC_CHROOT_UNIX
55381 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
55382 + depends on GRKERNSEC_CHROOT
55383 + help
55384 + If you say Y here, processes inside a chroot will not be able to
55385 + connect to abstract (meaning not belonging to a filesystem) Unix
55386 + domain sockets that were bound outside of a chroot. It is recommended
55387 + that you say Y here. If the sysctl option is enabled, a sysctl option
55388 + with name "chroot_deny_unix" is created.
55389 +
55390 +config GRKERNSEC_CHROOT_FINDTASK
55391 + bool "Protect outside processes"
55392 + depends on GRKERNSEC_CHROOT
55393 + help
55394 + If you say Y here, processes inside a chroot will not be able to
55395 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
55396 + getsid, or view any process outside of the chroot. If the sysctl
55397 + option is enabled, a sysctl option with name "chroot_findtask" is
55398 + created.
55399 +
55400 +config GRKERNSEC_CHROOT_NICE
55401 + bool "Restrict priority changes"
55402 + depends on GRKERNSEC_CHROOT
55403 + help
55404 + If you say Y here, processes inside a chroot will not be able to raise
55405 + the priority of processes in the chroot, or alter the priority of
55406 + processes outside the chroot. This provides more security than simply
55407 + removing CAP_SYS_NICE from the process' capability set. If the
55408 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
55409 + is created.
55410 +
55411 +config GRKERNSEC_CHROOT_SYSCTL
55412 + bool "Deny sysctl writes"
55413 + depends on GRKERNSEC_CHROOT
55414 + help
55415 + If you say Y here, an attacker in a chroot will not be able to
55416 + write to sysctl entries, either by sysctl(2) or through a /proc
55417 + interface. It is strongly recommended that you say Y here. If the
55418 + sysctl option is enabled, a sysctl option with name
55419 + "chroot_deny_sysctl" is created.
55420 +
55421 +config GRKERNSEC_CHROOT_CAPS
55422 + bool "Capability restrictions"
55423 + depends on GRKERNSEC_CHROOT
55424 + help
55425 + If you say Y here, the capabilities on all processes within a
55426 + chroot jail will be lowered to stop module insertion, raw i/o,
55427 + system and net admin tasks, rebooting the system, modifying immutable
55428 + files, modifying IPC owned by another, and changing the system time.
55429 + This is left an option because it can break some apps. Disable this
55430 + if your chrooted apps are having problems performing those kinds of
55431 + tasks. If the sysctl option is enabled, a sysctl option with
55432 + name "chroot_caps" is created.
55433 +
55434 +endmenu
55435 +menu "Kernel Auditing"
55436 +depends on GRKERNSEC
55437 +
55438 +config GRKERNSEC_AUDIT_GROUP
55439 + bool "Single group for auditing"
55440 + help
55441 + If you say Y here, the exec, chdir, and (un)mount logging features
55442 + will only operate on a group you specify. This option is recommended
55443 + if you only want to watch certain users instead of having a large
55444 + amount of logs from the entire system. If the sysctl option is enabled,
55445 + a sysctl option with name "audit_group" is created.
55446 +
55447 +config GRKERNSEC_AUDIT_GID
55448 + int "GID for auditing"
55449 + depends on GRKERNSEC_AUDIT_GROUP
55450 + default 1007
55451 +
55452 +config GRKERNSEC_EXECLOG
55453 + bool "Exec logging"
55454 + help
55455 + If you say Y here, all execve() calls will be logged (since the
55456 + other exec*() calls are frontends to execve(), all execution
55457 + will be logged). Useful for shell-servers that like to keep track
55458 + of their users. If the sysctl option is enabled, a sysctl option with
55459 + name "exec_logging" is created.
55460 + WARNING: This option when enabled will produce a LOT of logs, especially
55461 + on an active system.
55462 +
55463 +config GRKERNSEC_RESLOG
55464 + bool "Resource logging"
55465 + help
55466 + If you say Y here, all attempts to overstep resource limits will
55467 + be logged with the resource name, the requested size, and the current
55468 + limit. It is highly recommended that you say Y here. If the sysctl
55469 + option is enabled, a sysctl option with name "resource_logging" is
55470 + created. If the RBAC system is enabled, the sysctl value is ignored.
55471 +
55472 +config GRKERNSEC_CHROOT_EXECLOG
55473 + bool "Log execs within chroot"
55474 + help
55475 + If you say Y here, all executions inside a chroot jail will be logged
55476 + to syslog. This can cause a large amount of logs if certain
55477 + applications (eg. djb's daemontools) are installed on the system, and
55478 + is therefore left as an option. If the sysctl option is enabled, a
55479 + sysctl option with name "chroot_execlog" is created.
55480 +
55481 +config GRKERNSEC_AUDIT_PTRACE
55482 + bool "Ptrace logging"
55483 + help
55484 + If you say Y here, all attempts to attach to a process via ptrace
55485 + will be logged. If the sysctl option is enabled, a sysctl option
55486 + with name "audit_ptrace" is created.
55487 +
55488 +config GRKERNSEC_AUDIT_CHDIR
55489 + bool "Chdir logging"
55490 + help
55491 + If you say Y here, all chdir() calls will be logged. If the sysctl
55492 + option is enabled, a sysctl option with name "audit_chdir" is created.
55493 +
55494 +config GRKERNSEC_AUDIT_MOUNT
55495 + bool "(Un)Mount logging"
55496 + help
55497 + If you say Y here, all mounts and unmounts will be logged. If the
55498 + sysctl option is enabled, a sysctl option with name "audit_mount" is
55499 + created.
55500 +
55501 +config GRKERNSEC_SIGNAL
55502 + bool "Signal logging"
55503 + help
55504 + If you say Y here, certain important signals will be logged, such as
55505 + SIGSEGV, which will as a result inform you of when a error in a program
55506 + occurred, which in some cases could mean a possible exploit attempt.
55507 + If the sysctl option is enabled, a sysctl option with name
55508 + "signal_logging" is created.
55509 +
55510 +config GRKERNSEC_FORKFAIL
55511 + bool "Fork failure logging"
55512 + help
55513 + If you say Y here, all failed fork() attempts will be logged.
55514 + This could suggest a fork bomb, or someone attempting to overstep
55515 + their process limit. If the sysctl option is enabled, a sysctl option
55516 + with name "forkfail_logging" is created.
55517 +
55518 +config GRKERNSEC_TIME
55519 + bool "Time change logging"
55520 + help
55521 + If you say Y here, any changes of the system clock will be logged.
55522 + If the sysctl option is enabled, a sysctl option with name
55523 + "timechange_logging" is created.
55524 +
55525 +config GRKERNSEC_PROC_IPADDR
55526 + bool "/proc/<pid>/ipaddr support"
55527 + help
55528 + If you say Y here, a new entry will be added to each /proc/<pid>
55529 + directory that contains the IP address of the person using the task.
55530 + The IP is carried across local TCP and AF_UNIX stream sockets.
55531 + This information can be useful for IDS/IPSes to perform remote response
55532 + to a local attack. The entry is readable by only the owner of the
55533 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
55534 + the RBAC system), and thus does not create privacy concerns.
55535 +
55536 +config GRKERNSEC_RWXMAP_LOG
55537 + bool 'Denied RWX mmap/mprotect logging'
55538 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
55539 + help
55540 + If you say Y here, calls to mmap() and mprotect() with explicit
55541 + usage of PROT_WRITE and PROT_EXEC together will be logged when
55542 + denied by the PAX_MPROTECT feature. If the sysctl option is
55543 + enabled, a sysctl option with name "rwxmap_logging" is created.
55544 +
55545 +config GRKERNSEC_AUDIT_TEXTREL
55546 + bool 'ELF text relocations logging (READ HELP)'
55547 + depends on PAX_MPROTECT
55548 + help
55549 + If you say Y here, text relocations will be logged with the filename
55550 + of the offending library or binary. The purpose of the feature is
55551 + to help Linux distribution developers get rid of libraries and
55552 + binaries that need text relocations which hinder the future progress
55553 + of PaX. Only Linux distribution developers should say Y here, and
55554 + never on a production machine, as this option creates an information
55555 + leak that could aid an attacker in defeating the randomization of
55556 + a single memory region. If the sysctl option is enabled, a sysctl
55557 + option with name "audit_textrel" is created.
55558 +
55559 +endmenu
55560 +
55561 +menu "Executable Protections"
55562 +depends on GRKERNSEC
55563 +
55564 +config GRKERNSEC_DMESG
55565 + bool "Dmesg(8) restriction"
55566 + help
55567 + If you say Y here, non-root users will not be able to use dmesg(8)
55568 + to view up to the last 4kb of messages in the kernel's log buffer.
55569 + The kernel's log buffer often contains kernel addresses and other
55570 + identifying information useful to an attacker in fingerprinting a
55571 + system for a targeted exploit.
55572 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
55573 + created.
55574 +
55575 +config GRKERNSEC_HARDEN_PTRACE
55576 + bool "Deter ptrace-based process snooping"
55577 + help
55578 + If you say Y here, TTY sniffers and other malicious monitoring
55579 + programs implemented through ptrace will be defeated. If you
55580 + have been using the RBAC system, this option has already been
55581 + enabled for several years for all users, with the ability to make
55582 + fine-grained exceptions.
55583 +
55584 + This option only affects the ability of non-root users to ptrace
55585 + processes that are not a descendent of the ptracing process.
55586 + This means that strace ./binary and gdb ./binary will still work,
55587 + but attaching to arbitrary processes will not. If the sysctl
55588 + option is enabled, a sysctl option with name "harden_ptrace" is
55589 + created.
55590 +
55591 +config GRKERNSEC_TPE
55592 + bool "Trusted Path Execution (TPE)"
55593 + help
55594 + If you say Y here, you will be able to choose a gid to add to the
55595 + supplementary groups of users you want to mark as "untrusted."
55596 + These users will not be able to execute any files that are not in
55597 + root-owned directories writable only by root. If the sysctl option
55598 + is enabled, a sysctl option with name "tpe" is created.
55599 +
55600 +config GRKERNSEC_TPE_ALL
55601 + bool "Partially restrict all non-root users"
55602 + depends on GRKERNSEC_TPE
55603 + help
55604 + If you say Y here, all non-root users will be covered under
55605 + a weaker TPE restriction. This is separate from, and in addition to,
55606 + the main TPE options that you have selected elsewhere. Thus, if a
55607 + "trusted" GID is chosen, this restriction applies to even that GID.
55608 + Under this restriction, all non-root users will only be allowed to
55609 + execute files in directories they own that are not group or
55610 + world-writable, or in directories owned by root and writable only by
55611 + root. If the sysctl option is enabled, a sysctl option with name
55612 + "tpe_restrict_all" is created.
55613 +
55614 +config GRKERNSEC_TPE_INVERT
55615 + bool "Invert GID option"
55616 + depends on GRKERNSEC_TPE
55617 + help
55618 + If you say Y here, the group you specify in the TPE configuration will
55619 + decide what group TPE restrictions will be *disabled* for. This
55620 + option is useful if you want TPE restrictions to be applied to most
55621 + users on the system. If the sysctl option is enabled, a sysctl option
55622 + with name "tpe_invert" is created. Unlike other sysctl options, this
55623 + entry will default to on for backward-compatibility.
55624 +
55625 +config GRKERNSEC_TPE_GID
55626 + int "GID for untrusted users"
55627 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
55628 + default 1005
55629 + help
55630 + Setting this GID determines what group TPE restrictions will be
55631 + *enabled* for. If the sysctl option is enabled, a sysctl option
55632 + with name "tpe_gid" is created.
55633 +
55634 +config GRKERNSEC_TPE_GID
55635 + int "GID for trusted users"
55636 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
55637 + default 1005
55638 + help
55639 + Setting this GID determines what group TPE restrictions will be
55640 + *disabled* for. If the sysctl option is enabled, a sysctl option
55641 + with name "tpe_gid" is created.
55642 +
55643 +endmenu
55644 +menu "Network Protections"
55645 +depends on GRKERNSEC
55646 +
55647 +config GRKERNSEC_RANDNET
55648 + bool "Larger entropy pools"
55649 + help
55650 + If you say Y here, the entropy pools used for many features of Linux
55651 + and grsecurity will be doubled in size. Since several grsecurity
55652 + features use additional randomness, it is recommended that you say Y
55653 + here. Saying Y here has a similar effect as modifying
55654 + /proc/sys/kernel/random/poolsize.
55655 +
55656 +config GRKERNSEC_BLACKHOLE
55657 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
55658 + depends on NET
55659 + help
55660 + If you say Y here, neither TCP resets nor ICMP
55661 + destination-unreachable packets will be sent in response to packets
55662 + sent to ports for which no associated listening process exists.
55663 + This feature supports both IPV4 and IPV6 and exempts the
55664 + loopback interface from blackholing. Enabling this feature
55665 + makes a host more resilient to DoS attacks and reduces network
55666 + visibility against scanners.
55667 +
55668 + The blackhole feature as-implemented is equivalent to the FreeBSD
55669 + blackhole feature, as it prevents RST responses to all packets, not
55670 + just SYNs. Under most application behavior this causes no
55671 + problems, but applications (like haproxy) may not close certain
55672 + connections in a way that cleanly terminates them on the remote
55673 + end, leaving the remote host in LAST_ACK state. Because of this
55674 + side-effect and to prevent intentional LAST_ACK DoSes, this
55675 + feature also adds automatic mitigation against such attacks.
55676 + The mitigation drastically reduces the amount of time a socket
55677 + can spend in LAST_ACK state. If you're using haproxy and not
55678 + all servers it connects to have this option enabled, consider
55679 + disabling this feature on the haproxy host.
55680 +
55681 + If the sysctl option is enabled, two sysctl options with names
55682 + "ip_blackhole" and "lastack_retries" will be created.
55683 + While "ip_blackhole" takes the standard zero/non-zero on/off
55684 + toggle, "lastack_retries" uses the same kinds of values as
55685 + "tcp_retries1" and "tcp_retries2". The default value of 4
55686 + prevents a socket from lasting more than 45 seconds in LAST_ACK
55687 + state.
55688 +
55689 +config GRKERNSEC_SOCKET
55690 + bool "Socket restrictions"
55691 + depends on NET
55692 + help
55693 + If you say Y here, you will be able to choose from several options.
55694 + If you assign a GID on your system and add it to the supplementary
55695 + groups of users you want to restrict socket access to, this patch
55696 + will perform up to three things, based on the option(s) you choose.
55697 +
55698 +config GRKERNSEC_SOCKET_ALL
55699 + bool "Deny any sockets to group"
55700 + depends on GRKERNSEC_SOCKET
55701 + help
55702 + If you say Y here, you will be able to choose a GID of whose users will
55703 + be unable to connect to other hosts from your machine or run server
55704 + applications from your machine. If the sysctl option is enabled, a
55705 + sysctl option with name "socket_all" is created.
55706 +
55707 +config GRKERNSEC_SOCKET_ALL_GID
55708 + int "GID to deny all sockets for"
55709 + depends on GRKERNSEC_SOCKET_ALL
55710 + default 1004
55711 + help
55712 + Here you can choose the GID to disable socket access for. Remember to
55713 + add the users you want socket access disabled for to the GID
55714 + specified here. If the sysctl option is enabled, a sysctl option
55715 + with name "socket_all_gid" is created.
55716 +
55717 +config GRKERNSEC_SOCKET_CLIENT
55718 + bool "Deny client sockets to group"
55719 + depends on GRKERNSEC_SOCKET
55720 + help
55721 + If you say Y here, you will be able to choose a GID of whose users will
55722 + be unable to connect to other hosts from your machine, but will be
55723 + able to run servers. If this option is enabled, all users in the group
55724 + you specify will have to use passive mode when initiating ftp transfers
55725 + from the shell on your machine. If the sysctl option is enabled, a
55726 + sysctl option with name "socket_client" is created.
55727 +
55728 +config GRKERNSEC_SOCKET_CLIENT_GID
55729 + int "GID to deny client sockets for"
55730 + depends on GRKERNSEC_SOCKET_CLIENT
55731 + default 1003
55732 + help
55733 + Here you can choose the GID to disable client socket access for.
55734 + Remember to add the users you want client socket access disabled for to
55735 + the GID specified here. If the sysctl option is enabled, a sysctl
55736 + option with name "socket_client_gid" is created.
55737 +
55738 +config GRKERNSEC_SOCKET_SERVER
55739 + bool "Deny server sockets to group"
55740 + depends on GRKERNSEC_SOCKET
55741 + help
55742 + If you say Y here, you will be able to choose a GID of whose users will
55743 + be unable to run server applications from your machine. If the sysctl
55744 + option is enabled, a sysctl option with name "socket_server" is created.
55745 +
55746 +config GRKERNSEC_SOCKET_SERVER_GID
55747 + int "GID to deny server sockets for"
55748 + depends on GRKERNSEC_SOCKET_SERVER
55749 + default 1002
55750 + help
55751 + Here you can choose the GID to disable server socket access for.
55752 + Remember to add the users you want server socket access disabled for to
55753 + the GID specified here. If the sysctl option is enabled, a sysctl
55754 + option with name "socket_server_gid" is created.
55755 +
55756 +endmenu
55757 +menu "Sysctl support"
55758 +depends on GRKERNSEC && SYSCTL
55759 +
55760 +config GRKERNSEC_SYSCTL
55761 + bool "Sysctl support"
55762 + help
55763 + If you say Y here, you will be able to change the options that
55764 + grsecurity runs with at bootup, without having to recompile your
55765 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
55766 + to enable (1) or disable (0) various features. All the sysctl entries
55767 + are mutable until the "grsec_lock" entry is set to a non-zero value.
55768 + All features enabled in the kernel configuration are disabled at boot
55769 + if you do not say Y to the "Turn on features by default" option.
55770 + All options should be set at startup, and the grsec_lock entry should
55771 + be set to a non-zero value after all the options are set.
55772 + *THIS IS EXTREMELY IMPORTANT*
55773 +
55774 +config GRKERNSEC_SYSCTL_DISTRO
55775 + bool "Extra sysctl support for distro makers (READ HELP)"
55776 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
55777 + help
55778 + If you say Y here, additional sysctl options will be created
55779 + for features that affect processes running as root. Therefore,
55780 + it is critical when using this option that the grsec_lock entry be
55781 + enabled after boot. Only distros with prebuilt kernel packages
55782 + with this option enabled that can ensure grsec_lock is enabled
55783 + after boot should use this option.
55784 + *Failure to set grsec_lock after boot makes all grsec features
55785 + this option covers useless*
55786 +
55787 + Currently this option creates the following sysctl entries:
55788 + "Disable Privileged I/O": "disable_priv_io"
55789 +
55790 +config GRKERNSEC_SYSCTL_ON
55791 + bool "Turn on features by default"
55792 + depends on GRKERNSEC_SYSCTL
55793 + help
55794 + If you say Y here, instead of having all features enabled in the
55795 + kernel configuration disabled at boot time, the features will be
55796 + enabled at boot time. It is recommended you say Y here unless
55797 + there is some reason you would want all sysctl-tunable features to
55798 + be disabled by default. As mentioned elsewhere, it is important
55799 + to enable the grsec_lock entry once you have finished modifying
55800 + the sysctl entries.
55801 +
55802 +endmenu
55803 +menu "Logging Options"
55804 +depends on GRKERNSEC
55805 +
55806 +config GRKERNSEC_FLOODTIME
55807 + int "Seconds in between log messages (minimum)"
55808 + default 10
55809 + help
55810 + This option allows you to enforce the number of seconds between
55811 + grsecurity log messages. The default should be suitable for most
55812 + people, however, if you choose to change it, choose a value small enough
55813 + to allow informative logs to be produced, but large enough to
55814 + prevent flooding.
55815 +
55816 +config GRKERNSEC_FLOODBURST
55817 + int "Number of messages in a burst (maximum)"
55818 + default 6
55819 + help
55820 + This option allows you to choose the maximum number of messages allowed
55821 + within the flood time interval you chose in a separate option. The
55822 + default should be suitable for most people, however if you find that
55823 + many of your logs are being interpreted as flooding, you may want to
55824 + raise this value.
55825 +
55826 +endmenu
55827 +
55828 +endmenu
55829 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
55830 new file mode 100644
55831 index 0000000..be9ae3a
55832 --- /dev/null
55833 +++ b/grsecurity/Makefile
55834 @@ -0,0 +1,36 @@
55835 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
55836 +# during 2001-2009 it has been completely redesigned by Brad Spengler
55837 +# into an RBAC system
55838 +#
55839 +# All code in this directory and various hooks inserted throughout the kernel
55840 +# are copyright Brad Spengler - Open Source Security, Inc., and released
55841 +# under the GPL v2 or higher
55842 +
55843 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
55844 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
55845 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
55846 +
55847 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
55848 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
55849 + gracl_learn.o grsec_log.o
55850 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
55851 +
55852 +ifdef CONFIG_NET
55853 +obj-y += grsec_sock.o
55854 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
55855 +endif
55856 +
55857 +ifndef CONFIG_GRKERNSEC
55858 +obj-y += grsec_disabled.o
55859 +endif
55860 +
55861 +ifdef CONFIG_GRKERNSEC_HIDESYM
55862 +extra-y := grsec_hidesym.o
55863 +$(obj)/grsec_hidesym.o:
55864 + @-chmod -f 500 /boot
55865 + @-chmod -f 500 /lib/modules
55866 + @-chmod -f 500 /lib64/modules
55867 + @-chmod -f 500 /lib32/modules
55868 + @-chmod -f 700 .
55869 + @echo ' grsec: protected kernel image paths'
55870 +endif
55871 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
55872 new file mode 100644
55873 index 0000000..6bd68d6
55874 --- /dev/null
55875 +++ b/grsecurity/gracl.c
55876 @@ -0,0 +1,4141 @@
55877 +#include <linux/kernel.h>
55878 +#include <linux/module.h>
55879 +#include <linux/sched.h>
55880 +#include <linux/mm.h>
55881 +#include <linux/file.h>
55882 +#include <linux/fs.h>
55883 +#include <linux/namei.h>
55884 +#include <linux/mount.h>
55885 +#include <linux/tty.h>
55886 +#include <linux/proc_fs.h>
55887 +#include <linux/smp_lock.h>
55888 +#include <linux/slab.h>
55889 +#include <linux/vmalloc.h>
55890 +#include <linux/types.h>
55891 +#include <linux/sysctl.h>
55892 +#include <linux/netdevice.h>
55893 +#include <linux/ptrace.h>
55894 +#include <linux/gracl.h>
55895 +#include <linux/gralloc.h>
55896 +#include <linux/grsecurity.h>
55897 +#include <linux/grinternal.h>
55898 +#include <linux/pid_namespace.h>
55899 +#include <linux/fdtable.h>
55900 +#include <linux/percpu.h>
55901 +
55902 +#include <asm/uaccess.h>
55903 +#include <asm/errno.h>
55904 +#include <asm/mman.h>
55905 +
55906 +static struct acl_role_db acl_role_set;
55907 +static struct name_db name_set;
55908 +static struct inodev_db inodev_set;
55909 +
55910 +/* for keeping track of userspace pointers used for subjects, so we
55911 + can share references in the kernel as well
55912 +*/
55913 +
55914 +static struct dentry *real_root;
55915 +static struct vfsmount *real_root_mnt;
55916 +
55917 +static struct acl_subj_map_db subj_map_set;
55918 +
55919 +static struct acl_role_label *default_role;
55920 +
55921 +static struct acl_role_label *role_list;
55922 +
55923 +static u16 acl_sp_role_value;
55924 +
55925 +extern char *gr_shared_page[4];
55926 +static DEFINE_MUTEX(gr_dev_mutex);
55927 +DEFINE_RWLOCK(gr_inode_lock);
55928 +
55929 +struct gr_arg *gr_usermode;
55930 +
55931 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
55932 +
55933 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
55934 +extern void gr_clear_learn_entries(void);
55935 +
55936 +#ifdef CONFIG_GRKERNSEC_RESLOG
55937 +extern void gr_log_resource(const struct task_struct *task,
55938 + const int res, const unsigned long wanted, const int gt);
55939 +#endif
55940 +
55941 +unsigned char *gr_system_salt;
55942 +unsigned char *gr_system_sum;
55943 +
55944 +static struct sprole_pw **acl_special_roles = NULL;
55945 +static __u16 num_sprole_pws = 0;
55946 +
55947 +static struct acl_role_label *kernel_role = NULL;
55948 +
55949 +static unsigned int gr_auth_attempts = 0;
55950 +static unsigned long gr_auth_expires = 0UL;
55951 +
55952 +#ifdef CONFIG_NET
55953 +extern struct vfsmount *sock_mnt;
55954 +#endif
55955 +extern struct vfsmount *pipe_mnt;
55956 +extern struct vfsmount *shm_mnt;
55957 +#ifdef CONFIG_HUGETLBFS
55958 +extern struct vfsmount *hugetlbfs_vfsmount;
55959 +#endif
55960 +
55961 +static struct acl_object_label *fakefs_obj_rw;
55962 +static struct acl_object_label *fakefs_obj_rwx;
55963 +
55964 +extern int gr_init_uidset(void);
55965 +extern void gr_free_uidset(void);
55966 +extern void gr_remove_uid(uid_t uid);
55967 +extern int gr_find_uid(uid_t uid);
55968 +
55969 +__inline__ int
55970 +gr_acl_is_enabled(void)
55971 +{
55972 + return (gr_status & GR_READY);
55973 +}
55974 +
55975 +#ifdef CONFIG_BTRFS_FS
55976 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
55977 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
55978 +#endif
55979 +
55980 +static inline dev_t __get_dev(const struct dentry *dentry)
55981 +{
55982 +#ifdef CONFIG_BTRFS_FS
55983 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
55984 + return get_btrfs_dev_from_inode(dentry->d_inode);
55985 + else
55986 +#endif
55987 + return dentry->d_inode->i_sb->s_dev;
55988 +}
55989 +
55990 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
55991 +{
55992 + return __get_dev(dentry);
55993 +}
55994 +
55995 +static char gr_task_roletype_to_char(struct task_struct *task)
55996 +{
55997 + switch (task->role->roletype &
55998 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
55999 + GR_ROLE_SPECIAL)) {
56000 + case GR_ROLE_DEFAULT:
56001 + return 'D';
56002 + case GR_ROLE_USER:
56003 + return 'U';
56004 + case GR_ROLE_GROUP:
56005 + return 'G';
56006 + case GR_ROLE_SPECIAL:
56007 + return 'S';
56008 + }
56009 +
56010 + return 'X';
56011 +}
56012 +
56013 +char gr_roletype_to_char(void)
56014 +{
56015 + return gr_task_roletype_to_char(current);
56016 +}
56017 +
56018 +__inline__ int
56019 +gr_acl_tpe_check(void)
56020 +{
56021 + if (unlikely(!(gr_status & GR_READY)))
56022 + return 0;
56023 + if (current->role->roletype & GR_ROLE_TPE)
56024 + return 1;
56025 + else
56026 + return 0;
56027 +}
56028 +
56029 +int
56030 +gr_handle_rawio(const struct inode *inode)
56031 +{
56032 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56033 + if (inode && S_ISBLK(inode->i_mode) &&
56034 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
56035 + !capable(CAP_SYS_RAWIO))
56036 + return 1;
56037 +#endif
56038 + return 0;
56039 +}
56040 +
56041 +static int
56042 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
56043 +{
56044 + if (likely(lena != lenb))
56045 + return 0;
56046 +
56047 + return !memcmp(a, b, lena);
56048 +}
56049 +
56050 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
56051 +{
56052 + *buflen -= namelen;
56053 + if (*buflen < 0)
56054 + return -ENAMETOOLONG;
56055 + *buffer -= namelen;
56056 + memcpy(*buffer, str, namelen);
56057 + return 0;
56058 +}
56059 +
56060 +/* this must be called with vfsmount_lock and dcache_lock held */
56061 +
56062 +static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
56063 + struct dentry *root, struct vfsmount *rootmnt,
56064 + char *buffer, int buflen)
56065 +{
56066 + char * end = buffer+buflen;
56067 + char * retval;
56068 + int namelen;
56069 +
56070 + *--end = '\0';
56071 + buflen--;
56072 +
56073 + if (buflen < 1)
56074 + goto Elong;
56075 + /* Get '/' right */
56076 + retval = end-1;
56077 + *retval = '/';
56078 +
56079 + for (;;) {
56080 + struct dentry * parent;
56081 +
56082 + if (dentry == root && vfsmnt == rootmnt)
56083 + break;
56084 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
56085 + /* Global root? */
56086 + if (vfsmnt->mnt_parent == vfsmnt)
56087 + goto global_root;
56088 + dentry = vfsmnt->mnt_mountpoint;
56089 + vfsmnt = vfsmnt->mnt_parent;
56090 + continue;
56091 + }
56092 + parent = dentry->d_parent;
56093 + prefetch(parent);
56094 + namelen = dentry->d_name.len;
56095 + buflen -= namelen + 1;
56096 + if (buflen < 0)
56097 + goto Elong;
56098 + end -= namelen;
56099 + memcpy(end, dentry->d_name.name, namelen);
56100 + *--end = '/';
56101 + retval = end;
56102 + dentry = parent;
56103 + }
56104 +
56105 +out:
56106 + return retval;
56107 +
56108 +global_root:
56109 + namelen = dentry->d_name.len;
56110 + buflen -= namelen;
56111 + if (buflen < 0)
56112 + goto Elong;
56113 + retval -= namelen-1; /* hit the slash */
56114 + memcpy(retval, dentry->d_name.name, namelen);
56115 + goto out;
56116 +Elong:
56117 + retval = ERR_PTR(-ENAMETOOLONG);
56118 + goto out;
56119 +}
56120 +
56121 +static char *
56122 +gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
56123 + struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
56124 +{
56125 + char *retval;
56126 +
56127 + retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
56128 + if (unlikely(IS_ERR(retval)))
56129 + retval = strcpy(buf, "<path too long>");
56130 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
56131 + retval[1] = '\0';
56132 +
56133 + return retval;
56134 +}
56135 +
56136 +static char *
56137 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
56138 + char *buf, int buflen)
56139 +{
56140 + char *res;
56141 +
56142 + /* we can use real_root, real_root_mnt, because this is only called
56143 + by the RBAC system */
56144 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
56145 +
56146 + return res;
56147 +}
56148 +
56149 +static char *
56150 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
56151 + char *buf, int buflen)
56152 +{
56153 + char *res;
56154 + struct dentry *root;
56155 + struct vfsmount *rootmnt;
56156 + struct task_struct *reaper = &init_task;
56157 +
56158 + /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
56159 + read_lock(&reaper->fs->lock);
56160 + root = dget(reaper->fs->root.dentry);
56161 + rootmnt = mntget(reaper->fs->root.mnt);
56162 + read_unlock(&reaper->fs->lock);
56163 +
56164 + spin_lock(&dcache_lock);
56165 + spin_lock(&vfsmount_lock);
56166 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
56167 + spin_unlock(&vfsmount_lock);
56168 + spin_unlock(&dcache_lock);
56169 +
56170 + dput(root);
56171 + mntput(rootmnt);
56172 + return res;
56173 +}
56174 +
56175 +static char *
56176 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
56177 +{
56178 + char *ret;
56179 + spin_lock(&dcache_lock);
56180 + spin_lock(&vfsmount_lock);
56181 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
56182 + PAGE_SIZE);
56183 + spin_unlock(&vfsmount_lock);
56184 + spin_unlock(&dcache_lock);
56185 + return ret;
56186 +}
56187 +
56188 +static char *
56189 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
56190 +{
56191 + char *ret;
56192 + char *buf;
56193 + int buflen;
56194 +
56195 + spin_lock(&dcache_lock);
56196 + spin_lock(&vfsmount_lock);
56197 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
56198 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
56199 + buflen = (int)(ret - buf);
56200 + if (buflen >= 5)
56201 + prepend(&ret, &buflen, "/proc", 5);
56202 + else
56203 + ret = strcpy(buf, "<path too long>");
56204 + spin_unlock(&vfsmount_lock);
56205 + spin_unlock(&dcache_lock);
56206 + return ret;
56207 +}
56208 +
56209 +char *
56210 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
56211 +{
56212 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
56213 + PAGE_SIZE);
56214 +}
56215 +
56216 +char *
56217 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
56218 +{
56219 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
56220 + PAGE_SIZE);
56221 +}
56222 +
56223 +char *
56224 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
56225 +{
56226 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
56227 + PAGE_SIZE);
56228 +}
56229 +
56230 +char *
56231 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
56232 +{
56233 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
56234 + PAGE_SIZE);
56235 +}
56236 +
56237 +char *
56238 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
56239 +{
56240 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
56241 + PAGE_SIZE);
56242 +}
56243 +
56244 +__inline__ __u32
56245 +to_gr_audit(const __u32 reqmode)
56246 +{
56247 + /* masks off auditable permission flags, then shifts them to create
56248 + auditing flags, and adds the special case of append auditing if
56249 + we're requesting write */
56250 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
56251 +}
56252 +
56253 +struct acl_subject_label *
56254 +lookup_subject_map(const struct acl_subject_label *userp)
56255 +{
56256 + unsigned int index = shash(userp, subj_map_set.s_size);
56257 + struct subject_map *match;
56258 +
56259 + match = subj_map_set.s_hash[index];
56260 +
56261 + while (match && match->user != userp)
56262 + match = match->next;
56263 +
56264 + if (match != NULL)
56265 + return match->kernel;
56266 + else
56267 + return NULL;
56268 +}
56269 +
56270 +static void
56271 +insert_subj_map_entry(struct subject_map *subjmap)
56272 +{
56273 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
56274 + struct subject_map **curr;
56275 +
56276 + subjmap->prev = NULL;
56277 +
56278 + curr = &subj_map_set.s_hash[index];
56279 + if (*curr != NULL)
56280 + (*curr)->prev = subjmap;
56281 +
56282 + subjmap->next = *curr;
56283 + *curr = subjmap;
56284 +
56285 + return;
56286 +}
56287 +
56288 +static struct acl_role_label *
56289 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
56290 + const gid_t gid)
56291 +{
56292 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
56293 + struct acl_role_label *match;
56294 + struct role_allowed_ip *ipp;
56295 + unsigned int x;
56296 + u32 curr_ip = task->signal->curr_ip;
56297 +
56298 + task->signal->saved_ip = curr_ip;
56299 +
56300 + match = acl_role_set.r_hash[index];
56301 +
56302 + while (match) {
56303 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
56304 + for (x = 0; x < match->domain_child_num; x++) {
56305 + if (match->domain_children[x] == uid)
56306 + goto found;
56307 + }
56308 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
56309 + break;
56310 + match = match->next;
56311 + }
56312 +found:
56313 + if (match == NULL) {
56314 + try_group:
56315 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
56316 + match = acl_role_set.r_hash[index];
56317 +
56318 + while (match) {
56319 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
56320 + for (x = 0; x < match->domain_child_num; x++) {
56321 + if (match->domain_children[x] == gid)
56322 + goto found2;
56323 + }
56324 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
56325 + break;
56326 + match = match->next;
56327 + }
56328 +found2:
56329 + if (match == NULL)
56330 + match = default_role;
56331 + if (match->allowed_ips == NULL)
56332 + return match;
56333 + else {
56334 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
56335 + if (likely
56336 + ((ntohl(curr_ip) & ipp->netmask) ==
56337 + (ntohl(ipp->addr) & ipp->netmask)))
56338 + return match;
56339 + }
56340 + match = default_role;
56341 + }
56342 + } else if (match->allowed_ips == NULL) {
56343 + return match;
56344 + } else {
56345 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
56346 + if (likely
56347 + ((ntohl(curr_ip) & ipp->netmask) ==
56348 + (ntohl(ipp->addr) & ipp->netmask)))
56349 + return match;
56350 + }
56351 + goto try_group;
56352 + }
56353 +
56354 + return match;
56355 +}
56356 +
56357 +struct acl_subject_label *
56358 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
56359 + const struct acl_role_label *role)
56360 +{
56361 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
56362 + struct acl_subject_label *match;
56363 +
56364 + match = role->subj_hash[index];
56365 +
56366 + while (match && (match->inode != ino || match->device != dev ||
56367 + (match->mode & GR_DELETED))) {
56368 + match = match->next;
56369 + }
56370 +
56371 + if (match && !(match->mode & GR_DELETED))
56372 + return match;
56373 + else
56374 + return NULL;
56375 +}
56376 +
56377 +struct acl_subject_label *
56378 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
56379 + const struct acl_role_label *role)
56380 +{
56381 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
56382 + struct acl_subject_label *match;
56383 +
56384 + match = role->subj_hash[index];
56385 +
56386 + while (match && (match->inode != ino || match->device != dev ||
56387 + !(match->mode & GR_DELETED))) {
56388 + match = match->next;
56389 + }
56390 +
56391 + if (match && (match->mode & GR_DELETED))
56392 + return match;
56393 + else
56394 + return NULL;
56395 +}
56396 +
56397 +static struct acl_object_label *
56398 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
56399 + const struct acl_subject_label *subj)
56400 +{
56401 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
56402 + struct acl_object_label *match;
56403 +
56404 + match = subj->obj_hash[index];
56405 +
56406 + while (match && (match->inode != ino || match->device != dev ||
56407 + (match->mode & GR_DELETED))) {
56408 + match = match->next;
56409 + }
56410 +
56411 + if (match && !(match->mode & GR_DELETED))
56412 + return match;
56413 + else
56414 + return NULL;
56415 +}
56416 +
56417 +static struct acl_object_label *
56418 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
56419 + const struct acl_subject_label *subj)
56420 +{
56421 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
56422 + struct acl_object_label *match;
56423 +
56424 + match = subj->obj_hash[index];
56425 +
56426 + while (match && (match->inode != ino || match->device != dev ||
56427 + !(match->mode & GR_DELETED))) {
56428 + match = match->next;
56429 + }
56430 +
56431 + if (match && (match->mode & GR_DELETED))
56432 + return match;
56433 +
56434 + match = subj->obj_hash[index];
56435 +
56436 + while (match && (match->inode != ino || match->device != dev ||
56437 + (match->mode & GR_DELETED))) {
56438 + match = match->next;
56439 + }
56440 +
56441 + if (match && !(match->mode & GR_DELETED))
56442 + return match;
56443 + else
56444 + return NULL;
56445 +}
56446 +
56447 +static struct name_entry *
56448 +lookup_name_entry(const char *name)
56449 +{
56450 + unsigned int len = strlen(name);
56451 + unsigned int key = full_name_hash(name, len);
56452 + unsigned int index = key % name_set.n_size;
56453 + struct name_entry *match;
56454 +
56455 + match = name_set.n_hash[index];
56456 +
56457 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
56458 + match = match->next;
56459 +
56460 + return match;
56461 +}
56462 +
56463 +static struct name_entry *
56464 +lookup_name_entry_create(const char *name)
56465 +{
56466 + unsigned int len = strlen(name);
56467 + unsigned int key = full_name_hash(name, len);
56468 + unsigned int index = key % name_set.n_size;
56469 + struct name_entry *match;
56470 +
56471 + match = name_set.n_hash[index];
56472 +
56473 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
56474 + !match->deleted))
56475 + match = match->next;
56476 +
56477 + if (match && match->deleted)
56478 + return match;
56479 +
56480 + match = name_set.n_hash[index];
56481 +
56482 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
56483 + match->deleted))
56484 + match = match->next;
56485 +
56486 + if (match && !match->deleted)
56487 + return match;
56488 + else
56489 + return NULL;
56490 +}
56491 +
56492 +static struct inodev_entry *
56493 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
56494 +{
56495 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
56496 + struct inodev_entry *match;
56497 +
56498 + match = inodev_set.i_hash[index];
56499 +
56500 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
56501 + match = match->next;
56502 +
56503 + return match;
56504 +}
56505 +
56506 +static void
56507 +insert_inodev_entry(struct inodev_entry *entry)
56508 +{
56509 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
56510 + inodev_set.i_size);
56511 + struct inodev_entry **curr;
56512 +
56513 + entry->prev = NULL;
56514 +
56515 + curr = &inodev_set.i_hash[index];
56516 + if (*curr != NULL)
56517 + (*curr)->prev = entry;
56518 +
56519 + entry->next = *curr;
56520 + *curr = entry;
56521 +
56522 + return;
56523 +}
56524 +
56525 +static void
56526 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
56527 +{
56528 + unsigned int index =
56529 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
56530 + struct acl_role_label **curr;
56531 + struct acl_role_label *tmp;
56532 +
56533 + curr = &acl_role_set.r_hash[index];
56534 +
56535 + /* if role was already inserted due to domains and already has
56536 + a role in the same bucket as it attached, then we need to
56537 + combine these two buckets
56538 + */
56539 + if (role->next) {
56540 + tmp = role->next;
56541 + while (tmp->next)
56542 + tmp = tmp->next;
56543 + tmp->next = *curr;
56544 + } else
56545 + role->next = *curr;
56546 + *curr = role;
56547 +
56548 + return;
56549 +}
56550 +
56551 +static void
56552 +insert_acl_role_label(struct acl_role_label *role)
56553 +{
56554 + int i;
56555 +
56556 + if (role_list == NULL) {
56557 + role_list = role;
56558 + role->prev = NULL;
56559 + } else {
56560 + role->prev = role_list;
56561 + role_list = role;
56562 + }
56563 +
56564 + /* used for hash chains */
56565 + role->next = NULL;
56566 +
56567 + if (role->roletype & GR_ROLE_DOMAIN) {
56568 + for (i = 0; i < role->domain_child_num; i++)
56569 + __insert_acl_role_label(role, role->domain_children[i]);
56570 + } else
56571 + __insert_acl_role_label(role, role->uidgid);
56572 +}
56573 +
56574 +static int
56575 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
56576 +{
56577 + struct name_entry **curr, *nentry;
56578 + struct inodev_entry *ientry;
56579 + unsigned int len = strlen(name);
56580 + unsigned int key = full_name_hash(name, len);
56581 + unsigned int index = key % name_set.n_size;
56582 +
56583 + curr = &name_set.n_hash[index];
56584 +
56585 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
56586 + curr = &((*curr)->next);
56587 +
56588 + if (*curr != NULL)
56589 + return 1;
56590 +
56591 + nentry = acl_alloc(sizeof (struct name_entry));
56592 + if (nentry == NULL)
56593 + return 0;
56594 + ientry = acl_alloc(sizeof (struct inodev_entry));
56595 + if (ientry == NULL)
56596 + return 0;
56597 + ientry->nentry = nentry;
56598 +
56599 + nentry->key = key;
56600 + nentry->name = name;
56601 + nentry->inode = inode;
56602 + nentry->device = device;
56603 + nentry->len = len;
56604 + nentry->deleted = deleted;
56605 +
56606 + nentry->prev = NULL;
56607 + curr = &name_set.n_hash[index];
56608 + if (*curr != NULL)
56609 + (*curr)->prev = nentry;
56610 + nentry->next = *curr;
56611 + *curr = nentry;
56612 +
56613 + /* insert us into the table searchable by inode/dev */
56614 + insert_inodev_entry(ientry);
56615 +
56616 + return 1;
56617 +}
56618 +
56619 +static void
56620 +insert_acl_obj_label(struct acl_object_label *obj,
56621 + struct acl_subject_label *subj)
56622 +{
56623 + unsigned int index =
56624 + fhash(obj->inode, obj->device, subj->obj_hash_size);
56625 + struct acl_object_label **curr;
56626 +
56627 +
56628 + obj->prev = NULL;
56629 +
56630 + curr = &subj->obj_hash[index];
56631 + if (*curr != NULL)
56632 + (*curr)->prev = obj;
56633 +
56634 + obj->next = *curr;
56635 + *curr = obj;
56636 +
56637 + return;
56638 +}
56639 +
56640 +static void
56641 +insert_acl_subj_label(struct acl_subject_label *obj,
56642 + struct acl_role_label *role)
56643 +{
56644 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
56645 + struct acl_subject_label **curr;
56646 +
56647 + obj->prev = NULL;
56648 +
56649 + curr = &role->subj_hash[index];
56650 + if (*curr != NULL)
56651 + (*curr)->prev = obj;
56652 +
56653 + obj->next = *curr;
56654 + *curr = obj;
56655 +
56656 + return;
56657 +}
56658 +
56659 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
56660 +
56661 +static void *
56662 +create_table(__u32 * len, int elementsize)
56663 +{
56664 + unsigned int table_sizes[] = {
56665 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
56666 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
56667 + 4194301, 8388593, 16777213, 33554393, 67108859
56668 + };
56669 + void *newtable = NULL;
56670 + unsigned int pwr = 0;
56671 +
56672 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
56673 + table_sizes[pwr] <= *len)
56674 + pwr++;
56675 +
56676 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
56677 + return newtable;
56678 +
56679 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
56680 + newtable =
56681 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
56682 + else
56683 + newtable = vmalloc(table_sizes[pwr] * elementsize);
56684 +
56685 + *len = table_sizes[pwr];
56686 +
56687 + return newtable;
56688 +}
56689 +
56690 +static int
56691 +init_variables(const struct gr_arg *arg)
56692 +{
56693 + struct task_struct *reaper = &init_task;
56694 + unsigned int stacksize;
56695 +
56696 + subj_map_set.s_size = arg->role_db.num_subjects;
56697 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
56698 + name_set.n_size = arg->role_db.num_objects;
56699 + inodev_set.i_size = arg->role_db.num_objects;
56700 +
56701 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
56702 + !name_set.n_size || !inodev_set.i_size)
56703 + return 1;
56704 +
56705 + if (!gr_init_uidset())
56706 + return 1;
56707 +
56708 + /* set up the stack that holds allocation info */
56709 +
56710 + stacksize = arg->role_db.num_pointers + 5;
56711 +
56712 + if (!acl_alloc_stack_init(stacksize))
56713 + return 1;
56714 +
56715 + /* grab reference for the real root dentry and vfsmount */
56716 + read_lock(&reaper->fs->lock);
56717 + real_root = dget(reaper->fs->root.dentry);
56718 + real_root_mnt = mntget(reaper->fs->root.mnt);
56719 + read_unlock(&reaper->fs->lock);
56720 +
56721 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
56722 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
56723 +#endif
56724 +
56725 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
56726 + if (fakefs_obj_rw == NULL)
56727 + return 1;
56728 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
56729 +
56730 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
56731 + if (fakefs_obj_rwx == NULL)
56732 + return 1;
56733 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
56734 +
56735 + subj_map_set.s_hash =
56736 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
56737 + acl_role_set.r_hash =
56738 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
56739 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
56740 + inodev_set.i_hash =
56741 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
56742 +
56743 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
56744 + !name_set.n_hash || !inodev_set.i_hash)
56745 + return 1;
56746 +
56747 + memset(subj_map_set.s_hash, 0,
56748 + sizeof(struct subject_map *) * subj_map_set.s_size);
56749 + memset(acl_role_set.r_hash, 0,
56750 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
56751 + memset(name_set.n_hash, 0,
56752 + sizeof (struct name_entry *) * name_set.n_size);
56753 + memset(inodev_set.i_hash, 0,
56754 + sizeof (struct inodev_entry *) * inodev_set.i_size);
56755 +
56756 + return 0;
56757 +}
56758 +
56759 +/* free information not needed after startup
56760 + currently contains user->kernel pointer mappings for subjects
56761 +*/
56762 +
56763 +static void
56764 +free_init_variables(void)
56765 +{
56766 + __u32 i;
56767 +
56768 + if (subj_map_set.s_hash) {
56769 + for (i = 0; i < subj_map_set.s_size; i++) {
56770 + if (subj_map_set.s_hash[i]) {
56771 + kfree(subj_map_set.s_hash[i]);
56772 + subj_map_set.s_hash[i] = NULL;
56773 + }
56774 + }
56775 +
56776 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
56777 + PAGE_SIZE)
56778 + kfree(subj_map_set.s_hash);
56779 + else
56780 + vfree(subj_map_set.s_hash);
56781 + }
56782 +
56783 + return;
56784 +}
56785 +
56786 +static void
56787 +free_variables(void)
56788 +{
56789 + struct acl_subject_label *s;
56790 + struct acl_role_label *r;
56791 + struct task_struct *task, *task2;
56792 + unsigned int x;
56793 +
56794 + gr_clear_learn_entries();
56795 +
56796 + read_lock(&tasklist_lock);
56797 + do_each_thread(task2, task) {
56798 + task->acl_sp_role = 0;
56799 + task->acl_role_id = 0;
56800 + task->acl = NULL;
56801 + task->role = NULL;
56802 + } while_each_thread(task2, task);
56803 + read_unlock(&tasklist_lock);
56804 +
56805 + /* release the reference to the real root dentry and vfsmount */
56806 + if (real_root)
56807 + dput(real_root);
56808 + real_root = NULL;
56809 + if (real_root_mnt)
56810 + mntput(real_root_mnt);
56811 + real_root_mnt = NULL;
56812 +
56813 + /* free all object hash tables */
56814 +
56815 + FOR_EACH_ROLE_START(r)
56816 + if (r->subj_hash == NULL)
56817 + goto next_role;
56818 + FOR_EACH_SUBJECT_START(r, s, x)
56819 + if (s->obj_hash == NULL)
56820 + break;
56821 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
56822 + kfree(s->obj_hash);
56823 + else
56824 + vfree(s->obj_hash);
56825 + FOR_EACH_SUBJECT_END(s, x)
56826 + FOR_EACH_NESTED_SUBJECT_START(r, s)
56827 + if (s->obj_hash == NULL)
56828 + break;
56829 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
56830 + kfree(s->obj_hash);
56831 + else
56832 + vfree(s->obj_hash);
56833 + FOR_EACH_NESTED_SUBJECT_END(s)
56834 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
56835 + kfree(r->subj_hash);
56836 + else
56837 + vfree(r->subj_hash);
56838 + r->subj_hash = NULL;
56839 +next_role:
56840 + FOR_EACH_ROLE_END(r)
56841 +
56842 + acl_free_all();
56843 +
56844 + if (acl_role_set.r_hash) {
56845 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
56846 + PAGE_SIZE)
56847 + kfree(acl_role_set.r_hash);
56848 + else
56849 + vfree(acl_role_set.r_hash);
56850 + }
56851 + if (name_set.n_hash) {
56852 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
56853 + PAGE_SIZE)
56854 + kfree(name_set.n_hash);
56855 + else
56856 + vfree(name_set.n_hash);
56857 + }
56858 +
56859 + if (inodev_set.i_hash) {
56860 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
56861 + PAGE_SIZE)
56862 + kfree(inodev_set.i_hash);
56863 + else
56864 + vfree(inodev_set.i_hash);
56865 + }
56866 +
56867 + gr_free_uidset();
56868 +
56869 + memset(&name_set, 0, sizeof (struct name_db));
56870 + memset(&inodev_set, 0, sizeof (struct inodev_db));
56871 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
56872 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
56873 +
56874 + default_role = NULL;
56875 + role_list = NULL;
56876 +
56877 + return;
56878 +}
56879 +
56880 +static __u32
56881 +count_user_objs(struct acl_object_label *userp)
56882 +{
56883 + struct acl_object_label o_tmp;
56884 + __u32 num = 0;
56885 +
56886 + while (userp) {
56887 + if (copy_from_user(&o_tmp, userp,
56888 + sizeof (struct acl_object_label)))
56889 + break;
56890 +
56891 + userp = o_tmp.prev;
56892 + num++;
56893 + }
56894 +
56895 + return num;
56896 +}
56897 +
56898 +static struct acl_subject_label *
56899 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
56900 +
56901 +static int
56902 +copy_user_glob(struct acl_object_label *obj)
56903 +{
56904 + struct acl_object_label *g_tmp, **guser;
56905 + unsigned int len;
56906 + char *tmp;
56907 +
56908 + if (obj->globbed == NULL)
56909 + return 0;
56910 +
56911 + guser = &obj->globbed;
56912 + while (*guser) {
56913 + g_tmp = (struct acl_object_label *)
56914 + acl_alloc(sizeof (struct acl_object_label));
56915 + if (g_tmp == NULL)
56916 + return -ENOMEM;
56917 +
56918 + if (copy_from_user(g_tmp, *guser,
56919 + sizeof (struct acl_object_label)))
56920 + return -EFAULT;
56921 +
56922 + len = strnlen_user(g_tmp->filename, PATH_MAX);
56923 +
56924 + if (!len || len >= PATH_MAX)
56925 + return -EINVAL;
56926 +
56927 + if ((tmp = (char *) acl_alloc(len)) == NULL)
56928 + return -ENOMEM;
56929 +
56930 + if (copy_from_user(tmp, g_tmp->filename, len))
56931 + return -EFAULT;
56932 + tmp[len-1] = '\0';
56933 + g_tmp->filename = tmp;
56934 +
56935 + *guser = g_tmp;
56936 + guser = &(g_tmp->next);
56937 + }
56938 +
56939 + return 0;
56940 +}
56941 +
56942 +static int
56943 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
56944 + struct acl_role_label *role)
56945 +{
56946 + struct acl_object_label *o_tmp;
56947 + unsigned int len;
56948 + int ret;
56949 + char *tmp;
56950 +
56951 + while (userp) {
56952 + if ((o_tmp = (struct acl_object_label *)
56953 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
56954 + return -ENOMEM;
56955 +
56956 + if (copy_from_user(o_tmp, userp,
56957 + sizeof (struct acl_object_label)))
56958 + return -EFAULT;
56959 +
56960 + userp = o_tmp->prev;
56961 +
56962 + len = strnlen_user(o_tmp->filename, PATH_MAX);
56963 +
56964 + if (!len || len >= PATH_MAX)
56965 + return -EINVAL;
56966 +
56967 + if ((tmp = (char *) acl_alloc(len)) == NULL)
56968 + return -ENOMEM;
56969 +
56970 + if (copy_from_user(tmp, o_tmp->filename, len))
56971 + return -EFAULT;
56972 + tmp[len-1] = '\0';
56973 + o_tmp->filename = tmp;
56974 +
56975 + insert_acl_obj_label(o_tmp, subj);
56976 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
56977 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
56978 + return -ENOMEM;
56979 +
56980 + ret = copy_user_glob(o_tmp);
56981 + if (ret)
56982 + return ret;
56983 +
56984 + if (o_tmp->nested) {
56985 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
56986 + if (IS_ERR(o_tmp->nested))
56987 + return PTR_ERR(o_tmp->nested);
56988 +
56989 + /* insert into nested subject list */
56990 + o_tmp->nested->next = role->hash->first;
56991 + role->hash->first = o_tmp->nested;
56992 + }
56993 + }
56994 +
56995 + return 0;
56996 +}
56997 +
56998 +static __u32
56999 +count_user_subjs(struct acl_subject_label *userp)
57000 +{
57001 + struct acl_subject_label s_tmp;
57002 + __u32 num = 0;
57003 +
57004 + while (userp) {
57005 + if (copy_from_user(&s_tmp, userp,
57006 + sizeof (struct acl_subject_label)))
57007 + break;
57008 +
57009 + userp = s_tmp.prev;
57010 + /* do not count nested subjects against this count, since
57011 + they are not included in the hash table, but are
57012 + attached to objects. We have already counted
57013 + the subjects in userspace for the allocation
57014 + stack
57015 + */
57016 + if (!(s_tmp.mode & GR_NESTED))
57017 + num++;
57018 + }
57019 +
57020 + return num;
57021 +}
57022 +
57023 +static int
57024 +copy_user_allowedips(struct acl_role_label *rolep)
57025 +{
57026 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
57027 +
57028 + ruserip = rolep->allowed_ips;
57029 +
57030 + while (ruserip) {
57031 + rlast = rtmp;
57032 +
57033 + if ((rtmp = (struct role_allowed_ip *)
57034 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
57035 + return -ENOMEM;
57036 +
57037 + if (copy_from_user(rtmp, ruserip,
57038 + sizeof (struct role_allowed_ip)))
57039 + return -EFAULT;
57040 +
57041 + ruserip = rtmp->prev;
57042 +
57043 + if (!rlast) {
57044 + rtmp->prev = NULL;
57045 + rolep->allowed_ips = rtmp;
57046 + } else {
57047 + rlast->next = rtmp;
57048 + rtmp->prev = rlast;
57049 + }
57050 +
57051 + if (!ruserip)
57052 + rtmp->next = NULL;
57053 + }
57054 +
57055 + return 0;
57056 +}
57057 +
57058 +static int
57059 +copy_user_transitions(struct acl_role_label *rolep)
57060 +{
57061 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
57062 +
57063 + unsigned int len;
57064 + char *tmp;
57065 +
57066 + rusertp = rolep->transitions;
57067 +
57068 + while (rusertp) {
57069 + rlast = rtmp;
57070 +
57071 + if ((rtmp = (struct role_transition *)
57072 + acl_alloc(sizeof (struct role_transition))) == NULL)
57073 + return -ENOMEM;
57074 +
57075 + if (copy_from_user(rtmp, rusertp,
57076 + sizeof (struct role_transition)))
57077 + return -EFAULT;
57078 +
57079 + rusertp = rtmp->prev;
57080 +
57081 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
57082 +
57083 + if (!len || len >= GR_SPROLE_LEN)
57084 + return -EINVAL;
57085 +
57086 + if ((tmp = (char *) acl_alloc(len)) == NULL)
57087 + return -ENOMEM;
57088 +
57089 + if (copy_from_user(tmp, rtmp->rolename, len))
57090 + return -EFAULT;
57091 + tmp[len-1] = '\0';
57092 + rtmp->rolename = tmp;
57093 +
57094 + if (!rlast) {
57095 + rtmp->prev = NULL;
57096 + rolep->transitions = rtmp;
57097 + } else {
57098 + rlast->next = rtmp;
57099 + rtmp->prev = rlast;
57100 + }
57101 +
57102 + if (!rusertp)
57103 + rtmp->next = NULL;
57104 + }
57105 +
57106 + return 0;
57107 +}
57108 +
57109 +static struct acl_subject_label *
57110 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
57111 +{
57112 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
57113 + unsigned int len;
57114 + char *tmp;
57115 + __u32 num_objs;
57116 + struct acl_ip_label **i_tmp, *i_utmp2;
57117 + struct gr_hash_struct ghash;
57118 + struct subject_map *subjmap;
57119 + unsigned int i_num;
57120 + int err;
57121 +
57122 + s_tmp = lookup_subject_map(userp);
57123 +
57124 + /* we've already copied this subject into the kernel, just return
57125 + the reference to it, and don't copy it over again
57126 + */
57127 + if (s_tmp)
57128 + return(s_tmp);
57129 +
57130 + if ((s_tmp = (struct acl_subject_label *)
57131 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
57132 + return ERR_PTR(-ENOMEM);
57133 +
57134 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
57135 + if (subjmap == NULL)
57136 + return ERR_PTR(-ENOMEM);
57137 +
57138 + subjmap->user = userp;
57139 + subjmap->kernel = s_tmp;
57140 + insert_subj_map_entry(subjmap);
57141 +
57142 + if (copy_from_user(s_tmp, userp,
57143 + sizeof (struct acl_subject_label)))
57144 + return ERR_PTR(-EFAULT);
57145 +
57146 + len = strnlen_user(s_tmp->filename, PATH_MAX);
57147 +
57148 + if (!len || len >= PATH_MAX)
57149 + return ERR_PTR(-EINVAL);
57150 +
57151 + if ((tmp = (char *) acl_alloc(len)) == NULL)
57152 + return ERR_PTR(-ENOMEM);
57153 +
57154 + if (copy_from_user(tmp, s_tmp->filename, len))
57155 + return ERR_PTR(-EFAULT);
57156 + tmp[len-1] = '\0';
57157 + s_tmp->filename = tmp;
57158 +
57159 + if (!strcmp(s_tmp->filename, "/"))
57160 + role->root_label = s_tmp;
57161 +
57162 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
57163 + return ERR_PTR(-EFAULT);
57164 +
57165 + /* copy user and group transition tables */
57166 +
57167 + if (s_tmp->user_trans_num) {
57168 + uid_t *uidlist;
57169 +
57170 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
57171 + if (uidlist == NULL)
57172 + return ERR_PTR(-ENOMEM);
57173 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
57174 + return ERR_PTR(-EFAULT);
57175 +
57176 + s_tmp->user_transitions = uidlist;
57177 + }
57178 +
57179 + if (s_tmp->group_trans_num) {
57180 + gid_t *gidlist;
57181 +
57182 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
57183 + if (gidlist == NULL)
57184 + return ERR_PTR(-ENOMEM);
57185 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
57186 + return ERR_PTR(-EFAULT);
57187 +
57188 + s_tmp->group_transitions = gidlist;
57189 + }
57190 +
57191 + /* set up object hash table */
57192 + num_objs = count_user_objs(ghash.first);
57193 +
57194 + s_tmp->obj_hash_size = num_objs;
57195 + s_tmp->obj_hash =
57196 + (struct acl_object_label **)
57197 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
57198 +
57199 + if (!s_tmp->obj_hash)
57200 + return ERR_PTR(-ENOMEM);
57201 +
57202 + memset(s_tmp->obj_hash, 0,
57203 + s_tmp->obj_hash_size *
57204 + sizeof (struct acl_object_label *));
57205 +
57206 + /* add in objects */
57207 + err = copy_user_objs(ghash.first, s_tmp, role);
57208 +
57209 + if (err)
57210 + return ERR_PTR(err);
57211 +
57212 + /* set pointer for parent subject */
57213 + if (s_tmp->parent_subject) {
57214 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
57215 +
57216 + if (IS_ERR(s_tmp2))
57217 + return s_tmp2;
57218 +
57219 + s_tmp->parent_subject = s_tmp2;
57220 + }
57221 +
57222 + /* add in ip acls */
57223 +
57224 + if (!s_tmp->ip_num) {
57225 + s_tmp->ips = NULL;
57226 + goto insert;
57227 + }
57228 +
57229 + i_tmp =
57230 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
57231 + sizeof (struct acl_ip_label *));
57232 +
57233 + if (!i_tmp)
57234 + return ERR_PTR(-ENOMEM);
57235 +
57236 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
57237 + *(i_tmp + i_num) =
57238 + (struct acl_ip_label *)
57239 + acl_alloc(sizeof (struct acl_ip_label));
57240 + if (!*(i_tmp + i_num))
57241 + return ERR_PTR(-ENOMEM);
57242 +
57243 + if (copy_from_user
57244 + (&i_utmp2, s_tmp->ips + i_num,
57245 + sizeof (struct acl_ip_label *)))
57246 + return ERR_PTR(-EFAULT);
57247 +
57248 + if (copy_from_user
57249 + (*(i_tmp + i_num), i_utmp2,
57250 + sizeof (struct acl_ip_label)))
57251 + return ERR_PTR(-EFAULT);
57252 +
57253 + if ((*(i_tmp + i_num))->iface == NULL)
57254 + continue;
57255 +
57256 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
57257 + if (!len || len >= IFNAMSIZ)
57258 + return ERR_PTR(-EINVAL);
57259 + tmp = acl_alloc(len);
57260 + if (tmp == NULL)
57261 + return ERR_PTR(-ENOMEM);
57262 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
57263 + return ERR_PTR(-EFAULT);
57264 + (*(i_tmp + i_num))->iface = tmp;
57265 + }
57266 +
57267 + s_tmp->ips = i_tmp;
57268 +
57269 +insert:
57270 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
57271 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
57272 + return ERR_PTR(-ENOMEM);
57273 +
57274 + return s_tmp;
57275 +}
57276 +
57277 +static int
57278 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
57279 +{
57280 + struct acl_subject_label s_pre;
57281 + struct acl_subject_label * ret;
57282 + int err;
57283 +
57284 + while (userp) {
57285 + if (copy_from_user(&s_pre, userp,
57286 + sizeof (struct acl_subject_label)))
57287 + return -EFAULT;
57288 +
57289 + /* do not add nested subjects here, add
57290 + while parsing objects
57291 + */
57292 +
57293 + if (s_pre.mode & GR_NESTED) {
57294 + userp = s_pre.prev;
57295 + continue;
57296 + }
57297 +
57298 + ret = do_copy_user_subj(userp, role);
57299 +
57300 + err = PTR_ERR(ret);
57301 + if (IS_ERR(ret))
57302 + return err;
57303 +
57304 + insert_acl_subj_label(ret, role);
57305 +
57306 + userp = s_pre.prev;
57307 + }
57308 +
57309 + return 0;
57310 +}
57311 +
57312 +static int
57313 +copy_user_acl(struct gr_arg *arg)
57314 +{
57315 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
57316 + struct sprole_pw *sptmp;
57317 + struct gr_hash_struct *ghash;
57318 + uid_t *domainlist;
57319 + unsigned int r_num;
57320 + unsigned int len;
57321 + char *tmp;
57322 + int err = 0;
57323 + __u16 i;
57324 + __u32 num_subjs;
57325 +
57326 + /* we need a default and kernel role */
57327 + if (arg->role_db.num_roles < 2)
57328 + return -EINVAL;
57329 +
57330 + /* copy special role authentication info from userspace */
57331 +
57332 + num_sprole_pws = arg->num_sprole_pws;
57333 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
57334 +
57335 + if (!acl_special_roles) {
57336 + err = -ENOMEM;
57337 + goto cleanup;
57338 + }
57339 +
57340 + for (i = 0; i < num_sprole_pws; i++) {
57341 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
57342 + if (!sptmp) {
57343 + err = -ENOMEM;
57344 + goto cleanup;
57345 + }
57346 + if (copy_from_user(sptmp, arg->sprole_pws + i,
57347 + sizeof (struct sprole_pw))) {
57348 + err = -EFAULT;
57349 + goto cleanup;
57350 + }
57351 +
57352 + len =
57353 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
57354 +
57355 + if (!len || len >= GR_SPROLE_LEN) {
57356 + err = -EINVAL;
57357 + goto cleanup;
57358 + }
57359 +
57360 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
57361 + err = -ENOMEM;
57362 + goto cleanup;
57363 + }
57364 +
57365 + if (copy_from_user(tmp, sptmp->rolename, len)) {
57366 + err = -EFAULT;
57367 + goto cleanup;
57368 + }
57369 + tmp[len-1] = '\0';
57370 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
57371 + printk(KERN_ALERT "Copying special role %s\n", tmp);
57372 +#endif
57373 + sptmp->rolename = tmp;
57374 + acl_special_roles[i] = sptmp;
57375 + }
57376 +
57377 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
57378 +
57379 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
57380 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
57381 +
57382 + if (!r_tmp) {
57383 + err = -ENOMEM;
57384 + goto cleanup;
57385 + }
57386 +
57387 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
57388 + sizeof (struct acl_role_label *))) {
57389 + err = -EFAULT;
57390 + goto cleanup;
57391 + }
57392 +
57393 + if (copy_from_user(r_tmp, r_utmp2,
57394 + sizeof (struct acl_role_label))) {
57395 + err = -EFAULT;
57396 + goto cleanup;
57397 + }
57398 +
57399 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
57400 +
57401 + if (!len || len >= PATH_MAX) {
57402 + err = -EINVAL;
57403 + goto cleanup;
57404 + }
57405 +
57406 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
57407 + err = -ENOMEM;
57408 + goto cleanup;
57409 + }
57410 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
57411 + err = -EFAULT;
57412 + goto cleanup;
57413 + }
57414 + tmp[len-1] = '\0';
57415 + r_tmp->rolename = tmp;
57416 +
57417 + if (!strcmp(r_tmp->rolename, "default")
57418 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
57419 + default_role = r_tmp;
57420 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
57421 + kernel_role = r_tmp;
57422 + }
57423 +
57424 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
57425 + err = -ENOMEM;
57426 + goto cleanup;
57427 + }
57428 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
57429 + err = -EFAULT;
57430 + goto cleanup;
57431 + }
57432 +
57433 + r_tmp->hash = ghash;
57434 +
57435 + num_subjs = count_user_subjs(r_tmp->hash->first);
57436 +
57437 + r_tmp->subj_hash_size = num_subjs;
57438 + r_tmp->subj_hash =
57439 + (struct acl_subject_label **)
57440 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
57441 +
57442 + if (!r_tmp->subj_hash) {
57443 + err = -ENOMEM;
57444 + goto cleanup;
57445 + }
57446 +
57447 + err = copy_user_allowedips(r_tmp);
57448 + if (err)
57449 + goto cleanup;
57450 +
57451 + /* copy domain info */
57452 + if (r_tmp->domain_children != NULL) {
57453 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
57454 + if (domainlist == NULL) {
57455 + err = -ENOMEM;
57456 + goto cleanup;
57457 + }
57458 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
57459 + err = -EFAULT;
57460 + goto cleanup;
57461 + }
57462 + r_tmp->domain_children = domainlist;
57463 + }
57464 +
57465 + err = copy_user_transitions(r_tmp);
57466 + if (err)
57467 + goto cleanup;
57468 +
57469 + memset(r_tmp->subj_hash, 0,
57470 + r_tmp->subj_hash_size *
57471 + sizeof (struct acl_subject_label *));
57472 +
57473 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
57474 +
57475 + if (err)
57476 + goto cleanup;
57477 +
57478 + /* set nested subject list to null */
57479 + r_tmp->hash->first = NULL;
57480 +
57481 + insert_acl_role_label(r_tmp);
57482 + }
57483 +
57484 + goto return_err;
57485 + cleanup:
57486 + free_variables();
57487 + return_err:
57488 + return err;
57489 +
57490 +}
57491 +
57492 +static int
57493 +gracl_init(struct gr_arg *args)
57494 +{
57495 + int error = 0;
57496 +
57497 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
57498 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
57499 +
57500 + if (init_variables(args)) {
57501 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
57502 + error = -ENOMEM;
57503 + free_variables();
57504 + goto out;
57505 + }
57506 +
57507 + error = copy_user_acl(args);
57508 + free_init_variables();
57509 + if (error) {
57510 + free_variables();
57511 + goto out;
57512 + }
57513 +
57514 + if ((error = gr_set_acls(0))) {
57515 + free_variables();
57516 + goto out;
57517 + }
57518 +
57519 + pax_open_kernel();
57520 + gr_status |= GR_READY;
57521 + pax_close_kernel();
57522 +
57523 + out:
57524 + return error;
57525 +}
57526 +
57527 +/* derived from glibc fnmatch() 0: match, 1: no match*/
57528 +
57529 +static int
57530 +glob_match(const char *p, const char *n)
57531 +{
57532 + char c;
57533 +
57534 + while ((c = *p++) != '\0') {
57535 + switch (c) {
57536 + case '?':
57537 + if (*n == '\0')
57538 + return 1;
57539 + else if (*n == '/')
57540 + return 1;
57541 + break;
57542 + case '\\':
57543 + if (*n != c)
57544 + return 1;
57545 + break;
57546 + case '*':
57547 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
57548 + if (*n == '/')
57549 + return 1;
57550 + else if (c == '?') {
57551 + if (*n == '\0')
57552 + return 1;
57553 + else
57554 + ++n;
57555 + }
57556 + }
57557 + if (c == '\0') {
57558 + return 0;
57559 + } else {
57560 + const char *endp;
57561 +
57562 + if ((endp = strchr(n, '/')) == NULL)
57563 + endp = n + strlen(n);
57564 +
57565 + if (c == '[') {
57566 + for (--p; n < endp; ++n)
57567 + if (!glob_match(p, n))
57568 + return 0;
57569 + } else if (c == '/') {
57570 + while (*n != '\0' && *n != '/')
57571 + ++n;
57572 + if (*n == '/' && !glob_match(p, n + 1))
57573 + return 0;
57574 + } else {
57575 + for (--p; n < endp; ++n)
57576 + if (*n == c && !glob_match(p, n))
57577 + return 0;
57578 + }
57579 +
57580 + return 1;
57581 + }
57582 + case '[':
57583 + {
57584 + int not;
57585 + char cold;
57586 +
57587 + if (*n == '\0' || *n == '/')
57588 + return 1;
57589 +
57590 + not = (*p == '!' || *p == '^');
57591 + if (not)
57592 + ++p;
57593 +
57594 + c = *p++;
57595 + for (;;) {
57596 + unsigned char fn = (unsigned char)*n;
57597 +
57598 + if (c == '\0')
57599 + return 1;
57600 + else {
57601 + if (c == fn)
57602 + goto matched;
57603 + cold = c;
57604 + c = *p++;
57605 +
57606 + if (c == '-' && *p != ']') {
57607 + unsigned char cend = *p++;
57608 +
57609 + if (cend == '\0')
57610 + return 1;
57611 +
57612 + if (cold <= fn && fn <= cend)
57613 + goto matched;
57614 +
57615 + c = *p++;
57616 + }
57617 + }
57618 +
57619 + if (c == ']')
57620 + break;
57621 + }
57622 + if (!not)
57623 + return 1;
57624 + break;
57625 + matched:
57626 + while (c != ']') {
57627 + if (c == '\0')
57628 + return 1;
57629 +
57630 + c = *p++;
57631 + }
57632 + if (not)
57633 + return 1;
57634 + }
57635 + break;
57636 + default:
57637 + if (c != *n)
57638 + return 1;
57639 + }
57640 +
57641 + ++n;
57642 + }
57643 +
57644 + if (*n == '\0')
57645 + return 0;
57646 +
57647 + if (*n == '/')
57648 + return 0;
57649 +
57650 + return 1;
57651 +}
57652 +
57653 +static struct acl_object_label *
57654 +chk_glob_label(struct acl_object_label *globbed,
57655 + struct dentry *dentry, struct vfsmount *mnt, char **path)
57656 +{
57657 + struct acl_object_label *tmp;
57658 +
57659 + if (*path == NULL)
57660 + *path = gr_to_filename_nolock(dentry, mnt);
57661 +
57662 + tmp = globbed;
57663 +
57664 + while (tmp) {
57665 + if (!glob_match(tmp->filename, *path))
57666 + return tmp;
57667 + tmp = tmp->next;
57668 + }
57669 +
57670 + return NULL;
57671 +}
57672 +
57673 +static struct acl_object_label *
57674 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
57675 + const ino_t curr_ino, const dev_t curr_dev,
57676 + const struct acl_subject_label *subj, char **path, const int checkglob)
57677 +{
57678 + struct acl_subject_label *tmpsubj;
57679 + struct acl_object_label *retval;
57680 + struct acl_object_label *retval2;
57681 +
57682 + tmpsubj = (struct acl_subject_label *) subj;
57683 + read_lock(&gr_inode_lock);
57684 + do {
57685 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
57686 + if (retval) {
57687 + if (checkglob && retval->globbed) {
57688 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
57689 + (struct vfsmount *)orig_mnt, path);
57690 + if (retval2)
57691 + retval = retval2;
57692 + }
57693 + break;
57694 + }
57695 + } while ((tmpsubj = tmpsubj->parent_subject));
57696 + read_unlock(&gr_inode_lock);
57697 +
57698 + return retval;
57699 +}
57700 +
57701 +static __inline__ struct acl_object_label *
57702 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
57703 + const struct dentry *curr_dentry,
57704 + const struct acl_subject_label *subj, char **path, const int checkglob)
57705 +{
57706 + int newglob = checkglob;
57707 +
57708 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
57709 + as we don't want a / * rule to match instead of the / object
57710 + don't do this for create lookups that call this function though, since they're looking up
57711 + on the parent and thus need globbing checks on all paths
57712 + */
57713 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
57714 + newglob = GR_NO_GLOB;
57715 +
57716 + return __full_lookup(orig_dentry, orig_mnt,
57717 + curr_dentry->d_inode->i_ino,
57718 + __get_dev(curr_dentry), subj, path, newglob);
57719 +}
57720 +
57721 +static struct acl_object_label *
57722 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57723 + const struct acl_subject_label *subj, char *path, const int checkglob)
57724 +{
57725 + struct dentry *dentry = (struct dentry *) l_dentry;
57726 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
57727 + struct acl_object_label *retval;
57728 +
57729 + spin_lock(&dcache_lock);
57730 + spin_lock(&vfsmount_lock);
57731 +
57732 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
57733 +#ifdef CONFIG_NET
57734 + mnt == sock_mnt ||
57735 +#endif
57736 +#ifdef CONFIG_HUGETLBFS
57737 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
57738 +#endif
57739 + /* ignore Eric Biederman */
57740 + IS_PRIVATE(l_dentry->d_inode))) {
57741 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
57742 + goto out;
57743 + }
57744 +
57745 + for (;;) {
57746 + if (dentry == real_root && mnt == real_root_mnt)
57747 + break;
57748 +
57749 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
57750 + if (mnt->mnt_parent == mnt)
57751 + break;
57752 +
57753 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
57754 + if (retval != NULL)
57755 + goto out;
57756 +
57757 + dentry = mnt->mnt_mountpoint;
57758 + mnt = mnt->mnt_parent;
57759 + continue;
57760 + }
57761 +
57762 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
57763 + if (retval != NULL)
57764 + goto out;
57765 +
57766 + dentry = dentry->d_parent;
57767 + }
57768 +
57769 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
57770 +
57771 + if (retval == NULL)
57772 + retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
57773 +out:
57774 + spin_unlock(&vfsmount_lock);
57775 + spin_unlock(&dcache_lock);
57776 +
57777 + BUG_ON(retval == NULL);
57778 +
57779 + return retval;
57780 +}
57781 +
57782 +static __inline__ struct acl_object_label *
57783 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57784 + const struct acl_subject_label *subj)
57785 +{
57786 + char *path = NULL;
57787 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
57788 +}
57789 +
57790 +static __inline__ struct acl_object_label *
57791 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57792 + const struct acl_subject_label *subj)
57793 +{
57794 + char *path = NULL;
57795 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
57796 +}
57797 +
57798 +static __inline__ struct acl_object_label *
57799 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57800 + const struct acl_subject_label *subj, char *path)
57801 +{
57802 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
57803 +}
57804 +
57805 +static struct acl_subject_label *
57806 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57807 + const struct acl_role_label *role)
57808 +{
57809 + struct dentry *dentry = (struct dentry *) l_dentry;
57810 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
57811 + struct acl_subject_label *retval;
57812 +
57813 + spin_lock(&dcache_lock);
57814 + spin_lock(&vfsmount_lock);
57815 +
57816 + for (;;) {
57817 + if (dentry == real_root && mnt == real_root_mnt)
57818 + break;
57819 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
57820 + if (mnt->mnt_parent == mnt)
57821 + break;
57822 +
57823 + read_lock(&gr_inode_lock);
57824 + retval =
57825 + lookup_acl_subj_label(dentry->d_inode->i_ino,
57826 + __get_dev(dentry), role);
57827 + read_unlock(&gr_inode_lock);
57828 + if (retval != NULL)
57829 + goto out;
57830 +
57831 + dentry = mnt->mnt_mountpoint;
57832 + mnt = mnt->mnt_parent;
57833 + continue;
57834 + }
57835 +
57836 + read_lock(&gr_inode_lock);
57837 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
57838 + __get_dev(dentry), role);
57839 + read_unlock(&gr_inode_lock);
57840 + if (retval != NULL)
57841 + goto out;
57842 +
57843 + dentry = dentry->d_parent;
57844 + }
57845 +
57846 + read_lock(&gr_inode_lock);
57847 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
57848 + __get_dev(dentry), role);
57849 + read_unlock(&gr_inode_lock);
57850 +
57851 + if (unlikely(retval == NULL)) {
57852 + read_lock(&gr_inode_lock);
57853 + retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
57854 + __get_dev(real_root), role);
57855 + read_unlock(&gr_inode_lock);
57856 + }
57857 +out:
57858 + spin_unlock(&vfsmount_lock);
57859 + spin_unlock(&dcache_lock);
57860 +
57861 + BUG_ON(retval == NULL);
57862 +
57863 + return retval;
57864 +}
57865 +
57866 +static void
57867 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
57868 +{
57869 + struct task_struct *task = current;
57870 + const struct cred *cred = current_cred();
57871 +
57872 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
57873 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
57874 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
57875 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
57876 +
57877 + return;
57878 +}
57879 +
57880 +static void
57881 +gr_log_learn_sysctl(const char *path, const __u32 mode)
57882 +{
57883 + struct task_struct *task = current;
57884 + const struct cred *cred = current_cred();
57885 +
57886 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
57887 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
57888 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
57889 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
57890 +
57891 + return;
57892 +}
57893 +
57894 +static void
57895 +gr_log_learn_id_change(const char type, const unsigned int real,
57896 + const unsigned int effective, const unsigned int fs)
57897 +{
57898 + struct task_struct *task = current;
57899 + const struct cred *cred = current_cred();
57900 +
57901 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
57902 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
57903 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
57904 + type, real, effective, fs, &task->signal->saved_ip);
57905 +
57906 + return;
57907 +}
57908 +
57909 +__u32
57910 +gr_search_file(const struct dentry * dentry, const __u32 mode,
57911 + const struct vfsmount * mnt)
57912 +{
57913 + __u32 retval = mode;
57914 + struct acl_subject_label *curracl;
57915 + struct acl_object_label *currobj;
57916 +
57917 + if (unlikely(!(gr_status & GR_READY)))
57918 + return (mode & ~GR_AUDITS);
57919 +
57920 + curracl = current->acl;
57921 +
57922 + currobj = chk_obj_label(dentry, mnt, curracl);
57923 + retval = currobj->mode & mode;
57924 +
57925 + /* if we're opening a specified transfer file for writing
57926 + (e.g. /dev/initctl), then transfer our role to init
57927 + */
57928 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
57929 + current->role->roletype & GR_ROLE_PERSIST)) {
57930 + struct task_struct *task = init_pid_ns.child_reaper;
57931 +
57932 + if (task->role != current->role) {
57933 + task->acl_sp_role = 0;
57934 + task->acl_role_id = current->acl_role_id;
57935 + task->role = current->role;
57936 + rcu_read_lock();
57937 + read_lock(&grsec_exec_file_lock);
57938 + gr_apply_subject_to_task(task);
57939 + read_unlock(&grsec_exec_file_lock);
57940 + rcu_read_unlock();
57941 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
57942 + }
57943 + }
57944 +
57945 + if (unlikely
57946 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
57947 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
57948 + __u32 new_mode = mode;
57949 +
57950 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
57951 +
57952 + retval = new_mode;
57953 +
57954 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
57955 + new_mode |= GR_INHERIT;
57956 +
57957 + if (!(mode & GR_NOLEARN))
57958 + gr_log_learn(dentry, mnt, new_mode);
57959 + }
57960 +
57961 + return retval;
57962 +}
57963 +
57964 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
57965 + const struct dentry *parent,
57966 + const struct vfsmount *mnt)
57967 +{
57968 + struct name_entry *match;
57969 + struct acl_object_label *matchpo;
57970 + struct acl_subject_label *curracl;
57971 + char *path;
57972 +
57973 + if (unlikely(!(gr_status & GR_READY)))
57974 + return NULL;
57975 +
57976 + preempt_disable();
57977 + path = gr_to_filename_rbac(new_dentry, mnt);
57978 + match = lookup_name_entry_create(path);
57979 +
57980 + curracl = current->acl;
57981 +
57982 + if (match) {
57983 + read_lock(&gr_inode_lock);
57984 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
57985 + read_unlock(&gr_inode_lock);
57986 +
57987 + if (matchpo) {
57988 + preempt_enable();
57989 + return matchpo;
57990 + }
57991 + }
57992 +
57993 + // lookup parent
57994 +
57995 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
57996 +
57997 + preempt_enable();
57998 + return matchpo;
57999 +}
58000 +
58001 +__u32
58002 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
58003 + const struct vfsmount * mnt, const __u32 mode)
58004 +{
58005 + struct acl_object_label *matchpo;
58006 + __u32 retval;
58007 +
58008 + if (unlikely(!(gr_status & GR_READY)))
58009 + return (mode & ~GR_AUDITS);
58010 +
58011 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
58012 +
58013 + retval = matchpo->mode & mode;
58014 +
58015 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
58016 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
58017 + __u32 new_mode = mode;
58018 +
58019 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
58020 +
58021 + gr_log_learn(new_dentry, mnt, new_mode);
58022 + return new_mode;
58023 + }
58024 +
58025 + return retval;
58026 +}
58027 +
58028 +__u32
58029 +gr_check_link(const struct dentry * new_dentry,
58030 + const struct dentry * parent_dentry,
58031 + const struct vfsmount * parent_mnt,
58032 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
58033 +{
58034 + struct acl_object_label *obj;
58035 + __u32 oldmode, newmode;
58036 + __u32 needmode;
58037 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
58038 + GR_DELETE | GR_INHERIT;
58039 +
58040 + if (unlikely(!(gr_status & GR_READY)))
58041 + return (GR_CREATE | GR_LINK);
58042 +
58043 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
58044 + oldmode = obj->mode;
58045 +
58046 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
58047 + newmode = obj->mode;
58048 +
58049 + needmode = newmode & checkmodes;
58050 +
58051 + // old name for hardlink must have at least the permissions of the new name
58052 + if ((oldmode & needmode) != needmode)
58053 + goto bad;
58054 +
58055 + // if old name had restrictions/auditing, make sure the new name does as well
58056 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
58057 +
58058 + // don't allow hardlinking of suid/sgid files without permission
58059 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
58060 + needmode |= GR_SETID;
58061 +
58062 + if ((newmode & needmode) != needmode)
58063 + goto bad;
58064 +
58065 + // enforce minimum permissions
58066 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
58067 + return newmode;
58068 +bad:
58069 + needmode = oldmode;
58070 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
58071 + needmode |= GR_SETID;
58072 +
58073 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
58074 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
58075 + return (GR_CREATE | GR_LINK);
58076 + } else if (newmode & GR_SUPPRESS)
58077 + return GR_SUPPRESS;
58078 + else
58079 + return 0;
58080 +}
58081 +
58082 +int
58083 +gr_check_hidden_task(const struct task_struct *task)
58084 +{
58085 + if (unlikely(!(gr_status & GR_READY)))
58086 + return 0;
58087 +
58088 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
58089 + return 1;
58090 +
58091 + return 0;
58092 +}
58093 +
58094 +int
58095 +gr_check_protected_task(const struct task_struct *task)
58096 +{
58097 + if (unlikely(!(gr_status & GR_READY) || !task))
58098 + return 0;
58099 +
58100 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
58101 + task->acl != current->acl)
58102 + return 1;
58103 +
58104 + return 0;
58105 +}
58106 +
58107 +int
58108 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
58109 +{
58110 + struct task_struct *p;
58111 + int ret = 0;
58112 +
58113 + if (unlikely(!(gr_status & GR_READY) || !pid))
58114 + return ret;
58115 +
58116 + read_lock(&tasklist_lock);
58117 + do_each_pid_task(pid, type, p) {
58118 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
58119 + p->acl != current->acl) {
58120 + ret = 1;
58121 + goto out;
58122 + }
58123 + } while_each_pid_task(pid, type, p);
58124 +out:
58125 + read_unlock(&tasklist_lock);
58126 +
58127 + return ret;
58128 +}
58129 +
58130 +void
58131 +gr_copy_label(struct task_struct *tsk)
58132 +{
58133 + tsk->signal->used_accept = 0;
58134 + tsk->acl_sp_role = 0;
58135 + tsk->acl_role_id = current->acl_role_id;
58136 + tsk->acl = current->acl;
58137 + tsk->role = current->role;
58138 + tsk->signal->curr_ip = current->signal->curr_ip;
58139 + tsk->signal->saved_ip = current->signal->saved_ip;
58140 + if (current->exec_file)
58141 + get_file(current->exec_file);
58142 + tsk->exec_file = current->exec_file;
58143 + tsk->is_writable = current->is_writable;
58144 + if (unlikely(current->signal->used_accept)) {
58145 + current->signal->curr_ip = 0;
58146 + current->signal->saved_ip = 0;
58147 + }
58148 +
58149 + return;
58150 +}
58151 +
58152 +static void
58153 +gr_set_proc_res(struct task_struct *task)
58154 +{
58155 + struct acl_subject_label *proc;
58156 + unsigned short i;
58157 +
58158 + proc = task->acl;
58159 +
58160 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
58161 + return;
58162 +
58163 + for (i = 0; i < RLIM_NLIMITS; i++) {
58164 + if (!(proc->resmask & (1 << i)))
58165 + continue;
58166 +
58167 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
58168 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
58169 + }
58170 +
58171 + return;
58172 +}
58173 +
58174 +extern int __gr_process_user_ban(struct user_struct *user);
58175 +
58176 +int
58177 +gr_check_user_change(int real, int effective, int fs)
58178 +{
58179 + unsigned int i;
58180 + __u16 num;
58181 + uid_t *uidlist;
58182 + int curuid;
58183 + int realok = 0;
58184 + int effectiveok = 0;
58185 + int fsok = 0;
58186 +
58187 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58188 + struct user_struct *user;
58189 +
58190 + if (real == -1)
58191 + goto skipit;
58192 +
58193 + user = find_user(real);
58194 + if (user == NULL)
58195 + goto skipit;
58196 +
58197 + if (__gr_process_user_ban(user)) {
58198 + /* for find_user */
58199 + free_uid(user);
58200 + return 1;
58201 + }
58202 +
58203 + /* for find_user */
58204 + free_uid(user);
58205 +
58206 +skipit:
58207 +#endif
58208 +
58209 + if (unlikely(!(gr_status & GR_READY)))
58210 + return 0;
58211 +
58212 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
58213 + gr_log_learn_id_change('u', real, effective, fs);
58214 +
58215 + num = current->acl->user_trans_num;
58216 + uidlist = current->acl->user_transitions;
58217 +
58218 + if (uidlist == NULL)
58219 + return 0;
58220 +
58221 + if (real == -1)
58222 + realok = 1;
58223 + if (effective == -1)
58224 + effectiveok = 1;
58225 + if (fs == -1)
58226 + fsok = 1;
58227 +
58228 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
58229 + for (i = 0; i < num; i++) {
58230 + curuid = (int)uidlist[i];
58231 + if (real == curuid)
58232 + realok = 1;
58233 + if (effective == curuid)
58234 + effectiveok = 1;
58235 + if (fs == curuid)
58236 + fsok = 1;
58237 + }
58238 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
58239 + for (i = 0; i < num; i++) {
58240 + curuid = (int)uidlist[i];
58241 + if (real == curuid)
58242 + break;
58243 + if (effective == curuid)
58244 + break;
58245 + if (fs == curuid)
58246 + break;
58247 + }
58248 + /* not in deny list */
58249 + if (i == num) {
58250 + realok = 1;
58251 + effectiveok = 1;
58252 + fsok = 1;
58253 + }
58254 + }
58255 +
58256 + if (realok && effectiveok && fsok)
58257 + return 0;
58258 + else {
58259 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
58260 + return 1;
58261 + }
58262 +}
58263 +
58264 +int
58265 +gr_check_group_change(int real, int effective, int fs)
58266 +{
58267 + unsigned int i;
58268 + __u16 num;
58269 + gid_t *gidlist;
58270 + int curgid;
58271 + int realok = 0;
58272 + int effectiveok = 0;
58273 + int fsok = 0;
58274 +
58275 + if (unlikely(!(gr_status & GR_READY)))
58276 + return 0;
58277 +
58278 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
58279 + gr_log_learn_id_change('g', real, effective, fs);
58280 +
58281 + num = current->acl->group_trans_num;
58282 + gidlist = current->acl->group_transitions;
58283 +
58284 + if (gidlist == NULL)
58285 + return 0;
58286 +
58287 + if (real == -1)
58288 + realok = 1;
58289 + if (effective == -1)
58290 + effectiveok = 1;
58291 + if (fs == -1)
58292 + fsok = 1;
58293 +
58294 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
58295 + for (i = 0; i < num; i++) {
58296 + curgid = (int)gidlist[i];
58297 + if (real == curgid)
58298 + realok = 1;
58299 + if (effective == curgid)
58300 + effectiveok = 1;
58301 + if (fs == curgid)
58302 + fsok = 1;
58303 + }
58304 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
58305 + for (i = 0; i < num; i++) {
58306 + curgid = (int)gidlist[i];
58307 + if (real == curgid)
58308 + break;
58309 + if (effective == curgid)
58310 + break;
58311 + if (fs == curgid)
58312 + break;
58313 + }
58314 + /* not in deny list */
58315 + if (i == num) {
58316 + realok = 1;
58317 + effectiveok = 1;
58318 + fsok = 1;
58319 + }
58320 + }
58321 +
58322 + if (realok && effectiveok && fsok)
58323 + return 0;
58324 + else {
58325 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
58326 + return 1;
58327 + }
58328 +}
58329 +
58330 +void
58331 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
58332 +{
58333 + struct acl_role_label *role = task->role;
58334 + struct acl_subject_label *subj = NULL;
58335 + struct acl_object_label *obj;
58336 + struct file *filp;
58337 +
58338 + if (unlikely(!(gr_status & GR_READY)))
58339 + return;
58340 +
58341 + filp = task->exec_file;
58342 +
58343 + /* kernel process, we'll give them the kernel role */
58344 + if (unlikely(!filp)) {
58345 + task->role = kernel_role;
58346 + task->acl = kernel_role->root_label;
58347 + return;
58348 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
58349 + role = lookup_acl_role_label(task, uid, gid);
58350 +
58351 + /* perform subject lookup in possibly new role
58352 + we can use this result below in the case where role == task->role
58353 + */
58354 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
58355 +
58356 + /* if we changed uid/gid, but result in the same role
58357 + and are using inheritance, don't lose the inherited subject
58358 + if current subject is other than what normal lookup
58359 + would result in, we arrived via inheritance, don't
58360 + lose subject
58361 + */
58362 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
58363 + (subj == task->acl)))
58364 + task->acl = subj;
58365 +
58366 + task->role = role;
58367 +
58368 + task->is_writable = 0;
58369 +
58370 + /* ignore additional mmap checks for processes that are writable
58371 + by the default ACL */
58372 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
58373 + if (unlikely(obj->mode & GR_WRITE))
58374 + task->is_writable = 1;
58375 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
58376 + if (unlikely(obj->mode & GR_WRITE))
58377 + task->is_writable = 1;
58378 +
58379 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58380 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
58381 +#endif
58382 +
58383 + gr_set_proc_res(task);
58384 +
58385 + return;
58386 +}
58387 +
58388 +int
58389 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
58390 + const int unsafe_share)
58391 +{
58392 + struct task_struct *task = current;
58393 + struct acl_subject_label *newacl;
58394 + struct acl_object_label *obj;
58395 + __u32 retmode;
58396 +
58397 + if (unlikely(!(gr_status & GR_READY)))
58398 + return 0;
58399 +
58400 + newacl = chk_subj_label(dentry, mnt, task->role);
58401 +
58402 + task_lock(task);
58403 + if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
58404 + !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
58405 + !(task->role->roletype & GR_ROLE_GOD) &&
58406 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
58407 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
58408 + task_unlock(task);
58409 + if (unsafe_share)
58410 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
58411 + else
58412 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
58413 + return -EACCES;
58414 + }
58415 + task_unlock(task);
58416 +
58417 + obj = chk_obj_label(dentry, mnt, task->acl);
58418 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
58419 +
58420 + if (!(task->acl->mode & GR_INHERITLEARN) &&
58421 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
58422 + if (obj->nested)
58423 + task->acl = obj->nested;
58424 + else
58425 + task->acl = newacl;
58426 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
58427 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
58428 +
58429 + task->is_writable = 0;
58430 +
58431 + /* ignore additional mmap checks for processes that are writable
58432 + by the default ACL */
58433 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
58434 + if (unlikely(obj->mode & GR_WRITE))
58435 + task->is_writable = 1;
58436 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
58437 + if (unlikely(obj->mode & GR_WRITE))
58438 + task->is_writable = 1;
58439 +
58440 + gr_set_proc_res(task);
58441 +
58442 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58443 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
58444 +#endif
58445 + return 0;
58446 +}
58447 +
58448 +/* always called with valid inodev ptr */
58449 +static void
58450 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
58451 +{
58452 + struct acl_object_label *matchpo;
58453 + struct acl_subject_label *matchps;
58454 + struct acl_subject_label *subj;
58455 + struct acl_role_label *role;
58456 + unsigned int x;
58457 +
58458 + FOR_EACH_ROLE_START(role)
58459 + FOR_EACH_SUBJECT_START(role, subj, x)
58460 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
58461 + matchpo->mode |= GR_DELETED;
58462 + FOR_EACH_SUBJECT_END(subj,x)
58463 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
58464 + if (subj->inode == ino && subj->device == dev)
58465 + subj->mode |= GR_DELETED;
58466 + FOR_EACH_NESTED_SUBJECT_END(subj)
58467 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
58468 + matchps->mode |= GR_DELETED;
58469 + FOR_EACH_ROLE_END(role)
58470 +
58471 + inodev->nentry->deleted = 1;
58472 +
58473 + return;
58474 +}
58475 +
58476 +void
58477 +gr_handle_delete(const ino_t ino, const dev_t dev)
58478 +{
58479 + struct inodev_entry *inodev;
58480 +
58481 + if (unlikely(!(gr_status & GR_READY)))
58482 + return;
58483 +
58484 + write_lock(&gr_inode_lock);
58485 + inodev = lookup_inodev_entry(ino, dev);
58486 + if (inodev != NULL)
58487 + do_handle_delete(inodev, ino, dev);
58488 + write_unlock(&gr_inode_lock);
58489 +
58490 + return;
58491 +}
58492 +
58493 +static void
58494 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
58495 + const ino_t newinode, const dev_t newdevice,
58496 + struct acl_subject_label *subj)
58497 +{
58498 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
58499 + struct acl_object_label *match;
58500 +
58501 + match = subj->obj_hash[index];
58502 +
58503 + while (match && (match->inode != oldinode ||
58504 + match->device != olddevice ||
58505 + !(match->mode & GR_DELETED)))
58506 + match = match->next;
58507 +
58508 + if (match && (match->inode == oldinode)
58509 + && (match->device == olddevice)
58510 + && (match->mode & GR_DELETED)) {
58511 + if (match->prev == NULL) {
58512 + subj->obj_hash[index] = match->next;
58513 + if (match->next != NULL)
58514 + match->next->prev = NULL;
58515 + } else {
58516 + match->prev->next = match->next;
58517 + if (match->next != NULL)
58518 + match->next->prev = match->prev;
58519 + }
58520 + match->prev = NULL;
58521 + match->next = NULL;
58522 + match->inode = newinode;
58523 + match->device = newdevice;
58524 + match->mode &= ~GR_DELETED;
58525 +
58526 + insert_acl_obj_label(match, subj);
58527 + }
58528 +
58529 + return;
58530 +}
58531 +
58532 +static void
58533 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
58534 + const ino_t newinode, const dev_t newdevice,
58535 + struct acl_role_label *role)
58536 +{
58537 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
58538 + struct acl_subject_label *match;
58539 +
58540 + match = role->subj_hash[index];
58541 +
58542 + while (match && (match->inode != oldinode ||
58543 + match->device != olddevice ||
58544 + !(match->mode & GR_DELETED)))
58545 + match = match->next;
58546 +
58547 + if (match && (match->inode == oldinode)
58548 + && (match->device == olddevice)
58549 + && (match->mode & GR_DELETED)) {
58550 + if (match->prev == NULL) {
58551 + role->subj_hash[index] = match->next;
58552 + if (match->next != NULL)
58553 + match->next->prev = NULL;
58554 + } else {
58555 + match->prev->next = match->next;
58556 + if (match->next != NULL)
58557 + match->next->prev = match->prev;
58558 + }
58559 + match->prev = NULL;
58560 + match->next = NULL;
58561 + match->inode = newinode;
58562 + match->device = newdevice;
58563 + match->mode &= ~GR_DELETED;
58564 +
58565 + insert_acl_subj_label(match, role);
58566 + }
58567 +
58568 + return;
58569 +}
58570 +
58571 +static void
58572 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
58573 + const ino_t newinode, const dev_t newdevice)
58574 +{
58575 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
58576 + struct inodev_entry *match;
58577 +
58578 + match = inodev_set.i_hash[index];
58579 +
58580 + while (match && (match->nentry->inode != oldinode ||
58581 + match->nentry->device != olddevice || !match->nentry->deleted))
58582 + match = match->next;
58583 +
58584 + if (match && (match->nentry->inode == oldinode)
58585 + && (match->nentry->device == olddevice) &&
58586 + match->nentry->deleted) {
58587 + if (match->prev == NULL) {
58588 + inodev_set.i_hash[index] = match->next;
58589 + if (match->next != NULL)
58590 + match->next->prev = NULL;
58591 + } else {
58592 + match->prev->next = match->next;
58593 + if (match->next != NULL)
58594 + match->next->prev = match->prev;
58595 + }
58596 + match->prev = NULL;
58597 + match->next = NULL;
58598 + match->nentry->inode = newinode;
58599 + match->nentry->device = newdevice;
58600 + match->nentry->deleted = 0;
58601 +
58602 + insert_inodev_entry(match);
58603 + }
58604 +
58605 + return;
58606 +}
58607 +
58608 +static void
58609 +__do_handle_create(const struct name_entry *matchn, ino_t inode, dev_t dev)
58610 +{
58611 + struct acl_subject_label *subj;
58612 + struct acl_role_label *role;
58613 + unsigned int x;
58614 +
58615 + FOR_EACH_ROLE_START(role)
58616 + update_acl_subj_label(matchn->inode, matchn->device,
58617 + inode, dev, role);
58618 +
58619 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
58620 + if ((subj->inode == inode) && (subj->device == dev)) {
58621 + subj->inode = inode;
58622 + subj->device = dev;
58623 + }
58624 + FOR_EACH_NESTED_SUBJECT_END(subj)
58625 + FOR_EACH_SUBJECT_START(role, subj, x)
58626 + update_acl_obj_label(matchn->inode, matchn->device,
58627 + inode, dev, subj);
58628 + FOR_EACH_SUBJECT_END(subj,x)
58629 + FOR_EACH_ROLE_END(role)
58630 +
58631 + update_inodev_entry(matchn->inode, matchn->device, inode, dev);
58632 +
58633 + return;
58634 +}
58635 +
58636 +static void
58637 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
58638 + const struct vfsmount *mnt)
58639 +{
58640 + ino_t ino = dentry->d_inode->i_ino;
58641 + dev_t dev = __get_dev(dentry);
58642 +
58643 + __do_handle_create(matchn, ino, dev);
58644 +
58645 + return;
58646 +}
58647 +
58648 +void
58649 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
58650 +{
58651 + struct name_entry *matchn;
58652 +
58653 + if (unlikely(!(gr_status & GR_READY)))
58654 + return;
58655 +
58656 + preempt_disable();
58657 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
58658 +
58659 + if (unlikely((unsigned long)matchn)) {
58660 + write_lock(&gr_inode_lock);
58661 + do_handle_create(matchn, dentry, mnt);
58662 + write_unlock(&gr_inode_lock);
58663 + }
58664 + preempt_enable();
58665 +
58666 + return;
58667 +}
58668 +
58669 +void
58670 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
58671 +{
58672 + struct name_entry *matchn;
58673 +
58674 + if (unlikely(!(gr_status & GR_READY)))
58675 + return;
58676 +
58677 + preempt_disable();
58678 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
58679 +
58680 + if (unlikely((unsigned long)matchn)) {
58681 + write_lock(&gr_inode_lock);
58682 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
58683 + write_unlock(&gr_inode_lock);
58684 + }
58685 + preempt_enable();
58686 +
58687 + return;
58688 +}
58689 +
58690 +void
58691 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
58692 + struct dentry *old_dentry,
58693 + struct dentry *new_dentry,
58694 + struct vfsmount *mnt, const __u8 replace)
58695 +{
58696 + struct name_entry *matchn;
58697 + struct inodev_entry *inodev;
58698 + struct inode *inode = new_dentry->d_inode;
58699 + ino_t oldinode = old_dentry->d_inode->i_ino;
58700 + dev_t olddev = __get_dev(old_dentry);
58701 +
58702 + /* vfs_rename swaps the name and parent link for old_dentry and
58703 + new_dentry
58704 + at this point, old_dentry has the new name, parent link, and inode
58705 + for the renamed file
58706 + if a file is being replaced by a rename, new_dentry has the inode
58707 + and name for the replaced file
58708 + */
58709 +
58710 + if (unlikely(!(gr_status & GR_READY)))
58711 + return;
58712 +
58713 + preempt_disable();
58714 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
58715 +
58716 + /* we wouldn't have to check d_inode if it weren't for
58717 + NFS silly-renaming
58718 + */
58719 +
58720 + write_lock(&gr_inode_lock);
58721 + if (unlikely(replace && inode)) {
58722 + ino_t newinode = inode->i_ino;
58723 + dev_t newdev = __get_dev(new_dentry);
58724 + inodev = lookup_inodev_entry(newinode, newdev);
58725 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
58726 + do_handle_delete(inodev, newinode, newdev);
58727 + }
58728 +
58729 + inodev = lookup_inodev_entry(oldinode, olddev);
58730 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
58731 + do_handle_delete(inodev, oldinode, olddev);
58732 +
58733 + if (unlikely((unsigned long)matchn))
58734 + do_handle_create(matchn, old_dentry, mnt);
58735 +
58736 + write_unlock(&gr_inode_lock);
58737 + preempt_enable();
58738 +
58739 + return;
58740 +}
58741 +
58742 +static int
58743 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
58744 + unsigned char **sum)
58745 +{
58746 + struct acl_role_label *r;
58747 + struct role_allowed_ip *ipp;
58748 + struct role_transition *trans;
58749 + unsigned int i;
58750 + int found = 0;
58751 + u32 curr_ip = current->signal->curr_ip;
58752 +
58753 + current->signal->saved_ip = curr_ip;
58754 +
58755 + /* check transition table */
58756 +
58757 + for (trans = current->role->transitions; trans; trans = trans->next) {
58758 + if (!strcmp(rolename, trans->rolename)) {
58759 + found = 1;
58760 + break;
58761 + }
58762 + }
58763 +
58764 + if (!found)
58765 + return 0;
58766 +
58767 + /* handle special roles that do not require authentication
58768 + and check ip */
58769 +
58770 + FOR_EACH_ROLE_START(r)
58771 + if (!strcmp(rolename, r->rolename) &&
58772 + (r->roletype & GR_ROLE_SPECIAL)) {
58773 + found = 0;
58774 + if (r->allowed_ips != NULL) {
58775 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
58776 + if ((ntohl(curr_ip) & ipp->netmask) ==
58777 + (ntohl(ipp->addr) & ipp->netmask))
58778 + found = 1;
58779 + }
58780 + } else
58781 + found = 2;
58782 + if (!found)
58783 + return 0;
58784 +
58785 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
58786 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
58787 + *salt = NULL;
58788 + *sum = NULL;
58789 + return 1;
58790 + }
58791 + }
58792 + FOR_EACH_ROLE_END(r)
58793 +
58794 + for (i = 0; i < num_sprole_pws; i++) {
58795 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
58796 + *salt = acl_special_roles[i]->salt;
58797 + *sum = acl_special_roles[i]->sum;
58798 + return 1;
58799 + }
58800 + }
58801 +
58802 + return 0;
58803 +}
58804 +
58805 +static void
58806 +assign_special_role(char *rolename)
58807 +{
58808 + struct acl_object_label *obj;
58809 + struct acl_role_label *r;
58810 + struct acl_role_label *assigned = NULL;
58811 + struct task_struct *tsk;
58812 + struct file *filp;
58813 +
58814 + FOR_EACH_ROLE_START(r)
58815 + if (!strcmp(rolename, r->rolename) &&
58816 + (r->roletype & GR_ROLE_SPECIAL)) {
58817 + assigned = r;
58818 + break;
58819 + }
58820 + FOR_EACH_ROLE_END(r)
58821 +
58822 + if (!assigned)
58823 + return;
58824 +
58825 + read_lock(&tasklist_lock);
58826 + read_lock(&grsec_exec_file_lock);
58827 +
58828 + tsk = current->real_parent;
58829 + if (tsk == NULL)
58830 + goto out_unlock;
58831 +
58832 + filp = tsk->exec_file;
58833 + if (filp == NULL)
58834 + goto out_unlock;
58835 +
58836 + tsk->is_writable = 0;
58837 +
58838 + tsk->acl_sp_role = 1;
58839 + tsk->acl_role_id = ++acl_sp_role_value;
58840 + tsk->role = assigned;
58841 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
58842 +
58843 + /* ignore additional mmap checks for processes that are writable
58844 + by the default ACL */
58845 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
58846 + if (unlikely(obj->mode & GR_WRITE))
58847 + tsk->is_writable = 1;
58848 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
58849 + if (unlikely(obj->mode & GR_WRITE))
58850 + tsk->is_writable = 1;
58851 +
58852 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58853 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
58854 +#endif
58855 +
58856 +out_unlock:
58857 + read_unlock(&grsec_exec_file_lock);
58858 + read_unlock(&tasklist_lock);
58859 + return;
58860 +}
58861 +
58862 +int gr_check_secure_terminal(struct task_struct *task)
58863 +{
58864 + struct task_struct *p, *p2, *p3;
58865 + struct files_struct *files;
58866 + struct fdtable *fdt;
58867 + struct file *our_file = NULL, *file;
58868 + int i;
58869 +
58870 + if (task->signal->tty == NULL)
58871 + return 1;
58872 +
58873 + files = get_files_struct(task);
58874 + if (files != NULL) {
58875 + rcu_read_lock();
58876 + fdt = files_fdtable(files);
58877 + for (i=0; i < fdt->max_fds; i++) {
58878 + file = fcheck_files(files, i);
58879 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
58880 + get_file(file);
58881 + our_file = file;
58882 + }
58883 + }
58884 + rcu_read_unlock();
58885 + put_files_struct(files);
58886 + }
58887 +
58888 + if (our_file == NULL)
58889 + return 1;
58890 +
58891 + read_lock(&tasklist_lock);
58892 + do_each_thread(p2, p) {
58893 + files = get_files_struct(p);
58894 + if (files == NULL ||
58895 + (p->signal && p->signal->tty == task->signal->tty)) {
58896 + if (files != NULL)
58897 + put_files_struct(files);
58898 + continue;
58899 + }
58900 + rcu_read_lock();
58901 + fdt = files_fdtable(files);
58902 + for (i=0; i < fdt->max_fds; i++) {
58903 + file = fcheck_files(files, i);
58904 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
58905 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
58906 + p3 = task;
58907 + while (p3->pid > 0) {
58908 + if (p3 == p)
58909 + break;
58910 + p3 = p3->real_parent;
58911 + }
58912 + if (p3 == p)
58913 + break;
58914 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
58915 + gr_handle_alertkill(p);
58916 + rcu_read_unlock();
58917 + put_files_struct(files);
58918 + read_unlock(&tasklist_lock);
58919 + fput(our_file);
58920 + return 0;
58921 + }
58922 + }
58923 + rcu_read_unlock();
58924 + put_files_struct(files);
58925 + } while_each_thread(p2, p);
58926 + read_unlock(&tasklist_lock);
58927 +
58928 + fput(our_file);
58929 + return 1;
58930 +}
58931 +
58932 +ssize_t
58933 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
58934 +{
58935 + struct gr_arg_wrapper uwrap;
58936 + unsigned char *sprole_salt = NULL;
58937 + unsigned char *sprole_sum = NULL;
58938 + int error = sizeof (struct gr_arg_wrapper);
58939 + int error2 = 0;
58940 +
58941 + mutex_lock(&gr_dev_mutex);
58942 +
58943 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
58944 + error = -EPERM;
58945 + goto out;
58946 + }
58947 +
58948 + if (count != sizeof (struct gr_arg_wrapper)) {
58949 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
58950 + error = -EINVAL;
58951 + goto out;
58952 + }
58953 +
58954 +
58955 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
58956 + gr_auth_expires = 0;
58957 + gr_auth_attempts = 0;
58958 + }
58959 +
58960 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
58961 + error = -EFAULT;
58962 + goto out;
58963 + }
58964 +
58965 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
58966 + error = -EINVAL;
58967 + goto out;
58968 + }
58969 +
58970 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
58971 + error = -EFAULT;
58972 + goto out;
58973 + }
58974 +
58975 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
58976 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
58977 + time_after(gr_auth_expires, get_seconds())) {
58978 + error = -EBUSY;
58979 + goto out;
58980 + }
58981 +
58982 + /* if non-root trying to do anything other than use a special role,
58983 + do not attempt authentication, do not count towards authentication
58984 + locking
58985 + */
58986 +
58987 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
58988 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
58989 + current_uid()) {
58990 + error = -EPERM;
58991 + goto out;
58992 + }
58993 +
58994 + /* ensure pw and special role name are null terminated */
58995 +
58996 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
58997 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
58998 +
58999 + /* Okay.
59000 + * We have our enough of the argument structure..(we have yet
59001 + * to copy_from_user the tables themselves) . Copy the tables
59002 + * only if we need them, i.e. for loading operations. */
59003 +
59004 + switch (gr_usermode->mode) {
59005 + case GR_STATUS:
59006 + if (gr_status & GR_READY) {
59007 + error = 1;
59008 + if (!gr_check_secure_terminal(current))
59009 + error = 3;
59010 + } else
59011 + error = 2;
59012 + goto out;
59013 + case GR_SHUTDOWN:
59014 + if ((gr_status & GR_READY)
59015 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59016 + pax_open_kernel();
59017 + gr_status &= ~GR_READY;
59018 + pax_close_kernel();
59019 +
59020 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
59021 + free_variables();
59022 + memset(gr_usermode, 0, sizeof (struct gr_arg));
59023 + memset(gr_system_salt, 0, GR_SALT_LEN);
59024 + memset(gr_system_sum, 0, GR_SHA_LEN);
59025 + } else if (gr_status & GR_READY) {
59026 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
59027 + error = -EPERM;
59028 + } else {
59029 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
59030 + error = -EAGAIN;
59031 + }
59032 + break;
59033 + case GR_ENABLE:
59034 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
59035 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
59036 + else {
59037 + if (gr_status & GR_READY)
59038 + error = -EAGAIN;
59039 + else
59040 + error = error2;
59041 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
59042 + }
59043 + break;
59044 + case GR_RELOAD:
59045 + if (!(gr_status & GR_READY)) {
59046 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
59047 + error = -EAGAIN;
59048 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59049 + lock_kernel();
59050 +
59051 + pax_open_kernel();
59052 + gr_status &= ~GR_READY;
59053 + pax_close_kernel();
59054 +
59055 + free_variables();
59056 + if (!(error2 = gracl_init(gr_usermode))) {
59057 + unlock_kernel();
59058 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
59059 + } else {
59060 + unlock_kernel();
59061 + error = error2;
59062 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
59063 + }
59064 + } else {
59065 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
59066 + error = -EPERM;
59067 + }
59068 + break;
59069 + case GR_SEGVMOD:
59070 + if (unlikely(!(gr_status & GR_READY))) {
59071 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
59072 + error = -EAGAIN;
59073 + break;
59074 + }
59075 +
59076 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59077 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
59078 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
59079 + struct acl_subject_label *segvacl;
59080 + segvacl =
59081 + lookup_acl_subj_label(gr_usermode->segv_inode,
59082 + gr_usermode->segv_device,
59083 + current->role);
59084 + if (segvacl) {
59085 + segvacl->crashes = 0;
59086 + segvacl->expires = 0;
59087 + }
59088 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
59089 + gr_remove_uid(gr_usermode->segv_uid);
59090 + }
59091 + } else {
59092 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
59093 + error = -EPERM;
59094 + }
59095 + break;
59096 + case GR_SPROLE:
59097 + case GR_SPROLEPAM:
59098 + if (unlikely(!(gr_status & GR_READY))) {
59099 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
59100 + error = -EAGAIN;
59101 + break;
59102 + }
59103 +
59104 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
59105 + current->role->expires = 0;
59106 + current->role->auth_attempts = 0;
59107 + }
59108 +
59109 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
59110 + time_after(current->role->expires, get_seconds())) {
59111 + error = -EBUSY;
59112 + goto out;
59113 + }
59114 +
59115 + if (lookup_special_role_auth
59116 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
59117 + && ((!sprole_salt && !sprole_sum)
59118 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
59119 + char *p = "";
59120 + assign_special_role(gr_usermode->sp_role);
59121 + read_lock(&tasklist_lock);
59122 + if (current->real_parent)
59123 + p = current->real_parent->role->rolename;
59124 + read_unlock(&tasklist_lock);
59125 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
59126 + p, acl_sp_role_value);
59127 + } else {
59128 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
59129 + error = -EPERM;
59130 + if(!(current->role->auth_attempts++))
59131 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
59132 +
59133 + goto out;
59134 + }
59135 + break;
59136 + case GR_UNSPROLE:
59137 + if (unlikely(!(gr_status & GR_READY))) {
59138 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
59139 + error = -EAGAIN;
59140 + break;
59141 + }
59142 +
59143 + if (current->role->roletype & GR_ROLE_SPECIAL) {
59144 + char *p = "";
59145 + int i = 0;
59146 +
59147 + read_lock(&tasklist_lock);
59148 + if (current->real_parent) {
59149 + p = current->real_parent->role->rolename;
59150 + i = current->real_parent->acl_role_id;
59151 + }
59152 + read_unlock(&tasklist_lock);
59153 +
59154 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
59155 + gr_set_acls(1);
59156 + } else {
59157 + error = -EPERM;
59158 + goto out;
59159 + }
59160 + break;
59161 + default:
59162 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
59163 + error = -EINVAL;
59164 + break;
59165 + }
59166 +
59167 + if (error != -EPERM)
59168 + goto out;
59169 +
59170 + if(!(gr_auth_attempts++))
59171 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
59172 +
59173 + out:
59174 + mutex_unlock(&gr_dev_mutex);
59175 + return error;
59176 +}
59177 +
59178 +/* must be called with
59179 + rcu_read_lock();
59180 + read_lock(&tasklist_lock);
59181 + read_lock(&grsec_exec_file_lock);
59182 +*/
59183 +int gr_apply_subject_to_task(struct task_struct *task)
59184 +{
59185 + struct acl_object_label *obj;
59186 + char *tmpname;
59187 + struct acl_subject_label *tmpsubj;
59188 + struct file *filp;
59189 + struct name_entry *nmatch;
59190 +
59191 + filp = task->exec_file;
59192 + if (filp == NULL)
59193 + return 0;
59194 +
59195 + /* the following is to apply the correct subject
59196 + on binaries running when the RBAC system
59197 + is enabled, when the binaries have been
59198 + replaced or deleted since their execution
59199 + -----
59200 + when the RBAC system starts, the inode/dev
59201 + from exec_file will be one the RBAC system
59202 + is unaware of. It only knows the inode/dev
59203 + of the present file on disk, or the absence
59204 + of it.
59205 + */
59206 + preempt_disable();
59207 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
59208 +
59209 + nmatch = lookup_name_entry(tmpname);
59210 + preempt_enable();
59211 + tmpsubj = NULL;
59212 + if (nmatch) {
59213 + if (nmatch->deleted)
59214 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
59215 + else
59216 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
59217 + if (tmpsubj != NULL)
59218 + task->acl = tmpsubj;
59219 + }
59220 + if (tmpsubj == NULL)
59221 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
59222 + task->role);
59223 + if (task->acl) {
59224 + task->is_writable = 0;
59225 + /* ignore additional mmap checks for processes that are writable
59226 + by the default ACL */
59227 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
59228 + if (unlikely(obj->mode & GR_WRITE))
59229 + task->is_writable = 1;
59230 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
59231 + if (unlikely(obj->mode & GR_WRITE))
59232 + task->is_writable = 1;
59233 +
59234 + gr_set_proc_res(task);
59235 +
59236 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59237 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
59238 +#endif
59239 + } else {
59240 + return 1;
59241 + }
59242 +
59243 + return 0;
59244 +}
59245 +
59246 +int
59247 +gr_set_acls(const int type)
59248 +{
59249 + struct task_struct *task, *task2;
59250 + struct acl_role_label *role = current->role;
59251 + __u16 acl_role_id = current->acl_role_id;
59252 + const struct cred *cred;
59253 + int ret;
59254 +
59255 + rcu_read_lock();
59256 + read_lock(&tasklist_lock);
59257 + read_lock(&grsec_exec_file_lock);
59258 + do_each_thread(task2, task) {
59259 + /* check to see if we're called from the exit handler,
59260 + if so, only replace ACLs that have inherited the admin
59261 + ACL */
59262 +
59263 + if (type && (task->role != role ||
59264 + task->acl_role_id != acl_role_id))
59265 + continue;
59266 +
59267 + task->acl_role_id = 0;
59268 + task->acl_sp_role = 0;
59269 +
59270 + if (task->exec_file) {
59271 + cred = __task_cred(task);
59272 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
59273 +
59274 + ret = gr_apply_subject_to_task(task);
59275 + if (ret) {
59276 + read_unlock(&grsec_exec_file_lock);
59277 + read_unlock(&tasklist_lock);
59278 + rcu_read_unlock();
59279 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
59280 + return ret;
59281 + }
59282 + } else {
59283 + // it's a kernel process
59284 + task->role = kernel_role;
59285 + task->acl = kernel_role->root_label;
59286 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
59287 + task->acl->mode &= ~GR_PROCFIND;
59288 +#endif
59289 + }
59290 + } while_each_thread(task2, task);
59291 + read_unlock(&grsec_exec_file_lock);
59292 + read_unlock(&tasklist_lock);
59293 + rcu_read_unlock();
59294 +
59295 + return 0;
59296 +}
59297 +
59298 +void
59299 +gr_learn_resource(const struct task_struct *task,
59300 + const int res, const unsigned long wanted, const int gt)
59301 +{
59302 + struct acl_subject_label *acl;
59303 + const struct cred *cred;
59304 +
59305 + if (unlikely((gr_status & GR_READY) &&
59306 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
59307 + goto skip_reslog;
59308 +
59309 +#ifdef CONFIG_GRKERNSEC_RESLOG
59310 + gr_log_resource(task, res, wanted, gt);
59311 +#endif
59312 + skip_reslog:
59313 +
59314 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
59315 + return;
59316 +
59317 + acl = task->acl;
59318 +
59319 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
59320 + !(acl->resmask & (1 << (unsigned short) res))))
59321 + return;
59322 +
59323 + if (wanted >= acl->res[res].rlim_cur) {
59324 + unsigned long res_add;
59325 +
59326 + res_add = wanted;
59327 + switch (res) {
59328 + case RLIMIT_CPU:
59329 + res_add += GR_RLIM_CPU_BUMP;
59330 + break;
59331 + case RLIMIT_FSIZE:
59332 + res_add += GR_RLIM_FSIZE_BUMP;
59333 + break;
59334 + case RLIMIT_DATA:
59335 + res_add += GR_RLIM_DATA_BUMP;
59336 + break;
59337 + case RLIMIT_STACK:
59338 + res_add += GR_RLIM_STACK_BUMP;
59339 + break;
59340 + case RLIMIT_CORE:
59341 + res_add += GR_RLIM_CORE_BUMP;
59342 + break;
59343 + case RLIMIT_RSS:
59344 + res_add += GR_RLIM_RSS_BUMP;
59345 + break;
59346 + case RLIMIT_NPROC:
59347 + res_add += GR_RLIM_NPROC_BUMP;
59348 + break;
59349 + case RLIMIT_NOFILE:
59350 + res_add += GR_RLIM_NOFILE_BUMP;
59351 + break;
59352 + case RLIMIT_MEMLOCK:
59353 + res_add += GR_RLIM_MEMLOCK_BUMP;
59354 + break;
59355 + case RLIMIT_AS:
59356 + res_add += GR_RLIM_AS_BUMP;
59357 + break;
59358 + case RLIMIT_LOCKS:
59359 + res_add += GR_RLIM_LOCKS_BUMP;
59360 + break;
59361 + case RLIMIT_SIGPENDING:
59362 + res_add += GR_RLIM_SIGPENDING_BUMP;
59363 + break;
59364 + case RLIMIT_MSGQUEUE:
59365 + res_add += GR_RLIM_MSGQUEUE_BUMP;
59366 + break;
59367 + case RLIMIT_NICE:
59368 + res_add += GR_RLIM_NICE_BUMP;
59369 + break;
59370 + case RLIMIT_RTPRIO:
59371 + res_add += GR_RLIM_RTPRIO_BUMP;
59372 + break;
59373 + case RLIMIT_RTTIME:
59374 + res_add += GR_RLIM_RTTIME_BUMP;
59375 + break;
59376 + }
59377 +
59378 + acl->res[res].rlim_cur = res_add;
59379 +
59380 + if (wanted > acl->res[res].rlim_max)
59381 + acl->res[res].rlim_max = res_add;
59382 +
59383 + /* only log the subject filename, since resource logging is supported for
59384 + single-subject learning only */
59385 + rcu_read_lock();
59386 + cred = __task_cred(task);
59387 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
59388 + task->role->roletype, cred->uid, cred->gid, acl->filename,
59389 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
59390 + "", (unsigned long) res, &task->signal->saved_ip);
59391 + rcu_read_unlock();
59392 + }
59393 +
59394 + return;
59395 +}
59396 +
59397 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
59398 +void
59399 +pax_set_initial_flags(struct linux_binprm *bprm)
59400 +{
59401 + struct task_struct *task = current;
59402 + struct acl_subject_label *proc;
59403 + unsigned long flags;
59404 +
59405 + if (unlikely(!(gr_status & GR_READY)))
59406 + return;
59407 +
59408 + flags = pax_get_flags(task);
59409 +
59410 + proc = task->acl;
59411 +
59412 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
59413 + flags &= ~MF_PAX_PAGEEXEC;
59414 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
59415 + flags &= ~MF_PAX_SEGMEXEC;
59416 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
59417 + flags &= ~MF_PAX_RANDMMAP;
59418 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
59419 + flags &= ~MF_PAX_EMUTRAMP;
59420 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
59421 + flags &= ~MF_PAX_MPROTECT;
59422 +
59423 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
59424 + flags |= MF_PAX_PAGEEXEC;
59425 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
59426 + flags |= MF_PAX_SEGMEXEC;
59427 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
59428 + flags |= MF_PAX_RANDMMAP;
59429 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
59430 + flags |= MF_PAX_EMUTRAMP;
59431 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
59432 + flags |= MF_PAX_MPROTECT;
59433 +
59434 + pax_set_flags(task, flags);
59435 +
59436 + return;
59437 +}
59438 +#endif
59439 +
59440 +#ifdef CONFIG_SYSCTL
59441 +/* Eric Biederman likes breaking userland ABI and every inode-based security
59442 + system to save 35kb of memory */
59443 +
59444 +/* we modify the passed in filename, but adjust it back before returning */
59445 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
59446 +{
59447 + struct name_entry *nmatch;
59448 + char *p, *lastp = NULL;
59449 + struct acl_object_label *obj = NULL, *tmp;
59450 + struct acl_subject_label *tmpsubj;
59451 + char c = '\0';
59452 +
59453 + read_lock(&gr_inode_lock);
59454 +
59455 + p = name + len - 1;
59456 + do {
59457 + nmatch = lookup_name_entry(name);
59458 + if (lastp != NULL)
59459 + *lastp = c;
59460 +
59461 + if (nmatch == NULL)
59462 + goto next_component;
59463 + tmpsubj = current->acl;
59464 + do {
59465 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
59466 + if (obj != NULL) {
59467 + tmp = obj->globbed;
59468 + while (tmp) {
59469 + if (!glob_match(tmp->filename, name)) {
59470 + obj = tmp;
59471 + goto found_obj;
59472 + }
59473 + tmp = tmp->next;
59474 + }
59475 + goto found_obj;
59476 + }
59477 + } while ((tmpsubj = tmpsubj->parent_subject));
59478 +next_component:
59479 + /* end case */
59480 + if (p == name)
59481 + break;
59482 +
59483 + while (*p != '/')
59484 + p--;
59485 + if (p == name)
59486 + lastp = p + 1;
59487 + else {
59488 + lastp = p;
59489 + p--;
59490 + }
59491 + c = *lastp;
59492 + *lastp = '\0';
59493 + } while (1);
59494 +found_obj:
59495 + read_unlock(&gr_inode_lock);
59496 + /* obj returned will always be non-null */
59497 + return obj;
59498 +}
59499 +
59500 +/* returns 0 when allowing, non-zero on error
59501 + op of 0 is used for readdir, so we don't log the names of hidden files
59502 +*/
59503 +__u32
59504 +gr_handle_sysctl(const struct ctl_table *table, const int op)
59505 +{
59506 + ctl_table *tmp;
59507 + const char *proc_sys = "/proc/sys";
59508 + char *path;
59509 + struct acl_object_label *obj;
59510 + unsigned short len = 0, pos = 0, depth = 0, i;
59511 + __u32 err = 0;
59512 + __u32 mode = 0;
59513 +
59514 + if (unlikely(!(gr_status & GR_READY)))
59515 + return 0;
59516 +
59517 + /* for now, ignore operations on non-sysctl entries if it's not a
59518 + readdir*/
59519 + if (table->child != NULL && op != 0)
59520 + return 0;
59521 +
59522 + mode |= GR_FIND;
59523 + /* it's only a read if it's an entry, read on dirs is for readdir */
59524 + if (op & MAY_READ)
59525 + mode |= GR_READ;
59526 + if (op & MAY_WRITE)
59527 + mode |= GR_WRITE;
59528 +
59529 + preempt_disable();
59530 +
59531 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
59532 +
59533 + /* it's only a read/write if it's an actual entry, not a dir
59534 + (which are opened for readdir)
59535 + */
59536 +
59537 + /* convert the requested sysctl entry into a pathname */
59538 +
59539 + for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
59540 + len += strlen(tmp->procname);
59541 + len++;
59542 + depth++;
59543 + }
59544 +
59545 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
59546 + /* deny */
59547 + goto out;
59548 + }
59549 +
59550 + memset(path, 0, PAGE_SIZE);
59551 +
59552 + memcpy(path, proc_sys, strlen(proc_sys));
59553 +
59554 + pos += strlen(proc_sys);
59555 +
59556 + for (; depth > 0; depth--) {
59557 + path[pos] = '/';
59558 + pos++;
59559 + for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
59560 + if (depth == i) {
59561 + memcpy(path + pos, tmp->procname,
59562 + strlen(tmp->procname));
59563 + pos += strlen(tmp->procname);
59564 + }
59565 + i++;
59566 + }
59567 + }
59568 +
59569 + obj = gr_lookup_by_name(path, pos);
59570 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
59571 +
59572 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
59573 + ((err & mode) != mode))) {
59574 + __u32 new_mode = mode;
59575 +
59576 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
59577 +
59578 + err = 0;
59579 + gr_log_learn_sysctl(path, new_mode);
59580 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
59581 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
59582 + err = -ENOENT;
59583 + } else if (!(err & GR_FIND)) {
59584 + err = -ENOENT;
59585 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
59586 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
59587 + path, (mode & GR_READ) ? " reading" : "",
59588 + (mode & GR_WRITE) ? " writing" : "");
59589 + err = -EACCES;
59590 + } else if ((err & mode) != mode) {
59591 + err = -EACCES;
59592 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
59593 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
59594 + path, (mode & GR_READ) ? " reading" : "",
59595 + (mode & GR_WRITE) ? " writing" : "");
59596 + err = 0;
59597 + } else
59598 + err = 0;
59599 +
59600 + out:
59601 + preempt_enable();
59602 +
59603 + return err;
59604 +}
59605 +#endif
59606 +
59607 +int
59608 +gr_handle_proc_ptrace(struct task_struct *task)
59609 +{
59610 + struct file *filp;
59611 + struct task_struct *tmp = task;
59612 + struct task_struct *curtemp = current;
59613 + __u32 retmode;
59614 +
59615 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
59616 + if (unlikely(!(gr_status & GR_READY)))
59617 + return 0;
59618 +#endif
59619 +
59620 + read_lock(&tasklist_lock);
59621 + read_lock(&grsec_exec_file_lock);
59622 + filp = task->exec_file;
59623 +
59624 + while (tmp->pid > 0) {
59625 + if (tmp == curtemp)
59626 + break;
59627 + tmp = tmp->real_parent;
59628 + }
59629 +
59630 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
59631 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
59632 + read_unlock(&grsec_exec_file_lock);
59633 + read_unlock(&tasklist_lock);
59634 + return 1;
59635 + }
59636 +
59637 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
59638 + if (!(gr_status & GR_READY)) {
59639 + read_unlock(&grsec_exec_file_lock);
59640 + read_unlock(&tasklist_lock);
59641 + return 0;
59642 + }
59643 +#endif
59644 +
59645 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
59646 + read_unlock(&grsec_exec_file_lock);
59647 + read_unlock(&tasklist_lock);
59648 +
59649 + if (retmode & GR_NOPTRACE)
59650 + return 1;
59651 +
59652 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
59653 + && (current->acl != task->acl || (current->acl != current->role->root_label
59654 + && current->pid != task->pid)))
59655 + return 1;
59656 +
59657 + return 0;
59658 +}
59659 +
59660 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
59661 +{
59662 + if (unlikely(!(gr_status & GR_READY)))
59663 + return;
59664 +
59665 + if (!(current->role->roletype & GR_ROLE_GOD))
59666 + return;
59667 +
59668 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
59669 + p->role->rolename, gr_task_roletype_to_char(p),
59670 + p->acl->filename);
59671 +}
59672 +
59673 +int
59674 +gr_handle_ptrace(struct task_struct *task, const long request)
59675 +{
59676 + struct task_struct *tmp = task;
59677 + struct task_struct *curtemp = current;
59678 + __u32 retmode;
59679 +
59680 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
59681 + if (unlikely(!(gr_status & GR_READY)))
59682 + return 0;
59683 +#endif
59684 +
59685 + read_lock(&tasklist_lock);
59686 + while (tmp->pid > 0) {
59687 + if (tmp == curtemp)
59688 + break;
59689 + tmp = tmp->real_parent;
59690 + }
59691 +
59692 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
59693 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
59694 + read_unlock(&tasklist_lock);
59695 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
59696 + return 1;
59697 + }
59698 + read_unlock(&tasklist_lock);
59699 +
59700 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
59701 + if (!(gr_status & GR_READY))
59702 + return 0;
59703 +#endif
59704 +
59705 + read_lock(&grsec_exec_file_lock);
59706 + if (unlikely(!task->exec_file)) {
59707 + read_unlock(&grsec_exec_file_lock);
59708 + return 0;
59709 + }
59710 +
59711 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
59712 + read_unlock(&grsec_exec_file_lock);
59713 +
59714 + if (retmode & GR_NOPTRACE) {
59715 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
59716 + return 1;
59717 + }
59718 +
59719 + if (retmode & GR_PTRACERD) {
59720 + switch (request) {
59721 + case PTRACE_POKETEXT:
59722 + case PTRACE_POKEDATA:
59723 + case PTRACE_POKEUSR:
59724 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
59725 + case PTRACE_SETREGS:
59726 + case PTRACE_SETFPREGS:
59727 +#endif
59728 +#ifdef CONFIG_X86
59729 + case PTRACE_SETFPXREGS:
59730 +#endif
59731 +#ifdef CONFIG_ALTIVEC
59732 + case PTRACE_SETVRREGS:
59733 +#endif
59734 + return 1;
59735 + default:
59736 + return 0;
59737 + }
59738 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
59739 + !(current->role->roletype & GR_ROLE_GOD) &&
59740 + (current->acl != task->acl)) {
59741 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
59742 + return 1;
59743 + }
59744 +
59745 + return 0;
59746 +}
59747 +
59748 +static int is_writable_mmap(const struct file *filp)
59749 +{
59750 + struct task_struct *task = current;
59751 + struct acl_object_label *obj, *obj2;
59752 +
59753 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
59754 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
59755 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
59756 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
59757 + task->role->root_label);
59758 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
59759 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
59760 + return 1;
59761 + }
59762 + }
59763 + return 0;
59764 +}
59765 +
59766 +int
59767 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
59768 +{
59769 + __u32 mode;
59770 +
59771 + if (unlikely(!file || !(prot & PROT_EXEC)))
59772 + return 1;
59773 +
59774 + if (is_writable_mmap(file))
59775 + return 0;
59776 +
59777 + mode =
59778 + gr_search_file(file->f_path.dentry,
59779 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
59780 + file->f_path.mnt);
59781 +
59782 + if (!gr_tpe_allow(file))
59783 + return 0;
59784 +
59785 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
59786 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
59787 + return 0;
59788 + } else if (unlikely(!(mode & GR_EXEC))) {
59789 + return 0;
59790 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
59791 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
59792 + return 1;
59793 + }
59794 +
59795 + return 1;
59796 +}
59797 +
59798 +int
59799 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
59800 +{
59801 + __u32 mode;
59802 +
59803 + if (unlikely(!file || !(prot & PROT_EXEC)))
59804 + return 1;
59805 +
59806 + if (is_writable_mmap(file))
59807 + return 0;
59808 +
59809 + mode =
59810 + gr_search_file(file->f_path.dentry,
59811 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
59812 + file->f_path.mnt);
59813 +
59814 + if (!gr_tpe_allow(file))
59815 + return 0;
59816 +
59817 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
59818 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
59819 + return 0;
59820 + } else if (unlikely(!(mode & GR_EXEC))) {
59821 + return 0;
59822 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
59823 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
59824 + return 1;
59825 + }
59826 +
59827 + return 1;
59828 +}
59829 +
59830 +void
59831 +gr_acl_handle_psacct(struct task_struct *task, const long code)
59832 +{
59833 + unsigned long runtime;
59834 + unsigned long cputime;
59835 + unsigned int wday, cday;
59836 + __u8 whr, chr;
59837 + __u8 wmin, cmin;
59838 + __u8 wsec, csec;
59839 + struct timespec timeval;
59840 +
59841 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
59842 + !(task->acl->mode & GR_PROCACCT)))
59843 + return;
59844 +
59845 + do_posix_clock_monotonic_gettime(&timeval);
59846 + runtime = timeval.tv_sec - task->start_time.tv_sec;
59847 + wday = runtime / (3600 * 24);
59848 + runtime -= wday * (3600 * 24);
59849 + whr = runtime / 3600;
59850 + runtime -= whr * 3600;
59851 + wmin = runtime / 60;
59852 + runtime -= wmin * 60;
59853 + wsec = runtime;
59854 +
59855 + cputime = (task->utime + task->stime) / HZ;
59856 + cday = cputime / (3600 * 24);
59857 + cputime -= cday * (3600 * 24);
59858 + chr = cputime / 3600;
59859 + cputime -= chr * 3600;
59860 + cmin = cputime / 60;
59861 + cputime -= cmin * 60;
59862 + csec = cputime;
59863 +
59864 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
59865 +
59866 + return;
59867 +}
59868 +
59869 +void gr_set_kernel_label(struct task_struct *task)
59870 +{
59871 + if (gr_status & GR_READY) {
59872 + task->role = kernel_role;
59873 + task->acl = kernel_role->root_label;
59874 + }
59875 + return;
59876 +}
59877 +
59878 +#ifdef CONFIG_TASKSTATS
59879 +int gr_is_taskstats_denied(int pid)
59880 +{
59881 + struct task_struct *task;
59882 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59883 + const struct cred *cred;
59884 +#endif
59885 + int ret = 0;
59886 +
59887 + /* restrict taskstats viewing to un-chrooted root users
59888 + who have the 'view' subject flag if the RBAC system is enabled
59889 + */
59890 +
59891 + rcu_read_lock();
59892 + read_lock(&tasklist_lock);
59893 + task = find_task_by_vpid(pid);
59894 + if (task) {
59895 +#ifdef CONFIG_GRKERNSEC_CHROOT
59896 + if (proc_is_chrooted(task))
59897 + ret = -EACCES;
59898 +#endif
59899 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59900 + cred = __task_cred(task);
59901 +#ifdef CONFIG_GRKERNSEC_PROC_USER
59902 + if (cred->uid != 0)
59903 + ret = -EACCES;
59904 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59905 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
59906 + ret = -EACCES;
59907 +#endif
59908 +#endif
59909 + if (gr_status & GR_READY) {
59910 + if (!(task->acl->mode & GR_VIEW))
59911 + ret = -EACCES;
59912 + }
59913 + } else
59914 + ret = -ENOENT;
59915 +
59916 + read_unlock(&tasklist_lock);
59917 + rcu_read_unlock();
59918 +
59919 + return ret;
59920 +}
59921 +#endif
59922 +
59923 +/* AUXV entries are filled via a descendant of search_binary_handler
59924 + after we've already applied the subject for the target
59925 +*/
59926 +int gr_acl_enable_at_secure(void)
59927 +{
59928 + if (unlikely(!(gr_status & GR_READY)))
59929 + return 0;
59930 +
59931 + if (current->acl->mode & GR_ATSECURE)
59932 + return 1;
59933 +
59934 + return 0;
59935 +}
59936 +
59937 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
59938 +{
59939 + struct task_struct *task = current;
59940 + struct dentry *dentry = file->f_path.dentry;
59941 + struct vfsmount *mnt = file->f_path.mnt;
59942 + struct acl_object_label *obj, *tmp;
59943 + struct acl_subject_label *subj;
59944 + unsigned int bufsize;
59945 + int is_not_root;
59946 + char *path;
59947 + dev_t dev = __get_dev(dentry);
59948 +
59949 + if (unlikely(!(gr_status & GR_READY)))
59950 + return 1;
59951 +
59952 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
59953 + return 1;
59954 +
59955 + /* ignore Eric Biederman */
59956 + if (IS_PRIVATE(dentry->d_inode))
59957 + return 1;
59958 +
59959 + subj = task->acl;
59960 + do {
59961 + obj = lookup_acl_obj_label(ino, dev, subj);
59962 + if (obj != NULL)
59963 + return (obj->mode & GR_FIND) ? 1 : 0;
59964 + } while ((subj = subj->parent_subject));
59965 +
59966 + /* this is purely an optimization since we're looking for an object
59967 + for the directory we're doing a readdir on
59968 + if it's possible for any globbed object to match the entry we're
59969 + filling into the directory, then the object we find here will be
59970 + an anchor point with attached globbed objects
59971 + */
59972 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
59973 + if (obj->globbed == NULL)
59974 + return (obj->mode & GR_FIND) ? 1 : 0;
59975 +
59976 + is_not_root = ((obj->filename[0] == '/') &&
59977 + (obj->filename[1] == '\0')) ? 0 : 1;
59978 + bufsize = PAGE_SIZE - namelen - is_not_root;
59979 +
59980 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
59981 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
59982 + return 1;
59983 +
59984 + preempt_disable();
59985 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
59986 + bufsize);
59987 +
59988 + bufsize = strlen(path);
59989 +
59990 + /* if base is "/", don't append an additional slash */
59991 + if (is_not_root)
59992 + *(path + bufsize) = '/';
59993 + memcpy(path + bufsize + is_not_root, name, namelen);
59994 + *(path + bufsize + namelen + is_not_root) = '\0';
59995 +
59996 + tmp = obj->globbed;
59997 + while (tmp) {
59998 + if (!glob_match(tmp->filename, path)) {
59999 + preempt_enable();
60000 + return (tmp->mode & GR_FIND) ? 1 : 0;
60001 + }
60002 + tmp = tmp->next;
60003 + }
60004 + preempt_enable();
60005 + return (obj->mode & GR_FIND) ? 1 : 0;
60006 +}
60007 +
60008 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
60009 +EXPORT_SYMBOL(gr_acl_is_enabled);
60010 +#endif
60011 +EXPORT_SYMBOL(gr_learn_resource);
60012 +EXPORT_SYMBOL(gr_set_kernel_label);
60013 +#ifdef CONFIG_SECURITY
60014 +EXPORT_SYMBOL(gr_check_user_change);
60015 +EXPORT_SYMBOL(gr_check_group_change);
60016 +#endif
60017 +
60018 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
60019 new file mode 100644
60020 index 0000000..34fefda
60021 --- /dev/null
60022 +++ b/grsecurity/gracl_alloc.c
60023 @@ -0,0 +1,105 @@
60024 +#include <linux/kernel.h>
60025 +#include <linux/mm.h>
60026 +#include <linux/slab.h>
60027 +#include <linux/vmalloc.h>
60028 +#include <linux/gracl.h>
60029 +#include <linux/grsecurity.h>
60030 +
60031 +static unsigned long alloc_stack_next = 1;
60032 +static unsigned long alloc_stack_size = 1;
60033 +static void **alloc_stack;
60034 +
60035 +static __inline__ int
60036 +alloc_pop(void)
60037 +{
60038 + if (alloc_stack_next == 1)
60039 + return 0;
60040 +
60041 + kfree(alloc_stack[alloc_stack_next - 2]);
60042 +
60043 + alloc_stack_next--;
60044 +
60045 + return 1;
60046 +}
60047 +
60048 +static __inline__ int
60049 +alloc_push(void *buf)
60050 +{
60051 + if (alloc_stack_next >= alloc_stack_size)
60052 + return 1;
60053 +
60054 + alloc_stack[alloc_stack_next - 1] = buf;
60055 +
60056 + alloc_stack_next++;
60057 +
60058 + return 0;
60059 +}
60060 +
60061 +void *
60062 +acl_alloc(unsigned long len)
60063 +{
60064 + void *ret = NULL;
60065 +
60066 + if (!len || len > PAGE_SIZE)
60067 + goto out;
60068 +
60069 + ret = kmalloc(len, GFP_KERNEL);
60070 +
60071 + if (ret) {
60072 + if (alloc_push(ret)) {
60073 + kfree(ret);
60074 + ret = NULL;
60075 + }
60076 + }
60077 +
60078 +out:
60079 + return ret;
60080 +}
60081 +
60082 +void *
60083 +acl_alloc_num(unsigned long num, unsigned long len)
60084 +{
60085 + if (!len || (num > (PAGE_SIZE / len)))
60086 + return NULL;
60087 +
60088 + return acl_alloc(num * len);
60089 +}
60090 +
60091 +void
60092 +acl_free_all(void)
60093 +{
60094 + if (gr_acl_is_enabled() || !alloc_stack)
60095 + return;
60096 +
60097 + while (alloc_pop()) ;
60098 +
60099 + if (alloc_stack) {
60100 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
60101 + kfree(alloc_stack);
60102 + else
60103 + vfree(alloc_stack);
60104 + }
60105 +
60106 + alloc_stack = NULL;
60107 + alloc_stack_size = 1;
60108 + alloc_stack_next = 1;
60109 +
60110 + return;
60111 +}
60112 +
60113 +int
60114 +acl_alloc_stack_init(unsigned long size)
60115 +{
60116 + if ((size * sizeof (void *)) <= PAGE_SIZE)
60117 + alloc_stack =
60118 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
60119 + else
60120 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
60121 +
60122 + alloc_stack_size = size;
60123 +
60124 + if (!alloc_stack)
60125 + return 0;
60126 + else
60127 + return 1;
60128 +}
60129 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
60130 new file mode 100644
60131 index 0000000..955ddfb
60132 --- /dev/null
60133 +++ b/grsecurity/gracl_cap.c
60134 @@ -0,0 +1,101 @@
60135 +#include <linux/kernel.h>
60136 +#include <linux/module.h>
60137 +#include <linux/sched.h>
60138 +#include <linux/gracl.h>
60139 +#include <linux/grsecurity.h>
60140 +#include <linux/grinternal.h>
60141 +
60142 +extern const char *captab_log[];
60143 +extern int captab_log_entries;
60144 +
60145 +int
60146 +gr_acl_is_capable(const int cap)
60147 +{
60148 + struct task_struct *task = current;
60149 + const struct cred *cred = current_cred();
60150 + struct acl_subject_label *curracl;
60151 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
60152 + kernel_cap_t cap_audit = __cap_empty_set;
60153 +
60154 + if (!gr_acl_is_enabled())
60155 + return 1;
60156 +
60157 + curracl = task->acl;
60158 +
60159 + cap_drop = curracl->cap_lower;
60160 + cap_mask = curracl->cap_mask;
60161 + cap_audit = curracl->cap_invert_audit;
60162 +
60163 + while ((curracl = curracl->parent_subject)) {
60164 + /* if the cap isn't specified in the current computed mask but is specified in the
60165 + current level subject, and is lowered in the current level subject, then add
60166 + it to the set of dropped capabilities
60167 + otherwise, add the current level subject's mask to the current computed mask
60168 + */
60169 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
60170 + cap_raise(cap_mask, cap);
60171 + if (cap_raised(curracl->cap_lower, cap))
60172 + cap_raise(cap_drop, cap);
60173 + if (cap_raised(curracl->cap_invert_audit, cap))
60174 + cap_raise(cap_audit, cap);
60175 + }
60176 + }
60177 +
60178 + if (!cap_raised(cap_drop, cap)) {
60179 + if (cap_raised(cap_audit, cap))
60180 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
60181 + return 1;
60182 + }
60183 +
60184 + curracl = task->acl;
60185 +
60186 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
60187 + && cap_raised(cred->cap_effective, cap)) {
60188 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
60189 + task->role->roletype, cred->uid,
60190 + cred->gid, task->exec_file ?
60191 + gr_to_filename(task->exec_file->f_path.dentry,
60192 + task->exec_file->f_path.mnt) : curracl->filename,
60193 + curracl->filename, 0UL,
60194 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
60195 + return 1;
60196 + }
60197 +
60198 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
60199 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
60200 + return 0;
60201 +}
60202 +
60203 +int
60204 +gr_acl_is_capable_nolog(const int cap)
60205 +{
60206 + struct acl_subject_label *curracl;
60207 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
60208 +
60209 + if (!gr_acl_is_enabled())
60210 + return 1;
60211 +
60212 + curracl = current->acl;
60213 +
60214 + cap_drop = curracl->cap_lower;
60215 + cap_mask = curracl->cap_mask;
60216 +
60217 + while ((curracl = curracl->parent_subject)) {
60218 + /* if the cap isn't specified in the current computed mask but is specified in the
60219 + current level subject, and is lowered in the current level subject, then add
60220 + it to the set of dropped capabilities
60221 + otherwise, add the current level subject's mask to the current computed mask
60222 + */
60223 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
60224 + cap_raise(cap_mask, cap);
60225 + if (cap_raised(curracl->cap_lower, cap))
60226 + cap_raise(cap_drop, cap);
60227 + }
60228 + }
60229 +
60230 + if (!cap_raised(cap_drop, cap))
60231 + return 1;
60232 +
60233 + return 0;
60234 +}
60235 +
60236 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
60237 new file mode 100644
60238 index 0000000..d5f210c
60239 --- /dev/null
60240 +++ b/grsecurity/gracl_fs.c
60241 @@ -0,0 +1,433 @@
60242 +#include <linux/kernel.h>
60243 +#include <linux/sched.h>
60244 +#include <linux/types.h>
60245 +#include <linux/fs.h>
60246 +#include <linux/file.h>
60247 +#include <linux/stat.h>
60248 +#include <linux/grsecurity.h>
60249 +#include <linux/grinternal.h>
60250 +#include <linux/gracl.h>
60251 +
60252 +__u32
60253 +gr_acl_handle_hidden_file(const struct dentry * dentry,
60254 + const struct vfsmount * mnt)
60255 +{
60256 + __u32 mode;
60257 +
60258 + if (unlikely(!dentry->d_inode))
60259 + return GR_FIND;
60260 +
60261 + mode =
60262 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
60263 +
60264 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
60265 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
60266 + return mode;
60267 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
60268 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
60269 + return 0;
60270 + } else if (unlikely(!(mode & GR_FIND)))
60271 + return 0;
60272 +
60273 + return GR_FIND;
60274 +}
60275 +
60276 +__u32
60277 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
60278 + int acc_mode)
60279 +{
60280 + __u32 reqmode = GR_FIND;
60281 + __u32 mode;
60282 +
60283 + if (unlikely(!dentry->d_inode))
60284 + return reqmode;
60285 +
60286 + if (acc_mode & MAY_APPEND)
60287 + reqmode |= GR_APPEND;
60288 + else if (acc_mode & MAY_WRITE)
60289 + reqmode |= GR_WRITE;
60290 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
60291 + reqmode |= GR_READ;
60292 +
60293 + mode =
60294 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
60295 + mnt);
60296 +
60297 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
60298 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
60299 + reqmode & GR_READ ? " reading" : "",
60300 + reqmode & GR_WRITE ? " writing" : reqmode &
60301 + GR_APPEND ? " appending" : "");
60302 + return reqmode;
60303 + } else
60304 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
60305 + {
60306 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
60307 + reqmode & GR_READ ? " reading" : "",
60308 + reqmode & GR_WRITE ? " writing" : reqmode &
60309 + GR_APPEND ? " appending" : "");
60310 + return 0;
60311 + } else if (unlikely((mode & reqmode) != reqmode))
60312 + return 0;
60313 +
60314 + return reqmode;
60315 +}
60316 +
60317 +__u32
60318 +gr_acl_handle_creat(const struct dentry * dentry,
60319 + const struct dentry * p_dentry,
60320 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
60321 + const int imode)
60322 +{
60323 + __u32 reqmode = GR_WRITE | GR_CREATE;
60324 + __u32 mode;
60325 +
60326 + if (acc_mode & MAY_APPEND)
60327 + reqmode |= GR_APPEND;
60328 + // if a directory was required or the directory already exists, then
60329 + // don't count this open as a read
60330 + if ((acc_mode & MAY_READ) &&
60331 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
60332 + reqmode |= GR_READ;
60333 + if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
60334 + reqmode |= GR_SETID;
60335 +
60336 + mode =
60337 + gr_check_create(dentry, p_dentry, p_mnt,
60338 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
60339 +
60340 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
60341 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
60342 + reqmode & GR_READ ? " reading" : "",
60343 + reqmode & GR_WRITE ? " writing" : reqmode &
60344 + GR_APPEND ? " appending" : "");
60345 + return reqmode;
60346 + } else
60347 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
60348 + {
60349 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
60350 + reqmode & GR_READ ? " reading" : "",
60351 + reqmode & GR_WRITE ? " writing" : reqmode &
60352 + GR_APPEND ? " appending" : "");
60353 + return 0;
60354 + } else if (unlikely((mode & reqmode) != reqmode))
60355 + return 0;
60356 +
60357 + return reqmode;
60358 +}
60359 +
60360 +__u32
60361 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
60362 + const int fmode)
60363 +{
60364 + __u32 mode, reqmode = GR_FIND;
60365 +
60366 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
60367 + reqmode |= GR_EXEC;
60368 + if (fmode & S_IWOTH)
60369 + reqmode |= GR_WRITE;
60370 + if (fmode & S_IROTH)
60371 + reqmode |= GR_READ;
60372 +
60373 + mode =
60374 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
60375 + mnt);
60376 +
60377 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
60378 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
60379 + reqmode & GR_READ ? " reading" : "",
60380 + reqmode & GR_WRITE ? " writing" : "",
60381 + reqmode & GR_EXEC ? " executing" : "");
60382 + return reqmode;
60383 + } else
60384 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
60385 + {
60386 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
60387 + reqmode & GR_READ ? " reading" : "",
60388 + reqmode & GR_WRITE ? " writing" : "",
60389 + reqmode & GR_EXEC ? " executing" : "");
60390 + return 0;
60391 + } else if (unlikely((mode & reqmode) != reqmode))
60392 + return 0;
60393 +
60394 + return reqmode;
60395 +}
60396 +
60397 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
60398 +{
60399 + __u32 mode;
60400 +
60401 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
60402 +
60403 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
60404 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
60405 + return mode;
60406 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
60407 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
60408 + return 0;
60409 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
60410 + return 0;
60411 +
60412 + return (reqmode);
60413 +}
60414 +
60415 +__u32
60416 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
60417 +{
60418 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
60419 +}
60420 +
60421 +__u32
60422 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
60423 +{
60424 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
60425 +}
60426 +
60427 +__u32
60428 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
60429 +{
60430 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
60431 +}
60432 +
60433 +__u32
60434 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
60435 +{
60436 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
60437 +}
60438 +
60439 +__u32
60440 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
60441 + mode_t mode)
60442 +{
60443 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
60444 + return 1;
60445 +
60446 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
60447 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
60448 + GR_FCHMOD_ACL_MSG);
60449 + } else {
60450 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
60451 + }
60452 +}
60453 +
60454 +__u32
60455 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
60456 + mode_t mode)
60457 +{
60458 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
60459 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
60460 + GR_CHMOD_ACL_MSG);
60461 + } else {
60462 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
60463 + }
60464 +}
60465 +
60466 +__u32
60467 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
60468 +{
60469 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
60470 +}
60471 +
60472 +__u32
60473 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
60474 +{
60475 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
60476 +}
60477 +
60478 +__u32
60479 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
60480 +{
60481 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
60482 +}
60483 +
60484 +__u32
60485 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
60486 +{
60487 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
60488 + GR_UNIXCONNECT_ACL_MSG);
60489 +}
60490 +
60491 +/* hardlinks require at minimum create and link permission,
60492 + any additional privilege required is based on the
60493 + privilege of the file being linked to
60494 +*/
60495 +__u32
60496 +gr_acl_handle_link(const struct dentry * new_dentry,
60497 + const struct dentry * parent_dentry,
60498 + const struct vfsmount * parent_mnt,
60499 + const struct dentry * old_dentry,
60500 + const struct vfsmount * old_mnt, const char *to)
60501 +{
60502 + __u32 mode;
60503 + __u32 needmode = GR_CREATE | GR_LINK;
60504 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
60505 +
60506 + mode =
60507 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
60508 + old_mnt);
60509 +
60510 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
60511 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
60512 + return mode;
60513 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
60514 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
60515 + return 0;
60516 + } else if (unlikely((mode & needmode) != needmode))
60517 + return 0;
60518 +
60519 + return 1;
60520 +}
60521 +
60522 +__u32
60523 +gr_acl_handle_symlink(const struct dentry * new_dentry,
60524 + const struct dentry * parent_dentry,
60525 + const struct vfsmount * parent_mnt, const char *from)
60526 +{
60527 + __u32 needmode = GR_WRITE | GR_CREATE;
60528 + __u32 mode;
60529 +
60530 + mode =
60531 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
60532 + GR_CREATE | GR_AUDIT_CREATE |
60533 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
60534 +
60535 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
60536 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
60537 + return mode;
60538 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
60539 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
60540 + return 0;
60541 + } else if (unlikely((mode & needmode) != needmode))
60542 + return 0;
60543 +
60544 + return (GR_WRITE | GR_CREATE);
60545 +}
60546 +
60547 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
60548 +{
60549 + __u32 mode;
60550 +
60551 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
60552 +
60553 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
60554 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
60555 + return mode;
60556 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
60557 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
60558 + return 0;
60559 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
60560 + return 0;
60561 +
60562 + return (reqmode);
60563 +}
60564 +
60565 +__u32
60566 +gr_acl_handle_mknod(const struct dentry * new_dentry,
60567 + const struct dentry * parent_dentry,
60568 + const struct vfsmount * parent_mnt,
60569 + const int mode)
60570 +{
60571 + __u32 reqmode = GR_WRITE | GR_CREATE;
60572 + if (unlikely(mode & (S_ISUID | S_ISGID)))
60573 + reqmode |= GR_SETID;
60574 +
60575 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
60576 + reqmode, GR_MKNOD_ACL_MSG);
60577 +}
60578 +
60579 +__u32
60580 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
60581 + const struct dentry *parent_dentry,
60582 + const struct vfsmount *parent_mnt)
60583 +{
60584 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
60585 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
60586 +}
60587 +
60588 +#define RENAME_CHECK_SUCCESS(old, new) \
60589 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
60590 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
60591 +
60592 +int
60593 +gr_acl_handle_rename(struct dentry *new_dentry,
60594 + struct dentry *parent_dentry,
60595 + const struct vfsmount *parent_mnt,
60596 + struct dentry *old_dentry,
60597 + struct inode *old_parent_inode,
60598 + struct vfsmount *old_mnt, const char *newname)
60599 +{
60600 + __u32 comp1, comp2;
60601 + int error = 0;
60602 +
60603 + if (unlikely(!gr_acl_is_enabled()))
60604 + return 0;
60605 +
60606 + if (!new_dentry->d_inode) {
60607 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
60608 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
60609 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
60610 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
60611 + GR_DELETE | GR_AUDIT_DELETE |
60612 + GR_AUDIT_READ | GR_AUDIT_WRITE |
60613 + GR_SUPPRESS, old_mnt);
60614 + } else {
60615 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
60616 + GR_CREATE | GR_DELETE |
60617 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
60618 + GR_AUDIT_READ | GR_AUDIT_WRITE |
60619 + GR_SUPPRESS, parent_mnt);
60620 + comp2 =
60621 + gr_search_file(old_dentry,
60622 + GR_READ | GR_WRITE | GR_AUDIT_READ |
60623 + GR_DELETE | GR_AUDIT_DELETE |
60624 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
60625 + }
60626 +
60627 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
60628 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
60629 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
60630 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
60631 + && !(comp2 & GR_SUPPRESS)) {
60632 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
60633 + error = -EACCES;
60634 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
60635 + error = -EACCES;
60636 +
60637 + return error;
60638 +}
60639 +
60640 +void
60641 +gr_acl_handle_exit(void)
60642 +{
60643 + u16 id;
60644 + char *rolename;
60645 + struct file *exec_file;
60646 +
60647 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
60648 + !(current->role->roletype & GR_ROLE_PERSIST))) {
60649 + id = current->acl_role_id;
60650 + rolename = current->role->rolename;
60651 + gr_set_acls(1);
60652 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
60653 + }
60654 +
60655 + write_lock(&grsec_exec_file_lock);
60656 + exec_file = current->exec_file;
60657 + current->exec_file = NULL;
60658 + write_unlock(&grsec_exec_file_lock);
60659 +
60660 + if (exec_file)
60661 + fput(exec_file);
60662 +}
60663 +
60664 +int
60665 +gr_acl_handle_procpidmem(const struct task_struct *task)
60666 +{
60667 + if (unlikely(!gr_acl_is_enabled()))
60668 + return 0;
60669 +
60670 + if (task != current && task->acl->mode & GR_PROTPROCFD)
60671 + return -EACCES;
60672 +
60673 + return 0;
60674 +}
60675 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
60676 new file mode 100644
60677 index 0000000..cd07b96
60678 --- /dev/null
60679 +++ b/grsecurity/gracl_ip.c
60680 @@ -0,0 +1,382 @@
60681 +#include <linux/kernel.h>
60682 +#include <asm/uaccess.h>
60683 +#include <asm/errno.h>
60684 +#include <net/sock.h>
60685 +#include <linux/file.h>
60686 +#include <linux/fs.h>
60687 +#include <linux/net.h>
60688 +#include <linux/in.h>
60689 +#include <linux/skbuff.h>
60690 +#include <linux/ip.h>
60691 +#include <linux/udp.h>
60692 +#include <linux/smp_lock.h>
60693 +#include <linux/types.h>
60694 +#include <linux/sched.h>
60695 +#include <linux/netdevice.h>
60696 +#include <linux/inetdevice.h>
60697 +#include <linux/gracl.h>
60698 +#include <linux/grsecurity.h>
60699 +#include <linux/grinternal.h>
60700 +
60701 +#define GR_BIND 0x01
60702 +#define GR_CONNECT 0x02
60703 +#define GR_INVERT 0x04
60704 +#define GR_BINDOVERRIDE 0x08
60705 +#define GR_CONNECTOVERRIDE 0x10
60706 +#define GR_SOCK_FAMILY 0x20
60707 +
60708 +static const char * gr_protocols[IPPROTO_MAX] = {
60709 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
60710 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
60711 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
60712 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
60713 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
60714 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
60715 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
60716 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
60717 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
60718 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
60719 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
60720 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
60721 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
60722 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
60723 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
60724 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
60725 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
60726 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
60727 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
60728 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
60729 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
60730 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
60731 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
60732 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
60733 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
60734 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
60735 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
60736 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
60737 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
60738 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
60739 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
60740 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
60741 + };
60742 +
60743 +static const char * gr_socktypes[SOCK_MAX] = {
60744 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
60745 + "unknown:7", "unknown:8", "unknown:9", "packet"
60746 + };
60747 +
60748 +static const char * gr_sockfamilies[AF_MAX+1] = {
60749 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
60750 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
60751 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
60752 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
60753 + };
60754 +
60755 +const char *
60756 +gr_proto_to_name(unsigned char proto)
60757 +{
60758 + return gr_protocols[proto];
60759 +}
60760 +
60761 +const char *
60762 +gr_socktype_to_name(unsigned char type)
60763 +{
60764 + return gr_socktypes[type];
60765 +}
60766 +
60767 +const char *
60768 +gr_sockfamily_to_name(unsigned char family)
60769 +{
60770 + return gr_sockfamilies[family];
60771 +}
60772 +
60773 +int
60774 +gr_search_socket(const int domain, const int type, const int protocol)
60775 +{
60776 + struct acl_subject_label *curr;
60777 + const struct cred *cred = current_cred();
60778 +
60779 + if (unlikely(!gr_acl_is_enabled()))
60780 + goto exit;
60781 +
60782 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
60783 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
60784 + goto exit; // let the kernel handle it
60785 +
60786 + curr = current->acl;
60787 +
60788 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
60789 + /* the family is allowed, if this is PF_INET allow it only if
60790 + the extra sock type/protocol checks pass */
60791 + if (domain == PF_INET)
60792 + goto inet_check;
60793 + goto exit;
60794 + } else {
60795 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
60796 + __u32 fakeip = 0;
60797 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
60798 + current->role->roletype, cred->uid,
60799 + cred->gid, current->exec_file ?
60800 + gr_to_filename(current->exec_file->f_path.dentry,
60801 + current->exec_file->f_path.mnt) :
60802 + curr->filename, curr->filename,
60803 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
60804 + &current->signal->saved_ip);
60805 + goto exit;
60806 + }
60807 + goto exit_fail;
60808 + }
60809 +
60810 +inet_check:
60811 + /* the rest of this checking is for IPv4 only */
60812 + if (!curr->ips)
60813 + goto exit;
60814 +
60815 + if ((curr->ip_type & (1 << type)) &&
60816 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
60817 + goto exit;
60818 +
60819 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
60820 +	/* we don't place acls on raw sockets, and sometimes
60821 + dgram/ip sockets are opened for ioctl and not
60822 + bind/connect, so we'll fake a bind learn log */
60823 + if (type == SOCK_RAW || type == SOCK_PACKET) {
60824 + __u32 fakeip = 0;
60825 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
60826 + current->role->roletype, cred->uid,
60827 + cred->gid, current->exec_file ?
60828 + gr_to_filename(current->exec_file->f_path.dentry,
60829 + current->exec_file->f_path.mnt) :
60830 + curr->filename, curr->filename,
60831 + &fakeip, 0, type,
60832 + protocol, GR_CONNECT, &current->signal->saved_ip);
60833 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
60834 + __u32 fakeip = 0;
60835 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
60836 + current->role->roletype, cred->uid,
60837 + cred->gid, current->exec_file ?
60838 + gr_to_filename(current->exec_file->f_path.dentry,
60839 + current->exec_file->f_path.mnt) :
60840 + curr->filename, curr->filename,
60841 + &fakeip, 0, type,
60842 + protocol, GR_BIND, &current->signal->saved_ip);
60843 + }
60844 + /* we'll log when they use connect or bind */
60845 + goto exit;
60846 + }
60847 +
60848 +exit_fail:
60849 + if (domain == PF_INET)
60850 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
60851 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
60852 + else
60853 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
60854 + gr_socktype_to_name(type), protocol);
60855 +
60856 + return 0;
60857 +exit:
60858 + return 1;
60859 +}
60860 +
60861 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
60862 +{
60863 + if ((ip->mode & mode) &&
60864 + (ip_port >= ip->low) &&
60865 + (ip_port <= ip->high) &&
60866 + ((ntohl(ip_addr) & our_netmask) ==
60867 + (ntohl(our_addr) & our_netmask))
60868 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
60869 + && (ip->type & (1 << type))) {
60870 + if (ip->mode & GR_INVERT)
60871 + return 2; // specifically denied
60872 + else
60873 + return 1; // allowed
60874 + }
60875 +
60876 + return 0; // not specifically allowed, may continue parsing
60877 +}
60878 +
60879 +static int
60880 +gr_search_connectbind(const int full_mode, struct sock *sk,
60881 + struct sockaddr_in *addr, const int type)
60882 +{
60883 + char iface[IFNAMSIZ] = {0};
60884 + struct acl_subject_label *curr;
60885 + struct acl_ip_label *ip;
60886 + struct inet_sock *isk;
60887 + struct net_device *dev;
60888 + struct in_device *idev;
60889 + unsigned long i;
60890 + int ret;
60891 + int mode = full_mode & (GR_BIND | GR_CONNECT);
60892 + __u32 ip_addr = 0;
60893 + __u32 our_addr;
60894 + __u32 our_netmask;
60895 + char *p;
60896 + __u16 ip_port = 0;
60897 + const struct cred *cred = current_cred();
60898 +
60899 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
60900 + return 0;
60901 +
60902 + curr = current->acl;
60903 + isk = inet_sk(sk);
60904 +
60905 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
60906 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
60907 + addr->sin_addr.s_addr = curr->inaddr_any_override;
60908 + if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
60909 + struct sockaddr_in saddr;
60910 + int err;
60911 +
60912 + saddr.sin_family = AF_INET;
60913 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
60914 + saddr.sin_port = isk->sport;
60915 +
60916 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
60917 + if (err)
60918 + return err;
60919 +
60920 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
60921 + if (err)
60922 + return err;
60923 + }
60924 +
60925 + if (!curr->ips)
60926 + return 0;
60927 +
60928 + ip_addr = addr->sin_addr.s_addr;
60929 + ip_port = ntohs(addr->sin_port);
60930 +
60931 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
60932 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
60933 + current->role->roletype, cred->uid,
60934 + cred->gid, current->exec_file ?
60935 + gr_to_filename(current->exec_file->f_path.dentry,
60936 + current->exec_file->f_path.mnt) :
60937 + curr->filename, curr->filename,
60938 + &ip_addr, ip_port, type,
60939 + sk->sk_protocol, mode, &current->signal->saved_ip);
60940 + return 0;
60941 + }
60942 +
60943 + for (i = 0; i < curr->ip_num; i++) {
60944 + ip = *(curr->ips + i);
60945 + if (ip->iface != NULL) {
60946 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
60947 + p = strchr(iface, ':');
60948 + if (p != NULL)
60949 + *p = '\0';
60950 + dev = dev_get_by_name(sock_net(sk), iface);
60951 + if (dev == NULL)
60952 + continue;
60953 + idev = in_dev_get(dev);
60954 + if (idev == NULL) {
60955 + dev_put(dev);
60956 + continue;
60957 + }
60958 + rcu_read_lock();
60959 + for_ifa(idev) {
60960 + if (!strcmp(ip->iface, ifa->ifa_label)) {
60961 + our_addr = ifa->ifa_address;
60962 + our_netmask = 0xffffffff;
60963 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
60964 + if (ret == 1) {
60965 + rcu_read_unlock();
60966 + in_dev_put(idev);
60967 + dev_put(dev);
60968 + return 0;
60969 + } else if (ret == 2) {
60970 + rcu_read_unlock();
60971 + in_dev_put(idev);
60972 + dev_put(dev);
60973 + goto denied;
60974 + }
60975 + }
60976 + } endfor_ifa(idev);
60977 + rcu_read_unlock();
60978 + in_dev_put(idev);
60979 + dev_put(dev);
60980 + } else {
60981 + our_addr = ip->addr;
60982 + our_netmask = ip->netmask;
60983 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
60984 + if (ret == 1)
60985 + return 0;
60986 + else if (ret == 2)
60987 + goto denied;
60988 + }
60989 + }
60990 +
60991 +denied:
60992 + if (mode == GR_BIND)
60993 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
60994 + else if (mode == GR_CONNECT)
60995 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
60996 +
60997 + return -EACCES;
60998 +}
60999 +
61000 +int
61001 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
61002 +{
61003 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
61004 +}
61005 +
61006 +int
61007 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
61008 +{
61009 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
61010 +}
61011 +
61012 +int gr_search_listen(struct socket *sock)
61013 +{
61014 + struct sock *sk = sock->sk;
61015 + struct sockaddr_in addr;
61016 +
61017 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
61018 + addr.sin_port = inet_sk(sk)->sport;
61019 +
61020 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
61021 +}
61022 +
61023 +int gr_search_accept(struct socket *sock)
61024 +{
61025 + struct sock *sk = sock->sk;
61026 + struct sockaddr_in addr;
61027 +
61028 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
61029 + addr.sin_port = inet_sk(sk)->sport;
61030 +
61031 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
61032 +}
61033 +
61034 +int
61035 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
61036 +{
61037 + if (addr)
61038 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
61039 + else {
61040 + struct sockaddr_in sin;
61041 + const struct inet_sock *inet = inet_sk(sk);
61042 +
61043 + sin.sin_addr.s_addr = inet->daddr;
61044 + sin.sin_port = inet->dport;
61045 +
61046 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
61047 + }
61048 +}
61049 +
61050 +int
61051 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
61052 +{
61053 + struct sockaddr_in sin;
61054 +
61055 + if (unlikely(skb->len < sizeof (struct udphdr)))
61056 + return 0; // skip this packet
61057 +
61058 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
61059 + sin.sin_port = udp_hdr(skb)->source;
61060 +
61061 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
61062 +}
61063 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
61064 new file mode 100644
61065 index 0000000..34bdd46
61066 --- /dev/null
61067 +++ b/grsecurity/gracl_learn.c
61068 @@ -0,0 +1,208 @@
61069 +#include <linux/kernel.h>
61070 +#include <linux/mm.h>
61071 +#include <linux/sched.h>
61072 +#include <linux/poll.h>
61073 +#include <linux/smp_lock.h>
61074 +#include <linux/string.h>
61075 +#include <linux/file.h>
61076 +#include <linux/types.h>
61077 +#include <linux/vmalloc.h>
61078 +#include <linux/grinternal.h>
61079 +
61080 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
61081 + size_t count, loff_t *ppos);
61082 +extern int gr_acl_is_enabled(void);
61083 +
61084 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
61085 +static int gr_learn_attached;
61086 +
61087 +/* use a 512k buffer */
61088 +#define LEARN_BUFFER_SIZE (512 * 1024)
61089 +
61090 +static DEFINE_SPINLOCK(gr_learn_lock);
61091 +static DEFINE_MUTEX(gr_learn_user_mutex);
61092 +
61093 +/* we need to maintain two buffers, so that the kernel context of grlearn
61094 +   uses a mutex around the userspace copying, and the other kernel contexts
61095 + use a spinlock when copying into the buffer, since they cannot sleep
61096 +*/
61097 +static char *learn_buffer;
61098 +static char *learn_buffer_user;
61099 +static int learn_buffer_len;
61100 +static int learn_buffer_user_len;
61101 +
61102 +static ssize_t
61103 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
61104 +{
61105 + DECLARE_WAITQUEUE(wait, current);
61106 + ssize_t retval = 0;
61107 +
61108 + add_wait_queue(&learn_wait, &wait);
61109 + set_current_state(TASK_INTERRUPTIBLE);
61110 + do {
61111 + mutex_lock(&gr_learn_user_mutex);
61112 + spin_lock(&gr_learn_lock);
61113 + if (learn_buffer_len)
61114 + break;
61115 + spin_unlock(&gr_learn_lock);
61116 + mutex_unlock(&gr_learn_user_mutex);
61117 + if (file->f_flags & O_NONBLOCK) {
61118 + retval = -EAGAIN;
61119 + goto out;
61120 + }
61121 + if (signal_pending(current)) {
61122 + retval = -ERESTARTSYS;
61123 + goto out;
61124 + }
61125 +
61126 + schedule();
61127 + } while (1);
61128 +
61129 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
61130 + learn_buffer_user_len = learn_buffer_len;
61131 + retval = learn_buffer_len;
61132 + learn_buffer_len = 0;
61133 +
61134 + spin_unlock(&gr_learn_lock);
61135 +
61136 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
61137 + retval = -EFAULT;
61138 +
61139 + mutex_unlock(&gr_learn_user_mutex);
61140 +out:
61141 + set_current_state(TASK_RUNNING);
61142 + remove_wait_queue(&learn_wait, &wait);
61143 + return retval;
61144 +}
61145 +
61146 +static unsigned int
61147 +poll_learn(struct file * file, poll_table * wait)
61148 +{
61149 + poll_wait(file, &learn_wait, wait);
61150 +
61151 + if (learn_buffer_len)
61152 + return (POLLIN | POLLRDNORM);
61153 +
61154 + return 0;
61155 +}
61156 +
61157 +void
61158 +gr_clear_learn_entries(void)
61159 +{
61160 + char *tmp;
61161 +
61162 + mutex_lock(&gr_learn_user_mutex);
61163 + spin_lock(&gr_learn_lock);
61164 + tmp = learn_buffer;
61165 + learn_buffer = NULL;
61166 + spin_unlock(&gr_learn_lock);
61167 + if (tmp)
61168 + vfree(tmp);
61169 + if (learn_buffer_user != NULL) {
61170 + vfree(learn_buffer_user);
61171 + learn_buffer_user = NULL;
61172 + }
61173 + learn_buffer_len = 0;
61174 + mutex_unlock(&gr_learn_user_mutex);
61175 +
61176 + return;
61177 +}
61178 +
61179 +void
61180 +gr_add_learn_entry(const char *fmt, ...)
61181 +{
61182 + va_list args;
61183 + unsigned int len;
61184 +
61185 + if (!gr_learn_attached)
61186 + return;
61187 +
61188 + spin_lock(&gr_learn_lock);
61189 +
61190 + /* leave a gap at the end so we know when it's "full" but don't have to
61191 + compute the exact length of the string we're trying to append
61192 + */
61193 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
61194 + spin_unlock(&gr_learn_lock);
61195 + wake_up_interruptible(&learn_wait);
61196 + return;
61197 + }
61198 + if (learn_buffer == NULL) {
61199 + spin_unlock(&gr_learn_lock);
61200 + return;
61201 + }
61202 +
61203 + va_start(args, fmt);
61204 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
61205 + va_end(args);
61206 +
61207 + learn_buffer_len += len + 1;
61208 +
61209 + spin_unlock(&gr_learn_lock);
61210 + wake_up_interruptible(&learn_wait);
61211 +
61212 + return;
61213 +}
61214 +
61215 +static int
61216 +open_learn(struct inode *inode, struct file *file)
61217 +{
61218 + if (file->f_mode & FMODE_READ && gr_learn_attached)
61219 + return -EBUSY;
61220 + if (file->f_mode & FMODE_READ) {
61221 + int retval = 0;
61222 + mutex_lock(&gr_learn_user_mutex);
61223 + if (learn_buffer == NULL)
61224 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
61225 + if (learn_buffer_user == NULL)
61226 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
61227 + if (learn_buffer == NULL) {
61228 + retval = -ENOMEM;
61229 + goto out_error;
61230 + }
61231 + if (learn_buffer_user == NULL) {
61232 + retval = -ENOMEM;
61233 + goto out_error;
61234 + }
61235 + learn_buffer_len = 0;
61236 + learn_buffer_user_len = 0;
61237 + gr_learn_attached = 1;
61238 +out_error:
61239 + mutex_unlock(&gr_learn_user_mutex);
61240 + return retval;
61241 + }
61242 + return 0;
61243 +}
61244 +
61245 +static int
61246 +close_learn(struct inode *inode, struct file *file)
61247 +{
61248 + if (file->f_mode & FMODE_READ) {
61249 + char *tmp = NULL;
61250 + mutex_lock(&gr_learn_user_mutex);
61251 + spin_lock(&gr_learn_lock);
61252 + tmp = learn_buffer;
61253 + learn_buffer = NULL;
61254 + spin_unlock(&gr_learn_lock);
61255 + if (tmp)
61256 + vfree(tmp);
61257 + if (learn_buffer_user != NULL) {
61258 + vfree(learn_buffer_user);
61259 + learn_buffer_user = NULL;
61260 + }
61261 + learn_buffer_len = 0;
61262 + learn_buffer_user_len = 0;
61263 + gr_learn_attached = 0;
61264 + mutex_unlock(&gr_learn_user_mutex);
61265 + }
61266 +
61267 + return 0;
61268 +}
61269 +
61270 +const struct file_operations grsec_fops = {
61271 + .read = read_learn,
61272 + .write = write_grsec_handler,
61273 + .open = open_learn,
61274 + .release = close_learn,
61275 + .poll = poll_learn,
61276 +};
61277 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
61278 new file mode 100644
61279 index 0000000..70b2179
61280 --- /dev/null
61281 +++ b/grsecurity/gracl_res.c
61282 @@ -0,0 +1,67 @@
61283 +#include <linux/kernel.h>
61284 +#include <linux/sched.h>
61285 +#include <linux/gracl.h>
61286 +#include <linux/grinternal.h>
61287 +
61288 +static const char *restab_log[] = {
61289 + [RLIMIT_CPU] = "RLIMIT_CPU",
61290 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
61291 + [RLIMIT_DATA] = "RLIMIT_DATA",
61292 + [RLIMIT_STACK] = "RLIMIT_STACK",
61293 + [RLIMIT_CORE] = "RLIMIT_CORE",
61294 + [RLIMIT_RSS] = "RLIMIT_RSS",
61295 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
61296 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
61297 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
61298 + [RLIMIT_AS] = "RLIMIT_AS",
61299 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
61300 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
61301 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
61302 + [RLIMIT_NICE] = "RLIMIT_NICE",
61303 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
61304 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
61305 + [GR_CRASH_RES] = "RLIMIT_CRASH"
61306 +};
61307 +
61308 +void
61309 +gr_log_resource(const struct task_struct *task,
61310 + const int res, const unsigned long wanted, const int gt)
61311 +{
61312 + const struct cred *cred;
61313 + unsigned long rlim;
61314 +
61315 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
61316 + return;
61317 +
61318 + // not yet supported resource
61319 + if (unlikely(!restab_log[res]))
61320 + return;
61321 +
61322 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
61323 + rlim = task->signal->rlim[res].rlim_max;
61324 + else
61325 + rlim = task->signal->rlim[res].rlim_cur;
61326 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
61327 + return;
61328 +
61329 + rcu_read_lock();
61330 + cred = __task_cred(task);
61331 +
61332 + if (res == RLIMIT_NPROC &&
61333 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
61334 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
61335 + goto out_rcu_unlock;
61336 + else if (res == RLIMIT_MEMLOCK &&
61337 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
61338 + goto out_rcu_unlock;
61339 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
61340 + goto out_rcu_unlock;
61341 + rcu_read_unlock();
61342 +
61343 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
61344 +
61345 + return;
61346 +out_rcu_unlock:
61347 + rcu_read_unlock();
61348 + return;
61349 +}
61350 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
61351 new file mode 100644
61352 index 0000000..1d1b734
61353 --- /dev/null
61354 +++ b/grsecurity/gracl_segv.c
61355 @@ -0,0 +1,284 @@
61356 +#include <linux/kernel.h>
61357 +#include <linux/mm.h>
61358 +#include <asm/uaccess.h>
61359 +#include <asm/errno.h>
61360 +#include <asm/mman.h>
61361 +#include <net/sock.h>
61362 +#include <linux/file.h>
61363 +#include <linux/fs.h>
61364 +#include <linux/net.h>
61365 +#include <linux/in.h>
61366 +#include <linux/smp_lock.h>
61367 +#include <linux/slab.h>
61368 +#include <linux/types.h>
61369 +#include <linux/sched.h>
61370 +#include <linux/timer.h>
61371 +#include <linux/gracl.h>
61372 +#include <linux/grsecurity.h>
61373 +#include <linux/grinternal.h>
61374 +
61375 +static struct crash_uid *uid_set;
61376 +static unsigned short uid_used;
61377 +static DEFINE_SPINLOCK(gr_uid_lock);
61378 +extern rwlock_t gr_inode_lock;
61379 +extern struct acl_subject_label *
61380 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
61381 + struct acl_role_label *role);
61382 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
61383 +
61384 +int
61385 +gr_init_uidset(void)
61386 +{
61387 + uid_set =
61388 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
61389 + uid_used = 0;
61390 +
61391 + return uid_set ? 1 : 0;
61392 +}
61393 +
61394 +void
61395 +gr_free_uidset(void)
61396 +{
61397 + if (uid_set)
61398 + kfree(uid_set);
61399 +
61400 + return;
61401 +}
61402 +
61403 +int
61404 +gr_find_uid(const uid_t uid)
61405 +{
61406 + struct crash_uid *tmp = uid_set;
61407 + uid_t buid;
61408 + int low = 0, high = uid_used - 1, mid;
61409 +
61410 + while (high >= low) {
61411 + mid = (low + high) >> 1;
61412 + buid = tmp[mid].uid;
61413 + if (buid == uid)
61414 + return mid;
61415 + if (buid > uid)
61416 + high = mid - 1;
61417 + if (buid < uid)
61418 + low = mid + 1;
61419 + }
61420 +
61421 + return -1;
61422 +}
61423 +
61424 +static __inline__ void
61425 +gr_insertsort(void)
61426 +{
61427 + unsigned short i, j;
61428 + struct crash_uid index;
61429 +
61430 + for (i = 1; i < uid_used; i++) {
61431 + index = uid_set[i];
61432 + j = i;
61433 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
61434 + uid_set[j] = uid_set[j - 1];
61435 + j--;
61436 + }
61437 + uid_set[j] = index;
61438 + }
61439 +
61440 + return;
61441 +}
61442 +
61443 +static __inline__ void
61444 +gr_insert_uid(const uid_t uid, const unsigned long expires)
61445 +{
61446 + int loc;
61447 +
61448 + if (uid_used == GR_UIDTABLE_MAX)
61449 + return;
61450 +
61451 + loc = gr_find_uid(uid);
61452 +
61453 + if (loc >= 0) {
61454 + uid_set[loc].expires = expires;
61455 + return;
61456 + }
61457 +
61458 + uid_set[uid_used].uid = uid;
61459 + uid_set[uid_used].expires = expires;
61460 + uid_used++;
61461 +
61462 + gr_insertsort();
61463 +
61464 + return;
61465 +}
61466 +
61467 +void
61468 +gr_remove_uid(const unsigned short loc)
61469 +{
61470 + unsigned short i;
61471 +
61472 + for (i = loc + 1; i < uid_used; i++)
61473 + uid_set[i - 1] = uid_set[i];
61474 +
61475 + uid_used--;
61476 +
61477 + return;
61478 +}
61479 +
61480 +int
61481 +gr_check_crash_uid(const uid_t uid)
61482 +{
61483 + int loc;
61484 + int ret = 0;
61485 +
61486 + if (unlikely(!gr_acl_is_enabled()))
61487 + return 0;
61488 +
61489 + spin_lock(&gr_uid_lock);
61490 + loc = gr_find_uid(uid);
61491 +
61492 + if (loc < 0)
61493 + goto out_unlock;
61494 +
61495 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
61496 + gr_remove_uid(loc);
61497 + else
61498 + ret = 1;
61499 +
61500 +out_unlock:
61501 + spin_unlock(&gr_uid_lock);
61502 + return ret;
61503 +}
61504 +
61505 +static __inline__ int
61506 +proc_is_setxid(const struct cred *cred)
61507 +{
61508 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
61509 + cred->uid != cred->fsuid)
61510 + return 1;
61511 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
61512 + cred->gid != cred->fsgid)
61513 + return 1;
61514 +
61515 + return 0;
61516 +}
61517 +
61518 +void
61519 +gr_handle_crash(struct task_struct *task, const int sig)
61520 +{
61521 + struct acl_subject_label *curr;
61522 + struct task_struct *tsk, *tsk2;
61523 + const struct cred *cred;
61524 + const struct cred *cred2;
61525 +
61526 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
61527 + return;
61528 +
61529 + if (unlikely(!gr_acl_is_enabled()))
61530 + return;
61531 +
61532 + curr = task->acl;
61533 +
61534 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
61535 + return;
61536 +
61537 + if (time_before_eq(curr->expires, get_seconds())) {
61538 + curr->expires = 0;
61539 + curr->crashes = 0;
61540 + }
61541 +
61542 + curr->crashes++;
61543 +
61544 + if (!curr->expires)
61545 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
61546 +
61547 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
61548 + time_after(curr->expires, get_seconds())) {
61549 + rcu_read_lock();
61550 + cred = __task_cred(task);
61551 + if (cred->uid && proc_is_setxid(cred)) {
61552 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
61553 + spin_lock(&gr_uid_lock);
61554 + gr_insert_uid(cred->uid, curr->expires);
61555 + spin_unlock(&gr_uid_lock);
61556 + curr->expires = 0;
61557 + curr->crashes = 0;
61558 + read_lock(&tasklist_lock);
61559 + do_each_thread(tsk2, tsk) {
61560 + cred2 = __task_cred(tsk);
61561 + if (tsk != task && cred2->uid == cred->uid)
61562 + gr_fake_force_sig(SIGKILL, tsk);
61563 + } while_each_thread(tsk2, tsk);
61564 + read_unlock(&tasklist_lock);
61565 + } else {
61566 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
61567 + read_lock(&tasklist_lock);
61568 + read_lock(&grsec_exec_file_lock);
61569 + do_each_thread(tsk2, tsk) {
61570 + if (likely(tsk != task)) {
61571 + // if this thread has the same subject as the one that triggered
61572 + // RES_CRASH and it's the same binary, kill it
61573 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
61574 + gr_fake_force_sig(SIGKILL, tsk);
61575 + }
61576 + } while_each_thread(tsk2, tsk);
61577 + read_unlock(&grsec_exec_file_lock);
61578 + read_unlock(&tasklist_lock);
61579 + }
61580 + rcu_read_unlock();
61581 + }
61582 +
61583 + return;
61584 +}
61585 +
61586 +int
61587 +gr_check_crash_exec(const struct file *filp)
61588 +{
61589 + struct acl_subject_label *curr;
61590 +
61591 + if (unlikely(!gr_acl_is_enabled()))
61592 + return 0;
61593 +
61594 + read_lock(&gr_inode_lock);
61595 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
61596 + filp->f_path.dentry->d_inode->i_sb->s_dev,
61597 + current->role);
61598 + read_unlock(&gr_inode_lock);
61599 +
61600 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
61601 + (!curr->crashes && !curr->expires))
61602 + return 0;
61603 +
61604 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
61605 + time_after(curr->expires, get_seconds()))
61606 + return 1;
61607 + else if (time_before_eq(curr->expires, get_seconds())) {
61608 + curr->crashes = 0;
61609 + curr->expires = 0;
61610 + }
61611 +
61612 + return 0;
61613 +}
61614 +
61615 +void
61616 +gr_handle_alertkill(struct task_struct *task)
61617 +{
61618 + struct acl_subject_label *curracl;
61619 + __u32 curr_ip;
61620 + struct task_struct *p, *p2;
61621 +
61622 + if (unlikely(!gr_acl_is_enabled()))
61623 + return;
61624 +
61625 + curracl = task->acl;
61626 + curr_ip = task->signal->curr_ip;
61627 +
61628 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
61629 + read_lock(&tasklist_lock);
61630 + do_each_thread(p2, p) {
61631 + if (p->signal->curr_ip == curr_ip)
61632 + gr_fake_force_sig(SIGKILL, p);
61633 + } while_each_thread(p2, p);
61634 + read_unlock(&tasklist_lock);
61635 + } else if (curracl->mode & GR_KILLPROC)
61636 + gr_fake_force_sig(SIGKILL, task);
61637 +
61638 + return;
61639 +}
61640 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
61641 new file mode 100644
61642 index 0000000..9d83a69
61643 --- /dev/null
61644 +++ b/grsecurity/gracl_shm.c
61645 @@ -0,0 +1,40 @@
61646 +#include <linux/kernel.h>
61647 +#include <linux/mm.h>
61648 +#include <linux/sched.h>
61649 +#include <linux/file.h>
61650 +#include <linux/ipc.h>
61651 +#include <linux/gracl.h>
61652 +#include <linux/grsecurity.h>
61653 +#include <linux/grinternal.h>
61654 +
61655 +int
61656 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
61657 + const time_t shm_createtime, const uid_t cuid, const int shmid)
61658 +{
61659 + struct task_struct *task;
61660 +
61661 + if (!gr_acl_is_enabled())
61662 + return 1;
61663 +
61664 + rcu_read_lock();
61665 + read_lock(&tasklist_lock);
61666 +
61667 + task = find_task_by_vpid(shm_cprid);
61668 +
61669 + if (unlikely(!task))
61670 + task = find_task_by_vpid(shm_lapid);
61671 +
61672 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
61673 + (task->pid == shm_lapid)) &&
61674 + (task->acl->mode & GR_PROTSHM) &&
61675 + (task->acl != current->acl))) {
61676 + read_unlock(&tasklist_lock);
61677 + rcu_read_unlock();
61678 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
61679 + return 0;
61680 + }
61681 + read_unlock(&tasklist_lock);
61682 + rcu_read_unlock();
61683 +
61684 + return 1;
61685 +}
61686 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
61687 new file mode 100644
61688 index 0000000..bc0be01
61689 --- /dev/null
61690 +++ b/grsecurity/grsec_chdir.c
61691 @@ -0,0 +1,19 @@
61692 +#include <linux/kernel.h>
61693 +#include <linux/sched.h>
61694 +#include <linux/fs.h>
61695 +#include <linux/file.h>
61696 +#include <linux/grsecurity.h>
61697 +#include <linux/grinternal.h>
61698 +
61699 +void
61700 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
61701 +{
61702 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
61703 + if ((grsec_enable_chdir && grsec_enable_group &&
61704 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
61705 + !grsec_enable_group)) {
61706 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
61707 + }
61708 +#endif
61709 + return;
61710 +}
61711 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
61712 new file mode 100644
61713 index 0000000..197bdd5
61714 --- /dev/null
61715 +++ b/grsecurity/grsec_chroot.c
61716 @@ -0,0 +1,386 @@
61717 +#include <linux/kernel.h>
61718 +#include <linux/module.h>
61719 +#include <linux/sched.h>
61720 +#include <linux/file.h>
61721 +#include <linux/fs.h>
61722 +#include <linux/mount.h>
61723 +#include <linux/types.h>
61724 +#include <linux/pid_namespace.h>
61725 +#include <linux/grsecurity.h>
61726 +#include <linux/grinternal.h>
61727 +
61728 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
61729 +{
61730 +#ifdef CONFIG_GRKERNSEC
61731 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
61732 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
61733 + task->gr_is_chrooted = 1;
61734 + else
61735 + task->gr_is_chrooted = 0;
61736 +
61737 + task->gr_chroot_dentry = path->dentry;
61738 +#endif
61739 + return;
61740 +}
61741 +
61742 +void gr_clear_chroot_entries(struct task_struct *task)
61743 +{
61744 +#ifdef CONFIG_GRKERNSEC
61745 + task->gr_is_chrooted = 0;
61746 + task->gr_chroot_dentry = NULL;
61747 +#endif
61748 + return;
61749 +}
61750 +
61751 +int
61752 +gr_handle_chroot_unix(const pid_t pid)
61753 +{
61754 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
61755 + struct task_struct *p;
61756 +
61757 + if (unlikely(!grsec_enable_chroot_unix))
61758 + return 1;
61759 +
61760 + if (likely(!proc_is_chrooted(current)))
61761 + return 1;
61762 +
61763 + rcu_read_lock();
61764 + read_lock(&tasklist_lock);
61765 +
61766 + p = find_task_by_vpid_unrestricted(pid);
61767 + if (unlikely(p && !have_same_root(current, p))) {
61768 + read_unlock(&tasklist_lock);
61769 + rcu_read_unlock();
61770 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
61771 + return 0;
61772 + }
61773 + read_unlock(&tasklist_lock);
61774 + rcu_read_unlock();
61775 +#endif
61776 + return 1;
61777 +}
61778 +
61779 +int
61780 +gr_handle_chroot_nice(void)
61781 +{
61782 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
61783 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
61784 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
61785 + return -EPERM;
61786 + }
61787 +#endif
61788 + return 0;
61789 +}
61790 +
61791 +int
61792 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
61793 +{
61794 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
61795 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
61796 + && proc_is_chrooted(current)) {
61797 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
61798 + return -EACCES;
61799 + }
61800 +#endif
61801 + return 0;
61802 +}
61803 +
61804 +int
61805 +gr_handle_chroot_rawio(const struct inode *inode)
61806 +{
61807 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
61808 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
61809 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
61810 + return 1;
61811 +#endif
61812 + return 0;
61813 +}
61814 +
61815 +int
61816 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
61817 +{
61818 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61819 + struct task_struct *p;
61820 + int ret = 0;
61821 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
61822 + return ret;
61823 +
61824 + read_lock(&tasklist_lock);
61825 + do_each_pid_task(pid, type, p) {
61826 + if (!have_same_root(current, p)) {
61827 + ret = 1;
61828 + goto out;
61829 + }
61830 + } while_each_pid_task(pid, type, p);
61831 +out:
61832 + read_unlock(&tasklist_lock);
61833 + return ret;
61834 +#endif
61835 + return 0;
61836 +}
61837 +
61838 +int
61839 +gr_pid_is_chrooted(struct task_struct *p)
61840 +{
61841 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61842 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
61843 + return 0;
61844 +
61845 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
61846 + !have_same_root(current, p)) {
61847 + return 1;
61848 + }
61849 +#endif
61850 + return 0;
61851 +}
61852 +
61853 +EXPORT_SYMBOL(gr_pid_is_chrooted);
61854 +
61855 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
61856 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
61857 +{
61858 + struct dentry *dentry = (struct dentry *)u_dentry;
61859 + struct vfsmount *mnt = (struct vfsmount *)u_mnt;
61860 + struct dentry *realroot;
61861 + struct vfsmount *realrootmnt;
61862 + struct dentry *currentroot;
61863 + struct vfsmount *currentmnt;
61864 + struct task_struct *reaper = &init_task;
61865 + int ret = 1;
61866 +
61867 + read_lock(&reaper->fs->lock);
61868 + realrootmnt = mntget(reaper->fs->root.mnt);
61869 + realroot = dget(reaper->fs->root.dentry);
61870 + read_unlock(&reaper->fs->lock);
61871 +
61872 + read_lock(&current->fs->lock);
61873 + currentmnt = mntget(current->fs->root.mnt);
61874 + currentroot = dget(current->fs->root.dentry);
61875 + read_unlock(&current->fs->lock);
61876 +
61877 + spin_lock(&dcache_lock);
61878 + for (;;) {
61879 + if (unlikely((dentry == realroot && mnt == realrootmnt)
61880 + || (dentry == currentroot && mnt == currentmnt)))
61881 + break;
61882 + if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
61883 + if (mnt->mnt_parent == mnt)
61884 + break;
61885 + dentry = mnt->mnt_mountpoint;
61886 + mnt = mnt->mnt_parent;
61887 + continue;
61888 + }
61889 + dentry = dentry->d_parent;
61890 + }
61891 + spin_unlock(&dcache_lock);
61892 +
61893 + dput(currentroot);
61894 + mntput(currentmnt);
61895 +
61896 + /* access is outside of chroot */
61897 + if (dentry == realroot && mnt == realrootmnt)
61898 + ret = 0;
61899 +
61900 + dput(realroot);
61901 + mntput(realrootmnt);
61902 + return ret;
61903 +}
61904 +#endif
61905 +
61906 +int
61907 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
61908 +{
61909 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
61910 + if (!grsec_enable_chroot_fchdir)
61911 + return 1;
61912 +
61913 + if (!proc_is_chrooted(current))
61914 + return 1;
61915 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
61916 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
61917 + return 0;
61918 + }
61919 +#endif
61920 + return 1;
61921 +}
61922 +
61923 +int
61924 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
61925 + const time_t shm_createtime)
61926 +{
61927 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
61928 + struct task_struct *p;
61929 + time_t starttime;
61930 +
61931 + if (unlikely(!grsec_enable_chroot_shmat))
61932 + return 1;
61933 +
61934 + if (likely(!proc_is_chrooted(current)))
61935 + return 1;
61936 +
61937 + rcu_read_lock();
61938 + read_lock(&tasklist_lock);
61939 +
61940 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
61941 + starttime = p->start_time.tv_sec;
61942 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
61943 + if (have_same_root(current, p)) {
61944 + goto allow;
61945 + } else {
61946 + read_unlock(&tasklist_lock);
61947 + rcu_read_unlock();
61948 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
61949 + return 0;
61950 + }
61951 + }
61952 + /* creator exited, pid reuse, fall through to next check */
61953 + }
61954 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
61955 + if (unlikely(!have_same_root(current, p))) {
61956 + read_unlock(&tasklist_lock);
61957 + rcu_read_unlock();
61958 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
61959 + return 0;
61960 + }
61961 + }
61962 +
61963 +allow:
61964 + read_unlock(&tasklist_lock);
61965 + rcu_read_unlock();
61966 +#endif
61967 + return 1;
61968 +}
61969 +
61970 +void
61971 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
61972 +{
61973 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
61974 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
61975 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
61976 +#endif
61977 + return;
61978 +}
61979 +
61980 +int
61981 +gr_handle_chroot_mknod(const struct dentry *dentry,
61982 + const struct vfsmount *mnt, const int mode)
61983 +{
61984 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
61985 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
61986 + proc_is_chrooted(current)) {
61987 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
61988 + return -EPERM;
61989 + }
61990 +#endif
61991 + return 0;
61992 +}
61993 +
61994 +int
61995 +gr_handle_chroot_mount(const struct dentry *dentry,
61996 + const struct vfsmount *mnt, const char *dev_name)
61997 +{
61998 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
61999 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
62000 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
62001 + return -EPERM;
62002 + }
62003 +#endif
62004 + return 0;
62005 +}
62006 +
62007 +int
62008 +gr_handle_chroot_pivot(void)
62009 +{
62010 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
62011 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
62012 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
62013 + return -EPERM;
62014 + }
62015 +#endif
62016 + return 0;
62017 +}
62018 +
62019 +int
62020 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
62021 +{
62022 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
62023 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
62024 + !gr_is_outside_chroot(dentry, mnt)) {
62025 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
62026 + return -EPERM;
62027 + }
62028 +#endif
62029 + return 0;
62030 +}
62031 +
62032 +extern const char *captab_log[];
62033 +extern int captab_log_entries;
62034 +
62035 +int
62036 +gr_chroot_is_capable(const int cap)
62037 +{
62038 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62039 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
62040 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
62041 + if (cap_raised(chroot_caps, cap)) {
62042 + const struct cred *creds = current_cred();
62043 + if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
62044 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
62045 + }
62046 + return 0;
62047 + }
62048 + }
62049 +#endif
62050 + return 1;
62051 +}
62052 +
62053 +int
62054 +gr_chroot_is_capable_nolog(const int cap)
62055 +{
62056 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62057 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
62058 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
62059 + if (cap_raised(chroot_caps, cap)) {
62060 + return 0;
62061 + }
62062 + }
62063 +#endif
62064 + return 1;
62065 +}
62066 +
62067 +int
62068 +gr_handle_chroot_sysctl(const int op)
62069 +{
62070 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
62071 + if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
62072 + && (op & MAY_WRITE))
62073 + return -EACCES;
62074 +#endif
62075 + return 0;
62076 +}
62077 +
62078 +void
62079 +gr_handle_chroot_chdir(struct path *path)
62080 +{
62081 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
62082 + if (grsec_enable_chroot_chdir)
62083 + set_fs_pwd(current->fs, path);
62084 +#endif
62085 + return;
62086 +}
62087 +
62088 +int
62089 +gr_handle_chroot_chmod(const struct dentry *dentry,
62090 + const struct vfsmount *mnt, const int mode)
62091 +{
62092 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
62093 + /* allow chmod +s on directories, but not on files */
62094 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
62095 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
62096 + proc_is_chrooted(current)) {
62097 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
62098 + return -EPERM;
62099 + }
62100 +#endif
62101 + return 0;
62102 +}
62103 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
62104 new file mode 100644
62105 index 0000000..b81db5b
62106 --- /dev/null
62107 +++ b/grsecurity/grsec_disabled.c
62108 @@ -0,0 +1,439 @@
62109 +#include <linux/kernel.h>
62110 +#include <linux/module.h>
62111 +#include <linux/sched.h>
62112 +#include <linux/file.h>
62113 +#include <linux/fs.h>
62114 +#include <linux/kdev_t.h>
62115 +#include <linux/net.h>
62116 +#include <linux/in.h>
62117 +#include <linux/ip.h>
62118 +#include <linux/skbuff.h>
62119 +#include <linux/sysctl.h>
62120 +
62121 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
62122 +void
62123 +pax_set_initial_flags(struct linux_binprm *bprm)
62124 +{
62125 + return;
62126 +}
62127 +#endif
62128 +
62129 +#ifdef CONFIG_SYSCTL
62130 +__u32
62131 +gr_handle_sysctl(const struct ctl_table * table, const int op)
62132 +{
62133 + return 0;
62134 +}
62135 +#endif
62136 +
62137 +#ifdef CONFIG_TASKSTATS
62138 +int gr_is_taskstats_denied(int pid)
62139 +{
62140 + return 0;
62141 +}
62142 +#endif
62143 +
62144 +int
62145 +gr_acl_is_enabled(void)
62146 +{
62147 + return 0;
62148 +}
62149 +
62150 +void
62151 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
62152 +{
62153 + return;
62154 +}
62155 +
62156 +int
62157 +gr_handle_rawio(const struct inode *inode)
62158 +{
62159 + return 0;
62160 +}
62161 +
62162 +void
62163 +gr_acl_handle_psacct(struct task_struct *task, const long code)
62164 +{
62165 + return;
62166 +}
62167 +
62168 +int
62169 +gr_handle_ptrace(struct task_struct *task, const long request)
62170 +{
62171 + return 0;
62172 +}
62173 +
62174 +int
62175 +gr_handle_proc_ptrace(struct task_struct *task)
62176 +{
62177 + return 0;
62178 +}
62179 +
62180 +void
62181 +gr_learn_resource(const struct task_struct *task,
62182 + const int res, const unsigned long wanted, const int gt)
62183 +{
62184 + return;
62185 +}
62186 +
62187 +int
62188 +gr_set_acls(const int type)
62189 +{
62190 + return 0;
62191 +}
62192 +
62193 +int
62194 +gr_check_hidden_task(const struct task_struct *tsk)
62195 +{
62196 + return 0;
62197 +}
62198 +
62199 +int
62200 +gr_check_protected_task(const struct task_struct *task)
62201 +{
62202 + return 0;
62203 +}
62204 +
62205 +int
62206 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
62207 +{
62208 + return 0;
62209 +}
62210 +
62211 +void
62212 +gr_copy_label(struct task_struct *tsk)
62213 +{
62214 + return;
62215 +}
62216 +
62217 +void
62218 +gr_set_pax_flags(struct task_struct *task)
62219 +{
62220 + return;
62221 +}
62222 +
62223 +int
62224 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
62225 + const int unsafe_share)
62226 +{
62227 + return 0;
62228 +}
62229 +
62230 +void
62231 +gr_handle_delete(const ino_t ino, const dev_t dev)
62232 +{
62233 + return;
62234 +}
62235 +
62236 +void
62237 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
62238 +{
62239 + return;
62240 +}
62241 +
62242 +void
62243 +gr_handle_crash(struct task_struct *task, const int sig)
62244 +{
62245 + return;
62246 +}
62247 +
62248 +int
62249 +gr_check_crash_exec(const struct file *filp)
62250 +{
62251 + return 0;
62252 +}
62253 +
62254 +int
62255 +gr_check_crash_uid(const uid_t uid)
62256 +{
62257 + return 0;
62258 +}
62259 +
62260 +void
62261 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
62262 + struct dentry *old_dentry,
62263 + struct dentry *new_dentry,
62264 + struct vfsmount *mnt, const __u8 replace)
62265 +{
62266 + return;
62267 +}
62268 +
62269 +int
62270 +gr_search_socket(const int family, const int type, const int protocol)
62271 +{
62272 + return 1;
62273 +}
62274 +
62275 +int
62276 +gr_search_connectbind(const int mode, const struct socket *sock,
62277 + const struct sockaddr_in *addr)
62278 +{
62279 + return 0;
62280 +}
62281 +
62282 +void
62283 +gr_handle_alertkill(struct task_struct *task)
62284 +{
62285 + return;
62286 +}
62287 +
62288 +__u32
62289 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
62290 +{
62291 + return 1;
62292 +}
62293 +
62294 +__u32
62295 +gr_acl_handle_hidden_file(const struct dentry * dentry,
62296 + const struct vfsmount * mnt)
62297 +{
62298 + return 1;
62299 +}
62300 +
62301 +__u32
62302 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
62303 + int acc_mode)
62304 +{
62305 + return 1;
62306 +}
62307 +
62308 +__u32
62309 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
62310 +{
62311 + return 1;
62312 +}
62313 +
62314 +__u32
62315 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
62316 +{
62317 + return 1;
62318 +}
62319 +
62320 +int
62321 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
62322 + unsigned int *vm_flags)
62323 +{
62324 + return 1;
62325 +}
62326 +
62327 +__u32
62328 +gr_acl_handle_truncate(const struct dentry * dentry,
62329 + const struct vfsmount * mnt)
62330 +{
62331 + return 1;
62332 +}
62333 +
62334 +__u32
62335 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
62336 +{
62337 + return 1;
62338 +}
62339 +
62340 +__u32
62341 +gr_acl_handle_access(const struct dentry * dentry,
62342 + const struct vfsmount * mnt, const int fmode)
62343 +{
62344 + return 1;
62345 +}
62346 +
62347 +__u32
62348 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
62349 + mode_t mode)
62350 +{
62351 + return 1;
62352 +}
62353 +
62354 +__u32
62355 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
62356 + mode_t mode)
62357 +{
62358 + return 1;
62359 +}
62360 +
62361 +__u32
62362 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
62363 +{
62364 + return 1;
62365 +}
62366 +
62367 +__u32
62368 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
62369 +{
62370 + return 1;
62371 +}
62372 +
62373 +void
62374 +grsecurity_init(void)
62375 +{
62376 + return;
62377 +}
62378 +
62379 +__u32
62380 +gr_acl_handle_mknod(const struct dentry * new_dentry,
62381 + const struct dentry * parent_dentry,
62382 + const struct vfsmount * parent_mnt,
62383 + const int mode)
62384 +{
62385 + return 1;
62386 +}
62387 +
62388 +__u32
62389 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
62390 + const struct dentry * parent_dentry,
62391 + const struct vfsmount * parent_mnt)
62392 +{
62393 + return 1;
62394 +}
62395 +
62396 +__u32
62397 +gr_acl_handle_symlink(const struct dentry * new_dentry,
62398 + const struct dentry * parent_dentry,
62399 + const struct vfsmount * parent_mnt, const char *from)
62400 +{
62401 + return 1;
62402 +}
62403 +
62404 +__u32
62405 +gr_acl_handle_link(const struct dentry * new_dentry,
62406 + const struct dentry * parent_dentry,
62407 + const struct vfsmount * parent_mnt,
62408 + const struct dentry * old_dentry,
62409 + const struct vfsmount * old_mnt, const char *to)
62410 +{
62411 + return 1;
62412 +}
62413 +
62414 +int
62415 +gr_acl_handle_rename(const struct dentry *new_dentry,
62416 + const struct dentry *parent_dentry,
62417 + const struct vfsmount *parent_mnt,
62418 + const struct dentry *old_dentry,
62419 + const struct inode *old_parent_inode,
62420 + const struct vfsmount *old_mnt, const char *newname)
62421 +{
62422 + return 0;
62423 +}
62424 +
62425 +int
62426 +gr_acl_handle_filldir(const struct file *file, const char *name,
62427 + const int namelen, const ino_t ino)
62428 +{
62429 + return 1;
62430 +}
62431 +
62432 +int
62433 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62434 + const time_t shm_createtime, const uid_t cuid, const int shmid)
62435 +{
62436 + return 1;
62437 +}
62438 +
62439 +int
62440 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
62441 +{
62442 + return 0;
62443 +}
62444 +
62445 +int
62446 +gr_search_accept(const struct socket *sock)
62447 +{
62448 + return 0;
62449 +}
62450 +
62451 +int
62452 +gr_search_listen(const struct socket *sock)
62453 +{
62454 + return 0;
62455 +}
62456 +
62457 +int
62458 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
62459 +{
62460 + return 0;
62461 +}
62462 +
62463 +__u32
62464 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
62465 +{
62466 + return 1;
62467 +}
62468 +
62469 +__u32
62470 +gr_acl_handle_creat(const struct dentry * dentry,
62471 + const struct dentry * p_dentry,
62472 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
62473 + const int imode)
62474 +{
62475 + return 1;
62476 +}
62477 +
62478 +void
62479 +gr_acl_handle_exit(void)
62480 +{
62481 + return;
62482 +}
62483 +
62484 +int
62485 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
62486 +{
62487 + return 1;
62488 +}
62489 +
62490 +void
62491 +gr_set_role_label(const uid_t uid, const gid_t gid)
62492 +{
62493 + return;
62494 +}
62495 +
62496 +int
62497 +gr_acl_handle_procpidmem(const struct task_struct *task)
62498 +{
62499 + return 0;
62500 +}
62501 +
62502 +int
62503 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
62504 +{
62505 + return 0;
62506 +}
62507 +
62508 +int
62509 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
62510 +{
62511 + return 0;
62512 +}
62513 +
62514 +void
62515 +gr_set_kernel_label(struct task_struct *task)
62516 +{
62517 + return;
62518 +}
62519 +
62520 +int
62521 +gr_check_user_change(int real, int effective, int fs)
62522 +{
62523 + return 0;
62524 +}
62525 +
62526 +int
62527 +gr_check_group_change(int real, int effective, int fs)
62528 +{
62529 + return 0;
62530 +}
62531 +
62532 +int gr_acl_enable_at_secure(void)
62533 +{
62534 + return 0;
62535 +}
62536 +
62537 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
62538 +{
62539 + return dentry->d_inode->i_sb->s_dev;
62540 +}
62541 +
62542 +EXPORT_SYMBOL(gr_learn_resource);
62543 +EXPORT_SYMBOL(gr_set_kernel_label);
62544 +#ifdef CONFIG_SECURITY
62545 +EXPORT_SYMBOL(gr_check_user_change);
62546 +EXPORT_SYMBOL(gr_check_group_change);
62547 +#endif
62548 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
62549 new file mode 100644
62550 index 0000000..a96e155
62551 --- /dev/null
62552 +++ b/grsecurity/grsec_exec.c
62553 @@ -0,0 +1,204 @@
62554 +#include <linux/kernel.h>
62555 +#include <linux/sched.h>
62556 +#include <linux/file.h>
62557 +#include <linux/binfmts.h>
62558 +#include <linux/smp_lock.h>
62559 +#include <linux/fs.h>
62560 +#include <linux/types.h>
62561 +#include <linux/grdefs.h>
62562 +#include <linux/grinternal.h>
62563 +#include <linux/capability.h>
62564 +#include <linux/compat.h>
62565 +#include <linux/module.h>
62566 +
62567 +#include <asm/uaccess.h>
62568 +
62569 +#ifdef CONFIG_GRKERNSEC_EXECLOG
62570 +static char gr_exec_arg_buf[132];
62571 +static DEFINE_MUTEX(gr_exec_arg_mutex);
62572 +#endif
62573 +
62574 +void
62575 +gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
62576 +{
62577 +#ifdef CONFIG_GRKERNSEC_EXECLOG
62578 + char *grarg = gr_exec_arg_buf;
62579 + unsigned int i, x, execlen = 0;
62580 + char c;
62581 +
62582 + if (!((grsec_enable_execlog && grsec_enable_group &&
62583 + in_group_p(grsec_audit_gid))
62584 + || (grsec_enable_execlog && !grsec_enable_group)))
62585 + return;
62586 +
62587 + mutex_lock(&gr_exec_arg_mutex);
62588 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
62589 +
62590 + if (unlikely(argv == NULL))
62591 + goto log;
62592 +
62593 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
62594 + const char __user *p;
62595 + unsigned int len;
62596 +
62597 + if (copy_from_user(&p, argv + i, sizeof(p)))
62598 + goto log;
62599 + if (!p)
62600 + goto log;
62601 + len = strnlen_user(p, 128 - execlen);
62602 + if (len > 128 - execlen)
62603 + len = 128 - execlen;
62604 + else if (len > 0)
62605 + len--;
62606 + if (copy_from_user(grarg + execlen, p, len))
62607 + goto log;
62608 +
62609 + /* rewrite unprintable characters */
62610 + for (x = 0; x < len; x++) {
62611 + c = *(grarg + execlen + x);
62612 + if (c < 32 || c > 126)
62613 + *(grarg + execlen + x) = ' ';
62614 + }
62615 +
62616 + execlen += len;
62617 + *(grarg + execlen) = ' ';
62618 + *(grarg + execlen + 1) = '\0';
62619 + execlen++;
62620 + }
62621 +
62622 + log:
62623 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
62624 + bprm->file->f_path.mnt, grarg);
62625 + mutex_unlock(&gr_exec_arg_mutex);
62626 +#endif
62627 + return;
62628 +}
62629 +
62630 +#ifdef CONFIG_COMPAT
62631 +void
62632 +gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
62633 +{
62634 +#ifdef CONFIG_GRKERNSEC_EXECLOG
62635 + char *grarg = gr_exec_arg_buf;
62636 + unsigned int i, x, execlen = 0;
62637 + char c;
62638 +
62639 + if (!((grsec_enable_execlog && grsec_enable_group &&
62640 + in_group_p(grsec_audit_gid))
62641 + || (grsec_enable_execlog && !grsec_enable_group)))
62642 + return;
62643 +
62644 + mutex_lock(&gr_exec_arg_mutex);
62645 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
62646 +
62647 + if (unlikely(argv == NULL))
62648 + goto log;
62649 +
62650 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
62651 + compat_uptr_t p;
62652 + unsigned int len;
62653 +
62654 + if (get_user(p, argv + i))
62655 + goto log;
62656 + len = strnlen_user(compat_ptr(p), 128 - execlen);
62657 + if (len > 128 - execlen)
62658 + len = 128 - execlen;
62659 + else if (len > 0)
62660 + len--;
62661 + else
62662 + goto log;
62663 + if (copy_from_user(grarg + execlen, compat_ptr(p), len))
62664 + goto log;
62665 +
62666 + /* rewrite unprintable characters */
62667 + for (x = 0; x < len; x++) {
62668 + c = *(grarg + execlen + x);
62669 + if (c < 32 || c > 126)
62670 + *(grarg + execlen + x) = ' ';
62671 + }
62672 +
62673 + execlen += len;
62674 + *(grarg + execlen) = ' ';
62675 + *(grarg + execlen + 1) = '\0';
62676 + execlen++;
62677 + }
62678 +
62679 + log:
62680 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
62681 + bprm->file->f_path.mnt, grarg);
62682 + mutex_unlock(&gr_exec_arg_mutex);
62683 +#endif
62684 + return;
62685 +}
62686 +#endif
62687 +
62688 +#ifdef CONFIG_GRKERNSEC
62689 +extern int gr_acl_is_capable(const int cap);
62690 +extern int gr_acl_is_capable_nolog(const int cap);
62691 +extern int gr_chroot_is_capable(const int cap);
62692 +extern int gr_chroot_is_capable_nolog(const int cap);
62693 +#endif
62694 +
62695 +const char *captab_log[] = {
62696 + "CAP_CHOWN",
62697 + "CAP_DAC_OVERRIDE",
62698 + "CAP_DAC_READ_SEARCH",
62699 + "CAP_FOWNER",
62700 + "CAP_FSETID",
62701 + "CAP_KILL",
62702 + "CAP_SETGID",
62703 + "CAP_SETUID",
62704 + "CAP_SETPCAP",
62705 + "CAP_LINUX_IMMUTABLE",
62706 + "CAP_NET_BIND_SERVICE",
62707 + "CAP_NET_BROADCAST",
62708 + "CAP_NET_ADMIN",
62709 + "CAP_NET_RAW",
62710 + "CAP_IPC_LOCK",
62711 + "CAP_IPC_OWNER",
62712 + "CAP_SYS_MODULE",
62713 + "CAP_SYS_RAWIO",
62714 + "CAP_SYS_CHROOT",
62715 + "CAP_SYS_PTRACE",
62716 + "CAP_SYS_PACCT",
62717 + "CAP_SYS_ADMIN",
62718 + "CAP_SYS_BOOT",
62719 + "CAP_SYS_NICE",
62720 + "CAP_SYS_RESOURCE",
62721 + "CAP_SYS_TIME",
62722 + "CAP_SYS_TTY_CONFIG",
62723 + "CAP_MKNOD",
62724 + "CAP_LEASE",
62725 + "CAP_AUDIT_WRITE",
62726 + "CAP_AUDIT_CONTROL",
62727 + "CAP_SETFCAP",
62728 + "CAP_MAC_OVERRIDE",
62729 + "CAP_MAC_ADMIN"
62730 +};
62731 +
62732 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
62733 +
62734 +int gr_is_capable(const int cap)
62735 +{
62736 +#ifdef CONFIG_GRKERNSEC
62737 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
62738 + return 1;
62739 + return 0;
62740 +#else
62741 + return 1;
62742 +#endif
62743 +}
62744 +
62745 +int gr_is_capable_nolog(const int cap)
62746 +{
62747 +#ifdef CONFIG_GRKERNSEC
62748 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
62749 + return 1;
62750 + return 0;
62751 +#else
62752 + return 1;
62753 +#endif
62754 +}
62755 +
62756 +EXPORT_SYMBOL(gr_is_capable);
62757 +EXPORT_SYMBOL(gr_is_capable_nolog);
62758 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
62759 new file mode 100644
62760 index 0000000..d3ee748
62761 --- /dev/null
62762 +++ b/grsecurity/grsec_fifo.c
62763 @@ -0,0 +1,24 @@
62764 +#include <linux/kernel.h>
62765 +#include <linux/sched.h>
62766 +#include <linux/fs.h>
62767 +#include <linux/file.h>
62768 +#include <linux/grinternal.h>
62769 +
62770 +int
62771 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
62772 + const struct dentry *dir, const int flag, const int acc_mode)
62773 +{
62774 +#ifdef CONFIG_GRKERNSEC_FIFO
62775 + const struct cred *cred = current_cred();
62776 +
62777 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
62778 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
62779 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
62780 + (cred->fsuid != dentry->d_inode->i_uid)) {
62781 + if (!inode_permission(dentry->d_inode, acc_mode))
62782 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
62783 + return -EACCES;
62784 + }
62785 +#endif
62786 + return 0;
62787 +}
62788 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
62789 new file mode 100644
62790 index 0000000..8ca18bf
62791 --- /dev/null
62792 +++ b/grsecurity/grsec_fork.c
62793 @@ -0,0 +1,23 @@
62794 +#include <linux/kernel.h>
62795 +#include <linux/sched.h>
62796 +#include <linux/grsecurity.h>
62797 +#include <linux/grinternal.h>
62798 +#include <linux/errno.h>
62799 +
62800 +void
62801 +gr_log_forkfail(const int retval)
62802 +{
62803 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
62804 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
62805 + switch (retval) {
62806 + case -EAGAIN:
62807 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
62808 + break;
62809 + case -ENOMEM:
62810 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
62811 + break;
62812 + }
62813 + }
62814 +#endif
62815 + return;
62816 +}
62817 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
62818 new file mode 100644
62819 index 0000000..f813c26
62820 --- /dev/null
62821 +++ b/grsecurity/grsec_init.c
62822 @@ -0,0 +1,270 @@
62823 +#include <linux/kernel.h>
62824 +#include <linux/sched.h>
62825 +#include <linux/mm.h>
62826 +#include <linux/smp_lock.h>
62827 +#include <linux/gracl.h>
62828 +#include <linux/slab.h>
62829 +#include <linux/vmalloc.h>
62830 +#include <linux/percpu.h>
62831 +#include <linux/module.h>
62832 +
62833 +int grsec_enable_brute;
62834 +int grsec_enable_link;
62835 +int grsec_enable_dmesg;
62836 +int grsec_enable_harden_ptrace;
62837 +int grsec_enable_fifo;
62838 +int grsec_enable_execlog;
62839 +int grsec_enable_signal;
62840 +int grsec_enable_forkfail;
62841 +int grsec_enable_audit_ptrace;
62842 +int grsec_enable_time;
62843 +int grsec_enable_audit_textrel;
62844 +int grsec_enable_group;
62845 +int grsec_audit_gid;
62846 +int grsec_enable_chdir;
62847 +int grsec_enable_mount;
62848 +int grsec_enable_rofs;
62849 +int grsec_enable_chroot_findtask;
62850 +int grsec_enable_chroot_mount;
62851 +int grsec_enable_chroot_shmat;
62852 +int grsec_enable_chroot_fchdir;
62853 +int grsec_enable_chroot_double;
62854 +int grsec_enable_chroot_pivot;
62855 +int grsec_enable_chroot_chdir;
62856 +int grsec_enable_chroot_chmod;
62857 +int grsec_enable_chroot_mknod;
62858 +int grsec_enable_chroot_nice;
62859 +int grsec_enable_chroot_execlog;
62860 +int grsec_enable_chroot_caps;
62861 +int grsec_enable_chroot_sysctl;
62862 +int grsec_enable_chroot_unix;
62863 +int grsec_enable_tpe;
62864 +int grsec_tpe_gid;
62865 +int grsec_enable_blackhole;
62866 +#ifdef CONFIG_IPV6_MODULE
62867 +EXPORT_SYMBOL(grsec_enable_blackhole);
62868 +#endif
62869 +int grsec_lastack_retries;
62870 +int grsec_enable_tpe_all;
62871 +int grsec_enable_tpe_invert;
62872 +int grsec_enable_socket_all;
62873 +int grsec_socket_all_gid;
62874 +int grsec_enable_socket_client;
62875 +int grsec_socket_client_gid;
62876 +int grsec_enable_socket_server;
62877 +int grsec_socket_server_gid;
62878 +int grsec_resource_logging;
62879 +int grsec_disable_privio;
62880 +int grsec_enable_log_rwxmaps;
62881 +int grsec_lock;
62882 +
62883 +DEFINE_SPINLOCK(grsec_alert_lock);
62884 +unsigned long grsec_alert_wtime = 0;
62885 +unsigned long grsec_alert_fyet = 0;
62886 +
62887 +DEFINE_SPINLOCK(grsec_audit_lock);
62888 +
62889 +DEFINE_RWLOCK(grsec_exec_file_lock);
62890 +
62891 +char *gr_shared_page[4];
62892 +
62893 +char *gr_alert_log_fmt;
62894 +char *gr_audit_log_fmt;
62895 +char *gr_alert_log_buf;
62896 +char *gr_audit_log_buf;
62897 +
62898 +extern struct gr_arg *gr_usermode;
62899 +extern unsigned char *gr_system_salt;
62900 +extern unsigned char *gr_system_sum;
62901 +
62902 +void __init
62903 +grsecurity_init(void)
62904 +{
62905 + int j;
62906 + /* create the per-cpu shared pages */
62907 +
62908 +#ifdef CONFIG_X86
62909 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
62910 +#endif
62911 +
62912 + for (j = 0; j < 4; j++) {
62913 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
62914 + if (gr_shared_page[j] == NULL) {
62915 + panic("Unable to allocate grsecurity shared page");
62916 + return;
62917 + }
62918 + }
62919 +
62920 + /* allocate log buffers */
62921 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
62922 + if (!gr_alert_log_fmt) {
62923 + panic("Unable to allocate grsecurity alert log format buffer");
62924 + return;
62925 + }
62926 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
62927 + if (!gr_audit_log_fmt) {
62928 + panic("Unable to allocate grsecurity audit log format buffer");
62929 + return;
62930 + }
62931 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
62932 + if (!gr_alert_log_buf) {
62933 + panic("Unable to allocate grsecurity alert log buffer");
62934 + return;
62935 + }
62936 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
62937 + if (!gr_audit_log_buf) {
62938 + panic("Unable to allocate grsecurity audit log buffer");
62939 + return;
62940 + }
62941 +
62942 + /* allocate memory for authentication structure */
62943 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
62944 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
62945 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
62946 +
62947 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
62948 + panic("Unable to allocate grsecurity authentication structure");
62949 + return;
62950 + }
62951 +
62952 +
62953 +#ifdef CONFIG_GRKERNSEC_IO
62954 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
62955 + grsec_disable_privio = 1;
62956 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
62957 + grsec_disable_privio = 1;
62958 +#else
62959 + grsec_disable_privio = 0;
62960 +#endif
62961 +#endif
62962 +
62963 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
62964 + /* for backward compatibility, tpe_invert always defaults to on if
62965 + enabled in the kernel
62966 + */
62967 + grsec_enable_tpe_invert = 1;
62968 +#endif
62969 +
62970 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
62971 +#ifndef CONFIG_GRKERNSEC_SYSCTL
62972 + grsec_lock = 1;
62973 +#endif
62974 +
62975 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
62976 + grsec_enable_audit_textrel = 1;
62977 +#endif
62978 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
62979 + grsec_enable_log_rwxmaps = 1;
62980 +#endif
62981 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
62982 + grsec_enable_group = 1;
62983 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
62984 +#endif
62985 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
62986 + grsec_enable_chdir = 1;
62987 +#endif
62988 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
62989 + grsec_enable_harden_ptrace = 1;
62990 +#endif
62991 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
62992 + grsec_enable_mount = 1;
62993 +#endif
62994 +#ifdef CONFIG_GRKERNSEC_LINK
62995 + grsec_enable_link = 1;
62996 +#endif
62997 +#ifdef CONFIG_GRKERNSEC_BRUTE
62998 + grsec_enable_brute = 1;
62999 +#endif
63000 +#ifdef CONFIG_GRKERNSEC_DMESG
63001 + grsec_enable_dmesg = 1;
63002 +#endif
63003 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
63004 + grsec_enable_blackhole = 1;
63005 + grsec_lastack_retries = 4;
63006 +#endif
63007 +#ifdef CONFIG_GRKERNSEC_FIFO
63008 + grsec_enable_fifo = 1;
63009 +#endif
63010 +#ifdef CONFIG_GRKERNSEC_EXECLOG
63011 + grsec_enable_execlog = 1;
63012 +#endif
63013 +#ifdef CONFIG_GRKERNSEC_SIGNAL
63014 + grsec_enable_signal = 1;
63015 +#endif
63016 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
63017 + grsec_enable_forkfail = 1;
63018 +#endif
63019 +#ifdef CONFIG_GRKERNSEC_TIME
63020 + grsec_enable_time = 1;
63021 +#endif
63022 +#ifdef CONFIG_GRKERNSEC_RESLOG
63023 + grsec_resource_logging = 1;
63024 +#endif
63025 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
63026 + grsec_enable_chroot_findtask = 1;
63027 +#endif
63028 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
63029 + grsec_enable_chroot_unix = 1;
63030 +#endif
63031 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
63032 + grsec_enable_chroot_mount = 1;
63033 +#endif
63034 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
63035 + grsec_enable_chroot_fchdir = 1;
63036 +#endif
63037 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
63038 + grsec_enable_chroot_shmat = 1;
63039 +#endif
63040 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
63041 + grsec_enable_audit_ptrace = 1;
63042 +#endif
63043 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
63044 + grsec_enable_chroot_double = 1;
63045 +#endif
63046 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
63047 + grsec_enable_chroot_pivot = 1;
63048 +#endif
63049 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
63050 + grsec_enable_chroot_chdir = 1;
63051 +#endif
63052 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
63053 + grsec_enable_chroot_chmod = 1;
63054 +#endif
63055 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
63056 + grsec_enable_chroot_mknod = 1;
63057 +#endif
63058 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
63059 + grsec_enable_chroot_nice = 1;
63060 +#endif
63061 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
63062 + grsec_enable_chroot_execlog = 1;
63063 +#endif
63064 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63065 + grsec_enable_chroot_caps = 1;
63066 +#endif
63067 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
63068 + grsec_enable_chroot_sysctl = 1;
63069 +#endif
63070 +#ifdef CONFIG_GRKERNSEC_TPE
63071 + grsec_enable_tpe = 1;
63072 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
63073 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
63074 + grsec_enable_tpe_all = 1;
63075 +#endif
63076 +#endif
63077 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
63078 + grsec_enable_socket_all = 1;
63079 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
63080 +#endif
63081 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
63082 + grsec_enable_socket_client = 1;
63083 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
63084 +#endif
63085 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
63086 + grsec_enable_socket_server = 1;
63087 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
63088 +#endif
63089 +#endif
63090 +
63091 + return;
63092 +}
63093 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
63094 new file mode 100644
63095 index 0000000..3efe141
63096 --- /dev/null
63097 +++ b/grsecurity/grsec_link.c
63098 @@ -0,0 +1,43 @@
63099 +#include <linux/kernel.h>
63100 +#include <linux/sched.h>
63101 +#include <linux/fs.h>
63102 +#include <linux/file.h>
63103 +#include <linux/grinternal.h>
63104 +
63105 +int
63106 +gr_handle_follow_link(const struct inode *parent,
63107 + const struct inode *inode,
63108 + const struct dentry *dentry, const struct vfsmount *mnt)
63109 +{
63110 +#ifdef CONFIG_GRKERNSEC_LINK
63111 + const struct cred *cred = current_cred();
63112 +
63113 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
63114 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
63115 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
63116 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
63117 + return -EACCES;
63118 + }
63119 +#endif
63120 + return 0;
63121 +}
63122 +
63123 +int
63124 +gr_handle_hardlink(const struct dentry *dentry,
63125 + const struct vfsmount *mnt,
63126 + struct inode *inode, const int mode, const char *to)
63127 +{
63128 +#ifdef CONFIG_GRKERNSEC_LINK
63129 + const struct cred *cred = current_cred();
63130 +
63131 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
63132 + (!S_ISREG(mode) || (mode & S_ISUID) ||
63133 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
63134 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
63135 + !capable(CAP_FOWNER) && cred->uid) {
63136 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
63137 + return -EPERM;
63138 + }
63139 +#endif
63140 + return 0;
63141 +}
63142 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
63143 new file mode 100644
63144 index 0000000..a45d2e9
63145 --- /dev/null
63146 +++ b/grsecurity/grsec_log.c
63147 @@ -0,0 +1,322 @@
63148 +#include <linux/kernel.h>
63149 +#include <linux/sched.h>
63150 +#include <linux/file.h>
63151 +#include <linux/tty.h>
63152 +#include <linux/fs.h>
63153 +#include <linux/grinternal.h>
63154 +
63155 +#ifdef CONFIG_TREE_PREEMPT_RCU
63156 +#define DISABLE_PREEMPT() preempt_disable()
63157 +#define ENABLE_PREEMPT() preempt_enable()
63158 +#else
63159 +#define DISABLE_PREEMPT()
63160 +#define ENABLE_PREEMPT()
63161 +#endif
63162 +
63163 +#define BEGIN_LOCKS(x) \
63164 + DISABLE_PREEMPT(); \
63165 + rcu_read_lock(); \
63166 + read_lock(&tasklist_lock); \
63167 + read_lock(&grsec_exec_file_lock); \
63168 + if (x != GR_DO_AUDIT) \
63169 + spin_lock(&grsec_alert_lock); \
63170 + else \
63171 + spin_lock(&grsec_audit_lock)
63172 +
63173 +#define END_LOCKS(x) \
63174 + if (x != GR_DO_AUDIT) \
63175 + spin_unlock(&grsec_alert_lock); \
63176 + else \
63177 + spin_unlock(&grsec_audit_lock); \
63178 + read_unlock(&grsec_exec_file_lock); \
63179 + read_unlock(&tasklist_lock); \
63180 + rcu_read_unlock(); \
63181 + ENABLE_PREEMPT(); \
63182 + if (x == GR_DONT_AUDIT) \
63183 + gr_handle_alertkill(current)
63184 +
63185 +enum {
63186 + FLOODING,
63187 + NO_FLOODING
63188 +};
63189 +
63190 +extern char *gr_alert_log_fmt;
63191 +extern char *gr_audit_log_fmt;
63192 +extern char *gr_alert_log_buf;
63193 +extern char *gr_audit_log_buf;
63194 +
63195 +static int gr_log_start(int audit)
63196 +{
63197 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
63198 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
63199 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63200 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
63201 + unsigned long curr_secs = get_seconds();
63202 +
63203 + if (audit == GR_DO_AUDIT)
63204 + goto set_fmt;
63205 +
63206 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
63207 + grsec_alert_wtime = curr_secs;
63208 + grsec_alert_fyet = 0;
63209 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
63210 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
63211 + grsec_alert_fyet++;
63212 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
63213 + grsec_alert_wtime = curr_secs;
63214 + grsec_alert_fyet++;
63215 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
63216 + return FLOODING;
63217 + }
63218 + else return FLOODING;
63219 +
63220 +set_fmt:
63221 +#endif
63222 + memset(buf, 0, PAGE_SIZE);
63223 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
63224 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
63225 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
63226 + } else if (current->signal->curr_ip) {
63227 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
63228 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
63229 + } else if (gr_acl_is_enabled()) {
63230 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
63231 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
63232 + } else {
63233 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
63234 + strcpy(buf, fmt);
63235 + }
63236 +
63237 + return NO_FLOODING;
63238 +}
63239 +
63240 +static void gr_log_middle(int audit, const char *msg, va_list ap)
63241 + __attribute__ ((format (printf, 2, 0)));
63242 +
63243 +static void gr_log_middle(int audit, const char *msg, va_list ap)
63244 +{
63245 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63246 + unsigned int len = strlen(buf);
63247 +
63248 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
63249 +
63250 + return;
63251 +}
63252 +
63253 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
63254 + __attribute__ ((format (printf, 2, 3)));
63255 +
63256 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
63257 +{
63258 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63259 + unsigned int len = strlen(buf);
63260 + va_list ap;
63261 +
63262 + va_start(ap, msg);
63263 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
63264 + va_end(ap);
63265 +
63266 + return;
63267 +}
63268 +
63269 +static void gr_log_end(int audit, int append_default)
63270 +{
63271 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63272 +
63273 + if (append_default) {
63274 + unsigned int len = strlen(buf);
63275 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
63276 + }
63277 +
63278 + printk("%s\n", buf);
63279 +
63280 + return;
63281 +}
63282 +
63283 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
63284 +{
63285 + int logtype;
63286 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
63287 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
63288 + void *voidptr = NULL;
63289 + int num1 = 0, num2 = 0;
63290 + unsigned long ulong1 = 0, ulong2 = 0;
63291 + struct dentry *dentry = NULL;
63292 + struct vfsmount *mnt = NULL;
63293 + struct file *file = NULL;
63294 + struct task_struct *task = NULL;
63295 + const struct cred *cred, *pcred;
63296 + va_list ap;
63297 +
63298 + BEGIN_LOCKS(audit);
63299 + logtype = gr_log_start(audit);
63300 + if (logtype == FLOODING) {
63301 + END_LOCKS(audit);
63302 + return;
63303 + }
63304 + va_start(ap, argtypes);
63305 + switch (argtypes) {
63306 + case GR_TTYSNIFF:
63307 + task = va_arg(ap, struct task_struct *);
63308 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
63309 + break;
63310 + case GR_SYSCTL_HIDDEN:
63311 + str1 = va_arg(ap, char *);
63312 + gr_log_middle_varargs(audit, msg, result, str1);
63313 + break;
63314 + case GR_RBAC:
63315 + dentry = va_arg(ap, struct dentry *);
63316 + mnt = va_arg(ap, struct vfsmount *);
63317 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
63318 + break;
63319 + case GR_RBAC_STR:
63320 + dentry = va_arg(ap, struct dentry *);
63321 + mnt = va_arg(ap, struct vfsmount *);
63322 + str1 = va_arg(ap, char *);
63323 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
63324 + break;
63325 + case GR_STR_RBAC:
63326 + str1 = va_arg(ap, char *);
63327 + dentry = va_arg(ap, struct dentry *);
63328 + mnt = va_arg(ap, struct vfsmount *);
63329 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
63330 + break;
63331 + case GR_RBAC_MODE2:
63332 + dentry = va_arg(ap, struct dentry *);
63333 + mnt = va_arg(ap, struct vfsmount *);
63334 + str1 = va_arg(ap, char *);
63335 + str2 = va_arg(ap, char *);
63336 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
63337 + break;
63338 + case GR_RBAC_MODE3:
63339 + dentry = va_arg(ap, struct dentry *);
63340 + mnt = va_arg(ap, struct vfsmount *);
63341 + str1 = va_arg(ap, char *);
63342 + str2 = va_arg(ap, char *);
63343 + str3 = va_arg(ap, char *);
63344 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
63345 + break;
63346 + case GR_FILENAME:
63347 + dentry = va_arg(ap, struct dentry *);
63348 + mnt = va_arg(ap, struct vfsmount *);
63349 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
63350 + break;
63351 + case GR_STR_FILENAME:
63352 + str1 = va_arg(ap, char *);
63353 + dentry = va_arg(ap, struct dentry *);
63354 + mnt = va_arg(ap, struct vfsmount *);
63355 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
63356 + break;
63357 + case GR_FILENAME_STR:
63358 + dentry = va_arg(ap, struct dentry *);
63359 + mnt = va_arg(ap, struct vfsmount *);
63360 + str1 = va_arg(ap, char *);
63361 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
63362 + break;
63363 + case GR_FILENAME_TWO_INT:
63364 + dentry = va_arg(ap, struct dentry *);
63365 + mnt = va_arg(ap, struct vfsmount *);
63366 + num1 = va_arg(ap, int);
63367 + num2 = va_arg(ap, int);
63368 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
63369 + break;
63370 + case GR_FILENAME_TWO_INT_STR:
63371 + dentry = va_arg(ap, struct dentry *);
63372 + mnt = va_arg(ap, struct vfsmount *);
63373 + num1 = va_arg(ap, int);
63374 + num2 = va_arg(ap, int);
63375 + str1 = va_arg(ap, char *);
63376 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
63377 + break;
63378 + case GR_TEXTREL:
63379 + file = va_arg(ap, struct file *);
63380 + ulong1 = va_arg(ap, unsigned long);
63381 + ulong2 = va_arg(ap, unsigned long);
63382 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
63383 + break;
63384 + case GR_PTRACE:
63385 + task = va_arg(ap, struct task_struct *);
63386 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
63387 + break;
63388 + case GR_RESOURCE:
63389 + task = va_arg(ap, struct task_struct *);
63390 + cred = __task_cred(task);
63391 + pcred = __task_cred(task->real_parent);
63392 + ulong1 = va_arg(ap, unsigned long);
63393 + str1 = va_arg(ap, char *);
63394 + ulong2 = va_arg(ap, unsigned long);
63395 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
63396 + break;
63397 + case GR_CAP:
63398 + task = va_arg(ap, struct task_struct *);
63399 + cred = __task_cred(task);
63400 + pcred = __task_cred(task->real_parent);
63401 + str1 = va_arg(ap, char *);
63402 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
63403 + break;
63404 + case GR_SIG:
63405 + str1 = va_arg(ap, char *);
63406 + voidptr = va_arg(ap, void *);
63407 + gr_log_middle_varargs(audit, msg, str1, voidptr);
63408 + break;
63409 + case GR_SIG2:
63410 + task = va_arg(ap, struct task_struct *);
63411 + cred = __task_cred(task);
63412 + pcred = __task_cred(task->real_parent);
63413 + num1 = va_arg(ap, int);
63414 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
63415 + break;
63416 + case GR_CRASH1:
63417 + task = va_arg(ap, struct task_struct *);
63418 + cred = __task_cred(task);
63419 + pcred = __task_cred(task->real_parent);
63420 + ulong1 = va_arg(ap, unsigned long);
63421 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
63422 + break;
63423 + case GR_CRASH2:
63424 + task = va_arg(ap, struct task_struct *);
63425 + cred = __task_cred(task);
63426 + pcred = __task_cred(task->real_parent);
63427 + ulong1 = va_arg(ap, unsigned long);
63428 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
63429 + break;
63430 + case GR_RWXMAP:
63431 + file = va_arg(ap, struct file *);
63432 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
63433 + break;
63434 + case GR_PSACCT:
63435 + {
63436 + unsigned int wday, cday;
63437 + __u8 whr, chr;
63438 + __u8 wmin, cmin;
63439 + __u8 wsec, csec;
63440 + char cur_tty[64] = { 0 };
63441 + char parent_tty[64] = { 0 };
63442 +
63443 + task = va_arg(ap, struct task_struct *);
63444 + wday = va_arg(ap, unsigned int);
63445 + cday = va_arg(ap, unsigned int);
63446 + whr = va_arg(ap, int);
63447 + chr = va_arg(ap, int);
63448 + wmin = va_arg(ap, int);
63449 + cmin = va_arg(ap, int);
63450 + wsec = va_arg(ap, int);
63451 + csec = va_arg(ap, int);
63452 + ulong1 = va_arg(ap, unsigned long);
63453 + cred = __task_cred(task);
63454 + pcred = __task_cred(task->real_parent);
63455 +
63456 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
63457 + }
63458 + break;
63459 + default:
63460 + gr_log_middle(audit, msg, ap);
63461 + }
63462 + va_end(ap);
63463 + // these don't need DEFAULTSECARGS printed on the end
63464 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
63465 + gr_log_end(audit, 0);
63466 + else
63467 + gr_log_end(audit, 1);
63468 + END_LOCKS(audit);
63469 +}
63470 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
63471 new file mode 100644
63472 index 0000000..6c0416b
63473 --- /dev/null
63474 +++ b/grsecurity/grsec_mem.c
63475 @@ -0,0 +1,33 @@
63476 +#include <linux/kernel.h>
63477 +#include <linux/sched.h>
63478 +#include <linux/mm.h>
63479 +#include <linux/mman.h>
63480 +#include <linux/grinternal.h>
63481 +
63482 +void
63483 +gr_handle_ioperm(void)
63484 +{
63485 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
63486 + return;
63487 +}
63488 +
63489 +void
63490 +gr_handle_iopl(void)
63491 +{
63492 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
63493 + return;
63494 +}
63495 +
63496 +void
63497 +gr_handle_mem_readwrite(u64 from, u64 to)
63498 +{
63499 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
63500 + return;
63501 +}
63502 +
63503 +void
63504 +gr_handle_vm86(void)
63505 +{
63506 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
63507 + return;
63508 +}
63509 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
63510 new file mode 100644
63511 index 0000000..2131422
63512 --- /dev/null
63513 +++ b/grsecurity/grsec_mount.c
63514 @@ -0,0 +1,62 @@
63515 +#include <linux/kernel.h>
63516 +#include <linux/sched.h>
63517 +#include <linux/mount.h>
63518 +#include <linux/grsecurity.h>
63519 +#include <linux/grinternal.h>
63520 +
63521 +void
63522 +gr_log_remount(const char *devname, const int retval)
63523 +{
63524 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
63525 + if (grsec_enable_mount && (retval >= 0))
63526 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
63527 +#endif
63528 + return;
63529 +}
63530 +
63531 +void
63532 +gr_log_unmount(const char *devname, const int retval)
63533 +{
63534 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
63535 + if (grsec_enable_mount && (retval >= 0))
63536 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
63537 +#endif
63538 + return;
63539 +}
63540 +
63541 +void
63542 +gr_log_mount(const char *from, const char *to, const int retval)
63543 +{
63544 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
63545 + if (grsec_enable_mount && (retval >= 0))
63546 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
63547 +#endif
63548 + return;
63549 +}
63550 +
63551 +int
63552 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
63553 +{
63554 +#ifdef CONFIG_GRKERNSEC_ROFS
63555 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
63556 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
63557 + return -EPERM;
63558 + } else
63559 + return 0;
63560 +#endif
63561 + return 0;
63562 +}
63563 +
63564 +int
63565 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
63566 +{
63567 +#ifdef CONFIG_GRKERNSEC_ROFS
63568 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
63569 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
63570 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
63571 + return -EPERM;
63572 + } else
63573 + return 0;
63574 +#endif
63575 + return 0;
63576 +}
63577 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
63578 new file mode 100644
63579 index 0000000..a3b12a0
63580 --- /dev/null
63581 +++ b/grsecurity/grsec_pax.c
63582 @@ -0,0 +1,36 @@
63583 +#include <linux/kernel.h>
63584 +#include <linux/sched.h>
63585 +#include <linux/mm.h>
63586 +#include <linux/file.h>
63587 +#include <linux/grinternal.h>
63588 +#include <linux/grsecurity.h>
63589 +
63590 +void
63591 +gr_log_textrel(struct vm_area_struct * vma)
63592 +{
63593 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
63594 + if (grsec_enable_audit_textrel)
63595 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
63596 +#endif
63597 + return;
63598 +}
63599 +
63600 +void
63601 +gr_log_rwxmmap(struct file *file)
63602 +{
63603 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
63604 + if (grsec_enable_log_rwxmaps)
63605 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
63606 +#endif
63607 + return;
63608 +}
63609 +
63610 +void
63611 +gr_log_rwxmprotect(struct file *file)
63612 +{
63613 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
63614 + if (grsec_enable_log_rwxmaps)
63615 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
63616 +#endif
63617 + return;
63618 +}
63619 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
63620 new file mode 100644
63621 index 0000000..472c1d6
63622 --- /dev/null
63623 +++ b/grsecurity/grsec_ptrace.c
63624 @@ -0,0 +1,14 @@
63625 +#include <linux/kernel.h>
63626 +#include <linux/sched.h>
63627 +#include <linux/grinternal.h>
63628 +#include <linux/grsecurity.h>
63629 +
63630 +void
63631 +gr_audit_ptrace(struct task_struct *task)
63632 +{
63633 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
63634 + if (grsec_enable_audit_ptrace)
63635 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
63636 +#endif
63637 + return;
63638 +}
63639 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
63640 new file mode 100644
63641 index 0000000..c648492
63642 --- /dev/null
63643 +++ b/grsecurity/grsec_sig.c
63644 @@ -0,0 +1,206 @@
63645 +#include <linux/kernel.h>
63646 +#include <linux/sched.h>
63647 +#include <linux/delay.h>
63648 +#include <linux/grsecurity.h>
63649 +#include <linux/grinternal.h>
63650 +#include <linux/hardirq.h>
63651 +
63652 +char *signames[] = {
63653 + [SIGSEGV] = "Segmentation fault",
63654 + [SIGILL] = "Illegal instruction",
63655 + [SIGABRT] = "Abort",
63656 + [SIGBUS] = "Invalid alignment/Bus error"
63657 +};
63658 +
63659 +void
63660 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
63661 +{
63662 +#ifdef CONFIG_GRKERNSEC_SIGNAL
63663 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
63664 + (sig == SIGABRT) || (sig == SIGBUS))) {
63665 + if (t->pid == current->pid) {
63666 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
63667 + } else {
63668 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
63669 + }
63670 + }
63671 +#endif
63672 + return;
63673 +}
63674 +
63675 +int
63676 +gr_handle_signal(const struct task_struct *p, const int sig)
63677 +{
63678 +#ifdef CONFIG_GRKERNSEC
63679 + /* ignore the 0 signal for protected task checks */
63680 + if (current->pid > 1 && sig && gr_check_protected_task(p)) {
63681 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
63682 + return -EPERM;
63683 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
63684 + return -EPERM;
63685 + }
63686 +#endif
63687 + return 0;
63688 +}
63689 +
63690 +#ifdef CONFIG_GRKERNSEC
63691 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
63692 +
63693 +int gr_fake_force_sig(int sig, struct task_struct *t)
63694 +{
63695 + unsigned long int flags;
63696 + int ret, blocked, ignored;
63697 + struct k_sigaction *action;
63698 +
63699 + spin_lock_irqsave(&t->sighand->siglock, flags);
63700 + action = &t->sighand->action[sig-1];
63701 + ignored = action->sa.sa_handler == SIG_IGN;
63702 + blocked = sigismember(&t->blocked, sig);
63703 + if (blocked || ignored) {
63704 + action->sa.sa_handler = SIG_DFL;
63705 + if (blocked) {
63706 + sigdelset(&t->blocked, sig);
63707 + recalc_sigpending_and_wake(t);
63708 + }
63709 + }
63710 + if (action->sa.sa_handler == SIG_DFL)
63711 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
63712 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
63713 +
63714 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
63715 +
63716 + return ret;
63717 +}
63718 +#endif
63719 +
63720 +#ifdef CONFIG_GRKERNSEC_BRUTE
63721 +#define GR_USER_BAN_TIME (15 * 60)
63722 +
63723 +static int __get_dumpable(unsigned long mm_flags)
63724 +{
63725 + int ret;
63726 +
63727 + ret = mm_flags & MMF_DUMPABLE_MASK;
63728 + return (ret >= 2) ? 2 : ret;
63729 +}
63730 +#endif
63731 +
63732 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
63733 +{
63734 +#ifdef CONFIG_GRKERNSEC_BRUTE
63735 + uid_t uid = 0;
63736 +
63737 + if (!grsec_enable_brute)
63738 + return;
63739 +
63740 + rcu_read_lock();
63741 + read_lock(&tasklist_lock);
63742 + read_lock(&grsec_exec_file_lock);
63743 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
63744 + p->real_parent->brute = 1;
63745 + else {
63746 + const struct cred *cred = __task_cred(p), *cred2;
63747 + struct task_struct *tsk, *tsk2;
63748 +
63749 + if (!__get_dumpable(mm_flags) && cred->uid) {
63750 + struct user_struct *user;
63751 +
63752 + uid = cred->uid;
63753 +
63754 + /* this is put upon execution past expiration */
63755 + user = find_user(uid);
63756 + if (user == NULL)
63757 + goto unlock;
63758 + user->banned = 1;
63759 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
63760 + if (user->ban_expires == ~0UL)
63761 + user->ban_expires--;
63762 +
63763 + do_each_thread(tsk2, tsk) {
63764 + cred2 = __task_cred(tsk);
63765 + if (tsk != p && cred2->uid == uid)
63766 + gr_fake_force_sig(SIGKILL, tsk);
63767 + } while_each_thread(tsk2, tsk);
63768 + }
63769 + }
63770 +unlock:
63771 + read_unlock(&grsec_exec_file_lock);
63772 + read_unlock(&tasklist_lock);
63773 + rcu_read_unlock();
63774 +
63775 + if (uid)
63776 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
63777 +#endif
63778 + return;
63779 +}
63780 +
63781 +void gr_handle_brute_check(void)
63782 +{
63783 +#ifdef CONFIG_GRKERNSEC_BRUTE
63784 + if (current->brute)
63785 + msleep(30 * 1000);
63786 +#endif
63787 + return;
63788 +}
63789 +
63790 +void gr_handle_kernel_exploit(void)
63791 +{
63792 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
63793 + const struct cred *cred;
63794 + struct task_struct *tsk, *tsk2;
63795 + struct user_struct *user;
63796 + uid_t uid;
63797 +
63798 + if (in_irq() || in_serving_softirq() || in_nmi())
63799 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
63800 +
63801 + uid = current_uid();
63802 +
63803 + if (uid == 0)
63804 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
63805 + else {
63806 + /* kill all the processes of this user, hold a reference
63807 + to their creds struct, and prevent them from creating
63808 + another process until system reset
63809 + */
63810 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
63811 + /* we intentionally leak this ref */
63812 + user = get_uid(current->cred->user);
63813 + if (user) {
63814 + user->banned = 1;
63815 + user->ban_expires = ~0UL;
63816 + }
63817 +
63818 + read_lock(&tasklist_lock);
63819 + do_each_thread(tsk2, tsk) {
63820 + cred = __task_cred(tsk);
63821 + if (cred->uid == uid)
63822 + gr_fake_force_sig(SIGKILL, tsk);
63823 + } while_each_thread(tsk2, tsk);
63824 + read_unlock(&tasklist_lock);
63825 + }
63826 +#endif
63827 +}
63828 +
63829 +int __gr_process_user_ban(struct user_struct *user)
63830 +{
63831 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
63832 + if (unlikely(user->banned)) {
63833 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
63834 + user->banned = 0;
63835 + user->ban_expires = 0;
63836 + free_uid(user);
63837 + } else
63838 + return -EPERM;
63839 + }
63840 +#endif
63841 + return 0;
63842 +}
63843 +
63844 +int gr_process_user_ban(void)
63845 +{
63846 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
63847 + return __gr_process_user_ban(current->cred->user);
63848 +#endif
63849 + return 0;
63850 +}
63851 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
63852 new file mode 100644
63853 index 0000000..7512ea9
63854 --- /dev/null
63855 +++ b/grsecurity/grsec_sock.c
63856 @@ -0,0 +1,275 @@
63857 +#include <linux/kernel.h>
63858 +#include <linux/module.h>
63859 +#include <linux/sched.h>
63860 +#include <linux/file.h>
63861 +#include <linux/net.h>
63862 +#include <linux/in.h>
63863 +#include <linux/ip.h>
63864 +#include <net/sock.h>
63865 +#include <net/inet_sock.h>
63866 +#include <linux/grsecurity.h>
63867 +#include <linux/grinternal.h>
63868 +#include <linux/gracl.h>
63869 +
63870 +kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
63871 +EXPORT_SYMBOL(gr_cap_rtnetlink);
63872 +
63873 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
63874 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
63875 +
63876 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
63877 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
63878 +
63879 +#ifdef CONFIG_UNIX_MODULE
63880 +EXPORT_SYMBOL(gr_acl_handle_unix);
63881 +EXPORT_SYMBOL(gr_acl_handle_mknod);
63882 +EXPORT_SYMBOL(gr_handle_chroot_unix);
63883 +EXPORT_SYMBOL(gr_handle_create);
63884 +#endif
63885 +
63886 +#ifdef CONFIG_GRKERNSEC
63887 +#define gr_conn_table_size 32749
63888 +struct conn_table_entry {
63889 + struct conn_table_entry *next;
63890 + struct signal_struct *sig;
63891 +};
63892 +
63893 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
63894 +DEFINE_SPINLOCK(gr_conn_table_lock);
63895 +
63896 +extern const char * gr_socktype_to_name(unsigned char type);
63897 +extern const char * gr_proto_to_name(unsigned char proto);
63898 +extern const char * gr_sockfamily_to_name(unsigned char family);
63899 +
63900 +static __inline__ int
63901 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
63902 +{
63903 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
63904 +}
63905 +
63906 +static __inline__ int
63907 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
63908 + __u16 sport, __u16 dport)
63909 +{
63910 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
63911 + sig->gr_sport == sport && sig->gr_dport == dport))
63912 + return 1;
63913 + else
63914 + return 0;
63915 +}
63916 +
63917 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
63918 +{
63919 + struct conn_table_entry **match;
63920 + unsigned int index;
63921 +
63922 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
63923 + sig->gr_sport, sig->gr_dport,
63924 + gr_conn_table_size);
63925 +
63926 + newent->sig = sig;
63927 +
63928 + match = &gr_conn_table[index];
63929 + newent->next = *match;
63930 + *match = newent;
63931 +
63932 + return;
63933 +}
63934 +
63935 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
63936 +{
63937 + struct conn_table_entry *match, *last = NULL;
63938 + unsigned int index;
63939 +
63940 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
63941 + sig->gr_sport, sig->gr_dport,
63942 + gr_conn_table_size);
63943 +
63944 + match = gr_conn_table[index];
63945 + while (match && !conn_match(match->sig,
63946 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
63947 + sig->gr_dport)) {
63948 + last = match;
63949 + match = match->next;
63950 + }
63951 +
63952 + if (match) {
63953 + if (last)
63954 + last->next = match->next;
63955 + else
63956 + gr_conn_table[index] = NULL;
63957 + kfree(match);
63958 + }
63959 +
63960 + return;
63961 +}
63962 +
63963 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
63964 + __u16 sport, __u16 dport)
63965 +{
63966 + struct conn_table_entry *match;
63967 + unsigned int index;
63968 +
63969 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
63970 +
63971 + match = gr_conn_table[index];
63972 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
63973 + match = match->next;
63974 +
63975 + if (match)
63976 + return match->sig;
63977 + else
63978 + return NULL;
63979 +}
63980 +
63981 +#endif
63982 +
63983 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
63984 +{
63985 +#ifdef CONFIG_GRKERNSEC
63986 + struct signal_struct *sig = task->signal;
63987 + struct conn_table_entry *newent;
63988 +
63989 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
63990 + if (newent == NULL)
63991 + return;
63992 + /* no bh lock needed since we are called with bh disabled */
63993 + spin_lock(&gr_conn_table_lock);
63994 + gr_del_task_from_ip_table_nolock(sig);
63995 + sig->gr_saddr = inet->rcv_saddr;
63996 + sig->gr_daddr = inet->daddr;
63997 + sig->gr_sport = inet->sport;
63998 + sig->gr_dport = inet->dport;
63999 + gr_add_to_task_ip_table_nolock(sig, newent);
64000 + spin_unlock(&gr_conn_table_lock);
64001 +#endif
64002 + return;
64003 +}
64004 +
64005 +void gr_del_task_from_ip_table(struct task_struct *task)
64006 +{
64007 +#ifdef CONFIG_GRKERNSEC
64008 + spin_lock_bh(&gr_conn_table_lock);
64009 + gr_del_task_from_ip_table_nolock(task->signal);
64010 + spin_unlock_bh(&gr_conn_table_lock);
64011 +#endif
64012 + return;
64013 +}
64014 +
64015 +void
64016 +gr_attach_curr_ip(const struct sock *sk)
64017 +{
64018 +#ifdef CONFIG_GRKERNSEC
64019 + struct signal_struct *p, *set;
64020 + const struct inet_sock *inet = inet_sk(sk);
64021 +
64022 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
64023 + return;
64024 +
64025 + set = current->signal;
64026 +
64027 + spin_lock_bh(&gr_conn_table_lock);
64028 + p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
64029 + inet->dport, inet->sport);
64030 + if (unlikely(p != NULL)) {
64031 + set->curr_ip = p->curr_ip;
64032 + set->used_accept = 1;
64033 + gr_del_task_from_ip_table_nolock(p);
64034 + spin_unlock_bh(&gr_conn_table_lock);
64035 + return;
64036 + }
64037 + spin_unlock_bh(&gr_conn_table_lock);
64038 +
64039 + set->curr_ip = inet->daddr;
64040 + set->used_accept = 1;
64041 +#endif
64042 + return;
64043 +}
64044 +
64045 +int
64046 +gr_handle_sock_all(const int family, const int type, const int protocol)
64047 +{
64048 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
64049 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
64050 + (family != AF_UNIX)) {
64051 + if (family == AF_INET)
64052 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
64053 + else
64054 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
64055 + return -EACCES;
64056 + }
64057 +#endif
64058 + return 0;
64059 +}
64060 +
64061 +int
64062 +gr_handle_sock_server(const struct sockaddr *sck)
64063 +{
64064 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64065 + if (grsec_enable_socket_server &&
64066 + in_group_p(grsec_socket_server_gid) &&
64067 + sck && (sck->sa_family != AF_UNIX) &&
64068 + (sck->sa_family != AF_LOCAL)) {
64069 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
64070 + return -EACCES;
64071 + }
64072 +#endif
64073 + return 0;
64074 +}
64075 +
64076 +int
64077 +gr_handle_sock_server_other(const struct sock *sck)
64078 +{
64079 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64080 + if (grsec_enable_socket_server &&
64081 + in_group_p(grsec_socket_server_gid) &&
64082 + sck && (sck->sk_family != AF_UNIX) &&
64083 + (sck->sk_family != AF_LOCAL)) {
64084 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
64085 + return -EACCES;
64086 + }
64087 +#endif
64088 + return 0;
64089 +}
64090 +
64091 +int
64092 +gr_handle_sock_client(const struct sockaddr *sck)
64093 +{
64094 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
64095 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
64096 + sck && (sck->sa_family != AF_UNIX) &&
64097 + (sck->sa_family != AF_LOCAL)) {
64098 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
64099 + return -EACCES;
64100 + }
64101 +#endif
64102 + return 0;
64103 +}
64104 +
64105 +kernel_cap_t
64106 +gr_cap_rtnetlink(struct sock *sock)
64107 +{
64108 +#ifdef CONFIG_GRKERNSEC
64109 + if (!gr_acl_is_enabled())
64110 + return current_cap();
64111 + else if (sock->sk_protocol == NETLINK_ISCSI &&
64112 + cap_raised(current_cap(), CAP_SYS_ADMIN) &&
64113 + gr_is_capable(CAP_SYS_ADMIN))
64114 + return current_cap();
64115 + else if (sock->sk_protocol == NETLINK_AUDIT &&
64116 + cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
64117 + gr_is_capable(CAP_AUDIT_WRITE) &&
64118 + cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
64119 + gr_is_capable(CAP_AUDIT_CONTROL))
64120 + return current_cap();
64121 + else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
64122 + ((sock->sk_protocol == NETLINK_ROUTE) ?
64123 + gr_is_capable_nolog(CAP_NET_ADMIN) :
64124 + gr_is_capable(CAP_NET_ADMIN)))
64125 + return current_cap();
64126 + else
64127 + return __cap_empty_set;
64128 +#else
64129 + return current_cap();
64130 +#endif
64131 +}
64132 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
64133 new file mode 100644
64134 index 0000000..2753505
64135 --- /dev/null
64136 +++ b/grsecurity/grsec_sysctl.c
64137 @@ -0,0 +1,479 @@
64138 +#include <linux/kernel.h>
64139 +#include <linux/sched.h>
64140 +#include <linux/sysctl.h>
64141 +#include <linux/grsecurity.h>
64142 +#include <linux/grinternal.h>
64143 +
64144 +int
64145 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
64146 +{
64147 +#ifdef CONFIG_GRKERNSEC_SYSCTL
64148 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
64149 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
64150 + return -EACCES;
64151 + }
64152 +#endif
64153 + return 0;
64154 +}
64155 +
64156 +#ifdef CONFIG_GRKERNSEC_ROFS
64157 +static int __maybe_unused one = 1;
64158 +#endif
64159 +
64160 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
64161 +ctl_table grsecurity_table[] = {
64162 +#ifdef CONFIG_GRKERNSEC_SYSCTL
64163 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
64164 +#ifdef CONFIG_GRKERNSEC_IO
64165 + {
64166 + .ctl_name = CTL_UNNUMBERED,
64167 + .procname = "disable_priv_io",
64168 + .data = &grsec_disable_privio,
64169 + .maxlen = sizeof(int),
64170 + .mode = 0600,
64171 + .proc_handler = &proc_dointvec,
64172 + },
64173 +#endif
64174 +#endif
64175 +#ifdef CONFIG_GRKERNSEC_LINK
64176 + {
64177 + .ctl_name = CTL_UNNUMBERED,
64178 + .procname = "linking_restrictions",
64179 + .data = &grsec_enable_link,
64180 + .maxlen = sizeof(int),
64181 + .mode = 0600,
64182 + .proc_handler = &proc_dointvec,
64183 + },
64184 +#endif
64185 +#ifdef CONFIG_GRKERNSEC_BRUTE
64186 + {
64187 + .ctl_name = CTL_UNNUMBERED,
64188 + .procname = "deter_bruteforce",
64189 + .data = &grsec_enable_brute,
64190 + .maxlen = sizeof(int),
64191 + .mode = 0600,
64192 + .proc_handler = &proc_dointvec,
64193 + },
64194 +#endif
64195 +#ifdef CONFIG_GRKERNSEC_FIFO
64196 + {
64197 + .ctl_name = CTL_UNNUMBERED,
64198 + .procname = "fifo_restrictions",
64199 + .data = &grsec_enable_fifo,
64200 + .maxlen = sizeof(int),
64201 + .mode = 0600,
64202 + .proc_handler = &proc_dointvec,
64203 + },
64204 +#endif
64205 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
64206 + {
64207 + .ctl_name = CTL_UNNUMBERED,
64208 + .procname = "ip_blackhole",
64209 + .data = &grsec_enable_blackhole,
64210 + .maxlen = sizeof(int),
64211 + .mode = 0600,
64212 + .proc_handler = &proc_dointvec,
64213 + },
64214 + {
64215 + .ctl_name = CTL_UNNUMBERED,
64216 + .procname = "lastack_retries",
64217 + .data = &grsec_lastack_retries,
64218 + .maxlen = sizeof(int),
64219 + .mode = 0600,
64220 + .proc_handler = &proc_dointvec,
64221 + },
64222 +#endif
64223 +#ifdef CONFIG_GRKERNSEC_EXECLOG
64224 + {
64225 + .ctl_name = CTL_UNNUMBERED,
64226 + .procname = "exec_logging",
64227 + .data = &grsec_enable_execlog,
64228 + .maxlen = sizeof(int),
64229 + .mode = 0600,
64230 + .proc_handler = &proc_dointvec,
64231 + },
64232 +#endif
64233 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
64234 + {
64235 + .ctl_name = CTL_UNNUMBERED,
64236 + .procname = "rwxmap_logging",
64237 + .data = &grsec_enable_log_rwxmaps,
64238 + .maxlen = sizeof(int),
64239 + .mode = 0600,
64240 + .proc_handler = &proc_dointvec,
64241 + },
64242 +#endif
64243 +#ifdef CONFIG_GRKERNSEC_SIGNAL
64244 + {
64245 + .ctl_name = CTL_UNNUMBERED,
64246 + .procname = "signal_logging",
64247 + .data = &grsec_enable_signal,
64248 + .maxlen = sizeof(int),
64249 + .mode = 0600,
64250 + .proc_handler = &proc_dointvec,
64251 + },
64252 +#endif
64253 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
64254 + {
64255 + .ctl_name = CTL_UNNUMBERED,
64256 + .procname = "forkfail_logging",
64257 + .data = &grsec_enable_forkfail,
64258 + .maxlen = sizeof(int),
64259 + .mode = 0600,
64260 + .proc_handler = &proc_dointvec,
64261 + },
64262 +#endif
64263 +#ifdef CONFIG_GRKERNSEC_TIME
64264 + {
64265 + .ctl_name = CTL_UNNUMBERED,
64266 + .procname = "timechange_logging",
64267 + .data = &grsec_enable_time,
64268 + .maxlen = sizeof(int),
64269 + .mode = 0600,
64270 + .proc_handler = &proc_dointvec,
64271 + },
64272 +#endif
64273 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
64274 + {
64275 + .ctl_name = CTL_UNNUMBERED,
64276 + .procname = "chroot_deny_shmat",
64277 + .data = &grsec_enable_chroot_shmat,
64278 + .maxlen = sizeof(int),
64279 + .mode = 0600,
64280 + .proc_handler = &proc_dointvec,
64281 + },
64282 +#endif
64283 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
64284 + {
64285 + .ctl_name = CTL_UNNUMBERED,
64286 + .procname = "chroot_deny_unix",
64287 + .data = &grsec_enable_chroot_unix,
64288 + .maxlen = sizeof(int),
64289 + .mode = 0600,
64290 + .proc_handler = &proc_dointvec,
64291 + },
64292 +#endif
64293 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
64294 + {
64295 + .ctl_name = CTL_UNNUMBERED,
64296 + .procname = "chroot_deny_mount",
64297 + .data = &grsec_enable_chroot_mount,
64298 + .maxlen = sizeof(int),
64299 + .mode = 0600,
64300 + .proc_handler = &proc_dointvec,
64301 + },
64302 +#endif
64303 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
64304 + {
64305 + .ctl_name = CTL_UNNUMBERED,
64306 + .procname = "chroot_deny_fchdir",
64307 + .data = &grsec_enable_chroot_fchdir,
64308 + .maxlen = sizeof(int),
64309 + .mode = 0600,
64310 + .proc_handler = &proc_dointvec,
64311 + },
64312 +#endif
64313 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
64314 + {
64315 + .ctl_name = CTL_UNNUMBERED,
64316 + .procname = "chroot_deny_chroot",
64317 + .data = &grsec_enable_chroot_double,
64318 + .maxlen = sizeof(int),
64319 + .mode = 0600,
64320 + .proc_handler = &proc_dointvec,
64321 + },
64322 +#endif
64323 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
64324 + {
64325 + .ctl_name = CTL_UNNUMBERED,
64326 + .procname = "chroot_deny_pivot",
64327 + .data = &grsec_enable_chroot_pivot,
64328 + .maxlen = sizeof(int),
64329 + .mode = 0600,
64330 + .proc_handler = &proc_dointvec,
64331 + },
64332 +#endif
64333 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
64334 + {
64335 + .ctl_name = CTL_UNNUMBERED,
64336 + .procname = "chroot_enforce_chdir",
64337 + .data = &grsec_enable_chroot_chdir,
64338 + .maxlen = sizeof(int),
64339 + .mode = 0600,
64340 + .proc_handler = &proc_dointvec,
64341 + },
64342 +#endif
64343 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
64344 + {
64345 + .ctl_name = CTL_UNNUMBERED,
64346 + .procname = "chroot_deny_chmod",
64347 + .data = &grsec_enable_chroot_chmod,
64348 + .maxlen = sizeof(int),
64349 + .mode = 0600,
64350 + .proc_handler = &proc_dointvec,
64351 + },
64352 +#endif
64353 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
64354 + {
64355 + .ctl_name = CTL_UNNUMBERED,
64356 + .procname = "chroot_deny_mknod",
64357 + .data = &grsec_enable_chroot_mknod,
64358 + .maxlen = sizeof(int),
64359 + .mode = 0600,
64360 + .proc_handler = &proc_dointvec,
64361 + },
64362 +#endif
64363 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
64364 + {
64365 + .ctl_name = CTL_UNNUMBERED,
64366 + .procname = "chroot_restrict_nice",
64367 + .data = &grsec_enable_chroot_nice,
64368 + .maxlen = sizeof(int),
64369 + .mode = 0600,
64370 + .proc_handler = &proc_dointvec,
64371 + },
64372 +#endif
64373 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
64374 + {
64375 + .ctl_name = CTL_UNNUMBERED,
64376 + .procname = "chroot_execlog",
64377 + .data = &grsec_enable_chroot_execlog,
64378 + .maxlen = sizeof(int),
64379 + .mode = 0600,
64380 + .proc_handler = &proc_dointvec,
64381 + },
64382 +#endif
64383 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
64384 + {
64385 + .ctl_name = CTL_UNNUMBERED,
64386 + .procname = "chroot_caps",
64387 + .data = &grsec_enable_chroot_caps,
64388 + .maxlen = sizeof(int),
64389 + .mode = 0600,
64390 + .proc_handler = &proc_dointvec,
64391 + },
64392 +#endif
64393 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
64394 + {
64395 + .ctl_name = CTL_UNNUMBERED,
64396 + .procname = "chroot_deny_sysctl",
64397 + .data = &grsec_enable_chroot_sysctl,
64398 + .maxlen = sizeof(int),
64399 + .mode = 0600,
64400 + .proc_handler = &proc_dointvec,
64401 + },
64402 +#endif
64403 +#ifdef CONFIG_GRKERNSEC_TPE
64404 + {
64405 + .ctl_name = CTL_UNNUMBERED,
64406 + .procname = "tpe",
64407 + .data = &grsec_enable_tpe,
64408 + .maxlen = sizeof(int),
64409 + .mode = 0600,
64410 + .proc_handler = &proc_dointvec,
64411 + },
64412 + {
64413 + .ctl_name = CTL_UNNUMBERED,
64414 + .procname = "tpe_gid",
64415 + .data = &grsec_tpe_gid,
64416 + .maxlen = sizeof(int),
64417 + .mode = 0600,
64418 + .proc_handler = &proc_dointvec,
64419 + },
64420 +#endif
64421 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
64422 + {
64423 + .ctl_name = CTL_UNNUMBERED,
64424 + .procname = "tpe_invert",
64425 + .data = &grsec_enable_tpe_invert,
64426 + .maxlen = sizeof(int),
64427 + .mode = 0600,
64428 + .proc_handler = &proc_dointvec,
64429 + },
64430 +#endif
64431 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
64432 + {
64433 + .ctl_name = CTL_UNNUMBERED,
64434 + .procname = "tpe_restrict_all",
64435 + .data = &grsec_enable_tpe_all,
64436 + .maxlen = sizeof(int),
64437 + .mode = 0600,
64438 + .proc_handler = &proc_dointvec,
64439 + },
64440 +#endif
64441 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
64442 + {
64443 + .ctl_name = CTL_UNNUMBERED,
64444 + .procname = "socket_all",
64445 + .data = &grsec_enable_socket_all,
64446 + .maxlen = sizeof(int),
64447 + .mode = 0600,
64448 + .proc_handler = &proc_dointvec,
64449 + },
64450 + {
64451 + .ctl_name = CTL_UNNUMBERED,
64452 + .procname = "socket_all_gid",
64453 + .data = &grsec_socket_all_gid,
64454 + .maxlen = sizeof(int),
64455 + .mode = 0600,
64456 + .proc_handler = &proc_dointvec,
64457 + },
64458 +#endif
64459 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
64460 + {
64461 + .ctl_name = CTL_UNNUMBERED,
64462 + .procname = "socket_client",
64463 + .data = &grsec_enable_socket_client,
64464 + .maxlen = sizeof(int),
64465 + .mode = 0600,
64466 + .proc_handler = &proc_dointvec,
64467 + },
64468 + {
64469 + .ctl_name = CTL_UNNUMBERED,
64470 + .procname = "socket_client_gid",
64471 + .data = &grsec_socket_client_gid,
64472 + .maxlen = sizeof(int),
64473 + .mode = 0600,
64474 + .proc_handler = &proc_dointvec,
64475 + },
64476 +#endif
64477 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64478 + {
64479 + .ctl_name = CTL_UNNUMBERED,
64480 + .procname = "socket_server",
64481 + .data = &grsec_enable_socket_server,
64482 + .maxlen = sizeof(int),
64483 + .mode = 0600,
64484 + .proc_handler = &proc_dointvec,
64485 + },
64486 + {
64487 + .ctl_name = CTL_UNNUMBERED,
64488 + .procname = "socket_server_gid",
64489 + .data = &grsec_socket_server_gid,
64490 + .maxlen = sizeof(int),
64491 + .mode = 0600,
64492 + .proc_handler = &proc_dointvec,
64493 + },
64494 +#endif
64495 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
64496 + {
64497 + .ctl_name = CTL_UNNUMBERED,
64498 + .procname = "audit_group",
64499 + .data = &grsec_enable_group,
64500 + .maxlen = sizeof(int),
64501 + .mode = 0600,
64502 + .proc_handler = &proc_dointvec,
64503 + },
64504 + {
64505 + .ctl_name = CTL_UNNUMBERED,
64506 + .procname = "audit_gid",
64507 + .data = &grsec_audit_gid,
64508 + .maxlen = sizeof(int),
64509 + .mode = 0600,
64510 + .proc_handler = &proc_dointvec,
64511 + },
64512 +#endif
64513 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
64514 + {
64515 + .ctl_name = CTL_UNNUMBERED,
64516 + .procname = "audit_chdir",
64517 + .data = &grsec_enable_chdir,
64518 + .maxlen = sizeof(int),
64519 + .mode = 0600,
64520 + .proc_handler = &proc_dointvec,
64521 + },
64522 +#endif
64523 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64524 + {
64525 + .ctl_name = CTL_UNNUMBERED,
64526 + .procname = "audit_mount",
64527 + .data = &grsec_enable_mount,
64528 + .maxlen = sizeof(int),
64529 + .mode = 0600,
64530 + .proc_handler = &proc_dointvec,
64531 + },
64532 +#endif
64533 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
64534 + {
64535 + .ctl_name = CTL_UNNUMBERED,
64536 + .procname = "audit_textrel",
64537 + .data = &grsec_enable_audit_textrel,
64538 + .maxlen = sizeof(int),
64539 + .mode = 0600,
64540 + .proc_handler = &proc_dointvec,
64541 + },
64542 +#endif
64543 +#ifdef CONFIG_GRKERNSEC_DMESG
64544 + {
64545 + .ctl_name = CTL_UNNUMBERED,
64546 + .procname = "dmesg",
64547 + .data = &grsec_enable_dmesg,
64548 + .maxlen = sizeof(int),
64549 + .mode = 0600,
64550 + .proc_handler = &proc_dointvec,
64551 + },
64552 +#endif
64553 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
64554 + {
64555 + .ctl_name = CTL_UNNUMBERED,
64556 + .procname = "chroot_findtask",
64557 + .data = &grsec_enable_chroot_findtask,
64558 + .maxlen = sizeof(int),
64559 + .mode = 0600,
64560 + .proc_handler = &proc_dointvec,
64561 + },
64562 +#endif
64563 +#ifdef CONFIG_GRKERNSEC_RESLOG
64564 + {
64565 + .ctl_name = CTL_UNNUMBERED,
64566 + .procname = "resource_logging",
64567 + .data = &grsec_resource_logging,
64568 + .maxlen = sizeof(int),
64569 + .mode = 0600,
64570 + .proc_handler = &proc_dointvec,
64571 + },
64572 +#endif
64573 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
64574 + {
64575 + .ctl_name = CTL_UNNUMBERED,
64576 + .procname = "audit_ptrace",
64577 + .data = &grsec_enable_audit_ptrace,
64578 + .maxlen = sizeof(int),
64579 + .mode = 0600,
64580 + .proc_handler = &proc_dointvec,
64581 + },
64582 +#endif
64583 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
64584 + {
64585 + .ctl_name = CTL_UNNUMBERED,
64586 + .procname = "harden_ptrace",
64587 + .data = &grsec_enable_harden_ptrace,
64588 + .maxlen = sizeof(int),
64589 + .mode = 0600,
64590 + .proc_handler = &proc_dointvec,
64591 + },
64592 +#endif
64593 + {
64594 + .ctl_name = CTL_UNNUMBERED,
64595 + .procname = "grsec_lock",
64596 + .data = &grsec_lock,
64597 + .maxlen = sizeof(int),
64598 + .mode = 0600,
64599 + .proc_handler = &proc_dointvec,
64600 + },
64601 +#endif
64602 +#ifdef CONFIG_GRKERNSEC_ROFS
64603 + {
64604 + .ctl_name = CTL_UNNUMBERED,
64605 + .procname = "romount_protect",
64606 + .data = &grsec_enable_rofs,
64607 + .maxlen = sizeof(int),
64608 + .mode = 0600,
64609 + .proc_handler = &proc_dointvec_minmax,
64610 + .extra1 = &one,
64611 + .extra2 = &one,
64612 + },
64613 +#endif
64614 + { .ctl_name = 0 }
64615 +};
64616 +#endif
64617 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
64618 new file mode 100644
64619 index 0000000..0dc13c3
64620 --- /dev/null
64621 +++ b/grsecurity/grsec_time.c
64622 @@ -0,0 +1,16 @@
64623 +#include <linux/kernel.h>
64624 +#include <linux/sched.h>
64625 +#include <linux/grinternal.h>
64626 +#include <linux/module.h>
64627 +
64628 +void
64629 +gr_log_timechange(void)
64630 +{
64631 +#ifdef CONFIG_GRKERNSEC_TIME
64632 + if (grsec_enable_time)
64633 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
64634 +#endif
64635 + return;
64636 +}
64637 +
64638 +EXPORT_SYMBOL(gr_log_timechange);
64639 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
64640 new file mode 100644
64641 index 0000000..4a78774
64642 --- /dev/null
64643 +++ b/grsecurity/grsec_tpe.c
64644 @@ -0,0 +1,39 @@
64645 +#include <linux/kernel.h>
64646 +#include <linux/sched.h>
64647 +#include <linux/file.h>
64648 +#include <linux/fs.h>
64649 +#include <linux/grinternal.h>
64650 +
64651 +extern int gr_acl_tpe_check(void);
64652 +
64653 +int
64654 +gr_tpe_allow(const struct file *file)
64655 +{
64656 +#ifdef CONFIG_GRKERNSEC
64657 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
64658 + const struct cred *cred = current_cred();
64659 +
64660 + if (cred->uid && ((grsec_enable_tpe &&
64661 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
64662 + ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
64663 + (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
64664 +#else
64665 + in_group_p(grsec_tpe_gid)
64666 +#endif
64667 + ) || gr_acl_tpe_check()) &&
64668 + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
64669 + (inode->i_mode & S_IWOTH))))) {
64670 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
64671 + return 0;
64672 + }
64673 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
64674 + if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
64675 + ((inode->i_uid && (inode->i_uid != cred->uid)) ||
64676 + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
64677 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
64678 + return 0;
64679 + }
64680 +#endif
64681 +#endif
64682 + return 1;
64683 +}
64684 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
64685 new file mode 100644
64686 index 0000000..9f7b1ac
64687 --- /dev/null
64688 +++ b/grsecurity/grsum.c
64689 @@ -0,0 +1,61 @@
64690 +#include <linux/err.h>
64691 +#include <linux/kernel.h>
64692 +#include <linux/sched.h>
64693 +#include <linux/mm.h>
64694 +#include <linux/scatterlist.h>
64695 +#include <linux/crypto.h>
64696 +#include <linux/gracl.h>
64697 +
64698 +
64699 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
64700 +#error "crypto and sha256 must be built into the kernel"
64701 +#endif
64702 +
64703 +int
64704 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
64705 +{
64706 + char *p;
64707 + struct crypto_hash *tfm;
64708 + struct hash_desc desc;
64709 + struct scatterlist sg;
64710 + unsigned char temp_sum[GR_SHA_LEN];
64711 + volatile int retval = 0;
64712 + volatile int dummy = 0;
64713 + unsigned int i;
64714 +
64715 + sg_init_table(&sg, 1);
64716 +
64717 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
64718 + if (IS_ERR(tfm)) {
64719 + /* should never happen, since sha256 should be built in */
64720 + return 1;
64721 + }
64722 +
64723 + desc.tfm = tfm;
64724 + desc.flags = 0;
64725 +
64726 + crypto_hash_init(&desc);
64727 +
64728 + p = salt;
64729 + sg_set_buf(&sg, p, GR_SALT_LEN);
64730 + crypto_hash_update(&desc, &sg, sg.length);
64731 +
64732 + p = entry->pw;
64733 + sg_set_buf(&sg, p, strlen(p));
64734 +
64735 + crypto_hash_update(&desc, &sg, sg.length);
64736 +
64737 + crypto_hash_final(&desc, temp_sum);
64738 +
64739 + memset(entry->pw, 0, GR_PW_LEN);
64740 +
64741 + for (i = 0; i < GR_SHA_LEN; i++)
64742 + if (sum[i] != temp_sum[i])
64743 + retval = 1;
64744 + else
64745 + dummy = 1; // waste a cycle
64746 +
64747 + crypto_free_hash(tfm);
64748 +
64749 + return retval;
64750 +}
64751 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
64752 index 3cd9ccd..fe16d47 100644
64753 --- a/include/acpi/acpi_bus.h
64754 +++ b/include/acpi/acpi_bus.h
64755 @@ -107,7 +107,7 @@ struct acpi_device_ops {
64756 acpi_op_bind bind;
64757 acpi_op_unbind unbind;
64758 acpi_op_notify notify;
64759 -};
64760 +} __no_const;
64761
64762 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
64763
64764 diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
64765 index f4906f6..71feb73 100644
64766 --- a/include/acpi/acpi_drivers.h
64767 +++ b/include/acpi/acpi_drivers.h
64768 @@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acpi_handle handle, int type);
64769 Dock Station
64770 -------------------------------------------------------------------------- */
64771 struct acpi_dock_ops {
64772 - acpi_notify_handler handler;
64773 - acpi_notify_handler uevent;
64774 + const acpi_notify_handler handler;
64775 + const acpi_notify_handler uevent;
64776 };
64777
64778 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
64779 @@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle handle);
64780 extern int register_dock_notifier(struct notifier_block *nb);
64781 extern void unregister_dock_notifier(struct notifier_block *nb);
64782 extern int register_hotplug_dock_device(acpi_handle handle,
64783 - struct acpi_dock_ops *ops,
64784 + const struct acpi_dock_ops *ops,
64785 void *context);
64786 extern void unregister_hotplug_dock_device(acpi_handle handle);
64787 #else
64788 @@ -144,7 +144,7 @@ static inline void unregister_dock_notifier(struct notifier_block *nb)
64789 {
64790 }
64791 static inline int register_hotplug_dock_device(acpi_handle handle,
64792 - struct acpi_dock_ops *ops,
64793 + const struct acpi_dock_ops *ops,
64794 void *context)
64795 {
64796 return -ENODEV;
64797 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
64798 index b7babf0..a9ac9fc 100644
64799 --- a/include/asm-generic/atomic-long.h
64800 +++ b/include/asm-generic/atomic-long.h
64801 @@ -22,6 +22,12 @@
64802
64803 typedef atomic64_t atomic_long_t;
64804
64805 +#ifdef CONFIG_PAX_REFCOUNT
64806 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
64807 +#else
64808 +typedef atomic64_t atomic_long_unchecked_t;
64809 +#endif
64810 +
64811 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
64812
64813 static inline long atomic_long_read(atomic_long_t *l)
64814 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
64815 return (long)atomic64_read(v);
64816 }
64817
64818 +#ifdef CONFIG_PAX_REFCOUNT
64819 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
64820 +{
64821 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
64822 +
64823 + return (long)atomic64_read_unchecked(v);
64824 +}
64825 +#endif
64826 +
64827 static inline void atomic_long_set(atomic_long_t *l, long i)
64828 {
64829 atomic64_t *v = (atomic64_t *)l;
64830 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
64831 atomic64_set(v, i);
64832 }
64833
64834 +#ifdef CONFIG_PAX_REFCOUNT
64835 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
64836 +{
64837 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
64838 +
64839 + atomic64_set_unchecked(v, i);
64840 +}
64841 +#endif
64842 +
64843 static inline void atomic_long_inc(atomic_long_t *l)
64844 {
64845 atomic64_t *v = (atomic64_t *)l;
64846 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
64847 atomic64_inc(v);
64848 }
64849
64850 +#ifdef CONFIG_PAX_REFCOUNT
64851 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
64852 +{
64853 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
64854 +
64855 + atomic64_inc_unchecked(v);
64856 +}
64857 +#endif
64858 +
64859 static inline void atomic_long_dec(atomic_long_t *l)
64860 {
64861 atomic64_t *v = (atomic64_t *)l;
64862 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
64863 atomic64_dec(v);
64864 }
64865
64866 +#ifdef CONFIG_PAX_REFCOUNT
64867 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
64868 +{
64869 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
64870 +
64871 + atomic64_dec_unchecked(v);
64872 +}
64873 +#endif
64874 +
64875 static inline void atomic_long_add(long i, atomic_long_t *l)
64876 {
64877 atomic64_t *v = (atomic64_t *)l;
64878 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
64879 atomic64_add(i, v);
64880 }
64881
64882 +#ifdef CONFIG_PAX_REFCOUNT
64883 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
64884 +{
64885 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
64886 +
64887 + atomic64_add_unchecked(i, v);
64888 +}
64889 +#endif
64890 +
64891 static inline void atomic_long_sub(long i, atomic_long_t *l)
64892 {
64893 atomic64_t *v = (atomic64_t *)l;
64894 @@ -115,6 +166,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
64895 return (long)atomic64_inc_return(v);
64896 }
64897
64898 +#ifdef CONFIG_PAX_REFCOUNT
64899 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
64900 +{
64901 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
64902 +
64903 + return (long)atomic64_inc_return_unchecked(v);
64904 +}
64905 +#endif
64906 +
64907 static inline long atomic_long_dec_return(atomic_long_t *l)
64908 {
64909 atomic64_t *v = (atomic64_t *)l;
64910 @@ -140,6 +200,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
64911
64912 typedef atomic_t atomic_long_t;
64913
64914 +#ifdef CONFIG_PAX_REFCOUNT
64915 +typedef atomic_unchecked_t atomic_long_unchecked_t;
64916 +#else
64917 +typedef atomic_t atomic_long_unchecked_t;
64918 +#endif
64919 +
64920 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
64921 static inline long atomic_long_read(atomic_long_t *l)
64922 {
64923 @@ -148,6 +214,15 @@ static inline long atomic_long_read(atomic_long_t *l)
64924 return (long)atomic_read(v);
64925 }
64926
64927 +#ifdef CONFIG_PAX_REFCOUNT
64928 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
64929 +{
64930 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
64931 +
64932 + return (long)atomic_read_unchecked(v);
64933 +}
64934 +#endif
64935 +
64936 static inline void atomic_long_set(atomic_long_t *l, long i)
64937 {
64938 atomic_t *v = (atomic_t *)l;
64939 @@ -155,6 +230,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
64940 atomic_set(v, i);
64941 }
64942
64943 +#ifdef CONFIG_PAX_REFCOUNT
64944 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
64945 +{
64946 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
64947 +
64948 + atomic_set_unchecked(v, i);
64949 +}
64950 +#endif
64951 +
64952 static inline void atomic_long_inc(atomic_long_t *l)
64953 {
64954 atomic_t *v = (atomic_t *)l;
64955 @@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
64956 atomic_inc(v);
64957 }
64958
64959 +#ifdef CONFIG_PAX_REFCOUNT
64960 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
64961 +{
64962 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
64963 +
64964 + atomic_inc_unchecked(v);
64965 +}
64966 +#endif
64967 +
64968 static inline void atomic_long_dec(atomic_long_t *l)
64969 {
64970 atomic_t *v = (atomic_t *)l;
64971 @@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
64972 atomic_dec(v);
64973 }
64974
64975 +#ifdef CONFIG_PAX_REFCOUNT
64976 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
64977 +{
64978 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
64979 +
64980 + atomic_dec_unchecked(v);
64981 +}
64982 +#endif
64983 +
64984 static inline void atomic_long_add(long i, atomic_long_t *l)
64985 {
64986 atomic_t *v = (atomic_t *)l;
64987 @@ -176,6 +278,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
64988 atomic_add(i, v);
64989 }
64990
64991 +#ifdef CONFIG_PAX_REFCOUNT
64992 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
64993 +{
64994 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
64995 +
64996 + atomic_add_unchecked(i, v);
64997 +}
64998 +#endif
64999 +
65000 static inline void atomic_long_sub(long i, atomic_long_t *l)
65001 {
65002 atomic_t *v = (atomic_t *)l;
65003 @@ -232,6 +343,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
65004 return (long)atomic_inc_return(v);
65005 }
65006
65007 +#ifdef CONFIG_PAX_REFCOUNT
65008 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
65009 +{
65010 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65011 +
65012 + return (long)atomic_inc_return_unchecked(v);
65013 +}
65014 +#endif
65015 +
65016 static inline long atomic_long_dec_return(atomic_long_t *l)
65017 {
65018 atomic_t *v = (atomic_t *)l;
65019 @@ -255,4 +375,47 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
65020
65021 #endif /* BITS_PER_LONG == 64 */
65022
65023 +#ifdef CONFIG_PAX_REFCOUNT
65024 +static inline void pax_refcount_needs_these_functions(void)
65025 +{
65026 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
65027 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
65028 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
65029 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
65030 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
65031 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
65032 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
65033 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
65034 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
65035 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
65036 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
65037 +
65038 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
65039 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
65040 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
65041 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
65042 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
65043 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
65044 +}
65045 +#else
65046 +#define atomic_read_unchecked(v) atomic_read(v)
65047 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
65048 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
65049 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
65050 +#define atomic_inc_unchecked(v) atomic_inc(v)
65051 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
65052 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
65053 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
65054 +#define atomic_dec_unchecked(v) atomic_dec(v)
65055 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
65056 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
65057 +
65058 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
65059 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
65060 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
65061 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
65062 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
65063 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
65064 +#endif
65065 +
65066 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
65067 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
65068 index b18ce4f..2ee2843 100644
65069 --- a/include/asm-generic/atomic64.h
65070 +++ b/include/asm-generic/atomic64.h
65071 @@ -16,6 +16,8 @@ typedef struct {
65072 long long counter;
65073 } atomic64_t;
65074
65075 +typedef atomic64_t atomic64_unchecked_t;
65076 +
65077 #define ATOMIC64_INIT(i) { (i) }
65078
65079 extern long long atomic64_read(const atomic64_t *v);
65080 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
65081 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
65082 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
65083
65084 +#define atomic64_read_unchecked(v) atomic64_read(v)
65085 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
65086 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
65087 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
65088 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
65089 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
65090 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
65091 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
65092 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
65093 +
65094 #endif /* _ASM_GENERIC_ATOMIC64_H */
65095 diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
65096 index d48ddf0..656a0ac 100644
65097 --- a/include/asm-generic/bug.h
65098 +++ b/include/asm-generic/bug.h
65099 @@ -105,11 +105,11 @@ extern void warn_slowpath_null(const char *file, const int line);
65100
65101 #else /* !CONFIG_BUG */
65102 #ifndef HAVE_ARCH_BUG
65103 -#define BUG() do {} while(0)
65104 +#define BUG() do { for (;;) ; } while(0)
65105 #endif
65106
65107 #ifndef HAVE_ARCH_BUG_ON
65108 -#define BUG_ON(condition) do { if (condition) ; } while(0)
65109 +#define BUG_ON(condition) do { if (condition) for (;;) ; } while(0)
65110 #endif
65111
65112 #ifndef HAVE_ARCH_WARN_ON
65113 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
65114 index 1bfcfe5..e04c5c9 100644
65115 --- a/include/asm-generic/cache.h
65116 +++ b/include/asm-generic/cache.h
65117 @@ -6,7 +6,7 @@
65118 * cache lines need to provide their own cache.h.
65119 */
65120
65121 -#define L1_CACHE_SHIFT 5
65122 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
65123 +#define L1_CACHE_SHIFT 5UL
65124 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
65125
65126 #endif /* __ASM_GENERIC_CACHE_H */
65127 diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
65128 index 6920695..41038bc 100644
65129 --- a/include/asm-generic/dma-mapping-common.h
65130 +++ b/include/asm-generic/dma-mapping-common.h
65131 @@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
65132 enum dma_data_direction dir,
65133 struct dma_attrs *attrs)
65134 {
65135 - struct dma_map_ops *ops = get_dma_ops(dev);
65136 + const struct dma_map_ops *ops = get_dma_ops(dev);
65137 dma_addr_t addr;
65138
65139 kmemcheck_mark_initialized(ptr, size);
65140 @@ -30,7 +30,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
65141 enum dma_data_direction dir,
65142 struct dma_attrs *attrs)
65143 {
65144 - struct dma_map_ops *ops = get_dma_ops(dev);
65145 + const struct dma_map_ops *ops = get_dma_ops(dev);
65146
65147 BUG_ON(!valid_dma_direction(dir));
65148 if (ops->unmap_page)
65149 @@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
65150 int nents, enum dma_data_direction dir,
65151 struct dma_attrs *attrs)
65152 {
65153 - struct dma_map_ops *ops = get_dma_ops(dev);
65154 + const struct dma_map_ops *ops = get_dma_ops(dev);
65155 int i, ents;
65156 struct scatterlist *s;
65157
65158 @@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
65159 int nents, enum dma_data_direction dir,
65160 struct dma_attrs *attrs)
65161 {
65162 - struct dma_map_ops *ops = get_dma_ops(dev);
65163 + const struct dma_map_ops *ops = get_dma_ops(dev);
65164
65165 BUG_ON(!valid_dma_direction(dir));
65166 debug_dma_unmap_sg(dev, sg, nents, dir);
65167 @@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
65168 size_t offset, size_t size,
65169 enum dma_data_direction dir)
65170 {
65171 - struct dma_map_ops *ops = get_dma_ops(dev);
65172 + const struct dma_map_ops *ops = get_dma_ops(dev);
65173 dma_addr_t addr;
65174
65175 kmemcheck_mark_initialized(page_address(page) + offset, size);
65176 @@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
65177 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
65178 size_t size, enum dma_data_direction dir)
65179 {
65180 - struct dma_map_ops *ops = get_dma_ops(dev);
65181 + const struct dma_map_ops *ops = get_dma_ops(dev);
65182
65183 BUG_ON(!valid_dma_direction(dir));
65184 if (ops->unmap_page)
65185 @@ -97,7 +97,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
65186 size_t size,
65187 enum dma_data_direction dir)
65188 {
65189 - struct dma_map_ops *ops = get_dma_ops(dev);
65190 + const struct dma_map_ops *ops = get_dma_ops(dev);
65191
65192 BUG_ON(!valid_dma_direction(dir));
65193 if (ops->sync_single_for_cpu)
65194 @@ -109,7 +109,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
65195 dma_addr_t addr, size_t size,
65196 enum dma_data_direction dir)
65197 {
65198 - struct dma_map_ops *ops = get_dma_ops(dev);
65199 + const struct dma_map_ops *ops = get_dma_ops(dev);
65200
65201 BUG_ON(!valid_dma_direction(dir));
65202 if (ops->sync_single_for_device)
65203 @@ -123,7 +123,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
65204 size_t size,
65205 enum dma_data_direction dir)
65206 {
65207 - struct dma_map_ops *ops = get_dma_ops(dev);
65208 + const struct dma_map_ops *ops = get_dma_ops(dev);
65209
65210 BUG_ON(!valid_dma_direction(dir));
65211 if (ops->sync_single_range_for_cpu) {
65212 @@ -140,7 +140,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
65213 size_t size,
65214 enum dma_data_direction dir)
65215 {
65216 - struct dma_map_ops *ops = get_dma_ops(dev);
65217 + const struct dma_map_ops *ops = get_dma_ops(dev);
65218
65219 BUG_ON(!valid_dma_direction(dir));
65220 if (ops->sync_single_range_for_device) {
65221 @@ -155,7 +155,7 @@ static inline void
65222 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
65223 int nelems, enum dma_data_direction dir)
65224 {
65225 - struct dma_map_ops *ops = get_dma_ops(dev);
65226 + const struct dma_map_ops *ops = get_dma_ops(dev);
65227
65228 BUG_ON(!valid_dma_direction(dir));
65229 if (ops->sync_sg_for_cpu)
65230 @@ -167,7 +167,7 @@ static inline void
65231 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
65232 int nelems, enum dma_data_direction dir)
65233 {
65234 - struct dma_map_ops *ops = get_dma_ops(dev);
65235 + const struct dma_map_ops *ops = get_dma_ops(dev);
65236
65237 BUG_ON(!valid_dma_direction(dir));
65238 if (ops->sync_sg_for_device)
65239 diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
65240 index 0d68a1e..b74a761 100644
65241 --- a/include/asm-generic/emergency-restart.h
65242 +++ b/include/asm-generic/emergency-restart.h
65243 @@ -1,7 +1,7 @@
65244 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
65245 #define _ASM_GENERIC_EMERGENCY_RESTART_H
65246
65247 -static inline void machine_emergency_restart(void)
65248 +static inline __noreturn void machine_emergency_restart(void)
65249 {
65250 machine_restart(NULL);
65251 }
65252 diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
65253 index 3c2344f..4590a7d 100644
65254 --- a/include/asm-generic/futex.h
65255 +++ b/include/asm-generic/futex.h
65256 @@ -6,7 +6,7 @@
65257 #include <asm/errno.h>
65258
65259 static inline int
65260 -futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
65261 +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
65262 {
65263 int op = (encoded_op >> 28) & 7;
65264 int cmp = (encoded_op >> 24) & 15;
65265 @@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
65266 }
65267
65268 static inline int
65269 -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
65270 +futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
65271 {
65272 return -ENOSYS;
65273 }
65274 diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
65275 index 1ca3efc..e3dc852 100644
65276 --- a/include/asm-generic/int-l64.h
65277 +++ b/include/asm-generic/int-l64.h
65278 @@ -46,6 +46,8 @@ typedef unsigned int u32;
65279 typedef signed long s64;
65280 typedef unsigned long u64;
65281
65282 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
65283 +
65284 #define S8_C(x) x
65285 #define U8_C(x) x ## U
65286 #define S16_C(x) x
65287 diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
65288 index f394147..b6152b9 100644
65289 --- a/include/asm-generic/int-ll64.h
65290 +++ b/include/asm-generic/int-ll64.h
65291 @@ -51,6 +51,8 @@ typedef unsigned int u32;
65292 typedef signed long long s64;
65293 typedef unsigned long long u64;
65294
65295 +typedef unsigned long long intoverflow_t;
65296 +
65297 #define S8_C(x) x
65298 #define U8_C(x) x ## U
65299 #define S16_C(x) x
65300 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
65301 index e5f234a..cdb16b3 100644
65302 --- a/include/asm-generic/kmap_types.h
65303 +++ b/include/asm-generic/kmap_types.h
65304 @@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
65305 KMAP_D(16) KM_IRQ_PTE,
65306 KMAP_D(17) KM_NMI,
65307 KMAP_D(18) KM_NMI_PTE,
65308 -KMAP_D(19) KM_TYPE_NR
65309 +KMAP_D(19) KM_CLEARPAGE,
65310 +KMAP_D(20) KM_TYPE_NR
65311 };
65312
65313 #undef KMAP_D
65314 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
65315 index 725612b..9cc513a 100644
65316 --- a/include/asm-generic/pgtable-nopmd.h
65317 +++ b/include/asm-generic/pgtable-nopmd.h
65318 @@ -1,14 +1,19 @@
65319 #ifndef _PGTABLE_NOPMD_H
65320 #define _PGTABLE_NOPMD_H
65321
65322 -#ifndef __ASSEMBLY__
65323 -
65324 #include <asm-generic/pgtable-nopud.h>
65325
65326 -struct mm_struct;
65327 -
65328 #define __PAGETABLE_PMD_FOLDED
65329
65330 +#define PMD_SHIFT PUD_SHIFT
65331 +#define PTRS_PER_PMD 1
65332 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
65333 +#define PMD_MASK (~(PMD_SIZE-1))
65334 +
65335 +#ifndef __ASSEMBLY__
65336 +
65337 +struct mm_struct;
65338 +
65339 /*
65340 * Having the pmd type consist of a pud gets the size right, and allows
65341 * us to conceptually access the pud entry that this pmd is folded into
65342 @@ -16,11 +21,6 @@ struct mm_struct;
65343 */
65344 typedef struct { pud_t pud; } pmd_t;
65345
65346 -#define PMD_SHIFT PUD_SHIFT
65347 -#define PTRS_PER_PMD 1
65348 -#define PMD_SIZE (1UL << PMD_SHIFT)
65349 -#define PMD_MASK (~(PMD_SIZE-1))
65350 -
65351 /*
65352 * The "pud_xxx()" functions here are trivial for a folded two-level
65353 * setup: the pmd is never bad, and a pmd always exists (as it's folded
65354 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
65355 index 810431d..ccc3638 100644
65356 --- a/include/asm-generic/pgtable-nopud.h
65357 +++ b/include/asm-generic/pgtable-nopud.h
65358 @@ -1,10 +1,15 @@
65359 #ifndef _PGTABLE_NOPUD_H
65360 #define _PGTABLE_NOPUD_H
65361
65362 -#ifndef __ASSEMBLY__
65363 -
65364 #define __PAGETABLE_PUD_FOLDED
65365
65366 +#define PUD_SHIFT PGDIR_SHIFT
65367 +#define PTRS_PER_PUD 1
65368 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
65369 +#define PUD_MASK (~(PUD_SIZE-1))
65370 +
65371 +#ifndef __ASSEMBLY__
65372 +
65373 /*
65374 * Having the pud type consist of a pgd gets the size right, and allows
65375 * us to conceptually access the pgd entry that this pud is folded into
65376 @@ -12,11 +17,6 @@
65377 */
65378 typedef struct { pgd_t pgd; } pud_t;
65379
65380 -#define PUD_SHIFT PGDIR_SHIFT
65381 -#define PTRS_PER_PUD 1
65382 -#define PUD_SIZE (1UL << PUD_SHIFT)
65383 -#define PUD_MASK (~(PUD_SIZE-1))
65384 -
65385 /*
65386 * The "pgd_xxx()" functions here are trivial for a folded two-level
65387 * setup: the pud is never bad, and a pud always exists (as it's folded
65388 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
65389 index e2bd73e..fea8ed3 100644
65390 --- a/include/asm-generic/pgtable.h
65391 +++ b/include/asm-generic/pgtable.h
65392 @@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
65393 unsigned long size);
65394 #endif
65395
65396 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
65397 +static inline unsigned long pax_open_kernel(void) { return 0; }
65398 +#endif
65399 +
65400 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
65401 +static inline unsigned long pax_close_kernel(void) { return 0; }
65402 +#endif
65403 +
65404 #endif /* !__ASSEMBLY__ */
65405
65406 #endif /* _ASM_GENERIC_PGTABLE_H */
65407 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
65408 index b6e818f..21aa58a 100644
65409 --- a/include/asm-generic/vmlinux.lds.h
65410 +++ b/include/asm-generic/vmlinux.lds.h
65411 @@ -199,6 +199,7 @@
65412 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
65413 VMLINUX_SYMBOL(__start_rodata) = .; \
65414 *(.rodata) *(.rodata.*) \
65415 + *(.data.read_only) \
65416 *(__vermagic) /* Kernel version magic */ \
65417 *(__markers_strings) /* Markers: strings */ \
65418 *(__tracepoints_strings)/* Tracepoints: strings */ \
65419 @@ -656,22 +657,24 @@
65420 * section in the linker script will go there too. @phdr should have
65421 * a leading colon.
65422 *
65423 - * Note that this macros defines __per_cpu_load as an absolute symbol.
65424 + * Note that this macros defines per_cpu_load as an absolute symbol.
65425 * If there is no need to put the percpu section at a predetermined
65426 * address, use PERCPU().
65427 */
65428 #define PERCPU_VADDR(vaddr, phdr) \
65429 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
65430 - .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
65431 + per_cpu_load = .; \
65432 + .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
65433 - LOAD_OFFSET) { \
65434 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
65435 VMLINUX_SYMBOL(__per_cpu_start) = .; \
65436 *(.data.percpu.first) \
65437 - *(.data.percpu.page_aligned) \
65438 *(.data.percpu) \
65439 + . = ALIGN(PAGE_SIZE); \
65440 + *(.data.percpu.page_aligned) \
65441 *(.data.percpu.shared_aligned) \
65442 VMLINUX_SYMBOL(__per_cpu_end) = .; \
65443 } phdr \
65444 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
65445 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
65446
65447 /**
65448 * PERCPU - define output section for percpu area, simple version
65449 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
65450 index 66713c6..98c0460 100644
65451 --- a/include/drm/drmP.h
65452 +++ b/include/drm/drmP.h
65453 @@ -71,6 +71,7 @@
65454 #include <linux/workqueue.h>
65455 #include <linux/poll.h>
65456 #include <asm/pgalloc.h>
65457 +#include <asm/local.h>
65458 #include "drm.h"
65459
65460 #include <linux/idr.h>
65461 @@ -814,7 +815,7 @@ struct drm_driver {
65462 void (*vgaarb_irq)(struct drm_device *dev, bool state);
65463
65464 /* Driver private ops for this object */
65465 - struct vm_operations_struct *gem_vm_ops;
65466 + const struct vm_operations_struct *gem_vm_ops;
65467
65468 int major;
65469 int minor;
65470 @@ -917,7 +918,7 @@ struct drm_device {
65471
65472 /** \name Usage Counters */
65473 /*@{ */
65474 - int open_count; /**< Outstanding files open */
65475 + local_t open_count; /**< Outstanding files open */
65476 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
65477 atomic_t vma_count; /**< Outstanding vma areas open */
65478 int buf_use; /**< Buffers in use -- cannot alloc */
65479 @@ -928,7 +929,7 @@ struct drm_device {
65480 /*@{ */
65481 unsigned long counters;
65482 enum drm_stat_type types[15];
65483 - atomic_t counts[15];
65484 + atomic_unchecked_t counts[15];
65485 /*@} */
65486
65487 struct list_head filelist;
65488 @@ -1016,7 +1017,7 @@ struct drm_device {
65489 struct pci_controller *hose;
65490 #endif
65491 struct drm_sg_mem *sg; /**< Scatter gather memory */
65492 - unsigned int num_crtcs; /**< Number of CRTCs on this device */
65493 + unsigned int num_crtcs; /**< Number of CRTCs on this device */
65494 void *dev_private; /**< device private data */
65495 void *mm_private;
65496 struct address_space *dev_mapping;
65497 @@ -1042,11 +1043,11 @@ struct drm_device {
65498 spinlock_t object_name_lock;
65499 struct idr object_name_idr;
65500 atomic_t object_count;
65501 - atomic_t object_memory;
65502 + atomic_unchecked_t object_memory;
65503 atomic_t pin_count;
65504 - atomic_t pin_memory;
65505 + atomic_unchecked_t pin_memory;
65506 atomic_t gtt_count;
65507 - atomic_t gtt_memory;
65508 + atomic_unchecked_t gtt_memory;
65509 uint32_t gtt_total;
65510 uint32_t invalidate_domains; /* domains pending invalidation */
65511 uint32_t flush_domains; /* domains pending flush */
65512 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
65513 index b29e201..3413cc9 100644
65514 --- a/include/drm/drm_crtc_helper.h
65515 +++ b/include/drm/drm_crtc_helper.h
65516 @@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs {
65517
65518 /* reload the current crtc LUT */
65519 void (*load_lut)(struct drm_crtc *crtc);
65520 -};
65521 +} __no_const;
65522
65523 struct drm_encoder_helper_funcs {
65524 void (*dpms)(struct drm_encoder *encoder, int mode);
65525 @@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs {
65526 struct drm_connector *connector);
65527 /* disable encoder when not in use - more explicit than dpms off */
65528 void (*disable)(struct drm_encoder *encoder);
65529 -};
65530 +} __no_const;
65531
65532 struct drm_connector_helper_funcs {
65533 int (*get_modes)(struct drm_connector *connector);
65534 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
65535 index b199170..6f9e64c 100644
65536 --- a/include/drm/ttm/ttm_memory.h
65537 +++ b/include/drm/ttm/ttm_memory.h
65538 @@ -47,7 +47,7 @@
65539
65540 struct ttm_mem_shrink {
65541 int (*do_shrink) (struct ttm_mem_shrink *);
65542 -};
65543 +} __no_const;
65544
65545 /**
65546 * struct ttm_mem_global - Global memory accounting structure.
65547 diff --git a/include/linux/a.out.h b/include/linux/a.out.h
65548 index e86dfca..40cc55f 100644
65549 --- a/include/linux/a.out.h
65550 +++ b/include/linux/a.out.h
65551 @@ -39,6 +39,14 @@ enum machine_type {
65552 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
65553 };
65554
65555 +/* Constants for the N_FLAGS field */
65556 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
65557 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
65558 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
65559 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
65560 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
65561 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
65562 +
65563 #if !defined (N_MAGIC)
65564 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
65565 #endif
65566 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
65567 index 817b237..62c10bc 100644
65568 --- a/include/linux/atmdev.h
65569 +++ b/include/linux/atmdev.h
65570 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
65571 #endif
65572
65573 struct k_atm_aal_stats {
65574 -#define __HANDLE_ITEM(i) atomic_t i
65575 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
65576 __AAL_STAT_ITEMS
65577 #undef __HANDLE_ITEM
65578 };
65579 diff --git a/include/linux/backlight.h b/include/linux/backlight.h
65580 index 0f5f578..8c4f884 100644
65581 --- a/include/linux/backlight.h
65582 +++ b/include/linux/backlight.h
65583 @@ -36,18 +36,18 @@ struct backlight_device;
65584 struct fb_info;
65585
65586 struct backlight_ops {
65587 - unsigned int options;
65588 + const unsigned int options;
65589
65590 #define BL_CORE_SUSPENDRESUME (1 << 0)
65591
65592 /* Notify the backlight driver some property has changed */
65593 - int (*update_status)(struct backlight_device *);
65594 + int (* const update_status)(struct backlight_device *);
65595 /* Return the current backlight brightness (accounting for power,
65596 fb_blank etc.) */
65597 - int (*get_brightness)(struct backlight_device *);
65598 + int (* const get_brightness)(struct backlight_device *);
65599 /* Check if given framebuffer device is the one bound to this backlight;
65600 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
65601 - int (*check_fb)(struct fb_info *);
65602 + int (* const check_fb)(struct fb_info *);
65603 };
65604
65605 /* This structure defines all the properties of a backlight */
65606 @@ -86,7 +86,7 @@ struct backlight_device {
65607 registered this device has been unloaded, and if class_get_devdata()
65608 points to something in the body of that driver, it is also invalid. */
65609 struct mutex ops_lock;
65610 - struct backlight_ops *ops;
65611 + const struct backlight_ops *ops;
65612
65613 /* The framebuffer notifier block */
65614 struct notifier_block fb_notif;
65615 @@ -103,7 +103,7 @@ static inline void backlight_update_status(struct backlight_device *bd)
65616 }
65617
65618 extern struct backlight_device *backlight_device_register(const char *name,
65619 - struct device *dev, void *devdata, struct backlight_ops *ops);
65620 + struct device *dev, void *devdata, const struct backlight_ops *ops);
65621 extern void backlight_device_unregister(struct backlight_device *bd);
65622 extern void backlight_force_update(struct backlight_device *bd,
65623 enum backlight_update_reason reason);
65624 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
65625 index a3d802e..482f69c 100644
65626 --- a/include/linux/binfmts.h
65627 +++ b/include/linux/binfmts.h
65628 @@ -83,6 +83,7 @@ struct linux_binfmt {
65629 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
65630 int (*load_shlib)(struct file *);
65631 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
65632 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
65633 unsigned long min_coredump; /* minimal dump size */
65634 int hasvdso;
65635 };
65636 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
65637 index a06bfab..a2906d2 100644
65638 --- a/include/linux/blkdev.h
65639 +++ b/include/linux/blkdev.h
65640 @@ -777,6 +777,9 @@ extern void blk_plug_device(struct request_queue *);
65641 extern void blk_plug_device_unlocked(struct request_queue *);
65642 extern int blk_remove_plug(struct request_queue *);
65643 extern void blk_recount_segments(struct request_queue *, struct bio *);
65644 +extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
65645 +extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
65646 + unsigned int, void __user *);
65647 extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
65648 unsigned int, void __user *);
65649 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
65650 @@ -1278,7 +1281,7 @@ struct block_device_operations {
65651 int (*revalidate_disk) (struct gendisk *);
65652 int (*getgeo)(struct block_device *, struct hd_geometry *);
65653 struct module *owner;
65654 -};
65655 +} __do_const;
65656
65657 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
65658 unsigned long);
65659 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
65660 index 3b73b99..629d21b 100644
65661 --- a/include/linux/blktrace_api.h
65662 +++ b/include/linux/blktrace_api.h
65663 @@ -160,7 +160,7 @@ struct blk_trace {
65664 struct dentry *dir;
65665 struct dentry *dropped_file;
65666 struct dentry *msg_file;
65667 - atomic_t dropped;
65668 + atomic_unchecked_t dropped;
65669 };
65670
65671 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
65672 diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
65673 index 83195fb..0b0f77d 100644
65674 --- a/include/linux/byteorder/little_endian.h
65675 +++ b/include/linux/byteorder/little_endian.h
65676 @@ -42,51 +42,51 @@
65677
65678 static inline __le64 __cpu_to_le64p(const __u64 *p)
65679 {
65680 - return (__force __le64)*p;
65681 + return (__force const __le64)*p;
65682 }
65683 static inline __u64 __le64_to_cpup(const __le64 *p)
65684 {
65685 - return (__force __u64)*p;
65686 + return (__force const __u64)*p;
65687 }
65688 static inline __le32 __cpu_to_le32p(const __u32 *p)
65689 {
65690 - return (__force __le32)*p;
65691 + return (__force const __le32)*p;
65692 }
65693 static inline __u32 __le32_to_cpup(const __le32 *p)
65694 {
65695 - return (__force __u32)*p;
65696 + return (__force const __u32)*p;
65697 }
65698 static inline __le16 __cpu_to_le16p(const __u16 *p)
65699 {
65700 - return (__force __le16)*p;
65701 + return (__force const __le16)*p;
65702 }
65703 static inline __u16 __le16_to_cpup(const __le16 *p)
65704 {
65705 - return (__force __u16)*p;
65706 + return (__force const __u16)*p;
65707 }
65708 static inline __be64 __cpu_to_be64p(const __u64 *p)
65709 {
65710 - return (__force __be64)__swab64p(p);
65711 + return (__force const __be64)__swab64p(p);
65712 }
65713 static inline __u64 __be64_to_cpup(const __be64 *p)
65714 {
65715 - return __swab64p((__u64 *)p);
65716 + return __swab64p((const __u64 *)p);
65717 }
65718 static inline __be32 __cpu_to_be32p(const __u32 *p)
65719 {
65720 - return (__force __be32)__swab32p(p);
65721 + return (__force const __be32)__swab32p(p);
65722 }
65723 static inline __u32 __be32_to_cpup(const __be32 *p)
65724 {
65725 - return __swab32p((__u32 *)p);
65726 + return __swab32p((const __u32 *)p);
65727 }
65728 static inline __be16 __cpu_to_be16p(const __u16 *p)
65729 {
65730 - return (__force __be16)__swab16p(p);
65731 + return (__force const __be16)__swab16p(p);
65732 }
65733 static inline __u16 __be16_to_cpup(const __be16 *p)
65734 {
65735 - return __swab16p((__u16 *)p);
65736 + return __swab16p((const __u16 *)p);
65737 }
65738 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
65739 #define __le64_to_cpus(x) do { (void)(x); } while (0)
65740 diff --git a/include/linux/cache.h b/include/linux/cache.h
65741 index 97e2488..e7576b9 100644
65742 --- a/include/linux/cache.h
65743 +++ b/include/linux/cache.h
65744 @@ -16,6 +16,10 @@
65745 #define __read_mostly
65746 #endif
65747
65748 +#ifndef __read_only
65749 +#define __read_only __read_mostly
65750 +#endif
65751 +
65752 #ifndef ____cacheline_aligned
65753 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
65754 #endif
65755 diff --git a/include/linux/capability.h b/include/linux/capability.h
65756 index c8f2a5f7..1618a5c 100644
65757 --- a/include/linux/capability.h
65758 +++ b/include/linux/capability.h
65759 @@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff_set;
65760 (security_real_capable_noaudit((t), (cap)) == 0)
65761
65762 extern int capable(int cap);
65763 +int capable_nolog(int cap);
65764
65765 /* audit system wants to get cap info from files as well */
65766 struct dentry;
65767 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
65768 index 450fa59..86019fb 100644
65769 --- a/include/linux/compiler-gcc4.h
65770 +++ b/include/linux/compiler-gcc4.h
65771 @@ -36,4 +36,16 @@
65772 the kernel context */
65773 #define __cold __attribute__((__cold__))
65774
65775 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
65776 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
65777 +#define __bos0(ptr) __bos((ptr), 0)
65778 +#define __bos1(ptr) __bos((ptr), 1)
65779 +
65780 +#if __GNUC_MINOR__ >= 5
65781 +#ifdef CONSTIFY_PLUGIN
65782 +#define __no_const __attribute__((no_const))
65783 +#define __do_const __attribute__((do_const))
65784 +#endif
65785 +#endif
65786 +
65787 #endif
65788 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
65789 index 04fb513..fd6477b 100644
65790 --- a/include/linux/compiler.h
65791 +++ b/include/linux/compiler.h
65792 @@ -5,11 +5,14 @@
65793
65794 #ifdef __CHECKER__
65795 # define __user __attribute__((noderef, address_space(1)))
65796 +# define __force_user __force __user
65797 # define __kernel /* default address space */
65798 +# define __force_kernel __force __kernel
65799 # define __safe __attribute__((safe))
65800 # define __force __attribute__((force))
65801 # define __nocast __attribute__((nocast))
65802 # define __iomem __attribute__((noderef, address_space(2)))
65803 +# define __force_iomem __force __iomem
65804 # define __acquires(x) __attribute__((context(x,0,1)))
65805 # define __releases(x) __attribute__((context(x,1,0)))
65806 # define __acquire(x) __context__(x,1)
65807 @@ -17,13 +20,34 @@
65808 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
65809 extern void __chk_user_ptr(const volatile void __user *);
65810 extern void __chk_io_ptr(const volatile void __iomem *);
65811 +#elif defined(CHECKER_PLUGIN)
65812 +//# define __user
65813 +//# define __force_user
65814 +//# define __kernel
65815 +//# define __force_kernel
65816 +# define __safe
65817 +# define __force
65818 +# define __nocast
65819 +# define __iomem
65820 +# define __force_iomem
65821 +# define __chk_user_ptr(x) (void)0
65822 +# define __chk_io_ptr(x) (void)0
65823 +# define __builtin_warning(x, y...) (1)
65824 +# define __acquires(x)
65825 +# define __releases(x)
65826 +# define __acquire(x) (void)0
65827 +# define __release(x) (void)0
65828 +# define __cond_lock(x,c) (c)
65829 #else
65830 # define __user
65831 +# define __force_user
65832 # define __kernel
65833 +# define __force_kernel
65834 # define __safe
65835 # define __force
65836 # define __nocast
65837 # define __iomem
65838 +# define __force_iomem
65839 # define __chk_user_ptr(x) (void)0
65840 # define __chk_io_ptr(x) (void)0
65841 # define __builtin_warning(x, y...) (1)
65842 @@ -247,6 +271,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
65843 # define __attribute_const__ /* unimplemented */
65844 #endif
65845
65846 +#ifndef __no_const
65847 +# define __no_const
65848 +#endif
65849 +
65850 +#ifndef __do_const
65851 +# define __do_const
65852 +#endif
65853 +
65854 /*
65855 * Tell gcc if a function is cold. The compiler will assume any path
65856 * directly leading to the call is unlikely.
65857 @@ -256,6 +288,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
65858 #define __cold
65859 #endif
65860
65861 +#ifndef __alloc_size
65862 +#define __alloc_size(...)
65863 +#endif
65864 +
65865 +#ifndef __bos
65866 +#define __bos(ptr, arg)
65867 +#endif
65868 +
65869 +#ifndef __bos0
65870 +#define __bos0(ptr)
65871 +#endif
65872 +
65873 +#ifndef __bos1
65874 +#define __bos1(ptr)
65875 +#endif
65876 +
65877 /* Simple shorthand for a section definition */
65878 #ifndef __section
65879 # define __section(S) __attribute__ ((__section__(#S)))
65880 @@ -278,6 +326,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
65881 * use is to mediate communication between process-level code and irq/NMI
65882 * handlers, all running on the same CPU.
65883 */
65884 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
65885 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
65886 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
65887
65888 #endif /* __LINUX_COMPILER_H */
65889 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
65890 index fd92988..a3164bd 100644
65891 --- a/include/linux/crypto.h
65892 +++ b/include/linux/crypto.h
65893 @@ -394,7 +394,7 @@ struct cipher_tfm {
65894 const u8 *key, unsigned int keylen);
65895 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
65896 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
65897 -};
65898 +} __no_const;
65899
65900 struct hash_tfm {
65901 int (*init)(struct hash_desc *desc);
65902 @@ -415,13 +415,13 @@ struct compress_tfm {
65903 int (*cot_decompress)(struct crypto_tfm *tfm,
65904 const u8 *src, unsigned int slen,
65905 u8 *dst, unsigned int *dlen);
65906 -};
65907 +} __no_const;
65908
65909 struct rng_tfm {
65910 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
65911 unsigned int dlen);
65912 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
65913 -};
65914 +} __no_const;
65915
65916 #define crt_ablkcipher crt_u.ablkcipher
65917 #define crt_aead crt_u.aead
65918 diff --git a/include/linux/dcache.h b/include/linux/dcache.h
65919 index 30b93b2..cd7a8db 100644
65920 --- a/include/linux/dcache.h
65921 +++ b/include/linux/dcache.h
65922 @@ -119,6 +119,8 @@ struct dentry {
65923 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
65924 };
65925
65926 +#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
65927 +
65928 /*
65929 * dentry->d_lock spinlock nesting subclasses:
65930 *
65931 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
65932 index 3e9bd6a..f4e1aa0 100644
65933 --- a/include/linux/decompress/mm.h
65934 +++ b/include/linux/decompress/mm.h
65935 @@ -78,7 +78,7 @@ static void free(void *where)
65936 * warnings when not needed (indeed large_malloc / large_free are not
65937 * needed by inflate */
65938
65939 -#define malloc(a) kmalloc(a, GFP_KERNEL)
65940 +#define malloc(a) kmalloc((a), GFP_KERNEL)
65941 #define free(a) kfree(a)
65942
65943 #define large_malloc(a) vmalloc(a)
65944 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
65945 index 91b7618..92a93d32 100644
65946 --- a/include/linux/dma-mapping.h
65947 +++ b/include/linux/dma-mapping.h
65948 @@ -16,51 +16,51 @@ enum dma_data_direction {
65949 };
65950
65951 struct dma_map_ops {
65952 - void* (*alloc_coherent)(struct device *dev, size_t size,
65953 + void* (* const alloc_coherent)(struct device *dev, size_t size,
65954 dma_addr_t *dma_handle, gfp_t gfp);
65955 - void (*free_coherent)(struct device *dev, size_t size,
65956 + void (* const free_coherent)(struct device *dev, size_t size,
65957 void *vaddr, dma_addr_t dma_handle);
65958 - dma_addr_t (*map_page)(struct device *dev, struct page *page,
65959 + dma_addr_t (* const map_page)(struct device *dev, struct page *page,
65960 unsigned long offset, size_t size,
65961 enum dma_data_direction dir,
65962 struct dma_attrs *attrs);
65963 - void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
65964 + void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
65965 size_t size, enum dma_data_direction dir,
65966 struct dma_attrs *attrs);
65967 - int (*map_sg)(struct device *dev, struct scatterlist *sg,
65968 + int (* const map_sg)(struct device *dev, struct scatterlist *sg,
65969 int nents, enum dma_data_direction dir,
65970 struct dma_attrs *attrs);
65971 - void (*unmap_sg)(struct device *dev,
65972 + void (* const unmap_sg)(struct device *dev,
65973 struct scatterlist *sg, int nents,
65974 enum dma_data_direction dir,
65975 struct dma_attrs *attrs);
65976 - void (*sync_single_for_cpu)(struct device *dev,
65977 + void (* const sync_single_for_cpu)(struct device *dev,
65978 dma_addr_t dma_handle, size_t size,
65979 enum dma_data_direction dir);
65980 - void (*sync_single_for_device)(struct device *dev,
65981 + void (* const sync_single_for_device)(struct device *dev,
65982 dma_addr_t dma_handle, size_t size,
65983 enum dma_data_direction dir);
65984 - void (*sync_single_range_for_cpu)(struct device *dev,
65985 + void (* const sync_single_range_for_cpu)(struct device *dev,
65986 dma_addr_t dma_handle,
65987 unsigned long offset,
65988 size_t size,
65989 enum dma_data_direction dir);
65990 - void (*sync_single_range_for_device)(struct device *dev,
65991 + void (* const sync_single_range_for_device)(struct device *dev,
65992 dma_addr_t dma_handle,
65993 unsigned long offset,
65994 size_t size,
65995 enum dma_data_direction dir);
65996 - void (*sync_sg_for_cpu)(struct device *dev,
65997 + void (* const sync_sg_for_cpu)(struct device *dev,
65998 struct scatterlist *sg, int nents,
65999 enum dma_data_direction dir);
66000 - void (*sync_sg_for_device)(struct device *dev,
66001 + void (* const sync_sg_for_device)(struct device *dev,
66002 struct scatterlist *sg, int nents,
66003 enum dma_data_direction dir);
66004 - int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
66005 - int (*dma_supported)(struct device *dev, u64 mask);
66006 + int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
66007 + int (* const dma_supported)(struct device *dev, u64 mask);
66008 int (*set_dma_mask)(struct device *dev, u64 mask);
66009 int is_phys;
66010 -};
66011 +} __do_const;
66012
66013 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
66014
66015 diff --git a/include/linux/dst.h b/include/linux/dst.h
66016 index e26fed8..b976d9f 100644
66017 --- a/include/linux/dst.h
66018 +++ b/include/linux/dst.h
66019 @@ -380,7 +380,7 @@ struct dst_node
66020 struct thread_pool *pool;
66021
66022 /* Transaction IDs live here */
66023 - atomic_long_t gen;
66024 + atomic_long_unchecked_t gen;
66025
66026 /*
66027 * How frequently and how many times transaction
66028 diff --git a/include/linux/elf.h b/include/linux/elf.h
66029 index 90a4ed0..d652617 100644
66030 --- a/include/linux/elf.h
66031 +++ b/include/linux/elf.h
66032 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
66033 #define PT_GNU_EH_FRAME 0x6474e550
66034
66035 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
66036 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
66037 +
66038 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
66039 +
66040 +/* Constants for the e_flags field */
66041 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
66042 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
66043 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
66044 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
66045 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
66046 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
66047
66048 /* These constants define the different elf file types */
66049 #define ET_NONE 0
66050 @@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
66051 #define DT_DEBUG 21
66052 #define DT_TEXTREL 22
66053 #define DT_JMPREL 23
66054 +#define DT_FLAGS 30
66055 + #define DF_TEXTREL 0x00000004
66056 #define DT_ENCODING 32
66057 #define OLD_DT_LOOS 0x60000000
66058 #define DT_LOOS 0x6000000d
66059 @@ -230,6 +243,19 @@ typedef struct elf64_hdr {
66060 #define PF_W 0x2
66061 #define PF_X 0x1
66062
66063 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
66064 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
66065 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
66066 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
66067 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
66068 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
66069 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
66070 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
66071 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
66072 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
66073 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
66074 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
66075 +
66076 typedef struct elf32_phdr{
66077 Elf32_Word p_type;
66078 Elf32_Off p_offset;
66079 @@ -322,6 +348,8 @@ typedef struct elf64_shdr {
66080 #define EI_OSABI 7
66081 #define EI_PAD 8
66082
66083 +#define EI_PAX 14
66084 +
66085 #define ELFMAG0 0x7f /* EI_MAG */
66086 #define ELFMAG1 'E'
66087 #define ELFMAG2 'L'
66088 @@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
66089 #define elf_phdr elf32_phdr
66090 #define elf_note elf32_note
66091 #define elf_addr_t Elf32_Off
66092 +#define elf_dyn Elf32_Dyn
66093
66094 #else
66095
66096 @@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
66097 #define elf_phdr elf64_phdr
66098 #define elf_note elf64_note
66099 #define elf_addr_t Elf64_Off
66100 +#define elf_dyn Elf64_Dyn
66101
66102 #endif
66103
66104 diff --git a/include/linux/fs.h b/include/linux/fs.h
66105 index 1b9a47a..6fe2934 100644
66106 --- a/include/linux/fs.h
66107 +++ b/include/linux/fs.h
66108 @@ -568,41 +568,41 @@ typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
66109 unsigned long, unsigned long);
66110
66111 struct address_space_operations {
66112 - int (*writepage)(struct page *page, struct writeback_control *wbc);
66113 - int (*readpage)(struct file *, struct page *);
66114 - void (*sync_page)(struct page *);
66115 + int (* const writepage)(struct page *page, struct writeback_control *wbc);
66116 + int (* const readpage)(struct file *, struct page *);
66117 + void (* const sync_page)(struct page *);
66118
66119 /* Write back some dirty pages from this mapping. */
66120 - int (*writepages)(struct address_space *, struct writeback_control *);
66121 + int (* const writepages)(struct address_space *, struct writeback_control *);
66122
66123 /* Set a page dirty. Return true if this dirtied it */
66124 - int (*set_page_dirty)(struct page *page);
66125 + int (* const set_page_dirty)(struct page *page);
66126
66127 - int (*readpages)(struct file *filp, struct address_space *mapping,
66128 + int (* const readpages)(struct file *filp, struct address_space *mapping,
66129 struct list_head *pages, unsigned nr_pages);
66130
66131 - int (*write_begin)(struct file *, struct address_space *mapping,
66132 + int (* const write_begin)(struct file *, struct address_space *mapping,
66133 loff_t pos, unsigned len, unsigned flags,
66134 struct page **pagep, void **fsdata);
66135 - int (*write_end)(struct file *, struct address_space *mapping,
66136 + int (* const write_end)(struct file *, struct address_space *mapping,
66137 loff_t pos, unsigned len, unsigned copied,
66138 struct page *page, void *fsdata);
66139
66140 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
66141 - sector_t (*bmap)(struct address_space *, sector_t);
66142 - void (*invalidatepage) (struct page *, unsigned long);
66143 - int (*releasepage) (struct page *, gfp_t);
66144 - ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
66145 + sector_t (* const bmap)(struct address_space *, sector_t);
66146 + void (* const invalidatepage) (struct page *, unsigned long);
66147 + int (* const releasepage) (struct page *, gfp_t);
66148 + ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
66149 loff_t offset, unsigned long nr_segs);
66150 - int (*get_xip_mem)(struct address_space *, pgoff_t, int,
66151 + int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
66152 void **, unsigned long *);
66153 /* migrate the contents of a page to the specified target */
66154 - int (*migratepage) (struct address_space *,
66155 + int (* const migratepage) (struct address_space *,
66156 struct page *, struct page *);
66157 - int (*launder_page) (struct page *);
66158 - int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
66159 + int (* const launder_page) (struct page *);
66160 + int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
66161 unsigned long);
66162 - int (*error_remove_page)(struct address_space *, struct page *);
66163 + int (* const error_remove_page)(struct address_space *, struct page *);
66164 };
66165
66166 /*
66167 @@ -1031,19 +1031,19 @@ static inline int file_check_writeable(struct file *filp)
66168 typedef struct files_struct *fl_owner_t;
66169
66170 struct file_lock_operations {
66171 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
66172 - void (*fl_release_private)(struct file_lock *);
66173 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
66174 + void (* const fl_release_private)(struct file_lock *);
66175 };
66176
66177 struct lock_manager_operations {
66178 - int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
66179 - void (*fl_notify)(struct file_lock *); /* unblock callback */
66180 - int (*fl_grant)(struct file_lock *, struct file_lock *, int);
66181 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
66182 - void (*fl_release_private)(struct file_lock *);
66183 - void (*fl_break)(struct file_lock *);
66184 - int (*fl_mylease)(struct file_lock *, struct file_lock *);
66185 - int (*fl_change)(struct file_lock **, int);
66186 + int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
66187 + void (* const fl_notify)(struct file_lock *); /* unblock callback */
66188 + int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
66189 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
66190 + void (* const fl_release_private)(struct file_lock *);
66191 + void (* const fl_break)(struct file_lock *);
66192 + int (* const fl_mylease)(struct file_lock *, struct file_lock *);
66193 + int (* const fl_change)(struct file_lock **, int);
66194 };
66195
66196 struct lock_manager {
66197 @@ -1442,7 +1442,7 @@ struct fiemap_extent_info {
66198 unsigned int fi_flags; /* Flags as passed from user */
66199 unsigned int fi_extents_mapped; /* Number of mapped extents */
66200 unsigned int fi_extents_max; /* Size of fiemap_extent array */
66201 - struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
66202 + struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
66203 * array */
66204 };
66205 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
66206 @@ -1512,7 +1512,8 @@ struct file_operations {
66207 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
66208 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
66209 int (*setlease)(struct file *, long, struct file_lock **);
66210 -};
66211 +} __do_const;
66212 +typedef struct file_operations __no_const file_operations_no_const;
66213
66214 struct inode_operations {
66215 int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
66216 @@ -1559,30 +1560,30 @@ extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
66217 unsigned long, loff_t *);
66218
66219 struct super_operations {
66220 - struct inode *(*alloc_inode)(struct super_block *sb);
66221 - void (*destroy_inode)(struct inode *);
66222 + struct inode *(* const alloc_inode)(struct super_block *sb);
66223 + void (* const destroy_inode)(struct inode *);
66224
66225 - void (*dirty_inode) (struct inode *);
66226 - int (*write_inode) (struct inode *, int);
66227 - void (*drop_inode) (struct inode *);
66228 - void (*delete_inode) (struct inode *);
66229 - void (*put_super) (struct super_block *);
66230 - void (*write_super) (struct super_block *);
66231 - int (*sync_fs)(struct super_block *sb, int wait);
66232 - int (*freeze_fs) (struct super_block *);
66233 - int (*unfreeze_fs) (struct super_block *);
66234 - int (*statfs) (struct dentry *, struct kstatfs *);
66235 - int (*remount_fs) (struct super_block *, int *, char *);
66236 - void (*clear_inode) (struct inode *);
66237 - void (*umount_begin) (struct super_block *);
66238 + void (* const dirty_inode) (struct inode *);
66239 + int (* const write_inode) (struct inode *, int);
66240 + void (* const drop_inode) (struct inode *);
66241 + void (* const delete_inode) (struct inode *);
66242 + void (* const put_super) (struct super_block *);
66243 + void (* const write_super) (struct super_block *);
66244 + int (* const sync_fs)(struct super_block *sb, int wait);
66245 + int (* const freeze_fs) (struct super_block *);
66246 + int (* const unfreeze_fs) (struct super_block *);
66247 + int (* const statfs) (struct dentry *, struct kstatfs *);
66248 + int (* const remount_fs) (struct super_block *, int *, char *);
66249 + void (* const clear_inode) (struct inode *);
66250 + void (* const umount_begin) (struct super_block *);
66251
66252 - int (*show_options)(struct seq_file *, struct vfsmount *);
66253 - int (*show_stats)(struct seq_file *, struct vfsmount *);
66254 + int (* const show_options)(struct seq_file *, struct vfsmount *);
66255 + int (* const show_stats)(struct seq_file *, struct vfsmount *);
66256 #ifdef CONFIG_QUOTA
66257 - ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
66258 - ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
66259 + ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
66260 + ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
66261 #endif
66262 - int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
66263 + int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
66264 };
66265
66266 /*
66267 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
66268 index 78a05bf..2a7d3e1 100644
66269 --- a/include/linux/fs_struct.h
66270 +++ b/include/linux/fs_struct.h
66271 @@ -4,7 +4,7 @@
66272 #include <linux/path.h>
66273
66274 struct fs_struct {
66275 - int users;
66276 + atomic_t users;
66277 rwlock_t lock;
66278 int umask;
66279 int in_exec;
66280 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
66281 index 7be0c6f..2f63a2b 100644
66282 --- a/include/linux/fscache-cache.h
66283 +++ b/include/linux/fscache-cache.h
66284 @@ -116,7 +116,7 @@ struct fscache_operation {
66285 #endif
66286 };
66287
66288 -extern atomic_t fscache_op_debug_id;
66289 +extern atomic_unchecked_t fscache_op_debug_id;
66290 extern const struct slow_work_ops fscache_op_slow_work_ops;
66291
66292 extern void fscache_enqueue_operation(struct fscache_operation *);
66293 @@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
66294 fscache_operation_release_t release)
66295 {
66296 atomic_set(&op->usage, 1);
66297 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
66298 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
66299 op->release = release;
66300 INIT_LIST_HEAD(&op->pend_link);
66301 fscache_set_op_state(op, "Init");
66302 diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
66303 index 4d6f47b..00bcedb 100644
66304 --- a/include/linux/fsnotify_backend.h
66305 +++ b/include/linux/fsnotify_backend.h
66306 @@ -86,6 +86,7 @@ struct fsnotify_ops {
66307 void (*freeing_mark)(struct fsnotify_mark_entry *entry, struct fsnotify_group *group);
66308 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
66309 };
66310 +typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
66311
66312 /*
66313 * A group is a "thing" that wants to receive notification about filesystem
66314 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
66315 index 4ec5e67..42f1eb9 100644
66316 --- a/include/linux/ftrace_event.h
66317 +++ b/include/linux/ftrace_event.h
66318 @@ -163,7 +163,7 @@ extern int trace_define_field(struct ftrace_event_call *call,
66319 int filter_type);
66320 extern int trace_define_common_fields(struct ftrace_event_call *call);
66321
66322 -#define is_signed_type(type) (((type)(-1)) < 0)
66323 +#define is_signed_type(type) (((type)(-1)) < (type)1)
66324
66325 int trace_set_clr_event(const char *system, const char *event, int set);
66326
66327 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
66328 index 297df45..b6a74ff 100644
66329 --- a/include/linux/genhd.h
66330 +++ b/include/linux/genhd.h
66331 @@ -161,7 +161,7 @@ struct gendisk {
66332
66333 struct timer_rand_state *random;
66334
66335 - atomic_t sync_io; /* RAID */
66336 + atomic_unchecked_t sync_io; /* RAID */
66337 struct work_struct async_notify;
66338 #ifdef CONFIG_BLK_DEV_INTEGRITY
66339 struct blk_integrity *integrity;
66340 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
66341 new file mode 100644
66342 index 0000000..0dc3943
66343 --- /dev/null
66344 +++ b/include/linux/gracl.h
66345 @@ -0,0 +1,317 @@
66346 +#ifndef GR_ACL_H
66347 +#define GR_ACL_H
66348 +
66349 +#include <linux/grdefs.h>
66350 +#include <linux/resource.h>
66351 +#include <linux/capability.h>
66352 +#include <linux/dcache.h>
66353 +#include <asm/resource.h>
66354 +
66355 +/* Major status information */
66356 +
66357 +#define GR_VERSION "grsecurity 2.2.2"
66358 +#define GRSECURITY_VERSION 0x2202
66359 +
66360 +enum {
66361 + GR_SHUTDOWN = 0,
66362 + GR_ENABLE = 1,
66363 + GR_SPROLE = 2,
66364 + GR_RELOAD = 3,
66365 + GR_SEGVMOD = 4,
66366 + GR_STATUS = 5,
66367 + GR_UNSPROLE = 6,
66368 + GR_PASSSET = 7,
66369 + GR_SPROLEPAM = 8,
66370 +};
66371 +
66372 +/* Password setup definitions
66373 + * kernel/grhash.c */
66374 +enum {
66375 + GR_PW_LEN = 128,
66376 + GR_SALT_LEN = 16,
66377 + GR_SHA_LEN = 32,
66378 +};
66379 +
66380 +enum {
66381 + GR_SPROLE_LEN = 64,
66382 +};
66383 +
66384 +enum {
66385 + GR_NO_GLOB = 0,
66386 + GR_REG_GLOB,
66387 + GR_CREATE_GLOB
66388 +};
66389 +
66390 +#define GR_NLIMITS 32
66391 +
66392 +/* Begin Data Structures */
66393 +
66394 +struct sprole_pw {
66395 + unsigned char *rolename;
66396 + unsigned char salt[GR_SALT_LEN];
66397 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
66398 +};
66399 +
66400 +struct name_entry {
66401 + __u32 key;
66402 + ino_t inode;
66403 + dev_t device;
66404 + char *name;
66405 + __u16 len;
66406 + __u8 deleted;
66407 + struct name_entry *prev;
66408 + struct name_entry *next;
66409 +};
66410 +
66411 +struct inodev_entry {
66412 + struct name_entry *nentry;
66413 + struct inodev_entry *prev;
66414 + struct inodev_entry *next;
66415 +};
66416 +
66417 +struct acl_role_db {
66418 + struct acl_role_label **r_hash;
66419 + __u32 r_size;
66420 +};
66421 +
66422 +struct inodev_db {
66423 + struct inodev_entry **i_hash;
66424 + __u32 i_size;
66425 +};
66426 +
66427 +struct name_db {
66428 + struct name_entry **n_hash;
66429 + __u32 n_size;
66430 +};
66431 +
66432 +struct crash_uid {
66433 + uid_t uid;
66434 + unsigned long expires;
66435 +};
66436 +
66437 +struct gr_hash_struct {
66438 + void **table;
66439 + void **nametable;
66440 + void *first;
66441 + __u32 table_size;
66442 + __u32 used_size;
66443 + int type;
66444 +};
66445 +
66446 +/* Userspace Grsecurity ACL data structures */
66447 +
66448 +struct acl_subject_label {
66449 + char *filename;
66450 + ino_t inode;
66451 + dev_t device;
66452 + __u32 mode;
66453 + kernel_cap_t cap_mask;
66454 + kernel_cap_t cap_lower;
66455 + kernel_cap_t cap_invert_audit;
66456 +
66457 + struct rlimit res[GR_NLIMITS];
66458 + __u32 resmask;
66459 +
66460 + __u8 user_trans_type;
66461 + __u8 group_trans_type;
66462 + uid_t *user_transitions;
66463 + gid_t *group_transitions;
66464 + __u16 user_trans_num;
66465 + __u16 group_trans_num;
66466 +
66467 + __u32 sock_families[2];
66468 + __u32 ip_proto[8];
66469 + __u32 ip_type;
66470 + struct acl_ip_label **ips;
66471 + __u32 ip_num;
66472 + __u32 inaddr_any_override;
66473 +
66474 + __u32 crashes;
66475 + unsigned long expires;
66476 +
66477 + struct acl_subject_label *parent_subject;
66478 + struct gr_hash_struct *hash;
66479 + struct acl_subject_label *prev;
66480 + struct acl_subject_label *next;
66481 +
66482 + struct acl_object_label **obj_hash;
66483 + __u32 obj_hash_size;
66484 + __u16 pax_flags;
66485 +};
66486 +
66487 +struct role_allowed_ip {
66488 + __u32 addr;
66489 + __u32 netmask;
66490 +
66491 + struct role_allowed_ip *prev;
66492 + struct role_allowed_ip *next;
66493 +};
66494 +
66495 +struct role_transition {
66496 + char *rolename;
66497 +
66498 + struct role_transition *prev;
66499 + struct role_transition *next;
66500 +};
66501 +
66502 +struct acl_role_label {
66503 + char *rolename;
66504 + uid_t uidgid;
66505 + __u16 roletype;
66506 +
66507 + __u16 auth_attempts;
66508 + unsigned long expires;
66509 +
66510 + struct acl_subject_label *root_label;
66511 + struct gr_hash_struct *hash;
66512 +
66513 + struct acl_role_label *prev;
66514 + struct acl_role_label *next;
66515 +
66516 + struct role_transition *transitions;
66517 + struct role_allowed_ip *allowed_ips;
66518 + uid_t *domain_children;
66519 + __u16 domain_child_num;
66520 +
66521 + struct acl_subject_label **subj_hash;
66522 + __u32 subj_hash_size;
66523 +};
66524 +
66525 +struct user_acl_role_db {
66526 + struct acl_role_label **r_table;
66527 + __u32 num_pointers; /* Number of allocations to track */
66528 + __u32 num_roles; /* Number of roles */
66529 + __u32 num_domain_children; /* Number of domain children */
66530 + __u32 num_subjects; /* Number of subjects */
66531 + __u32 num_objects; /* Number of objects */
66532 +};
66533 +
66534 +struct acl_object_label {
66535 + char *filename;
66536 + ino_t inode;
66537 + dev_t device;
66538 + __u32 mode;
66539 +
66540 + struct acl_subject_label *nested;
66541 + struct acl_object_label *globbed;
66542 +
66543 + /* next two structures not used */
66544 +
66545 + struct acl_object_label *prev;
66546 + struct acl_object_label *next;
66547 +};
66548 +
66549 +struct acl_ip_label {
66550 + char *iface;
66551 + __u32 addr;
66552 + __u32 netmask;
66553 + __u16 low, high;
66554 + __u8 mode;
66555 + __u32 type;
66556 + __u32 proto[8];
66557 +
66558 + /* next two structures not used */
66559 +
66560 + struct acl_ip_label *prev;
66561 + struct acl_ip_label *next;
66562 +};
66563 +
66564 +struct gr_arg {
66565 + struct user_acl_role_db role_db;
66566 + unsigned char pw[GR_PW_LEN];
66567 + unsigned char salt[GR_SALT_LEN];
66568 + unsigned char sum[GR_SHA_LEN];
66569 + unsigned char sp_role[GR_SPROLE_LEN];
66570 + struct sprole_pw *sprole_pws;
66571 + dev_t segv_device;
66572 + ino_t segv_inode;
66573 + uid_t segv_uid;
66574 + __u16 num_sprole_pws;
66575 + __u16 mode;
66576 +};
66577 +
66578 +struct gr_arg_wrapper {
66579 + struct gr_arg *arg;
66580 + __u32 version;
66581 + __u32 size;
66582 +};
66583 +
66584 +struct subject_map {
66585 + struct acl_subject_label *user;
66586 + struct acl_subject_label *kernel;
66587 + struct subject_map *prev;
66588 + struct subject_map *next;
66589 +};
66590 +
66591 +struct acl_subj_map_db {
66592 + struct subject_map **s_hash;
66593 + __u32 s_size;
66594 +};
66595 +
66596 +/* End Data Structures Section */
66597 +
66598 +/* Hash functions generated by empirical testing by Brad Spengler
66599 + Makes good use of the low bits of the inode. Generally 0-1 times
66600 + in loop for successful match. 0-3 for unsuccessful match.
66601 + Shift/add algorithm with modulus of table size and an XOR*/
66602 +
66603 +static __inline__ unsigned int
66604 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
66605 +{
66606 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
66607 +}
66608 +
66609 + static __inline__ unsigned int
66610 +shash(const struct acl_subject_label *userp, const unsigned int sz)
66611 +{
66612 + return ((const unsigned long)userp % sz);
66613 +}
66614 +
66615 +static __inline__ unsigned int
66616 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
66617 +{
66618 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
66619 +}
66620 +
66621 +static __inline__ unsigned int
66622 +nhash(const char *name, const __u16 len, const unsigned int sz)
66623 +{
66624 + return full_name_hash((const unsigned char *)name, len) % sz;
66625 +}
66626 +
66627 +#define FOR_EACH_ROLE_START(role) \
66628 + role = role_list; \
66629 + while (role) {
66630 +
66631 +#define FOR_EACH_ROLE_END(role) \
66632 + role = role->prev; \
66633 + }
66634 +
66635 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
66636 + subj = NULL; \
66637 + iter = 0; \
66638 + while (iter < role->subj_hash_size) { \
66639 + if (subj == NULL) \
66640 + subj = role->subj_hash[iter]; \
66641 + if (subj == NULL) { \
66642 + iter++; \
66643 + continue; \
66644 + }
66645 +
66646 +#define FOR_EACH_SUBJECT_END(subj,iter) \
66647 + subj = subj->next; \
66648 + if (subj == NULL) \
66649 + iter++; \
66650 + }
66651 +
66652 +
66653 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
66654 + subj = role->hash->first; \
66655 + while (subj != NULL) {
66656 +
66657 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
66658 + subj = subj->next; \
66659 + }
66660 +
66661 +#endif
66662 +
66663 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
66664 new file mode 100644
66665 index 0000000..323ecf2
66666 --- /dev/null
66667 +++ b/include/linux/gralloc.h
66668 @@ -0,0 +1,9 @@
66669 +#ifndef __GRALLOC_H
66670 +#define __GRALLOC_H
66671 +
66672 +void acl_free_all(void);
66673 +int acl_alloc_stack_init(unsigned long size);
66674 +void *acl_alloc(unsigned long len);
66675 +void *acl_alloc_num(unsigned long num, unsigned long len);
66676 +
66677 +#endif
66678 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
66679 new file mode 100644
66680 index 0000000..70d6cd5
66681 --- /dev/null
66682 +++ b/include/linux/grdefs.h
66683 @@ -0,0 +1,140 @@
66684 +#ifndef GRDEFS_H
66685 +#define GRDEFS_H
66686 +
66687 +/* Begin grsecurity status declarations */
66688 +
66689 +enum {
66690 + GR_READY = 0x01,
66691 + GR_STATUS_INIT = 0x00 // disabled state
66692 +};
66693 +
66694 +/* Begin ACL declarations */
66695 +
66696 +/* Role flags */
66697 +
66698 +enum {
66699 + GR_ROLE_USER = 0x0001,
66700 + GR_ROLE_GROUP = 0x0002,
66701 + GR_ROLE_DEFAULT = 0x0004,
66702 + GR_ROLE_SPECIAL = 0x0008,
66703 + GR_ROLE_AUTH = 0x0010,
66704 + GR_ROLE_NOPW = 0x0020,
66705 + GR_ROLE_GOD = 0x0040,
66706 + GR_ROLE_LEARN = 0x0080,
66707 + GR_ROLE_TPE = 0x0100,
66708 + GR_ROLE_DOMAIN = 0x0200,
66709 + GR_ROLE_PAM = 0x0400,
66710 + GR_ROLE_PERSIST = 0x800
66711 +};
66712 +
66713 +/* ACL Subject and Object mode flags */
66714 +enum {
66715 + GR_DELETED = 0x80000000
66716 +};
66717 +
66718 +/* ACL Object-only mode flags */
66719 +enum {
66720 + GR_READ = 0x00000001,
66721 + GR_APPEND = 0x00000002,
66722 + GR_WRITE = 0x00000004,
66723 + GR_EXEC = 0x00000008,
66724 + GR_FIND = 0x00000010,
66725 + GR_INHERIT = 0x00000020,
66726 + GR_SETID = 0x00000040,
66727 + GR_CREATE = 0x00000080,
66728 + GR_DELETE = 0x00000100,
66729 + GR_LINK = 0x00000200,
66730 + GR_AUDIT_READ = 0x00000400,
66731 + GR_AUDIT_APPEND = 0x00000800,
66732 + GR_AUDIT_WRITE = 0x00001000,
66733 + GR_AUDIT_EXEC = 0x00002000,
66734 + GR_AUDIT_FIND = 0x00004000,
66735 + GR_AUDIT_INHERIT= 0x00008000,
66736 + GR_AUDIT_SETID = 0x00010000,
66737 + GR_AUDIT_CREATE = 0x00020000,
66738 + GR_AUDIT_DELETE = 0x00040000,
66739 + GR_AUDIT_LINK = 0x00080000,
66740 + GR_PTRACERD = 0x00100000,
66741 + GR_NOPTRACE = 0x00200000,
66742 + GR_SUPPRESS = 0x00400000,
66743 + GR_NOLEARN = 0x00800000,
66744 + GR_INIT_TRANSFER= 0x01000000
66745 +};
66746 +
66747 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
66748 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
66749 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
66750 +
66751 +/* ACL subject-only mode flags */
66752 +enum {
66753 + GR_KILL = 0x00000001,
66754 + GR_VIEW = 0x00000002,
66755 + GR_PROTECTED = 0x00000004,
66756 + GR_LEARN = 0x00000008,
66757 + GR_OVERRIDE = 0x00000010,
66758 + /* just a placeholder, this mode is only used in userspace */
66759 + GR_DUMMY = 0x00000020,
66760 + GR_PROTSHM = 0x00000040,
66761 + GR_KILLPROC = 0x00000080,
66762 + GR_KILLIPPROC = 0x00000100,
66763 + /* just a placeholder, this mode is only used in userspace */
66764 + GR_NOTROJAN = 0x00000200,
66765 + GR_PROTPROCFD = 0x00000400,
66766 + GR_PROCACCT = 0x00000800,
66767 + GR_RELAXPTRACE = 0x00001000,
66768 + GR_NESTED = 0x00002000,
66769 + GR_INHERITLEARN = 0x00004000,
66770 + GR_PROCFIND = 0x00008000,
66771 + GR_POVERRIDE = 0x00010000,
66772 + GR_KERNELAUTH = 0x00020000,
66773 + GR_ATSECURE = 0x00040000,
66774 + GR_SHMEXEC = 0x00080000
66775 +};
66776 +
66777 +enum {
66778 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
66779 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
66780 + GR_PAX_ENABLE_MPROTECT = 0x0004,
66781 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
66782 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
66783 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
66784 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
66785 + GR_PAX_DISABLE_MPROTECT = 0x0400,
66786 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
66787 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
66788 +};
66789 +
66790 +enum {
66791 + GR_ID_USER = 0x01,
66792 + GR_ID_GROUP = 0x02,
66793 +};
66794 +
66795 +enum {
66796 + GR_ID_ALLOW = 0x01,
66797 + GR_ID_DENY = 0x02,
66798 +};
66799 +
66800 +#define GR_CRASH_RES 31
66801 +#define GR_UIDTABLE_MAX 500
66802 +
66803 +/* begin resource learning section */
66804 +enum {
66805 + GR_RLIM_CPU_BUMP = 60,
66806 + GR_RLIM_FSIZE_BUMP = 50000,
66807 + GR_RLIM_DATA_BUMP = 10000,
66808 + GR_RLIM_STACK_BUMP = 1000,
66809 + GR_RLIM_CORE_BUMP = 10000,
66810 + GR_RLIM_RSS_BUMP = 500000,
66811 + GR_RLIM_NPROC_BUMP = 1,
66812 + GR_RLIM_NOFILE_BUMP = 5,
66813 + GR_RLIM_MEMLOCK_BUMP = 50000,
66814 + GR_RLIM_AS_BUMP = 500000,
66815 + GR_RLIM_LOCKS_BUMP = 2,
66816 + GR_RLIM_SIGPENDING_BUMP = 5,
66817 + GR_RLIM_MSGQUEUE_BUMP = 10000,
66818 + GR_RLIM_NICE_BUMP = 1,
66819 + GR_RLIM_RTPRIO_BUMP = 1,
66820 + GR_RLIM_RTTIME_BUMP = 1000000
66821 +};
66822 +
66823 +#endif
66824 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
66825 new file mode 100644
66826 index 0000000..e5817d7
66827 --- /dev/null
66828 +++ b/include/linux/grinternal.h
66829 @@ -0,0 +1,218 @@
66830 +#ifndef __GRINTERNAL_H
66831 +#define __GRINTERNAL_H
66832 +
66833 +#ifdef CONFIG_GRKERNSEC
66834 +
66835 +#include <linux/fs.h>
66836 +#include <linux/mnt_namespace.h>
66837 +#include <linux/nsproxy.h>
66838 +#include <linux/gracl.h>
66839 +#include <linux/grdefs.h>
66840 +#include <linux/grmsg.h>
66841 +
66842 +void gr_add_learn_entry(const char *fmt, ...)
66843 + __attribute__ ((format (printf, 1, 2)));
66844 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
66845 + const struct vfsmount *mnt);
66846 +__u32 gr_check_create(const struct dentry *new_dentry,
66847 + const struct dentry *parent,
66848 + const struct vfsmount *mnt, const __u32 mode);
66849 +int gr_check_protected_task(const struct task_struct *task);
66850 +__u32 to_gr_audit(const __u32 reqmode);
66851 +int gr_set_acls(const int type);
66852 +int gr_apply_subject_to_task(struct task_struct *task);
66853 +int gr_acl_is_enabled(void);
66854 +char gr_roletype_to_char(void);
66855 +
66856 +void gr_handle_alertkill(struct task_struct *task);
66857 +char *gr_to_filename(const struct dentry *dentry,
66858 + const struct vfsmount *mnt);
66859 +char *gr_to_filename1(const struct dentry *dentry,
66860 + const struct vfsmount *mnt);
66861 +char *gr_to_filename2(const struct dentry *dentry,
66862 + const struct vfsmount *mnt);
66863 +char *gr_to_filename3(const struct dentry *dentry,
66864 + const struct vfsmount *mnt);
66865 +
66866 +extern int grsec_enable_harden_ptrace;
66867 +extern int grsec_enable_link;
66868 +extern int grsec_enable_fifo;
66869 +extern int grsec_enable_shm;
66870 +extern int grsec_enable_execlog;
66871 +extern int grsec_enable_signal;
66872 +extern int grsec_enable_audit_ptrace;
66873 +extern int grsec_enable_forkfail;
66874 +extern int grsec_enable_time;
66875 +extern int grsec_enable_rofs;
66876 +extern int grsec_enable_chroot_shmat;
66877 +extern int grsec_enable_chroot_mount;
66878 +extern int grsec_enable_chroot_double;
66879 +extern int grsec_enable_chroot_pivot;
66880 +extern int grsec_enable_chroot_chdir;
66881 +extern int grsec_enable_chroot_chmod;
66882 +extern int grsec_enable_chroot_mknod;
66883 +extern int grsec_enable_chroot_fchdir;
66884 +extern int grsec_enable_chroot_nice;
66885 +extern int grsec_enable_chroot_execlog;
66886 +extern int grsec_enable_chroot_caps;
66887 +extern int grsec_enable_chroot_sysctl;
66888 +extern int grsec_enable_chroot_unix;
66889 +extern int grsec_enable_tpe;
66890 +extern int grsec_tpe_gid;
66891 +extern int grsec_enable_tpe_all;
66892 +extern int grsec_enable_tpe_invert;
66893 +extern int grsec_enable_socket_all;
66894 +extern int grsec_socket_all_gid;
66895 +extern int grsec_enable_socket_client;
66896 +extern int grsec_socket_client_gid;
66897 +extern int grsec_enable_socket_server;
66898 +extern int grsec_socket_server_gid;
66899 +extern int grsec_audit_gid;
66900 +extern int grsec_enable_group;
66901 +extern int grsec_enable_audit_textrel;
66902 +extern int grsec_enable_log_rwxmaps;
66903 +extern int grsec_enable_mount;
66904 +extern int grsec_enable_chdir;
66905 +extern int grsec_resource_logging;
66906 +extern int grsec_enable_blackhole;
66907 +extern int grsec_lastack_retries;
66908 +extern int grsec_enable_brute;
66909 +extern int grsec_lock;
66910 +
66911 +extern spinlock_t grsec_alert_lock;
66912 +extern unsigned long grsec_alert_wtime;
66913 +extern unsigned long grsec_alert_fyet;
66914 +
66915 +extern spinlock_t grsec_audit_lock;
66916 +
66917 +extern rwlock_t grsec_exec_file_lock;
66918 +
66919 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
66920 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
66921 + (tsk)->exec_file->f_vfsmnt) : "/")
66922 +
66923 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
66924 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
66925 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
66926 +
66927 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
66928 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
66929 + (tsk)->exec_file->f_vfsmnt) : "/")
66930 +
66931 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
66932 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
66933 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
66934 +
66935 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
66936 +
66937 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
66938 +
66939 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
66940 + (task)->pid, (cred)->uid, \
66941 + (cred)->euid, (cred)->gid, (cred)->egid, \
66942 + gr_parent_task_fullpath(task), \
66943 + (task)->real_parent->comm, (task)->real_parent->pid, \
66944 + (pcred)->uid, (pcred)->euid, \
66945 + (pcred)->gid, (pcred)->egid
66946 +
66947 +#define GR_CHROOT_CAPS {{ \
66948 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
66949 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
66950 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
66951 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
66952 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
66953 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
66954 + CAP_TO_MASK(CAP_MAC_ADMIN) }}
66955 +
66956 +#define security_learn(normal_msg,args...) \
66957 +({ \
66958 + read_lock(&grsec_exec_file_lock); \
66959 + gr_add_learn_entry(normal_msg "\n", ## args); \
66960 + read_unlock(&grsec_exec_file_lock); \
66961 +})
66962 +
66963 +enum {
66964 + GR_DO_AUDIT,
66965 + GR_DONT_AUDIT,
66966 + GR_DONT_AUDIT_GOOD
66967 +};
66968 +
66969 +enum {
66970 + GR_TTYSNIFF,
66971 + GR_RBAC,
66972 + GR_RBAC_STR,
66973 + GR_STR_RBAC,
66974 + GR_RBAC_MODE2,
66975 + GR_RBAC_MODE3,
66976 + GR_FILENAME,
66977 + GR_SYSCTL_HIDDEN,
66978 + GR_NOARGS,
66979 + GR_ONE_INT,
66980 + GR_ONE_INT_TWO_STR,
66981 + GR_ONE_STR,
66982 + GR_STR_INT,
66983 + GR_TWO_STR_INT,
66984 + GR_TWO_INT,
66985 + GR_TWO_U64,
66986 + GR_THREE_INT,
66987 + GR_FIVE_INT_TWO_STR,
66988 + GR_TWO_STR,
66989 + GR_THREE_STR,
66990 + GR_FOUR_STR,
66991 + GR_STR_FILENAME,
66992 + GR_FILENAME_STR,
66993 + GR_FILENAME_TWO_INT,
66994 + GR_FILENAME_TWO_INT_STR,
66995 + GR_TEXTREL,
66996 + GR_PTRACE,
66997 + GR_RESOURCE,
66998 + GR_CAP,
66999 + GR_SIG,
67000 + GR_SIG2,
67001 + GR_CRASH1,
67002 + GR_CRASH2,
67003 + GR_PSACCT,
67004 + GR_RWXMAP
67005 +};
67006 +
67007 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
67008 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
67009 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
67010 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
67011 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
67012 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
67013 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
67014 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
67015 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
67016 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
67017 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
67018 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
67019 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
67020 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
67021 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
67022 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
67023 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
67024 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
67025 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
67026 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
67027 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
67028 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
67029 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
67030 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
67031 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
67032 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
67033 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
67034 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
67035 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
67036 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
67037 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
67038 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
67039 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
67040 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
67041 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
67042 +
67043 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
67044 +
67045 +#endif
67046 +
67047 +#endif
67048 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
67049 new file mode 100644
67050 index 0000000..9d5fd4a
67051 --- /dev/null
67052 +++ b/include/linux/grmsg.h
67053 @@ -0,0 +1,108 @@
67054 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
67055 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
67056 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
67057 +#define GR_STOPMOD_MSG "denied modification of module state by "
67058 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
67059 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
67060 +#define GR_IOPERM_MSG "denied use of ioperm() by "
67061 +#define GR_IOPL_MSG "denied use of iopl() by "
67062 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
67063 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
67064 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
67065 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
67066 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
67067 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
67068 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
67069 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
67070 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
67071 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
67072 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
67073 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
67074 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
67075 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
67076 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
67077 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
67078 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
67079 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
67080 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
67081 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
67082 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
67083 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
67084 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
67085 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
67086 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
67087 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
67088 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
67089 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
67090 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
67091 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
67092 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
67093 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
67094 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
67095 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
67096 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
67097 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
67098 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
67099 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
67100 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
67101 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
67102 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
67103 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
67104 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
67105 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
67106 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
67107 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
67108 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
67109 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
67110 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
67111 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
67112 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
67113 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
67114 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
67115 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
67116 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
67117 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
67118 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
67119 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
67120 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
67121 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
67122 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
67123 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
67124 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
67125 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
67126 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
67127 +#define GR_NICE_CHROOT_MSG "denied priority change by "
67128 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
67129 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
67130 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
67131 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
67132 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
67133 +#define GR_TIME_MSG "time set by "
67134 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
67135 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
67136 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
67137 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
67138 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
67139 +#define GR_BIND_MSG "denied bind() by "
67140 +#define GR_CONNECT_MSG "denied connect() by "
67141 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
67142 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
67143 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
67144 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
67145 +#define GR_CAP_ACL_MSG "use of %s denied for "
67146 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
67147 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
67148 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
67149 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
67150 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
67151 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
67152 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
67153 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
67154 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
67155 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
67156 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
67157 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
67158 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
67159 +#define GR_VM86_MSG "denied use of vm86 by "
67160 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
67161 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
67162 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
67163 new file mode 100644
67164 index 0000000..24676f4
67165 --- /dev/null
67166 +++ b/include/linux/grsecurity.h
67167 @@ -0,0 +1,218 @@
67168 +#ifndef GR_SECURITY_H
67169 +#define GR_SECURITY_H
67170 +#include <linux/fs.h>
67171 +#include <linux/fs_struct.h>
67172 +#include <linux/binfmts.h>
67173 +#include <linux/gracl.h>
67174 +#include <linux/compat.h>
67175 +
67176 +/* notify of brain-dead configs */
67177 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67178 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
67179 +#endif
67180 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
67181 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
67182 +#endif
67183 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
67184 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
67185 +#endif
67186 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
67187 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
67188 +#endif
67189 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
67190 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
67191 +#endif
67192 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
67193 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
67194 +#endif
67195 +
67196 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
67197 +void gr_handle_brute_check(void);
67198 +void gr_handle_kernel_exploit(void);
67199 +int gr_process_user_ban(void);
67200 +
67201 +char gr_roletype_to_char(void);
67202 +
67203 +int gr_acl_enable_at_secure(void);
67204 +
67205 +int gr_check_user_change(int real, int effective, int fs);
67206 +int gr_check_group_change(int real, int effective, int fs);
67207 +
67208 +void gr_del_task_from_ip_table(struct task_struct *p);
67209 +
67210 +int gr_pid_is_chrooted(struct task_struct *p);
67211 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
67212 +int gr_handle_chroot_nice(void);
67213 +int gr_handle_chroot_sysctl(const int op);
67214 +int gr_handle_chroot_setpriority(struct task_struct *p,
67215 + const int niceval);
67216 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
67217 +int gr_handle_chroot_chroot(const struct dentry *dentry,
67218 + const struct vfsmount *mnt);
67219 +void gr_handle_chroot_chdir(struct path *path);
67220 +int gr_handle_chroot_chmod(const struct dentry *dentry,
67221 + const struct vfsmount *mnt, const int mode);
67222 +int gr_handle_chroot_mknod(const struct dentry *dentry,
67223 + const struct vfsmount *mnt, const int mode);
67224 +int gr_handle_chroot_mount(const struct dentry *dentry,
67225 + const struct vfsmount *mnt,
67226 + const char *dev_name);
67227 +int gr_handle_chroot_pivot(void);
67228 +int gr_handle_chroot_unix(const pid_t pid);
67229 +
67230 +int gr_handle_rawio(const struct inode *inode);
67231 +
67232 +void gr_handle_ioperm(void);
67233 +void gr_handle_iopl(void);
67234 +
67235 +int gr_tpe_allow(const struct file *file);
67236 +
67237 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
67238 +void gr_clear_chroot_entries(struct task_struct *task);
67239 +
67240 +void gr_log_forkfail(const int retval);
67241 +void gr_log_timechange(void);
67242 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
67243 +void gr_log_chdir(const struct dentry *dentry,
67244 + const struct vfsmount *mnt);
67245 +void gr_log_chroot_exec(const struct dentry *dentry,
67246 + const struct vfsmount *mnt);
67247 +void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
67248 +#ifdef CONFIG_COMPAT
67249 +void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
67250 +#endif
67251 +void gr_log_remount(const char *devname, const int retval);
67252 +void gr_log_unmount(const char *devname, const int retval);
67253 +void gr_log_mount(const char *from, const char *to, const int retval);
67254 +void gr_log_textrel(struct vm_area_struct *vma);
67255 +void gr_log_rwxmmap(struct file *file);
67256 +void gr_log_rwxmprotect(struct file *file);
67257 +
67258 +int gr_handle_follow_link(const struct inode *parent,
67259 + const struct inode *inode,
67260 + const struct dentry *dentry,
67261 + const struct vfsmount *mnt);
67262 +int gr_handle_fifo(const struct dentry *dentry,
67263 + const struct vfsmount *mnt,
67264 + const struct dentry *dir, const int flag,
67265 + const int acc_mode);
67266 +int gr_handle_hardlink(const struct dentry *dentry,
67267 + const struct vfsmount *mnt,
67268 + struct inode *inode,
67269 + const int mode, const char *to);
67270 +
67271 +int gr_is_capable(const int cap);
67272 +int gr_is_capable_nolog(const int cap);
67273 +void gr_learn_resource(const struct task_struct *task, const int limit,
67274 + const unsigned long wanted, const int gt);
67275 +void gr_copy_label(struct task_struct *tsk);
67276 +void gr_handle_crash(struct task_struct *task, const int sig);
67277 +int gr_handle_signal(const struct task_struct *p, const int sig);
67278 +int gr_check_crash_uid(const uid_t uid);
67279 +int gr_check_protected_task(const struct task_struct *task);
67280 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
67281 +int gr_acl_handle_mmap(const struct file *file,
67282 + const unsigned long prot);
67283 +int gr_acl_handle_mprotect(const struct file *file,
67284 + const unsigned long prot);
67285 +int gr_check_hidden_task(const struct task_struct *tsk);
67286 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
67287 + const struct vfsmount *mnt);
67288 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
67289 + const struct vfsmount *mnt);
67290 +__u32 gr_acl_handle_access(const struct dentry *dentry,
67291 + const struct vfsmount *mnt, const int fmode);
67292 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
67293 + const struct vfsmount *mnt, mode_t mode);
67294 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
67295 + const struct vfsmount *mnt, mode_t mode);
67296 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
67297 + const struct vfsmount *mnt);
67298 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
67299 + const struct vfsmount *mnt);
67300 +int gr_handle_ptrace(struct task_struct *task, const long request);
67301 +int gr_handle_proc_ptrace(struct task_struct *task);
67302 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
67303 + const struct vfsmount *mnt);
67304 +int gr_check_crash_exec(const struct file *filp);
67305 +int gr_acl_is_enabled(void);
67306 +void gr_set_kernel_label(struct task_struct *task);
67307 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
67308 + const gid_t gid);
67309 +int gr_set_proc_label(const struct dentry *dentry,
67310 + const struct vfsmount *mnt,
67311 + const int unsafe_share);
67312 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
67313 + const struct vfsmount *mnt);
67314 +__u32 gr_acl_handle_open(const struct dentry *dentry,
67315 + const struct vfsmount *mnt, int acc_mode);
67316 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
67317 + const struct dentry *p_dentry,
67318 + const struct vfsmount *p_mnt,
67319 + int open_flags, int acc_mode, const int imode);
67320 +void gr_handle_create(const struct dentry *dentry,
67321 + const struct vfsmount *mnt);
67322 +void gr_handle_proc_create(const struct dentry *dentry,
67323 + const struct inode *inode);
67324 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
67325 + const struct dentry *parent_dentry,
67326 + const struct vfsmount *parent_mnt,
67327 + const int mode);
67328 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
67329 + const struct dentry *parent_dentry,
67330 + const struct vfsmount *parent_mnt);
67331 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
67332 + const struct vfsmount *mnt);
67333 +void gr_handle_delete(const ino_t ino, const dev_t dev);
67334 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
67335 + const struct vfsmount *mnt);
67336 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
67337 + const struct dentry *parent_dentry,
67338 + const struct vfsmount *parent_mnt,
67339 + const char *from);
67340 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
67341 + const struct dentry *parent_dentry,
67342 + const struct vfsmount *parent_mnt,
67343 + const struct dentry *old_dentry,
67344 + const struct vfsmount *old_mnt, const char *to);
67345 +int gr_acl_handle_rename(struct dentry *new_dentry,
67346 + struct dentry *parent_dentry,
67347 + const struct vfsmount *parent_mnt,
67348 + struct dentry *old_dentry,
67349 + struct inode *old_parent_inode,
67350 + struct vfsmount *old_mnt, const char *newname);
67351 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
67352 + struct dentry *old_dentry,
67353 + struct dentry *new_dentry,
67354 + struct vfsmount *mnt, const __u8 replace);
67355 +__u32 gr_check_link(const struct dentry *new_dentry,
67356 + const struct dentry *parent_dentry,
67357 + const struct vfsmount *parent_mnt,
67358 + const struct dentry *old_dentry,
67359 + const struct vfsmount *old_mnt);
67360 +int gr_acl_handle_filldir(const struct file *file, const char *name,
67361 + const unsigned int namelen, const ino_t ino);
67362 +
67363 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
67364 + const struct vfsmount *mnt);
67365 +void gr_acl_handle_exit(void);
67366 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
67367 +int gr_acl_handle_procpidmem(const struct task_struct *task);
67368 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
67369 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
67370 +void gr_audit_ptrace(struct task_struct *task);
67371 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
67372 +
67373 +#ifdef CONFIG_GRKERNSEC
67374 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
67375 +void gr_handle_vm86(void);
67376 +void gr_handle_mem_readwrite(u64 from, u64 to);
67377 +
67378 +extern int grsec_enable_dmesg;
67379 +extern int grsec_disable_privio;
67380 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
67381 +extern int grsec_enable_chroot_findtask;
67382 +#endif
67383 +#endif
67384 +
67385 +#endif
67386 diff --git a/include/linux/hdpu_features.h b/include/linux/hdpu_features.h
67387 index 6a87154..a3ce57b 100644
67388 --- a/include/linux/hdpu_features.h
67389 +++ b/include/linux/hdpu_features.h
67390 @@ -3,7 +3,7 @@
67391 struct cpustate_t {
67392 spinlock_t lock;
67393 int excl;
67394 - int open_count;
67395 + atomic_t open_count;
67396 unsigned char cached_val;
67397 int inited;
67398 unsigned long *set_addr;
67399 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
67400 index 211ff44..00ab6d7 100644
67401 --- a/include/linux/highmem.h
67402 +++ b/include/linux/highmem.h
67403 @@ -137,6 +137,18 @@ static inline void clear_highpage(struct page *page)
67404 kunmap_atomic(kaddr, KM_USER0);
67405 }
67406
67407 +static inline void sanitize_highpage(struct page *page)
67408 +{
67409 + void *kaddr;
67410 + unsigned long flags;
67411 +
67412 + local_irq_save(flags);
67413 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
67414 + clear_page(kaddr);
67415 + kunmap_atomic(kaddr, KM_CLEARPAGE);
67416 + local_irq_restore(flags);
67417 +}
67418 +
67419 static inline void zero_user_segments(struct page *page,
67420 unsigned start1, unsigned end1,
67421 unsigned start2, unsigned end2)
67422 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
67423 index 7b40cda..24eb44e 100644
67424 --- a/include/linux/i2c.h
67425 +++ b/include/linux/i2c.h
67426 @@ -325,6 +325,7 @@ struct i2c_algorithm {
67427 /* To determine what the adapter supports */
67428 u32 (*functionality) (struct i2c_adapter *);
67429 };
67430 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
67431
67432 /*
67433 * i2c_adapter is the structure used to identify a physical i2c bus along
67434 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
67435 index 4c4e57d..f3c5303 100644
67436 --- a/include/linux/i2o.h
67437 +++ b/include/linux/i2o.h
67438 @@ -564,7 +564,7 @@ struct i2o_controller {
67439 struct i2o_device *exec; /* Executive */
67440 #if BITS_PER_LONG == 64
67441 spinlock_t context_list_lock; /* lock for context_list */
67442 - atomic_t context_list_counter; /* needed for unique contexts */
67443 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
67444 struct list_head context_list; /* list of context id's
67445 and pointers */
67446 #endif
67447 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
67448 index 21a6f5d..dc42eab 100644
67449 --- a/include/linux/init_task.h
67450 +++ b/include/linux/init_task.h
67451 @@ -83,6 +83,12 @@ extern struct group_info init_groups;
67452 #define INIT_IDS
67453 #endif
67454
67455 +#ifdef CONFIG_X86
67456 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
67457 +#else
67458 +#define INIT_TASK_THREAD_INFO
67459 +#endif
67460 +
67461 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
67462 /*
67463 * Because of the reduced scope of CAP_SETPCAP when filesystem
67464 @@ -156,6 +162,7 @@ extern struct cred init_cred;
67465 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
67466 .comm = "swapper", \
67467 .thread = INIT_THREAD, \
67468 + INIT_TASK_THREAD_INFO \
67469 .fs = &init_fs, \
67470 .files = &init_files, \
67471 .signal = &init_signals, \
67472 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
67473 index 4f0a72a..a849599 100644
67474 --- a/include/linux/intel-iommu.h
67475 +++ b/include/linux/intel-iommu.h
67476 @@ -296,7 +296,7 @@ struct iommu_flush {
67477 u8 fm, u64 type);
67478 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
67479 unsigned int size_order, u64 type);
67480 -};
67481 +} __no_const;
67482
67483 enum {
67484 SR_DMAR_FECTL_REG,
67485 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
67486 index c739150..be577b5 100644
67487 --- a/include/linux/interrupt.h
67488 +++ b/include/linux/interrupt.h
67489 @@ -369,7 +369,7 @@ enum
67490 /* map softirq index to softirq name. update 'softirq_to_name' in
67491 * kernel/softirq.c when adding a new softirq.
67492 */
67493 -extern char *softirq_to_name[NR_SOFTIRQS];
67494 +extern const char * const softirq_to_name[NR_SOFTIRQS];
67495
67496 /* softirq mask and active fields moved to irq_cpustat_t in
67497 * asm/hardirq.h to get better cache usage. KAO
67498 @@ -377,12 +377,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
67499
67500 struct softirq_action
67501 {
67502 - void (*action)(struct softirq_action *);
67503 + void (*action)(void);
67504 };
67505
67506 asmlinkage void do_softirq(void);
67507 asmlinkage void __do_softirq(void);
67508 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
67509 +extern void open_softirq(int nr, void (*action)(void));
67510 extern void softirq_init(void);
67511 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
67512 extern void raise_softirq_irqoff(unsigned int nr);
67513 diff --git a/include/linux/irq.h b/include/linux/irq.h
67514 index 9e5f45a..025865b 100644
67515 --- a/include/linux/irq.h
67516 +++ b/include/linux/irq.h
67517 @@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
67518 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
67519 bool boot)
67520 {
67521 +#ifdef CONFIG_CPUMASK_OFFSTACK
67522 gfp_t gfp = GFP_ATOMIC;
67523
67524 if (boot)
67525 gfp = GFP_NOWAIT;
67526
67527 -#ifdef CONFIG_CPUMASK_OFFSTACK
67528 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
67529 return false;
67530
67531 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
67532 index 7922742..27306a2 100644
67533 --- a/include/linux/kallsyms.h
67534 +++ b/include/linux/kallsyms.h
67535 @@ -15,7 +15,8 @@
67536
67537 struct module;
67538
67539 -#ifdef CONFIG_KALLSYMS
67540 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
67541 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67542 /* Lookup the address for a symbol. Returns 0 if not found. */
67543 unsigned long kallsyms_lookup_name(const char *name);
67544
67545 @@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
67546 /* Stupid that this does nothing, but I didn't create this mess. */
67547 #define __print_symbol(fmt, addr)
67548 #endif /*CONFIG_KALLSYMS*/
67549 +#else /* when included by kallsyms.c, vsnprintf.c, or
67550 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
67551 +extern void __print_symbol(const char *fmt, unsigned long address);
67552 +extern int sprint_symbol(char *buffer, unsigned long address);
67553 +const char *kallsyms_lookup(unsigned long addr,
67554 + unsigned long *symbolsize,
67555 + unsigned long *offset,
67556 + char **modname, char *namebuf);
67557 +#endif
67558
67559 /* This macro allows us to keep printk typechecking */
67560 static void __check_printsym_format(const char *fmt, ...)
67561 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
67562 index 6adcc29..13369e8 100644
67563 --- a/include/linux/kgdb.h
67564 +++ b/include/linux/kgdb.h
67565 @@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
67566
67567 extern int kgdb_connected;
67568
67569 -extern atomic_t kgdb_setting_breakpoint;
67570 -extern atomic_t kgdb_cpu_doing_single_step;
67571 +extern atomic_unchecked_t kgdb_setting_breakpoint;
67572 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
67573
67574 extern struct task_struct *kgdb_usethread;
67575 extern struct task_struct *kgdb_contthread;
67576 @@ -235,7 +235,7 @@ struct kgdb_arch {
67577 int (*remove_hw_breakpoint)(unsigned long, int, enum kgdb_bptype);
67578 void (*remove_all_hw_break)(void);
67579 void (*correct_hw_break)(void);
67580 -};
67581 +} __do_const;
67582
67583 /**
67584 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
67585 @@ -257,14 +257,14 @@ struct kgdb_io {
67586 int (*init) (void);
67587 void (*pre_exception) (void);
67588 void (*post_exception) (void);
67589 -};
67590 +} __do_const;
67591
67592 -extern struct kgdb_arch arch_kgdb_ops;
67593 +extern const struct kgdb_arch arch_kgdb_ops;
67594
67595 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
67596
67597 -extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
67598 -extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
67599 +extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
67600 +extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
67601
67602 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
67603 extern int kgdb_mem2hex(char *mem, char *buf, int count);
67604 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
67605 index 384ca8b..83dd97d 100644
67606 --- a/include/linux/kmod.h
67607 +++ b/include/linux/kmod.h
67608 @@ -31,6 +31,8 @@
67609 * usually useless though. */
67610 extern int __request_module(bool wait, const char *name, ...) \
67611 __attribute__((format(printf, 2, 3)));
67612 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
67613 + __attribute__((format(printf, 3, 4)));
67614 #define request_module(mod...) __request_module(true, mod)
67615 #define request_module_nowait(mod...) __request_module(false, mod)
67616 #define try_then_request_module(x, mod...) \
67617 diff --git a/include/linux/kobject.h b/include/linux/kobject.h
67618 index 58ae8e0..3950d3c 100644
67619 --- a/include/linux/kobject.h
67620 +++ b/include/linux/kobject.h
67621 @@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
67622
67623 struct kobj_type {
67624 void (*release)(struct kobject *kobj);
67625 - struct sysfs_ops *sysfs_ops;
67626 + const struct sysfs_ops *sysfs_ops;
67627 struct attribute **default_attrs;
67628 };
67629
67630 @@ -118,9 +118,9 @@ struct kobj_uevent_env {
67631 };
67632
67633 struct kset_uevent_ops {
67634 - int (*filter)(struct kset *kset, struct kobject *kobj);
67635 - const char *(*name)(struct kset *kset, struct kobject *kobj);
67636 - int (*uevent)(struct kset *kset, struct kobject *kobj,
67637 + int (* const filter)(struct kset *kset, struct kobject *kobj);
67638 + const char *(* const name)(struct kset *kset, struct kobject *kobj);
67639 + int (* const uevent)(struct kset *kset, struct kobject *kobj,
67640 struct kobj_uevent_env *env);
67641 };
67642
67643 @@ -132,7 +132,7 @@ struct kobj_attribute {
67644 const char *buf, size_t count);
67645 };
67646
67647 -extern struct sysfs_ops kobj_sysfs_ops;
67648 +extern const struct sysfs_ops kobj_sysfs_ops;
67649
67650 /**
67651 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
67652 @@ -155,14 +155,14 @@ struct kset {
67653 struct list_head list;
67654 spinlock_t list_lock;
67655 struct kobject kobj;
67656 - struct kset_uevent_ops *uevent_ops;
67657 + const struct kset_uevent_ops *uevent_ops;
67658 };
67659
67660 extern void kset_init(struct kset *kset);
67661 extern int __must_check kset_register(struct kset *kset);
67662 extern void kset_unregister(struct kset *kset);
67663 extern struct kset * __must_check kset_create_and_add(const char *name,
67664 - struct kset_uevent_ops *u,
67665 + const struct kset_uevent_ops *u,
67666 struct kobject *parent_kobj);
67667
67668 static inline struct kset *to_kset(struct kobject *kobj)
67669 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
67670 index c728a50..752d821 100644
67671 --- a/include/linux/kvm_host.h
67672 +++ b/include/linux/kvm_host.h
67673 @@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
67674 void vcpu_load(struct kvm_vcpu *vcpu);
67675 void vcpu_put(struct kvm_vcpu *vcpu);
67676
67677 -int kvm_init(void *opaque, unsigned int vcpu_size,
67678 +int kvm_init(const void *opaque, unsigned int vcpu_size,
67679 struct module *module);
67680 void kvm_exit(void);
67681
67682 @@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
67683 struct kvm_guest_debug *dbg);
67684 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
67685
67686 -int kvm_arch_init(void *opaque);
67687 +int kvm_arch_init(const void *opaque);
67688 void kvm_arch_exit(void);
67689
67690 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
67691 diff --git a/include/linux/libata.h b/include/linux/libata.h
67692 index a069916..223edde 100644
67693 --- a/include/linux/libata.h
67694 +++ b/include/linux/libata.h
67695 @@ -525,11 +525,11 @@ struct ata_ioports {
67696
67697 struct ata_host {
67698 spinlock_t lock;
67699 - struct device *dev;
67700 + struct device *dev;
67701 void __iomem * const *iomap;
67702 unsigned int n_ports;
67703 void *private_data;
67704 - struct ata_port_operations *ops;
67705 + const struct ata_port_operations *ops;
67706 unsigned long flags;
67707 #ifdef CONFIG_ATA_ACPI
67708 acpi_handle acpi_handle;
67709 @@ -710,7 +710,7 @@ struct ata_link {
67710
67711 struct ata_port {
67712 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
67713 - struct ata_port_operations *ops;
67714 + const struct ata_port_operations *ops;
67715 spinlock_t *lock;
67716 /* Flags owned by the EH context. Only EH should touch these once the
67717 port is active */
67718 @@ -884,7 +884,7 @@ struct ata_port_operations {
67719 * fields must be pointers.
67720 */
67721 const struct ata_port_operations *inherits;
67722 -};
67723 +} __do_const;
67724
67725 struct ata_port_info {
67726 unsigned long flags;
67727 @@ -892,7 +892,7 @@ struct ata_port_info {
67728 unsigned long pio_mask;
67729 unsigned long mwdma_mask;
67730 unsigned long udma_mask;
67731 - struct ata_port_operations *port_ops;
67732 + const struct ata_port_operations *port_ops;
67733 void *private_data;
67734 };
67735
67736 @@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timing_normal[];
67737 extern const unsigned long sata_deb_timing_hotplug[];
67738 extern const unsigned long sata_deb_timing_long[];
67739
67740 -extern struct ata_port_operations ata_dummy_port_ops;
67741 +extern const struct ata_port_operations ata_dummy_port_ops;
67742 extern const struct ata_port_info ata_dummy_port_info;
67743
67744 static inline const unsigned long *
67745 @@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_host *host, int irq,
67746 struct scsi_host_template *sht);
67747 extern void ata_host_detach(struct ata_host *host);
67748 extern void ata_host_init(struct ata_host *, struct device *,
67749 - unsigned long, struct ata_port_operations *);
67750 + unsigned long, const struct ata_port_operations *);
67751 extern int ata_scsi_detect(struct scsi_host_template *sht);
67752 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
67753 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
67754 diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
67755 index fbc48f8..0886e57 100644
67756 --- a/include/linux/lockd/bind.h
67757 +++ b/include/linux/lockd/bind.h
67758 @@ -23,13 +23,13 @@ struct svc_rqst;
67759 * This is the set of functions for lockd->nfsd communication
67760 */
67761 struct nlmsvc_binding {
67762 - __be32 (*fopen)(struct svc_rqst *,
67763 + __be32 (* const fopen)(struct svc_rqst *,
67764 struct nfs_fh *,
67765 struct file **);
67766 - void (*fclose)(struct file *);
67767 + void (* const fclose)(struct file *);
67768 };
67769
67770 -extern struct nlmsvc_binding * nlmsvc_ops;
67771 +extern const struct nlmsvc_binding * nlmsvc_ops;
67772
67773 /*
67774 * Similar to nfs_client_initdata, but without the NFS-specific
67775 diff --git a/include/linux/mca.h b/include/linux/mca.h
67776 index 3797270..7765ede 100644
67777 --- a/include/linux/mca.h
67778 +++ b/include/linux/mca.h
67779 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
67780 int region);
67781 void * (*mca_transform_memory)(struct mca_device *,
67782 void *memory);
67783 -};
67784 +} __no_const;
67785
67786 struct mca_bus {
67787 u64 default_dma_mask;
67788 diff --git a/include/linux/memory.h b/include/linux/memory.h
67789 index 37fa19b..b597c85 100644
67790 --- a/include/linux/memory.h
67791 +++ b/include/linux/memory.h
67792 @@ -108,7 +108,7 @@ struct memory_accessor {
67793 size_t count);
67794 ssize_t (*write)(struct memory_accessor *, const char *buf,
67795 off_t offset, size_t count);
67796 -};
67797 +} __no_const;
67798
67799 /*
67800 * Kernel text modification mutex, used for code patching. Users of this lock
67801 diff --git a/include/linux/mm.h b/include/linux/mm.h
67802 index 11e5be6..1ff2423 100644
67803 --- a/include/linux/mm.h
67804 +++ b/include/linux/mm.h
67805 @@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void *objp);
67806
67807 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
67808 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
67809 +
67810 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
67811 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
67812 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
67813 +#else
67814 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
67815 +#endif
67816 +
67817 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
67818 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
67819
67820 @@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
67821 int set_page_dirty_lock(struct page *page);
67822 int clear_page_dirty_for_io(struct page *page);
67823
67824 -/* Is the vma a continuation of the stack vma above it? */
67825 -static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
67826 -{
67827 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
67828 -}
67829 -
67830 extern unsigned long move_page_tables(struct vm_area_struct *vma,
67831 unsigned long old_addr, struct vm_area_struct *new_vma,
67832 unsigned long new_addr, unsigned long len);
67833 @@ -890,6 +891,8 @@ struct shrinker {
67834 extern void register_shrinker(struct shrinker *);
67835 extern void unregister_shrinker(struct shrinker *);
67836
67837 +pgprot_t vm_get_page_prot(unsigned long vm_flags);
67838 +
67839 int vma_wants_writenotify(struct vm_area_struct *vma);
67840
67841 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
67842 @@ -1162,6 +1165,7 @@ out:
67843 }
67844
67845 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
67846 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
67847
67848 extern unsigned long do_brk(unsigned long, unsigned long);
67849
67850 @@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
67851 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
67852 struct vm_area_struct **pprev);
67853
67854 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
67855 +extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
67856 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
67857 +
67858 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
67859 NULL if none. Assume start_addr < end_addr. */
67860 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
67861 @@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
67862 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
67863 }
67864
67865 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
67866 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
67867 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
67868 unsigned long pfn, unsigned long size, pgprot_t);
67869 @@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long pfn, int trapno);
67870 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
67871 extern int sysctl_memory_failure_early_kill;
67872 extern int sysctl_memory_failure_recovery;
67873 -extern atomic_long_t mce_bad_pages;
67874 +extern atomic_long_unchecked_t mce_bad_pages;
67875 +
67876 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
67877 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
67878 +#else
67879 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
67880 +#endif
67881
67882 #endif /* __KERNEL__ */
67883 #endif /* _LINUX_MM_H */
67884 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
67885 index 9d12ed5..6d9707a 100644
67886 --- a/include/linux/mm_types.h
67887 +++ b/include/linux/mm_types.h
67888 @@ -186,6 +186,8 @@ struct vm_area_struct {
67889 #ifdef CONFIG_NUMA
67890 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
67891 #endif
67892 +
67893 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
67894 };
67895
67896 struct core_thread {
67897 @@ -287,6 +289,24 @@ struct mm_struct {
67898 #ifdef CONFIG_MMU_NOTIFIER
67899 struct mmu_notifier_mm *mmu_notifier_mm;
67900 #endif
67901 +
67902 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
67903 + unsigned long pax_flags;
67904 +#endif
67905 +
67906 +#ifdef CONFIG_PAX_DLRESOLVE
67907 + unsigned long call_dl_resolve;
67908 +#endif
67909 +
67910 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
67911 + unsigned long call_syscall;
67912 +#endif
67913 +
67914 +#ifdef CONFIG_PAX_ASLR
67915 + unsigned long delta_mmap; /* randomized offset */
67916 + unsigned long delta_stack; /* randomized offset */
67917 +#endif
67918 +
67919 };
67920
67921 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
67922 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
67923 index 4e02ee2..afb159e 100644
67924 --- a/include/linux/mmu_notifier.h
67925 +++ b/include/linux/mmu_notifier.h
67926 @@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
67927 */
67928 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
67929 ({ \
67930 - pte_t __pte; \
67931 + pte_t ___pte; \
67932 struct vm_area_struct *___vma = __vma; \
67933 unsigned long ___address = __address; \
67934 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
67935 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
67936 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
67937 - __pte; \
67938 + ___pte; \
67939 })
67940
67941 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
67942 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
67943 index 6c31a2a..4b0e930 100644
67944 --- a/include/linux/mmzone.h
67945 +++ b/include/linux/mmzone.h
67946 @@ -350,7 +350,7 @@ struct zone {
67947 unsigned long flags; /* zone flags, see below */
67948
67949 /* Zone statistics */
67950 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
67951 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
67952
67953 /*
67954 * prev_priority holds the scanning priority for this zone. It is
67955 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
67956 index f58e9d8..3503935 100644
67957 --- a/include/linux/mod_devicetable.h
67958 +++ b/include/linux/mod_devicetable.h
67959 @@ -12,7 +12,7 @@
67960 typedef unsigned long kernel_ulong_t;
67961 #endif
67962
67963 -#define PCI_ANY_ID (~0)
67964 +#define PCI_ANY_ID ((__u16)~0)
67965
67966 struct pci_device_id {
67967 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
67968 @@ -131,7 +131,7 @@ struct usb_device_id {
67969 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
67970 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
67971
67972 -#define HID_ANY_ID (~0)
67973 +#define HID_ANY_ID (~0U)
67974
67975 struct hid_device_id {
67976 __u16 bus;
67977 diff --git a/include/linux/module.h b/include/linux/module.h
67978 index 482efc8..642032b 100644
67979 --- a/include/linux/module.h
67980 +++ b/include/linux/module.h
67981 @@ -16,6 +16,7 @@
67982 #include <linux/kobject.h>
67983 #include <linux/moduleparam.h>
67984 #include <linux/tracepoint.h>
67985 +#include <linux/fs.h>
67986
67987 #include <asm/local.h>
67988 #include <asm/module.h>
67989 @@ -287,16 +288,16 @@ struct module
67990 int (*init)(void);
67991
67992 /* If this is non-NULL, vfree after init() returns */
67993 - void *module_init;
67994 + void *module_init_rx, *module_init_rw;
67995
67996 /* Here is the actual code + data, vfree'd on unload. */
67997 - void *module_core;
67998 + void *module_core_rx, *module_core_rw;
67999
68000 /* Here are the sizes of the init and core sections */
68001 - unsigned int init_size, core_size;
68002 + unsigned int init_size_rw, core_size_rw;
68003
68004 /* The size of the executable code in each section. */
68005 - unsigned int init_text_size, core_text_size;
68006 + unsigned int init_size_rx, core_size_rx;
68007
68008 /* Arch-specific module values */
68009 struct mod_arch_specific arch;
68010 @@ -345,6 +346,10 @@ struct module
68011 #ifdef CONFIG_EVENT_TRACING
68012 struct ftrace_event_call *trace_events;
68013 unsigned int num_trace_events;
68014 + struct file_operations trace_id;
68015 + struct file_operations trace_enable;
68016 + struct file_operations trace_format;
68017 + struct file_operations trace_filter;
68018 #endif
68019 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
68020 unsigned long *ftrace_callsites;
68021 @@ -393,16 +398,46 @@ struct module *__module_address(unsigned long addr);
68022 bool is_module_address(unsigned long addr);
68023 bool is_module_text_address(unsigned long addr);
68024
68025 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
68026 +{
68027 +
68028 +#ifdef CONFIG_PAX_KERNEXEC
68029 + if (ktla_ktva(addr) >= (unsigned long)start &&
68030 + ktla_ktva(addr) < (unsigned long)start + size)
68031 + return 1;
68032 +#endif
68033 +
68034 + return ((void *)addr >= start && (void *)addr < start + size);
68035 +}
68036 +
68037 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
68038 +{
68039 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
68040 +}
68041 +
68042 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
68043 +{
68044 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
68045 +}
68046 +
68047 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
68048 +{
68049 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
68050 +}
68051 +
68052 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
68053 +{
68054 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
68055 +}
68056 +
68057 static inline int within_module_core(unsigned long addr, struct module *mod)
68058 {
68059 - return (unsigned long)mod->module_core <= addr &&
68060 - addr < (unsigned long)mod->module_core + mod->core_size;
68061 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
68062 }
68063
68064 static inline int within_module_init(unsigned long addr, struct module *mod)
68065 {
68066 - return (unsigned long)mod->module_init <= addr &&
68067 - addr < (unsigned long)mod->module_init + mod->init_size;
68068 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
68069 }
68070
68071 /* Search for module by name: must hold module_mutex. */
68072 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
68073 index c1f40c2..682ca53 100644
68074 --- a/include/linux/moduleloader.h
68075 +++ b/include/linux/moduleloader.h
68076 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
68077 sections. Returns NULL on failure. */
68078 void *module_alloc(unsigned long size);
68079
68080 +#ifdef CONFIG_PAX_KERNEXEC
68081 +void *module_alloc_exec(unsigned long size);
68082 +#else
68083 +#define module_alloc_exec(x) module_alloc(x)
68084 +#endif
68085 +
68086 /* Free memory returned from module_alloc. */
68087 void module_free(struct module *mod, void *module_region);
68088
68089 +#ifdef CONFIG_PAX_KERNEXEC
68090 +void module_free_exec(struct module *mod, void *module_region);
68091 +#else
68092 +#define module_free_exec(x, y) module_free((x), (y))
68093 +#endif
68094 +
68095 /* Apply the given relocation to the (simplified) ELF. Return -error
68096 or 0. */
68097 int apply_relocate(Elf_Shdr *sechdrs,
68098 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
68099 index 82a9124..8a5f622 100644
68100 --- a/include/linux/moduleparam.h
68101 +++ b/include/linux/moduleparam.h
68102 @@ -132,7 +132,7 @@ struct kparam_array
68103
68104 /* Actually copy string: maxlen param is usually sizeof(string). */
68105 #define module_param_string(name, string, len, perm) \
68106 - static const struct kparam_string __param_string_##name \
68107 + static const struct kparam_string __param_string_##name __used \
68108 = { len, string }; \
68109 __module_param_call(MODULE_PARAM_PREFIX, name, \
68110 param_set_copystring, param_get_string, \
68111 @@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffer, struct kernel_param *kp);
68112
68113 /* Comma-separated array: *nump is set to number they actually specified. */
68114 #define module_param_array_named(name, array, type, nump, perm) \
68115 - static const struct kparam_array __param_arr_##name \
68116 + static const struct kparam_array __param_arr_##name __used \
68117 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
68118 sizeof(array[0]), array }; \
68119 __module_param_call(MODULE_PARAM_PREFIX, name, \
68120 diff --git a/include/linux/mutex.h b/include/linux/mutex.h
68121 index 878cab4..c92cb3e 100644
68122 --- a/include/linux/mutex.h
68123 +++ b/include/linux/mutex.h
68124 @@ -51,7 +51,7 @@ struct mutex {
68125 spinlock_t wait_lock;
68126 struct list_head wait_list;
68127 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
68128 - struct thread_info *owner;
68129 + struct task_struct *owner;
68130 #endif
68131 #ifdef CONFIG_DEBUG_MUTEXES
68132 const char *name;
68133 diff --git a/include/linux/namei.h b/include/linux/namei.h
68134 index ec0f607..d19e675 100644
68135 --- a/include/linux/namei.h
68136 +++ b/include/linux/namei.h
68137 @@ -22,7 +22,7 @@ struct nameidata {
68138 unsigned int flags;
68139 int last_type;
68140 unsigned depth;
68141 - char *saved_names[MAX_NESTED_LINKS + 1];
68142 + const char *saved_names[MAX_NESTED_LINKS + 1];
68143
68144 /* Intent data */
68145 union {
68146 @@ -84,12 +84,12 @@ extern int follow_up(struct path *);
68147 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
68148 extern void unlock_rename(struct dentry *, struct dentry *);
68149
68150 -static inline void nd_set_link(struct nameidata *nd, char *path)
68151 +static inline void nd_set_link(struct nameidata *nd, const char *path)
68152 {
68153 nd->saved_names[nd->depth] = path;
68154 }
68155
68156 -static inline char *nd_get_link(struct nameidata *nd)
68157 +static inline const char *nd_get_link(const struct nameidata *nd)
68158 {
68159 return nd->saved_names[nd->depth];
68160 }
68161 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
68162 index 9d7e8f7..04428c5 100644
68163 --- a/include/linux/netdevice.h
68164 +++ b/include/linux/netdevice.h
68165 @@ -637,6 +637,7 @@ struct net_device_ops {
68166 u16 xid);
68167 #endif
68168 };
68169 +typedef struct net_device_ops __no_const net_device_ops_no_const;
68170
68171 /*
68172 * The DEVICE structure.
68173 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
68174 new file mode 100644
68175 index 0000000..33f4af8
68176 --- /dev/null
68177 +++ b/include/linux/netfilter/xt_gradm.h
68178 @@ -0,0 +1,9 @@
68179 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
68180 +#define _LINUX_NETFILTER_XT_GRADM_H 1
68181 +
68182 +struct xt_gradm_mtinfo {
68183 + __u16 flags;
68184 + __u16 invflags;
68185 +};
68186 +
68187 +#endif
68188 diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
68189 index b359c4a..c08b334 100644
68190 --- a/include/linux/nodemask.h
68191 +++ b/include/linux/nodemask.h
68192 @@ -464,11 +464,11 @@ static inline int num_node_state(enum node_states state)
68193
68194 #define any_online_node(mask) \
68195 ({ \
68196 - int node; \
68197 - for_each_node_mask(node, (mask)) \
68198 - if (node_online(node)) \
68199 + int __node; \
68200 + for_each_node_mask(__node, (mask)) \
68201 + if (node_online(__node)) \
68202 break; \
68203 - node; \
68204 + __node; \
68205 })
68206
68207 #define num_online_nodes() num_node_state(N_ONLINE)
68208 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
68209 index 5171639..7cf4235 100644
68210 --- a/include/linux/oprofile.h
68211 +++ b/include/linux/oprofile.h
68212 @@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
68213 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
68214 char const * name, ulong * val);
68215
68216 -/** Create a file for read-only access to an atomic_t. */
68217 +/** Create a file for read-only access to an atomic_unchecked_t. */
68218 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
68219 - char const * name, atomic_t * val);
68220 + char const * name, atomic_unchecked_t * val);
68221
68222 /** create a directory */
68223 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
68224 diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
68225 index 3c62ed4..8924c7c 100644
68226 --- a/include/linux/pagemap.h
68227 +++ b/include/linux/pagemap.h
68228 @@ -425,7 +425,9 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
68229 if (((unsigned long)uaddr & PAGE_MASK) !=
68230 ((unsigned long)end & PAGE_MASK))
68231 ret = __get_user(c, end);
68232 + (void)c;
68233 }
68234 + (void)c;
68235 return ret;
68236 }
68237
68238 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
68239 index 81c9689..a567a55 100644
68240 --- a/include/linux/perf_event.h
68241 +++ b/include/linux/perf_event.h
68242 @@ -476,7 +476,7 @@ struct hw_perf_event {
68243 struct hrtimer hrtimer;
68244 };
68245 };
68246 - atomic64_t prev_count;
68247 + atomic64_unchecked_t prev_count;
68248 u64 sample_period;
68249 u64 last_period;
68250 atomic64_t period_left;
68251 @@ -557,7 +557,7 @@ struct perf_event {
68252 const struct pmu *pmu;
68253
68254 enum perf_event_active_state state;
68255 - atomic64_t count;
68256 + atomic64_unchecked_t count;
68257
68258 /*
68259 * These are the total time in nanoseconds that the event
68260 @@ -595,8 +595,8 @@ struct perf_event {
68261 * These accumulate total time (in nanoseconds) that children
68262 * events have been enabled and running, respectively.
68263 */
68264 - atomic64_t child_total_time_enabled;
68265 - atomic64_t child_total_time_running;
68266 + atomic64_unchecked_t child_total_time_enabled;
68267 + atomic64_unchecked_t child_total_time_running;
68268
68269 /*
68270 * Protect attach/detach and child_list:
68271 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
68272 index b43a9e0..b77d869 100644
68273 --- a/include/linux/pipe_fs_i.h
68274 +++ b/include/linux/pipe_fs_i.h
68275 @@ -46,9 +46,9 @@ struct pipe_inode_info {
68276 wait_queue_head_t wait;
68277 unsigned int nrbufs, curbuf;
68278 struct page *tmp_page;
68279 - unsigned int readers;
68280 - unsigned int writers;
68281 - unsigned int waiting_writers;
68282 + atomic_t readers;
68283 + atomic_t writers;
68284 + atomic_t waiting_writers;
68285 unsigned int r_counter;
68286 unsigned int w_counter;
68287 struct fasync_struct *fasync_readers;
68288 diff --git a/include/linux/poison.h b/include/linux/poison.h
68289 index 34066ff..e95d744 100644
68290 --- a/include/linux/poison.h
68291 +++ b/include/linux/poison.h
68292 @@ -19,8 +19,8 @@
68293 * under normal circumstances, used to verify that nobody uses
68294 * non-initialized list entries.
68295 */
68296 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
68297 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
68298 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
68299 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
68300
68301 /********** include/linux/timer.h **********/
68302 /*
68303 diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
68304 index 4f71bf4..77ffa64 100644
68305 --- a/include/linux/posix-timers.h
68306 +++ b/include/linux/posix-timers.h
68307 @@ -67,7 +67,7 @@ struct k_itimer {
68308 };
68309
68310 struct k_clock {
68311 - int res; /* in nanoseconds */
68312 + const int res; /* in nanoseconds */
68313 int (*clock_getres) (const clockid_t which_clock, struct timespec *tp);
68314 int (*clock_set) (const clockid_t which_clock, struct timespec * tp);
68315 int (*clock_get) (const clockid_t which_clock, struct timespec * tp);
68316 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
68317 index 72b1a10..13303a9 100644
68318 --- a/include/linux/preempt.h
68319 +++ b/include/linux/preempt.h
68320 @@ -110,7 +110,7 @@ struct preempt_ops {
68321 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
68322 void (*sched_out)(struct preempt_notifier *notifier,
68323 struct task_struct *next);
68324 -};
68325 +} __no_const;
68326
68327 /**
68328 * preempt_notifier - key for installing preemption notifiers
68329 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
68330 index 379eaed..1bf73e3 100644
68331 --- a/include/linux/proc_fs.h
68332 +++ b/include/linux/proc_fs.h
68333 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
68334 return proc_create_data(name, mode, parent, proc_fops, NULL);
68335 }
68336
68337 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
68338 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
68339 +{
68340 +#ifdef CONFIG_GRKERNSEC_PROC_USER
68341 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
68342 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68343 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
68344 +#else
68345 + return proc_create_data(name, mode, parent, proc_fops, NULL);
68346 +#endif
68347 +}
68348 +
68349 +
68350 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
68351 mode_t mode, struct proc_dir_entry *base,
68352 read_proc_t *read_proc, void * data)
68353 @@ -256,7 +269,7 @@ union proc_op {
68354 int (*proc_show)(struct seq_file *m,
68355 struct pid_namespace *ns, struct pid *pid,
68356 struct task_struct *task);
68357 -};
68358 +} __no_const;
68359
68360 struct ctl_table_header;
68361 struct ctl_table;
68362 diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
68363 index 7456d7d..6c1cfc9 100644
68364 --- a/include/linux/ptrace.h
68365 +++ b/include/linux/ptrace.h
68366 @@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_struct *child);
68367 extern void exit_ptrace(struct task_struct *tracer);
68368 #define PTRACE_MODE_READ 1
68369 #define PTRACE_MODE_ATTACH 2
68370 -/* Returns 0 on success, -errno on denial. */
68371 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
68372 /* Returns true on success, false on denial. */
68373 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
68374 +/* Returns true on success, false on denial. */
68375 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
68376
68377 static inline int ptrace_reparented(struct task_struct *child)
68378 {
68379 diff --git a/include/linux/random.h b/include/linux/random.h
68380 index 2948046..3262567 100644
68381 --- a/include/linux/random.h
68382 +++ b/include/linux/random.h
68383 @@ -63,6 +63,11 @@ unsigned long randomize_range(unsigned long start, unsigned long end, unsigned l
68384 u32 random32(void);
68385 void srandom32(u32 seed);
68386
68387 +static inline unsigned long pax_get_random_long(void)
68388 +{
68389 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
68390 +}
68391 +
68392 #endif /* __KERNEL___ */
68393
68394 #endif /* _LINUX_RANDOM_H */
68395 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
68396 index 988e55f..17cb4ef 100644
68397 --- a/include/linux/reboot.h
68398 +++ b/include/linux/reboot.h
68399 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
68400 * Architecture-specific implementations of sys_reboot commands.
68401 */
68402
68403 -extern void machine_restart(char *cmd);
68404 -extern void machine_halt(void);
68405 -extern void machine_power_off(void);
68406 +extern void machine_restart(char *cmd) __noreturn;
68407 +extern void machine_halt(void) __noreturn;
68408 +extern void machine_power_off(void) __noreturn;
68409
68410 extern void machine_shutdown(void);
68411 struct pt_regs;
68412 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
68413 */
68414
68415 extern void kernel_restart_prepare(char *cmd);
68416 -extern void kernel_restart(char *cmd);
68417 -extern void kernel_halt(void);
68418 -extern void kernel_power_off(void);
68419 +extern void kernel_restart(char *cmd) __noreturn;
68420 +extern void kernel_halt(void) __noreturn;
68421 +extern void kernel_power_off(void) __noreturn;
68422
68423 void ctrl_alt_del(void);
68424
68425 @@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
68426 * Emergency restart, callable from an interrupt handler.
68427 */
68428
68429 -extern void emergency_restart(void);
68430 +extern void emergency_restart(void) __noreturn;
68431 #include <asm/emergency-restart.h>
68432
68433 #endif
68434 diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
68435 index dd31e7b..5b03c5c 100644
68436 --- a/include/linux/reiserfs_fs.h
68437 +++ b/include/linux/reiserfs_fs.h
68438 @@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
68439 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
68440
68441 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
68442 -#define get_generation(s) atomic_read (&fs_generation(s))
68443 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
68444 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
68445 #define __fs_changed(gen,s) (gen != get_generation (s))
68446 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
68447 @@ -1534,24 +1534,24 @@ static inline struct super_block *sb_from_bi(struct buffer_info *bi)
68448 */
68449
68450 struct item_operations {
68451 - int (*bytes_number) (struct item_head * ih, int block_size);
68452 - void (*decrement_key) (struct cpu_key *);
68453 - int (*is_left_mergeable) (struct reiserfs_key * ih,
68454 + int (* const bytes_number) (struct item_head * ih, int block_size);
68455 + void (* const decrement_key) (struct cpu_key *);
68456 + int (* const is_left_mergeable) (struct reiserfs_key * ih,
68457 unsigned long bsize);
68458 - void (*print_item) (struct item_head *, char *item);
68459 - void (*check_item) (struct item_head *, char *item);
68460 + void (* const print_item) (struct item_head *, char *item);
68461 + void (* const check_item) (struct item_head *, char *item);
68462
68463 - int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
68464 + int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
68465 int is_affected, int insert_size);
68466 - int (*check_left) (struct virtual_item * vi, int free,
68467 + int (* const check_left) (struct virtual_item * vi, int free,
68468 int start_skip, int end_skip);
68469 - int (*check_right) (struct virtual_item * vi, int free);
68470 - int (*part_size) (struct virtual_item * vi, int from, int to);
68471 - int (*unit_num) (struct virtual_item * vi);
68472 - void (*print_vi) (struct virtual_item * vi);
68473 + int (* const check_right) (struct virtual_item * vi, int free);
68474 + int (* const part_size) (struct virtual_item * vi, int from, int to);
68475 + int (* const unit_num) (struct virtual_item * vi);
68476 + void (* const print_vi) (struct virtual_item * vi);
68477 };
68478
68479 -extern struct item_operations *item_ops[TYPE_ANY + 1];
68480 +extern const struct item_operations * const item_ops[TYPE_ANY + 1];
68481
68482 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
68483 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
68484 diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
68485 index dab68bb..0688727 100644
68486 --- a/include/linux/reiserfs_fs_sb.h
68487 +++ b/include/linux/reiserfs_fs_sb.h
68488 @@ -377,7 +377,7 @@ struct reiserfs_sb_info {
68489 /* Comment? -Hans */
68490 wait_queue_head_t s_wait;
68491 /* To be obsoleted soon by per buffer seals.. -Hans */
68492 - atomic_t s_generation_counter; // increased by one every time the
68493 + atomic_unchecked_t s_generation_counter; // increased by one every time the
68494 // tree gets re-balanced
68495 unsigned long s_properties; /* File system properties. Currently holds
68496 on-disk FS format */
68497 diff --git a/include/linux/relay.h b/include/linux/relay.h
68498 index 14a86bc..17d0700 100644
68499 --- a/include/linux/relay.h
68500 +++ b/include/linux/relay.h
68501 @@ -159,7 +159,7 @@ struct rchan_callbacks
68502 * The callback should return 0 if successful, negative if not.
68503 */
68504 int (*remove_buf_file)(struct dentry *dentry);
68505 -};
68506 +} __no_const;
68507
68508 /*
68509 * CONFIG_RELAY kernel API, kernel/relay.c
68510 diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
68511 index 3392c59..a746428 100644
68512 --- a/include/linux/rfkill.h
68513 +++ b/include/linux/rfkill.h
68514 @@ -144,6 +144,7 @@ struct rfkill_ops {
68515 void (*query)(struct rfkill *rfkill, void *data);
68516 int (*set_block)(void *data, bool blocked);
68517 };
68518 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
68519
68520 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
68521 /**
68522 diff --git a/include/linux/sched.h b/include/linux/sched.h
68523 index 71849bf..40217dc 100644
68524 --- a/include/linux/sched.h
68525 +++ b/include/linux/sched.h
68526 @@ -101,6 +101,7 @@ struct bio;
68527 struct fs_struct;
68528 struct bts_context;
68529 struct perf_event_context;
68530 +struct linux_binprm;
68531
68532 /*
68533 * List of flags we want to share for kernel threads,
68534 @@ -350,7 +351,7 @@ extern signed long schedule_timeout_killable(signed long timeout);
68535 extern signed long schedule_timeout_uninterruptible(signed long timeout);
68536 asmlinkage void __schedule(void);
68537 asmlinkage void schedule(void);
68538 -extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
68539 +extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
68540
68541 struct nsproxy;
68542 struct user_namespace;
68543 @@ -371,9 +372,12 @@ struct user_namespace;
68544 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
68545
68546 extern int sysctl_max_map_count;
68547 +extern unsigned long sysctl_heap_stack_gap;
68548
68549 #include <linux/aio.h>
68550
68551 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
68552 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
68553 extern unsigned long
68554 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
68555 unsigned long, unsigned long);
68556 @@ -666,6 +670,16 @@ struct signal_struct {
68557 struct tty_audit_buf *tty_audit_buf;
68558 #endif
68559
68560 +#ifdef CONFIG_GRKERNSEC
68561 + u32 curr_ip;
68562 + u32 saved_ip;
68563 + u32 gr_saddr;
68564 + u32 gr_daddr;
68565 + u16 gr_sport;
68566 + u16 gr_dport;
68567 + u8 used_accept:1;
68568 +#endif
68569 +
68570 int oom_adj; /* OOM kill score adjustment (bit shift) */
68571 };
68572
68573 @@ -723,6 +737,11 @@ struct user_struct {
68574 struct key *session_keyring; /* UID's default session keyring */
68575 #endif
68576
68577 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
68578 + unsigned int banned;
68579 + unsigned long ban_expires;
68580 +#endif
68581 +
68582 /* Hash table maintenance information */
68583 struct hlist_node uidhash_node;
68584 uid_t uid;
68585 @@ -1328,8 +1347,8 @@ struct task_struct {
68586 struct list_head thread_group;
68587
68588 struct completion *vfork_done; /* for vfork() */
68589 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
68590 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
68591 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
68592 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
68593
68594 cputime_t utime, stime, utimescaled, stimescaled;
68595 cputime_t gtime;
68596 @@ -1343,16 +1362,6 @@ struct task_struct {
68597 struct task_cputime cputime_expires;
68598 struct list_head cpu_timers[3];
68599
68600 -/* process credentials */
68601 - const struct cred *real_cred; /* objective and real subjective task
68602 - * credentials (COW) */
68603 - const struct cred *cred; /* effective (overridable) subjective task
68604 - * credentials (COW) */
68605 - struct mutex cred_guard_mutex; /* guard against foreign influences on
68606 - * credential calculations
68607 - * (notably. ptrace) */
68608 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
68609 -
68610 char comm[TASK_COMM_LEN]; /* executable name excluding path
68611 - access with [gs]et_task_comm (which lock
68612 it with task_lock())
68613 @@ -1369,6 +1378,10 @@ struct task_struct {
68614 #endif
68615 /* CPU-specific state of this task */
68616 struct thread_struct thread;
68617 +/* thread_info moved to task_struct */
68618 +#ifdef CONFIG_X86
68619 + struct thread_info tinfo;
68620 +#endif
68621 /* filesystem information */
68622 struct fs_struct *fs;
68623 /* open file information */
68624 @@ -1436,6 +1449,15 @@ struct task_struct {
68625 int hardirq_context;
68626 int softirq_context;
68627 #endif
68628 +
68629 +/* process credentials */
68630 + const struct cred *real_cred; /* objective and real subjective task
68631 + * credentials (COW) */
68632 + struct mutex cred_guard_mutex; /* guard against foreign influences on
68633 + * credential calculations
68634 + * (notably. ptrace) */
68635 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
68636 +
68637 #ifdef CONFIG_LOCKDEP
68638 # define MAX_LOCK_DEPTH 48UL
68639 u64 curr_chain_key;
68640 @@ -1456,6 +1478,9 @@ struct task_struct {
68641
68642 struct backing_dev_info *backing_dev_info;
68643
68644 + const struct cred *cred; /* effective (overridable) subjective task
68645 + * credentials (COW) */
68646 +
68647 struct io_context *io_context;
68648
68649 unsigned long ptrace_message;
68650 @@ -1519,6 +1544,21 @@ struct task_struct {
68651 unsigned long default_timer_slack_ns;
68652
68653 struct list_head *scm_work_list;
68654 +
68655 +#ifdef CONFIG_GRKERNSEC
68656 + /* grsecurity */
68657 + struct dentry *gr_chroot_dentry;
68658 + struct acl_subject_label *acl;
68659 + struct acl_role_label *role;
68660 + struct file *exec_file;
68661 + u16 acl_role_id;
68662 + /* is this the task that authenticated to the special role */
68663 + u8 acl_sp_role;
68664 + u8 is_writable;
68665 + u8 brute;
68666 + u8 gr_is_chrooted;
68667 +#endif
68668 +
68669 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
68670 /* Index of current stored adress in ret_stack */
68671 int curr_ret_stack;
68672 @@ -1542,6 +1582,57 @@ struct task_struct {
68673 #endif /* CONFIG_TRACING */
68674 };
68675
68676 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
68677 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
68678 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
68679 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
68680 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
68681 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
68682 +
68683 +#ifdef CONFIG_PAX_SOFTMODE
68684 +extern int pax_softmode;
68685 +#endif
68686 +
68687 +extern int pax_check_flags(unsigned long *);
68688 +
68689 +/* if tsk != current then task_lock must be held on it */
68690 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
68691 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
68692 +{
68693 + if (likely(tsk->mm))
68694 + return tsk->mm->pax_flags;
68695 + else
68696 + return 0UL;
68697 +}
68698 +
68699 +/* if tsk != current then task_lock must be held on it */
68700 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
68701 +{
68702 + if (likely(tsk->mm)) {
68703 + tsk->mm->pax_flags = flags;
68704 + return 0;
68705 + }
68706 + return -EINVAL;
68707 +}
68708 +#endif
68709 +
68710 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
68711 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
68712 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
68713 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
68714 +#endif
68715 +
68716 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
68717 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
68718 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
68719 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
68720 +
68721 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
68722 +extern void pax_track_stack(void);
68723 +#else
68724 +static inline void pax_track_stack(void) {}
68725 +#endif
68726 +
68727 /* Future-safe accessor for struct task_struct's cpus_allowed. */
68728 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
68729
68730 @@ -1740,7 +1831,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
68731 #define PF_DUMPCORE 0x00000200 /* dumped core */
68732 #define PF_SIGNALED 0x00000400 /* killed by a signal */
68733 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
68734 -#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
68735 +#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
68736 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
68737 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
68738 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
68739 @@ -1978,7 +2069,9 @@ void yield(void);
68740 extern struct exec_domain default_exec_domain;
68741
68742 union thread_union {
68743 +#ifndef CONFIG_X86
68744 struct thread_info thread_info;
68745 +#endif
68746 unsigned long stack[THREAD_SIZE/sizeof(long)];
68747 };
68748
68749 @@ -2011,6 +2104,7 @@ extern struct pid_namespace init_pid_ns;
68750 */
68751
68752 extern struct task_struct *find_task_by_vpid(pid_t nr);
68753 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
68754 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
68755 struct pid_namespace *ns);
68756
68757 @@ -2155,7 +2249,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
68758 extern void exit_itimers(struct signal_struct *);
68759 extern void flush_itimer_signals(void);
68760
68761 -extern NORET_TYPE void do_group_exit(int);
68762 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
68763
68764 extern void daemonize(const char *, ...);
68765 extern int allow_signal(int);
68766 @@ -2284,13 +2378,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
68767
68768 #endif
68769
68770 -static inline int object_is_on_stack(void *obj)
68771 +static inline int object_starts_on_stack(void *obj)
68772 {
68773 - void *stack = task_stack_page(current);
68774 + const void *stack = task_stack_page(current);
68775
68776 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
68777 }
68778
68779 +#ifdef CONFIG_PAX_USERCOPY
68780 +extern int object_is_on_stack(const void *obj, unsigned long len);
68781 +#endif
68782 +
68783 extern void thread_info_cache_init(void);
68784
68785 #ifdef CONFIG_DEBUG_STACK_USAGE
68786 diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
68787 index 1ee2c05..81b7ec4 100644
68788 --- a/include/linux/screen_info.h
68789 +++ b/include/linux/screen_info.h
68790 @@ -42,7 +42,8 @@ struct screen_info {
68791 __u16 pages; /* 0x32 */
68792 __u16 vesa_attributes; /* 0x34 */
68793 __u32 capabilities; /* 0x36 */
68794 - __u8 _reserved[6]; /* 0x3a */
68795 + __u16 vesapm_size; /* 0x3a */
68796 + __u8 _reserved[4]; /* 0x3c */
68797 } __attribute__((packed));
68798
68799 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
68800 diff --git a/include/linux/security.h b/include/linux/security.h
68801 index d40d23f..d739b08 100644
68802 --- a/include/linux/security.h
68803 +++ b/include/linux/security.h
68804 @@ -34,6 +34,7 @@
68805 #include <linux/key.h>
68806 #include <linux/xfrm.h>
68807 #include <linux/gfp.h>
68808 +#include <linux/grsecurity.h>
68809 #include <net/flow.h>
68810
68811 /* Maximum number of letters for an LSM name string */
68812 @@ -76,7 +77,7 @@ extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
68813 extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp);
68814 extern int cap_task_setioprio(struct task_struct *p, int ioprio);
68815 extern int cap_task_setnice(struct task_struct *p, int nice);
68816 -extern int cap_syslog(int type);
68817 +extern int cap_syslog(int type, bool from_file);
68818 extern int cap_vm_enough_memory(struct mm_struct *mm, long pages);
68819
68820 struct msghdr;
68821 @@ -1331,6 +1332,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
68822 * logging to the console.
68823 * See the syslog(2) manual page for an explanation of the @type values.
68824 * @type contains the type of action.
68825 + * @from_file indicates the context of action (if it came from /proc).
68826 * Return 0 if permission is granted.
68827 * @settime:
68828 * Check permission to change the system time.
68829 @@ -1445,7 +1447,7 @@ struct security_operations {
68830 int (*sysctl) (struct ctl_table *table, int op);
68831 int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
68832 int (*quota_on) (struct dentry *dentry);
68833 - int (*syslog) (int type);
68834 + int (*syslog) (int type, bool from_file);
68835 int (*settime) (struct timespec *ts, struct timezone *tz);
68836 int (*vm_enough_memory) (struct mm_struct *mm, long pages);
68837
68838 @@ -1740,7 +1742,7 @@ int security_acct(struct file *file);
68839 int security_sysctl(struct ctl_table *table, int op);
68840 int security_quotactl(int cmds, int type, int id, struct super_block *sb);
68841 int security_quota_on(struct dentry *dentry);
68842 -int security_syslog(int type);
68843 +int security_syslog(int type, bool from_file);
68844 int security_settime(struct timespec *ts, struct timezone *tz);
68845 int security_vm_enough_memory(long pages);
68846 int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
68847 @@ -1986,9 +1988,9 @@ static inline int security_quota_on(struct dentry *dentry)
68848 return 0;
68849 }
68850
68851 -static inline int security_syslog(int type)
68852 +static inline int security_syslog(int type, bool from_file)
68853 {
68854 - return cap_syslog(type);
68855 + return cap_syslog(type, from_file);
68856 }
68857
68858 static inline int security_settime(struct timespec *ts, struct timezone *tz)
68859 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
68860 index 8366d8f..2307490 100644
68861 --- a/include/linux/seq_file.h
68862 +++ b/include/linux/seq_file.h
68863 @@ -32,6 +32,7 @@ struct seq_operations {
68864 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
68865 int (*show) (struct seq_file *m, void *v);
68866 };
68867 +typedef struct seq_operations __no_const seq_operations_no_const;
68868
68869 #define SEQ_SKIP 1
68870
68871 diff --git a/include/linux/shm.h b/include/linux/shm.h
68872 index eca6235..c7417ed 100644
68873 --- a/include/linux/shm.h
68874 +++ b/include/linux/shm.h
68875 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the kernel */
68876 pid_t shm_cprid;
68877 pid_t shm_lprid;
68878 struct user_struct *mlock_user;
68879 +#ifdef CONFIG_GRKERNSEC
68880 + time_t shm_createtime;
68881 + pid_t shm_lapid;
68882 +#endif
68883 };
68884
68885 /* shm_mode upper byte flags */
68886 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
68887 index bcdd660..6e12e11 100644
68888 --- a/include/linux/skbuff.h
68889 +++ b/include/linux/skbuff.h
68890 @@ -14,6 +14,7 @@
68891 #ifndef _LINUX_SKBUFF_H
68892 #define _LINUX_SKBUFF_H
68893
68894 +#include <linux/const.h>
68895 #include <linux/kernel.h>
68896 #include <linux/kmemcheck.h>
68897 #include <linux/compiler.h>
68898 @@ -544,7 +545,7 @@ static inline union skb_shared_tx *skb_tx(struct sk_buff *skb)
68899 */
68900 static inline int skb_queue_empty(const struct sk_buff_head *list)
68901 {
68902 - return list->next == (struct sk_buff *)list;
68903 + return list->next == (const struct sk_buff *)list;
68904 }
68905
68906 /**
68907 @@ -557,7 +558,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
68908 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
68909 const struct sk_buff *skb)
68910 {
68911 - return (skb->next == (struct sk_buff *) list);
68912 + return (skb->next == (const struct sk_buff *) list);
68913 }
68914
68915 /**
68916 @@ -570,7 +571,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
68917 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
68918 const struct sk_buff *skb)
68919 {
68920 - return (skb->prev == (struct sk_buff *) list);
68921 + return (skb->prev == (const struct sk_buff *) list);
68922 }
68923
68924 /**
68925 @@ -1367,7 +1368,7 @@ static inline int skb_network_offset(const struct sk_buff *skb)
68926 * headroom, you should not reduce this.
68927 */
68928 #ifndef NET_SKB_PAD
68929 -#define NET_SKB_PAD 32
68930 +#define NET_SKB_PAD (_AC(32,UL))
68931 #endif
68932
68933 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
68934 diff --git a/include/linux/slab.h b/include/linux/slab.h
68935 index 2da8372..a3be824 100644
68936 --- a/include/linux/slab.h
68937 +++ b/include/linux/slab.h
68938 @@ -11,12 +11,20 @@
68939
68940 #include <linux/gfp.h>
68941 #include <linux/types.h>
68942 +#include <linux/err.h>
68943
68944 /*
68945 * Flags to pass to kmem_cache_create().
68946 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
68947 */
68948 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
68949 +
68950 +#ifdef CONFIG_PAX_USERCOPY
68951 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
68952 +#else
68953 +#define SLAB_USERCOPY 0x00000000UL
68954 +#endif
68955 +
68956 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
68957 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
68958 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
68959 @@ -82,10 +90,13 @@
68960 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
68961 * Both make kfree a no-op.
68962 */
68963 -#define ZERO_SIZE_PTR ((void *)16)
68964 +#define ZERO_SIZE_PTR \
68965 +({ \
68966 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
68967 + (void *)(-MAX_ERRNO-1L); \
68968 +})
68969
68970 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
68971 - (unsigned long)ZERO_SIZE_PTR)
68972 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
68973
68974 /*
68975 * struct kmem_cache related prototypes
68976 @@ -138,6 +149,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
68977 void kfree(const void *);
68978 void kzfree(const void *);
68979 size_t ksize(const void *);
68980 +void check_object_size(const void *ptr, unsigned long n, bool to);
68981
68982 /*
68983 * Allocator specific definitions. These are mainly used to establish optimized
68984 @@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
68985
68986 void __init kmem_cache_init_late(void);
68987
68988 +#define kmalloc(x, y) \
68989 +({ \
68990 + void *___retval; \
68991 + intoverflow_t ___x = (intoverflow_t)x; \
68992 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
68993 + ___retval = NULL; \
68994 + else \
68995 + ___retval = kmalloc((size_t)___x, (y)); \
68996 + ___retval; \
68997 +})
68998 +
68999 +#define kmalloc_node(x, y, z) \
69000 +({ \
69001 + void *___retval; \
69002 + intoverflow_t ___x = (intoverflow_t)x; \
69003 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
69004 + ___retval = NULL; \
69005 + else \
69006 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
69007 + ___retval; \
69008 +})
69009 +
69010 +#define kzalloc(x, y) \
69011 +({ \
69012 + void *___retval; \
69013 + intoverflow_t ___x = (intoverflow_t)x; \
69014 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
69015 + ___retval = NULL; \
69016 + else \
69017 + ___retval = kzalloc((size_t)___x, (y)); \
69018 + ___retval; \
69019 +})
69020 +
69021 #endif /* _LINUX_SLAB_H */
69022 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
69023 index 850d057..d9dfe3c 100644
69024 --- a/include/linux/slab_def.h
69025 +++ b/include/linux/slab_def.h
69026 @@ -69,10 +69,10 @@ struct kmem_cache {
69027 unsigned long node_allocs;
69028 unsigned long node_frees;
69029 unsigned long node_overflow;
69030 - atomic_t allochit;
69031 - atomic_t allocmiss;
69032 - atomic_t freehit;
69033 - atomic_t freemiss;
69034 + atomic_unchecked_t allochit;
69035 + atomic_unchecked_t allocmiss;
69036 + atomic_unchecked_t freehit;
69037 + atomic_unchecked_t freemiss;
69038
69039 /*
69040 * If debugging is enabled, then the allocator can add additional
69041 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
69042 index 5ad70a6..57f9f65 100644
69043 --- a/include/linux/slub_def.h
69044 +++ b/include/linux/slub_def.h
69045 @@ -86,7 +86,7 @@ struct kmem_cache {
69046 struct kmem_cache_order_objects max;
69047 struct kmem_cache_order_objects min;
69048 gfp_t allocflags; /* gfp flags to use on each alloc */
69049 - int refcount; /* Refcount for slab cache destroy */
69050 + atomic_t refcount; /* Refcount for slab cache destroy */
69051 void (*ctor)(void *);
69052 int inuse; /* Offset to metadata */
69053 int align; /* Alignment */
69054 @@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
69055 #endif
69056
69057 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
69058 -void *__kmalloc(size_t size, gfp_t flags);
69059 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
69060
69061 #ifdef CONFIG_KMEMTRACE
69062 extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
69063 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
69064 index 67ad11f..0bbd8af 100644
69065 --- a/include/linux/sonet.h
69066 +++ b/include/linux/sonet.h
69067 @@ -61,7 +61,7 @@ struct sonet_stats {
69068 #include <asm/atomic.h>
69069
69070 struct k_sonet_stats {
69071 -#define __HANDLE_ITEM(i) atomic_t i
69072 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
69073 __SONET_ITEMS
69074 #undef __HANDLE_ITEM
69075 };
69076 diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
69077 index 6f52b4d..5500323 100644
69078 --- a/include/linux/sunrpc/cache.h
69079 +++ b/include/linux/sunrpc/cache.h
69080 @@ -125,7 +125,7 @@ struct cache_detail {
69081 */
69082 struct cache_req {
69083 struct cache_deferred_req *(*defer)(struct cache_req *req);
69084 -};
69085 +} __no_const;
69086 /* this must be embedded in a deferred_request that is being
69087 * delayed awaiting cache-fill
69088 */
69089 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
69090 index 8ed9642..101ceab 100644
69091 --- a/include/linux/sunrpc/clnt.h
69092 +++ b/include/linux/sunrpc/clnt.h
69093 @@ -167,9 +167,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
69094 {
69095 switch (sap->sa_family) {
69096 case AF_INET:
69097 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
69098 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
69099 case AF_INET6:
69100 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
69101 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
69102 }
69103 return 0;
69104 }
69105 @@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
69106 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
69107 const struct sockaddr *src)
69108 {
69109 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
69110 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
69111 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
69112
69113 dsin->sin_family = ssin->sin_family;
69114 @@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
69115 if (sa->sa_family != AF_INET6)
69116 return 0;
69117
69118 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
69119 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
69120 }
69121
69122 #endif /* __KERNEL__ */
69123 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
69124 index c14fe86..393245e 100644
69125 --- a/include/linux/sunrpc/svc_rdma.h
69126 +++ b/include/linux/sunrpc/svc_rdma.h
69127 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
69128 extern unsigned int svcrdma_max_requests;
69129 extern unsigned int svcrdma_max_req_size;
69130
69131 -extern atomic_t rdma_stat_recv;
69132 -extern atomic_t rdma_stat_read;
69133 -extern atomic_t rdma_stat_write;
69134 -extern atomic_t rdma_stat_sq_starve;
69135 -extern atomic_t rdma_stat_rq_starve;
69136 -extern atomic_t rdma_stat_rq_poll;
69137 -extern atomic_t rdma_stat_rq_prod;
69138 -extern atomic_t rdma_stat_sq_poll;
69139 -extern atomic_t rdma_stat_sq_prod;
69140 +extern atomic_unchecked_t rdma_stat_recv;
69141 +extern atomic_unchecked_t rdma_stat_read;
69142 +extern atomic_unchecked_t rdma_stat_write;
69143 +extern atomic_unchecked_t rdma_stat_sq_starve;
69144 +extern atomic_unchecked_t rdma_stat_rq_starve;
69145 +extern atomic_unchecked_t rdma_stat_rq_poll;
69146 +extern atomic_unchecked_t rdma_stat_rq_prod;
69147 +extern atomic_unchecked_t rdma_stat_sq_poll;
69148 +extern atomic_unchecked_t rdma_stat_sq_prod;
69149
69150 #define RPCRDMA_VERSION 1
69151
69152 diff --git a/include/linux/suspend.h b/include/linux/suspend.h
69153 index 5e781d8..1e62818 100644
69154 --- a/include/linux/suspend.h
69155 +++ b/include/linux/suspend.h
69156 @@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
69157 * which require special recovery actions in that situation.
69158 */
69159 struct platform_suspend_ops {
69160 - int (*valid)(suspend_state_t state);
69161 - int (*begin)(suspend_state_t state);
69162 - int (*prepare)(void);
69163 - int (*prepare_late)(void);
69164 - int (*enter)(suspend_state_t state);
69165 - void (*wake)(void);
69166 - void (*finish)(void);
69167 - void (*end)(void);
69168 - void (*recover)(void);
69169 + int (* const valid)(suspend_state_t state);
69170 + int (* const begin)(suspend_state_t state);
69171 + int (* const prepare)(void);
69172 + int (* const prepare_late)(void);
69173 + int (* const enter)(suspend_state_t state);
69174 + void (* const wake)(void);
69175 + void (* const finish)(void);
69176 + void (* const end)(void);
69177 + void (* const recover)(void);
69178 };
69179
69180 #ifdef CONFIG_SUSPEND
69181 @@ -120,7 +120,7 @@ struct platform_suspend_ops {
69182 * suspend_set_ops - set platform dependent suspend operations
69183 * @ops: The new suspend operations to set.
69184 */
69185 -extern void suspend_set_ops(struct platform_suspend_ops *ops);
69186 +extern void suspend_set_ops(const struct platform_suspend_ops *ops);
69187 extern int suspend_valid_only_mem(suspend_state_t state);
69188
69189 /**
69190 @@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t state);
69191 #else /* !CONFIG_SUSPEND */
69192 #define suspend_valid_only_mem NULL
69193
69194 -static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
69195 +static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
69196 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
69197 #endif /* !CONFIG_SUSPEND */
69198
69199 @@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone *zone);
69200 * platforms which require special recovery actions in that situation.
69201 */
69202 struct platform_hibernation_ops {
69203 - int (*begin)(void);
69204 - void (*end)(void);
69205 - int (*pre_snapshot)(void);
69206 - void (*finish)(void);
69207 - int (*prepare)(void);
69208 - int (*enter)(void);
69209 - void (*leave)(void);
69210 - int (*pre_restore)(void);
69211 - void (*restore_cleanup)(void);
69212 - void (*recover)(void);
69213 + int (* const begin)(void);
69214 + void (* const end)(void);
69215 + int (* const pre_snapshot)(void);
69216 + void (* const finish)(void);
69217 + int (* const prepare)(void);
69218 + int (* const enter)(void);
69219 + void (* const leave)(void);
69220 + int (* const pre_restore)(void);
69221 + void (* const restore_cleanup)(void);
69222 + void (* const recover)(void);
69223 };
69224
69225 #ifdef CONFIG_HIBERNATION
69226 @@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct page *);
69227 extern void swsusp_unset_page_free(struct page *);
69228 extern unsigned long get_safe_page(gfp_t gfp_mask);
69229
69230 -extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
69231 +extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
69232 extern int hibernate(void);
69233 extern bool system_entering_hibernation(void);
69234 #else /* CONFIG_HIBERNATION */
69235 @@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
69236 static inline void swsusp_set_page_free(struct page *p) {}
69237 static inline void swsusp_unset_page_free(struct page *p) {}
69238
69239 -static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
69240 +static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
69241 static inline int hibernate(void) { return -ENOSYS; }
69242 static inline bool system_entering_hibernation(void) { return false; }
69243 #endif /* CONFIG_HIBERNATION */
69244 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
69245 index 0eb6942..a805cb6 100644
69246 --- a/include/linux/sysctl.h
69247 +++ b/include/linux/sysctl.h
69248 @@ -164,7 +164,11 @@ enum
69249 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
69250 };
69251
69252 -
69253 +#ifdef CONFIG_PAX_SOFTMODE
69254 +enum {
69255 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
69256 +};
69257 +#endif
69258
69259 /* CTL_VM names: */
69260 enum
69261 @@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
69262
69263 extern int proc_dostring(struct ctl_table *, int,
69264 void __user *, size_t *, loff_t *);
69265 +extern int proc_dostring_modpriv(struct ctl_table *, int,
69266 + void __user *, size_t *, loff_t *);
69267 extern int proc_dointvec(struct ctl_table *, int,
69268 void __user *, size_t *, loff_t *);
69269 extern int proc_dointvec_minmax(struct ctl_table *, int,
69270 @@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name, int nlen,
69271
69272 extern ctl_handler sysctl_data;
69273 extern ctl_handler sysctl_string;
69274 +extern ctl_handler sysctl_string_modpriv;
69275 extern ctl_handler sysctl_intvec;
69276 extern ctl_handler sysctl_jiffies;
69277 extern ctl_handler sysctl_ms_jiffies;
69278 diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
69279 index 9d68fed..71f02cc 100644
69280 --- a/include/linux/sysfs.h
69281 +++ b/include/linux/sysfs.h
69282 @@ -75,8 +75,8 @@ struct bin_attribute {
69283 };
69284
69285 struct sysfs_ops {
69286 - ssize_t (*show)(struct kobject *, struct attribute *,char *);
69287 - ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
69288 + ssize_t (* const show)(struct kobject *, struct attribute *,char *);
69289 + ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
69290 };
69291
69292 struct sysfs_dirent;
69293 diff --git a/include/linux/syslog.h b/include/linux/syslog.h
69294 new file mode 100644
69295 index 0000000..3891139
69296 --- /dev/null
69297 +++ b/include/linux/syslog.h
69298 @@ -0,0 +1,52 @@
69299 +/* Syslog internals
69300 + *
69301 + * Copyright 2010 Canonical, Ltd.
69302 + * Author: Kees Cook <kees.cook@canonical.com>
69303 + *
69304 + * This program is free software; you can redistribute it and/or modify
69305 + * it under the terms of the GNU General Public License as published by
69306 + * the Free Software Foundation; either version 2, or (at your option)
69307 + * any later version.
69308 + *
69309 + * This program is distributed in the hope that it will be useful,
69310 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
69311 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
69312 + * GNU General Public License for more details.
69313 + *
69314 + * You should have received a copy of the GNU General Public License
69315 + * along with this program; see the file COPYING. If not, write to
69316 + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
69317 + */
69318 +
69319 +#ifndef _LINUX_SYSLOG_H
69320 +#define _LINUX_SYSLOG_H
69321 +
69322 +/* Close the log. Currently a NOP. */
69323 +#define SYSLOG_ACTION_CLOSE 0
69324 +/* Open the log. Currently a NOP. */
69325 +#define SYSLOG_ACTION_OPEN 1
69326 +/* Read from the log. */
69327 +#define SYSLOG_ACTION_READ 2
69328 +/* Read all messages remaining in the ring buffer. */
69329 +#define SYSLOG_ACTION_READ_ALL 3
69330 +/* Read and clear all messages remaining in the ring buffer */
69331 +#define SYSLOG_ACTION_READ_CLEAR 4
69332 +/* Clear ring buffer. */
69333 +#define SYSLOG_ACTION_CLEAR 5
69334 +/* Disable printk's to console */
69335 +#define SYSLOG_ACTION_CONSOLE_OFF 6
69336 +/* Enable printk's to console */
69337 +#define SYSLOG_ACTION_CONSOLE_ON 7
69338 +/* Set level of messages printed to console */
69339 +#define SYSLOG_ACTION_CONSOLE_LEVEL 8
69340 +/* Return number of unread characters in the log buffer */
69341 +#define SYSLOG_ACTION_SIZE_UNREAD 9
69342 +/* Return size of the log buffer */
69343 +#define SYSLOG_ACTION_SIZE_BUFFER 10
69344 +
69345 +#define SYSLOG_FROM_CALL 0
69346 +#define SYSLOG_FROM_FILE 1
69347 +
69348 +int do_syslog(int type, char __user *buf, int count, bool from_file);
69349 +
69350 +#endif /* _LINUX_SYSLOG_H */
69351 diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
69352 index a8cc4e1..98d3b85 100644
69353 --- a/include/linux/thread_info.h
69354 +++ b/include/linux/thread_info.h
69355 @@ -23,7 +23,7 @@ struct restart_block {
69356 };
69357 /* For futex_wait and futex_wait_requeue_pi */
69358 struct {
69359 - u32 *uaddr;
69360 + u32 __user *uaddr;
69361 u32 val;
69362 u32 flags;
69363 u32 bitset;
69364 diff --git a/include/linux/tty.h b/include/linux/tty.h
69365 index e9c57e9..ee6d489 100644
69366 --- a/include/linux/tty.h
69367 +++ b/include/linux/tty.h
69368 @@ -493,7 +493,6 @@ extern void tty_ldisc_begin(void);
69369 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
69370 extern void tty_ldisc_enable(struct tty_struct *tty);
69371
69372 -
69373 /* n_tty.c */
69374 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
69375
69376 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
69377 index 0c4ee9b..9f7c426 100644
69378 --- a/include/linux/tty_ldisc.h
69379 +++ b/include/linux/tty_ldisc.h
69380 @@ -139,7 +139,7 @@ struct tty_ldisc_ops {
69381
69382 struct module *owner;
69383
69384 - int refcount;
69385 + atomic_t refcount;
69386 };
69387
69388 struct tty_ldisc {
69389 diff --git a/include/linux/types.h b/include/linux/types.h
69390 index c42724f..d190eee 100644
69391 --- a/include/linux/types.h
69392 +++ b/include/linux/types.h
69393 @@ -191,10 +191,26 @@ typedef struct {
69394 volatile int counter;
69395 } atomic_t;
69396
69397 +#ifdef CONFIG_PAX_REFCOUNT
69398 +typedef struct {
69399 + volatile int counter;
69400 +} atomic_unchecked_t;
69401 +#else
69402 +typedef atomic_t atomic_unchecked_t;
69403 +#endif
69404 +
69405 #ifdef CONFIG_64BIT
69406 typedef struct {
69407 volatile long counter;
69408 } atomic64_t;
69409 +
69410 +#ifdef CONFIG_PAX_REFCOUNT
69411 +typedef struct {
69412 + volatile long counter;
69413 +} atomic64_unchecked_t;
69414 +#else
69415 +typedef atomic64_t atomic64_unchecked_t;
69416 +#endif
69417 #endif
69418
69419 struct ustat {
69420 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
69421 index 6b58367..53a3e8e 100644
69422 --- a/include/linux/uaccess.h
69423 +++ b/include/linux/uaccess.h
69424 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
69425 long ret; \
69426 mm_segment_t old_fs = get_fs(); \
69427 \
69428 - set_fs(KERNEL_DS); \
69429 pagefault_disable(); \
69430 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
69431 - pagefault_enable(); \
69432 + set_fs(KERNEL_DS); \
69433 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
69434 set_fs(old_fs); \
69435 + pagefault_enable(); \
69436 ret; \
69437 })
69438
69439 @@ -93,7 +93,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
69440 * Safely read from address @src to the buffer at @dst. If a kernel fault
69441 * happens, handle that and return -EFAULT.
69442 */
69443 -extern long probe_kernel_read(void *dst, void *src, size_t size);
69444 +extern long probe_kernel_read(void *dst, const void *src, size_t size);
69445
69446 /*
69447 * probe_kernel_write(): safely attempt to write to a location
69448 @@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst, void *src, size_t size);
69449 * Safely write to address @dst from the buffer at @src. If a kernel fault
69450 * happens, handle that and return -EFAULT.
69451 */
69452 -extern long probe_kernel_write(void *dst, void *src, size_t size);
69453 +extern long probe_kernel_write(void *dst, const void *src, size_t size);
69454
69455 #endif /* __LINUX_UACCESS_H__ */
69456 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
69457 index 99c1b4d..bb94261 100644
69458 --- a/include/linux/unaligned/access_ok.h
69459 +++ b/include/linux/unaligned/access_ok.h
69460 @@ -6,32 +6,32 @@
69461
69462 static inline u16 get_unaligned_le16(const void *p)
69463 {
69464 - return le16_to_cpup((__le16 *)p);
69465 + return le16_to_cpup((const __le16 *)p);
69466 }
69467
69468 static inline u32 get_unaligned_le32(const void *p)
69469 {
69470 - return le32_to_cpup((__le32 *)p);
69471 + return le32_to_cpup((const __le32 *)p);
69472 }
69473
69474 static inline u64 get_unaligned_le64(const void *p)
69475 {
69476 - return le64_to_cpup((__le64 *)p);
69477 + return le64_to_cpup((const __le64 *)p);
69478 }
69479
69480 static inline u16 get_unaligned_be16(const void *p)
69481 {
69482 - return be16_to_cpup((__be16 *)p);
69483 + return be16_to_cpup((const __be16 *)p);
69484 }
69485
69486 static inline u32 get_unaligned_be32(const void *p)
69487 {
69488 - return be32_to_cpup((__be32 *)p);
69489 + return be32_to_cpup((const __be32 *)p);
69490 }
69491
69492 static inline u64 get_unaligned_be64(const void *p)
69493 {
69494 - return be64_to_cpup((__be64 *)p);
69495 + return be64_to_cpup((const __be64 *)p);
69496 }
69497
69498 static inline void put_unaligned_le16(u16 val, void *p)
69499 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
69500 index 79b9837..b5a56f9 100644
69501 --- a/include/linux/vermagic.h
69502 +++ b/include/linux/vermagic.h
69503 @@ -26,9 +26,35 @@
69504 #define MODULE_ARCH_VERMAGIC ""
69505 #endif
69506
69507 +#ifdef CONFIG_PAX_REFCOUNT
69508 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
69509 +#else
69510 +#define MODULE_PAX_REFCOUNT ""
69511 +#endif
69512 +
69513 +#ifdef CONSTIFY_PLUGIN
69514 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
69515 +#else
69516 +#define MODULE_CONSTIFY_PLUGIN ""
69517 +#endif
69518 +
69519 +#ifdef STACKLEAK_PLUGIN
69520 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
69521 +#else
69522 +#define MODULE_STACKLEAK_PLUGIN ""
69523 +#endif
69524 +
69525 +#ifdef CONFIG_GRKERNSEC
69526 +#define MODULE_GRSEC "GRSEC "
69527 +#else
69528 +#define MODULE_GRSEC ""
69529 +#endif
69530 +
69531 #define VERMAGIC_STRING \
69532 UTS_RELEASE " " \
69533 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
69534 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
69535 - MODULE_ARCH_VERMAGIC
69536 + MODULE_ARCH_VERMAGIC \
69537 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
69538 + MODULE_GRSEC
69539
69540 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
69541 index 819a634..462ac12 100644
69542 --- a/include/linux/vmalloc.h
69543 +++ b/include/linux/vmalloc.h
69544 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
69545 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
69546 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
69547 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
69548 +
69549 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
69550 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
69551 +#endif
69552 +
69553 /* bits [20..32] reserved for arch specific ioremap internals */
69554
69555 /*
69556 @@ -124,4 +129,81 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
69557
69558 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
69559
69560 +#define vmalloc(x) \
69561 +({ \
69562 + void *___retval; \
69563 + intoverflow_t ___x = (intoverflow_t)x; \
69564 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
69565 + ___retval = NULL; \
69566 + else \
69567 + ___retval = vmalloc((unsigned long)___x); \
69568 + ___retval; \
69569 +})
69570 +
69571 +#define __vmalloc(x, y, z) \
69572 +({ \
69573 + void *___retval; \
69574 + intoverflow_t ___x = (intoverflow_t)x; \
69575 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
69576 + ___retval = NULL; \
69577 + else \
69578 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
69579 + ___retval; \
69580 +})
69581 +
69582 +#define vmalloc_user(x) \
69583 +({ \
69584 + void *___retval; \
69585 + intoverflow_t ___x = (intoverflow_t)x; \
69586 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
69587 + ___retval = NULL; \
69588 + else \
69589 + ___retval = vmalloc_user((unsigned long)___x); \
69590 + ___retval; \
69591 +})
69592 +
69593 +#define vmalloc_exec(x) \
69594 +({ \
69595 + void *___retval; \
69596 + intoverflow_t ___x = (intoverflow_t)x; \
69597 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
69598 + ___retval = NULL; \
69599 + else \
69600 + ___retval = vmalloc_exec((unsigned long)___x); \
69601 + ___retval; \
69602 +})
69603 +
69604 +#define vmalloc_node(x, y) \
69605 +({ \
69606 + void *___retval; \
69607 + intoverflow_t ___x = (intoverflow_t)x; \
69608 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
69609 + ___retval = NULL; \
69610 + else \
69611 + ___retval = vmalloc_node((unsigned long)___x, (y));\
69612 + ___retval; \
69613 +})
69614 +
69615 +#define vmalloc_32(x) \
69616 +({ \
69617 + void *___retval; \
69618 + intoverflow_t ___x = (intoverflow_t)x; \
69619 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
69620 + ___retval = NULL; \
69621 + else \
69622 + ___retval = vmalloc_32((unsigned long)___x); \
69623 + ___retval; \
69624 +})
69625 +
69626 +#define vmalloc_32_user(x) \
69627 +({ \
69628 + void *___retval; \
69629 + intoverflow_t ___x = (intoverflow_t)x; \
69630 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
69631 + ___retval = NULL; \
69632 + else \
69633 + ___retval = vmalloc_32_user((unsigned long)___x);\
69634 + ___retval; \
69635 +})
69636 +
69637 #endif /* _LINUX_VMALLOC_H */
69638 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
69639 index 13070d6..aa4159a 100644
69640 --- a/include/linux/vmstat.h
69641 +++ b/include/linux/vmstat.h
69642 @@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(int cpu)
69643 /*
69644 * Zone based page accounting with per cpu differentials.
69645 */
69646 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
69647 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
69648
69649 static inline void zone_page_state_add(long x, struct zone *zone,
69650 enum zone_stat_item item)
69651 {
69652 - atomic_long_add(x, &zone->vm_stat[item]);
69653 - atomic_long_add(x, &vm_stat[item]);
69654 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
69655 + atomic_long_add_unchecked(x, &vm_stat[item]);
69656 }
69657
69658 static inline unsigned long global_page_state(enum zone_stat_item item)
69659 {
69660 - long x = atomic_long_read(&vm_stat[item]);
69661 + long x = atomic_long_read_unchecked(&vm_stat[item]);
69662 #ifdef CONFIG_SMP
69663 if (x < 0)
69664 x = 0;
69665 @@ -158,7 +158,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
69666 static inline unsigned long zone_page_state(struct zone *zone,
69667 enum zone_stat_item item)
69668 {
69669 - long x = atomic_long_read(&zone->vm_stat[item]);
69670 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
69671 #ifdef CONFIG_SMP
69672 if (x < 0)
69673 x = 0;
69674 @@ -175,7 +175,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
69675 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
69676 enum zone_stat_item item)
69677 {
69678 - long x = atomic_long_read(&zone->vm_stat[item]);
69679 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
69680
69681 #ifdef CONFIG_SMP
69682 int cpu;
69683 @@ -264,8 +264,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
69684
69685 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
69686 {
69687 - atomic_long_inc(&zone->vm_stat[item]);
69688 - atomic_long_inc(&vm_stat[item]);
69689 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
69690 + atomic_long_inc_unchecked(&vm_stat[item]);
69691 }
69692
69693 static inline void __inc_zone_page_state(struct page *page,
69694 @@ -276,8 +276,8 @@ static inline void __inc_zone_page_state(struct page *page,
69695
69696 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
69697 {
69698 - atomic_long_dec(&zone->vm_stat[item]);
69699 - atomic_long_dec(&vm_stat[item]);
69700 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
69701 + atomic_long_dec_unchecked(&vm_stat[item]);
69702 }
69703
69704 static inline void __dec_zone_page_state(struct page *page,
69705 diff --git a/include/linux/xattr.h b/include/linux/xattr.h
69706 index 5c84af8..1a3b6e2 100644
69707 --- a/include/linux/xattr.h
69708 +++ b/include/linux/xattr.h
69709 @@ -33,6 +33,11 @@
69710 #define XATTR_USER_PREFIX "user."
69711 #define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1)
69712
69713 +/* User namespace */
69714 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
69715 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
69716 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
69717 +
69718 struct inode;
69719 struct dentry;
69720
69721 diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
69722 index eed5fcc..5080d24 100644
69723 --- a/include/media/saa7146_vv.h
69724 +++ b/include/media/saa7146_vv.h
69725 @@ -167,7 +167,7 @@ struct saa7146_ext_vv
69726 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
69727
69728 /* the extension can override this */
69729 - struct v4l2_ioctl_ops ops;
69730 + v4l2_ioctl_ops_no_const ops;
69731 /* pointer to the saa7146 core ops */
69732 const struct v4l2_ioctl_ops *core_ops;
69733
69734 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
69735 index 73c9867..2da8837 100644
69736 --- a/include/media/v4l2-dev.h
69737 +++ b/include/media/v4l2-dev.h
69738 @@ -34,7 +34,7 @@ struct v4l2_device;
69739 #define V4L2_FL_UNREGISTERED (0)
69740
69741 struct v4l2_file_operations {
69742 - struct module *owner;
69743 + struct module * const owner;
69744 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
69745 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
69746 unsigned int (*poll) (struct file *, struct poll_table_struct *);
69747 @@ -46,6 +46,7 @@ struct v4l2_file_operations {
69748 int (*open) (struct file *);
69749 int (*release) (struct file *);
69750 };
69751 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
69752
69753 /*
69754 * Newer version of video_device, handled by videodev2.c
69755 diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
69756 index 5d5d550..f559ef1 100644
69757 --- a/include/media/v4l2-device.h
69758 +++ b/include/media/v4l2-device.h
69759 @@ -71,7 +71,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
69760 this function returns 0. If the name ends with a digit (e.g. cx18),
69761 then the name will be set to cx18-0 since cx180 looks really odd. */
69762 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
69763 - atomic_t *instance);
69764 + atomic_unchecked_t *instance);
69765
69766 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
69767 Since the parent disappears this ensures that v4l2_dev doesn't have an
69768 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
69769 index 7a4529d..7244290 100644
69770 --- a/include/media/v4l2-ioctl.h
69771 +++ b/include/media/v4l2-ioctl.h
69772 @@ -243,6 +243,7 @@ struct v4l2_ioctl_ops {
69773 long (*vidioc_default) (struct file *file, void *fh,
69774 int cmd, void *arg);
69775 };
69776 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
69777
69778
69779 /* v4l debugging and diagnostics */
69780 diff --git a/include/net/flow.h b/include/net/flow.h
69781 index 809970b..c3df4f3 100644
69782 --- a/include/net/flow.h
69783 +++ b/include/net/flow.h
69784 @@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net *net, struct flowi *key, u16 family,
69785 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
69786 u8 dir, flow_resolve_t resolver);
69787 extern void flow_cache_flush(void);
69788 -extern atomic_t flow_cache_genid;
69789 +extern atomic_unchecked_t flow_cache_genid;
69790
69791 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
69792 {
69793 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
69794 index 15e1f8fe..668837c 100644
69795 --- a/include/net/inetpeer.h
69796 +++ b/include/net/inetpeer.h
69797 @@ -24,7 +24,7 @@ struct inet_peer
69798 __u32 dtime; /* the time of last use of not
69799 * referenced entries */
69800 atomic_t refcnt;
69801 - atomic_t rid; /* Frag reception counter */
69802 + atomic_unchecked_t rid; /* Frag reception counter */
69803 __u32 tcp_ts;
69804 unsigned long tcp_ts_stamp;
69805 };
69806 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
69807 index 98978e7..2243a3d 100644
69808 --- a/include/net/ip_vs.h
69809 +++ b/include/net/ip_vs.h
69810 @@ -365,7 +365,7 @@ struct ip_vs_conn {
69811 struct ip_vs_conn *control; /* Master control connection */
69812 atomic_t n_control; /* Number of controlled ones */
69813 struct ip_vs_dest *dest; /* real server */
69814 - atomic_t in_pkts; /* incoming packet counter */
69815 + atomic_unchecked_t in_pkts; /* incoming packet counter */
69816
69817 /* packet transmitter for different forwarding methods. If it
69818 mangles the packet, it must return NF_DROP or better NF_STOLEN,
69819 @@ -466,7 +466,7 @@ struct ip_vs_dest {
69820 union nf_inet_addr addr; /* IP address of the server */
69821 __be16 port; /* port number of the server */
69822 volatile unsigned flags; /* dest status flags */
69823 - atomic_t conn_flags; /* flags to copy to conn */
69824 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
69825 atomic_t weight; /* server weight */
69826
69827 atomic_t refcnt; /* reference counter */
69828 diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
69829 index 69b610a..fe3962c 100644
69830 --- a/include/net/irda/ircomm_core.h
69831 +++ b/include/net/irda/ircomm_core.h
69832 @@ -51,7 +51,7 @@ typedef struct {
69833 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
69834 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
69835 struct ircomm_info *);
69836 -} call_t;
69837 +} __no_const call_t;
69838
69839 struct ircomm_cb {
69840 irda_queue_t queue;
69841 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
69842 index eea2e61..08c692d 100644
69843 --- a/include/net/irda/ircomm_tty.h
69844 +++ b/include/net/irda/ircomm_tty.h
69845 @@ -35,6 +35,7 @@
69846 #include <linux/termios.h>
69847 #include <linux/timer.h>
69848 #include <linux/tty.h> /* struct tty_struct */
69849 +#include <asm/local.h>
69850
69851 #include <net/irda/irias_object.h>
69852 #include <net/irda/ircomm_core.h>
69853 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
69854 unsigned short close_delay;
69855 unsigned short closing_wait; /* time to wait before closing */
69856
69857 - int open_count;
69858 - int blocked_open; /* # of blocked opens */
69859 + local_t open_count;
69860 + local_t blocked_open; /* # of blocked opens */
69861
69862 /* Protect concurent access to :
69863 * o self->open_count
69864 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
69865 index f82a1e8..82d81e8 100644
69866 --- a/include/net/iucv/af_iucv.h
69867 +++ b/include/net/iucv/af_iucv.h
69868 @@ -87,7 +87,7 @@ struct iucv_sock {
69869 struct iucv_sock_list {
69870 struct hlist_head head;
69871 rwlock_t lock;
69872 - atomic_t autobind_name;
69873 + atomic_unchecked_t autobind_name;
69874 };
69875
69876 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
69877 diff --git a/include/net/lapb.h b/include/net/lapb.h
69878 index 96cb5dd..25e8d4f 100644
69879 --- a/include/net/lapb.h
69880 +++ b/include/net/lapb.h
69881 @@ -95,7 +95,7 @@ struct lapb_cb {
69882 struct sk_buff_head write_queue;
69883 struct sk_buff_head ack_queue;
69884 unsigned char window;
69885 - struct lapb_register_struct callbacks;
69886 + struct lapb_register_struct *callbacks;
69887
69888 /* FRMR control information */
69889 struct lapb_frame frmr_data;
69890 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
69891 index 3817fda..cdb2343 100644
69892 --- a/include/net/neighbour.h
69893 +++ b/include/net/neighbour.h
69894 @@ -131,7 +131,7 @@ struct neigh_ops
69895 int (*connected_output)(struct sk_buff*);
69896 int (*hh_output)(struct sk_buff*);
69897 int (*queue_xmit)(struct sk_buff*);
69898 -};
69899 +} __do_const;
69900
69901 struct pneigh_entry
69902 {
69903 diff --git a/include/net/netlink.h b/include/net/netlink.h
69904 index c344646..4778c71 100644
69905 --- a/include/net/netlink.h
69906 +++ b/include/net/netlink.h
69907 @@ -335,7 +335,7 @@ static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining)
69908 {
69909 return (remaining >= (int) sizeof(struct nlmsghdr) &&
69910 nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
69911 - nlh->nlmsg_len <= remaining);
69912 + nlh->nlmsg_len <= (unsigned int)remaining);
69913 }
69914
69915 /**
69916 @@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
69917 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
69918 {
69919 if (mark)
69920 - skb_trim(skb, (unsigned char *) mark - skb->data);
69921 + skb_trim(skb, (const unsigned char *) mark - skb->data);
69922 }
69923
69924 /**
69925 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
69926 index 9a4b8b7..e49e077 100644
69927 --- a/include/net/netns/ipv4.h
69928 +++ b/include/net/netns/ipv4.h
69929 @@ -54,7 +54,7 @@ struct netns_ipv4 {
69930 int current_rt_cache_rebuild_count;
69931
69932 struct timer_list rt_secret_timer;
69933 - atomic_t rt_genid;
69934 + atomic_unchecked_t rt_genid;
69935
69936 #ifdef CONFIG_IP_MROUTE
69937 struct sock *mroute_sk;
69938 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
69939 index 8a6d529..171f401 100644
69940 --- a/include/net/sctp/sctp.h
69941 +++ b/include/net/sctp/sctp.h
69942 @@ -305,8 +305,8 @@ extern int sctp_debug_flag;
69943
69944 #else /* SCTP_DEBUG */
69945
69946 -#define SCTP_DEBUG_PRINTK(whatever...)
69947 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
69948 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
69949 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
69950 #define SCTP_ENABLE_DEBUG
69951 #define SCTP_DISABLE_DEBUG
69952 #define SCTP_ASSERT(expr, str, func)
69953 diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
69954 index d97f689..f3b90ab 100644
69955 --- a/include/net/secure_seq.h
69956 +++ b/include/net/secure_seq.h
69957 @@ -7,14 +7,14 @@ extern __u32 secure_ip_id(__be32 daddr);
69958 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
69959 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
69960 extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
69961 - __be16 dport);
69962 + __be16 dport);
69963 extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
69964 __be16 sport, __be16 dport);
69965 extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
69966 - __be16 sport, __be16 dport);
69967 + __be16 sport, __be16 dport);
69968 extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
69969 - __be16 sport, __be16 dport);
69970 + __be16 sport, __be16 dport);
69971 extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
69972 - __be16 sport, __be16 dport);
69973 + __be16 sport, __be16 dport);
69974
69975 #endif /* _NET_SECURE_SEQ */
69976 diff --git a/include/net/sock.h b/include/net/sock.h
69977 index 9f96394..76fc9c7 100644
69978 --- a/include/net/sock.h
69979 +++ b/include/net/sock.h
69980 @@ -272,7 +272,7 @@ struct sock {
69981 rwlock_t sk_callback_lock;
69982 int sk_err,
69983 sk_err_soft;
69984 - atomic_t sk_drops;
69985 + atomic_unchecked_t sk_drops;
69986 unsigned short sk_ack_backlog;
69987 unsigned short sk_max_ack_backlog;
69988 __u32 sk_priority;
69989 @@ -737,7 +737,7 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
69990 extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
69991 extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
69992 #else
69993 -static void inline sock_prot_inuse_add(struct net *net, struct proto *prot,
69994 +static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
69995 int inc)
69996 {
69997 }
69998 diff --git a/include/net/tcp.h b/include/net/tcp.h
69999 index 6cfe18b..dd21acb 100644
70000 --- a/include/net/tcp.h
70001 +++ b/include/net/tcp.h
70002 @@ -1444,8 +1444,8 @@ enum tcp_seq_states {
70003 struct tcp_seq_afinfo {
70004 char *name;
70005 sa_family_t family;
70006 - struct file_operations seq_fops;
70007 - struct seq_operations seq_ops;
70008 + file_operations_no_const seq_fops;
70009 + seq_operations_no_const seq_ops;
70010 };
70011
70012 struct tcp_iter_state {
70013 diff --git a/include/net/udp.h b/include/net/udp.h
70014 index f98abd2..b4b042f 100644
70015 --- a/include/net/udp.h
70016 +++ b/include/net/udp.h
70017 @@ -187,8 +187,8 @@ struct udp_seq_afinfo {
70018 char *name;
70019 sa_family_t family;
70020 struct udp_table *udp_table;
70021 - struct file_operations seq_fops;
70022 - struct seq_operations seq_ops;
70023 + file_operations_no_const seq_fops;
70024 + seq_operations_no_const seq_ops;
70025 };
70026
70027 struct udp_iter_state {
70028 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
70029 index cbb822e..e9c1cbe 100644
70030 --- a/include/rdma/iw_cm.h
70031 +++ b/include/rdma/iw_cm.h
70032 @@ -129,7 +129,7 @@ struct iw_cm_verbs {
70033 int backlog);
70034
70035 int (*destroy_listen)(struct iw_cm_id *cm_id);
70036 -};
70037 +} __no_const;
70038
70039 /**
70040 * iw_create_cm_id - Create an IW CM identifier.
70041 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
70042 index 09a124b..caa8ca8 100644
70043 --- a/include/scsi/libfc.h
70044 +++ b/include/scsi/libfc.h
70045 @@ -675,6 +675,7 @@ struct libfc_function_template {
70046 */
70047 void (*disc_stop_final) (struct fc_lport *);
70048 };
70049 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
70050
70051 /* information used by the discovery layer */
70052 struct fc_disc {
70053 @@ -707,7 +708,7 @@ struct fc_lport {
70054 struct fc_disc disc;
70055
70056 /* Operational Information */
70057 - struct libfc_function_template tt;
70058 + libfc_function_template_no_const tt;
70059 u8 link_up;
70060 u8 qfull;
70061 enum fc_lport_state state;
70062 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
70063 index de8e180..f15e0d7 100644
70064 --- a/include/scsi/scsi_device.h
70065 +++ b/include/scsi/scsi_device.h
70066 @@ -156,9 +156,9 @@ struct scsi_device {
70067 unsigned int max_device_blocked; /* what device_blocked counts down from */
70068 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
70069
70070 - atomic_t iorequest_cnt;
70071 - atomic_t iodone_cnt;
70072 - atomic_t ioerr_cnt;
70073 + atomic_unchecked_t iorequest_cnt;
70074 + atomic_unchecked_t iodone_cnt;
70075 + atomic_unchecked_t ioerr_cnt;
70076
70077 struct device sdev_gendev,
70078 sdev_dev;
70079 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
70080 index fc50bd6..81ba9cb 100644
70081 --- a/include/scsi/scsi_transport_fc.h
70082 +++ b/include/scsi/scsi_transport_fc.h
70083 @@ -708,7 +708,7 @@ struct fc_function_template {
70084 unsigned long show_host_system_hostname:1;
70085
70086 unsigned long disable_target_scan:1;
70087 -};
70088 +} __do_const;
70089
70090
70091 /**
70092 diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
70093 index 3dae3f7..8440d6f 100644
70094 --- a/include/sound/ac97_codec.h
70095 +++ b/include/sound/ac97_codec.h
70096 @@ -419,15 +419,15 @@
70097 struct snd_ac97;
70098
70099 struct snd_ac97_build_ops {
70100 - int (*build_3d) (struct snd_ac97 *ac97);
70101 - int (*build_specific) (struct snd_ac97 *ac97);
70102 - int (*build_spdif) (struct snd_ac97 *ac97);
70103 - int (*build_post_spdif) (struct snd_ac97 *ac97);
70104 + int (* const build_3d) (struct snd_ac97 *ac97);
70105 + int (* const build_specific) (struct snd_ac97 *ac97);
70106 + int (* const build_spdif) (struct snd_ac97 *ac97);
70107 + int (* const build_post_spdif) (struct snd_ac97 *ac97);
70108 #ifdef CONFIG_PM
70109 - void (*suspend) (struct snd_ac97 *ac97);
70110 - void (*resume) (struct snd_ac97 *ac97);
70111 + void (* const suspend) (struct snd_ac97 *ac97);
70112 + void (* const resume) (struct snd_ac97 *ac97);
70113 #endif
70114 - void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
70115 + void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
70116 };
70117
70118 struct snd_ac97_bus_ops {
70119 @@ -477,7 +477,7 @@ struct snd_ac97_template {
70120
70121 struct snd_ac97 {
70122 /* -- lowlevel (hardware) driver specific -- */
70123 - struct snd_ac97_build_ops * build_ops;
70124 + const struct snd_ac97_build_ops * build_ops;
70125 void *private_data;
70126 void (*private_free) (struct snd_ac97 *ac97);
70127 /* --- */
70128 diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
70129 index 891cf1a..a94ba2b 100644
70130 --- a/include/sound/ak4xxx-adda.h
70131 +++ b/include/sound/ak4xxx-adda.h
70132 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
70133 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
70134 unsigned char val);
70135 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
70136 -};
70137 +} __no_const;
70138
70139 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
70140
70141 diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
70142 index 8c05e47..2b5df97 100644
70143 --- a/include/sound/hwdep.h
70144 +++ b/include/sound/hwdep.h
70145 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
70146 struct snd_hwdep_dsp_status *status);
70147 int (*dsp_load)(struct snd_hwdep *hw,
70148 struct snd_hwdep_dsp_image *image);
70149 -};
70150 +} __no_const;
70151
70152 struct snd_hwdep {
70153 struct snd_card *card;
70154 diff --git a/include/sound/info.h b/include/sound/info.h
70155 index 112e894..6fda5b5 100644
70156 --- a/include/sound/info.h
70157 +++ b/include/sound/info.h
70158 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
70159 struct snd_info_buffer *buffer);
70160 void (*write)(struct snd_info_entry *entry,
70161 struct snd_info_buffer *buffer);
70162 -};
70163 +} __no_const;
70164
70165 struct snd_info_entry_ops {
70166 int (*open)(struct snd_info_entry *entry,
70167 diff --git a/include/sound/pcm.h b/include/sound/pcm.h
70168 index de6d981..590a550 100644
70169 --- a/include/sound/pcm.h
70170 +++ b/include/sound/pcm.h
70171 @@ -80,6 +80,7 @@ struct snd_pcm_ops {
70172 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
70173 int (*ack)(struct snd_pcm_substream *substream);
70174 };
70175 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
70176
70177 /*
70178 *
70179 diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
70180 index 736eac7..fe8a80f 100644
70181 --- a/include/sound/sb16_csp.h
70182 +++ b/include/sound/sb16_csp.h
70183 @@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
70184 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
70185 int (*csp_stop) (struct snd_sb_csp * p);
70186 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
70187 -};
70188 +} __no_const;
70189
70190 /*
70191 * CSP private data
70192 diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
70193 index 444cd6b..3327cc5 100644
70194 --- a/include/sound/ymfpci.h
70195 +++ b/include/sound/ymfpci.h
70196 @@ -358,7 +358,7 @@ struct snd_ymfpci {
70197 spinlock_t reg_lock;
70198 spinlock_t voice_lock;
70199 wait_queue_head_t interrupt_sleep;
70200 - atomic_t interrupt_sleep_count;
70201 + atomic_unchecked_t interrupt_sleep_count;
70202 struct snd_info_entry *proc_entry;
70203 const struct firmware *dsp_microcode;
70204 const struct firmware *controller_microcode;
70205 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
70206 index b89f9db..f097b38 100644
70207 --- a/include/trace/events/irq.h
70208 +++ b/include/trace/events/irq.h
70209 @@ -34,7 +34,7 @@
70210 */
70211 TRACE_EVENT(irq_handler_entry,
70212
70213 - TP_PROTO(int irq, struct irqaction *action),
70214 + TP_PROTO(int irq, const struct irqaction *action),
70215
70216 TP_ARGS(irq, action),
70217
70218 @@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
70219 */
70220 TRACE_EVENT(irq_handler_exit,
70221
70222 - TP_PROTO(int irq, struct irqaction *action, int ret),
70223 + TP_PROTO(int irq, const struct irqaction *action, int ret),
70224
70225 TP_ARGS(irq, action, ret),
70226
70227 @@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
70228 */
70229 TRACE_EVENT(softirq_entry,
70230
70231 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
70232 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
70233
70234 TP_ARGS(h, vec),
70235
70236 @@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
70237 */
70238 TRACE_EVENT(softirq_exit,
70239
70240 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
70241 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
70242
70243 TP_ARGS(h, vec),
70244
70245 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
70246 index 0993a22..32ba2fe 100644
70247 --- a/include/video/uvesafb.h
70248 +++ b/include/video/uvesafb.h
70249 @@ -177,6 +177,7 @@ struct uvesafb_par {
70250 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
70251 u8 pmi_setpal; /* PMI for palette changes */
70252 u16 *pmi_base; /* protected mode interface location */
70253 + u8 *pmi_code; /* protected mode code location */
70254 void *pmi_start;
70255 void *pmi_pal;
70256 u8 *vbe_state_orig; /*
70257 diff --git a/init/Kconfig b/init/Kconfig
70258 index d72691b..3996e54 100644
70259 --- a/init/Kconfig
70260 +++ b/init/Kconfig
70261 @@ -1004,7 +1004,7 @@ config SLUB_DEBUG
70262
70263 config COMPAT_BRK
70264 bool "Disable heap randomization"
70265 - default y
70266 + default n
70267 help
70268 Randomizing heap placement makes heap exploits harder, but it
70269 also breaks ancient binaries (including anything libc5 based).
70270 diff --git a/init/do_mounts.c b/init/do_mounts.c
70271 index bb008d0..4fa3933 100644
70272 --- a/init/do_mounts.c
70273 +++ b/init/do_mounts.c
70274 @@ -216,11 +216,11 @@ static void __init get_fs_names(char *page)
70275
70276 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
70277 {
70278 - int err = sys_mount(name, "/root", fs, flags, data);
70279 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
70280 if (err)
70281 return err;
70282
70283 - sys_chdir("/root");
70284 + sys_chdir((__force const char __user *)"/root");
70285 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
70286 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
70287 current->fs->pwd.mnt->mnt_sb->s_type->name,
70288 @@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...)
70289 va_start(args, fmt);
70290 vsprintf(buf, fmt, args);
70291 va_end(args);
70292 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
70293 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
70294 if (fd >= 0) {
70295 sys_ioctl(fd, FDEJECT, 0);
70296 sys_close(fd);
70297 }
70298 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
70299 - fd = sys_open("/dev/console", O_RDWR, 0);
70300 + fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
70301 if (fd >= 0) {
70302 sys_ioctl(fd, TCGETS, (long)&termios);
70303 termios.c_lflag &= ~ICANON;
70304 sys_ioctl(fd, TCSETSF, (long)&termios);
70305 - sys_read(fd, &c, 1);
70306 + sys_read(fd, (char __user *)&c, 1);
70307 termios.c_lflag |= ICANON;
70308 sys_ioctl(fd, TCSETSF, (long)&termios);
70309 sys_close(fd);
70310 @@ -416,6 +416,6 @@ void __init prepare_namespace(void)
70311 mount_root();
70312 out:
70313 devtmpfs_mount("dev");
70314 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
70315 - sys_chroot(".");
70316 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
70317 + sys_chroot((__force char __user *)".");
70318 }
70319 diff --git a/init/do_mounts.h b/init/do_mounts.h
70320 index f5b978a..69dbfe8 100644
70321 --- a/init/do_mounts.h
70322 +++ b/init/do_mounts.h
70323 @@ -15,15 +15,15 @@ extern int root_mountflags;
70324
70325 static inline int create_dev(char *name, dev_t dev)
70326 {
70327 - sys_unlink(name);
70328 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
70329 + sys_unlink((char __force_user *)name);
70330 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
70331 }
70332
70333 #if BITS_PER_LONG == 32
70334 static inline u32 bstat(char *name)
70335 {
70336 struct stat64 stat;
70337 - if (sys_stat64(name, &stat) != 0)
70338 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
70339 return 0;
70340 if (!S_ISBLK(stat.st_mode))
70341 return 0;
70342 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
70343 static inline u32 bstat(char *name)
70344 {
70345 struct stat stat;
70346 - if (sys_newstat(name, &stat) != 0)
70347 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
70348 return 0;
70349 if (!S_ISBLK(stat.st_mode))
70350 return 0;
70351 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
70352 index 614241b..4da046b 100644
70353 --- a/init/do_mounts_initrd.c
70354 +++ b/init/do_mounts_initrd.c
70355 @@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shell)
70356 sys_close(old_fd);sys_close(root_fd);
70357 sys_close(0);sys_close(1);sys_close(2);
70358 sys_setsid();
70359 - (void) sys_open("/dev/console",O_RDWR,0);
70360 + (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
70361 (void) sys_dup(0);
70362 (void) sys_dup(0);
70363 return kernel_execve(shell, argv, envp_init);
70364 @@ -47,13 +47,13 @@ static void __init handle_initrd(void)
70365 create_dev("/dev/root.old", Root_RAM0);
70366 /* mount initrd on rootfs' /root */
70367 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
70368 - sys_mkdir("/old", 0700);
70369 - root_fd = sys_open("/", 0, 0);
70370 - old_fd = sys_open("/old", 0, 0);
70371 + sys_mkdir((const char __force_user *)"/old", 0700);
70372 + root_fd = sys_open((const char __force_user *)"/", 0, 0);
70373 + old_fd = sys_open((const char __force_user *)"/old", 0, 0);
70374 /* move initrd over / and chdir/chroot in initrd root */
70375 - sys_chdir("/root");
70376 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
70377 - sys_chroot(".");
70378 + sys_chdir((const char __force_user *)"/root");
70379 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
70380 + sys_chroot((const char __force_user *)".");
70381
70382 /*
70383 * In case that a resume from disk is carried out by linuxrc or one of
70384 @@ -70,15 +70,15 @@ static void __init handle_initrd(void)
70385
70386 /* move initrd to rootfs' /old */
70387 sys_fchdir(old_fd);
70388 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
70389 + sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
70390 /* switch root and cwd back to / of rootfs */
70391 sys_fchdir(root_fd);
70392 - sys_chroot(".");
70393 + sys_chroot((const char __force_user *)".");
70394 sys_close(old_fd);
70395 sys_close(root_fd);
70396
70397 if (new_decode_dev(real_root_dev) == Root_RAM0) {
70398 - sys_chdir("/old");
70399 + sys_chdir((const char __force_user *)"/old");
70400 return;
70401 }
70402
70403 @@ -86,17 +86,17 @@ static void __init handle_initrd(void)
70404 mount_root();
70405
70406 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
70407 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
70408 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
70409 if (!error)
70410 printk("okay\n");
70411 else {
70412 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
70413 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
70414 if (error == -ENOENT)
70415 printk("/initrd does not exist. Ignored.\n");
70416 else
70417 printk("failed\n");
70418 printk(KERN_NOTICE "Unmounting old root\n");
70419 - sys_umount("/old", MNT_DETACH);
70420 + sys_umount((char __force_user *)"/old", MNT_DETACH);
70421 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
70422 if (fd < 0) {
70423 error = fd;
70424 @@ -119,11 +119,11 @@ int __init initrd_load(void)
70425 * mounted in the normal path.
70426 */
70427 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
70428 - sys_unlink("/initrd.image");
70429 + sys_unlink((const char __force_user *)"/initrd.image");
70430 handle_initrd();
70431 return 1;
70432 }
70433 }
70434 - sys_unlink("/initrd.image");
70435 + sys_unlink((const char __force_user *)"/initrd.image");
70436 return 0;
70437 }
70438 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
70439 index 69aebbf..c0bf6a7 100644
70440 --- a/init/do_mounts_md.c
70441 +++ b/init/do_mounts_md.c
70442 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
70443 partitioned ? "_d" : "", minor,
70444 md_setup_args[ent].device_names);
70445
70446 - fd = sys_open(name, 0, 0);
70447 + fd = sys_open((char __force_user *)name, 0, 0);
70448 if (fd < 0) {
70449 printk(KERN_ERR "md: open failed - cannot start "
70450 "array %s\n", name);
70451 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
70452 * array without it
70453 */
70454 sys_close(fd);
70455 - fd = sys_open(name, 0, 0);
70456 + fd = sys_open((char __force_user *)name, 0, 0);
70457 sys_ioctl(fd, BLKRRPART, 0);
70458 }
70459 sys_close(fd);
70460 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
70461
70462 wait_for_device_probe();
70463
70464 - fd = sys_open("/dev/md0", 0, 0);
70465 + fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
70466 if (fd >= 0) {
70467 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
70468 sys_close(fd);
70469 diff --git a/init/initramfs.c b/init/initramfs.c
70470 index 1fd59b8..a01b079 100644
70471 --- a/init/initramfs.c
70472 +++ b/init/initramfs.c
70473 @@ -74,7 +74,7 @@ static void __init free_hash(void)
70474 }
70475 }
70476
70477 -static long __init do_utime(char __user *filename, time_t mtime)
70478 +static long __init do_utime(__force char __user *filename, time_t mtime)
70479 {
70480 struct timespec t[2];
70481
70482 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
70483 struct dir_entry *de, *tmp;
70484 list_for_each_entry_safe(de, tmp, &dir_list, list) {
70485 list_del(&de->list);
70486 - do_utime(de->name, de->mtime);
70487 + do_utime((char __force_user *)de->name, de->mtime);
70488 kfree(de->name);
70489 kfree(de);
70490 }
70491 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
70492 if (nlink >= 2) {
70493 char *old = find_link(major, minor, ino, mode, collected);
70494 if (old)
70495 - return (sys_link(old, collected) < 0) ? -1 : 1;
70496 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
70497 }
70498 return 0;
70499 }
70500 @@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
70501 {
70502 struct stat st;
70503
70504 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
70505 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
70506 if (S_ISDIR(st.st_mode))
70507 - sys_rmdir(path);
70508 + sys_rmdir((char __force_user *)path);
70509 else
70510 - sys_unlink(path);
70511 + sys_unlink((char __force_user *)path);
70512 }
70513 }
70514
70515 @@ -305,7 +305,7 @@ static int __init do_name(void)
70516 int openflags = O_WRONLY|O_CREAT;
70517 if (ml != 1)
70518 openflags |= O_TRUNC;
70519 - wfd = sys_open(collected, openflags, mode);
70520 + wfd = sys_open((char __force_user *)collected, openflags, mode);
70521
70522 if (wfd >= 0) {
70523 sys_fchown(wfd, uid, gid);
70524 @@ -317,17 +317,17 @@ static int __init do_name(void)
70525 }
70526 }
70527 } else if (S_ISDIR(mode)) {
70528 - sys_mkdir(collected, mode);
70529 - sys_chown(collected, uid, gid);
70530 - sys_chmod(collected, mode);
70531 + sys_mkdir((char __force_user *)collected, mode);
70532 + sys_chown((char __force_user *)collected, uid, gid);
70533 + sys_chmod((char __force_user *)collected, mode);
70534 dir_add(collected, mtime);
70535 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
70536 S_ISFIFO(mode) || S_ISSOCK(mode)) {
70537 if (maybe_link() == 0) {
70538 - sys_mknod(collected, mode, rdev);
70539 - sys_chown(collected, uid, gid);
70540 - sys_chmod(collected, mode);
70541 - do_utime(collected, mtime);
70542 + sys_mknod((char __force_user *)collected, mode, rdev);
70543 + sys_chown((char __force_user *)collected, uid, gid);
70544 + sys_chmod((char __force_user *)collected, mode);
70545 + do_utime((char __force_user *)collected, mtime);
70546 }
70547 }
70548 return 0;
70549 @@ -336,15 +336,15 @@ static int __init do_name(void)
70550 static int __init do_copy(void)
70551 {
70552 if (count >= body_len) {
70553 - sys_write(wfd, victim, body_len);
70554 + sys_write(wfd, (char __force_user *)victim, body_len);
70555 sys_close(wfd);
70556 - do_utime(vcollected, mtime);
70557 + do_utime((char __force_user *)vcollected, mtime);
70558 kfree(vcollected);
70559 eat(body_len);
70560 state = SkipIt;
70561 return 0;
70562 } else {
70563 - sys_write(wfd, victim, count);
70564 + sys_write(wfd, (char __force_user *)victim, count);
70565 body_len -= count;
70566 eat(count);
70567 return 1;
70568 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
70569 {
70570 collected[N_ALIGN(name_len) + body_len] = '\0';
70571 clean_path(collected, 0);
70572 - sys_symlink(collected + N_ALIGN(name_len), collected);
70573 - sys_lchown(collected, uid, gid);
70574 - do_utime(collected, mtime);
70575 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
70576 + sys_lchown((char __force_user *)collected, uid, gid);
70577 + do_utime((char __force_user *)collected, mtime);
70578 state = SkipIt;
70579 next_state = Reset;
70580 return 0;
70581 diff --git a/init/main.c b/init/main.c
70582 index 1eb4bd5..da8c6f5 100644
70583 --- a/init/main.c
70584 +++ b/init/main.c
70585 @@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void) { }
70586 #ifdef CONFIG_TC
70587 extern void tc_init(void);
70588 #endif
70589 +extern void grsecurity_init(void);
70590
70591 enum system_states system_state __read_mostly;
70592 EXPORT_SYMBOL(system_state);
70593 @@ -183,6 +184,49 @@ static int __init set_reset_devices(char *str)
70594
70595 __setup("reset_devices", set_reset_devices);
70596
70597 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
70598 +extern char pax_enter_kernel_user[];
70599 +extern char pax_exit_kernel_user[];
70600 +extern pgdval_t clone_pgd_mask;
70601 +#endif
70602 +
70603 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
70604 +static int __init setup_pax_nouderef(char *str)
70605 +{
70606 +#ifdef CONFIG_X86_32
70607 + unsigned int cpu;
70608 + struct desc_struct *gdt;
70609 +
70610 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
70611 + gdt = get_cpu_gdt_table(cpu);
70612 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
70613 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
70614 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
70615 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
70616 + }
70617 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
70618 +#else
70619 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
70620 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
70621 + clone_pgd_mask = ~(pgdval_t)0UL;
70622 +#endif
70623 +
70624 + return 0;
70625 +}
70626 +early_param("pax_nouderef", setup_pax_nouderef);
70627 +#endif
70628 +
70629 +#ifdef CONFIG_PAX_SOFTMODE
70630 +int pax_softmode;
70631 +
70632 +static int __init setup_pax_softmode(char *str)
70633 +{
70634 + get_option(&str, &pax_softmode);
70635 + return 1;
70636 +}
70637 +__setup("pax_softmode=", setup_pax_softmode);
70638 +#endif
70639 +
70640 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
70641 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
70642 static const char *panic_later, *panic_param;
70643 @@ -705,52 +749,53 @@ int initcall_debug;
70644 core_param(initcall_debug, initcall_debug, bool, 0644);
70645
70646 static char msgbuf[64];
70647 -static struct boot_trace_call call;
70648 -static struct boot_trace_ret ret;
70649 +static struct boot_trace_call trace_call;
70650 +static struct boot_trace_ret trace_ret;
70651
70652 int do_one_initcall(initcall_t fn)
70653 {
70654 int count = preempt_count();
70655 ktime_t calltime, delta, rettime;
70656 + const char *msg1 = "", *msg2 = "";
70657
70658 if (initcall_debug) {
70659 - call.caller = task_pid_nr(current);
70660 - printk("calling %pF @ %i\n", fn, call.caller);
70661 + trace_call.caller = task_pid_nr(current);
70662 + printk("calling %pF @ %i\n", fn, trace_call.caller);
70663 calltime = ktime_get();
70664 - trace_boot_call(&call, fn);
70665 + trace_boot_call(&trace_call, fn);
70666 enable_boot_trace();
70667 }
70668
70669 - ret.result = fn();
70670 + trace_ret.result = fn();
70671
70672 if (initcall_debug) {
70673 disable_boot_trace();
70674 rettime = ktime_get();
70675 delta = ktime_sub(rettime, calltime);
70676 - ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
70677 - trace_boot_ret(&ret, fn);
70678 + trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
70679 + trace_boot_ret(&trace_ret, fn);
70680 printk("initcall %pF returned %d after %Ld usecs\n", fn,
70681 - ret.result, ret.duration);
70682 + trace_ret.result, trace_ret.duration);
70683 }
70684
70685 msgbuf[0] = 0;
70686
70687 - if (ret.result && ret.result != -ENODEV && initcall_debug)
70688 - sprintf(msgbuf, "error code %d ", ret.result);
70689 + if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
70690 + sprintf(msgbuf, "error code %d ", trace_ret.result);
70691
70692 if (preempt_count() != count) {
70693 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
70694 + msg1 = " preemption imbalance";
70695 preempt_count() = count;
70696 }
70697 if (irqs_disabled()) {
70698 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
70699 + msg2 = " disabled interrupts";
70700 local_irq_enable();
70701 }
70702 - if (msgbuf[0]) {
70703 - printk("initcall %pF returned with %s\n", fn, msgbuf);
70704 + if (msgbuf[0] || *msg1 || *msg2) {
70705 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
70706 }
70707
70708 - return ret.result;
70709 + return trace_ret.result;
70710 }
70711
70712
70713 @@ -893,11 +938,13 @@ static int __init kernel_init(void * unused)
70714 if (!ramdisk_execute_command)
70715 ramdisk_execute_command = "/init";
70716
70717 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
70718 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
70719 ramdisk_execute_command = NULL;
70720 prepare_namespace();
70721 }
70722
70723 + grsecurity_init();
70724 +
70725 /*
70726 * Ok, we have completed the initial bootup, and
70727 * we're essentially up and running. Get rid of the
70728 diff --git a/init/noinitramfs.c b/init/noinitramfs.c
70729 index f4c1a3a..96c19bd 100644
70730 --- a/init/noinitramfs.c
70731 +++ b/init/noinitramfs.c
70732 @@ -29,7 +29,7 @@ static int __init default_rootfs(void)
70733 {
70734 int err;
70735
70736 - err = sys_mkdir("/dev", 0755);
70737 + err = sys_mkdir((const char __user *)"/dev", 0755);
70738 if (err < 0)
70739 goto out;
70740
70741 @@ -39,7 +39,7 @@ static int __init default_rootfs(void)
70742 if (err < 0)
70743 goto out;
70744
70745 - err = sys_mkdir("/root", 0700);
70746 + err = sys_mkdir((const char __user *)"/root", 0700);
70747 if (err < 0)
70748 goto out;
70749
70750 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
70751 index d01bc14..8df81db 100644
70752 --- a/ipc/mqueue.c
70753 +++ b/ipc/mqueue.c
70754 @@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
70755 mq_bytes = (mq_msg_tblsz +
70756 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
70757
70758 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
70759 spin_lock(&mq_lock);
70760 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
70761 u->mq_bytes + mq_bytes >
70762 diff --git a/ipc/msg.c b/ipc/msg.c
70763 index 779f762..4af9e36 100644
70764 --- a/ipc/msg.c
70765 +++ b/ipc/msg.c
70766 @@ -310,18 +310,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
70767 return security_msg_queue_associate(msq, msgflg);
70768 }
70769
70770 +static struct ipc_ops msg_ops = {
70771 + .getnew = newque,
70772 + .associate = msg_security,
70773 + .more_checks = NULL
70774 +};
70775 +
70776 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
70777 {
70778 struct ipc_namespace *ns;
70779 - struct ipc_ops msg_ops;
70780 struct ipc_params msg_params;
70781
70782 ns = current->nsproxy->ipc_ns;
70783
70784 - msg_ops.getnew = newque;
70785 - msg_ops.associate = msg_security;
70786 - msg_ops.more_checks = NULL;
70787 -
70788 msg_params.key = key;
70789 msg_params.flg = msgflg;
70790
70791 diff --git a/ipc/sem.c b/ipc/sem.c
70792 index b781007..f738b04 100644
70793 --- a/ipc/sem.c
70794 +++ b/ipc/sem.c
70795 @@ -309,10 +309,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
70796 return 0;
70797 }
70798
70799 +static struct ipc_ops sem_ops = {
70800 + .getnew = newary,
70801 + .associate = sem_security,
70802 + .more_checks = sem_more_checks
70803 +};
70804 +
70805 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
70806 {
70807 struct ipc_namespace *ns;
70808 - struct ipc_ops sem_ops;
70809 struct ipc_params sem_params;
70810
70811 ns = current->nsproxy->ipc_ns;
70812 @@ -320,10 +325,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
70813 if (nsems < 0 || nsems > ns->sc_semmsl)
70814 return -EINVAL;
70815
70816 - sem_ops.getnew = newary;
70817 - sem_ops.associate = sem_security;
70818 - sem_ops.more_checks = sem_more_checks;
70819 -
70820 sem_params.key = key;
70821 sem_params.flg = semflg;
70822 sem_params.u.nsems = nsems;
70823 @@ -671,6 +672,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
70824 ushort* sem_io = fast_sem_io;
70825 int nsems;
70826
70827 + pax_track_stack();
70828 +
70829 sma = sem_lock_check(ns, semid);
70830 if (IS_ERR(sma))
70831 return PTR_ERR(sma);
70832 @@ -1071,6 +1074,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
70833 unsigned long jiffies_left = 0;
70834 struct ipc_namespace *ns;
70835
70836 + pax_track_stack();
70837 +
70838 ns = current->nsproxy->ipc_ns;
70839
70840 if (nsops < 1 || semid < 0)
70841 diff --git a/ipc/shm.c b/ipc/shm.c
70842 index d30732c..7379456 100644
70843 --- a/ipc/shm.c
70844 +++ b/ipc/shm.c
70845 @@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
70846 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
70847 #endif
70848
70849 +#ifdef CONFIG_GRKERNSEC
70850 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
70851 + const time_t shm_createtime, const uid_t cuid,
70852 + const int shmid);
70853 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
70854 + const time_t shm_createtime);
70855 +#endif
70856 +
70857 void shm_init_ns(struct ipc_namespace *ns)
70858 {
70859 ns->shm_ctlmax = SHMMAX;
70860 @@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
70861 shp->shm_lprid = 0;
70862 shp->shm_atim = shp->shm_dtim = 0;
70863 shp->shm_ctim = get_seconds();
70864 +#ifdef CONFIG_GRKERNSEC
70865 + {
70866 + struct timespec timeval;
70867 + do_posix_clock_monotonic_gettime(&timeval);
70868 +
70869 + shp->shm_createtime = timeval.tv_sec;
70870 + }
70871 +#endif
70872 shp->shm_segsz = size;
70873 shp->shm_nattch = 0;
70874 shp->shm_file = file;
70875 @@ -446,18 +462,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
70876 return 0;
70877 }
70878
70879 +static struct ipc_ops shm_ops = {
70880 + .getnew = newseg,
70881 + .associate = shm_security,
70882 + .more_checks = shm_more_checks
70883 +};
70884 +
70885 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
70886 {
70887 struct ipc_namespace *ns;
70888 - struct ipc_ops shm_ops;
70889 struct ipc_params shm_params;
70890
70891 ns = current->nsproxy->ipc_ns;
70892
70893 - shm_ops.getnew = newseg;
70894 - shm_ops.associate = shm_security;
70895 - shm_ops.more_checks = shm_more_checks;
70896 -
70897 shm_params.key = key;
70898 shm_params.flg = shmflg;
70899 shm_params.u.size = size;
70900 @@ -880,9 +897,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
70901 if (err)
70902 goto out_unlock;
70903
70904 +#ifdef CONFIG_GRKERNSEC
70905 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
70906 + shp->shm_perm.cuid, shmid) ||
70907 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
70908 + err = -EACCES;
70909 + goto out_unlock;
70910 + }
70911 +#endif
70912 +
70913 path.dentry = dget(shp->shm_file->f_path.dentry);
70914 path.mnt = shp->shm_file->f_path.mnt;
70915 shp->shm_nattch++;
70916 +#ifdef CONFIG_GRKERNSEC
70917 + shp->shm_lapid = current->pid;
70918 +#endif
70919 size = i_size_read(path.dentry->d_inode);
70920 shm_unlock(shp);
70921
70922 diff --git a/kernel/acct.c b/kernel/acct.c
70923 index a6605ca..ca91111 100644
70924 --- a/kernel/acct.c
70925 +++ b/kernel/acct.c
70926 @@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
70927 */
70928 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
70929 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
70930 - file->f_op->write(file, (char *)&ac,
70931 + file->f_op->write(file, (char __force_user *)&ac,
70932 sizeof(acct_t), &file->f_pos);
70933 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
70934 set_fs(fs);
70935 diff --git a/kernel/audit.c b/kernel/audit.c
70936 index 5feed23..48415fd 100644
70937 --- a/kernel/audit.c
70938 +++ b/kernel/audit.c
70939 @@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
70940 3) suppressed due to audit_rate_limit
70941 4) suppressed due to audit_backlog_limit
70942 */
70943 -static atomic_t audit_lost = ATOMIC_INIT(0);
70944 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
70945
70946 /* The netlink socket. */
70947 static struct sock *audit_sock;
70948 @@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
70949 unsigned long now;
70950 int print;
70951
70952 - atomic_inc(&audit_lost);
70953 + atomic_inc_unchecked(&audit_lost);
70954
70955 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
70956
70957 @@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
70958 printk(KERN_WARNING
70959 "audit: audit_lost=%d audit_rate_limit=%d "
70960 "audit_backlog_limit=%d\n",
70961 - atomic_read(&audit_lost),
70962 + atomic_read_unchecked(&audit_lost),
70963 audit_rate_limit,
70964 audit_backlog_limit);
70965 audit_panic(message);
70966 @@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
70967 status_set.pid = audit_pid;
70968 status_set.rate_limit = audit_rate_limit;
70969 status_set.backlog_limit = audit_backlog_limit;
70970 - status_set.lost = atomic_read(&audit_lost);
70971 + status_set.lost = atomic_read_unchecked(&audit_lost);
70972 status_set.backlog = skb_queue_len(&audit_skb_queue);
70973 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
70974 &status_set, sizeof(status_set));
70975 @@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
70976 spin_unlock_irq(&tsk->sighand->siglock);
70977 }
70978 read_unlock(&tasklist_lock);
70979 - audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
70980 - &s, sizeof(s));
70981 +
70982 + if (!err)
70983 + audit_send_reply(NETLINK_CB(skb).pid, seq,
70984 + AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
70985 break;
70986 }
70987 case AUDIT_TTY_SET: {
70988 @@ -1262,12 +1264,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
70989 avail = audit_expand(ab,
70990 max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
70991 if (!avail)
70992 - goto out;
70993 + goto out_va_end;
70994 len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
70995 }
70996 - va_end(args2);
70997 if (len > 0)
70998 skb_put(skb, len);
70999 +out_va_end:
71000 + va_end(args2);
71001 out:
71002 return;
71003 }
71004 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
71005 index 267e484..ac41bc3 100644
71006 --- a/kernel/auditsc.c
71007 +++ b/kernel/auditsc.c
71008 @@ -1157,8 +1157,8 @@ static void audit_log_execve_info(struct audit_context *context,
71009 struct audit_buffer **ab,
71010 struct audit_aux_data_execve *axi)
71011 {
71012 - int i;
71013 - size_t len, len_sent = 0;
71014 + int i, len;
71015 + size_t len_sent = 0;
71016 const char __user *p;
71017 char *buf;
71018
71019 @@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
71020 }
71021
71022 /* global counter which is incremented every time something logs in */
71023 -static atomic_t session_id = ATOMIC_INIT(0);
71024 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
71025
71026 /**
71027 * audit_set_loginuid - set a task's audit_context loginuid
71028 @@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
71029 */
71030 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
71031 {
71032 - unsigned int sessionid = atomic_inc_return(&session_id);
71033 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
71034 struct audit_context *context = task->audit_context;
71035
71036 if (context && context->in_syscall) {
71037 diff --git a/kernel/capability.c b/kernel/capability.c
71038 index 8a944f5..db5001e 100644
71039 --- a/kernel/capability.c
71040 +++ b/kernel/capability.c
71041 @@ -305,10 +305,26 @@ int capable(int cap)
71042 BUG();
71043 }
71044
71045 - if (security_capable(cap) == 0) {
71046 + if (security_capable(cap) == 0 && gr_is_capable(cap)) {
71047 current->flags |= PF_SUPERPRIV;
71048 return 1;
71049 }
71050 return 0;
71051 }
71052 +
71053 +int capable_nolog(int cap)
71054 +{
71055 + if (unlikely(!cap_valid(cap))) {
71056 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
71057 + BUG();
71058 + }
71059 +
71060 + if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
71061 + current->flags |= PF_SUPERPRIV;
71062 + return 1;
71063 + }
71064 + return 0;
71065 +}
71066 +
71067 EXPORT_SYMBOL(capable);
71068 +EXPORT_SYMBOL(capable_nolog);
71069 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
71070 index 1fbcc74..7000012 100644
71071 --- a/kernel/cgroup.c
71072 +++ b/kernel/cgroup.c
71073 @@ -536,6 +536,8 @@ static struct css_set *find_css_set(
71074 struct hlist_head *hhead;
71075 struct cg_cgroup_link *link;
71076
71077 + pax_track_stack();
71078 +
71079 /* First see if we already have a cgroup group that matches
71080 * the desired set */
71081 read_lock(&css_set_lock);
71082 diff --git a/kernel/compat.c b/kernel/compat.c
71083 index 8bc5578..186e44a 100644
71084 --- a/kernel/compat.c
71085 +++ b/kernel/compat.c
71086 @@ -108,7 +108,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
71087 mm_segment_t oldfs;
71088 long ret;
71089
71090 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
71091 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
71092 oldfs = get_fs();
71093 set_fs(KERNEL_DS);
71094 ret = hrtimer_nanosleep_restart(restart);
71095 @@ -140,7 +140,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
71096 oldfs = get_fs();
71097 set_fs(KERNEL_DS);
71098 ret = hrtimer_nanosleep(&tu,
71099 - rmtp ? (struct timespec __user *)&rmt : NULL,
71100 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
71101 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
71102 set_fs(oldfs);
71103
71104 @@ -247,7 +247,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
71105 mm_segment_t old_fs = get_fs();
71106
71107 set_fs(KERNEL_DS);
71108 - ret = sys_sigpending((old_sigset_t __user *) &s);
71109 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
71110 set_fs(old_fs);
71111 if (ret == 0)
71112 ret = put_user(s, set);
71113 @@ -266,8 +266,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
71114 old_fs = get_fs();
71115 set_fs(KERNEL_DS);
71116 ret = sys_sigprocmask(how,
71117 - set ? (old_sigset_t __user *) &s : NULL,
71118 - oset ? (old_sigset_t __user *) &s : NULL);
71119 + set ? (old_sigset_t __force_user *) &s : NULL,
71120 + oset ? (old_sigset_t __force_user *) &s : NULL);
71121 set_fs(old_fs);
71122 if (ret == 0)
71123 if (oset)
71124 @@ -310,7 +310,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
71125 mm_segment_t old_fs = get_fs();
71126
71127 set_fs(KERNEL_DS);
71128 - ret = sys_old_getrlimit(resource, &r);
71129 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
71130 set_fs(old_fs);
71131
71132 if (!ret) {
71133 @@ -385,7 +385,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
71134 mm_segment_t old_fs = get_fs();
71135
71136 set_fs(KERNEL_DS);
71137 - ret = sys_getrusage(who, (struct rusage __user *) &r);
71138 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
71139 set_fs(old_fs);
71140
71141 if (ret)
71142 @@ -412,8 +412,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
71143 set_fs (KERNEL_DS);
71144 ret = sys_wait4(pid,
71145 (stat_addr ?
71146 - (unsigned int __user *) &status : NULL),
71147 - options, (struct rusage __user *) &r);
71148 + (unsigned int __force_user *) &status : NULL),
71149 + options, (struct rusage __force_user *) &r);
71150 set_fs (old_fs);
71151
71152 if (ret > 0) {
71153 @@ -438,8 +438,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
71154 memset(&info, 0, sizeof(info));
71155
71156 set_fs(KERNEL_DS);
71157 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
71158 - uru ? (struct rusage __user *)&ru : NULL);
71159 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
71160 + uru ? (struct rusage __force_user *)&ru : NULL);
71161 set_fs(old_fs);
71162
71163 if ((ret < 0) || (info.si_signo == 0))
71164 @@ -569,8 +569,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
71165 oldfs = get_fs();
71166 set_fs(KERNEL_DS);
71167 err = sys_timer_settime(timer_id, flags,
71168 - (struct itimerspec __user *) &newts,
71169 - (struct itimerspec __user *) &oldts);
71170 + (struct itimerspec __force_user *) &newts,
71171 + (struct itimerspec __force_user *) &oldts);
71172 set_fs(oldfs);
71173 if (!err && old && put_compat_itimerspec(old, &oldts))
71174 return -EFAULT;
71175 @@ -587,7 +587,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
71176 oldfs = get_fs();
71177 set_fs(KERNEL_DS);
71178 err = sys_timer_gettime(timer_id,
71179 - (struct itimerspec __user *) &ts);
71180 + (struct itimerspec __force_user *) &ts);
71181 set_fs(oldfs);
71182 if (!err && put_compat_itimerspec(setting, &ts))
71183 return -EFAULT;
71184 @@ -606,7 +606,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
71185 oldfs = get_fs();
71186 set_fs(KERNEL_DS);
71187 err = sys_clock_settime(which_clock,
71188 - (struct timespec __user *) &ts);
71189 + (struct timespec __force_user *) &ts);
71190 set_fs(oldfs);
71191 return err;
71192 }
71193 @@ -621,7 +621,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
71194 oldfs = get_fs();
71195 set_fs(KERNEL_DS);
71196 err = sys_clock_gettime(which_clock,
71197 - (struct timespec __user *) &ts);
71198 + (struct timespec __force_user *) &ts);
71199 set_fs(oldfs);
71200 if (!err && put_compat_timespec(&ts, tp))
71201 return -EFAULT;
71202 @@ -638,7 +638,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
71203 oldfs = get_fs();
71204 set_fs(KERNEL_DS);
71205 err = sys_clock_getres(which_clock,
71206 - (struct timespec __user *) &ts);
71207 + (struct timespec __force_user *) &ts);
71208 set_fs(oldfs);
71209 if (!err && tp && put_compat_timespec(&ts, tp))
71210 return -EFAULT;
71211 @@ -650,9 +650,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
71212 long err;
71213 mm_segment_t oldfs;
71214 struct timespec tu;
71215 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
71216 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
71217
71218 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
71219 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
71220 oldfs = get_fs();
71221 set_fs(KERNEL_DS);
71222 err = clock_nanosleep_restart(restart);
71223 @@ -684,8 +684,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
71224 oldfs = get_fs();
71225 set_fs(KERNEL_DS);
71226 err = sys_clock_nanosleep(which_clock, flags,
71227 - (struct timespec __user *) &in,
71228 - (struct timespec __user *) &out);
71229 + (struct timespec __force_user *) &in,
71230 + (struct timespec __force_user *) &out);
71231 set_fs(oldfs);
71232
71233 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
71234 diff --git a/kernel/configs.c b/kernel/configs.c
71235 index abaee68..047facd 100644
71236 --- a/kernel/configs.c
71237 +++ b/kernel/configs.c
71238 @@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
71239 struct proc_dir_entry *entry;
71240
71241 /* create the current config file */
71242 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
71243 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
71244 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
71245 + &ikconfig_file_ops);
71246 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71247 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
71248 + &ikconfig_file_ops);
71249 +#endif
71250 +#else
71251 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
71252 &ikconfig_file_ops);
71253 +#endif
71254 +
71255 if (!entry)
71256 return -ENOMEM;
71257
71258 diff --git a/kernel/cpu.c b/kernel/cpu.c
71259 index 3f2f04f..4e53ded 100644
71260 --- a/kernel/cpu.c
71261 +++ b/kernel/cpu.c
71262 @@ -20,7 +20,7 @@
71263 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
71264 static DEFINE_MUTEX(cpu_add_remove_lock);
71265
71266 -static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
71267 +static RAW_NOTIFIER_HEAD(cpu_chain);
71268
71269 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
71270 * Should always be manipulated under cpu_add_remove_lock
71271 diff --git a/kernel/cred.c b/kernel/cred.c
71272 index 0b5b5fc..419b86a 100644
71273 --- a/kernel/cred.c
71274 +++ b/kernel/cred.c
71275 @@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head *rcu)
71276 */
71277 void __put_cred(struct cred *cred)
71278 {
71279 + pax_track_stack();
71280 +
71281 kdebug("__put_cred(%p{%d,%d})", cred,
71282 atomic_read(&cred->usage),
71283 read_cred_subscribers(cred));
71284 @@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
71285 {
71286 struct cred *cred;
71287
71288 + pax_track_stack();
71289 +
71290 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
71291 atomic_read(&tsk->cred->usage),
71292 read_cred_subscribers(tsk->cred));
71293 @@ -222,6 +226,8 @@ const struct cred *get_task_cred(struct task_struct *task)
71294 {
71295 const struct cred *cred;
71296
71297 + pax_track_stack();
71298 +
71299 rcu_read_lock();
71300
71301 do {
71302 @@ -241,6 +247,8 @@ struct cred *cred_alloc_blank(void)
71303 {
71304 struct cred *new;
71305
71306 + pax_track_stack();
71307 +
71308 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
71309 if (!new)
71310 return NULL;
71311 @@ -289,6 +297,8 @@ struct cred *prepare_creds(void)
71312 const struct cred *old;
71313 struct cred *new;
71314
71315 + pax_track_stack();
71316 +
71317 validate_process_creds();
71318
71319 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
71320 @@ -335,6 +345,8 @@ struct cred *prepare_exec_creds(void)
71321 struct thread_group_cred *tgcred = NULL;
71322 struct cred *new;
71323
71324 + pax_track_stack();
71325 +
71326 #ifdef CONFIG_KEYS
71327 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
71328 if (!tgcred)
71329 @@ -441,6 +453,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
71330 struct cred *new;
71331 int ret;
71332
71333 + pax_track_stack();
71334 +
71335 mutex_init(&p->cred_guard_mutex);
71336
71337 if (
71338 @@ -528,6 +542,8 @@ int commit_creds(struct cred *new)
71339 struct task_struct *task = current;
71340 const struct cred *old = task->real_cred;
71341
71342 + pax_track_stack();
71343 +
71344 kdebug("commit_creds(%p{%d,%d})", new,
71345 atomic_read(&new->usage),
71346 read_cred_subscribers(new));
71347 @@ -544,6 +560,8 @@ int commit_creds(struct cred *new)
71348
71349 get_cred(new); /* we will require a ref for the subj creds too */
71350
71351 + gr_set_role_label(task, new->uid, new->gid);
71352 +
71353 /* dumpability changes */
71354 if (old->euid != new->euid ||
71355 old->egid != new->egid ||
71356 @@ -563,10 +581,8 @@ int commit_creds(struct cred *new)
71357 key_fsgid_changed(task);
71358
71359 /* do it
71360 - * - What if a process setreuid()'s and this brings the
71361 - * new uid over his NPROC rlimit? We can check this now
71362 - * cheaply with the new uid cache, so if it matters
71363 - * we should be checking for it. -DaveM
71364 + * RLIMIT_NPROC limits on user->processes have already been checked
71365 + * in set_user().
71366 */
71367 alter_cred_subscribers(new, 2);
71368 if (new->user != old->user)
71369 @@ -606,6 +622,8 @@ EXPORT_SYMBOL(commit_creds);
71370 */
71371 void abort_creds(struct cred *new)
71372 {
71373 + pax_track_stack();
71374 +
71375 kdebug("abort_creds(%p{%d,%d})", new,
71376 atomic_read(&new->usage),
71377 read_cred_subscribers(new));
71378 @@ -629,6 +647,8 @@ const struct cred *override_creds(const struct cred *new)
71379 {
71380 const struct cred *old = current->cred;
71381
71382 + pax_track_stack();
71383 +
71384 kdebug("override_creds(%p{%d,%d})", new,
71385 atomic_read(&new->usage),
71386 read_cred_subscribers(new));
71387 @@ -658,6 +678,8 @@ void revert_creds(const struct cred *old)
71388 {
71389 const struct cred *override = current->cred;
71390
71391 + pax_track_stack();
71392 +
71393 kdebug("revert_creds(%p{%d,%d})", old,
71394 atomic_read(&old->usage),
71395 read_cred_subscribers(old));
71396 @@ -704,6 +726,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
71397 const struct cred *old;
71398 struct cred *new;
71399
71400 + pax_track_stack();
71401 +
71402 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
71403 if (!new)
71404 return NULL;
71405 @@ -758,6 +782,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
71406 */
71407 int set_security_override(struct cred *new, u32 secid)
71408 {
71409 + pax_track_stack();
71410 +
71411 return security_kernel_act_as(new, secid);
71412 }
71413 EXPORT_SYMBOL(set_security_override);
71414 @@ -777,6 +803,8 @@ int set_security_override_from_ctx(struct cred *new, const char *secctx)
71415 u32 secid;
71416 int ret;
71417
71418 + pax_track_stack();
71419 +
71420 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
71421 if (ret < 0)
71422 return ret;
71423 diff --git a/kernel/exit.c b/kernel/exit.c
71424 index 0f8fae3..9344a56 100644
71425 --- a/kernel/exit.c
71426 +++ b/kernel/exit.c
71427 @@ -55,6 +55,10 @@
71428 #include <asm/pgtable.h>
71429 #include <asm/mmu_context.h>
71430
71431 +#ifdef CONFIG_GRKERNSEC
71432 +extern rwlock_t grsec_exec_file_lock;
71433 +#endif
71434 +
71435 static void exit_mm(struct task_struct * tsk);
71436
71437 static void __unhash_process(struct task_struct *p)
71438 @@ -174,6 +178,10 @@ void release_task(struct task_struct * p)
71439 struct task_struct *leader;
71440 int zap_leader;
71441 repeat:
71442 +#ifdef CONFIG_NET
71443 + gr_del_task_from_ip_table(p);
71444 +#endif
71445 +
71446 tracehook_prepare_release_task(p);
71447 /* don't need to get the RCU readlock here - the process is dead and
71448 * can't be modifying its own credentials */
71449 @@ -397,7 +405,7 @@ int allow_signal(int sig)
71450 * know it'll be handled, so that they don't get converted to
71451 * SIGKILL or just silently dropped.
71452 */
71453 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
71454 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
71455 recalc_sigpending();
71456 spin_unlock_irq(&current->sighand->siglock);
71457 return 0;
71458 @@ -433,6 +441,17 @@ void daemonize(const char *name, ...)
71459 vsnprintf(current->comm, sizeof(current->comm), name, args);
71460 va_end(args);
71461
71462 +#ifdef CONFIG_GRKERNSEC
71463 + write_lock(&grsec_exec_file_lock);
71464 + if (current->exec_file) {
71465 + fput(current->exec_file);
71466 + current->exec_file = NULL;
71467 + }
71468 + write_unlock(&grsec_exec_file_lock);
71469 +#endif
71470 +
71471 + gr_set_kernel_label(current);
71472 +
71473 /*
71474 * If we were started as result of loading a module, close all of the
71475 * user space pages. We don't need them, and if we didn't close them
71476 @@ -897,17 +916,17 @@ NORET_TYPE void do_exit(long code)
71477 struct task_struct *tsk = current;
71478 int group_dead;
71479
71480 - profile_task_exit(tsk);
71481 -
71482 - WARN_ON(atomic_read(&tsk->fs_excl));
71483 -
71484 + /*
71485 + * Check this first since set_fs() below depends on
71486 + * current_thread_info(), which we better not access when we're in
71487 + * interrupt context. Other than that, we want to do the set_fs()
71488 + * as early as possible.
71489 + */
71490 if (unlikely(in_interrupt()))
71491 panic("Aiee, killing interrupt handler!");
71492 - if (unlikely(!tsk->pid))
71493 - panic("Attempted to kill the idle task!");
71494
71495 /*
71496 - * If do_exit is called because this processes oopsed, it's possible
71497 + * If do_exit is called because this processes Oops'ed, it's possible
71498 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
71499 * continuing. Amongst other possible reasons, this is to prevent
71500 * mm_release()->clear_child_tid() from writing to a user-controlled
71501 @@ -915,6 +934,13 @@ NORET_TYPE void do_exit(long code)
71502 */
71503 set_fs(USER_DS);
71504
71505 + profile_task_exit(tsk);
71506 +
71507 + WARN_ON(atomic_read(&tsk->fs_excl));
71508 +
71509 + if (unlikely(!tsk->pid))
71510 + panic("Attempted to kill the idle task!");
71511 +
71512 tracehook_report_exit(&code);
71513
71514 validate_creds_for_do_exit(tsk);
71515 @@ -973,6 +999,9 @@ NORET_TYPE void do_exit(long code)
71516 tsk->exit_code = code;
71517 taskstats_exit(tsk, group_dead);
71518
71519 + gr_acl_handle_psacct(tsk, code);
71520 + gr_acl_handle_exit();
71521 +
71522 exit_mm(tsk);
71523
71524 if (group_dead)
71525 @@ -1188,7 +1217,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
71526
71527 if (unlikely(wo->wo_flags & WNOWAIT)) {
71528 int exit_code = p->exit_code;
71529 - int why, status;
71530 + int why;
71531
71532 get_task_struct(p);
71533 read_unlock(&tasklist_lock);
71534 diff --git a/kernel/fork.c b/kernel/fork.c
71535 index 4bde56f..29a9bab 100644
71536 --- a/kernel/fork.c
71537 +++ b/kernel/fork.c
71538 @@ -253,7 +253,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
71539 *stackend = STACK_END_MAGIC; /* for overflow detection */
71540
71541 #ifdef CONFIG_CC_STACKPROTECTOR
71542 - tsk->stack_canary = get_random_int();
71543 + tsk->stack_canary = pax_get_random_long();
71544 #endif
71545
71546 /* One for us, one for whoever does the "release_task()" (usually parent) */
71547 @@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
71548 mm->locked_vm = 0;
71549 mm->mmap = NULL;
71550 mm->mmap_cache = NULL;
71551 - mm->free_area_cache = oldmm->mmap_base;
71552 - mm->cached_hole_size = ~0UL;
71553 + mm->free_area_cache = oldmm->free_area_cache;
71554 + mm->cached_hole_size = oldmm->cached_hole_size;
71555 mm->map_count = 0;
71556 cpumask_clear(mm_cpumask(mm));
71557 mm->mm_rb = RB_ROOT;
71558 @@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
71559 tmp->vm_flags &= ~VM_LOCKED;
71560 tmp->vm_mm = mm;
71561 tmp->vm_next = tmp->vm_prev = NULL;
71562 + tmp->vm_mirror = NULL;
71563 anon_vma_link(tmp);
71564 file = tmp->vm_file;
71565 if (file) {
71566 @@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
71567 if (retval)
71568 goto out;
71569 }
71570 +
71571 +#ifdef CONFIG_PAX_SEGMEXEC
71572 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
71573 + struct vm_area_struct *mpnt_m;
71574 +
71575 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
71576 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
71577 +
71578 + if (!mpnt->vm_mirror)
71579 + continue;
71580 +
71581 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
71582 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
71583 + mpnt->vm_mirror = mpnt_m;
71584 + } else {
71585 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
71586 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
71587 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
71588 + mpnt->vm_mirror->vm_mirror = mpnt;
71589 + }
71590 + }
71591 + BUG_ON(mpnt_m);
71592 + }
71593 +#endif
71594 +
71595 /* a new mm has just been created */
71596 arch_dup_mmap(oldmm, mm);
71597 retval = 0;
71598 @@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
71599 write_unlock(&fs->lock);
71600 return -EAGAIN;
71601 }
71602 - fs->users++;
71603 + atomic_inc(&fs->users);
71604 write_unlock(&fs->lock);
71605 return 0;
71606 }
71607 tsk->fs = copy_fs_struct(fs);
71608 if (!tsk->fs)
71609 return -ENOMEM;
71610 + gr_set_chroot_entries(tsk, &tsk->fs->root);
71611 return 0;
71612 }
71613
71614 @@ -1033,12 +1060,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
71615 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
71616 #endif
71617 retval = -EAGAIN;
71618 +
71619 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
71620 +
71621 if (atomic_read(&p->real_cred->user->processes) >=
71622 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
71623 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
71624 - p->real_cred->user != INIT_USER)
71625 + if (p->real_cred->user != INIT_USER &&
71626 + !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
71627 goto bad_fork_free;
71628 }
71629 + current->flags &= ~PF_NPROC_EXCEEDED;
71630
71631 retval = copy_creds(p, clone_flags);
71632 if (retval < 0)
71633 @@ -1183,6 +1214,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
71634 goto bad_fork_free_pid;
71635 }
71636
71637 + gr_copy_label(p);
71638 +
71639 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
71640 /*
71641 * Clear TID on mm_release()?
71642 @@ -1333,6 +1366,8 @@ bad_fork_cleanup_count:
71643 bad_fork_free:
71644 free_task(p);
71645 fork_out:
71646 + gr_log_forkfail(retval);
71647 +
71648 return ERR_PTR(retval);
71649 }
71650
71651 @@ -1426,6 +1461,8 @@ long do_fork(unsigned long clone_flags,
71652 if (clone_flags & CLONE_PARENT_SETTID)
71653 put_user(nr, parent_tidptr);
71654
71655 + gr_handle_brute_check();
71656 +
71657 if (clone_flags & CLONE_VFORK) {
71658 p->vfork_done = &vfork;
71659 init_completion(&vfork);
71660 @@ -1558,7 +1595,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
71661 return 0;
71662
71663 /* don't need lock here; in the worst case we'll do useless copy */
71664 - if (fs->users == 1)
71665 + if (atomic_read(&fs->users) == 1)
71666 return 0;
71667
71668 *new_fsp = copy_fs_struct(fs);
71669 @@ -1681,7 +1718,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
71670 fs = current->fs;
71671 write_lock(&fs->lock);
71672 current->fs = new_fs;
71673 - if (--fs->users)
71674 + gr_set_chroot_entries(current, &current->fs->root);
71675 + if (atomic_dec_return(&fs->users))
71676 new_fs = NULL;
71677 else
71678 new_fs = fs;
71679 diff --git a/kernel/futex.c b/kernel/futex.c
71680 index fb98c9f..333faec 100644
71681 --- a/kernel/futex.c
71682 +++ b/kernel/futex.c
71683 @@ -54,6 +54,7 @@
71684 #include <linux/mount.h>
71685 #include <linux/pagemap.h>
71686 #include <linux/syscalls.h>
71687 +#include <linux/ptrace.h>
71688 #include <linux/signal.h>
71689 #include <linux/module.h>
71690 #include <linux/magic.h>
71691 @@ -223,6 +224,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
71692 struct page *page;
71693 int err, ro = 0;
71694
71695 +#ifdef CONFIG_PAX_SEGMEXEC
71696 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
71697 + return -EFAULT;
71698 +#endif
71699 +
71700 /*
71701 * The futex address must be "naturally" aligned.
71702 */
71703 @@ -1819,6 +1825,8 @@ static int futex_wait(u32 __user *uaddr, int fshared,
71704 struct futex_q q;
71705 int ret;
71706
71707 + pax_track_stack();
71708 +
71709 if (!bitset)
71710 return -EINVAL;
71711
71712 @@ -1871,7 +1879,7 @@ retry:
71713
71714 restart = &current_thread_info()->restart_block;
71715 restart->fn = futex_wait_restart;
71716 - restart->futex.uaddr = (u32 *)uaddr;
71717 + restart->futex.uaddr = uaddr;
71718 restart->futex.val = val;
71719 restart->futex.time = abs_time->tv64;
71720 restart->futex.bitset = bitset;
71721 @@ -2233,6 +2241,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
71722 struct futex_q q;
71723 int res, ret;
71724
71725 + pax_track_stack();
71726 +
71727 if (!bitset)
71728 return -EINVAL;
71729
71730 @@ -2423,6 +2433,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
71731 if (!p)
71732 goto err_unlock;
71733 ret = -EPERM;
71734 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71735 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
71736 + goto err_unlock;
71737 +#endif
71738 pcred = __task_cred(p);
71739 if (cred->euid != pcred->euid &&
71740 cred->euid != pcred->uid &&
71741 @@ -2489,7 +2503,7 @@ retry:
71742 */
71743 static inline int fetch_robust_entry(struct robust_list __user **entry,
71744 struct robust_list __user * __user *head,
71745 - int *pi)
71746 + unsigned int *pi)
71747 {
71748 unsigned long uentry;
71749
71750 @@ -2670,6 +2684,7 @@ static int __init futex_init(void)
71751 {
71752 u32 curval;
71753 int i;
71754 + mm_segment_t oldfs;
71755
71756 /*
71757 * This will fail and we want it. Some arch implementations do
71758 @@ -2681,7 +2696,10 @@ static int __init futex_init(void)
71759 * implementation, the non functional ones will return
71760 * -ENOSYS.
71761 */
71762 + oldfs = get_fs();
71763 + set_fs(USER_DS);
71764 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
71765 + set_fs(oldfs);
71766 if (curval == -EFAULT)
71767 futex_cmpxchg_enabled = 1;
71768
71769 diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
71770 index 2357165..eb25501 100644
71771 --- a/kernel/futex_compat.c
71772 +++ b/kernel/futex_compat.c
71773 @@ -10,6 +10,7 @@
71774 #include <linux/compat.h>
71775 #include <linux/nsproxy.h>
71776 #include <linux/futex.h>
71777 +#include <linux/ptrace.h>
71778
71779 #include <asm/uaccess.h>
71780
71781 @@ -135,7 +136,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
71782 {
71783 struct compat_robust_list_head __user *head;
71784 unsigned long ret;
71785 - const struct cred *cred = current_cred(), *pcred;
71786 + const struct cred *cred = current_cred();
71787 + const struct cred *pcred;
71788
71789 if (!futex_cmpxchg_enabled)
71790 return -ENOSYS;
71791 @@ -151,6 +153,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
71792 if (!p)
71793 goto err_unlock;
71794 ret = -EPERM;
71795 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71796 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
71797 + goto err_unlock;
71798 +#endif
71799 pcred = __task_cred(p);
71800 if (cred->euid != pcred->euid &&
71801 cred->euid != pcred->uid &&
71802 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
71803 index 9b22d03..6295b62 100644
71804 --- a/kernel/gcov/base.c
71805 +++ b/kernel/gcov/base.c
71806 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
71807 }
71808
71809 #ifdef CONFIG_MODULES
71810 -static inline int within(void *addr, void *start, unsigned long size)
71811 -{
71812 - return ((addr >= start) && (addr < start + size));
71813 -}
71814 -
71815 /* Update list and generate events when modules are unloaded. */
71816 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
71817 void *data)
71818 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
71819 prev = NULL;
71820 /* Remove entries located in module from linked list. */
71821 for (info = gcov_info_head; info; info = info->next) {
71822 - if (within(info, mod->module_core, mod->core_size)) {
71823 + if (within_module_core_rw((unsigned long)info, mod)) {
71824 if (prev)
71825 prev->next = info->next;
71826 else
71827 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
71828 index a6e9d00..a0da4f9 100644
71829 --- a/kernel/hrtimer.c
71830 +++ b/kernel/hrtimer.c
71831 @@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
71832 local_irq_restore(flags);
71833 }
71834
71835 -static void run_hrtimer_softirq(struct softirq_action *h)
71836 +static void run_hrtimer_softirq(void)
71837 {
71838 hrtimer_peek_ahead_timers();
71839 }
71840 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
71841 index 8b6b8b6..6bc87df 100644
71842 --- a/kernel/kallsyms.c
71843 +++ b/kernel/kallsyms.c
71844 @@ -11,6 +11,9 @@
71845 * Changed the compression method from stem compression to "table lookup"
71846 * compression (see scripts/kallsyms.c for a more complete description)
71847 */
71848 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71849 +#define __INCLUDED_BY_HIDESYM 1
71850 +#endif
71851 #include <linux/kallsyms.h>
71852 #include <linux/module.h>
71853 #include <linux/init.h>
71854 @@ -51,12 +54,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
71855
71856 static inline int is_kernel_inittext(unsigned long addr)
71857 {
71858 + if (system_state != SYSTEM_BOOTING)
71859 + return 0;
71860 +
71861 if (addr >= (unsigned long)_sinittext
71862 && addr <= (unsigned long)_einittext)
71863 return 1;
71864 return 0;
71865 }
71866
71867 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71868 +#ifdef CONFIG_MODULES
71869 +static inline int is_module_text(unsigned long addr)
71870 +{
71871 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
71872 + return 1;
71873 +
71874 + addr = ktla_ktva(addr);
71875 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
71876 +}
71877 +#else
71878 +static inline int is_module_text(unsigned long addr)
71879 +{
71880 + return 0;
71881 +}
71882 +#endif
71883 +#endif
71884 +
71885 static inline int is_kernel_text(unsigned long addr)
71886 {
71887 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
71888 @@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigned long addr)
71889
71890 static inline int is_kernel(unsigned long addr)
71891 {
71892 +
71893 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71894 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
71895 + return 1;
71896 +
71897 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
71898 +#else
71899 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
71900 +#endif
71901 +
71902 return 1;
71903 return in_gate_area_no_task(addr);
71904 }
71905
71906 static int is_ksym_addr(unsigned long addr)
71907 {
71908 +
71909 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71910 + if (is_module_text(addr))
71911 + return 0;
71912 +#endif
71913 +
71914 if (all_var)
71915 return is_kernel(addr);
71916
71917 @@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
71918
71919 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
71920 {
71921 - iter->name[0] = '\0';
71922 iter->nameoff = get_symbol_offset(new_pos);
71923 iter->pos = new_pos;
71924 }
71925 @@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, void *p)
71926 {
71927 struct kallsym_iter *iter = m->private;
71928
71929 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71930 + if (current_uid())
71931 + return 0;
71932 +#endif
71933 +
71934 /* Some debugging symbols have no name. Ignore them. */
71935 if (!iter->name[0])
71936 return 0;
71937 @@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
71938 struct kallsym_iter *iter;
71939 int ret;
71940
71941 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
71942 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
71943 if (!iter)
71944 return -ENOMEM;
71945 reset_iter(iter, 0);
71946 diff --git a/kernel/kexec.c b/kernel/kexec.c
71947 index f336e21..9c1c20b 100644
71948 --- a/kernel/kexec.c
71949 +++ b/kernel/kexec.c
71950 @@ -1028,7 +1028,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
71951 unsigned long flags)
71952 {
71953 struct compat_kexec_segment in;
71954 - struct kexec_segment out, __user *ksegments;
71955 + struct kexec_segment out;
71956 + struct kexec_segment __user *ksegments;
71957 unsigned long i, result;
71958
71959 /* Don't allow clients that don't understand the native
71960 diff --git a/kernel/kgdb.c b/kernel/kgdb.c
71961 index 53dae4b..9ba3743 100644
71962 --- a/kernel/kgdb.c
71963 +++ b/kernel/kgdb.c
71964 @@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
71965 /* Guard for recursive entry */
71966 static int exception_level;
71967
71968 -static struct kgdb_io *kgdb_io_ops;
71969 +static const struct kgdb_io *kgdb_io_ops;
71970 static DEFINE_SPINLOCK(kgdb_registration_lock);
71971
71972 /* kgdb console driver is loaded */
71973 @@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1);
71974 */
71975 static atomic_t passive_cpu_wait[NR_CPUS];
71976 static atomic_t cpu_in_kgdb[NR_CPUS];
71977 -atomic_t kgdb_setting_breakpoint;
71978 +atomic_unchecked_t kgdb_setting_breakpoint;
71979
71980 struct task_struct *kgdb_usethread;
71981 struct task_struct *kgdb_contthread;
71982 @@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBYTES +
71983 sizeof(unsigned long)];
71984
71985 /* to keep track of the CPU which is doing the single stepping*/
71986 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
71987 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
71988
71989 /*
71990 * If you are debugging a problem where roundup (the collection of
71991 @@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
71992 return 0;
71993 if (kgdb_connected)
71994 return 1;
71995 - if (atomic_read(&kgdb_setting_breakpoint))
71996 + if (atomic_read_unchecked(&kgdb_setting_breakpoint))
71997 return 1;
71998 if (print_wait)
71999 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
72000 @@ -1426,8 +1426,8 @@ acquirelock:
72001 * instance of the exception handler wanted to come into the
72002 * debugger on a different CPU via a single step
72003 */
72004 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
72005 - atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
72006 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
72007 + atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
72008
72009 atomic_set(&kgdb_active, -1);
72010 touch_softlockup_watchdog();
72011 @@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void)
72012 *
72013 * Register it with the KGDB core.
72014 */
72015 -int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
72016 +int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
72017 {
72018 int err;
72019
72020 @@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_module);
72021 *
72022 * Unregister it with the KGDB core.
72023 */
72024 -void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
72025 +void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
72026 {
72027 BUG_ON(kgdb_connected);
72028
72029 @@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
72030 */
72031 void kgdb_breakpoint(void)
72032 {
72033 - atomic_set(&kgdb_setting_breakpoint, 1);
72034 + atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
72035 wmb(); /* Sync point before breakpoint */
72036 arch_kgdb_breakpoint();
72037 wmb(); /* Sync point after breakpoint */
72038 - atomic_set(&kgdb_setting_breakpoint, 0);
72039 + atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
72040 }
72041 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
72042
72043 diff --git a/kernel/kmod.c b/kernel/kmod.c
72044 index d206078..e27ba6a 100644
72045 --- a/kernel/kmod.c
72046 +++ b/kernel/kmod.c
72047 @@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
72048 * If module auto-loading support is disabled then this function
72049 * becomes a no-operation.
72050 */
72051 -int __request_module(bool wait, const char *fmt, ...)
72052 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
72053 {
72054 - va_list args;
72055 char module_name[MODULE_NAME_LEN];
72056 unsigned int max_modprobes;
72057 int ret;
72058 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
72059 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
72060 static char *envp[] = { "HOME=/",
72061 "TERM=linux",
72062 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
72063 @@ -84,12 +83,24 @@ int __request_module(bool wait, const char *fmt, ...)
72064 if (ret)
72065 return ret;
72066
72067 - va_start(args, fmt);
72068 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
72069 - va_end(args);
72070 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
72071 if (ret >= MODULE_NAME_LEN)
72072 return -ENAMETOOLONG;
72073
72074 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
72075 + if (!current_uid()) {
72076 + /* hack to workaround consolekit/udisks stupidity */
72077 + read_lock(&tasklist_lock);
72078 + if (!strcmp(current->comm, "mount") &&
72079 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
72080 + read_unlock(&tasklist_lock);
72081 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
72082 + return -EPERM;
72083 + }
72084 + read_unlock(&tasklist_lock);
72085 + }
72086 +#endif
72087 +
72088 /* If modprobe needs a service that is in a module, we get a recursive
72089 * loop. Limit the number of running kmod threads to max_threads/2 or
72090 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
72091 @@ -123,6 +134,48 @@ int __request_module(bool wait, const char *fmt, ...)
72092 atomic_dec(&kmod_concurrent);
72093 return ret;
72094 }
72095 +
72096 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
72097 +{
72098 + va_list args;
72099 + int ret;
72100 +
72101 + va_start(args, fmt);
72102 + ret = ____request_module(wait, module_param, fmt, args);
72103 + va_end(args);
72104 +
72105 + return ret;
72106 +}
72107 +
72108 +int __request_module(bool wait, const char *fmt, ...)
72109 +{
72110 + va_list args;
72111 + int ret;
72112 +
72113 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
72114 + if (current_uid()) {
72115 + char module_param[MODULE_NAME_LEN];
72116 +
72117 + memset(module_param, 0, sizeof(module_param));
72118 +
72119 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
72120 +
72121 + va_start(args, fmt);
72122 + ret = ____request_module(wait, module_param, fmt, args);
72123 + va_end(args);
72124 +
72125 + return ret;
72126 + }
72127 +#endif
72128 +
72129 + va_start(args, fmt);
72130 + ret = ____request_module(wait, NULL, fmt, args);
72131 + va_end(args);
72132 +
72133 + return ret;
72134 +}
72135 +
72136 +
72137 EXPORT_SYMBOL(__request_module);
72138 #endif /* CONFIG_MODULES */
72139
72140 @@ -228,7 +281,7 @@ static int wait_for_helper(void *data)
72141 *
72142 * Thus the __user pointer cast is valid here.
72143 */
72144 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
72145 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
72146
72147 /*
72148 * If ret is 0, either ____call_usermodehelper failed and the
72149 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
72150 index 5240d75..5a6fb33 100644
72151 --- a/kernel/kprobes.c
72152 +++ b/kernel/kprobes.c
72153 @@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void)
72154 * kernel image and loaded module images reside. This is required
72155 * so x86_64 can correctly handle the %rip-relative fixups.
72156 */
72157 - kip->insns = module_alloc(PAGE_SIZE);
72158 + kip->insns = module_alloc_exec(PAGE_SIZE);
72159 if (!kip->insns) {
72160 kfree(kip);
72161 return NULL;
72162 @@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
72163 */
72164 if (!list_is_singular(&kprobe_insn_pages)) {
72165 list_del(&kip->list);
72166 - module_free(NULL, kip->insns);
72167 + module_free_exec(NULL, kip->insns);
72168 kfree(kip);
72169 }
72170 return 1;
72171 @@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
72172 {
72173 int i, err = 0;
72174 unsigned long offset = 0, size = 0;
72175 - char *modname, namebuf[128];
72176 + char *modname, namebuf[KSYM_NAME_LEN];
72177 const char *symbol_name;
72178 void *addr;
72179 struct kprobe_blackpoint *kb;
72180 @@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
72181 const char *sym = NULL;
72182 unsigned int i = *(loff_t *) v;
72183 unsigned long offset = 0;
72184 - char *modname, namebuf[128];
72185 + char *modname, namebuf[KSYM_NAME_LEN];
72186
72187 head = &kprobe_table[i];
72188 preempt_disable();
72189 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
72190 index d86fe89..d12fc66 100644
72191 --- a/kernel/lockdep.c
72192 +++ b/kernel/lockdep.c
72193 @@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_trace = {
72194 /*
72195 * Various lockdep statistics:
72196 */
72197 -atomic_t chain_lookup_hits;
72198 -atomic_t chain_lookup_misses;
72199 -atomic_t hardirqs_on_events;
72200 -atomic_t hardirqs_off_events;
72201 -atomic_t redundant_hardirqs_on;
72202 -atomic_t redundant_hardirqs_off;
72203 -atomic_t softirqs_on_events;
72204 -atomic_t softirqs_off_events;
72205 -atomic_t redundant_softirqs_on;
72206 -atomic_t redundant_softirqs_off;
72207 -atomic_t nr_unused_locks;
72208 -atomic_t nr_cyclic_checks;
72209 -atomic_t nr_find_usage_forwards_checks;
72210 -atomic_t nr_find_usage_backwards_checks;
72211 +atomic_unchecked_t chain_lookup_hits;
72212 +atomic_unchecked_t chain_lookup_misses;
72213 +atomic_unchecked_t hardirqs_on_events;
72214 +atomic_unchecked_t hardirqs_off_events;
72215 +atomic_unchecked_t redundant_hardirqs_on;
72216 +atomic_unchecked_t redundant_hardirqs_off;
72217 +atomic_unchecked_t softirqs_on_events;
72218 +atomic_unchecked_t softirqs_off_events;
72219 +atomic_unchecked_t redundant_softirqs_on;
72220 +atomic_unchecked_t redundant_softirqs_off;
72221 +atomic_unchecked_t nr_unused_locks;
72222 +atomic_unchecked_t nr_cyclic_checks;
72223 +atomic_unchecked_t nr_find_usage_forwards_checks;
72224 +atomic_unchecked_t nr_find_usage_backwards_checks;
72225 #endif
72226
72227 /*
72228 @@ -577,6 +577,10 @@ static int static_obj(void *obj)
72229 int i;
72230 #endif
72231
72232 +#ifdef CONFIG_PAX_KERNEXEC
72233 + start = ktla_ktva(start);
72234 +#endif
72235 +
72236 /*
72237 * static variable?
72238 */
72239 @@ -592,8 +596,7 @@ static int static_obj(void *obj)
72240 */
72241 for_each_possible_cpu(i) {
72242 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
72243 - end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
72244 - + per_cpu_offset(i);
72245 + end = start + PERCPU_ENOUGH_ROOM;
72246
72247 if ((addr >= start) && (addr < end))
72248 return 1;
72249 @@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
72250 if (!static_obj(lock->key)) {
72251 debug_locks_off();
72252 printk("INFO: trying to register non-static key.\n");
72253 + printk("lock:%pS key:%pS.\n", lock, lock->key);
72254 printk("the code is fine but needs lockdep annotation.\n");
72255 printk("turning off the locking correctness validator.\n");
72256 dump_stack();
72257 @@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
72258 if (!class)
72259 return 0;
72260 }
72261 - debug_atomic_inc((atomic_t *)&class->ops);
72262 + debug_atomic_inc((atomic_unchecked_t *)&class->ops);
72263 if (very_verbose(class)) {
72264 printk("\nacquire class [%p] %s", class->key, class->name);
72265 if (class->name_version > 1)
72266 diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
72267 index a2ee95a..092f0f2 100644
72268 --- a/kernel/lockdep_internals.h
72269 +++ b/kernel/lockdep_internals.h
72270 @@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_class *class)
72271 /*
72272 * Various lockdep statistics:
72273 */
72274 -extern atomic_t chain_lookup_hits;
72275 -extern atomic_t chain_lookup_misses;
72276 -extern atomic_t hardirqs_on_events;
72277 -extern atomic_t hardirqs_off_events;
72278 -extern atomic_t redundant_hardirqs_on;
72279 -extern atomic_t redundant_hardirqs_off;
72280 -extern atomic_t softirqs_on_events;
72281 -extern atomic_t softirqs_off_events;
72282 -extern atomic_t redundant_softirqs_on;
72283 -extern atomic_t redundant_softirqs_off;
72284 -extern atomic_t nr_unused_locks;
72285 -extern atomic_t nr_cyclic_checks;
72286 -extern atomic_t nr_cyclic_check_recursions;
72287 -extern atomic_t nr_find_usage_forwards_checks;
72288 -extern atomic_t nr_find_usage_forwards_recursions;
72289 -extern atomic_t nr_find_usage_backwards_checks;
72290 -extern atomic_t nr_find_usage_backwards_recursions;
72291 -# define debug_atomic_inc(ptr) atomic_inc(ptr)
72292 -# define debug_atomic_dec(ptr) atomic_dec(ptr)
72293 -# define debug_atomic_read(ptr) atomic_read(ptr)
72294 +extern atomic_unchecked_t chain_lookup_hits;
72295 +extern atomic_unchecked_t chain_lookup_misses;
72296 +extern atomic_unchecked_t hardirqs_on_events;
72297 +extern atomic_unchecked_t hardirqs_off_events;
72298 +extern atomic_unchecked_t redundant_hardirqs_on;
72299 +extern atomic_unchecked_t redundant_hardirqs_off;
72300 +extern atomic_unchecked_t softirqs_on_events;
72301 +extern atomic_unchecked_t softirqs_off_events;
72302 +extern atomic_unchecked_t redundant_softirqs_on;
72303 +extern atomic_unchecked_t redundant_softirqs_off;
72304 +extern atomic_unchecked_t nr_unused_locks;
72305 +extern atomic_unchecked_t nr_cyclic_checks;
72306 +extern atomic_unchecked_t nr_cyclic_check_recursions;
72307 +extern atomic_unchecked_t nr_find_usage_forwards_checks;
72308 +extern atomic_unchecked_t nr_find_usage_forwards_recursions;
72309 +extern atomic_unchecked_t nr_find_usage_backwards_checks;
72310 +extern atomic_unchecked_t nr_find_usage_backwards_recursions;
72311 +# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
72312 +# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
72313 +# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
72314 #else
72315 # define debug_atomic_inc(ptr) do { } while (0)
72316 # define debug_atomic_dec(ptr) do { } while (0)
72317 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
72318 index d4aba4f..02a353f 100644
72319 --- a/kernel/lockdep_proc.c
72320 +++ b/kernel/lockdep_proc.c
72321 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
72322
72323 static void print_name(struct seq_file *m, struct lock_class *class)
72324 {
72325 - char str[128];
72326 + char str[KSYM_NAME_LEN];
72327 const char *name = class->name;
72328
72329 if (!name) {
72330 diff --git a/kernel/module.c b/kernel/module.c
72331 index 4b270e6..2226274 100644
72332 --- a/kernel/module.c
72333 +++ b/kernel/module.c
72334 @@ -55,6 +55,7 @@
72335 #include <linux/async.h>
72336 #include <linux/percpu.h>
72337 #include <linux/kmemleak.h>
72338 +#include <linux/grsecurity.h>
72339
72340 #define CREATE_TRACE_POINTS
72341 #include <trace/events/module.h>
72342 @@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq);
72343 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
72344
72345 /* Bounds of module allocation, for speeding __module_address */
72346 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
72347 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
72348 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
72349
72350 int register_module_notifier(struct notifier_block * nb)
72351 {
72352 @@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
72353 return true;
72354
72355 list_for_each_entry_rcu(mod, &modules, list) {
72356 - struct symsearch arr[] = {
72357 + struct symsearch modarr[] = {
72358 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
72359 NOT_GPL_ONLY, false },
72360 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
72361 @@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
72362 #endif
72363 };
72364
72365 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
72366 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
72367 return true;
72368 }
72369 return false;
72370 @@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned long size, unsigned long align,
72371 void *ptr;
72372 int cpu;
72373
72374 - if (align > PAGE_SIZE) {
72375 + if (align-1 >= PAGE_SIZE) {
72376 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
72377 name, align, PAGE_SIZE);
72378 align = PAGE_SIZE;
72379 @@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
72380 * /sys/module/foo/sections stuff
72381 * J. Corbet <corbet@lwn.net>
72382 */
72383 -#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
72384 +#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
72385
72386 static inline bool sect_empty(const Elf_Shdr *sect)
72387 {
72388 @@ -1545,7 +1547,8 @@ static void free_module(struct module *mod)
72389 destroy_params(mod->kp, mod->num_kp);
72390
72391 /* This may be NULL, but that's OK */
72392 - module_free(mod, mod->module_init);
72393 + module_free(mod, mod->module_init_rw);
72394 + module_free_exec(mod, mod->module_init_rx);
72395 kfree(mod->args);
72396 if (mod->percpu)
72397 percpu_modfree(mod->percpu);
72398 @@ -1554,10 +1557,12 @@ static void free_module(struct module *mod)
72399 percpu_modfree(mod->refptr);
72400 #endif
72401 /* Free lock-classes: */
72402 - lockdep_free_key_range(mod->module_core, mod->core_size);
72403 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
72404 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
72405
72406 /* Finally, free the core (containing the module structure) */
72407 - module_free(mod, mod->module_core);
72408 + module_free_exec(mod, mod->module_core_rx);
72409 + module_free(mod, mod->module_core_rw);
72410
72411 #ifdef CONFIG_MPU
72412 update_protections(current->mm);
72413 @@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
72414 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
72415 int ret = 0;
72416 const struct kernel_symbol *ksym;
72417 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
72418 + int is_fs_load = 0;
72419 + int register_filesystem_found = 0;
72420 + char *p;
72421 +
72422 + p = strstr(mod->args, "grsec_modharden_fs");
72423 +
72424 + if (p) {
72425 + char *endptr = p + strlen("grsec_modharden_fs");
72426 + /* copy \0 as well */
72427 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
72428 + is_fs_load = 1;
72429 + }
72430 +#endif
72431 +
72432
72433 for (i = 1; i < n; i++) {
72434 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
72435 + const char *name = strtab + sym[i].st_name;
72436 +
72437 + /* it's a real shame this will never get ripped and copied
72438 + upstream! ;(
72439 + */
72440 + if (is_fs_load && !strcmp(name, "register_filesystem"))
72441 + register_filesystem_found = 1;
72442 +#endif
72443 switch (sym[i].st_shndx) {
72444 case SHN_COMMON:
72445 /* We compiled with -fno-common. These are not
72446 @@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
72447 strtab + sym[i].st_name, mod);
72448 /* Ok if resolved. */
72449 if (ksym) {
72450 + pax_open_kernel();
72451 sym[i].st_value = ksym->value;
72452 + pax_close_kernel();
72453 break;
72454 }
72455
72456 @@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
72457 secbase = (unsigned long)mod->percpu;
72458 else
72459 secbase = sechdrs[sym[i].st_shndx].sh_addr;
72460 + pax_open_kernel();
72461 sym[i].st_value += secbase;
72462 + pax_close_kernel();
72463 break;
72464 }
72465 }
72466
72467 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
72468 + if (is_fs_load && !register_filesystem_found) {
72469 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
72470 + ret = -EPERM;
72471 + }
72472 +#endif
72473 +
72474 return ret;
72475 }
72476
72477 @@ -1731,11 +1771,12 @@ static void layout_sections(struct module *mod,
72478 || s->sh_entsize != ~0UL
72479 || strstarts(secstrings + s->sh_name, ".init"))
72480 continue;
72481 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
72482 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
72483 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
72484 + else
72485 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
72486 DEBUGP("\t%s\n", secstrings + s->sh_name);
72487 }
72488 - if (m == 0)
72489 - mod->core_text_size = mod->core_size;
72490 }
72491
72492 DEBUGP("Init section allocation order:\n");
72493 @@ -1748,12 +1789,13 @@ static void layout_sections(struct module *mod,
72494 || s->sh_entsize != ~0UL
72495 || !strstarts(secstrings + s->sh_name, ".init"))
72496 continue;
72497 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
72498 - | INIT_OFFSET_MASK);
72499 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
72500 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
72501 + else
72502 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
72503 + s->sh_entsize |= INIT_OFFSET_MASK;
72504 DEBUGP("\t%s\n", secstrings + s->sh_name);
72505 }
72506 - if (m == 0)
72507 - mod->init_text_size = mod->init_size;
72508 }
72509 }
72510
72511 @@ -1857,9 +1899,8 @@ static int is_exported(const char *name, unsigned long value,
72512
72513 /* As per nm */
72514 static char elf_type(const Elf_Sym *sym,
72515 - Elf_Shdr *sechdrs,
72516 - const char *secstrings,
72517 - struct module *mod)
72518 + const Elf_Shdr *sechdrs,
72519 + const char *secstrings)
72520 {
72521 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
72522 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
72523 @@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struct module *mod,
72524
72525 /* Put symbol section at end of init part of module. */
72526 symsect->sh_flags |= SHF_ALLOC;
72527 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
72528 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
72529 symindex) | INIT_OFFSET_MASK;
72530 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
72531
72532 @@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struct module *mod,
72533 }
72534
72535 /* Append room for core symbols at end of core part. */
72536 - symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
72537 - mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
72538 + symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
72539 + mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
72540
72541 /* Put string table section at end of init part of module. */
72542 strsect->sh_flags |= SHF_ALLOC;
72543 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
72544 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
72545 strindex) | INIT_OFFSET_MASK;
72546 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
72547
72548 /* Append room for core symbols' strings at end of core part. */
72549 - *pstroffs = mod->core_size;
72550 + *pstroffs = mod->core_size_rx;
72551 __set_bit(0, strmap);
72552 - mod->core_size += bitmap_weight(strmap, strsect->sh_size);
72553 + mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
72554
72555 return symoffs;
72556 }
72557 @@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *mod,
72558 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
72559 mod->strtab = (void *)sechdrs[strindex].sh_addr;
72560
72561 + pax_open_kernel();
72562 +
72563 /* Set types up while we still have access to sections. */
72564 for (i = 0; i < mod->num_symtab; i++)
72565 mod->symtab[i].st_info
72566 - = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
72567 + = elf_type(&mod->symtab[i], sechdrs, secstrings);
72568
72569 - mod->core_symtab = dst = mod->module_core + symoffs;
72570 + mod->core_symtab = dst = mod->module_core_rx + symoffs;
72571 src = mod->symtab;
72572 *dst = *src;
72573 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
72574 @@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *mod,
72575 }
72576 mod->core_num_syms = ndst;
72577
72578 - mod->core_strtab = s = mod->module_core + stroffs;
72579 + mod->core_strtab = s = mod->module_core_rx + stroffs;
72580 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
72581 if (test_bit(i, strmap))
72582 *++s = mod->strtab[i];
72583 +
72584 + pax_close_kernel();
72585 }
72586 #else
72587 static inline unsigned long layout_symtab(struct module *mod,
72588 @@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
72589 #endif
72590 }
72591
72592 -static void *module_alloc_update_bounds(unsigned long size)
72593 +static void *module_alloc_update_bounds_rw(unsigned long size)
72594 {
72595 void *ret = module_alloc(size);
72596
72597 if (ret) {
72598 /* Update module bounds. */
72599 - if ((unsigned long)ret < module_addr_min)
72600 - module_addr_min = (unsigned long)ret;
72601 - if ((unsigned long)ret + size > module_addr_max)
72602 - module_addr_max = (unsigned long)ret + size;
72603 + if ((unsigned long)ret < module_addr_min_rw)
72604 + module_addr_min_rw = (unsigned long)ret;
72605 + if ((unsigned long)ret + size > module_addr_max_rw)
72606 + module_addr_max_rw = (unsigned long)ret + size;
72607 + }
72608 + return ret;
72609 +}
72610 +
72611 +static void *module_alloc_update_bounds_rx(unsigned long size)
72612 +{
72613 + void *ret = module_alloc_exec(size);
72614 +
72615 + if (ret) {
72616 + /* Update module bounds. */
72617 + if ((unsigned long)ret < module_addr_min_rx)
72618 + module_addr_min_rx = (unsigned long)ret;
72619 + if ((unsigned long)ret + size > module_addr_max_rx)
72620 + module_addr_max_rx = (unsigned long)ret + size;
72621 }
72622 return ret;
72623 }
72624 @@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
72625 unsigned int i;
72626
72627 /* only scan the sections containing data */
72628 - kmemleak_scan_area(mod->module_core, (unsigned long)mod -
72629 - (unsigned long)mod->module_core,
72630 + kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
72631 + (unsigned long)mod->module_core_rw,
72632 sizeof(struct module), GFP_KERNEL);
72633
72634 for (i = 1; i < hdr->e_shnum; i++) {
72635 @@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
72636 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
72637 continue;
72638
72639 - kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
72640 - (unsigned long)mod->module_core,
72641 + kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
72642 + (unsigned long)mod->module_core_rw,
72643 sechdrs[i].sh_size, GFP_KERNEL);
72644 }
72645 }
72646 @@ -2097,7 +2156,7 @@ static noinline struct module *load_module(void __user *umod,
72647 Elf_Ehdr *hdr;
72648 Elf_Shdr *sechdrs;
72649 char *secstrings, *args, *modmagic, *strtab = NULL;
72650 - char *staging;
72651 + char *staging, *license;
72652 unsigned int i;
72653 unsigned int symindex = 0;
72654 unsigned int strindex = 0;
72655 @@ -2195,6 +2254,14 @@ static noinline struct module *load_module(void __user *umod,
72656 goto free_hdr;
72657 }
72658
72659 + license = get_modinfo(sechdrs, infoindex, "license");
72660 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
72661 + if (!license || !license_is_gpl_compatible(license)) {
72662 + err -ENOEXEC;
72663 + goto free_hdr;
72664 + }
72665 +#endif
72666 +
72667 modmagic = get_modinfo(sechdrs, infoindex, "vermagic");
72668 /* This is allowed: modprobe --force will invalidate it. */
72669 if (!modmagic) {
72670 @@ -2263,7 +2330,7 @@ static noinline struct module *load_module(void __user *umod,
72671 secstrings, &stroffs, strmap);
72672
72673 /* Do the allocs. */
72674 - ptr = module_alloc_update_bounds(mod->core_size);
72675 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
72676 /*
72677 * The pointer to this block is stored in the module structure
72678 * which is inside the block. Just mark it as not being a
72679 @@ -2274,23 +2341,47 @@ static noinline struct module *load_module(void __user *umod,
72680 err = -ENOMEM;
72681 goto free_percpu;
72682 }
72683 - memset(ptr, 0, mod->core_size);
72684 - mod->module_core = ptr;
72685 + memset(ptr, 0, mod->core_size_rw);
72686 + mod->module_core_rw = ptr;
72687
72688 - ptr = module_alloc_update_bounds(mod->init_size);
72689 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
72690 /*
72691 * The pointer to this block is stored in the module structure
72692 * which is inside the block. This block doesn't need to be
72693 * scanned as it contains data and code that will be freed
72694 * after the module is initialized.
72695 */
72696 - kmemleak_ignore(ptr);
72697 - if (!ptr && mod->init_size) {
72698 + kmemleak_not_leak(ptr);
72699 + if (!ptr && mod->init_size_rw) {
72700 err = -ENOMEM;
72701 - goto free_core;
72702 + goto free_core_rw;
72703 }
72704 - memset(ptr, 0, mod->init_size);
72705 - mod->module_init = ptr;
72706 + memset(ptr, 0, mod->init_size_rw);
72707 + mod->module_init_rw = ptr;
72708 +
72709 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
72710 + kmemleak_not_leak(ptr);
72711 + if (!ptr) {
72712 + err = -ENOMEM;
72713 + goto free_init_rw;
72714 + }
72715 +
72716 + pax_open_kernel();
72717 + memset(ptr, 0, mod->core_size_rx);
72718 + pax_close_kernel();
72719 + mod->module_core_rx = ptr;
72720 +
72721 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
72722 + kmemleak_not_leak(ptr);
72723 + if (!ptr && mod->init_size_rx) {
72724 + err = -ENOMEM;
72725 + goto free_core_rx;
72726 + }
72727 +
72728 + pax_open_kernel();
72729 + memset(ptr, 0, mod->init_size_rx);
72730 + pax_close_kernel();
72731 + mod->module_init_rx = ptr;
72732
72733 /* Transfer each section which specifies SHF_ALLOC */
72734 DEBUGP("final section addresses:\n");
72735 @@ -2300,17 +2391,45 @@ static noinline struct module *load_module(void __user *umod,
72736 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
72737 continue;
72738
72739 - if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
72740 - dest = mod->module_init
72741 - + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
72742 - else
72743 - dest = mod->module_core + sechdrs[i].sh_entsize;
72744 + if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
72745 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
72746 + dest = mod->module_init_rw
72747 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
72748 + else
72749 + dest = mod->module_init_rx
72750 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
72751 + } else {
72752 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
72753 + dest = mod->module_core_rw + sechdrs[i].sh_entsize;
72754 + else
72755 + dest = mod->module_core_rx + sechdrs[i].sh_entsize;
72756 + }
72757
72758 - if (sechdrs[i].sh_type != SHT_NOBITS)
72759 - memcpy(dest, (void *)sechdrs[i].sh_addr,
72760 - sechdrs[i].sh_size);
72761 + if (sechdrs[i].sh_type != SHT_NOBITS) {
72762 +
72763 +#ifdef CONFIG_PAX_KERNEXEC
72764 +#ifdef CONFIG_X86_64
72765 + if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
72766 + set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
72767 +#endif
72768 + if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
72769 + pax_open_kernel();
72770 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
72771 + pax_close_kernel();
72772 + } else
72773 +#endif
72774 +
72775 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
72776 + }
72777 /* Update sh_addr to point to copy in image. */
72778 - sechdrs[i].sh_addr = (unsigned long)dest;
72779 +
72780 +#ifdef CONFIG_PAX_KERNEXEC
72781 + if (sechdrs[i].sh_flags & SHF_EXECINSTR)
72782 + sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
72783 + else
72784 +#endif
72785 +
72786 + sechdrs[i].sh_addr = (unsigned long)dest;
72787 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
72788 }
72789 /* Module has been moved. */
72790 @@ -2322,7 +2441,7 @@ static noinline struct module *load_module(void __user *umod,
72791 mod->name);
72792 if (!mod->refptr) {
72793 err = -ENOMEM;
72794 - goto free_init;
72795 + goto free_init_rx;
72796 }
72797 #endif
72798 /* Now we've moved module, initialize linked lists, etc. */
72799 @@ -2334,7 +2453,7 @@ static noinline struct module *load_module(void __user *umod,
72800 goto free_unload;
72801
72802 /* Set up license info based on the info section */
72803 - set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
72804 + set_license(mod, license);
72805
72806 /*
72807 * ndiswrapper is under GPL by itself, but loads proprietary modules.
72808 @@ -2351,6 +2470,31 @@ static noinline struct module *load_module(void __user *umod,
72809 /* Set up MODINFO_ATTR fields */
72810 setup_modinfo(mod, sechdrs, infoindex);
72811
72812 + mod->args = args;
72813 +
72814 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
72815 + {
72816 + char *p, *p2;
72817 +
72818 + if (strstr(mod->args, "grsec_modharden_netdev")) {
72819 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
72820 + err = -EPERM;
72821 + goto cleanup;
72822 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
72823 + p += strlen("grsec_modharden_normal");
72824 + p2 = strstr(p, "_");
72825 + if (p2) {
72826 + *p2 = '\0';
72827 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
72828 + *p2 = '_';
72829 + }
72830 + err = -EPERM;
72831 + goto cleanup;
72832 + }
72833 + }
72834 +#endif
72835 +
72836 +
72837 /* Fix up syms, so that st_value is a pointer to location. */
72838 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
72839 mod);
72840 @@ -2431,8 +2575,8 @@ static noinline struct module *load_module(void __user *umod,
72841
72842 /* Now do relocations. */
72843 for (i = 1; i < hdr->e_shnum; i++) {
72844 - const char *strtab = (char *)sechdrs[strindex].sh_addr;
72845 unsigned int info = sechdrs[i].sh_info;
72846 + strtab = (char *)sechdrs[strindex].sh_addr;
72847
72848 /* Not a valid relocation section? */
72849 if (info >= hdr->e_shnum)
72850 @@ -2493,16 +2637,15 @@ static noinline struct module *load_module(void __user *umod,
72851 * Do it before processing of module parameters, so the module
72852 * can provide parameter accessor functions of its own.
72853 */
72854 - if (mod->module_init)
72855 - flush_icache_range((unsigned long)mod->module_init,
72856 - (unsigned long)mod->module_init
72857 - + mod->init_size);
72858 - flush_icache_range((unsigned long)mod->module_core,
72859 - (unsigned long)mod->module_core + mod->core_size);
72860 + if (mod->module_init_rx)
72861 + flush_icache_range((unsigned long)mod->module_init_rx,
72862 + (unsigned long)mod->module_init_rx
72863 + + mod->init_size_rx);
72864 + flush_icache_range((unsigned long)mod->module_core_rx,
72865 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
72866
72867 set_fs(old_fs);
72868
72869 - mod->args = args;
72870 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
72871 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
72872 mod->name);
72873 @@ -2546,12 +2689,16 @@ static noinline struct module *load_module(void __user *umod,
72874 free_unload:
72875 module_unload_free(mod);
72876 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
72877 + free_init_rx:
72878 percpu_modfree(mod->refptr);
72879 - free_init:
72880 #endif
72881 - module_free(mod, mod->module_init);
72882 - free_core:
72883 - module_free(mod, mod->module_core);
72884 + module_free_exec(mod, mod->module_init_rx);
72885 + free_core_rx:
72886 + module_free_exec(mod, mod->module_core_rx);
72887 + free_init_rw:
72888 + module_free(mod, mod->module_init_rw);
72889 + free_core_rw:
72890 + module_free(mod, mod->module_core_rw);
72891 /* mod will be freed with core. Don't access it beyond this line! */
72892 free_percpu:
72893 if (percpu)
72894 @@ -2653,10 +2800,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
72895 mod->symtab = mod->core_symtab;
72896 mod->strtab = mod->core_strtab;
72897 #endif
72898 - module_free(mod, mod->module_init);
72899 - mod->module_init = NULL;
72900 - mod->init_size = 0;
72901 - mod->init_text_size = 0;
72902 + module_free(mod, mod->module_init_rw);
72903 + module_free_exec(mod, mod->module_init_rx);
72904 + mod->module_init_rw = NULL;
72905 + mod->module_init_rx = NULL;
72906 + mod->init_size_rw = 0;
72907 + mod->init_size_rx = 0;
72908 mutex_unlock(&module_mutex);
72909
72910 return 0;
72911 @@ -2687,10 +2836,16 @@ static const char *get_ksymbol(struct module *mod,
72912 unsigned long nextval;
72913
72914 /* At worse, next value is at end of module */
72915 - if (within_module_init(addr, mod))
72916 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
72917 + if (within_module_init_rx(addr, mod))
72918 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
72919 + else if (within_module_init_rw(addr, mod))
72920 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
72921 + else if (within_module_core_rx(addr, mod))
72922 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
72923 + else if (within_module_core_rw(addr, mod))
72924 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
72925 else
72926 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
72927 + return NULL;
72928
72929 /* Scan for closest preceeding symbol, and next symbol. (ELF
72930 starts real symbols at 1). */
72931 @@ -2936,7 +3091,7 @@ static int m_show(struct seq_file *m, void *p)
72932 char buf[8];
72933
72934 seq_printf(m, "%s %u",
72935 - mod->name, mod->init_size + mod->core_size);
72936 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
72937 print_unload_info(m, mod);
72938
72939 /* Informative for users. */
72940 @@ -2945,7 +3100,7 @@ static int m_show(struct seq_file *m, void *p)
72941 mod->state == MODULE_STATE_COMING ? "Loading":
72942 "Live");
72943 /* Used by oprofile and other similar tools. */
72944 - seq_printf(m, " 0x%p", mod->module_core);
72945 + seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
72946
72947 /* Taints info */
72948 if (mod->taints)
72949 @@ -2981,7 +3136,17 @@ static const struct file_operations proc_modules_operations = {
72950
72951 static int __init proc_modules_init(void)
72952 {
72953 +#ifndef CONFIG_GRKERNSEC_HIDESYM
72954 +#ifdef CONFIG_GRKERNSEC_PROC_USER
72955 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
72956 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72957 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
72958 +#else
72959 proc_create("modules", 0, NULL, &proc_modules_operations);
72960 +#endif
72961 +#else
72962 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
72963 +#endif
72964 return 0;
72965 }
72966 module_init(proc_modules_init);
72967 @@ -3040,12 +3205,12 @@ struct module *__module_address(unsigned long addr)
72968 {
72969 struct module *mod;
72970
72971 - if (addr < module_addr_min || addr > module_addr_max)
72972 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
72973 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
72974 return NULL;
72975
72976 list_for_each_entry_rcu(mod, &modules, list)
72977 - if (within_module_core(addr, mod)
72978 - || within_module_init(addr, mod))
72979 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
72980 return mod;
72981 return NULL;
72982 }
72983 @@ -3079,11 +3244,20 @@ bool is_module_text_address(unsigned long addr)
72984 */
72985 struct module *__module_text_address(unsigned long addr)
72986 {
72987 - struct module *mod = __module_address(addr);
72988 + struct module *mod;
72989 +
72990 +#ifdef CONFIG_X86_32
72991 + addr = ktla_ktva(addr);
72992 +#endif
72993 +
72994 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
72995 + return NULL;
72996 +
72997 + mod = __module_address(addr);
72998 +
72999 if (mod) {
73000 /* Make sure it's within the text section. */
73001 - if (!within(addr, mod->module_init, mod->init_text_size)
73002 - && !within(addr, mod->module_core, mod->core_text_size))
73003 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
73004 mod = NULL;
73005 }
73006 return mod;
73007 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
73008 index ec815a9..fe46e99 100644
73009 --- a/kernel/mutex-debug.c
73010 +++ b/kernel/mutex-debug.c
73011 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
73012 }
73013
73014 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
73015 - struct thread_info *ti)
73016 + struct task_struct *task)
73017 {
73018 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
73019
73020 /* Mark the current thread as blocked on the lock: */
73021 - ti->task->blocked_on = waiter;
73022 + task->blocked_on = waiter;
73023 }
73024
73025 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
73026 - struct thread_info *ti)
73027 + struct task_struct *task)
73028 {
73029 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
73030 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
73031 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
73032 - ti->task->blocked_on = NULL;
73033 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
73034 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
73035 + task->blocked_on = NULL;
73036
73037 list_del_init(&waiter->list);
73038 waiter->task = NULL;
73039 @@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lock)
73040 return;
73041
73042 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
73043 - DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
73044 + DEBUG_LOCKS_WARN_ON(lock->owner != current);
73045 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
73046 mutex_clear_owner(lock);
73047 }
73048 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
73049 index 6b2d735..372d3c4 100644
73050 --- a/kernel/mutex-debug.h
73051 +++ b/kernel/mutex-debug.h
73052 @@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
73053 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
73054 extern void debug_mutex_add_waiter(struct mutex *lock,
73055 struct mutex_waiter *waiter,
73056 - struct thread_info *ti);
73057 + struct task_struct *task);
73058 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
73059 - struct thread_info *ti);
73060 + struct task_struct *task);
73061 extern void debug_mutex_unlock(struct mutex *lock);
73062 extern void debug_mutex_init(struct mutex *lock, const char *name,
73063 struct lock_class_key *key);
73064
73065 static inline void mutex_set_owner(struct mutex *lock)
73066 {
73067 - lock->owner = current_thread_info();
73068 + lock->owner = current;
73069 }
73070
73071 static inline void mutex_clear_owner(struct mutex *lock)
73072 diff --git a/kernel/mutex.c b/kernel/mutex.c
73073 index f85644c..5ee9f77 100644
73074 --- a/kernel/mutex.c
73075 +++ b/kernel/mutex.c
73076 @@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
73077 */
73078
73079 for (;;) {
73080 - struct thread_info *owner;
73081 + struct task_struct *owner;
73082
73083 /*
73084 * If we own the BKL, then don't spin. The owner of
73085 @@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
73086 spin_lock_mutex(&lock->wait_lock, flags);
73087
73088 debug_mutex_lock_common(lock, &waiter);
73089 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
73090 + debug_mutex_add_waiter(lock, &waiter, task);
73091
73092 /* add waiting tasks to the end of the waitqueue (FIFO): */
73093 list_add_tail(&waiter.list, &lock->wait_list);
73094 @@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
73095 * TASK_UNINTERRUPTIBLE case.)
73096 */
73097 if (unlikely(signal_pending_state(state, task))) {
73098 - mutex_remove_waiter(lock, &waiter,
73099 - task_thread_info(task));
73100 + mutex_remove_waiter(lock, &waiter, task);
73101 mutex_release(&lock->dep_map, 1, ip);
73102 spin_unlock_mutex(&lock->wait_lock, flags);
73103
73104 @@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
73105 done:
73106 lock_acquired(&lock->dep_map, ip);
73107 /* got the lock - rejoice! */
73108 - mutex_remove_waiter(lock, &waiter, current_thread_info());
73109 + mutex_remove_waiter(lock, &waiter, task);
73110 mutex_set_owner(lock);
73111
73112 /* set it to 0 if there are no waiters left: */
73113 diff --git a/kernel/mutex.h b/kernel/mutex.h
73114 index 67578ca..4115fbf 100644
73115 --- a/kernel/mutex.h
73116 +++ b/kernel/mutex.h
73117 @@ -19,7 +19,7 @@
73118 #ifdef CONFIG_SMP
73119 static inline void mutex_set_owner(struct mutex *lock)
73120 {
73121 - lock->owner = current_thread_info();
73122 + lock->owner = current;
73123 }
73124
73125 static inline void mutex_clear_owner(struct mutex *lock)
73126 diff --git a/kernel/panic.c b/kernel/panic.c
73127 index 96b45d0..ff70a46 100644
73128 --- a/kernel/panic.c
73129 +++ b/kernel/panic.c
73130 @@ -71,7 +71,11 @@ NORET_TYPE void panic(const char * fmt, ...)
73131 va_end(args);
73132 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
73133 #ifdef CONFIG_DEBUG_BUGVERBOSE
73134 - dump_stack();
73135 + /*
73136 + * Avoid nested stack-dumping if a panic occurs during oops processing
73137 + */
73138 + if (!oops_in_progress)
73139 + dump_stack();
73140 #endif
73141
73142 /*
73143 @@ -352,7 +356,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller, struc
73144 const char *board;
73145
73146 printk(KERN_WARNING "------------[ cut here ]------------\n");
73147 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
73148 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
73149 board = dmi_get_system_info(DMI_PRODUCT_NAME);
73150 if (board)
73151 printk(KERN_WARNING "Hardware name: %s\n", board);
73152 @@ -392,7 +396,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
73153 */
73154 void __stack_chk_fail(void)
73155 {
73156 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
73157 + dump_stack();
73158 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
73159 __builtin_return_address(0));
73160 }
73161 EXPORT_SYMBOL(__stack_chk_fail);
73162 diff --git a/kernel/params.c b/kernel/params.c
73163 index d656c27..21e452c 100644
73164 --- a/kernel/params.c
73165 +++ b/kernel/params.c
73166 @@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct kobject *kobj,
73167 return ret;
73168 }
73169
73170 -static struct sysfs_ops module_sysfs_ops = {
73171 +static const struct sysfs_ops module_sysfs_ops = {
73172 .show = module_attr_show,
73173 .store = module_attr_store,
73174 };
73175 @@ -739,7 +739,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
73176 return 0;
73177 }
73178
73179 -static struct kset_uevent_ops module_uevent_ops = {
73180 +static const struct kset_uevent_ops module_uevent_ops = {
73181 .filter = uevent_filter,
73182 };
73183
73184 diff --git a/kernel/perf_event.c b/kernel/perf_event.c
73185 index 37ebc14..9c121d9 100644
73186 --- a/kernel/perf_event.c
73187 +++ b/kernel/perf_event.c
73188 @@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostly = 516; /* 'free' kb per user */
73189 */
73190 int sysctl_perf_event_sample_rate __read_mostly = 100000;
73191
73192 -static atomic64_t perf_event_id;
73193 +static atomic64_unchecked_t perf_event_id;
73194
73195 /*
73196 * Lock for (sysadmin-configurable) event reservations:
73197 @@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struct perf_event *event,
73198 * In order to keep per-task stats reliable we need to flip the event
73199 * values when we flip the contexts.
73200 */
73201 - value = atomic64_read(&next_event->count);
73202 - value = atomic64_xchg(&event->count, value);
73203 - atomic64_set(&next_event->count, value);
73204 + value = atomic64_read_unchecked(&next_event->count);
73205 + value = atomic64_xchg_unchecked(&event->count, value);
73206 + atomic64_set_unchecked(&next_event->count, value);
73207
73208 swap(event->total_time_enabled, next_event->total_time_enabled);
73209 swap(event->total_time_running, next_event->total_time_running);
73210 @@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_event *event)
73211 update_event_times(event);
73212 }
73213
73214 - return atomic64_read(&event->count);
73215 + return atomic64_read_unchecked(&event->count);
73216 }
73217
73218 /*
73219 @@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct perf_event *event,
73220 values[n++] = 1 + leader->nr_siblings;
73221 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
73222 values[n++] = leader->total_time_enabled +
73223 - atomic64_read(&leader->child_total_time_enabled);
73224 + atomic64_read_unchecked(&leader->child_total_time_enabled);
73225 }
73226 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
73227 values[n++] = leader->total_time_running +
73228 - atomic64_read(&leader->child_total_time_running);
73229 + atomic64_read_unchecked(&leader->child_total_time_running);
73230 }
73231
73232 size = n * sizeof(u64);
73233 @@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct perf_event *event,
73234 values[n++] = perf_event_read_value(event);
73235 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
73236 values[n++] = event->total_time_enabled +
73237 - atomic64_read(&event->child_total_time_enabled);
73238 + atomic64_read_unchecked(&event->child_total_time_enabled);
73239 }
73240 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
73241 values[n++] = event->total_time_running +
73242 - atomic64_read(&event->child_total_time_running);
73243 + atomic64_read_unchecked(&event->child_total_time_running);
73244 }
73245 if (read_format & PERF_FORMAT_ID)
73246 values[n++] = primary_event_id(event);
73247 @@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
73248 static void perf_event_reset(struct perf_event *event)
73249 {
73250 (void)perf_event_read(event);
73251 - atomic64_set(&event->count, 0);
73252 + atomic64_set_unchecked(&event->count, 0);
73253 perf_event_update_userpage(event);
73254 }
73255
73256 @@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct perf_event *event)
73257 ++userpg->lock;
73258 barrier();
73259 userpg->index = perf_event_index(event);
73260 - userpg->offset = atomic64_read(&event->count);
73261 + userpg->offset = atomic64_read_unchecked(&event->count);
73262 if (event->state == PERF_EVENT_STATE_ACTIVE)
73263 - userpg->offset -= atomic64_read(&event->hw.prev_count);
73264 + userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
73265
73266 userpg->time_enabled = event->total_time_enabled +
73267 - atomic64_read(&event->child_total_time_enabled);
73268 + atomic64_read_unchecked(&event->child_total_time_enabled);
73269
73270 userpg->time_running = event->total_time_running +
73271 - atomic64_read(&event->child_total_time_running);
73272 + atomic64_read_unchecked(&event->child_total_time_running);
73273
73274 barrier();
73275 ++userpg->lock;
73276 @@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct perf_output_handle *handle,
73277 u64 values[4];
73278 int n = 0;
73279
73280 - values[n++] = atomic64_read(&event->count);
73281 + values[n++] = atomic64_read_unchecked(&event->count);
73282 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
73283 values[n++] = event->total_time_enabled +
73284 - atomic64_read(&event->child_total_time_enabled);
73285 + atomic64_read_unchecked(&event->child_total_time_enabled);
73286 }
73287 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
73288 values[n++] = event->total_time_running +
73289 - atomic64_read(&event->child_total_time_running);
73290 + atomic64_read_unchecked(&event->child_total_time_running);
73291 }
73292 if (read_format & PERF_FORMAT_ID)
73293 values[n++] = primary_event_id(event);
73294 @@ -2940,7 +2940,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
73295 if (leader != event)
73296 leader->pmu->read(leader);
73297
73298 - values[n++] = atomic64_read(&leader->count);
73299 + values[n++] = atomic64_read_unchecked(&leader->count);
73300 if (read_format & PERF_FORMAT_ID)
73301 values[n++] = primary_event_id(leader);
73302
73303 @@ -2952,7 +2952,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
73304 if (sub != event)
73305 sub->pmu->read(sub);
73306
73307 - values[n++] = atomic64_read(&sub->count);
73308 + values[n++] = atomic64_read_unchecked(&sub->count);
73309 if (read_format & PERF_FORMAT_ID)
73310 values[n++] = primary_event_id(sub);
73311
73312 @@ -3525,12 +3525,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
73313 * need to add enough zero bytes after the string to handle
73314 * the 64bit alignment we do later.
73315 */
73316 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
73317 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
73318 if (!buf) {
73319 name = strncpy(tmp, "//enomem", sizeof(tmp));
73320 goto got_name;
73321 }
73322 - name = d_path(&file->f_path, buf, PATH_MAX);
73323 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
73324 if (IS_ERR(name)) {
73325 name = strncpy(tmp, "//toolong", sizeof(tmp));
73326 goto got_name;
73327 @@ -3783,7 +3783,7 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
73328 {
73329 struct hw_perf_event *hwc = &event->hw;
73330
73331 - atomic64_add(nr, &event->count);
73332 + atomic64_add_unchecked(nr, &event->count);
73333
73334 if (!hwc->sample_period)
73335 return;
73336 @@ -4040,9 +4040,9 @@ static void cpu_clock_perf_event_update(struct perf_event *event)
73337 u64 now;
73338
73339 now = cpu_clock(cpu);
73340 - prev = atomic64_read(&event->hw.prev_count);
73341 - atomic64_set(&event->hw.prev_count, now);
73342 - atomic64_add(now - prev, &event->count);
73343 + prev = atomic64_read_unchecked(&event->hw.prev_count);
73344 + atomic64_set_unchecked(&event->hw.prev_count, now);
73345 + atomic64_add_unchecked(now - prev, &event->count);
73346 }
73347
73348 static int cpu_clock_perf_event_enable(struct perf_event *event)
73349 @@ -4050,7 +4050,7 @@ static int cpu_clock_perf_event_enable(struct perf_event *event)
73350 struct hw_perf_event *hwc = &event->hw;
73351 int cpu = raw_smp_processor_id();
73352
73353 - atomic64_set(&hwc->prev_count, cpu_clock(cpu));
73354 + atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
73355 perf_swevent_start_hrtimer(event);
73356
73357 return 0;
73358 @@ -4082,9 +4082,9 @@ static void task_clock_perf_event_update(struct perf_event *event, u64 now)
73359 u64 prev;
73360 s64 delta;
73361
73362 - prev = atomic64_xchg(&event->hw.prev_count, now);
73363 + prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
73364 delta = now - prev;
73365 - atomic64_add(delta, &event->count);
73366 + atomic64_add_unchecked(delta, &event->count);
73367 }
73368
73369 static int task_clock_perf_event_enable(struct perf_event *event)
73370 @@ -4094,7 +4094,7 @@ static int task_clock_perf_event_enable(struct perf_event *event)
73371
73372 now = event->ctx->time;
73373
73374 - atomic64_set(&hwc->prev_count, now);
73375 + atomic64_set_unchecked(&hwc->prev_count, now);
73376
73377 perf_swevent_start_hrtimer(event);
73378
73379 @@ -4289,7 +4289,7 @@ perf_event_alloc(struct perf_event_attr *attr,
73380 event->parent = parent_event;
73381
73382 event->ns = get_pid_ns(current->nsproxy->pid_ns);
73383 - event->id = atomic64_inc_return(&perf_event_id);
73384 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
73385
73386 event->state = PERF_EVENT_STATE_INACTIVE;
73387
73388 @@ -4720,15 +4720,15 @@ static void sync_child_event(struct perf_event *child_event,
73389 if (child_event->attr.inherit_stat)
73390 perf_event_read_event(child_event, child);
73391
73392 - child_val = atomic64_read(&child_event->count);
73393 + child_val = atomic64_read_unchecked(&child_event->count);
73394
73395 /*
73396 * Add back the child's count to the parent's count:
73397 */
73398 - atomic64_add(child_val, &parent_event->count);
73399 - atomic64_add(child_event->total_time_enabled,
73400 + atomic64_add_unchecked(child_val, &parent_event->count);
73401 + atomic64_add_unchecked(child_event->total_time_enabled,
73402 &parent_event->child_total_time_enabled);
73403 - atomic64_add(child_event->total_time_running,
73404 + atomic64_add_unchecked(child_event->total_time_running,
73405 &parent_event->child_total_time_running);
73406
73407 /*
73408 diff --git a/kernel/pid.c b/kernel/pid.c
73409 index fce7198..4f23a7e 100644
73410 --- a/kernel/pid.c
73411 +++ b/kernel/pid.c
73412 @@ -33,6 +33,7 @@
73413 #include <linux/rculist.h>
73414 #include <linux/bootmem.h>
73415 #include <linux/hash.h>
73416 +#include <linux/security.h>
73417 #include <linux/pid_namespace.h>
73418 #include <linux/init_task.h>
73419 #include <linux/syscalls.h>
73420 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
73421
73422 int pid_max = PID_MAX_DEFAULT;
73423
73424 -#define RESERVED_PIDS 300
73425 +#define RESERVED_PIDS 500
73426
73427 int pid_max_min = RESERVED_PIDS + 1;
73428 int pid_max_max = PID_MAX_LIMIT;
73429 @@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
73430 */
73431 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
73432 {
73433 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
73434 + struct task_struct *task;
73435 +
73436 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
73437 +
73438 + if (gr_pid_is_chrooted(task))
73439 + return NULL;
73440 +
73441 + return task;
73442 }
73443
73444 struct task_struct *find_task_by_vpid(pid_t vnr)
73445 @@ -391,6 +399,11 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
73446 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
73447 }
73448
73449 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
73450 +{
73451 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
73452 +}
73453 +
73454 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
73455 {
73456 struct pid *pid;
73457 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
73458 index 5c9dc22..d271117 100644
73459 --- a/kernel/posix-cpu-timers.c
73460 +++ b/kernel/posix-cpu-timers.c
73461 @@ -6,6 +6,7 @@
73462 #include <linux/posix-timers.h>
73463 #include <linux/errno.h>
73464 #include <linux/math64.h>
73465 +#include <linux/security.h>
73466 #include <asm/uaccess.h>
73467 #include <linux/kernel_stat.h>
73468 #include <trace/events/timer.h>
73469 @@ -1697,7 +1698,7 @@ static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
73470
73471 static __init int init_posix_cpu_timers(void)
73472 {
73473 - struct k_clock process = {
73474 + static struct k_clock process = {
73475 .clock_getres = process_cpu_clock_getres,
73476 .clock_get = process_cpu_clock_get,
73477 .clock_set = do_posix_clock_nosettime,
73478 @@ -1705,7 +1706,7 @@ static __init int init_posix_cpu_timers(void)
73479 .nsleep = process_cpu_nsleep,
73480 .nsleep_restart = process_cpu_nsleep_restart,
73481 };
73482 - struct k_clock thread = {
73483 + static struct k_clock thread = {
73484 .clock_getres = thread_cpu_clock_getres,
73485 .clock_get = thread_cpu_clock_get,
73486 .clock_set = do_posix_clock_nosettime,
73487 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
73488 index 5e76d22..cf1baeb 100644
73489 --- a/kernel/posix-timers.c
73490 +++ b/kernel/posix-timers.c
73491 @@ -42,6 +42,7 @@
73492 #include <linux/compiler.h>
73493 #include <linux/idr.h>
73494 #include <linux/posix-timers.h>
73495 +#include <linux/grsecurity.h>
73496 #include <linux/syscalls.h>
73497 #include <linux/wait.h>
73498 #include <linux/workqueue.h>
73499 @@ -131,7 +132,7 @@ static DEFINE_SPINLOCK(idr_lock);
73500 * which we beg off on and pass to do_sys_settimeofday().
73501 */
73502
73503 -static struct k_clock posix_clocks[MAX_CLOCKS];
73504 +static struct k_clock *posix_clocks[MAX_CLOCKS];
73505
73506 /*
73507 * These ones are defined below.
73508 @@ -157,8 +158,8 @@ static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
73509 */
73510 #define CLOCK_DISPATCH(clock, call, arglist) \
73511 ((clock) < 0 ? posix_cpu_##call arglist : \
73512 - (posix_clocks[clock].call != NULL \
73513 - ? (*posix_clocks[clock].call) arglist : common_##call arglist))
73514 + (posix_clocks[clock]->call != NULL \
73515 + ? (*posix_clocks[clock]->call) arglist : common_##call arglist))
73516
73517 /*
73518 * Default clock hook functions when the struct k_clock passed
73519 @@ -172,7 +173,7 @@ static inline int common_clock_getres(const clockid_t which_clock,
73520 struct timespec *tp)
73521 {
73522 tp->tv_sec = 0;
73523 - tp->tv_nsec = posix_clocks[which_clock].res;
73524 + tp->tv_nsec = posix_clocks[which_clock]->res;
73525 return 0;
73526 }
73527
73528 @@ -217,9 +218,11 @@ static inline int invalid_clockid(const clockid_t which_clock)
73529 return 0;
73530 if ((unsigned) which_clock >= MAX_CLOCKS)
73531 return 1;
73532 - if (posix_clocks[which_clock].clock_getres != NULL)
73533 + if (posix_clocks[which_clock] == NULL)
73534 return 0;
73535 - if (posix_clocks[which_clock].res != 0)
73536 + if (posix_clocks[which_clock]->clock_getres != NULL)
73537 + return 0;
73538 + if (posix_clocks[which_clock]->res != 0)
73539 return 0;
73540 return 1;
73541 }
73542 @@ -266,29 +269,29 @@ int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp)
73543 */
73544 static __init int init_posix_timers(void)
73545 {
73546 - struct k_clock clock_realtime = {
73547 + static struct k_clock clock_realtime = {
73548 .clock_getres = hrtimer_get_res,
73549 };
73550 - struct k_clock clock_monotonic = {
73551 + static struct k_clock clock_monotonic = {
73552 .clock_getres = hrtimer_get_res,
73553 .clock_get = posix_ktime_get_ts,
73554 .clock_set = do_posix_clock_nosettime,
73555 };
73556 - struct k_clock clock_monotonic_raw = {
73557 + static struct k_clock clock_monotonic_raw = {
73558 .clock_getres = hrtimer_get_res,
73559 .clock_get = posix_get_monotonic_raw,
73560 .clock_set = do_posix_clock_nosettime,
73561 .timer_create = no_timer_create,
73562 .nsleep = no_nsleep,
73563 };
73564 - struct k_clock clock_realtime_coarse = {
73565 + static struct k_clock clock_realtime_coarse = {
73566 .clock_getres = posix_get_coarse_res,
73567 .clock_get = posix_get_realtime_coarse,
73568 .clock_set = do_posix_clock_nosettime,
73569 .timer_create = no_timer_create,
73570 .nsleep = no_nsleep,
73571 };
73572 - struct k_clock clock_monotonic_coarse = {
73573 + static struct k_clock clock_monotonic_coarse = {
73574 .clock_getres = posix_get_coarse_res,
73575 .clock_get = posix_get_monotonic_coarse,
73576 .clock_set = do_posix_clock_nosettime,
73577 @@ -296,6 +299,8 @@ static __init int init_posix_timers(void)
73578 .nsleep = no_nsleep,
73579 };
73580
73581 + pax_track_stack();
73582 +
73583 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
73584 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
73585 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
73586 @@ -484,7 +489,7 @@ void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
73587 return;
73588 }
73589
73590 - posix_clocks[clock_id] = *new_clock;
73591 + posix_clocks[clock_id] = new_clock;
73592 }
73593 EXPORT_SYMBOL_GPL(register_posix_clock);
73594
73595 @@ -948,6 +953,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
73596 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
73597 return -EFAULT;
73598
73599 + /* only the CLOCK_REALTIME clock can be set, all other clocks
73600 + have their clock_set fptr set to a nosettime dummy function
73601 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
73602 + call common_clock_set, which calls do_sys_settimeofday, which
73603 + we hook
73604 + */
73605 +
73606 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
73607 }
73608
73609 diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
73610 index 04a9e90..bc355aa 100644
73611 --- a/kernel/power/hibernate.c
73612 +++ b/kernel/power/hibernate.c
73613 @@ -48,14 +48,14 @@ enum {
73614
73615 static int hibernation_mode = HIBERNATION_SHUTDOWN;
73616
73617 -static struct platform_hibernation_ops *hibernation_ops;
73618 +static const struct platform_hibernation_ops *hibernation_ops;
73619
73620 /**
73621 * hibernation_set_ops - set the global hibernate operations
73622 * @ops: the hibernation operations to use in subsequent hibernation transitions
73623 */
73624
73625 -void hibernation_set_ops(struct platform_hibernation_ops *ops)
73626 +void hibernation_set_ops(const struct platform_hibernation_ops *ops)
73627 {
73628 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
73629 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
73630 diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
73631 index e8b3370..484c2e4 100644
73632 --- a/kernel/power/poweroff.c
73633 +++ b/kernel/power/poweroff.c
73634 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
73635 .enable_mask = SYSRQ_ENABLE_BOOT,
73636 };
73637
73638 -static int pm_sysrq_init(void)
73639 +static int __init pm_sysrq_init(void)
73640 {
73641 register_sysrq_key('o', &sysrq_poweroff_op);
73642 return 0;
73643 diff --git a/kernel/power/process.c b/kernel/power/process.c
73644 index e7cd671..56d5f459 100644
73645 --- a/kernel/power/process.c
73646 +++ b/kernel/power/process.c
73647 @@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_only)
73648 struct timeval start, end;
73649 u64 elapsed_csecs64;
73650 unsigned int elapsed_csecs;
73651 + bool timedout = false;
73652
73653 do_gettimeofday(&start);
73654
73655 end_time = jiffies + TIMEOUT;
73656 do {
73657 todo = 0;
73658 + if (time_after(jiffies, end_time))
73659 + timedout = true;
73660 read_lock(&tasklist_lock);
73661 do_each_thread(g, p) {
73662 if (frozen(p) || !freezeable(p))
73663 @@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_only)
73664 * It is "frozen enough". If the task does wake
73665 * up, it will immediately call try_to_freeze.
73666 */
73667 - if (!task_is_stopped_or_traced(p) &&
73668 - !freezer_should_skip(p))
73669 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
73670 todo++;
73671 + if (timedout) {
73672 + printk(KERN_ERR "Task refusing to freeze:\n");
73673 + sched_show_task(p);
73674 + }
73675 + }
73676 } while_each_thread(g, p);
73677 read_unlock(&tasklist_lock);
73678 yield(); /* Yield is okay here */
73679 - if (time_after(jiffies, end_time))
73680 - break;
73681 - } while (todo);
73682 + } while (todo && !timedout);
73683
73684 do_gettimeofday(&end);
73685 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
73686 diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
73687 index 40dd021..fb30ceb 100644
73688 --- a/kernel/power/suspend.c
73689 +++ b/kernel/power/suspend.c
73690 @@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_MAX] = {
73691 [PM_SUSPEND_MEM] = "mem",
73692 };
73693
73694 -static struct platform_suspend_ops *suspend_ops;
73695 +static const struct platform_suspend_ops *suspend_ops;
73696
73697 /**
73698 * suspend_set_ops - Set the global suspend method table.
73699 * @ops: Pointer to ops structure.
73700 */
73701 -void suspend_set_ops(struct platform_suspend_ops *ops)
73702 +void suspend_set_ops(const struct platform_suspend_ops *ops)
73703 {
73704 mutex_lock(&pm_mutex);
73705 suspend_ops = ops;
73706 diff --git a/kernel/printk.c b/kernel/printk.c
73707 index 4cade47..4d17900 100644
73708 --- a/kernel/printk.c
73709 +++ b/kernel/printk.c
73710 @@ -33,6 +33,7 @@
73711 #include <linux/bootmem.h>
73712 #include <linux/syscalls.h>
73713 #include <linux/kexec.h>
73714 +#include <linux/syslog.h>
73715
73716 #include <asm/uaccess.h>
73717
73718 @@ -256,38 +257,30 @@ static inline void boot_delay_msec(void)
73719 }
73720 #endif
73721
73722 -/*
73723 - * Commands to do_syslog:
73724 - *
73725 - * 0 -- Close the log. Currently a NOP.
73726 - * 1 -- Open the log. Currently a NOP.
73727 - * 2 -- Read from the log.
73728 - * 3 -- Read all messages remaining in the ring buffer.
73729 - * 4 -- Read and clear all messages remaining in the ring buffer
73730 - * 5 -- Clear ring buffer.
73731 - * 6 -- Disable printk's to console
73732 - * 7 -- Enable printk's to console
73733 - * 8 -- Set level of messages printed to console
73734 - * 9 -- Return number of unread characters in the log buffer
73735 - * 10 -- Return size of the log buffer
73736 - */
73737 -int do_syslog(int type, char __user *buf, int len)
73738 +int do_syslog(int type, char __user *buf, int len, bool from_file)
73739 {
73740 unsigned i, j, limit, count;
73741 int do_clear = 0;
73742 char c;
73743 int error = 0;
73744
73745 - error = security_syslog(type);
73746 +#ifdef CONFIG_GRKERNSEC_DMESG
73747 + if (grsec_enable_dmesg &&
73748 + (!from_file || (from_file && type == SYSLOG_ACTION_OPEN)) &&
73749 + !capable(CAP_SYS_ADMIN))
73750 + return -EPERM;
73751 +#endif
73752 +
73753 + error = security_syslog(type, from_file);
73754 if (error)
73755 return error;
73756
73757 switch (type) {
73758 - case 0: /* Close log */
73759 + case SYSLOG_ACTION_CLOSE: /* Close log */
73760 break;
73761 - case 1: /* Open log */
73762 + case SYSLOG_ACTION_OPEN: /* Open log */
73763 break;
73764 - case 2: /* Read from log */
73765 + case SYSLOG_ACTION_READ: /* Read from log */
73766 error = -EINVAL;
73767 if (!buf || len < 0)
73768 goto out;
73769 @@ -318,10 +311,12 @@ int do_syslog(int type, char __user *buf, int len)
73770 if (!error)
73771 error = i;
73772 break;
73773 - case 4: /* Read/clear last kernel messages */
73774 + /* Read/clear last kernel messages */
73775 + case SYSLOG_ACTION_READ_CLEAR:
73776 do_clear = 1;
73777 /* FALL THRU */
73778 - case 3: /* Read last kernel messages */
73779 + /* Read last kernel messages */
73780 + case SYSLOG_ACTION_READ_ALL:
73781 error = -EINVAL;
73782 if (!buf || len < 0)
73783 goto out;
73784 @@ -374,21 +369,25 @@ int do_syslog(int type, char __user *buf, int len)
73785 }
73786 }
73787 break;
73788 - case 5: /* Clear ring buffer */
73789 + /* Clear ring buffer */
73790 + case SYSLOG_ACTION_CLEAR:
73791 logged_chars = 0;
73792 break;
73793 - case 6: /* Disable logging to console */
73794 + /* Disable logging to console */
73795 + case SYSLOG_ACTION_CONSOLE_OFF:
73796 if (saved_console_loglevel == -1)
73797 saved_console_loglevel = console_loglevel;
73798 console_loglevel = minimum_console_loglevel;
73799 break;
73800 - case 7: /* Enable logging to console */
73801 + /* Enable logging to console */
73802 + case SYSLOG_ACTION_CONSOLE_ON:
73803 if (saved_console_loglevel != -1) {
73804 console_loglevel = saved_console_loglevel;
73805 saved_console_loglevel = -1;
73806 }
73807 break;
73808 - case 8: /* Set level of messages printed to console */
73809 + /* Set level of messages printed to console */
73810 + case SYSLOG_ACTION_CONSOLE_LEVEL:
73811 error = -EINVAL;
73812 if (len < 1 || len > 8)
73813 goto out;
73814 @@ -399,10 +398,12 @@ int do_syslog(int type, char __user *buf, int len)
73815 saved_console_loglevel = -1;
73816 error = 0;
73817 break;
73818 - case 9: /* Number of chars in the log buffer */
73819 + /* Number of chars in the log buffer */
73820 + case SYSLOG_ACTION_SIZE_UNREAD:
73821 error = log_end - log_start;
73822 break;
73823 - case 10: /* Size of the log buffer */
73824 + /* Size of the log buffer */
73825 + case SYSLOG_ACTION_SIZE_BUFFER:
73826 error = log_buf_len;
73827 break;
73828 default:
73829 @@ -415,7 +416,7 @@ out:
73830
73831 SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
73832 {
73833 - return do_syslog(type, buf, len);
73834 + return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
73835 }
73836
73837 /*
73838 diff --git a/kernel/profile.c b/kernel/profile.c
73839 index dfadc5b..7f59404 100644
73840 --- a/kernel/profile.c
73841 +++ b/kernel/profile.c
73842 @@ -39,7 +39,7 @@ struct profile_hit {
73843 /* Oprofile timer tick hook */
73844 static int (*timer_hook)(struct pt_regs *) __read_mostly;
73845
73846 -static atomic_t *prof_buffer;
73847 +static atomic_unchecked_t *prof_buffer;
73848 static unsigned long prof_len, prof_shift;
73849
73850 int prof_on __read_mostly;
73851 @@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
73852 hits[i].pc = 0;
73853 continue;
73854 }
73855 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
73856 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
73857 hits[i].hits = hits[i].pc = 0;
73858 }
73859 }
73860 @@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
73861 * Add the current hit(s) and flush the write-queue out
73862 * to the global buffer:
73863 */
73864 - atomic_add(nr_hits, &prof_buffer[pc]);
73865 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
73866 for (i = 0; i < NR_PROFILE_HIT; ++i) {
73867 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
73868 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
73869 hits[i].pc = hits[i].hits = 0;
73870 }
73871 out:
73872 @@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
73873 if (prof_on != type || !prof_buffer)
73874 return;
73875 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
73876 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
73877 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
73878 }
73879 #endif /* !CONFIG_SMP */
73880 EXPORT_SYMBOL_GPL(profile_hits);
73881 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
73882 return -EFAULT;
73883 buf++; p++; count--; read++;
73884 }
73885 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
73886 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
73887 if (copy_to_user(buf, (void *)pnt, count))
73888 return -EFAULT;
73889 read += count;
73890 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
73891 }
73892 #endif
73893 profile_discard_flip_buffers();
73894 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
73895 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
73896 return count;
73897 }
73898
73899 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
73900 index 05625f6..733bf70 100644
73901 --- a/kernel/ptrace.c
73902 +++ b/kernel/ptrace.c
73903 @@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_struct *child, int kill)
73904 return ret;
73905 }
73906
73907 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
73908 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
73909 + unsigned int log)
73910 {
73911 const struct cred *cred = current_cred(), *tcred;
73912
73913 @@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
73914 cred->gid != tcred->egid ||
73915 cred->gid != tcred->sgid ||
73916 cred->gid != tcred->gid) &&
73917 - !capable(CAP_SYS_PTRACE)) {
73918 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
73919 + (log && !capable(CAP_SYS_PTRACE)))
73920 + ) {
73921 rcu_read_unlock();
73922 return -EPERM;
73923 }
73924 @@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
73925 smp_rmb();
73926 if (task->mm)
73927 dumpable = get_dumpable(task->mm);
73928 - if (!dumpable && !capable(CAP_SYS_PTRACE))
73929 + if (!dumpable &&
73930 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
73931 + (log && !capable(CAP_SYS_PTRACE))))
73932 return -EPERM;
73933
73934 return security_ptrace_access_check(task, mode);
73935 @@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
73936 {
73937 int err;
73938 task_lock(task);
73939 - err = __ptrace_may_access(task, mode);
73940 + err = __ptrace_may_access(task, mode, 0);
73941 + task_unlock(task);
73942 + return !err;
73943 +}
73944 +
73945 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
73946 +{
73947 + int err;
73948 + task_lock(task);
73949 + err = __ptrace_may_access(task, mode, 1);
73950 task_unlock(task);
73951 return !err;
73952 }
73953 @@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *task)
73954 goto out;
73955
73956 task_lock(task);
73957 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
73958 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
73959 task_unlock(task);
73960 if (retval)
73961 goto unlock_creds;
73962 @@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *task)
73963 goto unlock_tasklist;
73964
73965 task->ptrace = PT_PTRACED;
73966 - if (capable(CAP_SYS_PTRACE))
73967 + if (capable_nolog(CAP_SYS_PTRACE))
73968 task->ptrace |= PT_PTRACE_CAP;
73969
73970 __ptrace_link(task, current);
73971 @@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
73972 {
73973 int copied = 0;
73974
73975 + pax_track_stack();
73976 +
73977 while (len > 0) {
73978 char buf[128];
73979 int this_len, retval;
73980 @@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
73981 {
73982 int copied = 0;
73983
73984 + pax_track_stack();
73985 +
73986 while (len > 0) {
73987 char buf[128];
73988 int this_len, retval;
73989 @@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *child, long request,
73990 int ret = -EIO;
73991 siginfo_t siginfo;
73992
73993 + pax_track_stack();
73994 +
73995 switch (request) {
73996 case PTRACE_PEEKTEXT:
73997 case PTRACE_PEEKDATA:
73998 @@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *child, long request,
73999 ret = ptrace_setoptions(child, data);
74000 break;
74001 case PTRACE_GETEVENTMSG:
74002 - ret = put_user(child->ptrace_message, (unsigned long __user *) data);
74003 + ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
74004 break;
74005
74006 case PTRACE_GETSIGINFO:
74007 ret = ptrace_getsiginfo(child, &siginfo);
74008 if (!ret)
74009 - ret = copy_siginfo_to_user((siginfo_t __user *) data,
74010 + ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
74011 &siginfo);
74012 break;
74013
74014 case PTRACE_SETSIGINFO:
74015 - if (copy_from_user(&siginfo, (siginfo_t __user *) data,
74016 + if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
74017 sizeof siginfo))
74018 ret = -EFAULT;
74019 else
74020 @@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
74021 goto out;
74022 }
74023
74024 + if (gr_handle_ptrace(child, request)) {
74025 + ret = -EPERM;
74026 + goto out_put_task_struct;
74027 + }
74028 +
74029 if (request == PTRACE_ATTACH) {
74030 ret = ptrace_attach(child);
74031 /*
74032 * Some architectures need to do book-keeping after
74033 * a ptrace attach.
74034 */
74035 - if (!ret)
74036 + if (!ret) {
74037 arch_ptrace_attach(child);
74038 + gr_audit_ptrace(child);
74039 + }
74040 goto out_put_task_struct;
74041 }
74042
74043 @@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
74044 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
74045 if (copied != sizeof(tmp))
74046 return -EIO;
74047 - return put_user(tmp, (unsigned long __user *)data);
74048 + return put_user(tmp, (__force unsigned long __user *)data);
74049 }
74050
74051 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
74052 @@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
74053 siginfo_t siginfo;
74054 int ret;
74055
74056 + pax_track_stack();
74057 +
74058 switch (request) {
74059 case PTRACE_PEEKTEXT:
74060 case PTRACE_PEEKDATA:
74061 @@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
74062 goto out;
74063 }
74064
74065 + if (gr_handle_ptrace(child, request)) {
74066 + ret = -EPERM;
74067 + goto out_put_task_struct;
74068 + }
74069 +
74070 if (request == PTRACE_ATTACH) {
74071 ret = ptrace_attach(child);
74072 /*
74073 * Some architectures need to do book-keeping after
74074 * a ptrace attach.
74075 */
74076 - if (!ret)
74077 + if (!ret) {
74078 arch_ptrace_attach(child);
74079 + gr_audit_ptrace(child);
74080 + }
74081 goto out_put_task_struct;
74082 }
74083
74084 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
74085 index 697c0a0..2402696 100644
74086 --- a/kernel/rcutorture.c
74087 +++ b/kernel/rcutorture.c
74088 @@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
74089 { 0 };
74090 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
74091 { 0 };
74092 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
74093 -static atomic_t n_rcu_torture_alloc;
74094 -static atomic_t n_rcu_torture_alloc_fail;
74095 -static atomic_t n_rcu_torture_free;
74096 -static atomic_t n_rcu_torture_mberror;
74097 -static atomic_t n_rcu_torture_error;
74098 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
74099 +static atomic_unchecked_t n_rcu_torture_alloc;
74100 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
74101 +static atomic_unchecked_t n_rcu_torture_free;
74102 +static atomic_unchecked_t n_rcu_torture_mberror;
74103 +static atomic_unchecked_t n_rcu_torture_error;
74104 static long n_rcu_torture_timers;
74105 static struct list_head rcu_torture_removed;
74106 static cpumask_var_t shuffle_tmp_mask;
74107 @@ -187,11 +187,11 @@ rcu_torture_alloc(void)
74108
74109 spin_lock_bh(&rcu_torture_lock);
74110 if (list_empty(&rcu_torture_freelist)) {
74111 - atomic_inc(&n_rcu_torture_alloc_fail);
74112 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
74113 spin_unlock_bh(&rcu_torture_lock);
74114 return NULL;
74115 }
74116 - atomic_inc(&n_rcu_torture_alloc);
74117 + atomic_inc_unchecked(&n_rcu_torture_alloc);
74118 p = rcu_torture_freelist.next;
74119 list_del_init(p);
74120 spin_unlock_bh(&rcu_torture_lock);
74121 @@ -204,7 +204,7 @@ rcu_torture_alloc(void)
74122 static void
74123 rcu_torture_free(struct rcu_torture *p)
74124 {
74125 - atomic_inc(&n_rcu_torture_free);
74126 + atomic_inc_unchecked(&n_rcu_torture_free);
74127 spin_lock_bh(&rcu_torture_lock);
74128 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
74129 spin_unlock_bh(&rcu_torture_lock);
74130 @@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
74131 i = rp->rtort_pipe_count;
74132 if (i > RCU_TORTURE_PIPE_LEN)
74133 i = RCU_TORTURE_PIPE_LEN;
74134 - atomic_inc(&rcu_torture_wcount[i]);
74135 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
74136 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
74137 rp->rtort_mbtest = 0;
74138 rcu_torture_free(rp);
74139 @@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
74140 i = rp->rtort_pipe_count;
74141 if (i > RCU_TORTURE_PIPE_LEN)
74142 i = RCU_TORTURE_PIPE_LEN;
74143 - atomic_inc(&rcu_torture_wcount[i]);
74144 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
74145 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
74146 rp->rtort_mbtest = 0;
74147 list_del(&rp->rtort_free);
74148 @@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
74149 i = old_rp->rtort_pipe_count;
74150 if (i > RCU_TORTURE_PIPE_LEN)
74151 i = RCU_TORTURE_PIPE_LEN;
74152 - atomic_inc(&rcu_torture_wcount[i]);
74153 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
74154 old_rp->rtort_pipe_count++;
74155 cur_ops->deferred_free(old_rp);
74156 }
74157 @@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned long unused)
74158 return;
74159 }
74160 if (p->rtort_mbtest == 0)
74161 - atomic_inc(&n_rcu_torture_mberror);
74162 + atomic_inc_unchecked(&n_rcu_torture_mberror);
74163 spin_lock(&rand_lock);
74164 cur_ops->read_delay(&rand);
74165 n_rcu_torture_timers++;
74166 @@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
74167 continue;
74168 }
74169 if (p->rtort_mbtest == 0)
74170 - atomic_inc(&n_rcu_torture_mberror);
74171 + atomic_inc_unchecked(&n_rcu_torture_mberror);
74172 cur_ops->read_delay(&rand);
74173 preempt_disable();
74174 pipe_count = p->rtort_pipe_count;
74175 @@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
74176 rcu_torture_current,
74177 rcu_torture_current_version,
74178 list_empty(&rcu_torture_freelist),
74179 - atomic_read(&n_rcu_torture_alloc),
74180 - atomic_read(&n_rcu_torture_alloc_fail),
74181 - atomic_read(&n_rcu_torture_free),
74182 - atomic_read(&n_rcu_torture_mberror),
74183 + atomic_read_unchecked(&n_rcu_torture_alloc),
74184 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
74185 + atomic_read_unchecked(&n_rcu_torture_free),
74186 + atomic_read_unchecked(&n_rcu_torture_mberror),
74187 n_rcu_torture_timers);
74188 - if (atomic_read(&n_rcu_torture_mberror) != 0)
74189 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
74190 cnt += sprintf(&page[cnt], " !!!");
74191 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
74192 if (i > 1) {
74193 cnt += sprintf(&page[cnt], "!!! ");
74194 - atomic_inc(&n_rcu_torture_error);
74195 + atomic_inc_unchecked(&n_rcu_torture_error);
74196 WARN_ON_ONCE(1);
74197 }
74198 cnt += sprintf(&page[cnt], "Reader Pipe: ");
74199 @@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
74200 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
74201 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
74202 cnt += sprintf(&page[cnt], " %d",
74203 - atomic_read(&rcu_torture_wcount[i]));
74204 + atomic_read_unchecked(&rcu_torture_wcount[i]));
74205 }
74206 cnt += sprintf(&page[cnt], "\n");
74207 if (cur_ops->stats)
74208 @@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
74209
74210 if (cur_ops->cleanup)
74211 cur_ops->cleanup();
74212 - if (atomic_read(&n_rcu_torture_error))
74213 + if (atomic_read_unchecked(&n_rcu_torture_error))
74214 rcu_torture_print_module_parms("End of test: FAILURE");
74215 else
74216 rcu_torture_print_module_parms("End of test: SUCCESS");
74217 @@ -1138,13 +1138,13 @@ rcu_torture_init(void)
74218
74219 rcu_torture_current = NULL;
74220 rcu_torture_current_version = 0;
74221 - atomic_set(&n_rcu_torture_alloc, 0);
74222 - atomic_set(&n_rcu_torture_alloc_fail, 0);
74223 - atomic_set(&n_rcu_torture_free, 0);
74224 - atomic_set(&n_rcu_torture_mberror, 0);
74225 - atomic_set(&n_rcu_torture_error, 0);
74226 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
74227 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
74228 + atomic_set_unchecked(&n_rcu_torture_free, 0);
74229 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
74230 + atomic_set_unchecked(&n_rcu_torture_error, 0);
74231 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
74232 - atomic_set(&rcu_torture_wcount[i], 0);
74233 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
74234 for_each_possible_cpu(cpu) {
74235 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
74236 per_cpu(rcu_torture_count, cpu)[i] = 0;
74237 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
74238 index 683c4f3..97f54c6 100644
74239 --- a/kernel/rcutree.c
74240 +++ b/kernel/rcutree.c
74241 @@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
74242 /*
74243 * Do softirq processing for the current CPU.
74244 */
74245 -static void rcu_process_callbacks(struct softirq_action *unused)
74246 +static void rcu_process_callbacks(void)
74247 {
74248 /*
74249 * Memory references from any prior RCU read-side critical sections
74250 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
74251 index c03edf7..ac1b341 100644
74252 --- a/kernel/rcutree_plugin.h
74253 +++ b/kernel/rcutree_plugin.h
74254 @@ -145,7 +145,7 @@ static void rcu_preempt_note_context_switch(int cpu)
74255 */
74256 void __rcu_read_lock(void)
74257 {
74258 - ACCESS_ONCE(current->rcu_read_lock_nesting)++;
74259 + ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
74260 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
74261 }
74262 EXPORT_SYMBOL_GPL(__rcu_read_lock);
74263 @@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
74264 struct task_struct *t = current;
74265
74266 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
74267 - if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
74268 + if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
74269 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
74270 rcu_read_unlock_special(t);
74271 }
74272 diff --git a/kernel/relay.c b/kernel/relay.c
74273 index 760c262..a9fd241 100644
74274 --- a/kernel/relay.c
74275 +++ b/kernel/relay.c
74276 @@ -1222,7 +1222,7 @@ static int subbuf_splice_actor(struct file *in,
74277 unsigned int flags,
74278 int *nonpad_ret)
74279 {
74280 - unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
74281 + unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
74282 struct rchan_buf *rbuf = in->private_data;
74283 unsigned int subbuf_size = rbuf->chan->subbuf_size;
74284 uint64_t pos = (uint64_t) *ppos;
74285 @@ -1241,6 +1241,9 @@ static int subbuf_splice_actor(struct file *in,
74286 .ops = &relay_pipe_buf_ops,
74287 .spd_release = relay_page_release,
74288 };
74289 + ssize_t ret;
74290 +
74291 + pax_track_stack();
74292
74293 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
74294 return 0;
74295 diff --git a/kernel/resource.c b/kernel/resource.c
74296 index fb11a58..4e61ae1 100644
74297 --- a/kernel/resource.c
74298 +++ b/kernel/resource.c
74299 @@ -132,8 +132,18 @@ static const struct file_operations proc_iomem_operations = {
74300
74301 static int __init ioresources_init(void)
74302 {
74303 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
74304 +#ifdef CONFIG_GRKERNSEC_PROC_USER
74305 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
74306 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
74307 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
74308 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
74309 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
74310 +#endif
74311 +#else
74312 proc_create("ioports", 0, NULL, &proc_ioports_operations);
74313 proc_create("iomem", 0, NULL, &proc_iomem_operations);
74314 +#endif
74315 return 0;
74316 }
74317 __initcall(ioresources_init);
74318 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
74319 index a56f629..1fc4989 100644
74320 --- a/kernel/rtmutex-tester.c
74321 +++ b/kernel/rtmutex-tester.c
74322 @@ -21,7 +21,7 @@
74323 #define MAX_RT_TEST_MUTEXES 8
74324
74325 static spinlock_t rttest_lock;
74326 -static atomic_t rttest_event;
74327 +static atomic_unchecked_t rttest_event;
74328
74329 struct test_thread_data {
74330 int opcode;
74331 @@ -64,7 +64,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
74332
74333 case RTTEST_LOCKCONT:
74334 td->mutexes[td->opdata] = 1;
74335 - td->event = atomic_add_return(1, &rttest_event);
74336 + td->event = atomic_add_return_unchecked(1, &rttest_event);
74337 return 0;
74338
74339 case RTTEST_RESET:
74340 @@ -82,7 +82,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
74341 return 0;
74342
74343 case RTTEST_RESETEVENT:
74344 - atomic_set(&rttest_event, 0);
74345 + atomic_set_unchecked(&rttest_event, 0);
74346 return 0;
74347
74348 default:
74349 @@ -99,9 +99,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
74350 return ret;
74351
74352 td->mutexes[id] = 1;
74353 - td->event = atomic_add_return(1, &rttest_event);
74354 + td->event = atomic_add_return_unchecked(1, &rttest_event);
74355 rt_mutex_lock(&mutexes[id]);
74356 - td->event = atomic_add_return(1, &rttest_event);
74357 + td->event = atomic_add_return_unchecked(1, &rttest_event);
74358 td->mutexes[id] = 4;
74359 return 0;
74360
74361 @@ -112,9 +112,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
74362 return ret;
74363
74364 td->mutexes[id] = 1;
74365 - td->event = atomic_add_return(1, &rttest_event);
74366 + td->event = atomic_add_return_unchecked(1, &rttest_event);
74367 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
74368 - td->event = atomic_add_return(1, &rttest_event);
74369 + td->event = atomic_add_return_unchecked(1, &rttest_event);
74370 td->mutexes[id] = ret ? 0 : 4;
74371 return ret ? -EINTR : 0;
74372
74373 @@ -123,9 +123,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
74374 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
74375 return ret;
74376
74377 - td->event = atomic_add_return(1, &rttest_event);
74378 + td->event = atomic_add_return_unchecked(1, &rttest_event);
74379 rt_mutex_unlock(&mutexes[id]);
74380 - td->event = atomic_add_return(1, &rttest_event);
74381 + td->event = atomic_add_return_unchecked(1, &rttest_event);
74382 td->mutexes[id] = 0;
74383 return 0;
74384
74385 @@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
74386 break;
74387
74388 td->mutexes[dat] = 2;
74389 - td->event = atomic_add_return(1, &rttest_event);
74390 + td->event = atomic_add_return_unchecked(1, &rttest_event);
74391 break;
74392
74393 case RTTEST_LOCKBKL:
74394 @@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
74395 return;
74396
74397 td->mutexes[dat] = 3;
74398 - td->event = atomic_add_return(1, &rttest_event);
74399 + td->event = atomic_add_return_unchecked(1, &rttest_event);
74400 break;
74401
74402 case RTTEST_LOCKNOWAIT:
74403 @@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
74404 return;
74405
74406 td->mutexes[dat] = 1;
74407 - td->event = atomic_add_return(1, &rttest_event);
74408 + td->event = atomic_add_return_unchecked(1, &rttest_event);
74409 return;
74410
74411 case RTTEST_LOCKBKL:
74412 diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
74413 index 29bd4ba..8c5de90 100644
74414 --- a/kernel/rtmutex.c
74415 +++ b/kernel/rtmutex.c
74416 @@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
74417 */
74418 spin_lock_irqsave(&pendowner->pi_lock, flags);
74419
74420 - WARN_ON(!pendowner->pi_blocked_on);
74421 + BUG_ON(!pendowner->pi_blocked_on);
74422 WARN_ON(pendowner->pi_blocked_on != waiter);
74423 WARN_ON(pendowner->pi_blocked_on->lock != lock);
74424
74425 diff --git a/kernel/sched.c b/kernel/sched.c
74426 index 0591df8..6e343c3 100644
74427 --- a/kernel/sched.c
74428 +++ b/kernel/sched.c
74429 @@ -2764,9 +2764,10 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
74430 {
74431 unsigned long flags;
74432 struct rq *rq;
74433 - int cpu = get_cpu();
74434
74435 #ifdef CONFIG_SMP
74436 + int cpu = get_cpu();
74437 +
74438 rq = task_rq_lock(p, &flags);
74439 p->state = TASK_WAKING;
74440
74441 @@ -5043,7 +5044,7 @@ out:
74442 * In CONFIG_NO_HZ case, the idle load balance owner will do the
74443 * rebalancing for all the cpus for whom scheduler ticks are stopped.
74444 */
74445 -static void run_rebalance_domains(struct softirq_action *h)
74446 +static void run_rebalance_domains(void)
74447 {
74448 int this_cpu = smp_processor_id();
74449 struct rq *this_rq = cpu_rq(this_cpu);
74450 @@ -5700,6 +5701,8 @@ asmlinkage void __sched schedule(void)
74451 struct rq *rq;
74452 int cpu;
74453
74454 + pax_track_stack();
74455 +
74456 need_resched:
74457 preempt_disable();
74458 cpu = smp_processor_id();
74459 @@ -5770,7 +5773,7 @@ EXPORT_SYMBOL(schedule);
74460 * Look out! "owner" is an entirely speculative pointer
74461 * access and not reliable.
74462 */
74463 -int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
74464 +int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
74465 {
74466 unsigned int cpu;
74467 struct rq *rq;
74468 @@ -5784,10 +5787,10 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
74469 * DEBUG_PAGEALLOC could have unmapped it if
74470 * the mutex owner just released it and exited.
74471 */
74472 - if (probe_kernel_address(&owner->cpu, cpu))
74473 + if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
74474 return 0;
74475 #else
74476 - cpu = owner->cpu;
74477 + cpu = task_thread_info(owner)->cpu;
74478 #endif
74479
74480 /*
74481 @@ -5816,7 +5819,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
74482 /*
74483 * Is that owner really running on that cpu?
74484 */
74485 - if (task_thread_info(rq->curr) != owner || need_resched())
74486 + if (rq->curr != owner || need_resched())
74487 return 0;
74488
74489 cpu_relax();
74490 @@ -6359,6 +6362,8 @@ int can_nice(const struct task_struct *p, const int nice)
74491 /* convert nice value [19,-20] to rlimit style value [1,40] */
74492 int nice_rlim = 20 - nice;
74493
74494 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
74495 +
74496 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
74497 capable(CAP_SYS_NICE));
74498 }
74499 @@ -6392,7 +6397,8 @@ SYSCALL_DEFINE1(nice, int, increment)
74500 if (nice > 19)
74501 nice = 19;
74502
74503 - if (increment < 0 && !can_nice(current, nice))
74504 + if (increment < 0 && (!can_nice(current, nice) ||
74505 + gr_handle_chroot_nice()))
74506 return -EPERM;
74507
74508 retval = security_task_setnice(current, nice);
74509 @@ -8774,7 +8780,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
74510 long power;
74511 int weight;
74512
74513 - WARN_ON(!sd || !sd->groups);
74514 + BUG_ON(!sd || !sd->groups);
74515
74516 if (cpu != group_first_cpu(sd->groups))
74517 return;
74518 diff --git a/kernel/signal.c b/kernel/signal.c
74519 index 2494827..cda80a0 100644
74520 --- a/kernel/signal.c
74521 +++ b/kernel/signal.c
74522 @@ -41,12 +41,12 @@
74523
74524 static struct kmem_cache *sigqueue_cachep;
74525
74526 -static void __user *sig_handler(struct task_struct *t, int sig)
74527 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
74528 {
74529 return t->sighand->action[sig - 1].sa.sa_handler;
74530 }
74531
74532 -static int sig_handler_ignored(void __user *handler, int sig)
74533 +static int sig_handler_ignored(__sighandler_t handler, int sig)
74534 {
74535 /* Is it explicitly or implicitly ignored? */
74536 return handler == SIG_IGN ||
74537 @@ -56,7 +56,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
74538 static int sig_task_ignored(struct task_struct *t, int sig,
74539 int from_ancestor_ns)
74540 {
74541 - void __user *handler;
74542 + __sighandler_t handler;
74543
74544 handler = sig_handler(t, sig);
74545
74546 @@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
74547 */
74548 user = get_uid(__task_cred(t)->user);
74549 atomic_inc(&user->sigpending);
74550 +
74551 + if (!override_rlimit)
74552 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
74553 if (override_rlimit ||
74554 atomic_read(&user->sigpending) <=
74555 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
74556 @@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
74557
74558 int unhandled_signal(struct task_struct *tsk, int sig)
74559 {
74560 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
74561 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
74562 if (is_global_init(tsk))
74563 return 1;
74564 if (handler != SIG_IGN && handler != SIG_DFL)
74565 @@ -627,6 +630,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
74566 }
74567 }
74568
74569 + /* allow glibc communication via tgkill to other threads in our
74570 + thread group */
74571 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
74572 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
74573 + && gr_handle_signal(t, sig))
74574 + return -EPERM;
74575 +
74576 return security_task_kill(t, info, sig, 0);
74577 }
74578
74579 @@ -968,7 +978,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
74580 return send_signal(sig, info, p, 1);
74581 }
74582
74583 -static int
74584 +int
74585 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
74586 {
74587 return send_signal(sig, info, t, 0);
74588 @@ -1005,6 +1015,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
74589 unsigned long int flags;
74590 int ret, blocked, ignored;
74591 struct k_sigaction *action;
74592 + int is_unhandled = 0;
74593
74594 spin_lock_irqsave(&t->sighand->siglock, flags);
74595 action = &t->sighand->action[sig-1];
74596 @@ -1019,9 +1030,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
74597 }
74598 if (action->sa.sa_handler == SIG_DFL)
74599 t->signal->flags &= ~SIGNAL_UNKILLABLE;
74600 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
74601 + is_unhandled = 1;
74602 ret = specific_send_sig_info(sig, info, t);
74603 spin_unlock_irqrestore(&t->sighand->siglock, flags);
74604
74605 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
74606 + normal operation */
74607 + if (is_unhandled) {
74608 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
74609 + gr_handle_crash(t, sig);
74610 + }
74611 +
74612 return ret;
74613 }
74614
74615 @@ -1081,8 +1101,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
74616 {
74617 int ret = check_kill_permission(sig, info, p);
74618
74619 - if (!ret && sig)
74620 + if (!ret && sig) {
74621 ret = do_send_sig_info(sig, info, p, true);
74622 + if (!ret)
74623 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
74624 + }
74625
74626 return ret;
74627 }
74628 @@ -1644,6 +1667,8 @@ void ptrace_notify(int exit_code)
74629 {
74630 siginfo_t info;
74631
74632 + pax_track_stack();
74633 +
74634 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
74635
74636 memset(&info, 0, sizeof info);
74637 @@ -2275,7 +2300,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
74638 int error = -ESRCH;
74639
74640 rcu_read_lock();
74641 - p = find_task_by_vpid(pid);
74642 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
74643 + /* allow glibc communication via tgkill to other threads in our
74644 + thread group */
74645 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
74646 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
74647 + p = find_task_by_vpid_unrestricted(pid);
74648 + else
74649 +#endif
74650 + p = find_task_by_vpid(pid);
74651 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
74652 error = check_kill_permission(sig, info, p);
74653 /*
74654 diff --git a/kernel/smp.c b/kernel/smp.c
74655 index aa9cff3..631a0de 100644
74656 --- a/kernel/smp.c
74657 +++ b/kernel/smp.c
74658 @@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void *), void *info, int wait)
74659 }
74660 EXPORT_SYMBOL(smp_call_function);
74661
74662 -void ipi_call_lock(void)
74663 +void ipi_call_lock(void) __acquires(call_function.lock)
74664 {
74665 spin_lock(&call_function.lock);
74666 }
74667
74668 -void ipi_call_unlock(void)
74669 +void ipi_call_unlock(void) __releases(call_function.lock)
74670 {
74671 spin_unlock(&call_function.lock);
74672 }
74673
74674 -void ipi_call_lock_irq(void)
74675 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
74676 {
74677 spin_lock_irq(&call_function.lock);
74678 }
74679
74680 -void ipi_call_unlock_irq(void)
74681 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
74682 {
74683 spin_unlock_irq(&call_function.lock);
74684 }
74685 diff --git a/kernel/softirq.c b/kernel/softirq.c
74686 index 04a0252..580c512 100644
74687 --- a/kernel/softirq.c
74688 +++ b/kernel/softirq.c
74689 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
74690
74691 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
74692
74693 -char *softirq_to_name[NR_SOFTIRQS] = {
74694 +const char * const softirq_to_name[NR_SOFTIRQS] = {
74695 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
74696 "TASKLET", "SCHED", "HRTIMER", "RCU"
74697 };
74698 @@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
74699
74700 asmlinkage void __do_softirq(void)
74701 {
74702 - struct softirq_action *h;
74703 + const struct softirq_action *h;
74704 __u32 pending;
74705 int max_restart = MAX_SOFTIRQ_RESTART;
74706 int cpu;
74707 @@ -233,7 +233,7 @@ restart:
74708 kstat_incr_softirqs_this_cpu(h - softirq_vec);
74709
74710 trace_softirq_entry(h, softirq_vec);
74711 - h->action(h);
74712 + h->action();
74713 trace_softirq_exit(h, softirq_vec);
74714 if (unlikely(prev_count != preempt_count())) {
74715 printk(KERN_ERR "huh, entered softirq %td %s %p"
74716 @@ -363,9 +363,11 @@ void raise_softirq(unsigned int nr)
74717 local_irq_restore(flags);
74718 }
74719
74720 -void open_softirq(int nr, void (*action)(struct softirq_action *))
74721 +void open_softirq(int nr, void (*action)(void))
74722 {
74723 - softirq_vec[nr].action = action;
74724 + pax_open_kernel();
74725 + *(void **)&softirq_vec[nr].action = action;
74726 + pax_close_kernel();
74727 }
74728
74729 /*
74730 @@ -419,7 +421,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
74731
74732 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
74733
74734 -static void tasklet_action(struct softirq_action *a)
74735 +static void tasklet_action(void)
74736 {
74737 struct tasklet_struct *list;
74738
74739 @@ -454,7 +456,7 @@ static void tasklet_action(struct softirq_action *a)
74740 }
74741 }
74742
74743 -static void tasklet_hi_action(struct softirq_action *a)
74744 +static void tasklet_hi_action(void)
74745 {
74746 struct tasklet_struct *list;
74747
74748 diff --git a/kernel/sys.c b/kernel/sys.c
74749 index e9512b1..3c265de 100644
74750 --- a/kernel/sys.c
74751 +++ b/kernel/sys.c
74752 @@ -133,6 +133,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
74753 error = -EACCES;
74754 goto out;
74755 }
74756 +
74757 + if (gr_handle_chroot_setpriority(p, niceval)) {
74758 + error = -EACCES;
74759 + goto out;
74760 + }
74761 +
74762 no_nice = security_task_setnice(p, niceval);
74763 if (no_nice) {
74764 error = no_nice;
74765 @@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
74766 !(user = find_user(who)))
74767 goto out_unlock; /* No processes for this user */
74768
74769 - do_each_thread(g, p)
74770 + do_each_thread(g, p) {
74771 if (__task_cred(p)->uid == who)
74772 error = set_one_prio(p, niceval, error);
74773 - while_each_thread(g, p);
74774 + } while_each_thread(g, p);
74775 if (who != cred->uid)
74776 free_uid(user); /* For find_user() */
74777 break;
74778 @@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
74779 !(user = find_user(who)))
74780 goto out_unlock; /* No processes for this user */
74781
74782 - do_each_thread(g, p)
74783 + do_each_thread(g, p) {
74784 if (__task_cred(p)->uid == who) {
74785 niceval = 20 - task_nice(p);
74786 if (niceval > retval)
74787 retval = niceval;
74788 }
74789 - while_each_thread(g, p);
74790 + } while_each_thread(g, p);
74791 if (who != cred->uid)
74792 free_uid(user); /* for find_user() */
74793 break;
74794 @@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
74795 goto error;
74796 }
74797
74798 + if (gr_check_group_change(new->gid, new->egid, -1))
74799 + goto error;
74800 +
74801 if (rgid != (gid_t) -1 ||
74802 (egid != (gid_t) -1 && egid != old->gid))
74803 new->sgid = new->egid;
74804 @@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
74805 goto error;
74806
74807 retval = -EPERM;
74808 +
74809 + if (gr_check_group_change(gid, gid, gid))
74810 + goto error;
74811 +
74812 if (capable(CAP_SETGID))
74813 new->gid = new->egid = new->sgid = new->fsgid = gid;
74814 else if (gid == old->gid || gid == old->sgid)
74815 @@ -567,12 +580,19 @@ static int set_user(struct cred *new)
74816 if (!new_user)
74817 return -EAGAIN;
74818
74819 + /*
74820 + * We don't fail in case of NPROC limit excess here because too many
74821 + * poorly written programs don't check set*uid() return code, assuming
74822 + * it never fails if called by root. We may still enforce NPROC limit
74823 + * for programs doing set*uid()+execve() by harmlessly deferring the
74824 + * failure to the execve() stage.
74825 + */
74826 if (atomic_read(&new_user->processes) >=
74827 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
74828 - new_user != INIT_USER) {
74829 - free_uid(new_user);
74830 - return -EAGAIN;
74831 - }
74832 + new_user != INIT_USER)
74833 + current->flags |= PF_NPROC_EXCEEDED;
74834 + else
74835 + current->flags &= ~PF_NPROC_EXCEEDED;
74836
74837 free_uid(new->user);
74838 new->user = new_user;
74839 @@ -627,6 +647,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
74840 goto error;
74841 }
74842
74843 + if (gr_check_user_change(new->uid, new->euid, -1))
74844 + goto error;
74845 +
74846 if (new->uid != old->uid) {
74847 retval = set_user(new);
74848 if (retval < 0)
74849 @@ -675,6 +698,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
74850 goto error;
74851
74852 retval = -EPERM;
74853 +
74854 + if (gr_check_crash_uid(uid))
74855 + goto error;
74856 + if (gr_check_user_change(uid, uid, uid))
74857 + goto error;
74858 +
74859 if (capable(CAP_SETUID)) {
74860 new->suid = new->uid = uid;
74861 if (uid != old->uid) {
74862 @@ -732,6 +761,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
74863 goto error;
74864 }
74865
74866 + if (gr_check_user_change(ruid, euid, -1))
74867 + goto error;
74868 +
74869 if (ruid != (uid_t) -1) {
74870 new->uid = ruid;
74871 if (ruid != old->uid) {
74872 @@ -800,6 +832,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
74873 goto error;
74874 }
74875
74876 + if (gr_check_group_change(rgid, egid, -1))
74877 + goto error;
74878 +
74879 if (rgid != (gid_t) -1)
74880 new->gid = rgid;
74881 if (egid != (gid_t) -1)
74882 @@ -849,6 +884,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
74883 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
74884 goto error;
74885
74886 + if (gr_check_user_change(-1, -1, uid))
74887 + goto error;
74888 +
74889 if (uid == old->uid || uid == old->euid ||
74890 uid == old->suid || uid == old->fsuid ||
74891 capable(CAP_SETUID)) {
74892 @@ -889,6 +927,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
74893 if (gid == old->gid || gid == old->egid ||
74894 gid == old->sgid || gid == old->fsgid ||
74895 capable(CAP_SETGID)) {
74896 + if (gr_check_group_change(-1, -1, gid))
74897 + goto error;
74898 +
74899 if (gid != old_fsgid) {
74900 new->fsgid = gid;
74901 goto change_okay;
74902 @@ -1454,7 +1495,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
74903 error = get_dumpable(me->mm);
74904 break;
74905 case PR_SET_DUMPABLE:
74906 - if (arg2 < 0 || arg2 > 1) {
74907 + if (arg2 > 1) {
74908 error = -EINVAL;
74909 break;
74910 }
74911 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
74912 index b8bd058..ab6a76be 100644
74913 --- a/kernel/sysctl.c
74914 +++ b/kernel/sysctl.c
74915 @@ -63,6 +63,13 @@
74916 static int deprecated_sysctl_warning(struct __sysctl_args *args);
74917
74918 #if defined(CONFIG_SYSCTL)
74919 +#include <linux/grsecurity.h>
74920 +#include <linux/grinternal.h>
74921 +
74922 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
74923 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
74924 + const int op);
74925 +extern int gr_handle_chroot_sysctl(const int op);
74926
74927 /* External variables not in a header file. */
74928 extern int C_A_D;
74929 @@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_table *table, int write,
74930 static int proc_taint(struct ctl_table *table, int write,
74931 void __user *buffer, size_t *lenp, loff_t *ppos);
74932 #endif
74933 +extern ctl_table grsecurity_table[];
74934
74935 static struct ctl_table root_table[];
74936 static struct ctl_table_root sysctl_table_root;
74937 @@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
74938 int sysctl_legacy_va_layout;
74939 #endif
74940
74941 +#ifdef CONFIG_PAX_SOFTMODE
74942 +static ctl_table pax_table[] = {
74943 + {
74944 + .ctl_name = CTL_UNNUMBERED,
74945 + .procname = "softmode",
74946 + .data = &pax_softmode,
74947 + .maxlen = sizeof(unsigned int),
74948 + .mode = 0600,
74949 + .proc_handler = &proc_dointvec,
74950 + },
74951 +
74952 + { .ctl_name = 0 }
74953 +};
74954 +#endif
74955 +
74956 extern int prove_locking;
74957 extern int lock_stat;
74958
74959 @@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
74960 #endif
74961
74962 static struct ctl_table kern_table[] = {
74963 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
74964 + {
74965 + .ctl_name = CTL_UNNUMBERED,
74966 + .procname = "grsecurity",
74967 + .mode = 0500,
74968 + .child = grsecurity_table,
74969 + },
74970 +#endif
74971 +
74972 +#ifdef CONFIG_PAX_SOFTMODE
74973 + {
74974 + .ctl_name = CTL_UNNUMBERED,
74975 + .procname = "pax",
74976 + .mode = 0500,
74977 + .child = pax_table,
74978 + },
74979 +#endif
74980 +
74981 {
74982 .ctl_name = CTL_UNNUMBERED,
74983 .procname = "sched_child_runs_first",
74984 @@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
74985 .data = &modprobe_path,
74986 .maxlen = KMOD_PATH_LEN,
74987 .mode = 0644,
74988 - .proc_handler = &proc_dostring,
74989 - .strategy = &sysctl_string,
74990 + .proc_handler = &proc_dostring_modpriv,
74991 + .strategy = &sysctl_string_modpriv,
74992 },
74993 {
74994 .ctl_name = CTL_UNNUMBERED,
74995 @@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
74996 .mode = 0644,
74997 .proc_handler = &proc_dointvec
74998 },
74999 + {
75000 + .procname = "heap_stack_gap",
75001 + .data = &sysctl_heap_stack_gap,
75002 + .maxlen = sizeof(sysctl_heap_stack_gap),
75003 + .mode = 0644,
75004 + .proc_handler = proc_doulongvec_minmax,
75005 + },
75006 #else
75007 {
75008 .ctl_name = CTL_UNNUMBERED,
75009 @@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl_table_root *root,
75010 return 0;
75011 }
75012
75013 +static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
75014 +
75015 static int parse_table(int __user *name, int nlen,
75016 void __user *oldval, size_t __user *oldlenp,
75017 void __user *newval, size_t newlen,
75018 @@ -1821,7 +1871,7 @@ repeat:
75019 if (n == table->ctl_name) {
75020 int error;
75021 if (table->child) {
75022 - if (sysctl_perm(root, table, MAY_EXEC))
75023 + if (sysctl_perm_nochk(root, table, MAY_EXEC))
75024 return -EPERM;
75025 name++;
75026 nlen--;
75027 @@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
75028 int error;
75029 int mode;
75030
75031 + if (table->parent != NULL && table->parent->procname != NULL &&
75032 + table->procname != NULL &&
75033 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
75034 + return -EACCES;
75035 + if (gr_handle_chroot_sysctl(op))
75036 + return -EACCES;
75037 + error = gr_handle_sysctl(table, op);
75038 + if (error)
75039 + return error;
75040 +
75041 + error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
75042 + if (error)
75043 + return error;
75044 +
75045 + if (root->permissions)
75046 + mode = root->permissions(root, current->nsproxy, table);
75047 + else
75048 + mode = table->mode;
75049 +
75050 + return test_perm(mode, op);
75051 +}
75052 +
75053 +int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
75054 +{
75055 + int error;
75056 + int mode;
75057 +
75058 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
75059 if (error)
75060 return error;
75061 @@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *table, int write,
75062 buffer, lenp, ppos);
75063 }
75064
75065 +int proc_dostring_modpriv(struct ctl_table *table, int write,
75066 + void __user *buffer, size_t *lenp, loff_t *ppos)
75067 +{
75068 + if (write && !capable(CAP_SYS_MODULE))
75069 + return -EPERM;
75070 +
75071 + return _proc_do_string(table->data, table->maxlen, write,
75072 + buffer, lenp, ppos);
75073 +}
75074 +
75075
75076 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
75077 int *valp,
75078 @@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
75079 vleft = table->maxlen / sizeof(unsigned long);
75080 left = *lenp;
75081
75082 - for (; left && vleft--; i++, min++, max++, first=0) {
75083 + for (; left && vleft--; i++, first=0) {
75084 if (write) {
75085 while (left) {
75086 char c;
75087 @@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *table, int write,
75088 return -ENOSYS;
75089 }
75090
75091 +int proc_dostring_modpriv(struct ctl_table *table, int write,
75092 + void __user *buffer, size_t *lenp, loff_t *ppos)
75093 +{
75094 + return -ENOSYS;
75095 +}
75096 +
75097 int proc_dointvec(struct ctl_table *table, int write,
75098 void __user *buffer, size_t *lenp, loff_t *ppos)
75099 {
75100 @@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *table,
75101 return 1;
75102 }
75103
75104 +int sysctl_string_modpriv(struct ctl_table *table,
75105 + void __user *oldval, size_t __user *oldlenp,
75106 + void __user *newval, size_t newlen)
75107 +{
75108 + if (newval && newlen && !capable(CAP_SYS_MODULE))
75109 + return -EPERM;
75110 +
75111 + return sysctl_string(table, oldval, oldlenp, newval, newlen);
75112 +}
75113 +
75114 /*
75115 * This function makes sure that all of the integers in the vector
75116 * are between the minimum and maximum values given in the arrays
75117 @@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *table,
75118 return -ENOSYS;
75119 }
75120
75121 +int sysctl_string_modpriv(struct ctl_table *table,
75122 + void __user *oldval, size_t __user *oldlenp,
75123 + void __user *newval, size_t newlen)
75124 +{
75125 + return -ENOSYS;
75126 +}
75127 +
75128 int sysctl_intvec(struct ctl_table *table,
75129 void __user *oldval, size_t __user *oldlenp,
75130 void __user *newval, size_t newlen)
75131 @@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
75132 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
75133 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
75134 EXPORT_SYMBOL(proc_dostring);
75135 +EXPORT_SYMBOL(proc_dostring_modpriv);
75136 EXPORT_SYMBOL(proc_doulongvec_minmax);
75137 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
75138 EXPORT_SYMBOL(register_sysctl_table);
75139 @@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
75140 EXPORT_SYMBOL(sysctl_jiffies);
75141 EXPORT_SYMBOL(sysctl_ms_jiffies);
75142 EXPORT_SYMBOL(sysctl_string);
75143 +EXPORT_SYMBOL(sysctl_string_modpriv);
75144 EXPORT_SYMBOL(sysctl_data);
75145 EXPORT_SYMBOL(unregister_sysctl_table);
75146 diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
75147 index 469193c..ea3ecb2 100644
75148 --- a/kernel/sysctl_check.c
75149 +++ b/kernel/sysctl_check.c
75150 @@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
75151 } else {
75152 if ((table->strategy == sysctl_data) ||
75153 (table->strategy == sysctl_string) ||
75154 + (table->strategy == sysctl_string_modpriv) ||
75155 (table->strategy == sysctl_intvec) ||
75156 (table->strategy == sysctl_jiffies) ||
75157 (table->strategy == sysctl_ms_jiffies) ||
75158 (table->proc_handler == proc_dostring) ||
75159 + (table->proc_handler == proc_dostring_modpriv) ||
75160 (table->proc_handler == proc_dointvec) ||
75161 (table->proc_handler == proc_dointvec_minmax) ||
75162 (table->proc_handler == proc_dointvec_jiffies) ||
75163 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
75164 index a4ef542..798bcd7 100644
75165 --- a/kernel/taskstats.c
75166 +++ b/kernel/taskstats.c
75167 @@ -26,9 +26,12 @@
75168 #include <linux/cgroup.h>
75169 #include <linux/fs.h>
75170 #include <linux/file.h>
75171 +#include <linux/grsecurity.h>
75172 #include <net/genetlink.h>
75173 #include <asm/atomic.h>
75174
75175 +extern int gr_is_taskstats_denied(int pid);
75176 +
75177 /*
75178 * Maximum length of a cpumask that can be specified in
75179 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
75180 @@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
75181 size_t size;
75182 cpumask_var_t mask;
75183
75184 + if (gr_is_taskstats_denied(current->pid))
75185 + return -EACCES;
75186 +
75187 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
75188 return -ENOMEM;
75189
75190 diff --git a/kernel/time.c b/kernel/time.c
75191 index 33df60e..ca768bd 100644
75192 --- a/kernel/time.c
75193 +++ b/kernel/time.c
75194 @@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
75195 return error;
75196
75197 if (tz) {
75198 + /* we log in do_settimeofday called below, so don't log twice
75199 + */
75200 + if (!tv)
75201 + gr_log_timechange();
75202 +
75203 /* SMP safe, global irq locking makes it work. */
75204 sys_tz = *tz;
75205 update_vsyscall_tz();
75206 @@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
75207 * Avoid unnecessary multiplications/divisions in the
75208 * two most common HZ cases:
75209 */
75210 -unsigned int inline jiffies_to_msecs(const unsigned long j)
75211 +inline unsigned int jiffies_to_msecs(const unsigned long j)
75212 {
75213 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
75214 return (MSEC_PER_SEC / HZ) * j;
75215 @@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(const unsigned long j)
75216 }
75217 EXPORT_SYMBOL(jiffies_to_msecs);
75218
75219 -unsigned int inline jiffies_to_usecs(const unsigned long j)
75220 +inline unsigned int jiffies_to_usecs(const unsigned long j)
75221 {
75222 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
75223 return (USEC_PER_SEC / HZ) * j;
75224 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
75225 index 57b953f..06f149f 100644
75226 --- a/kernel/time/tick-broadcast.c
75227 +++ b/kernel/time/tick-broadcast.c
75228 @@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
75229 * then clear the broadcast bit.
75230 */
75231 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
75232 - int cpu = smp_processor_id();
75233 + cpu = smp_processor_id();
75234
75235 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
75236 tick_broadcast_clear_oneshot(cpu);
75237 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
75238 index 4a71cff..ffb5548 100644
75239 --- a/kernel/time/timekeeping.c
75240 +++ b/kernel/time/timekeeping.c
75241 @@ -14,6 +14,7 @@
75242 #include <linux/init.h>
75243 #include <linux/mm.h>
75244 #include <linux/sched.h>
75245 +#include <linux/grsecurity.h>
75246 #include <linux/sysdev.h>
75247 #include <linux/clocksource.h>
75248 #include <linux/jiffies.h>
75249 @@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
75250 */
75251 struct timespec ts = xtime;
75252 timespec_add_ns(&ts, nsec);
75253 - ACCESS_ONCE(xtime_cache) = ts;
75254 + ACCESS_ONCE_RW(xtime_cache) = ts;
75255 }
75256
75257 /* must hold xtime_lock */
75258 @@ -337,6 +338,8 @@ int do_settimeofday(struct timespec *tv)
75259 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
75260 return -EINVAL;
75261
75262 + gr_log_timechange();
75263 +
75264 write_seqlock_irqsave(&xtime_lock, flags);
75265
75266 timekeeping_forward_now();
75267 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
75268 index 54c0dda..e9095d9 100644
75269 --- a/kernel/time/timer_list.c
75270 +++ b/kernel/time/timer_list.c
75271 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
75272
75273 static void print_name_offset(struct seq_file *m, void *sym)
75274 {
75275 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75276 + SEQ_printf(m, "<%p>", NULL);
75277 +#else
75278 char symname[KSYM_NAME_LEN];
75279
75280 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
75281 SEQ_printf(m, "<%p>", sym);
75282 else
75283 SEQ_printf(m, "%s", symname);
75284 +#endif
75285 }
75286
75287 static void
75288 @@ -112,7 +116,11 @@ next_one:
75289 static void
75290 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
75291 {
75292 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75293 + SEQ_printf(m, " .base: %p\n", NULL);
75294 +#else
75295 SEQ_printf(m, " .base: %p\n", base);
75296 +#endif
75297 SEQ_printf(m, " .index: %d\n",
75298 base->index);
75299 SEQ_printf(m, " .resolution: %Lu nsecs\n",
75300 @@ -289,7 +297,11 @@ static int __init init_timer_list_procfs(void)
75301 {
75302 struct proc_dir_entry *pe;
75303
75304 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
75305 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
75306 +#else
75307 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
75308 +#endif
75309 if (!pe)
75310 return -ENOMEM;
75311 return 0;
75312 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
75313 index ee5681f..634089b 100644
75314 --- a/kernel/time/timer_stats.c
75315 +++ b/kernel/time/timer_stats.c
75316 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
75317 static unsigned long nr_entries;
75318 static struct entry entries[MAX_ENTRIES];
75319
75320 -static atomic_t overflow_count;
75321 +static atomic_unchecked_t overflow_count;
75322
75323 /*
75324 * The entries are in a hash-table, for fast lookup:
75325 @@ -140,7 +140,7 @@ static void reset_entries(void)
75326 nr_entries = 0;
75327 memset(entries, 0, sizeof(entries));
75328 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
75329 - atomic_set(&overflow_count, 0);
75330 + atomic_set_unchecked(&overflow_count, 0);
75331 }
75332
75333 static struct entry *alloc_entry(void)
75334 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
75335 if (likely(entry))
75336 entry->count++;
75337 else
75338 - atomic_inc(&overflow_count);
75339 + atomic_inc_unchecked(&overflow_count);
75340
75341 out_unlock:
75342 spin_unlock_irqrestore(lock, flags);
75343 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
75344
75345 static void print_name_offset(struct seq_file *m, unsigned long addr)
75346 {
75347 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75348 + seq_printf(m, "<%p>", NULL);
75349 +#else
75350 char symname[KSYM_NAME_LEN];
75351
75352 if (lookup_symbol_name(addr, symname) < 0)
75353 seq_printf(m, "<%p>", (void *)addr);
75354 else
75355 seq_printf(m, "%s", symname);
75356 +#endif
75357 }
75358
75359 static int tstats_show(struct seq_file *m, void *v)
75360 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
75361
75362 seq_puts(m, "Timer Stats Version: v0.2\n");
75363 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
75364 - if (atomic_read(&overflow_count))
75365 + if (atomic_read_unchecked(&overflow_count))
75366 seq_printf(m, "Overflow: %d entries\n",
75367 - atomic_read(&overflow_count));
75368 + atomic_read_unchecked(&overflow_count));
75369
75370 for (i = 0; i < nr_entries; i++) {
75371 entry = entries + i;
75372 @@ -415,7 +419,11 @@ static int __init init_tstats_procfs(void)
75373 {
75374 struct proc_dir_entry *pe;
75375
75376 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
75377 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
75378 +#else
75379 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
75380 +#endif
75381 if (!pe)
75382 return -ENOMEM;
75383 return 0;
75384 diff --git a/kernel/timer.c b/kernel/timer.c
75385 index cb3c1f1..8bf5526 100644
75386 --- a/kernel/timer.c
75387 +++ b/kernel/timer.c
75388 @@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
75389 /*
75390 * This function runs timers and the timer-tq in bottom half context.
75391 */
75392 -static void run_timer_softirq(struct softirq_action *h)
75393 +static void run_timer_softirq(void)
75394 {
75395 struct tvec_base *base = __get_cpu_var(tvec_bases);
75396
75397 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
75398 index d9d6206..f19467e 100644
75399 --- a/kernel/trace/blktrace.c
75400 +++ b/kernel/trace/blktrace.c
75401 @@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
75402 struct blk_trace *bt = filp->private_data;
75403 char buf[16];
75404
75405 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
75406 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
75407
75408 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
75409 }
75410 @@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
75411 return 1;
75412
75413 bt = buf->chan->private_data;
75414 - atomic_inc(&bt->dropped);
75415 + atomic_inc_unchecked(&bt->dropped);
75416 return 0;
75417 }
75418
75419 @@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
75420
75421 bt->dir = dir;
75422 bt->dev = dev;
75423 - atomic_set(&bt->dropped, 0);
75424 + atomic_set_unchecked(&bt->dropped, 0);
75425
75426 ret = -EIO;
75427 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
75428 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
75429 index 4872937..c794d40 100644
75430 --- a/kernel/trace/ftrace.c
75431 +++ b/kernel/trace/ftrace.c
75432 @@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
75433
75434 ip = rec->ip;
75435
75436 + ret = ftrace_arch_code_modify_prepare();
75437 + FTRACE_WARN_ON(ret);
75438 + if (ret)
75439 + return 0;
75440 +
75441 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
75442 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
75443 if (ret) {
75444 ftrace_bug(ret, ip);
75445 rec->flags |= FTRACE_FL_FAILED;
75446 - return 0;
75447 }
75448 - return 1;
75449 + return ret ? 0 : 1;
75450 }
75451
75452 /*
75453 diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
75454 index e749a05..19c6e94 100644
75455 --- a/kernel/trace/ring_buffer.c
75456 +++ b/kernel/trace/ring_buffer.c
75457 @@ -606,7 +606,7 @@ static struct list_head *rb_list_head(struct list_head *list)
75458 * the reader page). But if the next page is a header page,
75459 * its flags will be non zero.
75460 */
75461 -static int inline
75462 +static inline int
75463 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
75464 struct buffer_page *page, struct list_head *list)
75465 {
75466 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
75467 index a2a2d1f..7f32b09 100644
75468 --- a/kernel/trace/trace.c
75469 +++ b/kernel/trace/trace.c
75470 @@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
75471 size_t rem;
75472 unsigned int i;
75473
75474 + pax_track_stack();
75475 +
75476 /* copy the tracer to avoid using a global lock all around */
75477 mutex_lock(&trace_types_lock);
75478 if (unlikely(old_tracer != current_trace && current_trace)) {
75479 @@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
75480 int entries, size, i;
75481 size_t ret;
75482
75483 + pax_track_stack();
75484 +
75485 if (*ppos & (PAGE_SIZE - 1)) {
75486 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
75487 return -EINVAL;
75488 @@ -3816,10 +3820,9 @@ static const struct file_operations tracing_dyn_info_fops = {
75489 };
75490 #endif
75491
75492 -static struct dentry *d_tracer;
75493 -
75494 struct dentry *tracing_init_dentry(void)
75495 {
75496 + static struct dentry *d_tracer;
75497 static int once;
75498
75499 if (d_tracer)
75500 @@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
75501 return d_tracer;
75502 }
75503
75504 -static struct dentry *d_percpu;
75505 -
75506 struct dentry *tracing_dentry_percpu(void)
75507 {
75508 + static struct dentry *d_percpu;
75509 static int once;
75510 struct dentry *d_tracer;
75511
75512 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
75513 index d128f65..f37b4af 100644
75514 --- a/kernel/trace/trace_events.c
75515 +++ b/kernel/trace/trace_events.c
75516 @@ -951,13 +951,10 @@ static LIST_HEAD(ftrace_module_file_list);
75517 * Modules must own their file_operations to keep up with
75518 * reference counting.
75519 */
75520 +
75521 struct ftrace_module_file_ops {
75522 struct list_head list;
75523 struct module *mod;
75524 - struct file_operations id;
75525 - struct file_operations enable;
75526 - struct file_operations format;
75527 - struct file_operations filter;
75528 };
75529
75530 static void remove_subsystem_dir(const char *name)
75531 @@ -1004,17 +1001,12 @@ trace_create_file_ops(struct module *mod)
75532
75533 file_ops->mod = mod;
75534
75535 - file_ops->id = ftrace_event_id_fops;
75536 - file_ops->id.owner = mod;
75537 -
75538 - file_ops->enable = ftrace_enable_fops;
75539 - file_ops->enable.owner = mod;
75540 -
75541 - file_ops->filter = ftrace_event_filter_fops;
75542 - file_ops->filter.owner = mod;
75543 -
75544 - file_ops->format = ftrace_event_format_fops;
75545 - file_ops->format.owner = mod;
75546 + pax_open_kernel();
75547 + *(void **)&mod->trace_id.owner = mod;
75548 + *(void **)&mod->trace_enable.owner = mod;
75549 + *(void **)&mod->trace_filter.owner = mod;
75550 + *(void **)&mod->trace_format.owner = mod;
75551 + pax_close_kernel();
75552
75553 list_add(&file_ops->list, &ftrace_module_file_list);
75554
75555 @@ -1063,8 +1055,8 @@ static void trace_module_add_events(struct module *mod)
75556 call->mod = mod;
75557 list_add(&call->list, &ftrace_events);
75558 event_create_dir(call, d_events,
75559 - &file_ops->id, &file_ops->enable,
75560 - &file_ops->filter, &file_ops->format);
75561 + &mod->trace_id, &mod->trace_enable,
75562 + &mod->trace_filter, &mod->trace_format);
75563 }
75564 }
75565
75566 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
75567 index 0acd834..b800b56 100644
75568 --- a/kernel/trace/trace_mmiotrace.c
75569 +++ b/kernel/trace/trace_mmiotrace.c
75570 @@ -23,7 +23,7 @@ struct header_iter {
75571 static struct trace_array *mmio_trace_array;
75572 static bool overrun_detected;
75573 static unsigned long prev_overruns;
75574 -static atomic_t dropped_count;
75575 +static atomic_unchecked_t dropped_count;
75576
75577 static void mmio_reset_data(struct trace_array *tr)
75578 {
75579 @@ -126,7 +126,7 @@ static void mmio_close(struct trace_iterator *iter)
75580
75581 static unsigned long count_overruns(struct trace_iterator *iter)
75582 {
75583 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
75584 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
75585 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
75586
75587 if (over > prev_overruns)
75588 @@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
75589 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
75590 sizeof(*entry), 0, pc);
75591 if (!event) {
75592 - atomic_inc(&dropped_count);
75593 + atomic_inc_unchecked(&dropped_count);
75594 return;
75595 }
75596 entry = ring_buffer_event_data(event);
75597 @@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
75598 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
75599 sizeof(*entry), 0, pc);
75600 if (!event) {
75601 - atomic_inc(&dropped_count);
75602 + atomic_inc_unchecked(&dropped_count);
75603 return;
75604 }
75605 entry = ring_buffer_event_data(event);
75606 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
75607 index b6c12c6..41fdc53 100644
75608 --- a/kernel/trace/trace_output.c
75609 +++ b/kernel/trace/trace_output.c
75610 @@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
75611 return 0;
75612 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
75613 if (!IS_ERR(p)) {
75614 - p = mangle_path(s->buffer + s->len, p, "\n");
75615 + p = mangle_path(s->buffer + s->len, p, "\n\\");
75616 if (p) {
75617 s->len = p - s->buffer;
75618 return 1;
75619 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
75620 index 8504ac7..ecf0adb 100644
75621 --- a/kernel/trace/trace_stack.c
75622 +++ b/kernel/trace/trace_stack.c
75623 @@ -50,7 +50,7 @@ static inline void check_stack(void)
75624 return;
75625
75626 /* we do not handle interrupt stacks yet */
75627 - if (!object_is_on_stack(&this_size))
75628 + if (!object_starts_on_stack(&this_size))
75629 return;
75630
75631 local_irq_save(flags);
75632 diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
75633 index 40cafb0..d5ead43 100644
75634 --- a/kernel/trace/trace_workqueue.c
75635 +++ b/kernel/trace/trace_workqueue.c
75636 @@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
75637 int cpu;
75638 pid_t pid;
75639 /* Can be inserted from interrupt or user context, need to be atomic */
75640 - atomic_t inserted;
75641 + atomic_unchecked_t inserted;
75642 /*
75643 * Don't need to be atomic, works are serialized in a single workqueue thread
75644 * on a single CPU.
75645 @@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
75646 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
75647 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
75648 if (node->pid == wq_thread->pid) {
75649 - atomic_inc(&node->inserted);
75650 + atomic_inc_unchecked(&node->inserted);
75651 goto found;
75652 }
75653 }
75654 @@ -205,7 +205,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
75655 tsk = get_pid_task(pid, PIDTYPE_PID);
75656 if (tsk) {
75657 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
75658 - atomic_read(&cws->inserted), cws->executed,
75659 + atomic_read_unchecked(&cws->inserted), cws->executed,
75660 tsk->comm);
75661 put_task_struct(tsk);
75662 }
75663 diff --git a/kernel/user.c b/kernel/user.c
75664 index 1b91701..8795237 100644
75665 --- a/kernel/user.c
75666 +++ b/kernel/user.c
75667 @@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
75668 spin_lock_irq(&uidhash_lock);
75669 up = uid_hash_find(uid, hashent);
75670 if (up) {
75671 + put_user_ns(ns);
75672 key_put(new->uid_keyring);
75673 key_put(new->session_keyring);
75674 kmem_cache_free(uid_cachep, new);
75675 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
75676 index 234ceb1..ad74049 100644
75677 --- a/lib/Kconfig.debug
75678 +++ b/lib/Kconfig.debug
75679 @@ -905,7 +905,7 @@ config LATENCYTOP
75680 select STACKTRACE
75681 select SCHEDSTATS
75682 select SCHED_DEBUG
75683 - depends on HAVE_LATENCYTOP_SUPPORT
75684 + depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
75685 help
75686 Enable this option if you want to use the LatencyTOP tool
75687 to find out which userspace is blocking on what kernel operations.
75688 diff --git a/lib/bitmap.c b/lib/bitmap.c
75689 index 7025658..8d14cab 100644
75690 --- a/lib/bitmap.c
75691 +++ b/lib/bitmap.c
75692 @@ -341,7 +341,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
75693 {
75694 int c, old_c, totaldigits, ndigits, nchunks, nbits;
75695 u32 chunk;
75696 - const char __user *ubuf = buf;
75697 + const char __user *ubuf = (const char __force_user *)buf;
75698
75699 bitmap_zero(maskp, nmaskbits);
75700
75701 @@ -426,7 +426,7 @@ int bitmap_parse_user(const char __user *ubuf,
75702 {
75703 if (!access_ok(VERIFY_READ, ubuf, ulen))
75704 return -EFAULT;
75705 - return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
75706 + return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
75707 }
75708 EXPORT_SYMBOL(bitmap_parse_user);
75709
75710 diff --git a/lib/bug.c b/lib/bug.c
75711 index 300e41a..2779eb0 100644
75712 --- a/lib/bug.c
75713 +++ b/lib/bug.c
75714 @@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
75715 return BUG_TRAP_TYPE_NONE;
75716
75717 bug = find_bug(bugaddr);
75718 + if (!bug)
75719 + return BUG_TRAP_TYPE_NONE;
75720
75721 printk(KERN_EMERG "------------[ cut here ]------------\n");
75722
75723 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
75724 index 2b413db..e21d207 100644
75725 --- a/lib/debugobjects.c
75726 +++ b/lib/debugobjects.c
75727 @@ -277,7 +277,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
75728 if (limit > 4)
75729 return;
75730
75731 - is_on_stack = object_is_on_stack(addr);
75732 + is_on_stack = object_starts_on_stack(addr);
75733 if (is_on_stack == onstack)
75734 return;
75735
75736 diff --git a/lib/devres.c b/lib/devres.c
75737 index 72c8909..7543868 100644
75738 --- a/lib/devres.c
75739 +++ b/lib/devres.c
75740 @@ -80,7 +80,7 @@ void devm_iounmap(struct device *dev, void __iomem *addr)
75741 {
75742 iounmap(addr);
75743 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
75744 - (void *)addr));
75745 + (void __force *)addr));
75746 }
75747 EXPORT_SYMBOL(devm_iounmap);
75748
75749 @@ -140,7 +140,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
75750 {
75751 ioport_unmap(addr);
75752 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
75753 - devm_ioport_map_match, (void *)addr));
75754 + devm_ioport_map_match, (void __force *)addr));
75755 }
75756 EXPORT_SYMBOL(devm_ioport_unmap);
75757
75758 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
75759 index 084e879..0674448 100644
75760 --- a/lib/dma-debug.c
75761 +++ b/lib/dma-debug.c
75762 @@ -861,7 +861,7 @@ out:
75763
75764 static void check_for_stack(struct device *dev, void *addr)
75765 {
75766 - if (object_is_on_stack(addr))
75767 + if (object_starts_on_stack(addr))
75768 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
75769 "stack [addr=%p]\n", addr);
75770 }
75771 diff --git a/lib/idr.c b/lib/idr.c
75772 index eda7ba3..915dfae 100644
75773 --- a/lib/idr.c
75774 +++ b/lib/idr.c
75775 @@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
75776 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
75777
75778 /* if already at the top layer, we need to grow */
75779 - if (id >= 1 << (idp->layers * IDR_BITS)) {
75780 + if (id >= (1 << (idp->layers * IDR_BITS))) {
75781 *starting_id = id;
75782 return IDR_NEED_TO_GROW;
75783 }
75784 diff --git a/lib/inflate.c b/lib/inflate.c
75785 index d102559..4215f31 100644
75786 --- a/lib/inflate.c
75787 +++ b/lib/inflate.c
75788 @@ -266,7 +266,7 @@ static void free(void *where)
75789 malloc_ptr = free_mem_ptr;
75790 }
75791 #else
75792 -#define malloc(a) kmalloc(a, GFP_KERNEL)
75793 +#define malloc(a) kmalloc((a), GFP_KERNEL)
75794 #define free(a) kfree(a)
75795 #endif
75796
75797 diff --git a/lib/kobject.c b/lib/kobject.c
75798 index b512b74..8115eb1 100644
75799 --- a/lib/kobject.c
75800 +++ b/lib/kobject.c
75801 @@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
75802 return ret;
75803 }
75804
75805 -struct sysfs_ops kobj_sysfs_ops = {
75806 +const struct sysfs_ops kobj_sysfs_ops = {
75807 .show = kobj_attr_show,
75808 .store = kobj_attr_store,
75809 };
75810 @@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
75811 * If the kset was not able to be created, NULL will be returned.
75812 */
75813 static struct kset *kset_create(const char *name,
75814 - struct kset_uevent_ops *uevent_ops,
75815 + const struct kset_uevent_ops *uevent_ops,
75816 struct kobject *parent_kobj)
75817 {
75818 struct kset *kset;
75819 @@ -832,7 +832,7 @@ static struct kset *kset_create(const char *name,
75820 * If the kset was not able to be created, NULL will be returned.
75821 */
75822 struct kset *kset_create_and_add(const char *name,
75823 - struct kset_uevent_ops *uevent_ops,
75824 + const struct kset_uevent_ops *uevent_ops,
75825 struct kobject *parent_kobj)
75826 {
75827 struct kset *kset;
75828 diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
75829 index 507b821..0bf8ed0 100644
75830 --- a/lib/kobject_uevent.c
75831 +++ b/lib/kobject_uevent.c
75832 @@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
75833 const char *subsystem;
75834 struct kobject *top_kobj;
75835 struct kset *kset;
75836 - struct kset_uevent_ops *uevent_ops;
75837 + const struct kset_uevent_ops *uevent_ops;
75838 u64 seq;
75839 int i = 0;
75840 int retval = 0;
75841 diff --git a/lib/kref.c b/lib/kref.c
75842 index 9ecd6e8..12c94c1 100644
75843 --- a/lib/kref.c
75844 +++ b/lib/kref.c
75845 @@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
75846 */
75847 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
75848 {
75849 - WARN_ON(release == NULL);
75850 + BUG_ON(release == NULL);
75851 WARN_ON(release == (void (*)(struct kref *))kfree);
75852
75853 if (atomic_dec_and_test(&kref->refcount)) {
75854 diff --git a/lib/parser.c b/lib/parser.c
75855 index b00d020..1b34325 100644
75856 --- a/lib/parser.c
75857 +++ b/lib/parser.c
75858 @@ -126,7 +126,7 @@ static int match_number(substring_t *s, int *result, int base)
75859 char *buf;
75860 int ret;
75861
75862 - buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
75863 + buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
75864 if (!buf)
75865 return -ENOMEM;
75866 memcpy(buf, s->from, s->to - s->from);
75867 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
75868 index 92cdd99..a8149d7 100644
75869 --- a/lib/radix-tree.c
75870 +++ b/lib/radix-tree.c
75871 @@ -81,7 +81,7 @@ struct radix_tree_preload {
75872 int nr;
75873 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
75874 };
75875 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
75876 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
75877
75878 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
75879 {
75880 diff --git a/lib/random32.c b/lib/random32.c
75881 index 217d5c4..45aba8a 100644
75882 --- a/lib/random32.c
75883 +++ b/lib/random32.c
75884 @@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *state)
75885 */
75886 static inline u32 __seed(u32 x, u32 m)
75887 {
75888 - return (x < m) ? x + m : x;
75889 + return (x <= m) ? x + m + 1 : x;
75890 }
75891
75892 /**
75893 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
75894 index 33bed5e..1477e46 100644
75895 --- a/lib/vsprintf.c
75896 +++ b/lib/vsprintf.c
75897 @@ -16,6 +16,9 @@
75898 * - scnprintf and vscnprintf
75899 */
75900
75901 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75902 +#define __INCLUDED_BY_HIDESYM 1
75903 +#endif
75904 #include <stdarg.h>
75905 #include <linux/module.h>
75906 #include <linux/types.h>
75907 @@ -546,12 +549,12 @@ static char *number(char *buf, char *end, unsigned long long num,
75908 return buf;
75909 }
75910
75911 -static char *string(char *buf, char *end, char *s, struct printf_spec spec)
75912 +static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
75913 {
75914 int len, i;
75915
75916 if ((unsigned long)s < PAGE_SIZE)
75917 - s = "<NULL>";
75918 + s = "(null)";
75919
75920 len = strnlen(s, spec.precision);
75921
75922 @@ -581,7 +584,7 @@ static char *symbol_string(char *buf, char *end, void *ptr,
75923 unsigned long value = (unsigned long) ptr;
75924 #ifdef CONFIG_KALLSYMS
75925 char sym[KSYM_SYMBOL_LEN];
75926 - if (ext != 'f' && ext != 's')
75927 + if (ext != 'f' && ext != 's' && ext != 'a')
75928 sprint_symbol(sym, value);
75929 else
75930 kallsyms_lookup(value, NULL, NULL, NULL, sym);
75931 @@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
75932 * - 'f' For simple symbolic function names without offset
75933 * - 'S' For symbolic direct pointers with offset
75934 * - 's' For symbolic direct pointers without offset
75935 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
75936 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
75937 * - 'R' For a struct resource pointer, it prints the range of
75938 * addresses (not the name nor the flags)
75939 * - 'M' For a 6-byte MAC address, it prints the address in the
75940 @@ -822,7 +827,7 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
75941 struct printf_spec spec)
75942 {
75943 if (!ptr)
75944 - return string(buf, end, "(null)", spec);
75945 + return string(buf, end, "(nil)", spec);
75946
75947 switch (*fmt) {
75948 case 'F':
75949 @@ -831,6 +836,14 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
75950 case 's':
75951 /* Fallthrough */
75952 case 'S':
75953 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75954 + break;
75955 +#else
75956 + return symbol_string(buf, end, ptr, spec, *fmt);
75957 +#endif
75958 + case 'a':
75959 + /* Fallthrough */
75960 + case 'A':
75961 return symbol_string(buf, end, ptr, spec, *fmt);
75962 case 'R':
75963 return resource_string(buf, end, ptr, spec);
75964 @@ -1445,7 +1458,7 @@ do { \
75965 size_t len;
75966 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
75967 || (unsigned long)save_str < PAGE_SIZE)
75968 - save_str = "<NULL>";
75969 + save_str = "(null)";
75970 len = strlen(save_str);
75971 if (str + len + 1 < end)
75972 memcpy(str, save_str, len + 1);
75973 @@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
75974 typeof(type) value; \
75975 if (sizeof(type) == 8) { \
75976 args = PTR_ALIGN(args, sizeof(u32)); \
75977 - *(u32 *)&value = *(u32 *)args; \
75978 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
75979 + *(u32 *)&value = *(const u32 *)args; \
75980 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
75981 } else { \
75982 args = PTR_ALIGN(args, sizeof(type)); \
75983 - value = *(typeof(type) *)args; \
75984 + value = *(const typeof(type) *)args; \
75985 } \
75986 args += sizeof(type); \
75987 value; \
75988 @@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
75989 const char *str_arg = args;
75990 size_t len = strlen(str_arg);
75991 args += len + 1;
75992 - str = string(str, end, (char *)str_arg, spec);
75993 + str = string(str, end, str_arg, spec);
75994 break;
75995 }
75996
75997 diff --git a/localversion-grsec b/localversion-grsec
75998 new file mode 100644
75999 index 0000000..7cd6065
76000 --- /dev/null
76001 +++ b/localversion-grsec
76002 @@ -0,0 +1 @@
76003 +-grsec
76004 diff --git a/mm/Kconfig b/mm/Kconfig
76005 index 2c19c0b..f3c3f83 100644
76006 --- a/mm/Kconfig
76007 +++ b/mm/Kconfig
76008 @@ -228,7 +228,7 @@ config KSM
76009 config DEFAULT_MMAP_MIN_ADDR
76010 int "Low address space to protect from user allocation"
76011 depends on MMU
76012 - default 4096
76013 + default 65536
76014 help
76015 This is the portion of low virtual memory which should be protected
76016 from userspace allocation. Keeping a user from writing to low pages
76017 diff --git a/mm/backing-dev.c b/mm/backing-dev.c
76018 index 67a33a5..094dcf1 100644
76019 --- a/mm/backing-dev.c
76020 +++ b/mm/backing-dev.c
76021 @@ -272,7 +272,7 @@ static void bdi_task_init(struct backing_dev_info *bdi,
76022 list_add_tail_rcu(&wb->list, &bdi->wb_list);
76023 spin_unlock(&bdi->wb_lock);
76024
76025 - tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
76026 + tsk->flags |= PF_SWAPWRITE;
76027 set_freezable();
76028
76029 /*
76030 @@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rcu_head *head)
76031 * Add the default flusher task that gets created for any bdi
76032 * that has dirty data pending writeout
76033 */
76034 -void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
76035 +static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
76036 {
76037 if (!bdi_cap_writeback_dirty(bdi))
76038 return;
76039 diff --git a/mm/filemap.c b/mm/filemap.c
76040 index a1fe378..e26702f 100644
76041 --- a/mm/filemap.c
76042 +++ b/mm/filemap.c
76043 @@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
76044 struct address_space *mapping = file->f_mapping;
76045
76046 if (!mapping->a_ops->readpage)
76047 - return -ENOEXEC;
76048 + return -ENODEV;
76049 file_accessed(file);
76050 vma->vm_ops = &generic_file_vm_ops;
76051 vma->vm_flags |= VM_CAN_NONLINEAR;
76052 @@ -2024,6 +2024,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
76053 *pos = i_size_read(inode);
76054
76055 if (limit != RLIM_INFINITY) {
76056 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
76057 if (*pos >= limit) {
76058 send_sig(SIGXFSZ, current, 0);
76059 return -EFBIG;
76060 diff --git a/mm/fremap.c b/mm/fremap.c
76061 index b6ec85a..a24ac22 100644
76062 --- a/mm/fremap.c
76063 +++ b/mm/fremap.c
76064 @@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
76065 retry:
76066 vma = find_vma(mm, start);
76067
76068 +#ifdef CONFIG_PAX_SEGMEXEC
76069 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
76070 + goto out;
76071 +#endif
76072 +
76073 /*
76074 * Make sure the vma is shared, that it supports prefaulting,
76075 * and that the remapped range is valid and fully within
76076 @@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
76077 /*
76078 * drop PG_Mlocked flag for over-mapped range
76079 */
76080 - unsigned int saved_flags = vma->vm_flags;
76081 + unsigned long saved_flags = vma->vm_flags;
76082 munlock_vma_pages_range(vma, start, start + size);
76083 vma->vm_flags = saved_flags;
76084 }
76085 diff --git a/mm/highmem.c b/mm/highmem.c
76086 index 9c1e627..5ca9447 100644
76087 --- a/mm/highmem.c
76088 +++ b/mm/highmem.c
76089 @@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
76090 * So no dangers, even with speculative execution.
76091 */
76092 page = pte_page(pkmap_page_table[i]);
76093 + pax_open_kernel();
76094 pte_clear(&init_mm, (unsigned long)page_address(page),
76095 &pkmap_page_table[i]);
76096 -
76097 + pax_close_kernel();
76098 set_page_address(page, NULL);
76099 need_flush = 1;
76100 }
76101 @@ -177,9 +178,11 @@ start:
76102 }
76103 }
76104 vaddr = PKMAP_ADDR(last_pkmap_nr);
76105 +
76106 + pax_open_kernel();
76107 set_pte_at(&init_mm, vaddr,
76108 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
76109 -
76110 + pax_close_kernel();
76111 pkmap_count[last_pkmap_nr] = 1;
76112 set_page_address(page, (void *)vaddr);
76113
76114 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
76115 index 5e1e508..ac70275 100644
76116 --- a/mm/hugetlb.c
76117 +++ b/mm/hugetlb.c
76118 @@ -869,6 +869,7 @@ free:
76119 list_del(&page->lru);
76120 enqueue_huge_page(h, page);
76121 }
76122 + spin_unlock(&hugetlb_lock);
76123
76124 /* Free unnecessary surplus pages to the buddy allocator */
76125 if (!list_empty(&surplus_list)) {
76126 @@ -1933,6 +1934,26 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
76127 return 1;
76128 }
76129
76130 +#ifdef CONFIG_PAX_SEGMEXEC
76131 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
76132 +{
76133 + struct mm_struct *mm = vma->vm_mm;
76134 + struct vm_area_struct *vma_m;
76135 + unsigned long address_m;
76136 + pte_t *ptep_m;
76137 +
76138 + vma_m = pax_find_mirror_vma(vma);
76139 + if (!vma_m)
76140 + return;
76141 +
76142 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
76143 + address_m = address + SEGMEXEC_TASK_SIZE;
76144 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
76145 + get_page(page_m);
76146 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
76147 +}
76148 +#endif
76149 +
76150 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
76151 unsigned long address, pte_t *ptep, pte_t pte,
76152 struct page *pagecache_page)
76153 @@ -2004,6 +2025,11 @@ retry_avoidcopy:
76154 huge_ptep_clear_flush(vma, address, ptep);
76155 set_huge_pte_at(mm, address, ptep,
76156 make_huge_pte(vma, new_page, 1));
76157 +
76158 +#ifdef CONFIG_PAX_SEGMEXEC
76159 + pax_mirror_huge_pte(vma, address, new_page);
76160 +#endif
76161 +
76162 /* Make the old page be freed below */
76163 new_page = old_page;
76164 }
76165 @@ -2135,6 +2161,10 @@ retry:
76166 && (vma->vm_flags & VM_SHARED)));
76167 set_huge_pte_at(mm, address, ptep, new_pte);
76168
76169 +#ifdef CONFIG_PAX_SEGMEXEC
76170 + pax_mirror_huge_pte(vma, address, page);
76171 +#endif
76172 +
76173 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
76174 /* Optimization, do the COW without a second fault */
76175 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
76176 @@ -2163,6 +2193,28 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
76177 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
76178 struct hstate *h = hstate_vma(vma);
76179
76180 +#ifdef CONFIG_PAX_SEGMEXEC
76181 + struct vm_area_struct *vma_m;
76182 +
76183 + vma_m = pax_find_mirror_vma(vma);
76184 + if (vma_m) {
76185 + unsigned long address_m;
76186 +
76187 + if (vma->vm_start > vma_m->vm_start) {
76188 + address_m = address;
76189 + address -= SEGMEXEC_TASK_SIZE;
76190 + vma = vma_m;
76191 + h = hstate_vma(vma);
76192 + } else
76193 + address_m = address + SEGMEXEC_TASK_SIZE;
76194 +
76195 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
76196 + return VM_FAULT_OOM;
76197 + address_m &= HPAGE_MASK;
76198 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
76199 + }
76200 +#endif
76201 +
76202 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
76203 if (!ptep)
76204 return VM_FAULT_OOM;
76205 diff --git a/mm/internal.h b/mm/internal.h
76206 index f03e8e2..7354343 100644
76207 --- a/mm/internal.h
76208 +++ b/mm/internal.h
76209 @@ -49,6 +49,7 @@ extern void putback_lru_page(struct page *page);
76210 * in mm/page_alloc.c
76211 */
76212 extern void __free_pages_bootmem(struct page *page, unsigned int order);
76213 +extern void free_compound_page(struct page *page);
76214 extern void prep_compound_page(struct page *page, unsigned long order);
76215
76216
76217 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
76218 index c346660..b47382f 100644
76219 --- a/mm/kmemleak.c
76220 +++ b/mm/kmemleak.c
76221 @@ -358,7 +358,7 @@ static void print_unreferenced(struct seq_file *seq,
76222
76223 for (i = 0; i < object->trace_len; i++) {
76224 void *ptr = (void *)object->trace[i];
76225 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
76226 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
76227 }
76228 }
76229
76230 diff --git a/mm/maccess.c b/mm/maccess.c
76231 index 9073695..1127f348 100644
76232 --- a/mm/maccess.c
76233 +++ b/mm/maccess.c
76234 @@ -14,7 +14,7 @@
76235 * Safely read from address @src to the buffer at @dst. If a kernel fault
76236 * happens, handle that and return -EFAULT.
76237 */
76238 -long probe_kernel_read(void *dst, void *src, size_t size)
76239 +long probe_kernel_read(void *dst, const void *src, size_t size)
76240 {
76241 long ret;
76242 mm_segment_t old_fs = get_fs();
76243 @@ -22,7 +22,7 @@ long probe_kernel_read(void *dst, void *src, size_t size)
76244 set_fs(KERNEL_DS);
76245 pagefault_disable();
76246 ret = __copy_from_user_inatomic(dst,
76247 - (__force const void __user *)src, size);
76248 + (const void __force_user *)src, size);
76249 pagefault_enable();
76250 set_fs(old_fs);
76251
76252 @@ -39,14 +39,14 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
76253 * Safely write to address @dst from the buffer at @src. If a kernel fault
76254 * happens, handle that and return -EFAULT.
76255 */
76256 -long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
76257 +long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
76258 {
76259 long ret;
76260 mm_segment_t old_fs = get_fs();
76261
76262 set_fs(KERNEL_DS);
76263 pagefault_disable();
76264 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
76265 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
76266 pagefault_enable();
76267 set_fs(old_fs);
76268
76269 diff --git a/mm/madvise.c b/mm/madvise.c
76270 index 35b1479..499f7d4 100644
76271 --- a/mm/madvise.c
76272 +++ b/mm/madvise.c
76273 @@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
76274 pgoff_t pgoff;
76275 unsigned long new_flags = vma->vm_flags;
76276
76277 +#ifdef CONFIG_PAX_SEGMEXEC
76278 + struct vm_area_struct *vma_m;
76279 +#endif
76280 +
76281 switch (behavior) {
76282 case MADV_NORMAL:
76283 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
76284 @@ -103,6 +107,13 @@ success:
76285 /*
76286 * vm_flags is protected by the mmap_sem held in write mode.
76287 */
76288 +
76289 +#ifdef CONFIG_PAX_SEGMEXEC
76290 + vma_m = pax_find_mirror_vma(vma);
76291 + if (vma_m)
76292 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
76293 +#endif
76294 +
76295 vma->vm_flags = new_flags;
76296
76297 out:
76298 @@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
76299 struct vm_area_struct ** prev,
76300 unsigned long start, unsigned long end)
76301 {
76302 +
76303 +#ifdef CONFIG_PAX_SEGMEXEC
76304 + struct vm_area_struct *vma_m;
76305 +#endif
76306 +
76307 *prev = vma;
76308 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
76309 return -EINVAL;
76310 @@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
76311 zap_page_range(vma, start, end - start, &details);
76312 } else
76313 zap_page_range(vma, start, end - start, NULL);
76314 +
76315 +#ifdef CONFIG_PAX_SEGMEXEC
76316 + vma_m = pax_find_mirror_vma(vma);
76317 + if (vma_m) {
76318 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
76319 + struct zap_details details = {
76320 + .nonlinear_vma = vma_m,
76321 + .last_index = ULONG_MAX,
76322 + };
76323 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
76324 + } else
76325 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
76326 + }
76327 +#endif
76328 +
76329 return 0;
76330 }
76331
76332 @@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
76333 if (end < start)
76334 goto out;
76335
76336 +#ifdef CONFIG_PAX_SEGMEXEC
76337 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
76338 + if (end > SEGMEXEC_TASK_SIZE)
76339 + goto out;
76340 + } else
76341 +#endif
76342 +
76343 + if (end > TASK_SIZE)
76344 + goto out;
76345 +
76346 error = 0;
76347 if (end == start)
76348 goto out;
76349 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
76350 index 8aeba53..b4a4198 100644
76351 --- a/mm/memory-failure.c
76352 +++ b/mm/memory-failure.c
76353 @@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
76354
76355 int sysctl_memory_failure_recovery __read_mostly = 1;
76356
76357 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
76358 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
76359
76360 /*
76361 * Send all the processes who have the page mapped an ``action optional''
76362 @@ -64,7 +64,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
76363 si.si_signo = SIGBUS;
76364 si.si_errno = 0;
76365 si.si_code = BUS_MCEERR_AO;
76366 - si.si_addr = (void *)addr;
76367 + si.si_addr = (void __user *)addr;
76368 #ifdef __ARCH_SI_TRAPNO
76369 si.si_trapno = trapno;
76370 #endif
76371 @@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn, int trapno, int ref)
76372 return 0;
76373 }
76374
76375 - atomic_long_add(1, &mce_bad_pages);
76376 + atomic_long_add_unchecked(1, &mce_bad_pages);
76377
76378 /*
76379 * We need/can do nothing about count=0 pages.
76380 diff --git a/mm/memory.c b/mm/memory.c
76381 index 6c836d3..48f3264 100644
76382 --- a/mm/memory.c
76383 +++ b/mm/memory.c
76384 @@ -187,8 +187,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
76385 return;
76386
76387 pmd = pmd_offset(pud, start);
76388 +
76389 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
76390 pud_clear(pud);
76391 pmd_free_tlb(tlb, pmd, start);
76392 +#endif
76393 +
76394 }
76395
76396 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
76397 @@ -219,9 +223,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
76398 if (end - 1 > ceiling - 1)
76399 return;
76400
76401 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
76402 pud = pud_offset(pgd, start);
76403 pgd_clear(pgd);
76404 pud_free_tlb(tlb, pud, start);
76405 +#endif
76406 +
76407 }
76408
76409 /*
76410 @@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
76411 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
76412 i = 0;
76413
76414 - do {
76415 + while (nr_pages) {
76416 struct vm_area_struct *vma;
76417
76418 - vma = find_extend_vma(mm, start);
76419 + vma = find_vma(mm, start);
76420 if (!vma && in_gate_area(tsk, start)) {
76421 unsigned long pg = start & PAGE_MASK;
76422 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
76423 @@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
76424 continue;
76425 }
76426
76427 - if (!vma ||
76428 + if (!vma || start < vma->vm_start ||
76429 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
76430 !(vm_flags & vma->vm_flags))
76431 return i ? : -EFAULT;
76432 @@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
76433 start += PAGE_SIZE;
76434 nr_pages--;
76435 } while (nr_pages && start < vma->vm_end);
76436 - } while (nr_pages);
76437 + }
76438 return i;
76439 }
76440
76441 @@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
76442 page_add_file_rmap(page);
76443 set_pte_at(mm, addr, pte, mk_pte(page, prot));
76444
76445 +#ifdef CONFIG_PAX_SEGMEXEC
76446 + pax_mirror_file_pte(vma, addr, page, ptl);
76447 +#endif
76448 +
76449 retval = 0;
76450 pte_unmap_unlock(pte, ptl);
76451 return retval;
76452 @@ -1560,10 +1571,22 @@ out:
76453 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
76454 struct page *page)
76455 {
76456 +
76457 +#ifdef CONFIG_PAX_SEGMEXEC
76458 + struct vm_area_struct *vma_m;
76459 +#endif
76460 +
76461 if (addr < vma->vm_start || addr >= vma->vm_end)
76462 return -EFAULT;
76463 if (!page_count(page))
76464 return -EINVAL;
76465 +
76466 +#ifdef CONFIG_PAX_SEGMEXEC
76467 + vma_m = pax_find_mirror_vma(vma);
76468 + if (vma_m)
76469 + vma_m->vm_flags |= VM_INSERTPAGE;
76470 +#endif
76471 +
76472 vma->vm_flags |= VM_INSERTPAGE;
76473 return insert_page(vma, addr, page, vma->vm_page_prot);
76474 }
76475 @@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
76476 unsigned long pfn)
76477 {
76478 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
76479 + BUG_ON(vma->vm_mirror);
76480
76481 if (addr < vma->vm_start || addr >= vma->vm_end)
76482 return -EFAULT;
76483 @@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
76484 copy_user_highpage(dst, src, va, vma);
76485 }
76486
76487 +#ifdef CONFIG_PAX_SEGMEXEC
76488 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
76489 +{
76490 + struct mm_struct *mm = vma->vm_mm;
76491 + spinlock_t *ptl;
76492 + pte_t *pte, entry;
76493 +
76494 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
76495 + entry = *pte;
76496 + if (!pte_present(entry)) {
76497 + if (!pte_none(entry)) {
76498 + BUG_ON(pte_file(entry));
76499 + free_swap_and_cache(pte_to_swp_entry(entry));
76500 + pte_clear_not_present_full(mm, address, pte, 0);
76501 + }
76502 + } else {
76503 + struct page *page;
76504 +
76505 + flush_cache_page(vma, address, pte_pfn(entry));
76506 + entry = ptep_clear_flush(vma, address, pte);
76507 + BUG_ON(pte_dirty(entry));
76508 + page = vm_normal_page(vma, address, entry);
76509 + if (page) {
76510 + update_hiwater_rss(mm);
76511 + if (PageAnon(page))
76512 + dec_mm_counter(mm, anon_rss);
76513 + else
76514 + dec_mm_counter(mm, file_rss);
76515 + page_remove_rmap(page);
76516 + page_cache_release(page);
76517 + }
76518 + }
76519 + pte_unmap_unlock(pte, ptl);
76520 +}
76521 +
76522 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
76523 + *
76524 + * the ptl of the lower mapped page is held on entry and is not released on exit
76525 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
76526 + */
76527 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
76528 +{
76529 + struct mm_struct *mm = vma->vm_mm;
76530 + unsigned long address_m;
76531 + spinlock_t *ptl_m;
76532 + struct vm_area_struct *vma_m;
76533 + pmd_t *pmd_m;
76534 + pte_t *pte_m, entry_m;
76535 +
76536 + BUG_ON(!page_m || !PageAnon(page_m));
76537 +
76538 + vma_m = pax_find_mirror_vma(vma);
76539 + if (!vma_m)
76540 + return;
76541 +
76542 + BUG_ON(!PageLocked(page_m));
76543 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
76544 + address_m = address + SEGMEXEC_TASK_SIZE;
76545 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
76546 + pte_m = pte_offset_map_nested(pmd_m, address_m);
76547 + ptl_m = pte_lockptr(mm, pmd_m);
76548 + if (ptl != ptl_m) {
76549 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
76550 + if (!pte_none(*pte_m))
76551 + goto out;
76552 + }
76553 +
76554 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
76555 + page_cache_get(page_m);
76556 + page_add_anon_rmap(page_m, vma_m, address_m);
76557 + inc_mm_counter(mm, anon_rss);
76558 + set_pte_at(mm, address_m, pte_m, entry_m);
76559 + update_mmu_cache(vma_m, address_m, entry_m);
76560 +out:
76561 + if (ptl != ptl_m)
76562 + spin_unlock(ptl_m);
76563 + pte_unmap_nested(pte_m);
76564 + unlock_page(page_m);
76565 +}
76566 +
76567 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
76568 +{
76569 + struct mm_struct *mm = vma->vm_mm;
76570 + unsigned long address_m;
76571 + spinlock_t *ptl_m;
76572 + struct vm_area_struct *vma_m;
76573 + pmd_t *pmd_m;
76574 + pte_t *pte_m, entry_m;
76575 +
76576 + BUG_ON(!page_m || PageAnon(page_m));
76577 +
76578 + vma_m = pax_find_mirror_vma(vma);
76579 + if (!vma_m)
76580 + return;
76581 +
76582 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
76583 + address_m = address + SEGMEXEC_TASK_SIZE;
76584 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
76585 + pte_m = pte_offset_map_nested(pmd_m, address_m);
76586 + ptl_m = pte_lockptr(mm, pmd_m);
76587 + if (ptl != ptl_m) {
76588 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
76589 + if (!pte_none(*pte_m))
76590 + goto out;
76591 + }
76592 +
76593 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
76594 + page_cache_get(page_m);
76595 + page_add_file_rmap(page_m);
76596 + inc_mm_counter(mm, file_rss);
76597 + set_pte_at(mm, address_m, pte_m, entry_m);
76598 + update_mmu_cache(vma_m, address_m, entry_m);
76599 +out:
76600 + if (ptl != ptl_m)
76601 + spin_unlock(ptl_m);
76602 + pte_unmap_nested(pte_m);
76603 +}
76604 +
76605 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
76606 +{
76607 + struct mm_struct *mm = vma->vm_mm;
76608 + unsigned long address_m;
76609 + spinlock_t *ptl_m;
76610 + struct vm_area_struct *vma_m;
76611 + pmd_t *pmd_m;
76612 + pte_t *pte_m, entry_m;
76613 +
76614 + vma_m = pax_find_mirror_vma(vma);
76615 + if (!vma_m)
76616 + return;
76617 +
76618 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
76619 + address_m = address + SEGMEXEC_TASK_SIZE;
76620 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
76621 + pte_m = pte_offset_map_nested(pmd_m, address_m);
76622 + ptl_m = pte_lockptr(mm, pmd_m);
76623 + if (ptl != ptl_m) {
76624 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
76625 + if (!pte_none(*pte_m))
76626 + goto out;
76627 + }
76628 +
76629 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
76630 + set_pte_at(mm, address_m, pte_m, entry_m);
76631 +out:
76632 + if (ptl != ptl_m)
76633 + spin_unlock(ptl_m);
76634 + pte_unmap_nested(pte_m);
76635 +}
76636 +
76637 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
76638 +{
76639 + struct page *page_m;
76640 + pte_t entry;
76641 +
76642 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
76643 + goto out;
76644 +
76645 + entry = *pte;
76646 + page_m = vm_normal_page(vma, address, entry);
76647 + if (!page_m)
76648 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
76649 + else if (PageAnon(page_m)) {
76650 + if (pax_find_mirror_vma(vma)) {
76651 + pte_unmap_unlock(pte, ptl);
76652 + lock_page(page_m);
76653 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
76654 + if (pte_same(entry, *pte))
76655 + pax_mirror_anon_pte(vma, address, page_m, ptl);
76656 + else
76657 + unlock_page(page_m);
76658 + }
76659 + } else
76660 + pax_mirror_file_pte(vma, address, page_m, ptl);
76661 +
76662 +out:
76663 + pte_unmap_unlock(pte, ptl);
76664 +}
76665 +#endif
76666 +
76667 /*
76668 * This routine handles present pages, when users try to write
76669 * to a shared page. It is done by copying the page to a new address
76670 @@ -2156,6 +2360,12 @@ gotten:
76671 */
76672 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
76673 if (likely(pte_same(*page_table, orig_pte))) {
76674 +
76675 +#ifdef CONFIG_PAX_SEGMEXEC
76676 + if (pax_find_mirror_vma(vma))
76677 + BUG_ON(!trylock_page(new_page));
76678 +#endif
76679 +
76680 if (old_page) {
76681 if (!PageAnon(old_page)) {
76682 dec_mm_counter(mm, file_rss);
76683 @@ -2207,6 +2417,10 @@ gotten:
76684 page_remove_rmap(old_page);
76685 }
76686
76687 +#ifdef CONFIG_PAX_SEGMEXEC
76688 + pax_mirror_anon_pte(vma, address, new_page, ptl);
76689 +#endif
76690 +
76691 /* Free the old page.. */
76692 new_page = old_page;
76693 ret |= VM_FAULT_WRITE;
76694 @@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
76695 swap_free(entry);
76696 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
76697 try_to_free_swap(page);
76698 +
76699 +#ifdef CONFIG_PAX_SEGMEXEC
76700 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
76701 +#endif
76702 +
76703 unlock_page(page);
76704
76705 if (flags & FAULT_FLAG_WRITE) {
76706 @@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
76707
76708 /* No need to invalidate - it was non-present before */
76709 update_mmu_cache(vma, address, pte);
76710 +
76711 +#ifdef CONFIG_PAX_SEGMEXEC
76712 + pax_mirror_anon_pte(vma, address, page, ptl);
76713 +#endif
76714 +
76715 unlock:
76716 pte_unmap_unlock(page_table, ptl);
76717 out:
76718 @@ -2632,40 +2856,6 @@ out_release:
76719 }
76720
76721 /*
76722 - * This is like a special single-page "expand_{down|up}wards()",
76723 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
76724 - * doesn't hit another vma.
76725 - */
76726 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
76727 -{
76728 - address &= PAGE_MASK;
76729 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
76730 - struct vm_area_struct *prev = vma->vm_prev;
76731 -
76732 - /*
76733 - * Is there a mapping abutting this one below?
76734 - *
76735 - * That's only ok if it's the same stack mapping
76736 - * that has gotten split..
76737 - */
76738 - if (prev && prev->vm_end == address)
76739 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
76740 -
76741 - expand_stack(vma, address - PAGE_SIZE);
76742 - }
76743 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
76744 - struct vm_area_struct *next = vma->vm_next;
76745 -
76746 - /* As VM_GROWSDOWN but s/below/above/ */
76747 - if (next && next->vm_start == address + PAGE_SIZE)
76748 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
76749 -
76750 - expand_upwards(vma, address + PAGE_SIZE);
76751 - }
76752 - return 0;
76753 -}
76754 -
76755 -/*
76756 * We enter with non-exclusive mmap_sem (to exclude vma changes,
76757 * but allow concurrent faults), and pte mapped but not yet locked.
76758 * We return with mmap_sem still held, but pte unmapped and unlocked.
76759 @@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
76760 unsigned long address, pte_t *page_table, pmd_t *pmd,
76761 unsigned int flags)
76762 {
76763 - struct page *page;
76764 + struct page *page = NULL;
76765 spinlock_t *ptl;
76766 pte_t entry;
76767
76768 - pte_unmap(page_table);
76769 -
76770 - /* Check if we need to add a guard page to the stack */
76771 - if (check_stack_guard_page(vma, address) < 0)
76772 - return VM_FAULT_SIGBUS;
76773 -
76774 - /* Use the zero-page for reads */
76775 if (!(flags & FAULT_FLAG_WRITE)) {
76776 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
76777 vma->vm_page_prot));
76778 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
76779 + ptl = pte_lockptr(mm, pmd);
76780 + spin_lock(ptl);
76781 if (!pte_none(*page_table))
76782 goto unlock;
76783 goto setpte;
76784 }
76785
76786 /* Allocate our own private page. */
76787 + pte_unmap(page_table);
76788 +
76789 if (unlikely(anon_vma_prepare(vma)))
76790 goto oom;
76791 page = alloc_zeroed_user_highpage_movable(vma, address);
76792 @@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
76793 if (!pte_none(*page_table))
76794 goto release;
76795
76796 +#ifdef CONFIG_PAX_SEGMEXEC
76797 + if (pax_find_mirror_vma(vma))
76798 + BUG_ON(!trylock_page(page));
76799 +#endif
76800 +
76801 inc_mm_counter(mm, anon_rss);
76802 page_add_new_anon_rmap(page, vma, address);
76803 setpte:
76804 @@ -2720,6 +2911,12 @@ setpte:
76805
76806 /* No need to invalidate - it was non-present before */
76807 update_mmu_cache(vma, address, entry);
76808 +
76809 +#ifdef CONFIG_PAX_SEGMEXEC
76810 + if (page)
76811 + pax_mirror_anon_pte(vma, address, page, ptl);
76812 +#endif
76813 +
76814 unlock:
76815 pte_unmap_unlock(page_table, ptl);
76816 return 0;
76817 @@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
76818 */
76819 /* Only go through if we didn't race with anybody else... */
76820 if (likely(pte_same(*page_table, orig_pte))) {
76821 +
76822 +#ifdef CONFIG_PAX_SEGMEXEC
76823 + if (anon && pax_find_mirror_vma(vma))
76824 + BUG_ON(!trylock_page(page));
76825 +#endif
76826 +
76827 flush_icache_page(vma, page);
76828 entry = mk_pte(page, vma->vm_page_prot);
76829 if (flags & FAULT_FLAG_WRITE)
76830 @@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
76831
76832 /* no need to invalidate: a not-present page won't be cached */
76833 update_mmu_cache(vma, address, entry);
76834 +
76835 +#ifdef CONFIG_PAX_SEGMEXEC
76836 + if (anon)
76837 + pax_mirror_anon_pte(vma, address, page, ptl);
76838 + else
76839 + pax_mirror_file_pte(vma, address, page, ptl);
76840 +#endif
76841 +
76842 } else {
76843 if (charged)
76844 mem_cgroup_uncharge_page(page);
76845 @@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struct mm_struct *mm,
76846 if (flags & FAULT_FLAG_WRITE)
76847 flush_tlb_page(vma, address);
76848 }
76849 +
76850 +#ifdef CONFIG_PAX_SEGMEXEC
76851 + pax_mirror_pte(vma, address, pte, pmd, ptl);
76852 + return 0;
76853 +#endif
76854 +
76855 unlock:
76856 pte_unmap_unlock(pte, ptl);
76857 return 0;
76858 @@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
76859 pmd_t *pmd;
76860 pte_t *pte;
76861
76862 +#ifdef CONFIG_PAX_SEGMEXEC
76863 + struct vm_area_struct *vma_m;
76864 +#endif
76865 +
76866 __set_current_state(TASK_RUNNING);
76867
76868 count_vm_event(PGFAULT);
76869 @@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
76870 if (unlikely(is_vm_hugetlb_page(vma)))
76871 return hugetlb_fault(mm, vma, address, flags);
76872
76873 +#ifdef CONFIG_PAX_SEGMEXEC
76874 + vma_m = pax_find_mirror_vma(vma);
76875 + if (vma_m) {
76876 + unsigned long address_m;
76877 + pgd_t *pgd_m;
76878 + pud_t *pud_m;
76879 + pmd_t *pmd_m;
76880 +
76881 + if (vma->vm_start > vma_m->vm_start) {
76882 + address_m = address;
76883 + address -= SEGMEXEC_TASK_SIZE;
76884 + vma = vma_m;
76885 + } else
76886 + address_m = address + SEGMEXEC_TASK_SIZE;
76887 +
76888 + pgd_m = pgd_offset(mm, address_m);
76889 + pud_m = pud_alloc(mm, pgd_m, address_m);
76890 + if (!pud_m)
76891 + return VM_FAULT_OOM;
76892 + pmd_m = pmd_alloc(mm, pud_m, address_m);
76893 + if (!pmd_m)
76894 + return VM_FAULT_OOM;
76895 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
76896 + return VM_FAULT_OOM;
76897 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
76898 + }
76899 +#endif
76900 +
76901 pgd = pgd_offset(mm, address);
76902 pud = pud_alloc(mm, pgd, address);
76903 if (!pud)
76904 @@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
76905 gate_vma.vm_start = FIXADDR_USER_START;
76906 gate_vma.vm_end = FIXADDR_USER_END;
76907 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
76908 - gate_vma.vm_page_prot = __P101;
76909 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
76910 /*
76911 * Make sure the vDSO gets into every core dump.
76912 * Dumping its contents makes post-mortem fully interpretable later
76913 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
76914 index 3c6e3e2..ad9871c 100644
76915 --- a/mm/mempolicy.c
76916 +++ b/mm/mempolicy.c
76917 @@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
76918 struct vm_area_struct *next;
76919 int err;
76920
76921 +#ifdef CONFIG_PAX_SEGMEXEC
76922 + struct vm_area_struct *vma_m;
76923 +#endif
76924 +
76925 err = 0;
76926 for (; vma && vma->vm_start < end; vma = next) {
76927 next = vma->vm_next;
76928 @@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
76929 err = policy_vma(vma, new);
76930 if (err)
76931 break;
76932 +
76933 +#ifdef CONFIG_PAX_SEGMEXEC
76934 + vma_m = pax_find_mirror_vma(vma);
76935 + if (vma_m) {
76936 + err = policy_vma(vma_m, new);
76937 + if (err)
76938 + break;
76939 + }
76940 +#endif
76941 +
76942 }
76943 return err;
76944 }
76945 @@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start, unsigned long len,
76946
76947 if (end < start)
76948 return -EINVAL;
76949 +
76950 +#ifdef CONFIG_PAX_SEGMEXEC
76951 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
76952 + if (end > SEGMEXEC_TASK_SIZE)
76953 + return -EINVAL;
76954 + } else
76955 +#endif
76956 +
76957 + if (end > TASK_SIZE)
76958 + return -EINVAL;
76959 +
76960 if (end == start)
76961 return 0;
76962
76963 @@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
76964 if (!mm)
76965 return -EINVAL;
76966
76967 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
76968 + if (mm != current->mm &&
76969 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
76970 + err = -EPERM;
76971 + goto out;
76972 + }
76973 +#endif
76974 +
76975 /*
76976 * Check if this process has the right to modify the specified
76977 * process. The right exists if the process has administrative
76978 @@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
76979 rcu_read_lock();
76980 tcred = __task_cred(task);
76981 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
76982 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
76983 - !capable(CAP_SYS_NICE)) {
76984 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
76985 rcu_read_unlock();
76986 err = -EPERM;
76987 goto out;
76988 @@ -2396,7 +2428,7 @@ int show_numa_map(struct seq_file *m, void *v)
76989
76990 if (file) {
76991 seq_printf(m, " file=");
76992 - seq_path(m, &file->f_path, "\n\t= ");
76993 + seq_path(m, &file->f_path, "\n\t\\= ");
76994 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
76995 seq_printf(m, " heap");
76996 } else if (vma->vm_start <= mm->start_stack &&
76997 diff --git a/mm/migrate.c b/mm/migrate.c
76998 index aaca868..2ebecdc 100644
76999 --- a/mm/migrate.c
77000 +++ b/mm/migrate.c
77001 @@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
77002 unsigned long chunk_start;
77003 int err;
77004
77005 + pax_track_stack();
77006 +
77007 task_nodes = cpuset_mems_allowed(task);
77008
77009 err = -ENOMEM;
77010 @@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
77011 if (!mm)
77012 return -EINVAL;
77013
77014 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
77015 + if (mm != current->mm &&
77016 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
77017 + err = -EPERM;
77018 + goto out;
77019 + }
77020 +#endif
77021 +
77022 /*
77023 * Check if this process has the right to modify the specified
77024 * process. The right exists if the process has administrative
77025 @@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
77026 rcu_read_lock();
77027 tcred = __task_cred(task);
77028 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
77029 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
77030 - !capable(CAP_SYS_NICE)) {
77031 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
77032 rcu_read_unlock();
77033 err = -EPERM;
77034 goto out;
77035 diff --git a/mm/mlock.c b/mm/mlock.c
77036 index 2d846cf..98134d2 100644
77037 --- a/mm/mlock.c
77038 +++ b/mm/mlock.c
77039 @@ -13,6 +13,7 @@
77040 #include <linux/pagemap.h>
77041 #include <linux/mempolicy.h>
77042 #include <linux/syscalls.h>
77043 +#include <linux/security.h>
77044 #include <linux/sched.h>
77045 #include <linux/module.h>
77046 #include <linux/rmap.h>
77047 @@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
77048 }
77049 }
77050
77051 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
77052 -{
77053 - return (vma->vm_flags & VM_GROWSDOWN) &&
77054 - (vma->vm_start == addr) &&
77055 - !vma_stack_continue(vma->vm_prev, addr);
77056 -}
77057 -
77058 /**
77059 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
77060 * @vma: target vma
77061 @@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
77062 if (vma->vm_flags & VM_WRITE)
77063 gup_flags |= FOLL_WRITE;
77064
77065 - /* We don't try to access the guard page of a stack vma */
77066 - if (stack_guard_page(vma, start)) {
77067 - addr += PAGE_SIZE;
77068 - nr_pages--;
77069 - }
77070 -
77071 while (nr_pages > 0) {
77072 int i;
77073
77074 @@ -440,7 +428,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
77075 {
77076 unsigned long nstart, end, tmp;
77077 struct vm_area_struct * vma, * prev;
77078 - int error;
77079 + int error = -EINVAL;
77080
77081 len = PAGE_ALIGN(len);
77082 end = start + len;
77083 @@ -448,6 +436,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
77084 return -EINVAL;
77085 if (end == start)
77086 return 0;
77087 + if (end > TASK_SIZE)
77088 + return -EINVAL;
77089 +
77090 vma = find_vma_prev(current->mm, start, &prev);
77091 if (!vma || vma->vm_start > start)
77092 return -ENOMEM;
77093 @@ -458,6 +449,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
77094 for (nstart = start ; ; ) {
77095 unsigned int newflags;
77096
77097 +#ifdef CONFIG_PAX_SEGMEXEC
77098 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
77099 + break;
77100 +#endif
77101 +
77102 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
77103
77104 newflags = vma->vm_flags | VM_LOCKED;
77105 @@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
77106 lock_limit >>= PAGE_SHIFT;
77107
77108 /* check against resource limits */
77109 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
77110 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
77111 error = do_mlock(start, len, 1);
77112 up_write(&current->mm->mmap_sem);
77113 @@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
77114 static int do_mlockall(int flags)
77115 {
77116 struct vm_area_struct * vma, * prev = NULL;
77117 - unsigned int def_flags = 0;
77118
77119 if (flags & MCL_FUTURE)
77120 - def_flags = VM_LOCKED;
77121 - current->mm->def_flags = def_flags;
77122 + current->mm->def_flags |= VM_LOCKED;
77123 + else
77124 + current->mm->def_flags &= ~VM_LOCKED;
77125 if (flags == MCL_FUTURE)
77126 goto out;
77127
77128 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
77129 - unsigned int newflags;
77130 + unsigned long newflags;
77131
77132 +#ifdef CONFIG_PAX_SEGMEXEC
77133 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
77134 + break;
77135 +#endif
77136 +
77137 + BUG_ON(vma->vm_end > TASK_SIZE);
77138 newflags = vma->vm_flags | VM_LOCKED;
77139 if (!(flags & MCL_CURRENT))
77140 newflags &= ~VM_LOCKED;
77141 @@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
77142 lock_limit >>= PAGE_SHIFT;
77143
77144 ret = -ENOMEM;
77145 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
77146 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
77147 capable(CAP_IPC_LOCK))
77148 ret = do_mlockall(flags);
77149 diff --git a/mm/mmap.c b/mm/mmap.c
77150 index 4b80cbf..c5ce1df 100644
77151 --- a/mm/mmap.c
77152 +++ b/mm/mmap.c
77153 @@ -45,6 +45,16 @@
77154 #define arch_rebalance_pgtables(addr, len) (addr)
77155 #endif
77156
77157 +static inline void verify_mm_writelocked(struct mm_struct *mm)
77158 +{
77159 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
77160 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
77161 + up_read(&mm->mmap_sem);
77162 + BUG();
77163 + }
77164 +#endif
77165 +}
77166 +
77167 static void unmap_region(struct mm_struct *mm,
77168 struct vm_area_struct *vma, struct vm_area_struct *prev,
77169 unsigned long start, unsigned long end);
77170 @@ -70,22 +80,32 @@ static void unmap_region(struct mm_struct *mm,
77171 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
77172 *
77173 */
77174 -pgprot_t protection_map[16] = {
77175 +pgprot_t protection_map[16] __read_only = {
77176 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
77177 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
77178 };
77179
77180 pgprot_t vm_get_page_prot(unsigned long vm_flags)
77181 {
77182 - return __pgprot(pgprot_val(protection_map[vm_flags &
77183 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
77184 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
77185 pgprot_val(arch_vm_get_page_prot(vm_flags)));
77186 +
77187 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
77188 + if (!nx_enabled &&
77189 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
77190 + (vm_flags & (VM_READ | VM_WRITE)))
77191 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
77192 +#endif
77193 +
77194 + return prot;
77195 }
77196 EXPORT_SYMBOL(vm_get_page_prot);
77197
77198 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
77199 int sysctl_overcommit_ratio = 50; /* default is 50% */
77200 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
77201 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
77202 struct percpu_counter vm_committed_as;
77203
77204 /*
77205 @@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
77206 struct vm_area_struct *next = vma->vm_next;
77207
77208 might_sleep();
77209 + BUG_ON(vma->vm_mirror);
77210 if (vma->vm_ops && vma->vm_ops->close)
77211 vma->vm_ops->close(vma);
77212 if (vma->vm_file) {
77213 @@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
77214 * not page aligned -Ram Gupta
77215 */
77216 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
77217 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
77218 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
77219 (mm->end_data - mm->start_data) > rlim)
77220 goto out;
77221 @@ -704,6 +726,12 @@ static int
77222 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
77223 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
77224 {
77225 +
77226 +#ifdef CONFIG_PAX_SEGMEXEC
77227 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
77228 + return 0;
77229 +#endif
77230 +
77231 if (is_mergeable_vma(vma, file, vm_flags) &&
77232 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
77233 if (vma->vm_pgoff == vm_pgoff)
77234 @@ -723,6 +751,12 @@ static int
77235 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
77236 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
77237 {
77238 +
77239 +#ifdef CONFIG_PAX_SEGMEXEC
77240 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
77241 + return 0;
77242 +#endif
77243 +
77244 if (is_mergeable_vma(vma, file, vm_flags) &&
77245 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
77246 pgoff_t vm_pglen;
77247 @@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
77248 struct vm_area_struct *vma_merge(struct mm_struct *mm,
77249 struct vm_area_struct *prev, unsigned long addr,
77250 unsigned long end, unsigned long vm_flags,
77251 - struct anon_vma *anon_vma, struct file *file,
77252 + struct anon_vma *anon_vma, struct file *file,
77253 pgoff_t pgoff, struct mempolicy *policy)
77254 {
77255 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
77256 struct vm_area_struct *area, *next;
77257
77258 +#ifdef CONFIG_PAX_SEGMEXEC
77259 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
77260 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
77261 +
77262 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
77263 +#endif
77264 +
77265 /*
77266 * We later require that vma->vm_flags == vm_flags,
77267 * so this tests vma->vm_flags & VM_SPECIAL, too.
77268 @@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
77269 if (next && next->vm_end == end) /* cases 6, 7, 8 */
77270 next = next->vm_next;
77271
77272 +#ifdef CONFIG_PAX_SEGMEXEC
77273 + if (prev)
77274 + prev_m = pax_find_mirror_vma(prev);
77275 + if (area)
77276 + area_m = pax_find_mirror_vma(area);
77277 + if (next)
77278 + next_m = pax_find_mirror_vma(next);
77279 +#endif
77280 +
77281 /*
77282 * Can it merge with the predecessor?
77283 */
77284 @@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
77285 /* cases 1, 6 */
77286 vma_adjust(prev, prev->vm_start,
77287 next->vm_end, prev->vm_pgoff, NULL);
77288 - } else /* cases 2, 5, 7 */
77289 +
77290 +#ifdef CONFIG_PAX_SEGMEXEC
77291 + if (prev_m)
77292 + vma_adjust(prev_m, prev_m->vm_start,
77293 + next_m->vm_end, prev_m->vm_pgoff, NULL);
77294 +#endif
77295 +
77296 + } else { /* cases 2, 5, 7 */
77297 vma_adjust(prev, prev->vm_start,
77298 end, prev->vm_pgoff, NULL);
77299 +
77300 +#ifdef CONFIG_PAX_SEGMEXEC
77301 + if (prev_m)
77302 + vma_adjust(prev_m, prev_m->vm_start,
77303 + end_m, prev_m->vm_pgoff, NULL);
77304 +#endif
77305 +
77306 + }
77307 return prev;
77308 }
77309
77310 @@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
77311 mpol_equal(policy, vma_policy(next)) &&
77312 can_vma_merge_before(next, vm_flags,
77313 anon_vma, file, pgoff+pglen)) {
77314 - if (prev && addr < prev->vm_end) /* case 4 */
77315 + if (prev && addr < prev->vm_end) { /* case 4 */
77316 vma_adjust(prev, prev->vm_start,
77317 addr, prev->vm_pgoff, NULL);
77318 - else /* cases 3, 8 */
77319 +
77320 +#ifdef CONFIG_PAX_SEGMEXEC
77321 + if (prev_m)
77322 + vma_adjust(prev_m, prev_m->vm_start,
77323 + addr_m, prev_m->vm_pgoff, NULL);
77324 +#endif
77325 +
77326 + } else { /* cases 3, 8 */
77327 vma_adjust(area, addr, next->vm_end,
77328 next->vm_pgoff - pglen, NULL);
77329 +
77330 +#ifdef CONFIG_PAX_SEGMEXEC
77331 + if (area_m)
77332 + vma_adjust(area_m, addr_m, next_m->vm_end,
77333 + next_m->vm_pgoff - pglen, NULL);
77334 +#endif
77335 +
77336 + }
77337 return area;
77338 }
77339
77340 @@ -898,14 +978,11 @@ none:
77341 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
77342 struct file *file, long pages)
77343 {
77344 - const unsigned long stack_flags
77345 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
77346 -
77347 if (file) {
77348 mm->shared_vm += pages;
77349 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
77350 mm->exec_vm += pages;
77351 - } else if (flags & stack_flags)
77352 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
77353 mm->stack_vm += pages;
77354 if (flags & (VM_RESERVED|VM_IO))
77355 mm->reserved_vm += pages;
77356 @@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
77357 * (the exception is when the underlying filesystem is noexec
77358 * mounted, in which case we dont add PROT_EXEC.)
77359 */
77360 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
77361 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
77362 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
77363 prot |= PROT_EXEC;
77364
77365 @@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
77366 /* Obtain the address to map to. we verify (or select) it and ensure
77367 * that it represents a valid section of the address space.
77368 */
77369 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
77370 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
77371 if (addr & ~PAGE_MASK)
77372 return addr;
77373
77374 @@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
77375 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
77376 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
77377
77378 +#ifdef CONFIG_PAX_MPROTECT
77379 + if (mm->pax_flags & MF_PAX_MPROTECT) {
77380 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
77381 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
77382 + gr_log_rwxmmap(file);
77383 +
77384 +#ifdef CONFIG_PAX_EMUPLT
77385 + vm_flags &= ~VM_EXEC;
77386 +#else
77387 + return -EPERM;
77388 +#endif
77389 +
77390 + }
77391 +
77392 + if (!(vm_flags & VM_EXEC))
77393 + vm_flags &= ~VM_MAYEXEC;
77394 +#else
77395 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
77396 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
77397 +#endif
77398 + else
77399 + vm_flags &= ~VM_MAYWRITE;
77400 + }
77401 +#endif
77402 +
77403 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
77404 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
77405 + vm_flags &= ~VM_PAGEEXEC;
77406 +#endif
77407 +
77408 if (flags & MAP_LOCKED)
77409 if (!can_do_mlock())
77410 return -EPERM;
77411 @@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
77412 locked += mm->locked_vm;
77413 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
77414 lock_limit >>= PAGE_SHIFT;
77415 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
77416 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
77417 return -EAGAIN;
77418 }
77419 @@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
77420 if (error)
77421 return error;
77422
77423 + if (!gr_acl_handle_mmap(file, prot))
77424 + return -EACCES;
77425 +
77426 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
77427 }
77428 EXPORT_SYMBOL(do_mmap_pgoff);
77429 @@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
77430 */
77431 int vma_wants_writenotify(struct vm_area_struct *vma)
77432 {
77433 - unsigned int vm_flags = vma->vm_flags;
77434 + unsigned long vm_flags = vma->vm_flags;
77435
77436 /* If it was private or non-writable, the write bit is already clear */
77437 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
77438 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
77439 return 0;
77440
77441 /* The backer wishes to know when pages are first written to? */
77442 @@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
77443 unsigned long charged = 0;
77444 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
77445
77446 +#ifdef CONFIG_PAX_SEGMEXEC
77447 + struct vm_area_struct *vma_m = NULL;
77448 +#endif
77449 +
77450 + /*
77451 + * mm->mmap_sem is required to protect against another thread
77452 + * changing the mappings in case we sleep.
77453 + */
77454 + verify_mm_writelocked(mm);
77455 +
77456 /* Clear old maps */
77457 error = -ENOMEM;
77458 -munmap_back:
77459 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
77460 if (vma && vma->vm_start < addr + len) {
77461 if (do_munmap(mm, addr, len))
77462 return -ENOMEM;
77463 - goto munmap_back;
77464 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
77465 + BUG_ON(vma && vma->vm_start < addr + len);
77466 }
77467
77468 /* Check against address space limit. */
77469 @@ -1173,6 +1294,16 @@ munmap_back:
77470 goto unacct_error;
77471 }
77472
77473 +#ifdef CONFIG_PAX_SEGMEXEC
77474 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
77475 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
77476 + if (!vma_m) {
77477 + error = -ENOMEM;
77478 + goto free_vma;
77479 + }
77480 + }
77481 +#endif
77482 +
77483 vma->vm_mm = mm;
77484 vma->vm_start = addr;
77485 vma->vm_end = addr + len;
77486 @@ -1195,6 +1326,19 @@ munmap_back:
77487 error = file->f_op->mmap(file, vma);
77488 if (error)
77489 goto unmap_and_free_vma;
77490 +
77491 +#ifdef CONFIG_PAX_SEGMEXEC
77492 + if (vma_m && (vm_flags & VM_EXECUTABLE))
77493 + added_exe_file_vma(mm);
77494 +#endif
77495 +
77496 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
77497 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
77498 + vma->vm_flags |= VM_PAGEEXEC;
77499 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
77500 + }
77501 +#endif
77502 +
77503 if (vm_flags & VM_EXECUTABLE)
77504 added_exe_file_vma(mm);
77505
77506 @@ -1218,6 +1362,11 @@ munmap_back:
77507 vma_link(mm, vma, prev, rb_link, rb_parent);
77508 file = vma->vm_file;
77509
77510 +#ifdef CONFIG_PAX_SEGMEXEC
77511 + if (vma_m)
77512 + pax_mirror_vma(vma_m, vma);
77513 +#endif
77514 +
77515 /* Once vma denies write, undo our temporary denial count */
77516 if (correct_wcount)
77517 atomic_inc(&inode->i_writecount);
77518 @@ -1226,6 +1375,7 @@ out:
77519
77520 mm->total_vm += len >> PAGE_SHIFT;
77521 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
77522 + track_exec_limit(mm, addr, addr + len, vm_flags);
77523 if (vm_flags & VM_LOCKED) {
77524 /*
77525 * makes pages present; downgrades, drops, reacquires mmap_sem
77526 @@ -1248,6 +1398,12 @@ unmap_and_free_vma:
77527 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
77528 charged = 0;
77529 free_vma:
77530 +
77531 +#ifdef CONFIG_PAX_SEGMEXEC
77532 + if (vma_m)
77533 + kmem_cache_free(vm_area_cachep, vma_m);
77534 +#endif
77535 +
77536 kmem_cache_free(vm_area_cachep, vma);
77537 unacct_error:
77538 if (charged)
77539 @@ -1255,6 +1411,44 @@ unacct_error:
77540 return error;
77541 }
77542
77543 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
77544 +{
77545 + if (!vma) {
77546 +#ifdef CONFIG_STACK_GROWSUP
77547 + if (addr > sysctl_heap_stack_gap)
77548 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
77549 + else
77550 + vma = find_vma(current->mm, 0);
77551 + if (vma && (vma->vm_flags & VM_GROWSUP))
77552 + return false;
77553 +#endif
77554 + return true;
77555 + }
77556 +
77557 + if (addr + len > vma->vm_start)
77558 + return false;
77559 +
77560 + if (vma->vm_flags & VM_GROWSDOWN)
77561 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
77562 +#ifdef CONFIG_STACK_GROWSUP
77563 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
77564 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
77565 +#endif
77566 +
77567 + return true;
77568 +}
77569 +
77570 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
77571 +{
77572 + if (vma->vm_start < len)
77573 + return -ENOMEM;
77574 + if (!(vma->vm_flags & VM_GROWSDOWN))
77575 + return vma->vm_start - len;
77576 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
77577 + return vma->vm_start - len - sysctl_heap_stack_gap;
77578 + return -ENOMEM;
77579 +}
77580 +
77581 /* Get an address range which is currently unmapped.
77582 * For shmat() with addr=0.
77583 *
77584 @@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
77585 if (flags & MAP_FIXED)
77586 return addr;
77587
77588 +#ifdef CONFIG_PAX_RANDMMAP
77589 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
77590 +#endif
77591 +
77592 if (addr) {
77593 addr = PAGE_ALIGN(addr);
77594 - vma = find_vma(mm, addr);
77595 - if (TASK_SIZE - len >= addr &&
77596 - (!vma || addr + len <= vma->vm_start))
77597 - return addr;
77598 + if (TASK_SIZE - len >= addr) {
77599 + vma = find_vma(mm, addr);
77600 + if (check_heap_stack_gap(vma, addr, len))
77601 + return addr;
77602 + }
77603 }
77604 if (len > mm->cached_hole_size) {
77605 - start_addr = addr = mm->free_area_cache;
77606 + start_addr = addr = mm->free_area_cache;
77607 } else {
77608 - start_addr = addr = TASK_UNMAPPED_BASE;
77609 - mm->cached_hole_size = 0;
77610 + start_addr = addr = mm->mmap_base;
77611 + mm->cached_hole_size = 0;
77612 }
77613
77614 full_search:
77615 @@ -1303,34 +1502,40 @@ full_search:
77616 * Start a new search - just in case we missed
77617 * some holes.
77618 */
77619 - if (start_addr != TASK_UNMAPPED_BASE) {
77620 - addr = TASK_UNMAPPED_BASE;
77621 - start_addr = addr;
77622 + if (start_addr != mm->mmap_base) {
77623 + start_addr = addr = mm->mmap_base;
77624 mm->cached_hole_size = 0;
77625 goto full_search;
77626 }
77627 return -ENOMEM;
77628 }
77629 - if (!vma || addr + len <= vma->vm_start) {
77630 - /*
77631 - * Remember the place where we stopped the search:
77632 - */
77633 - mm->free_area_cache = addr + len;
77634 - return addr;
77635 - }
77636 + if (check_heap_stack_gap(vma, addr, len))
77637 + break;
77638 if (addr + mm->cached_hole_size < vma->vm_start)
77639 mm->cached_hole_size = vma->vm_start - addr;
77640 addr = vma->vm_end;
77641 }
77642 +
77643 + /*
77644 + * Remember the place where we stopped the search:
77645 + */
77646 + mm->free_area_cache = addr + len;
77647 + return addr;
77648 }
77649 #endif
77650
77651 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
77652 {
77653 +
77654 +#ifdef CONFIG_PAX_SEGMEXEC
77655 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
77656 + return;
77657 +#endif
77658 +
77659 /*
77660 * Is this a new hole at the lowest possible address?
77661 */
77662 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
77663 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
77664 mm->free_area_cache = addr;
77665 mm->cached_hole_size = ~0UL;
77666 }
77667 @@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
77668 {
77669 struct vm_area_struct *vma;
77670 struct mm_struct *mm = current->mm;
77671 - unsigned long addr = addr0;
77672 + unsigned long base = mm->mmap_base, addr = addr0;
77673
77674 /* requested length too big for entire address space */
77675 if (len > TASK_SIZE)
77676 @@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
77677 if (flags & MAP_FIXED)
77678 return addr;
77679
77680 +#ifdef CONFIG_PAX_RANDMMAP
77681 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
77682 +#endif
77683 +
77684 /* requesting a specific address */
77685 if (addr) {
77686 addr = PAGE_ALIGN(addr);
77687 - vma = find_vma(mm, addr);
77688 - if (TASK_SIZE - len >= addr &&
77689 - (!vma || addr + len <= vma->vm_start))
77690 - return addr;
77691 + if (TASK_SIZE - len >= addr) {
77692 + vma = find_vma(mm, addr);
77693 + if (check_heap_stack_gap(vma, addr, len))
77694 + return addr;
77695 + }
77696 }
77697
77698 /* check if free_area_cache is useful for us */
77699 @@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
77700 /* make sure it can fit in the remaining address space */
77701 if (addr > len) {
77702 vma = find_vma(mm, addr-len);
77703 - if (!vma || addr <= vma->vm_start)
77704 + if (check_heap_stack_gap(vma, addr - len, len))
77705 /* remember the address as a hint for next time */
77706 return (mm->free_area_cache = addr-len);
77707 }
77708 @@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
77709 * return with success:
77710 */
77711 vma = find_vma(mm, addr);
77712 - if (!vma || addr+len <= vma->vm_start)
77713 + if (check_heap_stack_gap(vma, addr, len))
77714 /* remember the address as a hint for next time */
77715 return (mm->free_area_cache = addr);
77716
77717 @@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
77718 mm->cached_hole_size = vma->vm_start - addr;
77719
77720 /* try just below the current vma->vm_start */
77721 - addr = vma->vm_start-len;
77722 - } while (len < vma->vm_start);
77723 + addr = skip_heap_stack_gap(vma, len);
77724 + } while (!IS_ERR_VALUE(addr));
77725
77726 bottomup:
77727 /*
77728 @@ -1414,13 +1624,21 @@ bottomup:
77729 * can happen with large stack limits and large mmap()
77730 * allocations.
77731 */
77732 + mm->mmap_base = TASK_UNMAPPED_BASE;
77733 +
77734 +#ifdef CONFIG_PAX_RANDMMAP
77735 + if (mm->pax_flags & MF_PAX_RANDMMAP)
77736 + mm->mmap_base += mm->delta_mmap;
77737 +#endif
77738 +
77739 + mm->free_area_cache = mm->mmap_base;
77740 mm->cached_hole_size = ~0UL;
77741 - mm->free_area_cache = TASK_UNMAPPED_BASE;
77742 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
77743 /*
77744 * Restore the topdown base:
77745 */
77746 - mm->free_area_cache = mm->mmap_base;
77747 + mm->mmap_base = base;
77748 + mm->free_area_cache = base;
77749 mm->cached_hole_size = ~0UL;
77750
77751 return addr;
77752 @@ -1429,6 +1647,12 @@ bottomup:
77753
77754 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
77755 {
77756 +
77757 +#ifdef CONFIG_PAX_SEGMEXEC
77758 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
77759 + return;
77760 +#endif
77761 +
77762 /*
77763 * Is this a new hole at the highest possible address?
77764 */
77765 @@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
77766 mm->free_area_cache = addr;
77767
77768 /* dont allow allocations above current base */
77769 - if (mm->free_area_cache > mm->mmap_base)
77770 + if (mm->free_area_cache > mm->mmap_base) {
77771 mm->free_area_cache = mm->mmap_base;
77772 + mm->cached_hole_size = ~0UL;
77773 + }
77774 }
77775
77776 unsigned long
77777 @@ -1545,6 +1771,27 @@ out:
77778 return prev ? prev->vm_next : vma;
77779 }
77780
77781 +#ifdef CONFIG_PAX_SEGMEXEC
77782 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
77783 +{
77784 + struct vm_area_struct *vma_m;
77785 +
77786 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
77787 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
77788 + BUG_ON(vma->vm_mirror);
77789 + return NULL;
77790 + }
77791 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
77792 + vma_m = vma->vm_mirror;
77793 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
77794 + BUG_ON(vma->vm_file != vma_m->vm_file);
77795 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
77796 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
77797 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
77798 + return vma_m;
77799 +}
77800 +#endif
77801 +
77802 /*
77803 * Verify that the stack growth is acceptable and
77804 * update accounting. This is shared with both the
77805 @@ -1561,6 +1808,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
77806 return -ENOMEM;
77807
77808 /* Stack limit test */
77809 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
77810 if (size > rlim[RLIMIT_STACK].rlim_cur)
77811 return -ENOMEM;
77812
77813 @@ -1570,6 +1818,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
77814 unsigned long limit;
77815 locked = mm->locked_vm + grow;
77816 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
77817 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
77818 if (locked > limit && !capable(CAP_IPC_LOCK))
77819 return -ENOMEM;
77820 }
77821 @@ -1600,37 +1849,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
77822 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
77823 * vma is the last one with address > vma->vm_end. Have to extend vma.
77824 */
77825 +#ifndef CONFIG_IA64
77826 +static
77827 +#endif
77828 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
77829 {
77830 int error;
77831 + bool locknext;
77832
77833 if (!(vma->vm_flags & VM_GROWSUP))
77834 return -EFAULT;
77835
77836 + /* Also guard against wrapping around to address 0. */
77837 + if (address < PAGE_ALIGN(address+1))
77838 + address = PAGE_ALIGN(address+1);
77839 + else
77840 + return -ENOMEM;
77841 +
77842 /*
77843 * We must make sure the anon_vma is allocated
77844 * so that the anon_vma locking is not a noop.
77845 */
77846 if (unlikely(anon_vma_prepare(vma)))
77847 return -ENOMEM;
77848 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
77849 + if (locknext && anon_vma_prepare(vma->vm_next))
77850 + return -ENOMEM;
77851 anon_vma_lock(vma);
77852 + if (locknext)
77853 + anon_vma_lock(vma->vm_next);
77854
77855 /*
77856 * vma->vm_start/vm_end cannot change under us because the caller
77857 * is required to hold the mmap_sem in read mode. We need the
77858 - * anon_vma lock to serialize against concurrent expand_stacks.
77859 - * Also guard against wrapping around to address 0.
77860 + * anon_vma locks to serialize against concurrent expand_stacks
77861 + * and expand_upwards.
77862 */
77863 - if (address < PAGE_ALIGN(address+4))
77864 - address = PAGE_ALIGN(address+4);
77865 - else {
77866 - anon_vma_unlock(vma);
77867 - return -ENOMEM;
77868 - }
77869 error = 0;
77870
77871 /* Somebody else might have raced and expanded it already */
77872 - if (address > vma->vm_end) {
77873 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
77874 + error = -ENOMEM;
77875 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
77876 unsigned long size, grow;
77877
77878 size = address - vma->vm_start;
77879 @@ -1643,6 +1903,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
77880 vma->vm_end = address;
77881 }
77882 }
77883 + if (locknext)
77884 + anon_vma_unlock(vma->vm_next);
77885 anon_vma_unlock(vma);
77886 return error;
77887 }
77888 @@ -1655,6 +1917,8 @@ static int expand_downwards(struct vm_area_struct *vma,
77889 unsigned long address)
77890 {
77891 int error;
77892 + bool lockprev = false;
77893 + struct vm_area_struct *prev;
77894
77895 /*
77896 * We must make sure the anon_vma is allocated
77897 @@ -1668,6 +1932,15 @@ static int expand_downwards(struct vm_area_struct *vma,
77898 if (error)
77899 return error;
77900
77901 + prev = vma->vm_prev;
77902 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
77903 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
77904 +#endif
77905 + if (lockprev && anon_vma_prepare(prev))
77906 + return -ENOMEM;
77907 + if (lockprev)
77908 + anon_vma_lock(prev);
77909 +
77910 anon_vma_lock(vma);
77911
77912 /*
77913 @@ -1677,9 +1950,17 @@ static int expand_downwards(struct vm_area_struct *vma,
77914 */
77915
77916 /* Somebody else might have raced and expanded it already */
77917 - if (address < vma->vm_start) {
77918 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
77919 + error = -ENOMEM;
77920 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
77921 unsigned long size, grow;
77922
77923 +#ifdef CONFIG_PAX_SEGMEXEC
77924 + struct vm_area_struct *vma_m;
77925 +
77926 + vma_m = pax_find_mirror_vma(vma);
77927 +#endif
77928 +
77929 size = vma->vm_end - address;
77930 grow = (vma->vm_start - address) >> PAGE_SHIFT;
77931
77932 @@ -1689,10 +1970,22 @@ static int expand_downwards(struct vm_area_struct *vma,
77933 if (!error) {
77934 vma->vm_start = address;
77935 vma->vm_pgoff -= grow;
77936 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
77937 +
77938 +#ifdef CONFIG_PAX_SEGMEXEC
77939 + if (vma_m) {
77940 + vma_m->vm_start -= grow << PAGE_SHIFT;
77941 + vma_m->vm_pgoff -= grow;
77942 + }
77943 +#endif
77944 +
77945 +
77946 }
77947 }
77948 }
77949 anon_vma_unlock(vma);
77950 + if (lockprev)
77951 + anon_vma_unlock(prev);
77952 return error;
77953 }
77954
77955 @@ -1768,6 +2061,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
77956 do {
77957 long nrpages = vma_pages(vma);
77958
77959 +#ifdef CONFIG_PAX_SEGMEXEC
77960 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
77961 + vma = remove_vma(vma);
77962 + continue;
77963 + }
77964 +#endif
77965 +
77966 mm->total_vm -= nrpages;
77967 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
77968 vma = remove_vma(vma);
77969 @@ -1813,6 +2113,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
77970 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
77971 vma->vm_prev = NULL;
77972 do {
77973 +
77974 +#ifdef CONFIG_PAX_SEGMEXEC
77975 + if (vma->vm_mirror) {
77976 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
77977 + vma->vm_mirror->vm_mirror = NULL;
77978 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
77979 + vma->vm_mirror = NULL;
77980 + }
77981 +#endif
77982 +
77983 rb_erase(&vma->vm_rb, &mm->mm_rb);
77984 mm->map_count--;
77985 tail_vma = vma;
77986 @@ -1840,10 +2150,25 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
77987 struct mempolicy *pol;
77988 struct vm_area_struct *new;
77989
77990 +#ifdef CONFIG_PAX_SEGMEXEC
77991 + struct vm_area_struct *vma_m, *new_m = NULL;
77992 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
77993 +#endif
77994 +
77995 if (is_vm_hugetlb_page(vma) && (addr &
77996 ~(huge_page_mask(hstate_vma(vma)))))
77997 return -EINVAL;
77998
77999 +#ifdef CONFIG_PAX_SEGMEXEC
78000 + vma_m = pax_find_mirror_vma(vma);
78001 +
78002 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
78003 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
78004 + if (mm->map_count >= sysctl_max_map_count-1)
78005 + return -ENOMEM;
78006 + } else
78007 +#endif
78008 +
78009 if (mm->map_count >= sysctl_max_map_count)
78010 return -ENOMEM;
78011
78012 @@ -1851,6 +2176,16 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
78013 if (!new)
78014 return -ENOMEM;
78015
78016 +#ifdef CONFIG_PAX_SEGMEXEC
78017 + if (vma_m) {
78018 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
78019 + if (!new_m) {
78020 + kmem_cache_free(vm_area_cachep, new);
78021 + return -ENOMEM;
78022 + }
78023 + }
78024 +#endif
78025 +
78026 /* most fields are the same, copy all, and then fixup */
78027 *new = *vma;
78028
78029 @@ -1861,8 +2196,29 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
78030 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
78031 }
78032
78033 +#ifdef CONFIG_PAX_SEGMEXEC
78034 + if (vma_m) {
78035 + *new_m = *vma_m;
78036 + new_m->vm_mirror = new;
78037 + new->vm_mirror = new_m;
78038 +
78039 + if (new_below)
78040 + new_m->vm_end = addr_m;
78041 + else {
78042 + new_m->vm_start = addr_m;
78043 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
78044 + }
78045 + }
78046 +#endif
78047 +
78048 pol = mpol_dup(vma_policy(vma));
78049 if (IS_ERR(pol)) {
78050 +
78051 +#ifdef CONFIG_PAX_SEGMEXEC
78052 + if (new_m)
78053 + kmem_cache_free(vm_area_cachep, new_m);
78054 +#endif
78055 +
78056 kmem_cache_free(vm_area_cachep, new);
78057 return PTR_ERR(pol);
78058 }
78059 @@ -1883,6 +2239,28 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
78060 else
78061 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
78062
78063 +#ifdef CONFIG_PAX_SEGMEXEC
78064 + if (vma_m) {
78065 + mpol_get(pol);
78066 + vma_set_policy(new_m, pol);
78067 +
78068 + if (new_m->vm_file) {
78069 + get_file(new_m->vm_file);
78070 + if (vma_m->vm_flags & VM_EXECUTABLE)
78071 + added_exe_file_vma(mm);
78072 + }
78073 +
78074 + if (new_m->vm_ops && new_m->vm_ops->open)
78075 + new_m->vm_ops->open(new_m);
78076 +
78077 + if (new_below)
78078 + vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
78079 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
78080 + else
78081 + vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
78082 + }
78083 +#endif
78084 +
78085 return 0;
78086 }
78087
78088 @@ -1891,11 +2269,30 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
78089 * work. This now handles partial unmappings.
78090 * Jeremy Fitzhardinge <jeremy@goop.org>
78091 */
78092 +#ifdef CONFIG_PAX_SEGMEXEC
78093 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
78094 {
78095 + int ret = __do_munmap(mm, start, len);
78096 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
78097 + return ret;
78098 +
78099 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
78100 +}
78101 +
78102 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
78103 +#else
78104 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
78105 +#endif
78106 +{
78107 unsigned long end;
78108 struct vm_area_struct *vma, *prev, *last;
78109
78110 + /*
78111 + * mm->mmap_sem is required to protect against another thread
78112 + * changing the mappings in case we sleep.
78113 + */
78114 + verify_mm_writelocked(mm);
78115 +
78116 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
78117 return -EINVAL;
78118
78119 @@ -1959,6 +2356,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
78120 /* Fix up all other VM information */
78121 remove_vma_list(mm, vma);
78122
78123 + track_exec_limit(mm, start, end, 0UL);
78124 +
78125 return 0;
78126 }
78127
78128 @@ -1971,22 +2370,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
78129
78130 profile_munmap(addr);
78131
78132 +#ifdef CONFIG_PAX_SEGMEXEC
78133 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
78134 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
78135 + return -EINVAL;
78136 +#endif
78137 +
78138 down_write(&mm->mmap_sem);
78139 ret = do_munmap(mm, addr, len);
78140 up_write(&mm->mmap_sem);
78141 return ret;
78142 }
78143
78144 -static inline void verify_mm_writelocked(struct mm_struct *mm)
78145 -{
78146 -#ifdef CONFIG_DEBUG_VM
78147 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
78148 - WARN_ON(1);
78149 - up_read(&mm->mmap_sem);
78150 - }
78151 -#endif
78152 -}
78153 -
78154 /*
78155 * this is really a simplified "do_mmap". it only handles
78156 * anonymous maps. eventually we may be able to do some
78157 @@ -2000,6 +2395,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
78158 struct rb_node ** rb_link, * rb_parent;
78159 pgoff_t pgoff = addr >> PAGE_SHIFT;
78160 int error;
78161 + unsigned long charged;
78162
78163 len = PAGE_ALIGN(len);
78164 if (!len)
78165 @@ -2011,16 +2407,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
78166
78167 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
78168
78169 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
78170 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
78171 + flags &= ~VM_EXEC;
78172 +
78173 +#ifdef CONFIG_PAX_MPROTECT
78174 + if (mm->pax_flags & MF_PAX_MPROTECT)
78175 + flags &= ~VM_MAYEXEC;
78176 +#endif
78177 +
78178 + }
78179 +#endif
78180 +
78181 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
78182 if (error & ~PAGE_MASK)
78183 return error;
78184
78185 + charged = len >> PAGE_SHIFT;
78186 +
78187 /*
78188 * mlock MCL_FUTURE?
78189 */
78190 if (mm->def_flags & VM_LOCKED) {
78191 unsigned long locked, lock_limit;
78192 - locked = len >> PAGE_SHIFT;
78193 + locked = charged;
78194 locked += mm->locked_vm;
78195 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
78196 lock_limit >>= PAGE_SHIFT;
78197 @@ -2037,22 +2447,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
78198 /*
78199 * Clear old maps. this also does some error checking for us
78200 */
78201 - munmap_back:
78202 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
78203 if (vma && vma->vm_start < addr + len) {
78204 if (do_munmap(mm, addr, len))
78205 return -ENOMEM;
78206 - goto munmap_back;
78207 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
78208 + BUG_ON(vma && vma->vm_start < addr + len);
78209 }
78210
78211 /* Check against address space limits *after* clearing old maps... */
78212 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
78213 + if (!may_expand_vm(mm, charged))
78214 return -ENOMEM;
78215
78216 if (mm->map_count > sysctl_max_map_count)
78217 return -ENOMEM;
78218
78219 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
78220 + if (security_vm_enough_memory(charged))
78221 return -ENOMEM;
78222
78223 /* Can we just expand an old private anonymous mapping? */
78224 @@ -2066,7 +2476,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
78225 */
78226 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
78227 if (!vma) {
78228 - vm_unacct_memory(len >> PAGE_SHIFT);
78229 + vm_unacct_memory(charged);
78230 return -ENOMEM;
78231 }
78232
78233 @@ -2078,11 +2488,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
78234 vma->vm_page_prot = vm_get_page_prot(flags);
78235 vma_link(mm, vma, prev, rb_link, rb_parent);
78236 out:
78237 - mm->total_vm += len >> PAGE_SHIFT;
78238 + mm->total_vm += charged;
78239 if (flags & VM_LOCKED) {
78240 if (!mlock_vma_pages_range(vma, addr, addr + len))
78241 - mm->locked_vm += (len >> PAGE_SHIFT);
78242 + mm->locked_vm += charged;
78243 }
78244 + track_exec_limit(mm, addr, addr + len, flags);
78245 return addr;
78246 }
78247
78248 @@ -2129,8 +2540,10 @@ void exit_mmap(struct mm_struct *mm)
78249 * Walk the list again, actually closing and freeing it,
78250 * with preemption enabled, without holding any MM locks.
78251 */
78252 - while (vma)
78253 + while (vma) {
78254 + vma->vm_mirror = NULL;
78255 vma = remove_vma(vma);
78256 + }
78257
78258 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
78259 }
78260 @@ -2144,6 +2557,10 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
78261 struct vm_area_struct * __vma, * prev;
78262 struct rb_node ** rb_link, * rb_parent;
78263
78264 +#ifdef CONFIG_PAX_SEGMEXEC
78265 + struct vm_area_struct *vma_m = NULL;
78266 +#endif
78267 +
78268 /*
78269 * The vm_pgoff of a purely anonymous vma should be irrelevant
78270 * until its first write fault, when page's anon_vma and index
78271 @@ -2166,7 +2583,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
78272 if ((vma->vm_flags & VM_ACCOUNT) &&
78273 security_vm_enough_memory_mm(mm, vma_pages(vma)))
78274 return -ENOMEM;
78275 +
78276 +#ifdef CONFIG_PAX_SEGMEXEC
78277 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
78278 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
78279 + if (!vma_m)
78280 + return -ENOMEM;
78281 + }
78282 +#endif
78283 +
78284 vma_link(mm, vma, prev, rb_link, rb_parent);
78285 +
78286 +#ifdef CONFIG_PAX_SEGMEXEC
78287 + if (vma_m)
78288 + pax_mirror_vma(vma_m, vma);
78289 +#endif
78290 +
78291 return 0;
78292 }
78293
78294 @@ -2184,6 +2616,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
78295 struct rb_node **rb_link, *rb_parent;
78296 struct mempolicy *pol;
78297
78298 + BUG_ON(vma->vm_mirror);
78299 +
78300 /*
78301 * If anonymous vma has not yet been faulted, update new pgoff
78302 * to match new location, to increase its chance of merging.
78303 @@ -2227,6 +2661,35 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
78304 return new_vma;
78305 }
78306
78307 +#ifdef CONFIG_PAX_SEGMEXEC
78308 +void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
78309 +{
78310 + struct vm_area_struct *prev_m;
78311 + struct rb_node **rb_link_m, *rb_parent_m;
78312 + struct mempolicy *pol_m;
78313 +
78314 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
78315 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
78316 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
78317 + *vma_m = *vma;
78318 + pol_m = vma_policy(vma_m);
78319 + mpol_get(pol_m);
78320 + vma_set_policy(vma_m, pol_m);
78321 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
78322 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
78323 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
78324 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
78325 + if (vma_m->vm_file)
78326 + get_file(vma_m->vm_file);
78327 + if (vma_m->vm_ops && vma_m->vm_ops->open)
78328 + vma_m->vm_ops->open(vma_m);
78329 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
78330 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
78331 + vma_m->vm_mirror = vma;
78332 + vma->vm_mirror = vma_m;
78333 +}
78334 +#endif
78335 +
78336 /*
78337 * Return true if the calling process may expand its vm space by the passed
78338 * number of pages
78339 @@ -2237,7 +2700,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
78340 unsigned long lim;
78341
78342 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
78343 -
78344 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
78345 if (cur + npages > lim)
78346 return 0;
78347 return 1;
78348 @@ -2307,6 +2770,22 @@ int install_special_mapping(struct mm_struct *mm,
78349 vma->vm_start = addr;
78350 vma->vm_end = addr + len;
78351
78352 +#ifdef CONFIG_PAX_MPROTECT
78353 + if (mm->pax_flags & MF_PAX_MPROTECT) {
78354 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
78355 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
78356 + return -EPERM;
78357 + if (!(vm_flags & VM_EXEC))
78358 + vm_flags &= ~VM_MAYEXEC;
78359 +#else
78360 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
78361 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
78362 +#endif
78363 + else
78364 + vm_flags &= ~VM_MAYWRITE;
78365 + }
78366 +#endif
78367 +
78368 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
78369 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
78370
78371 diff --git a/mm/mprotect.c b/mm/mprotect.c
78372 index 1737c7e..c7faeb4 100644
78373 --- a/mm/mprotect.c
78374 +++ b/mm/mprotect.c
78375 @@ -24,10 +24,16 @@
78376 #include <linux/mmu_notifier.h>
78377 #include <linux/migrate.h>
78378 #include <linux/perf_event.h>
78379 +
78380 +#ifdef CONFIG_PAX_MPROTECT
78381 +#include <linux/elf.h>
78382 +#endif
78383 +
78384 #include <asm/uaccess.h>
78385 #include <asm/pgtable.h>
78386 #include <asm/cacheflush.h>
78387 #include <asm/tlbflush.h>
78388 +#include <asm/mmu_context.h>
78389
78390 #ifndef pgprot_modify
78391 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
78392 @@ -132,6 +138,48 @@ static void change_protection(struct vm_area_struct *vma,
78393 flush_tlb_range(vma, start, end);
78394 }
78395
78396 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
78397 +/* called while holding the mmap semaphor for writing except stack expansion */
78398 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
78399 +{
78400 + unsigned long oldlimit, newlimit = 0UL;
78401 +
78402 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
78403 + return;
78404 +
78405 + spin_lock(&mm->page_table_lock);
78406 + oldlimit = mm->context.user_cs_limit;
78407 + if ((prot & VM_EXEC) && oldlimit < end)
78408 + /* USER_CS limit moved up */
78409 + newlimit = end;
78410 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
78411 + /* USER_CS limit moved down */
78412 + newlimit = start;
78413 +
78414 + if (newlimit) {
78415 + mm->context.user_cs_limit = newlimit;
78416 +
78417 +#ifdef CONFIG_SMP
78418 + wmb();
78419 + cpus_clear(mm->context.cpu_user_cs_mask);
78420 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
78421 +#endif
78422 +
78423 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
78424 + }
78425 + spin_unlock(&mm->page_table_lock);
78426 + if (newlimit == end) {
78427 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
78428 +
78429 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
78430 + if (is_vm_hugetlb_page(vma))
78431 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
78432 + else
78433 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
78434 + }
78435 +}
78436 +#endif
78437 +
78438 int
78439 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
78440 unsigned long start, unsigned long end, unsigned long newflags)
78441 @@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
78442 int error;
78443 int dirty_accountable = 0;
78444
78445 +#ifdef CONFIG_PAX_SEGMEXEC
78446 + struct vm_area_struct *vma_m = NULL;
78447 + unsigned long start_m, end_m;
78448 +
78449 + start_m = start + SEGMEXEC_TASK_SIZE;
78450 + end_m = end + SEGMEXEC_TASK_SIZE;
78451 +#endif
78452 +
78453 if (newflags == oldflags) {
78454 *pprev = vma;
78455 return 0;
78456 }
78457
78458 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
78459 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
78460 +
78461 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
78462 + return -ENOMEM;
78463 +
78464 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
78465 + return -ENOMEM;
78466 + }
78467 +
78468 /*
78469 * If we make a private mapping writable we increase our commit;
78470 * but (without finer accounting) cannot reduce our commit if we
78471 @@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
78472 }
78473 }
78474
78475 +#ifdef CONFIG_PAX_SEGMEXEC
78476 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
78477 + if (start != vma->vm_start) {
78478 + error = split_vma(mm, vma, start, 1);
78479 + if (error)
78480 + goto fail;
78481 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
78482 + *pprev = (*pprev)->vm_next;
78483 + }
78484 +
78485 + if (end != vma->vm_end) {
78486 + error = split_vma(mm, vma, end, 0);
78487 + if (error)
78488 + goto fail;
78489 + }
78490 +
78491 + if (pax_find_mirror_vma(vma)) {
78492 + error = __do_munmap(mm, start_m, end_m - start_m);
78493 + if (error)
78494 + goto fail;
78495 + } else {
78496 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
78497 + if (!vma_m) {
78498 + error = -ENOMEM;
78499 + goto fail;
78500 + }
78501 + vma->vm_flags = newflags;
78502 + pax_mirror_vma(vma_m, vma);
78503 + }
78504 + }
78505 +#endif
78506 +
78507 /*
78508 * First try to merge with previous and/or next vma.
78509 */
78510 @@ -195,9 +293,21 @@ success:
78511 * vm_flags and vm_page_prot are protected by the mmap_sem
78512 * held in write mode.
78513 */
78514 +
78515 +#ifdef CONFIG_PAX_SEGMEXEC
78516 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
78517 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
78518 +#endif
78519 +
78520 vma->vm_flags = newflags;
78521 +
78522 +#ifdef CONFIG_PAX_MPROTECT
78523 + if (mm->binfmt && mm->binfmt->handle_mprotect)
78524 + mm->binfmt->handle_mprotect(vma, newflags);
78525 +#endif
78526 +
78527 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
78528 - vm_get_page_prot(newflags));
78529 + vm_get_page_prot(vma->vm_flags));
78530
78531 if (vma_wants_writenotify(vma)) {
78532 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
78533 @@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
78534 end = start + len;
78535 if (end <= start)
78536 return -ENOMEM;
78537 +
78538 +#ifdef CONFIG_PAX_SEGMEXEC
78539 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
78540 + if (end > SEGMEXEC_TASK_SIZE)
78541 + return -EINVAL;
78542 + } else
78543 +#endif
78544 +
78545 + if (end > TASK_SIZE)
78546 + return -EINVAL;
78547 +
78548 if (!arch_validate_prot(prot))
78549 return -EINVAL;
78550
78551 @@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
78552 /*
78553 * Does the application expect PROT_READ to imply PROT_EXEC:
78554 */
78555 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
78556 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
78557 prot |= PROT_EXEC;
78558
78559 vm_flags = calc_vm_prot_bits(prot);
78560 @@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
78561 if (start > vma->vm_start)
78562 prev = vma;
78563
78564 +#ifdef CONFIG_PAX_MPROTECT
78565 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
78566 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
78567 +#endif
78568 +
78569 for (nstart = start ; ; ) {
78570 unsigned long newflags;
78571
78572 @@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
78573
78574 /* newflags >> 4 shift VM_MAY% in place of VM_% */
78575 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
78576 + if (prot & (PROT_WRITE | PROT_EXEC))
78577 + gr_log_rwxmprotect(vma->vm_file);
78578 +
78579 + error = -EACCES;
78580 + goto out;
78581 + }
78582 +
78583 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
78584 error = -EACCES;
78585 goto out;
78586 }
78587 @@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
78588 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
78589 if (error)
78590 goto out;
78591 +
78592 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
78593 +
78594 nstart = tmp;
78595
78596 if (nstart < prev->vm_end)
78597 diff --git a/mm/mremap.c b/mm/mremap.c
78598 index 3e98d79..1706cec 100644
78599 --- a/mm/mremap.c
78600 +++ b/mm/mremap.c
78601 @@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
78602 continue;
78603 pte = ptep_clear_flush(vma, old_addr, old_pte);
78604 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
78605 +
78606 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
78607 + if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
78608 + pte = pte_exprotect(pte);
78609 +#endif
78610 +
78611 set_pte_at(mm, new_addr, new_pte, pte);
78612 }
78613
78614 @@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
78615 if (is_vm_hugetlb_page(vma))
78616 goto Einval;
78617
78618 +#ifdef CONFIG_PAX_SEGMEXEC
78619 + if (pax_find_mirror_vma(vma))
78620 + goto Einval;
78621 +#endif
78622 +
78623 /* We can't remap across vm area boundaries */
78624 if (old_len > vma->vm_end - addr)
78625 goto Efault;
78626 @@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned long addr,
78627 unsigned long ret = -EINVAL;
78628 unsigned long charged = 0;
78629 unsigned long map_flags;
78630 + unsigned long pax_task_size = TASK_SIZE;
78631
78632 if (new_addr & ~PAGE_MASK)
78633 goto out;
78634
78635 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
78636 +#ifdef CONFIG_PAX_SEGMEXEC
78637 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
78638 + pax_task_size = SEGMEXEC_TASK_SIZE;
78639 +#endif
78640 +
78641 + pax_task_size -= PAGE_SIZE;
78642 +
78643 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
78644 goto out;
78645
78646 /* Check if the location we're moving into overlaps the
78647 * old location at all, and fail if it does.
78648 */
78649 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
78650 - goto out;
78651 -
78652 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
78653 + if (addr + old_len > new_addr && new_addr + new_len > addr)
78654 goto out;
78655
78656 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
78657 @@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long addr,
78658 struct vm_area_struct *vma;
78659 unsigned long ret = -EINVAL;
78660 unsigned long charged = 0;
78661 + unsigned long pax_task_size = TASK_SIZE;
78662
78663 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
78664 goto out;
78665 @@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long addr,
78666 if (!new_len)
78667 goto out;
78668
78669 +#ifdef CONFIG_PAX_SEGMEXEC
78670 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
78671 + pax_task_size = SEGMEXEC_TASK_SIZE;
78672 +#endif
78673 +
78674 + pax_task_size -= PAGE_SIZE;
78675 +
78676 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
78677 + old_len > pax_task_size || addr > pax_task_size-old_len)
78678 + goto out;
78679 +
78680 if (flags & MREMAP_FIXED) {
78681 if (flags & MREMAP_MAYMOVE)
78682 ret = mremap_to(addr, old_len, new_addr, new_len);
78683 @@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long addr,
78684 addr + new_len);
78685 }
78686 ret = addr;
78687 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
78688 goto out;
78689 }
78690 }
78691 @@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long addr,
78692 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
78693 if (ret)
78694 goto out;
78695 +
78696 + map_flags = vma->vm_flags;
78697 ret = move_vma(vma, addr, old_len, new_len, new_addr);
78698 + if (!(ret & ~PAGE_MASK)) {
78699 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
78700 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
78701 + }
78702 }
78703 out:
78704 if (ret & ~PAGE_MASK)
78705 diff --git a/mm/nommu.c b/mm/nommu.c
78706 index 406e8d4..53970d3 100644
78707 --- a/mm/nommu.c
78708 +++ b/mm/nommu.c
78709 @@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
78710 int sysctl_overcommit_ratio = 50; /* default is 50% */
78711 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
78712 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
78713 -int heap_stack_gap = 0;
78714
78715 atomic_long_t mmap_pages_allocated;
78716
78717 @@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
78718 EXPORT_SYMBOL(find_vma);
78719
78720 /*
78721 - * find a VMA
78722 - * - we don't extend stack VMAs under NOMMU conditions
78723 - */
78724 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
78725 -{
78726 - return find_vma(mm, addr);
78727 -}
78728 -
78729 -/*
78730 * expand a stack to a given address
78731 * - not supported under NOMMU conditions
78732 */
78733 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
78734 index 3ecab7e..594a471 100644
78735 --- a/mm/page_alloc.c
78736 +++ b/mm/page_alloc.c
78737 @@ -289,7 +289,7 @@ out:
78738 * This usage means that zero-order pages may not be compound.
78739 */
78740
78741 -static void free_compound_page(struct page *page)
78742 +void free_compound_page(struct page *page)
78743 {
78744 __free_pages_ok(page, compound_order(page));
78745 }
78746 @@ -587,6 +587,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
78747 int bad = 0;
78748 int wasMlocked = __TestClearPageMlocked(page);
78749
78750 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
78751 + unsigned long index = 1UL << order;
78752 +#endif
78753 +
78754 kmemcheck_free_shadow(page, order);
78755
78756 for (i = 0 ; i < (1 << order) ; ++i)
78757 @@ -599,6 +603,12 @@ static void __free_pages_ok(struct page *page, unsigned int order)
78758 debug_check_no_obj_freed(page_address(page),
78759 PAGE_SIZE << order);
78760 }
78761 +
78762 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
78763 + for (; index; --index)
78764 + sanitize_highpage(page + index - 1);
78765 +#endif
78766 +
78767 arch_free_page(page, order);
78768 kernel_map_pages(page, 1 << order, 0);
78769
78770 @@ -702,8 +712,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
78771 arch_alloc_page(page, order);
78772 kernel_map_pages(page, 1 << order, 1);
78773
78774 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
78775 if (gfp_flags & __GFP_ZERO)
78776 prep_zero_page(page, order, gfp_flags);
78777 +#endif
78778
78779 if (order && (gfp_flags & __GFP_COMP))
78780 prep_compound_page(page, order);
78781 @@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct page *page, int cold)
78782 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
78783 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
78784 }
78785 +
78786 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
78787 + sanitize_highpage(page);
78788 +#endif
78789 +
78790 arch_free_page(page, 0);
78791 kernel_map_pages(page, 1, 0);
78792
78793 @@ -2179,6 +2196,8 @@ void show_free_areas(void)
78794 int cpu;
78795 struct zone *zone;
78796
78797 + pax_track_stack();
78798 +
78799 for_each_populated_zone(zone) {
78800 show_node(zone);
78801 printk("%s per-cpu:\n", zone->name);
78802 @@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct pglist_data *pgdat,
78803 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
78804 }
78805 #else
78806 -static void inline setup_usemap(struct pglist_data *pgdat,
78807 +static inline void setup_usemap(struct pglist_data *pgdat,
78808 struct zone *zone, unsigned long zonesize) {}
78809 #endif /* CONFIG_SPARSEMEM */
78810
78811 diff --git a/mm/percpu.c b/mm/percpu.c
78812 index c90614a..5f7b7b8 100644
78813 --- a/mm/percpu.c
78814 +++ b/mm/percpu.c
78815 @@ -115,7 +115,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
78816 static unsigned int pcpu_high_unit_cpu __read_mostly;
78817
78818 /* the address of the first chunk which starts with the kernel static area */
78819 -void *pcpu_base_addr __read_mostly;
78820 +void *pcpu_base_addr __read_only;
78821 EXPORT_SYMBOL_GPL(pcpu_base_addr);
78822
78823 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
78824 diff --git a/mm/rmap.c b/mm/rmap.c
78825 index dd43373..d848cd7 100644
78826 --- a/mm/rmap.c
78827 +++ b/mm/rmap.c
78828 @@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_struct *vma)
78829 /* page_table_lock to protect against threads */
78830 spin_lock(&mm->page_table_lock);
78831 if (likely(!vma->anon_vma)) {
78832 +
78833 +#ifdef CONFIG_PAX_SEGMEXEC
78834 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
78835 +
78836 + if (vma_m) {
78837 + BUG_ON(vma_m->anon_vma);
78838 + vma_m->anon_vma = anon_vma;
78839 + list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
78840 + }
78841 +#endif
78842 +
78843 vma->anon_vma = anon_vma;
78844 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
78845 allocated = NULL;
78846 diff --git a/mm/shmem.c b/mm/shmem.c
78847 index 3e0005b..1d659a8 100644
78848 --- a/mm/shmem.c
78849 +++ b/mm/shmem.c
78850 @@ -31,7 +31,7 @@
78851 #include <linux/swap.h>
78852 #include <linux/ima.h>
78853
78854 -static struct vfsmount *shm_mnt;
78855 +struct vfsmount *shm_mnt;
78856
78857 #ifdef CONFIG_SHMEM
78858 /*
78859 @@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
78860 goto unlock;
78861 }
78862 entry = shmem_swp_entry(info, index, NULL);
78863 + if (!entry)
78864 + goto unlock;
78865 if (entry->val) {
78866 /*
78867 * The more uptodate page coming down from a stacked
78868 @@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
78869 struct vm_area_struct pvma;
78870 struct page *page;
78871
78872 + pax_track_stack();
78873 +
78874 spol = mpol_cond_copy(&mpol,
78875 mpol_shared_policy_lookup(&info->policy, idx));
78876
78877 @@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
78878
78879 info = SHMEM_I(inode);
78880 inode->i_size = len-1;
78881 - if (len <= (char *)inode - (char *)info) {
78882 + if (len <= (char *)inode - (char *)info && len <= 64) {
78883 /* do it inline */
78884 memcpy(info, symname, len);
78885 inode->i_op = &shmem_symlink_inline_operations;
78886 @@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
78887 int err = -ENOMEM;
78888
78889 /* Round up to L1_CACHE_BYTES to resist false sharing */
78890 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
78891 - L1_CACHE_BYTES), GFP_KERNEL);
78892 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
78893 if (!sbinfo)
78894 return -ENOMEM;
78895
78896 diff --git a/mm/slab.c b/mm/slab.c
78897 index c8d466a..909e01e 100644
78898 --- a/mm/slab.c
78899 +++ b/mm/slab.c
78900 @@ -174,7 +174,7 @@
78901
78902 /* Legal flag mask for kmem_cache_create(). */
78903 #if DEBUG
78904 -# define CREATE_MASK (SLAB_RED_ZONE | \
78905 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
78906 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
78907 SLAB_CACHE_DMA | \
78908 SLAB_STORE_USER | \
78909 @@ -182,7 +182,7 @@
78910 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
78911 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
78912 #else
78913 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
78914 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
78915 SLAB_CACHE_DMA | \
78916 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
78917 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
78918 @@ -308,7 +308,7 @@ struct kmem_list3 {
78919 * Need this for bootstrapping a per node allocator.
78920 */
78921 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
78922 -struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
78923 +struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
78924 #define CACHE_CACHE 0
78925 #define SIZE_AC MAX_NUMNODES
78926 #define SIZE_L3 (2 * MAX_NUMNODES)
78927 @@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
78928 if ((x)->max_freeable < i) \
78929 (x)->max_freeable = i; \
78930 } while (0)
78931 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
78932 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
78933 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
78934 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
78935 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
78936 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
78937 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
78938 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
78939 #else
78940 #define STATS_INC_ACTIVE(x) do { } while (0)
78941 #define STATS_DEC_ACTIVE(x) do { } while (0)
78942 @@ -558,7 +558,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
78943 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
78944 */
78945 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
78946 - const struct slab *slab, void *obj)
78947 + const struct slab *slab, const void *obj)
78948 {
78949 u32 offset = (obj - slab->s_mem);
78950 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
78951 @@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
78952 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
78953 sizes[INDEX_AC].cs_size,
78954 ARCH_KMALLOC_MINALIGN,
78955 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
78956 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
78957 NULL);
78958
78959 if (INDEX_AC != INDEX_L3) {
78960 @@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
78961 kmem_cache_create(names[INDEX_L3].name,
78962 sizes[INDEX_L3].cs_size,
78963 ARCH_KMALLOC_MINALIGN,
78964 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
78965 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
78966 NULL);
78967 }
78968
78969 @@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
78970 sizes->cs_cachep = kmem_cache_create(names->name,
78971 sizes->cs_size,
78972 ARCH_KMALLOC_MINALIGN,
78973 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
78974 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
78975 NULL);
78976 }
78977 #ifdef CONFIG_ZONE_DMA
78978 @@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, void *p)
78979 }
78980 /* cpu stats */
78981 {
78982 - unsigned long allochit = atomic_read(&cachep->allochit);
78983 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
78984 - unsigned long freehit = atomic_read(&cachep->freehit);
78985 - unsigned long freemiss = atomic_read(&cachep->freemiss);
78986 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
78987 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
78988 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
78989 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
78990
78991 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
78992 allochit, allocmiss, freehit, freemiss);
78993 @@ -4471,15 +4471,70 @@ static const struct file_operations proc_slabstats_operations = {
78994
78995 static int __init slab_proc_init(void)
78996 {
78997 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
78998 + mode_t gr_mode = S_IRUGO;
78999 +
79000 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
79001 + gr_mode = S_IRUSR;
79002 +#endif
79003 +
79004 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
79005 #ifdef CONFIG_DEBUG_SLAB_LEAK
79006 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
79007 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
79008 #endif
79009 return 0;
79010 }
79011 module_init(slab_proc_init);
79012 #endif
79013
79014 +void check_object_size(const void *ptr, unsigned long n, bool to)
79015 +{
79016 +
79017 +#ifdef CONFIG_PAX_USERCOPY
79018 + struct page *page;
79019 + struct kmem_cache *cachep = NULL;
79020 + struct slab *slabp;
79021 + unsigned int objnr;
79022 + unsigned long offset;
79023 + const char *type;
79024 +
79025 + if (!n)
79026 + return;
79027 +
79028 + type = "<null>";
79029 + if (ZERO_OR_NULL_PTR(ptr))
79030 + goto report;
79031 +
79032 + if (!virt_addr_valid(ptr))
79033 + return;
79034 +
79035 + page = virt_to_head_page(ptr);
79036 +
79037 + type = "<process stack>";
79038 + if (!PageSlab(page)) {
79039 + if (object_is_on_stack(ptr, n) == -1)
79040 + goto report;
79041 + return;
79042 + }
79043 +
79044 + cachep = page_get_cache(page);
79045 + type = cachep->name;
79046 + if (!(cachep->flags & SLAB_USERCOPY))
79047 + goto report;
79048 +
79049 + slabp = page_get_slab(page);
79050 + objnr = obj_to_index(cachep, slabp, ptr);
79051 + BUG_ON(objnr >= cachep->num);
79052 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
79053 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
79054 + return;
79055 +
79056 +report:
79057 + pax_report_usercopy(ptr, n, to, type);
79058 +#endif
79059 +
79060 +}
79061 +EXPORT_SYMBOL(check_object_size);
79062 +
79063 /**
79064 * ksize - get the actual amount of memory allocated for a given object
79065 * @objp: Pointer to the object
79066 diff --git a/mm/slob.c b/mm/slob.c
79067 index 837ebd6..4712174 100644
79068 --- a/mm/slob.c
79069 +++ b/mm/slob.c
79070 @@ -29,7 +29,7 @@
79071 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
79072 * alloc_pages() directly, allocating compound pages so the page order
79073 * does not have to be separately tracked, and also stores the exact
79074 - * allocation size in page->private so that it can be used to accurately
79075 + * allocation size in slob_page->size so that it can be used to accurately
79076 * provide ksize(). These objects are detected in kfree() because slob_page()
79077 * is false for them.
79078 *
79079 @@ -58,6 +58,7 @@
79080 */
79081
79082 #include <linux/kernel.h>
79083 +#include <linux/sched.h>
79084 #include <linux/slab.h>
79085 #include <linux/mm.h>
79086 #include <linux/swap.h> /* struct reclaim_state */
79087 @@ -100,7 +101,8 @@ struct slob_page {
79088 unsigned long flags; /* mandatory */
79089 atomic_t _count; /* mandatory */
79090 slobidx_t units; /* free units left in page */
79091 - unsigned long pad[2];
79092 + unsigned long pad[1];
79093 + unsigned long size; /* size when >=PAGE_SIZE */
79094 slob_t *free; /* first free slob_t in page */
79095 struct list_head list; /* linked list of free pages */
79096 };
79097 @@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
79098 */
79099 static inline int is_slob_page(struct slob_page *sp)
79100 {
79101 - return PageSlab((struct page *)sp);
79102 + return PageSlab((struct page *)sp) && !sp->size;
79103 }
79104
79105 static inline void set_slob_page(struct slob_page *sp)
79106 @@ -148,7 +150,7 @@ static inline void clear_slob_page(struct slob_page *sp)
79107
79108 static inline struct slob_page *slob_page(const void *addr)
79109 {
79110 - return (struct slob_page *)virt_to_page(addr);
79111 + return (struct slob_page *)virt_to_head_page(addr);
79112 }
79113
79114 /*
79115 @@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
79116 /*
79117 * Return the size of a slob block.
79118 */
79119 -static slobidx_t slob_units(slob_t *s)
79120 +static slobidx_t slob_units(const slob_t *s)
79121 {
79122 if (s->units > 0)
79123 return s->units;
79124 @@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
79125 /*
79126 * Return the next free slob block pointer after this one.
79127 */
79128 -static slob_t *slob_next(slob_t *s)
79129 +static slob_t *slob_next(const slob_t *s)
79130 {
79131 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
79132 slobidx_t next;
79133 @@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
79134 /*
79135 * Returns true if s is the last free block in its page.
79136 */
79137 -static int slob_last(slob_t *s)
79138 +static int slob_last(const slob_t *s)
79139 {
79140 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
79141 }
79142 @@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
79143 if (!page)
79144 return NULL;
79145
79146 + set_slob_page(page);
79147 return page_address(page);
79148 }
79149
79150 @@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
79151 if (!b)
79152 return NULL;
79153 sp = slob_page(b);
79154 - set_slob_page(sp);
79155
79156 spin_lock_irqsave(&slob_lock, flags);
79157 sp->units = SLOB_UNITS(PAGE_SIZE);
79158 sp->free = b;
79159 + sp->size = 0;
79160 INIT_LIST_HEAD(&sp->list);
79161 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
79162 set_slob_page_free(sp, slob_list);
79163 @@ -475,10 +478,9 @@ out:
79164 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
79165 #endif
79166
79167 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
79168 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
79169 {
79170 - unsigned int *m;
79171 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
79172 + slob_t *m;
79173 void *ret;
79174
79175 lockdep_trace_alloc(gfp);
79176 @@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
79177
79178 if (!m)
79179 return NULL;
79180 - *m = size;
79181 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
79182 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
79183 + m[0].units = size;
79184 + m[1].units = align;
79185 ret = (void *)m + align;
79186
79187 trace_kmalloc_node(_RET_IP_, ret,
79188 @@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
79189
79190 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
79191 if (ret) {
79192 - struct page *page;
79193 - page = virt_to_page(ret);
79194 - page->private = size;
79195 + struct slob_page *sp;
79196 + sp = slob_page(ret);
79197 + sp->size = size;
79198 }
79199
79200 trace_kmalloc_node(_RET_IP_, ret,
79201 size, PAGE_SIZE << order, gfp, node);
79202 }
79203
79204 - kmemleak_alloc(ret, size, 1, gfp);
79205 + return ret;
79206 +}
79207 +
79208 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
79209 +{
79210 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
79211 + void *ret = __kmalloc_node_align(size, gfp, node, align);
79212 +
79213 + if (!ZERO_OR_NULL_PTR(ret))
79214 + kmemleak_alloc(ret, size, 1, gfp);
79215 return ret;
79216 }
79217 EXPORT_SYMBOL(__kmalloc_node);
79218 @@ -528,13 +542,92 @@ void kfree(const void *block)
79219 sp = slob_page(block);
79220 if (is_slob_page(sp)) {
79221 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
79222 - unsigned int *m = (unsigned int *)(block - align);
79223 - slob_free(m, *m + align);
79224 - } else
79225 + slob_t *m = (slob_t *)(block - align);
79226 + slob_free(m, m[0].units + align);
79227 + } else {
79228 + clear_slob_page(sp);
79229 + free_slob_page(sp);
79230 + sp->size = 0;
79231 put_page(&sp->page);
79232 + }
79233 }
79234 EXPORT_SYMBOL(kfree);
79235
79236 +void check_object_size(const void *ptr, unsigned long n, bool to)
79237 +{
79238 +
79239 +#ifdef CONFIG_PAX_USERCOPY
79240 + struct slob_page *sp;
79241 + const slob_t *free;
79242 + const void *base;
79243 + unsigned long flags;
79244 + const char *type;
79245 +
79246 + if (!n)
79247 + return;
79248 +
79249 + type = "<null>";
79250 + if (ZERO_OR_NULL_PTR(ptr))
79251 + goto report;
79252 +
79253 + if (!virt_addr_valid(ptr))
79254 + return;
79255 +
79256 + type = "<process stack>";
79257 + sp = slob_page(ptr);
79258 + if (!PageSlab((struct page*)sp)) {
79259 + if (object_is_on_stack(ptr, n) == -1)
79260 + goto report;
79261 + return;
79262 + }
79263 +
79264 + type = "<slob>";
79265 + if (sp->size) {
79266 + base = page_address(&sp->page);
79267 + if (base <= ptr && n <= sp->size - (ptr - base))
79268 + return;
79269 + goto report;
79270 + }
79271 +
79272 + /* some tricky double walking to find the chunk */
79273 + spin_lock_irqsave(&slob_lock, flags);
79274 + base = (void *)((unsigned long)ptr & PAGE_MASK);
79275 + free = sp->free;
79276 +
79277 + while (!slob_last(free) && (void *)free <= ptr) {
79278 + base = free + slob_units(free);
79279 + free = slob_next(free);
79280 + }
79281 +
79282 + while (base < (void *)free) {
79283 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
79284 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
79285 + int offset;
79286 +
79287 + if (ptr < base + align)
79288 + break;
79289 +
79290 + offset = ptr - base - align;
79291 + if (offset >= m) {
79292 + base += size;
79293 + continue;
79294 + }
79295 +
79296 + if (n > m - offset)
79297 + break;
79298 +
79299 + spin_unlock_irqrestore(&slob_lock, flags);
79300 + return;
79301 + }
79302 +
79303 + spin_unlock_irqrestore(&slob_lock, flags);
79304 +report:
79305 + pax_report_usercopy(ptr, n, to, type);
79306 +#endif
79307 +
79308 +}
79309 +EXPORT_SYMBOL(check_object_size);
79310 +
79311 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
79312 size_t ksize(const void *block)
79313 {
79314 @@ -547,10 +640,10 @@ size_t ksize(const void *block)
79315 sp = slob_page(block);
79316 if (is_slob_page(sp)) {
79317 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
79318 - unsigned int *m = (unsigned int *)(block - align);
79319 - return SLOB_UNITS(*m) * SLOB_UNIT;
79320 + slob_t *m = (slob_t *)(block - align);
79321 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
79322 } else
79323 - return sp->page.private;
79324 + return sp->size;
79325 }
79326 EXPORT_SYMBOL(ksize);
79327
79328 @@ -566,8 +659,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
79329 {
79330 struct kmem_cache *c;
79331
79332 +#ifdef CONFIG_PAX_USERCOPY
79333 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
79334 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
79335 +#else
79336 c = slob_alloc(sizeof(struct kmem_cache),
79337 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
79338 +#endif
79339
79340 if (c) {
79341 c->name = name;
79342 @@ -605,17 +703,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
79343 {
79344 void *b;
79345
79346 +#ifdef CONFIG_PAX_USERCOPY
79347 + b = __kmalloc_node_align(c->size, flags, node, c->align);
79348 +#else
79349 if (c->size < PAGE_SIZE) {
79350 b = slob_alloc(c->size, flags, c->align, node);
79351 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
79352 SLOB_UNITS(c->size) * SLOB_UNIT,
79353 flags, node);
79354 } else {
79355 + struct slob_page *sp;
79356 +
79357 b = slob_new_pages(flags, get_order(c->size), node);
79358 + sp = slob_page(b);
79359 + sp->size = c->size;
79360 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
79361 PAGE_SIZE << get_order(c->size),
79362 flags, node);
79363 }
79364 +#endif
79365
79366 if (c->ctor)
79367 c->ctor(b);
79368 @@ -627,10 +733,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
79369
79370 static void __kmem_cache_free(void *b, int size)
79371 {
79372 - if (size < PAGE_SIZE)
79373 + struct slob_page *sp = slob_page(b);
79374 +
79375 + if (is_slob_page(sp))
79376 slob_free(b, size);
79377 - else
79378 + else {
79379 + clear_slob_page(sp);
79380 + free_slob_page(sp);
79381 + sp->size = 0;
79382 slob_free_pages(b, get_order(size));
79383 + }
79384 }
79385
79386 static void kmem_rcu_free(struct rcu_head *head)
79387 @@ -643,18 +755,32 @@ static void kmem_rcu_free(struct rcu_head *head)
79388
79389 void kmem_cache_free(struct kmem_cache *c, void *b)
79390 {
79391 + int size = c->size;
79392 +
79393 +#ifdef CONFIG_PAX_USERCOPY
79394 + if (size + c->align < PAGE_SIZE) {
79395 + size += c->align;
79396 + b -= c->align;
79397 + }
79398 +#endif
79399 +
79400 kmemleak_free_recursive(b, c->flags);
79401 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
79402 struct slob_rcu *slob_rcu;
79403 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
79404 + slob_rcu = b + (size - sizeof(struct slob_rcu));
79405 INIT_RCU_HEAD(&slob_rcu->head);
79406 - slob_rcu->size = c->size;
79407 + slob_rcu->size = size;
79408 call_rcu(&slob_rcu->head, kmem_rcu_free);
79409 } else {
79410 - __kmem_cache_free(b, c->size);
79411 + __kmem_cache_free(b, size);
79412 }
79413
79414 +#ifdef CONFIG_PAX_USERCOPY
79415 + trace_kfree(_RET_IP_, b);
79416 +#else
79417 trace_kmem_cache_free(_RET_IP_, b);
79418 +#endif
79419 +
79420 }
79421 EXPORT_SYMBOL(kmem_cache_free);
79422
79423 diff --git a/mm/slub.c b/mm/slub.c
79424 index 4996fc7..87e01d0 100644
79425 --- a/mm/slub.c
79426 +++ b/mm/slub.c
79427 @@ -201,7 +201,7 @@ struct track {
79428
79429 enum track_item { TRACK_ALLOC, TRACK_FREE };
79430
79431 -#ifdef CONFIG_SLUB_DEBUG
79432 +#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
79433 static int sysfs_slab_add(struct kmem_cache *);
79434 static int sysfs_slab_alias(struct kmem_cache *, const char *);
79435 static void sysfs_slab_remove(struct kmem_cache *);
79436 @@ -410,7 +410,7 @@ static void print_track(const char *s, struct track *t)
79437 if (!t->addr)
79438 return;
79439
79440 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
79441 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
79442 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
79443 }
79444
79445 @@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
79446
79447 page = virt_to_head_page(x);
79448
79449 + BUG_ON(!PageSlab(page));
79450 +
79451 slab_free(s, page, x, _RET_IP_);
79452
79453 trace_kmem_cache_free(_RET_IP_, x);
79454 @@ -1937,7 +1939,7 @@ static int slub_min_objects;
79455 * Merge control. If this is set then no merging of slab caches will occur.
79456 * (Could be removed. This was introduced to pacify the merge skeptics.)
79457 */
79458 -static int slub_nomerge;
79459 +static int slub_nomerge = 1;
79460
79461 /*
79462 * Calculate the order of allocation given an slab object size.
79463 @@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
79464 * list to avoid pounding the page allocator excessively.
79465 */
79466 set_min_partial(s, ilog2(s->size));
79467 - s->refcount = 1;
79468 + atomic_set(&s->refcount, 1);
79469 #ifdef CONFIG_NUMA
79470 s->remote_node_defrag_ratio = 1000;
79471 #endif
79472 @@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
79473 void kmem_cache_destroy(struct kmem_cache *s)
79474 {
79475 down_write(&slub_lock);
79476 - s->refcount--;
79477 - if (!s->refcount) {
79478 + if (atomic_dec_and_test(&s->refcount)) {
79479 list_del(&s->list);
79480 up_write(&slub_lock);
79481 if (kmem_cache_close(s)) {
79482 @@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(char *str)
79483 __setup("slub_nomerge", setup_slub_nomerge);
79484
79485 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
79486 - const char *name, int size, gfp_t gfp_flags)
79487 + const char *name, int size, gfp_t gfp_flags, unsigned int flags)
79488 {
79489 - unsigned int flags = 0;
79490 -
79491 if (gfp_flags & SLUB_DMA)
79492 - flags = SLAB_CACHE_DMA;
79493 + flags |= SLAB_CACHE_DMA;
79494
79495 /*
79496 * This function is called with IRQs disabled during early-boot on
79497 @@ -2915,6 +2914,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
79498 EXPORT_SYMBOL(__kmalloc_node);
79499 #endif
79500
79501 +void check_object_size(const void *ptr, unsigned long n, bool to)
79502 +{
79503 +
79504 +#ifdef CONFIG_PAX_USERCOPY
79505 + struct page *page;
79506 + struct kmem_cache *s = NULL;
79507 + unsigned long offset;
79508 + const char *type;
79509 +
79510 + if (!n)
79511 + return;
79512 +
79513 + type = "<null>";
79514 + if (ZERO_OR_NULL_PTR(ptr))
79515 + goto report;
79516 +
79517 + if (!virt_addr_valid(ptr))
79518 + return;
79519 +
79520 + page = get_object_page(ptr);
79521 +
79522 + type = "<process stack>";
79523 + if (!page) {
79524 + if (object_is_on_stack(ptr, n) == -1)
79525 + goto report;
79526 + return;
79527 + }
79528 +
79529 + s = page->slab;
79530 + type = s->name;
79531 + if (!(s->flags & SLAB_USERCOPY))
79532 + goto report;
79533 +
79534 + offset = (ptr - page_address(page)) % s->size;
79535 + if (offset <= s->objsize && n <= s->objsize - offset)
79536 + return;
79537 +
79538 +report:
79539 + pax_report_usercopy(ptr, n, to, type);
79540 +#endif
79541 +
79542 +}
79543 +EXPORT_SYMBOL(check_object_size);
79544 +
79545 size_t ksize(const void *object)
79546 {
79547 struct page *page;
79548 @@ -3185,8 +3228,8 @@ void __init kmem_cache_init(void)
79549 * kmem_cache_open for slab_state == DOWN.
79550 */
79551 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
79552 - sizeof(struct kmem_cache_node), GFP_NOWAIT);
79553 - kmalloc_caches[0].refcount = -1;
79554 + sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
79555 + atomic_set(&kmalloc_caches[0].refcount, -1);
79556 caches++;
79557
79558 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
79559 @@ -3198,18 +3241,18 @@ void __init kmem_cache_init(void)
79560 /* Caches that are not of the two-to-the-power-of size */
79561 if (KMALLOC_MIN_SIZE <= 32) {
79562 create_kmalloc_cache(&kmalloc_caches[1],
79563 - "kmalloc-96", 96, GFP_NOWAIT);
79564 + "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
79565 caches++;
79566 }
79567 if (KMALLOC_MIN_SIZE <= 64) {
79568 create_kmalloc_cache(&kmalloc_caches[2],
79569 - "kmalloc-192", 192, GFP_NOWAIT);
79570 + "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
79571 caches++;
79572 }
79573
79574 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
79575 create_kmalloc_cache(&kmalloc_caches[i],
79576 - "kmalloc", 1 << i, GFP_NOWAIT);
79577 + "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
79578 caches++;
79579 }
79580
79581 @@ -3293,7 +3336,7 @@ static int slab_unmergeable(struct kmem_cache *s)
79582 /*
79583 * We may have set a slab to be unmergeable during bootstrap.
79584 */
79585 - if (s->refcount < 0)
79586 + if (atomic_read(&s->refcount) < 0)
79587 return 1;
79588
79589 return 0;
79590 @@ -3353,7 +3396,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
79591 if (s) {
79592 int cpu;
79593
79594 - s->refcount++;
79595 + atomic_inc(&s->refcount);
79596 /*
79597 * Adjust the object sizes so that we clear
79598 * the complete object on kzalloc.
79599 @@ -3372,7 +3415,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
79600
79601 if (sysfs_slab_alias(s, name)) {
79602 down_write(&slub_lock);
79603 - s->refcount--;
79604 + atomic_dec(&s->refcount);
79605 up_write(&slub_lock);
79606 goto err;
79607 }
79608 @@ -4101,7 +4144,7 @@ SLAB_ATTR_RO(ctor);
79609
79610 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
79611 {
79612 - return sprintf(buf, "%d\n", s->refcount - 1);
79613 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
79614 }
79615 SLAB_ATTR_RO(aliases);
79616
79617 @@ -4503,7 +4546,7 @@ static void kmem_cache_release(struct kobject *kobj)
79618 kfree(s);
79619 }
79620
79621 -static struct sysfs_ops slab_sysfs_ops = {
79622 +static const struct sysfs_ops slab_sysfs_ops = {
79623 .show = slab_attr_show,
79624 .store = slab_attr_store,
79625 };
79626 @@ -4522,7 +4565,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
79627 return 0;
79628 }
79629
79630 -static struct kset_uevent_ops slab_uevent_ops = {
79631 +static const struct kset_uevent_ops slab_uevent_ops = {
79632 .filter = uevent_filter,
79633 };
79634
79635 @@ -4564,6 +4607,7 @@ static char *create_unique_id(struct kmem_cache *s)
79636 return name;
79637 }
79638
79639 +#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
79640 static int sysfs_slab_add(struct kmem_cache *s)
79641 {
79642 int err;
79643 @@ -4619,6 +4663,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
79644 kobject_del(&s->kobj);
79645 kobject_put(&s->kobj);
79646 }
79647 +#endif
79648
79649 /*
79650 * Need to buffer aliases during bootup until sysfs becomes
79651 @@ -4632,6 +4677,7 @@ struct saved_alias {
79652
79653 static struct saved_alias *alias_list;
79654
79655 +#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
79656 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
79657 {
79658 struct saved_alias *al;
79659 @@ -4654,6 +4700,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
79660 alias_list = al;
79661 return 0;
79662 }
79663 +#endif
79664
79665 static int __init slab_sysfs_init(void)
79666 {
79667 @@ -4785,7 +4832,13 @@ static const struct file_operations proc_slabinfo_operations = {
79668
79669 static int __init slab_proc_init(void)
79670 {
79671 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
79672 + mode_t gr_mode = S_IRUGO;
79673 +
79674 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
79675 + gr_mode = S_IRUSR;
79676 +#endif
79677 +
79678 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
79679 return 0;
79680 }
79681 module_init(slab_proc_init);
79682 diff --git a/mm/swap.c b/mm/swap.c
79683 index 308e57d..5de19c0 100644
79684 --- a/mm/swap.c
79685 +++ b/mm/swap.c
79686 @@ -30,6 +30,7 @@
79687 #include <linux/notifier.h>
79688 #include <linux/backing-dev.h>
79689 #include <linux/memcontrol.h>
79690 +#include <linux/hugetlb.h>
79691
79692 #include "internal.h"
79693
79694 @@ -65,6 +66,8 @@ static void put_compound_page(struct page *page)
79695 compound_page_dtor *dtor;
79696
79697 dtor = get_compound_page_dtor(page);
79698 + if (!PageHuge(page))
79699 + BUG_ON(dtor != free_compound_page);
79700 (*dtor)(page);
79701 }
79702 }
79703 diff --git a/mm/util.c b/mm/util.c
79704 index e48b493..24a601d 100644
79705 --- a/mm/util.c
79706 +++ b/mm/util.c
79707 @@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
79708 void arch_pick_mmap_layout(struct mm_struct *mm)
79709 {
79710 mm->mmap_base = TASK_UNMAPPED_BASE;
79711 +
79712 +#ifdef CONFIG_PAX_RANDMMAP
79713 + if (mm->pax_flags & MF_PAX_RANDMMAP)
79714 + mm->mmap_base += mm->delta_mmap;
79715 +#endif
79716 +
79717 mm->get_unmapped_area = arch_get_unmapped_area;
79718 mm->unmap_area = arch_unmap_area;
79719 }
79720 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
79721 index f34ffd0..e60c44f 100644
79722 --- a/mm/vmalloc.c
79723 +++ b/mm/vmalloc.c
79724 @@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
79725
79726 pte = pte_offset_kernel(pmd, addr);
79727 do {
79728 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
79729 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
79730 +
79731 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
79732 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
79733 + BUG_ON(!pte_exec(*pte));
79734 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
79735 + continue;
79736 + }
79737 +#endif
79738 +
79739 + {
79740 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
79741 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
79742 + }
79743 } while (pte++, addr += PAGE_SIZE, addr != end);
79744 }
79745
79746 @@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
79747 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
79748 {
79749 pte_t *pte;
79750 + int ret = -ENOMEM;
79751
79752 /*
79753 * nr is a running index into the array which helps higher level
79754 @@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
79755 pte = pte_alloc_kernel(pmd, addr);
79756 if (!pte)
79757 return -ENOMEM;
79758 +
79759 + pax_open_kernel();
79760 do {
79761 struct page *page = pages[*nr];
79762
79763 - if (WARN_ON(!pte_none(*pte)))
79764 - return -EBUSY;
79765 - if (WARN_ON(!page))
79766 - return -ENOMEM;
79767 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
79768 + if (!(pgprot_val(prot) & _PAGE_NX))
79769 + BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
79770 + else
79771 +#endif
79772 +
79773 + if (WARN_ON(!pte_none(*pte))) {
79774 + ret = -EBUSY;
79775 + goto out;
79776 + }
79777 + if (WARN_ON(!page)) {
79778 + ret = -ENOMEM;
79779 + goto out;
79780 + }
79781 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
79782 (*nr)++;
79783 } while (pte++, addr += PAGE_SIZE, addr != end);
79784 - return 0;
79785 + ret = 0;
79786 +out:
79787 + pax_close_kernel();
79788 + return ret;
79789 }
79790
79791 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
79792 @@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void *x)
79793 * and fall back on vmalloc() if that fails. Others
79794 * just put it in the vmalloc space.
79795 */
79796 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
79797 +#ifdef CONFIG_MODULES
79798 +#ifdef MODULES_VADDR
79799 unsigned long addr = (unsigned long)x;
79800 if (addr >= MODULES_VADDR && addr < MODULES_END)
79801 return 1;
79802 #endif
79803 +
79804 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
79805 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
79806 + return 1;
79807 +#endif
79808 +
79809 +#endif
79810 +
79811 return is_vmalloc_addr(x);
79812 }
79813
79814 @@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
79815
79816 if (!pgd_none(*pgd)) {
79817 pud_t *pud = pud_offset(pgd, addr);
79818 +#ifdef CONFIG_X86
79819 + if (!pud_large(*pud))
79820 +#endif
79821 if (!pud_none(*pud)) {
79822 pmd_t *pmd = pmd_offset(pud, addr);
79823 +#ifdef CONFIG_X86
79824 + if (!pmd_large(*pmd))
79825 +#endif
79826 if (!pmd_none(*pmd)) {
79827 pte_t *ptep, pte;
79828
79829 @@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vmap_area *va)
79830 struct rb_node *tmp;
79831
79832 while (*p) {
79833 - struct vmap_area *tmp;
79834 + struct vmap_area *varea;
79835
79836 parent = *p;
79837 - tmp = rb_entry(parent, struct vmap_area, rb_node);
79838 - if (va->va_start < tmp->va_end)
79839 + varea = rb_entry(parent, struct vmap_area, rb_node);
79840 + if (va->va_start < varea->va_end)
79841 p = &(*p)->rb_left;
79842 - else if (va->va_end > tmp->va_start)
79843 + else if (va->va_end > varea->va_start)
79844 p = &(*p)->rb_right;
79845 else
79846 BUG();
79847 @@ -1245,6 +1287,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
79848 struct vm_struct *area;
79849
79850 BUG_ON(in_interrupt());
79851 +
79852 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
79853 + if (flags & VM_KERNEXEC) {
79854 + if (start != VMALLOC_START || end != VMALLOC_END)
79855 + return NULL;
79856 + start = (unsigned long)MODULES_EXEC_VADDR;
79857 + end = (unsigned long)MODULES_EXEC_END;
79858 + }
79859 +#endif
79860 +
79861 if (flags & VM_IOREMAP) {
79862 int bit = fls(size);
79863
79864 @@ -1484,6 +1536,11 @@ void *vmap(struct page **pages, unsigned int count,
79865 if (count > totalram_pages)
79866 return NULL;
79867
79868 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
79869 + if (!(pgprot_val(prot) & _PAGE_NX))
79870 + flags |= VM_KERNEXEC;
79871 +#endif
79872 +
79873 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
79874 __builtin_return_address(0));
79875 if (!area)
79876 @@ -1594,6 +1651,14 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
79877 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
79878 return NULL;
79879
79880 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
79881 + if (!(pgprot_val(prot) & _PAGE_NX))
79882 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
79883 + VMALLOC_START, VMALLOC_END, node,
79884 + gfp_mask, caller);
79885 + else
79886 +#endif
79887 +
79888 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
79889 VMALLOC_START, VMALLOC_END, node,
79890 gfp_mask, caller);
79891 @@ -1619,6 +1684,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
79892 return addr;
79893 }
79894
79895 +#undef __vmalloc
79896 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
79897 {
79898 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
79899 @@ -1635,6 +1701,7 @@ EXPORT_SYMBOL(__vmalloc);
79900 * For tight control over page level allocator and protection flags
79901 * use __vmalloc() instead.
79902 */
79903 +#undef vmalloc
79904 void *vmalloc(unsigned long size)
79905 {
79906 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
79907 @@ -1649,6 +1716,7 @@ EXPORT_SYMBOL(vmalloc);
79908 * The resulting memory area is zeroed so it can be mapped to userspace
79909 * without leaking data.
79910 */
79911 +#undef vmalloc_user
79912 void *vmalloc_user(unsigned long size)
79913 {
79914 struct vm_struct *area;
79915 @@ -1676,6 +1744,7 @@ EXPORT_SYMBOL(vmalloc_user);
79916 * For tight control over page level allocator and protection flags
79917 * use __vmalloc() instead.
79918 */
79919 +#undef vmalloc_node
79920 void *vmalloc_node(unsigned long size, int node)
79921 {
79922 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
79923 @@ -1698,10 +1767,10 @@ EXPORT_SYMBOL(vmalloc_node);
79924 * For tight control over page level allocator and protection flags
79925 * use __vmalloc() instead.
79926 */
79927 -
79928 +#undef vmalloc_exec
79929 void *vmalloc_exec(unsigned long size)
79930 {
79931 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
79932 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
79933 -1, __builtin_return_address(0));
79934 }
79935
79936 @@ -1720,6 +1789,7 @@ void *vmalloc_exec(unsigned long size)
79937 * Allocate enough 32bit PA addressable pages to cover @size from the
79938 * page level allocator and map them into contiguous kernel virtual space.
79939 */
79940 +#undef vmalloc_32
79941 void *vmalloc_32(unsigned long size)
79942 {
79943 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
79944 @@ -1734,6 +1804,7 @@ EXPORT_SYMBOL(vmalloc_32);
79945 * The resulting memory area is 32bit addressable and zeroed so it can be
79946 * mapped to userspace without leaking data.
79947 */
79948 +#undef vmalloc_32_user
79949 void *vmalloc_32_user(unsigned long size)
79950 {
79951 struct vm_struct *area;
79952 @@ -1998,6 +2069,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
79953 unsigned long uaddr = vma->vm_start;
79954 unsigned long usize = vma->vm_end - vma->vm_start;
79955
79956 + BUG_ON(vma->vm_mirror);
79957 +
79958 if ((PAGE_SIZE-1) & (unsigned long)addr)
79959 return -EINVAL;
79960
79961 diff --git a/mm/vmstat.c b/mm/vmstat.c
79962 index 42d76c6..5643dc4 100644
79963 --- a/mm/vmstat.c
79964 +++ b/mm/vmstat.c
79965 @@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
79966 *
79967 * vm_stat contains the global counters
79968 */
79969 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
79970 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
79971 EXPORT_SYMBOL(vm_stat);
79972
79973 #ifdef CONFIG_SMP
79974 @@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
79975 v = p->vm_stat_diff[i];
79976 p->vm_stat_diff[i] = 0;
79977 local_irq_restore(flags);
79978 - atomic_long_add(v, &zone->vm_stat[i]);
79979 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
79980 global_diff[i] += v;
79981 #ifdef CONFIG_NUMA
79982 /* 3 seconds idle till flush */
79983 @@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
79984
79985 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
79986 if (global_diff[i])
79987 - atomic_long_add(global_diff[i], &vm_stat[i]);
79988 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
79989 }
79990
79991 #endif
79992 @@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
79993 start_cpu_timer(cpu);
79994 #endif
79995 #ifdef CONFIG_PROC_FS
79996 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
79997 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
79998 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
79999 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
80000 + {
80001 + mode_t gr_mode = S_IRUGO;
80002 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
80003 + gr_mode = S_IRUSR;
80004 +#endif
80005 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
80006 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
80007 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
80008 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
80009 +#else
80010 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
80011 +#endif
80012 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
80013 + }
80014 #endif
80015 return 0;
80016 }
80017 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
80018 index a29c5ab..6143f20 100644
80019 --- a/net/8021q/vlan.c
80020 +++ b/net/8021q/vlan.c
80021 @@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
80022 err = -EPERM;
80023 if (!capable(CAP_NET_ADMIN))
80024 break;
80025 - if ((args.u.name_type >= 0) &&
80026 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
80027 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
80028 struct vlan_net *vn;
80029
80030 vn = net_generic(net, vlan_net_id);
80031 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
80032 index a2d2984..f9eb711 100644
80033 --- a/net/9p/trans_fd.c
80034 +++ b/net/9p/trans_fd.c
80035 @@ -419,7 +419,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
80036 oldfs = get_fs();
80037 set_fs(get_ds());
80038 /* The cast to a user pointer is valid due to the set_fs() */
80039 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
80040 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
80041 set_fs(oldfs);
80042
80043 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
80044 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
80045 index 02cc7e7..4514f1b 100644
80046 --- a/net/atm/atm_misc.c
80047 +++ b/net/atm/atm_misc.c
80048 @@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int truesize)
80049 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
80050 return 1;
80051 atm_return(vcc,truesize);
80052 - atomic_inc(&vcc->stats->rx_drop);
80053 + atomic_inc_unchecked(&vcc->stats->rx_drop);
80054 return 0;
80055 }
80056
80057 @@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
80058 }
80059 }
80060 atm_return(vcc,guess);
80061 - atomic_inc(&vcc->stats->rx_drop);
80062 + atomic_inc_unchecked(&vcc->stats->rx_drop);
80063 return NULL;
80064 }
80065
80066 @@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafprm *tp)
80067
80068 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
80069 {
80070 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
80071 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
80072 __SONET_ITEMS
80073 #undef __HANDLE_ITEM
80074 }
80075 @@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
80076
80077 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
80078 {
80079 -#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
80080 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
80081 __SONET_ITEMS
80082 #undef __HANDLE_ITEM
80083 }
80084 diff --git a/net/atm/lec.h b/net/atm/lec.h
80085 index 9d14d19..5c145f3 100644
80086 --- a/net/atm/lec.h
80087 +++ b/net/atm/lec.h
80088 @@ -48,7 +48,7 @@ struct lane2_ops {
80089 const u8 *tlvs, u32 sizeoftlvs);
80090 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
80091 const u8 *tlvs, u32 sizeoftlvs);
80092 -};
80093 +} __no_const;
80094
80095 /*
80096 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
80097 diff --git a/net/atm/mpc.h b/net/atm/mpc.h
80098 index 0919a88..a23d54e 100644
80099 --- a/net/atm/mpc.h
80100 +++ b/net/atm/mpc.h
80101 @@ -33,7 +33,7 @@ struct mpoa_client {
80102 struct mpc_parameters parameters; /* parameters for this client */
80103
80104 const struct net_device_ops *old_ops;
80105 - struct net_device_ops new_ops;
80106 + net_device_ops_no_const new_ops;
80107 };
80108
80109
80110 diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
80111 index 4504a4b..1733f1e 100644
80112 --- a/net/atm/mpoa_caches.c
80113 +++ b/net/atm/mpoa_caches.c
80114 @@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_client *client)
80115 struct timeval now;
80116 struct k_message msg;
80117
80118 + pax_track_stack();
80119 +
80120 do_gettimeofday(&now);
80121
80122 write_lock_irq(&client->egress_lock);
80123 diff --git a/net/atm/proc.c b/net/atm/proc.c
80124 index ab8419a..aa91497 100644
80125 --- a/net/atm/proc.c
80126 +++ b/net/atm/proc.c
80127 @@ -43,9 +43,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
80128 const struct k_atm_aal_stats *stats)
80129 {
80130 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
80131 - atomic_read(&stats->tx),atomic_read(&stats->tx_err),
80132 - atomic_read(&stats->rx),atomic_read(&stats->rx_err),
80133 - atomic_read(&stats->rx_drop));
80134 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
80135 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
80136 + atomic_read_unchecked(&stats->rx_drop));
80137 }
80138
80139 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
80140 @@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
80141 {
80142 struct sock *sk = sk_atm(vcc);
80143
80144 +#ifdef CONFIG_GRKERNSEC_HIDESYM
80145 + seq_printf(seq, "%p ", NULL);
80146 +#else
80147 seq_printf(seq, "%p ", vcc);
80148 +#endif
80149 +
80150 if (!vcc->dev)
80151 seq_printf(seq, "Unassigned ");
80152 else
80153 @@ -214,7 +219,11 @@ static void svc_info(struct seq_file *seq, struct atm_vcc *vcc)
80154 {
80155 if (!vcc->dev)
80156 seq_printf(seq, sizeof(void *) == 4 ?
80157 +#ifdef CONFIG_GRKERNSEC_HIDESYM
80158 + "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
80159 +#else
80160 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
80161 +#endif
80162 else
80163 seq_printf(seq, "%3d %3d %5d ",
80164 vcc->dev->number, vcc->vpi, vcc->vci);
80165 diff --git a/net/atm/resources.c b/net/atm/resources.c
80166 index 56b7322..c48b84e 100644
80167 --- a/net/atm/resources.c
80168 +++ b/net/atm/resources.c
80169 @@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *dev)
80170 static void copy_aal_stats(struct k_atm_aal_stats *from,
80171 struct atm_aal_stats *to)
80172 {
80173 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
80174 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
80175 __AAL_STAT_ITEMS
80176 #undef __HANDLE_ITEM
80177 }
80178 @@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
80179 static void subtract_aal_stats(struct k_atm_aal_stats *from,
80180 struct atm_aal_stats *to)
80181 {
80182 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
80183 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
80184 __AAL_STAT_ITEMS
80185 #undef __HANDLE_ITEM
80186 }
80187 diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
80188 index 8567d47..bba2292 100644
80189 --- a/net/bridge/br_private.h
80190 +++ b/net/bridge/br_private.h
80191 @@ -255,7 +255,7 @@ extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
80192
80193 #ifdef CONFIG_SYSFS
80194 /* br_sysfs_if.c */
80195 -extern struct sysfs_ops brport_sysfs_ops;
80196 +extern const struct sysfs_ops brport_sysfs_ops;
80197 extern int br_sysfs_addif(struct net_bridge_port *p);
80198
80199 /* br_sysfs_br.c */
80200 diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
80201 index 9a52ac5..c97538e 100644
80202 --- a/net/bridge/br_stp_if.c
80203 +++ b/net/bridge/br_stp_if.c
80204 @@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridge *br)
80205 char *envp[] = { NULL };
80206
80207 if (br->stp_enabled == BR_USER_STP) {
80208 - r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
80209 + r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
80210 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
80211 br->dev->name, r);
80212
80213 diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
80214 index 820643a..ce77fb3 100644
80215 --- a/net/bridge/br_sysfs_if.c
80216 +++ b/net/bridge/br_sysfs_if.c
80217 @@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobject * kobj,
80218 return ret;
80219 }
80220
80221 -struct sysfs_ops brport_sysfs_ops = {
80222 +const struct sysfs_ops brport_sysfs_ops = {
80223 .show = brport_show,
80224 .store = brport_store,
80225 };
80226 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
80227 index d73d47f..72df42a 100644
80228 --- a/net/bridge/netfilter/ebtables.c
80229 +++ b/net/bridge/netfilter/ebtables.c
80230 @@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
80231 unsigned int entries_size, nentries;
80232 char *entries;
80233
80234 + pax_track_stack();
80235 +
80236 if (cmd == EBT_SO_GET_ENTRIES) {
80237 entries_size = t->private->entries_size;
80238 nentries = t->private->nentries;
80239 diff --git a/net/can/bcm.c b/net/can/bcm.c
80240 index 2ffd2e0..72a7486 100644
80241 --- a/net/can/bcm.c
80242 +++ b/net/can/bcm.c
80243 @@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file *m, void *v)
80244 struct bcm_sock *bo = bcm_sk(sk);
80245 struct bcm_op *op;
80246
80247 +#ifdef CONFIG_GRKERNSEC_HIDESYM
80248 + seq_printf(m, ">>> socket %p", NULL);
80249 + seq_printf(m, " / sk %p", NULL);
80250 + seq_printf(m, " / bo %p", NULL);
80251 +#else
80252 seq_printf(m, ">>> socket %p", sk->sk_socket);
80253 seq_printf(m, " / sk %p", sk);
80254 seq_printf(m, " / bo %p", bo);
80255 +#endif
80256 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
80257 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
80258 seq_printf(m, " <<<\n");
80259 diff --git a/net/compat.c b/net/compat.c
80260 index 9559afc..ccd74e1 100644
80261 --- a/net/compat.c
80262 +++ b/net/compat.c
80263 @@ -69,9 +69,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
80264 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
80265 __get_user(kmsg->msg_flags, &umsg->msg_flags))
80266 return -EFAULT;
80267 - kmsg->msg_name = compat_ptr(tmp1);
80268 - kmsg->msg_iov = compat_ptr(tmp2);
80269 - kmsg->msg_control = compat_ptr(tmp3);
80270 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
80271 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
80272 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
80273 return 0;
80274 }
80275
80276 @@ -94,7 +94,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
80277 kern_msg->msg_name = NULL;
80278
80279 tot_len = iov_from_user_compat_to_kern(kern_iov,
80280 - (struct compat_iovec __user *)kern_msg->msg_iov,
80281 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
80282 kern_msg->msg_iovlen);
80283 if (tot_len >= 0)
80284 kern_msg->msg_iov = kern_iov;
80285 @@ -114,20 +114,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
80286
80287 #define CMSG_COMPAT_FIRSTHDR(msg) \
80288 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
80289 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
80290 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
80291 (struct compat_cmsghdr __user *)NULL)
80292
80293 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
80294 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
80295 (ucmlen) <= (unsigned long) \
80296 ((mhdr)->msg_controllen - \
80297 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
80298 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
80299
80300 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
80301 struct compat_cmsghdr __user *cmsg, int cmsg_len)
80302 {
80303 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
80304 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
80305 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
80306 msg->msg_controllen)
80307 return NULL;
80308 return (struct compat_cmsghdr __user *)ptr;
80309 @@ -219,7 +219,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
80310 {
80311 struct compat_timeval ctv;
80312 struct compat_timespec cts[3];
80313 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
80314 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
80315 struct compat_cmsghdr cmhdr;
80316 int cmlen;
80317
80318 @@ -271,7 +271,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
80319
80320 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
80321 {
80322 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
80323 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
80324 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
80325 int fdnum = scm->fp->count;
80326 struct file **fp = scm->fp->fp;
80327 @@ -433,7 +433,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
80328 len = sizeof(ktime);
80329 old_fs = get_fs();
80330 set_fs(KERNEL_DS);
80331 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
80332 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
80333 set_fs(old_fs);
80334
80335 if (!err) {
80336 @@ -570,7 +570,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
80337 case MCAST_JOIN_GROUP:
80338 case MCAST_LEAVE_GROUP:
80339 {
80340 - struct compat_group_req __user *gr32 = (void *)optval;
80341 + struct compat_group_req __user *gr32 = (void __user *)optval;
80342 struct group_req __user *kgr =
80343 compat_alloc_user_space(sizeof(struct group_req));
80344 u32 interface;
80345 @@ -591,7 +591,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
80346 case MCAST_BLOCK_SOURCE:
80347 case MCAST_UNBLOCK_SOURCE:
80348 {
80349 - struct compat_group_source_req __user *gsr32 = (void *)optval;
80350 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
80351 struct group_source_req __user *kgsr = compat_alloc_user_space(
80352 sizeof(struct group_source_req));
80353 u32 interface;
80354 @@ -612,7 +612,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
80355 }
80356 case MCAST_MSFILTER:
80357 {
80358 - struct compat_group_filter __user *gf32 = (void *)optval;
80359 + struct compat_group_filter __user *gf32 = (void __user *)optval;
80360 struct group_filter __user *kgf;
80361 u32 interface, fmode, numsrc;
80362
80363 diff --git a/net/core/dev.c b/net/core/dev.c
80364 index 84a0705..575db4c 100644
80365 --- a/net/core/dev.c
80366 +++ b/net/core/dev.c
80367 @@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const char *name)
80368 if (no_module && capable(CAP_NET_ADMIN))
80369 no_module = request_module("netdev-%s", name);
80370 if (no_module && capable(CAP_SYS_MODULE)) {
80371 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
80372 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
80373 +#else
80374 if (!request_module("%s", name))
80375 pr_err("Loading kernel module for a network device "
80376 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
80377 "instead\n", name);
80378 +#endif
80379 }
80380 }
80381 EXPORT_SYMBOL(dev_load);
80382 @@ -1654,7 +1658,7 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
80383
80384 struct dev_gso_cb {
80385 void (*destructor)(struct sk_buff *skb);
80386 -};
80387 +} __no_const;
80388
80389 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
80390
80391 @@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
80392 }
80393 EXPORT_SYMBOL(netif_rx_ni);
80394
80395 -static void net_tx_action(struct softirq_action *h)
80396 +static void net_tx_action(void)
80397 {
80398 struct softnet_data *sd = &__get_cpu_var(softnet_data);
80399
80400 @@ -2827,7 +2831,7 @@ void netif_napi_del(struct napi_struct *napi)
80401 EXPORT_SYMBOL(netif_napi_del);
80402
80403
80404 -static void net_rx_action(struct softirq_action *h)
80405 +static void net_rx_action(void)
80406 {
80407 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
80408 unsigned long time_limit = jiffies + 2;
80409 diff --git a/net/core/flow.c b/net/core/flow.c
80410 index 9601587..8c4824e 100644
80411 --- a/net/core/flow.c
80412 +++ b/net/core/flow.c
80413 @@ -35,11 +35,11 @@ struct flow_cache_entry {
80414 atomic_t *object_ref;
80415 };
80416
80417 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
80418 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
80419
80420 static u32 flow_hash_shift;
80421 #define flow_hash_size (1 << flow_hash_shift)
80422 -static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
80423 +static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
80424
80425 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
80426
80427 @@ -52,7 +52,7 @@ struct flow_percpu_info {
80428 u32 hash_rnd;
80429 int count;
80430 };
80431 -static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
80432 +static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
80433
80434 #define flow_hash_rnd_recalc(cpu) \
80435 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
80436 @@ -69,7 +69,7 @@ struct flow_flush_info {
80437 atomic_t cpuleft;
80438 struct completion completion;
80439 };
80440 -static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
80441 +static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
80442
80443 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
80444
80445 @@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
80446 if (fle->family == family &&
80447 fle->dir == dir &&
80448 flow_key_compare(key, &fle->key) == 0) {
80449 - if (fle->genid == atomic_read(&flow_cache_genid)) {
80450 + if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
80451 void *ret = fle->object;
80452
80453 if (ret)
80454 @@ -228,7 +228,7 @@ nocache:
80455 err = resolver(net, key, family, dir, &obj, &obj_ref);
80456
80457 if (fle && !err) {
80458 - fle->genid = atomic_read(&flow_cache_genid);
80459 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
80460
80461 if (fle->object)
80462 atomic_dec(fle->object_ref);
80463 @@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(unsigned long data)
80464
80465 fle = flow_table(cpu)[i];
80466 for (; fle; fle = fle->next) {
80467 - unsigned genid = atomic_read(&flow_cache_genid);
80468 + unsigned genid = atomic_read_unchecked(&flow_cache_genid);
80469
80470 if (!fle->object || fle->genid == genid)
80471 continue;
80472 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
80473 index d4fd895..ac9b1e6 100644
80474 --- a/net/core/rtnetlink.c
80475 +++ b/net/core/rtnetlink.c
80476 @@ -57,7 +57,7 @@ struct rtnl_link
80477 {
80478 rtnl_doit_func doit;
80479 rtnl_dumpit_func dumpit;
80480 -};
80481 +} __no_const;
80482
80483 static DEFINE_MUTEX(rtnl_mutex);
80484
80485 diff --git a/net/core/scm.c b/net/core/scm.c
80486 index d98eafc..1a190a9 100644
80487 --- a/net/core/scm.c
80488 +++ b/net/core/scm.c
80489 @@ -191,7 +191,7 @@ error:
80490 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
80491 {
80492 struct cmsghdr __user *cm
80493 - = (__force struct cmsghdr __user *)msg->msg_control;
80494 + = (struct cmsghdr __force_user *)msg->msg_control;
80495 struct cmsghdr cmhdr;
80496 int cmlen = CMSG_LEN(len);
80497 int err;
80498 @@ -214,7 +214,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
80499 err = -EFAULT;
80500 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
80501 goto out;
80502 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
80503 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
80504 goto out;
80505 cmlen = CMSG_SPACE(len);
80506 if (msg->msg_controllen < cmlen)
80507 @@ -229,7 +229,7 @@ out:
80508 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
80509 {
80510 struct cmsghdr __user *cm
80511 - = (__force struct cmsghdr __user*)msg->msg_control;
80512 + = (struct cmsghdr __force_user *)msg->msg_control;
80513
80514 int fdmax = 0;
80515 int fdnum = scm->fp->count;
80516 @@ -249,7 +249,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
80517 if (fdnum < fdmax)
80518 fdmax = fdnum;
80519
80520 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
80521 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
80522 i++, cmfptr++)
80523 {
80524 int new_fd;
80525 diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
80526 index 45329d7..626aaa6 100644
80527 --- a/net/core/secure_seq.c
80528 +++ b/net/core/secure_seq.c
80529 @@ -57,7 +57,7 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
80530 EXPORT_SYMBOL(secure_tcpv6_sequence_number);
80531
80532 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
80533 - __be16 dport)
80534 + __be16 dport)
80535 {
80536 u32 secret[MD5_MESSAGE_BYTES / 4];
80537 u32 hash[MD5_DIGEST_WORDS];
80538 @@ -71,7 +71,6 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
80539 secret[i] = net_secret[i];
80540
80541 md5_transform(hash, secret);
80542 -
80543 return hash[0];
80544 }
80545 #endif
80546 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
80547 index a807f8c..65f906f 100644
80548 --- a/net/core/skbuff.c
80549 +++ b/net/core/skbuff.c
80550 @@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
80551 struct sk_buff *frag_iter;
80552 struct sock *sk = skb->sk;
80553
80554 + pax_track_stack();
80555 +
80556 /*
80557 * __skb_splice_bits() only fails if the output has no room left,
80558 * so no point in going over the frag_list for the error case.
80559 diff --git a/net/core/sock.c b/net/core/sock.c
80560 index 6605e75..3acebda 100644
80561 --- a/net/core/sock.c
80562 +++ b/net/core/sock.c
80563 @@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
80564 break;
80565
80566 case SO_PEERCRED:
80567 + {
80568 + struct ucred peercred;
80569 if (len > sizeof(sk->sk_peercred))
80570 len = sizeof(sk->sk_peercred);
80571 - if (copy_to_user(optval, &sk->sk_peercred, len))
80572 + peercred = sk->sk_peercred;
80573 + if (copy_to_user(optval, &peercred, len))
80574 return -EFAULT;
80575 goto lenout;
80576 + }
80577
80578 case SO_PEERNAME:
80579 {
80580 @@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
80581 */
80582 smp_wmb();
80583 atomic_set(&sk->sk_refcnt, 1);
80584 - atomic_set(&sk->sk_drops, 0);
80585 + atomic_set_unchecked(&sk->sk_drops, 0);
80586 }
80587 EXPORT_SYMBOL(sock_init_data);
80588
80589 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
80590 index 2036568..c55883d 100644
80591 --- a/net/decnet/sysctl_net_decnet.c
80592 +++ b/net/decnet/sysctl_net_decnet.c
80593 @@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
80594
80595 if (len > *lenp) len = *lenp;
80596
80597 - if (copy_to_user(buffer, addr, len))
80598 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
80599 return -EFAULT;
80600
80601 *lenp = len;
80602 @@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
80603
80604 if (len > *lenp) len = *lenp;
80605
80606 - if (copy_to_user(buffer, devname, len))
80607 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
80608 return -EFAULT;
80609
80610 *lenp = len;
80611 diff --git a/net/econet/Kconfig b/net/econet/Kconfig
80612 index 39a2d29..f39c0fe 100644
80613 --- a/net/econet/Kconfig
80614 +++ b/net/econet/Kconfig
80615 @@ -4,7 +4,7 @@
80616
80617 config ECONET
80618 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
80619 - depends on EXPERIMENTAL && INET
80620 + depends on EXPERIMENTAL && INET && BROKEN
80621 ---help---
80622 Econet is a fairly old and slow networking protocol mainly used by
80623 Acorn computers to access file and print servers. It uses native
80624 diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
80625 index a413b1b..380849c 100644
80626 --- a/net/ieee802154/dgram.c
80627 +++ b/net/ieee802154/dgram.c
80628 @@ -318,7 +318,7 @@ out:
80629 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
80630 {
80631 if (sock_queue_rcv_skb(sk, skb) < 0) {
80632 - atomic_inc(&sk->sk_drops);
80633 + atomic_inc_unchecked(&sk->sk_drops);
80634 kfree_skb(skb);
80635 return NET_RX_DROP;
80636 }
80637 diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
80638 index 30e74ee..bfc6ee0 100644
80639 --- a/net/ieee802154/raw.c
80640 +++ b/net/ieee802154/raw.c
80641 @@ -206,7 +206,7 @@ out:
80642 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
80643 {
80644 if (sock_queue_rcv_skb(sk, skb) < 0) {
80645 - atomic_inc(&sk->sk_drops);
80646 + atomic_inc_unchecked(&sk->sk_drops);
80647 kfree_skb(skb);
80648 return NET_RX_DROP;
80649 }
80650 diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
80651 index dba56d2..acee5d6 100644
80652 --- a/net/ipv4/inet_diag.c
80653 +++ b/net/ipv4/inet_diag.c
80654 @@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct sock *sk,
80655 r->idiag_retrans = 0;
80656
80657 r->id.idiag_if = sk->sk_bound_dev_if;
80658 +#ifdef CONFIG_GRKERNSEC_HIDESYM
80659 + r->id.idiag_cookie[0] = 0;
80660 + r->id.idiag_cookie[1] = 0;
80661 +#else
80662 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
80663 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
80664 +#endif
80665
80666 r->id.idiag_sport = inet->sport;
80667 r->id.idiag_dport = inet->dport;
80668 @@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
80669 r->idiag_family = tw->tw_family;
80670 r->idiag_retrans = 0;
80671 r->id.idiag_if = tw->tw_bound_dev_if;
80672 +
80673 +#ifdef CONFIG_GRKERNSEC_HIDESYM
80674 + r->id.idiag_cookie[0] = 0;
80675 + r->id.idiag_cookie[1] = 0;
80676 +#else
80677 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
80678 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
80679 +#endif
80680 +
80681 r->id.idiag_sport = tw->tw_sport;
80682 r->id.idiag_dport = tw->tw_dport;
80683 r->id.idiag_src[0] = tw->tw_rcv_saddr;
80684 @@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
80685 if (sk == NULL)
80686 goto unlock;
80687
80688 +#ifndef CONFIG_GRKERNSEC_HIDESYM
80689 err = -ESTALE;
80690 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
80691 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
80692 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
80693 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
80694 goto out;
80695 +#endif
80696
80697 err = -ENOMEM;
80698 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
80699 @@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
80700 r->idiag_retrans = req->retrans;
80701
80702 r->id.idiag_if = sk->sk_bound_dev_if;
80703 +
80704 +#ifdef CONFIG_GRKERNSEC_HIDESYM
80705 + r->id.idiag_cookie[0] = 0;
80706 + r->id.idiag_cookie[1] = 0;
80707 +#else
80708 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
80709 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
80710 +#endif
80711
80712 tmo = req->expires - jiffies;
80713 if (tmo < 0)
80714 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
80715 index d717267..56de7e7 100644
80716 --- a/net/ipv4/inet_hashtables.c
80717 +++ b/net/ipv4/inet_hashtables.c
80718 @@ -18,12 +18,15 @@
80719 #include <linux/sched.h>
80720 #include <linux/slab.h>
80721 #include <linux/wait.h>
80722 +#include <linux/security.h>
80723
80724 #include <net/inet_connection_sock.h>
80725 #include <net/inet_hashtables.h>
80726 #include <net/secure_seq.h>
80727 #include <net/ip.h>
80728
80729 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
80730 +
80731 /*
80732 * Allocate and initialize a new local port bind bucket.
80733 * The bindhash mutex for snum's hash chain must be held here.
80734 @@ -491,6 +494,8 @@ ok:
80735 }
80736 spin_unlock(&head->lock);
80737
80738 + gr_update_task_in_ip_table(current, inet_sk(sk));
80739 +
80740 if (tw) {
80741 inet_twsk_deschedule(tw, death_row);
80742 inet_twsk_put(tw);
80743 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
80744 index 13b229f..6956484 100644
80745 --- a/net/ipv4/inetpeer.c
80746 +++ b/net/ipv4/inetpeer.c
80747 @@ -367,6 +367,8 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
80748 struct inet_peer *p, *n;
80749 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
80750
80751 + pax_track_stack();
80752 +
80753 /* Look up for the address quickly. */
80754 read_lock_bh(&peer_pool_lock);
80755 p = lookup(daddr, NULL);
80756 @@ -390,7 +392,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
80757 return NULL;
80758 n->v4daddr = daddr;
80759 atomic_set(&n->refcnt, 1);
80760 - atomic_set(&n->rid, 0);
80761 + atomic_set_unchecked(&n->rid, 0);
80762 n->ip_id_count = secure_ip_id(daddr);
80763 n->tcp_ts_stamp = 0;
80764
80765 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
80766 index d3fe10b..feeafc9 100644
80767 --- a/net/ipv4/ip_fragment.c
80768 +++ b/net/ipv4/ip_fragment.c
80769 @@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
80770 return 0;
80771
80772 start = qp->rid;
80773 - end = atomic_inc_return(&peer->rid);
80774 + end = atomic_inc_return_unchecked(&peer->rid);
80775 qp->rid = end;
80776
80777 rc = qp->q.fragments && (end - start) > max;
80778 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
80779 index e982b5c..f079d75 100644
80780 --- a/net/ipv4/ip_sockglue.c
80781 +++ b/net/ipv4/ip_sockglue.c
80782 @@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
80783 int val;
80784 int len;
80785
80786 + pax_track_stack();
80787 +
80788 if (level != SOL_IP)
80789 return -EOPNOTSUPP;
80790
80791 @@ -1173,7 +1175,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
80792 if (sk->sk_type != SOCK_STREAM)
80793 return -ENOPROTOOPT;
80794
80795 - msg.msg_control = optval;
80796 + msg.msg_control = (void __force_kernel *)optval;
80797 msg.msg_controllen = len;
80798 msg.msg_flags = 0;
80799
80800 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
80801 index f8d04c2..c1188f2 100644
80802 --- a/net/ipv4/ipconfig.c
80803 +++ b/net/ipv4/ipconfig.c
80804 @@ -295,7 +295,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
80805
80806 mm_segment_t oldfs = get_fs();
80807 set_fs(get_ds());
80808 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
80809 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
80810 set_fs(oldfs);
80811 return res;
80812 }
80813 @@ -306,7 +306,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
80814
80815 mm_segment_t oldfs = get_fs();
80816 set_fs(get_ds());
80817 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
80818 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
80819 set_fs(oldfs);
80820 return res;
80821 }
80822 @@ -317,7 +317,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
80823
80824 mm_segment_t oldfs = get_fs();
80825 set_fs(get_ds());
80826 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
80827 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
80828 set_fs(oldfs);
80829 return res;
80830 }
80831 diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
80832 index c8b0cc3..4da5ae2 100644
80833 --- a/net/ipv4/netfilter/arp_tables.c
80834 +++ b/net/ipv4/netfilter/arp_tables.c
80835 @@ -934,6 +934,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
80836 private = &tmp;
80837 }
80838 #endif
80839 + memset(&info, 0, sizeof(info));
80840 info.valid_hooks = t->valid_hooks;
80841 memcpy(info.hook_entry, private->hook_entry,
80842 sizeof(info.hook_entry));
80843 diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
80844 index c156db2..e772975 100644
80845 --- a/net/ipv4/netfilter/ip_queue.c
80846 +++ b/net/ipv4/netfilter/ip_queue.c
80847 @@ -286,6 +286,9 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
80848
80849 if (v->data_len < sizeof(*user_iph))
80850 return 0;
80851 + if (v->data_len > 65535)
80852 + return -EMSGSIZE;
80853 +
80854 diff = v->data_len - e->skb->len;
80855 if (diff < 0) {
80856 if (pskb_trim(e->skb, v->data_len))
80857 @@ -409,7 +412,8 @@ ipq_dev_drop(int ifindex)
80858 static inline void
80859 __ipq_rcv_skb(struct sk_buff *skb)
80860 {
80861 - int status, type, pid, flags, nlmsglen, skblen;
80862 + int status, type, pid, flags;
80863 + unsigned int nlmsglen, skblen;
80864 struct nlmsghdr *nlh;
80865
80866 skblen = skb->len;
80867 diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
80868 index 0606db1..02e7e4c 100644
80869 --- a/net/ipv4/netfilter/ip_tables.c
80870 +++ b/net/ipv4/netfilter/ip_tables.c
80871 @@ -1141,6 +1141,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
80872 private = &tmp;
80873 }
80874 #endif
80875 + memset(&info, 0, sizeof(info));
80876 info.valid_hooks = t->valid_hooks;
80877 memcpy(info.hook_entry, private->hook_entry,
80878 sizeof(info.hook_entry));
80879 diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
80880 index d9521f6..3c3eb25 100644
80881 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
80882 +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
80883 @@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
80884
80885 *len = 0;
80886
80887 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
80888 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
80889 if (*octets == NULL) {
80890 if (net_ratelimit())
80891 printk("OOM in bsalg (%d)\n", __LINE__);
80892 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
80893 index ab996f9..3da5f96 100644
80894 --- a/net/ipv4/raw.c
80895 +++ b/net/ipv4/raw.c
80896 @@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
80897 /* Charge it to the socket. */
80898
80899 if (sock_queue_rcv_skb(sk, skb) < 0) {
80900 - atomic_inc(&sk->sk_drops);
80901 + atomic_inc_unchecked(&sk->sk_drops);
80902 kfree_skb(skb);
80903 return NET_RX_DROP;
80904 }
80905 @@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
80906 int raw_rcv(struct sock *sk, struct sk_buff *skb)
80907 {
80908 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
80909 - atomic_inc(&sk->sk_drops);
80910 + atomic_inc_unchecked(&sk->sk_drops);
80911 kfree_skb(skb);
80912 return NET_RX_DROP;
80913 }
80914 @@ -724,16 +724,23 @@ static int raw_init(struct sock *sk)
80915
80916 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
80917 {
80918 + struct icmp_filter filter;
80919 +
80920 + if (optlen < 0)
80921 + return -EINVAL;
80922 if (optlen > sizeof(struct icmp_filter))
80923 optlen = sizeof(struct icmp_filter);
80924 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
80925 + if (copy_from_user(&filter, optval, optlen))
80926 return -EFAULT;
80927 + raw_sk(sk)->filter = filter;
80928 +
80929 return 0;
80930 }
80931
80932 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
80933 {
80934 int len, ret = -EFAULT;
80935 + struct icmp_filter filter;
80936
80937 if (get_user(len, optlen))
80938 goto out;
80939 @@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
80940 if (len > sizeof(struct icmp_filter))
80941 len = sizeof(struct icmp_filter);
80942 ret = -EFAULT;
80943 - if (put_user(len, optlen) ||
80944 - copy_to_user(optval, &raw_sk(sk)->filter, len))
80945 + filter = raw_sk(sk)->filter;
80946 + if (put_user(len, optlen) || len > sizeof filter ||
80947 + copy_to_user(optval, &filter, len))
80948 goto out;
80949 ret = 0;
80950 out: return ret;
80951 @@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
80952 sk_wmem_alloc_get(sp),
80953 sk_rmem_alloc_get(sp),
80954 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
80955 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
80956 + atomic_read(&sp->sk_refcnt),
80957 +#ifdef CONFIG_GRKERNSEC_HIDESYM
80958 + NULL,
80959 +#else
80960 + sp,
80961 +#endif
80962 + atomic_read_unchecked(&sp->sk_drops));
80963 }
80964
80965 static int raw_seq_show(struct seq_file *seq, void *v)
80966 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
80967 index 58f141b..b759702 100644
80968 --- a/net/ipv4/route.c
80969 +++ b/net/ipv4/route.c
80970 @@ -269,7 +269,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
80971
80972 static inline int rt_genid(struct net *net)
80973 {
80974 - return atomic_read(&net->ipv4.rt_genid);
80975 + return atomic_read_unchecked(&net->ipv4.rt_genid);
80976 }
80977
80978 #ifdef CONFIG_PROC_FS
80979 @@ -889,7 +889,7 @@ static void rt_cache_invalidate(struct net *net)
80980 unsigned char shuffle;
80981
80982 get_random_bytes(&shuffle, sizeof(shuffle));
80983 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
80984 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
80985 }
80986
80987 /*
80988 @@ -3357,7 +3357,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
80989
80990 static __net_init int rt_secret_timer_init(struct net *net)
80991 {
80992 - atomic_set(&net->ipv4.rt_genid,
80993 + atomic_set_unchecked(&net->ipv4.rt_genid,
80994 (int) ((num_physpages ^ (num_physpages>>8)) ^
80995 (jiffies ^ (jiffies >> 7))));
80996
80997 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
80998 index f095659..adc892a 100644
80999 --- a/net/ipv4/tcp.c
81000 +++ b/net/ipv4/tcp.c
81001 @@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
81002 int val;
81003 int err = 0;
81004
81005 + pax_track_stack();
81006 +
81007 /* This is a string value all the others are int's */
81008 if (optname == TCP_CONGESTION) {
81009 char name[TCP_CA_NAME_MAX];
81010 @@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
81011 struct tcp_sock *tp = tcp_sk(sk);
81012 int val, len;
81013
81014 + pax_track_stack();
81015 +
81016 if (get_user(len, optlen))
81017 return -EFAULT;
81018
81019 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
81020 index 6fc7961..33bad4a 100644
81021 --- a/net/ipv4/tcp_ipv4.c
81022 +++ b/net/ipv4/tcp_ipv4.c
81023 @@ -85,6 +85,9 @@
81024 int sysctl_tcp_tw_reuse __read_mostly;
81025 int sysctl_tcp_low_latency __read_mostly;
81026
81027 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81028 +extern int grsec_enable_blackhole;
81029 +#endif
81030
81031 #ifdef CONFIG_TCP_MD5SIG
81032 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
81033 @@ -1543,6 +1546,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
81034 return 0;
81035
81036 reset:
81037 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81038 + if (!grsec_enable_blackhole)
81039 +#endif
81040 tcp_v4_send_reset(rsk, skb);
81041 discard:
81042 kfree_skb(skb);
81043 @@ -1604,12 +1610,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
81044 TCP_SKB_CB(skb)->sacked = 0;
81045
81046 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
81047 - if (!sk)
81048 + if (!sk) {
81049 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81050 + ret = 1;
81051 +#endif
81052 goto no_tcp_socket;
81053 + }
81054
81055 process:
81056 - if (sk->sk_state == TCP_TIME_WAIT)
81057 + if (sk->sk_state == TCP_TIME_WAIT) {
81058 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81059 + ret = 2;
81060 +#endif
81061 goto do_time_wait;
81062 + }
81063
81064 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
81065 goto discard_and_relse;
81066 @@ -1651,6 +1665,10 @@ no_tcp_socket:
81067 bad_packet:
81068 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
81069 } else {
81070 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81071 + if (!grsec_enable_blackhole || (ret == 1 &&
81072 + (skb->dev->flags & IFF_LOOPBACK)))
81073 +#endif
81074 tcp_v4_send_reset(NULL, skb);
81075 }
81076
81077 @@ -2238,7 +2256,11 @@ static void get_openreq4(struct sock *sk, struct request_sock *req,
81078 0, /* non standard timer */
81079 0, /* open_requests have no inode */
81080 atomic_read(&sk->sk_refcnt),
81081 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81082 + NULL,
81083 +#else
81084 req,
81085 +#endif
81086 len);
81087 }
81088
81089 @@ -2280,7 +2302,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
81090 sock_i_uid(sk),
81091 icsk->icsk_probes_out,
81092 sock_i_ino(sk),
81093 - atomic_read(&sk->sk_refcnt), sk,
81094 + atomic_read(&sk->sk_refcnt),
81095 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81096 + NULL,
81097 +#else
81098 + sk,
81099 +#endif
81100 jiffies_to_clock_t(icsk->icsk_rto),
81101 jiffies_to_clock_t(icsk->icsk_ack.ato),
81102 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
81103 @@ -2308,7 +2335,13 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw,
81104 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
81105 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
81106 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
81107 - atomic_read(&tw->tw_refcnt), tw, len);
81108 + atomic_read(&tw->tw_refcnt),
81109 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81110 + NULL,
81111 +#else
81112 + tw,
81113 +#endif
81114 + len);
81115 }
81116
81117 #define TMPSZ 150
81118 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
81119 index 4c03598..e09a8e8 100644
81120 --- a/net/ipv4/tcp_minisocks.c
81121 +++ b/net/ipv4/tcp_minisocks.c
81122 @@ -26,6 +26,10 @@
81123 #include <net/inet_common.h>
81124 #include <net/xfrm.h>
81125
81126 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81127 +extern int grsec_enable_blackhole;
81128 +#endif
81129 +
81130 #ifdef CONFIG_SYSCTL
81131 #define SYNC_INIT 0 /* let the user enable it */
81132 #else
81133 @@ -672,6 +676,10 @@ listen_overflow:
81134
81135 embryonic_reset:
81136 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
81137 +
81138 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81139 + if (!grsec_enable_blackhole)
81140 +#endif
81141 if (!(flg & TCP_FLAG_RST))
81142 req->rsk_ops->send_reset(sk, skb);
81143
81144 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
81145 index af83bdf..ec91cb2 100644
81146 --- a/net/ipv4/tcp_output.c
81147 +++ b/net/ipv4/tcp_output.c
81148 @@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
81149 __u8 *md5_hash_location;
81150 int mss;
81151
81152 + pax_track_stack();
81153 +
81154 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
81155 if (skb == NULL)
81156 return NULL;
81157 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
81158 index 59f5b5e..193860f 100644
81159 --- a/net/ipv4/tcp_probe.c
81160 +++ b/net/ipv4/tcp_probe.c
81161 @@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
81162 if (cnt + width >= len)
81163 break;
81164
81165 - if (copy_to_user(buf + cnt, tbuf, width))
81166 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
81167 return -EFAULT;
81168 cnt += width;
81169 }
81170 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
81171 index 57d5501..a9ed13a 100644
81172 --- a/net/ipv4/tcp_timer.c
81173 +++ b/net/ipv4/tcp_timer.c
81174 @@ -21,6 +21,10 @@
81175 #include <linux/module.h>
81176 #include <net/tcp.h>
81177
81178 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81179 +extern int grsec_lastack_retries;
81180 +#endif
81181 +
81182 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
81183 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
81184 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
81185 @@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock *sk)
81186 }
81187 }
81188
81189 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81190 + if ((sk->sk_state == TCP_LAST_ACK) &&
81191 + (grsec_lastack_retries > 0) &&
81192 + (grsec_lastack_retries < retry_until))
81193 + retry_until = grsec_lastack_retries;
81194 +#endif
81195 +
81196 if (retransmits_timed_out(sk, retry_until)) {
81197 /* Has it gone just too far? */
81198 tcp_write_err(sk);
81199 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
81200 index 0ac8833..58d8c43 100644
81201 --- a/net/ipv4/udp.c
81202 +++ b/net/ipv4/udp.c
81203 @@ -86,6 +86,7 @@
81204 #include <linux/types.h>
81205 #include <linux/fcntl.h>
81206 #include <linux/module.h>
81207 +#include <linux/security.h>
81208 #include <linux/socket.h>
81209 #include <linux/sockios.h>
81210 #include <linux/igmp.h>
81211 @@ -106,6 +107,10 @@
81212 #include <net/xfrm.h>
81213 #include "udp_impl.h"
81214
81215 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81216 +extern int grsec_enable_blackhole;
81217 +#endif
81218 +
81219 struct udp_table udp_table;
81220 EXPORT_SYMBOL(udp_table);
81221
81222 @@ -371,6 +376,9 @@ found:
81223 return s;
81224 }
81225
81226 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
81227 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
81228 +
81229 /*
81230 * This routine is called by the ICMP module when it gets some
81231 * sort of error condition. If err < 0 then the socket should
81232 @@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
81233 dport = usin->sin_port;
81234 if (dport == 0)
81235 return -EINVAL;
81236 +
81237 + err = gr_search_udp_sendmsg(sk, usin);
81238 + if (err)
81239 + return err;
81240 } else {
81241 if (sk->sk_state != TCP_ESTABLISHED)
81242 return -EDESTADDRREQ;
81243 +
81244 + err = gr_search_udp_sendmsg(sk, NULL);
81245 + if (err)
81246 + return err;
81247 +
81248 daddr = inet->daddr;
81249 dport = inet->dport;
81250 /* Open fast path for connected socket.
81251 @@ -945,6 +962,10 @@ try_again:
81252 if (!skb)
81253 goto out;
81254
81255 + err = gr_search_udp_recvmsg(sk, skb);
81256 + if (err)
81257 + goto out_free;
81258 +
81259 ulen = skb->len - sizeof(struct udphdr);
81260 copied = len;
81261 if (copied > ulen)
81262 @@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
81263 if (rc == -ENOMEM) {
81264 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
81265 is_udplite);
81266 - atomic_inc(&sk->sk_drops);
81267 + atomic_inc_unchecked(&sk->sk_drops);
81268 }
81269 goto drop;
81270 }
81271 @@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
81272 goto csum_error;
81273
81274 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
81275 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81276 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
81277 +#endif
81278 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
81279
81280 /*
81281 @@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
81282 sk_wmem_alloc_get(sp),
81283 sk_rmem_alloc_get(sp),
81284 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
81285 - atomic_read(&sp->sk_refcnt), sp,
81286 - atomic_read(&sp->sk_drops), len);
81287 + atomic_read(&sp->sk_refcnt),
81288 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81289 + NULL,
81290 +#else
81291 + sp,
81292 +#endif
81293 + atomic_read_unchecked(&sp->sk_drops), len);
81294 }
81295
81296 int udp4_seq_show(struct seq_file *seq, void *v)
81297 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
81298 index 8ac3d09..fc58c5f 100644
81299 --- a/net/ipv6/addrconf.c
81300 +++ b/net/ipv6/addrconf.c
81301 @@ -2053,7 +2053,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
81302 p.iph.ihl = 5;
81303 p.iph.protocol = IPPROTO_IPV6;
81304 p.iph.ttl = 64;
81305 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
81306 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
81307
81308 if (ops->ndo_do_ioctl) {
81309 mm_segment_t oldfs = get_fs();
81310 diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
81311 index cc4797d..7cfdfcc 100644
81312 --- a/net/ipv6/inet6_connection_sock.c
81313 +++ b/net/ipv6/inet6_connection_sock.c
81314 @@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
81315 #ifdef CONFIG_XFRM
81316 {
81317 struct rt6_info *rt = (struct rt6_info *)dst;
81318 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
81319 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
81320 }
81321 #endif
81322 }
81323 @@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
81324 #ifdef CONFIG_XFRM
81325 if (dst) {
81326 struct rt6_info *rt = (struct rt6_info *)dst;
81327 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
81328 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
81329 sk->sk_dst_cache = NULL;
81330 dst_release(dst);
81331 dst = NULL;
81332 diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
81333 index 093e9b2..f72cddb 100644
81334 --- a/net/ipv6/inet6_hashtables.c
81335 +++ b/net/ipv6/inet6_hashtables.c
81336 @@ -119,7 +119,7 @@ out:
81337 }
81338 EXPORT_SYMBOL(__inet6_lookup_established);
81339
81340 -static int inline compute_score(struct sock *sk, struct net *net,
81341 +static inline int compute_score(struct sock *sk, struct net *net,
81342 const unsigned short hnum,
81343 const struct in6_addr *daddr,
81344 const int dif)
81345 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
81346 index 4f7aaf6..f7acf45 100644
81347 --- a/net/ipv6/ipv6_sockglue.c
81348 +++ b/net/ipv6/ipv6_sockglue.c
81349 @@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
81350 int val, valbool;
81351 int retv = -ENOPROTOOPT;
81352
81353 + pax_track_stack();
81354 +
81355 if (optval == NULL)
81356 val=0;
81357 else {
81358 @@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
81359 int len;
81360 int val;
81361
81362 + pax_track_stack();
81363 +
81364 if (ip6_mroute_opt(optname))
81365 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
81366
81367 @@ -922,7 +926,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
81368 if (sk->sk_type != SOCK_STREAM)
81369 return -ENOPROTOOPT;
81370
81371 - msg.msg_control = optval;
81372 + msg.msg_control = (void __force_kernel *)optval;
81373 msg.msg_controllen = len;
81374 msg.msg_flags = 0;
81375
81376 diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
81377 index 1cf3f0c..1d4376f 100644
81378 --- a/net/ipv6/netfilter/ip6_queue.c
81379 +++ b/net/ipv6/netfilter/ip6_queue.c
81380 @@ -287,6 +287,9 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
81381
81382 if (v->data_len < sizeof(*user_iph))
81383 return 0;
81384 + if (v->data_len > 65535)
81385 + return -EMSGSIZE;
81386 +
81387 diff = v->data_len - e->skb->len;
81388 if (diff < 0) {
81389 if (pskb_trim(e->skb, v->data_len))
81390 @@ -411,7 +414,8 @@ ipq_dev_drop(int ifindex)
81391 static inline void
81392 __ipq_rcv_skb(struct sk_buff *skb)
81393 {
81394 - int status, type, pid, flags, nlmsglen, skblen;
81395 + int status, type, pid, flags;
81396 + unsigned int nlmsglen, skblen;
81397 struct nlmsghdr *nlh;
81398
81399 skblen = skb->len;
81400 diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
81401 index 78b5a36..7f37433 100644
81402 --- a/net/ipv6/netfilter/ip6_tables.c
81403 +++ b/net/ipv6/netfilter/ip6_tables.c
81404 @@ -1173,6 +1173,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
81405 private = &tmp;
81406 }
81407 #endif
81408 + memset(&info, 0, sizeof(info));
81409 info.valid_hooks = t->valid_hooks;
81410 memcpy(info.hook_entry, private->hook_entry,
81411 sizeof(info.hook_entry));
81412 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
81413 index 4f24570..b813b34 100644
81414 --- a/net/ipv6/raw.c
81415 +++ b/net/ipv6/raw.c
81416 @@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
81417 {
81418 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
81419 skb_checksum_complete(skb)) {
81420 - atomic_inc(&sk->sk_drops);
81421 + atomic_inc_unchecked(&sk->sk_drops);
81422 kfree_skb(skb);
81423 return NET_RX_DROP;
81424 }
81425
81426 /* Charge it to the socket. */
81427 if (sock_queue_rcv_skb(sk,skb)<0) {
81428 - atomic_inc(&sk->sk_drops);
81429 + atomic_inc_unchecked(&sk->sk_drops);
81430 kfree_skb(skb);
81431 return NET_RX_DROP;
81432 }
81433 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
81434 struct raw6_sock *rp = raw6_sk(sk);
81435
81436 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
81437 - atomic_inc(&sk->sk_drops);
81438 + atomic_inc_unchecked(&sk->sk_drops);
81439 kfree_skb(skb);
81440 return NET_RX_DROP;
81441 }
81442 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
81443
81444 if (inet->hdrincl) {
81445 if (skb_checksum_complete(skb)) {
81446 - atomic_inc(&sk->sk_drops);
81447 + atomic_inc_unchecked(&sk->sk_drops);
81448 kfree_skb(skb);
81449 return NET_RX_DROP;
81450 }
81451 @@ -518,7 +518,7 @@ csum_copy_err:
81452 as some normal condition.
81453 */
81454 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
81455 - atomic_inc(&sk->sk_drops);
81456 + atomic_inc_unchecked(&sk->sk_drops);
81457 goto out;
81458 }
81459
81460 @@ -600,7 +600,7 @@ out:
81461 return err;
81462 }
81463
81464 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
81465 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
81466 struct flowi *fl, struct rt6_info *rt,
81467 unsigned int flags)
81468 {
81469 @@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
81470 u16 proto;
81471 int err;
81472
81473 + pax_track_stack();
81474 +
81475 /* Rough check on arithmetic overflow,
81476 better check is made in ip6_append_data().
81477 */
81478 @@ -916,12 +918,17 @@ do_confirm:
81479 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
81480 char __user *optval, int optlen)
81481 {
81482 + struct icmp6_filter filter;
81483 +
81484 switch (optname) {
81485 case ICMPV6_FILTER:
81486 + if (optlen < 0)
81487 + return -EINVAL;
81488 if (optlen > sizeof(struct icmp6_filter))
81489 optlen = sizeof(struct icmp6_filter);
81490 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
81491 + if (copy_from_user(&filter, optval, optlen))
81492 return -EFAULT;
81493 + raw6_sk(sk)->filter = filter;
81494 return 0;
81495 default:
81496 return -ENOPROTOOPT;
81497 @@ -934,6 +941,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
81498 char __user *optval, int __user *optlen)
81499 {
81500 int len;
81501 + struct icmp6_filter filter;
81502
81503 switch (optname) {
81504 case ICMPV6_FILTER:
81505 @@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
81506 len = sizeof(struct icmp6_filter);
81507 if (put_user(len, optlen))
81508 return -EFAULT;
81509 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
81510 + filter = raw6_sk(sk)->filter;
81511 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
81512 return -EFAULT;
81513 return 0;
81514 default:
81515 @@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
81516 0, 0L, 0,
81517 sock_i_uid(sp), 0,
81518 sock_i_ino(sp),
81519 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
81520 + atomic_read(&sp->sk_refcnt),
81521 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81522 + NULL,
81523 +#else
81524 + sp,
81525 +#endif
81526 + atomic_read_unchecked(&sp->sk_drops));
81527 }
81528
81529 static int raw6_seq_show(struct seq_file *seq, void *v)
81530 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
81531 index faae6df..d4430c1 100644
81532 --- a/net/ipv6/tcp_ipv6.c
81533 +++ b/net/ipv6/tcp_ipv6.c
81534 @@ -89,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
81535 }
81536 #endif
81537
81538 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81539 +extern int grsec_enable_blackhole;
81540 +#endif
81541 +
81542 static void tcp_v6_hash(struct sock *sk)
81543 {
81544 if (sk->sk_state != TCP_CLOSE) {
81545 @@ -1579,6 +1583,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
81546 return 0;
81547
81548 reset:
81549 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81550 + if (!grsec_enable_blackhole)
81551 +#endif
81552 tcp_v6_send_reset(sk, skb);
81553 discard:
81554 if (opt_skb)
81555 @@ -1656,12 +1663,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
81556 TCP_SKB_CB(skb)->sacked = 0;
81557
81558 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
81559 - if (!sk)
81560 + if (!sk) {
81561 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81562 + ret = 1;
81563 +#endif
81564 goto no_tcp_socket;
81565 + }
81566
81567 process:
81568 - if (sk->sk_state == TCP_TIME_WAIT)
81569 + if (sk->sk_state == TCP_TIME_WAIT) {
81570 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81571 + ret = 2;
81572 +#endif
81573 goto do_time_wait;
81574 + }
81575
81576 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
81577 goto discard_and_relse;
81578 @@ -1701,6 +1716,10 @@ no_tcp_socket:
81579 bad_packet:
81580 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
81581 } else {
81582 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81583 + if (!grsec_enable_blackhole || (ret == 1 &&
81584 + (skb->dev->flags & IFF_LOOPBACK)))
81585 +#endif
81586 tcp_v6_send_reset(NULL, skb);
81587 }
81588
81589 @@ -1916,7 +1935,13 @@ static void get_openreq6(struct seq_file *seq,
81590 uid,
81591 0, /* non standard timer */
81592 0, /* open_requests have no inode */
81593 - 0, req);
81594 + 0,
81595 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81596 + NULL
81597 +#else
81598 + req
81599 +#endif
81600 + );
81601 }
81602
81603 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
81604 @@ -1966,7 +1991,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
81605 sock_i_uid(sp),
81606 icsk->icsk_probes_out,
81607 sock_i_ino(sp),
81608 - atomic_read(&sp->sk_refcnt), sp,
81609 + atomic_read(&sp->sk_refcnt),
81610 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81611 + NULL,
81612 +#else
81613 + sp,
81614 +#endif
81615 jiffies_to_clock_t(icsk->icsk_rto),
81616 jiffies_to_clock_t(icsk->icsk_ack.ato),
81617 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
81618 @@ -2001,7 +2031,13 @@ static void get_timewait6_sock(struct seq_file *seq,
81619 dest->s6_addr32[2], dest->s6_addr32[3], destp,
81620 tw->tw_substate, 0, 0,
81621 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
81622 - atomic_read(&tw->tw_refcnt), tw);
81623 + atomic_read(&tw->tw_refcnt),
81624 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81625 + NULL
81626 +#else
81627 + tw
81628 +#endif
81629 + );
81630 }
81631
81632 static int tcp6_seq_show(struct seq_file *seq, void *v)
81633 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
81634 index 9cc6289..052c521 100644
81635 --- a/net/ipv6/udp.c
81636 +++ b/net/ipv6/udp.c
81637 @@ -49,6 +49,10 @@
81638 #include <linux/seq_file.h>
81639 #include "udp_impl.h"
81640
81641 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81642 +extern int grsec_enable_blackhole;
81643 +#endif
81644 +
81645 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
81646 {
81647 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
81648 @@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
81649 if (rc == -ENOMEM) {
81650 UDP6_INC_STATS_BH(sock_net(sk),
81651 UDP_MIB_RCVBUFERRORS, is_udplite);
81652 - atomic_inc(&sk->sk_drops);
81653 + atomic_inc_unchecked(&sk->sk_drops);
81654 }
81655 goto drop;
81656 }
81657 @@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
81658 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
81659 proto == IPPROTO_UDPLITE);
81660
81661 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81662 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
81663 +#endif
81664 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
81665
81666 kfree_skb(skb);
81667 @@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
81668 0, 0L, 0,
81669 sock_i_uid(sp), 0,
81670 sock_i_ino(sp),
81671 - atomic_read(&sp->sk_refcnt), sp,
81672 - atomic_read(&sp->sk_drops));
81673 + atomic_read(&sp->sk_refcnt),
81674 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81675 + NULL,
81676 +#else
81677 + sp,
81678 +#endif
81679 + atomic_read_unchecked(&sp->sk_drops));
81680 }
81681
81682 int udp6_seq_show(struct seq_file *seq, void *v)
81683 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
81684 index 811984d..11f59b7 100644
81685 --- a/net/irda/ircomm/ircomm_tty.c
81686 +++ b/net/irda/ircomm/ircomm_tty.c
81687 @@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
81688 add_wait_queue(&self->open_wait, &wait);
81689
81690 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
81691 - __FILE__,__LINE__, tty->driver->name, self->open_count );
81692 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
81693
81694 /* As far as I can see, we protect open_count - Jean II */
81695 spin_lock_irqsave(&self->spinlock, flags);
81696 if (!tty_hung_up_p(filp)) {
81697 extra_count = 1;
81698 - self->open_count--;
81699 + local_dec(&self->open_count);
81700 }
81701 spin_unlock_irqrestore(&self->spinlock, flags);
81702 - self->blocked_open++;
81703 + local_inc(&self->blocked_open);
81704
81705 while (1) {
81706 if (tty->termios->c_cflag & CBAUD) {
81707 @@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
81708 }
81709
81710 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
81711 - __FILE__,__LINE__, tty->driver->name, self->open_count );
81712 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
81713
81714 schedule();
81715 }
81716 @@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
81717 if (extra_count) {
81718 /* ++ is not atomic, so this should be protected - Jean II */
81719 spin_lock_irqsave(&self->spinlock, flags);
81720 - self->open_count++;
81721 + local_inc(&self->open_count);
81722 spin_unlock_irqrestore(&self->spinlock, flags);
81723 }
81724 - self->blocked_open--;
81725 + local_dec(&self->blocked_open);
81726
81727 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
81728 - __FILE__,__LINE__, tty->driver->name, self->open_count);
81729 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
81730
81731 if (!retval)
81732 self->flags |= ASYNC_NORMAL_ACTIVE;
81733 @@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
81734 }
81735 /* ++ is not atomic, so this should be protected - Jean II */
81736 spin_lock_irqsave(&self->spinlock, flags);
81737 - self->open_count++;
81738 + local_inc(&self->open_count);
81739
81740 tty->driver_data = self;
81741 self->tty = tty;
81742 spin_unlock_irqrestore(&self->spinlock, flags);
81743
81744 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
81745 - self->line, self->open_count);
81746 + self->line, local_read(&self->open_count));
81747
81748 /* Not really used by us, but lets do it anyway */
81749 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
81750 @@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
81751 return;
81752 }
81753
81754 - if ((tty->count == 1) && (self->open_count != 1)) {
81755 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
81756 /*
81757 * Uh, oh. tty->count is 1, which means that the tty
81758 * structure will be freed. state->count should always
81759 @@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
81760 */
81761 IRDA_DEBUG(0, "%s(), bad serial port count; "
81762 "tty->count is 1, state->count is %d\n", __func__ ,
81763 - self->open_count);
81764 - self->open_count = 1;
81765 + local_read(&self->open_count));
81766 + local_set(&self->open_count, 1);
81767 }
81768
81769 - if (--self->open_count < 0) {
81770 + if (local_dec_return(&self->open_count) < 0) {
81771 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
81772 - __func__, self->line, self->open_count);
81773 - self->open_count = 0;
81774 + __func__, self->line, local_read(&self->open_count));
81775 + local_set(&self->open_count, 0);
81776 }
81777 - if (self->open_count) {
81778 + if (local_read(&self->open_count)) {
81779 spin_unlock_irqrestore(&self->spinlock, flags);
81780
81781 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
81782 @@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
81783 tty->closing = 0;
81784 self->tty = NULL;
81785
81786 - if (self->blocked_open) {
81787 + if (local_read(&self->blocked_open)) {
81788 if (self->close_delay)
81789 schedule_timeout_interruptible(self->close_delay);
81790 wake_up_interruptible(&self->open_wait);
81791 @@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
81792 spin_lock_irqsave(&self->spinlock, flags);
81793 self->flags &= ~ASYNC_NORMAL_ACTIVE;
81794 self->tty = NULL;
81795 - self->open_count = 0;
81796 + local_set(&self->open_count, 0);
81797 spin_unlock_irqrestore(&self->spinlock, flags);
81798
81799 wake_up_interruptible(&self->open_wait);
81800 @@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
81801 seq_putc(m, '\n');
81802
81803 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
81804 - seq_printf(m, "Open count: %d\n", self->open_count);
81805 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
81806 seq_printf(m, "Max data size: %d\n", self->max_data_size);
81807 seq_printf(m, "Max header size: %d\n", self->max_header_size);
81808
81809 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
81810 index bada1b9..f325943 100644
81811 --- a/net/iucv/af_iucv.c
81812 +++ b/net/iucv/af_iucv.c
81813 @@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct sock *sk)
81814
81815 write_lock_bh(&iucv_sk_list.lock);
81816
81817 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
81818 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
81819 while (__iucv_get_sock_by_name(name)) {
81820 sprintf(name, "%08x",
81821 - atomic_inc_return(&iucv_sk_list.autobind_name));
81822 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
81823 }
81824
81825 write_unlock_bh(&iucv_sk_list.lock);
81826 diff --git a/net/key/af_key.c b/net/key/af_key.c
81827 index 4e98193..439b449 100644
81828 --- a/net/key/af_key.c
81829 +++ b/net/key/af_key.c
81830 @@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
81831 struct xfrm_migrate m[XFRM_MAX_DEPTH];
81832 struct xfrm_kmaddress k;
81833
81834 + pax_track_stack();
81835 +
81836 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
81837 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
81838 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
81839 @@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
81840 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
81841 else
81842 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
81843 +#ifdef CONFIG_GRKERNSEC_HIDESYM
81844 + NULL,
81845 +#else
81846 s,
81847 +#endif
81848 atomic_read(&s->sk_refcnt),
81849 sk_rmem_alloc_get(s),
81850 sk_wmem_alloc_get(s),
81851 diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
81852 index bda96d1..c038b72 100644
81853 --- a/net/lapb/lapb_iface.c
81854 +++ b/net/lapb/lapb_iface.c
81855 @@ -157,7 +157,7 @@ int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks
81856 goto out;
81857
81858 lapb->dev = dev;
81859 - lapb->callbacks = *callbacks;
81860 + lapb->callbacks = callbacks;
81861
81862 __lapb_insert_cb(lapb);
81863
81864 @@ -379,32 +379,32 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
81865
81866 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
81867 {
81868 - if (lapb->callbacks.connect_confirmation)
81869 - lapb->callbacks.connect_confirmation(lapb->dev, reason);
81870 + if (lapb->callbacks->connect_confirmation)
81871 + lapb->callbacks->connect_confirmation(lapb->dev, reason);
81872 }
81873
81874 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
81875 {
81876 - if (lapb->callbacks.connect_indication)
81877 - lapb->callbacks.connect_indication(lapb->dev, reason);
81878 + if (lapb->callbacks->connect_indication)
81879 + lapb->callbacks->connect_indication(lapb->dev, reason);
81880 }
81881
81882 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
81883 {
81884 - if (lapb->callbacks.disconnect_confirmation)
81885 - lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
81886 + if (lapb->callbacks->disconnect_confirmation)
81887 + lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
81888 }
81889
81890 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
81891 {
81892 - if (lapb->callbacks.disconnect_indication)
81893 - lapb->callbacks.disconnect_indication(lapb->dev, reason);
81894 + if (lapb->callbacks->disconnect_indication)
81895 + lapb->callbacks->disconnect_indication(lapb->dev, reason);
81896 }
81897
81898 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
81899 {
81900 - if (lapb->callbacks.data_indication)
81901 - return lapb->callbacks.data_indication(lapb->dev, skb);
81902 + if (lapb->callbacks->data_indication)
81903 + return lapb->callbacks->data_indication(lapb->dev, skb);
81904
81905 kfree_skb(skb);
81906 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
81907 @@ -414,8 +414,8 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb)
81908 {
81909 int used = 0;
81910
81911 - if (lapb->callbacks.data_transmit) {
81912 - lapb->callbacks.data_transmit(lapb->dev, skb);
81913 + if (lapb->callbacks->data_transmit) {
81914 + lapb->callbacks->data_transmit(lapb->dev, skb);
81915 used = 1;
81916 }
81917
81918 diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
81919 index fe2d3f8..e57f683 100644
81920 --- a/net/mac80211/cfg.c
81921 +++ b/net/mac80211/cfg.c
81922 @@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
81923 return err;
81924 }
81925
81926 -struct cfg80211_ops mac80211_config_ops = {
81927 +const struct cfg80211_ops mac80211_config_ops = {
81928 .add_virtual_intf = ieee80211_add_iface,
81929 .del_virtual_intf = ieee80211_del_iface,
81930 .change_virtual_intf = ieee80211_change_iface,
81931 diff --git a/net/mac80211/cfg.h b/net/mac80211/cfg.h
81932 index 7d7879f..2d51f62 100644
81933 --- a/net/mac80211/cfg.h
81934 +++ b/net/mac80211/cfg.h
81935 @@ -4,6 +4,6 @@
81936 #ifndef __CFG_H
81937 #define __CFG_H
81938
81939 -extern struct cfg80211_ops mac80211_config_ops;
81940 +extern const struct cfg80211_ops mac80211_config_ops;
81941
81942 #endif /* __CFG_H */
81943 diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
81944 index 99c7525..9cb4937 100644
81945 --- a/net/mac80211/debugfs_key.c
81946 +++ b/net/mac80211/debugfs_key.c
81947 @@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file *file, char __user *userbuf,
81948 size_t count, loff_t *ppos)
81949 {
81950 struct ieee80211_key *key = file->private_data;
81951 - int i, res, bufsize = 2 * key->conf.keylen + 2;
81952 + int i, bufsize = 2 * key->conf.keylen + 2;
81953 char *buf = kmalloc(bufsize, GFP_KERNEL);
81954 char *p = buf;
81955 + ssize_t res;
81956 +
81957 + if (buf == NULL)
81958 + return -ENOMEM;
81959
81960 for (i = 0; i < key->conf.keylen; i++)
81961 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
81962 diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
81963 index 33a2e89..08650c8 100644
81964 --- a/net/mac80211/debugfs_sta.c
81965 +++ b/net/mac80211/debugfs_sta.c
81966 @@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
81967 int i;
81968 struct sta_info *sta = file->private_data;
81969
81970 + pax_track_stack();
81971 +
81972 spin_lock_bh(&sta->lock);
81973 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
81974 sta->ampdu_mlme.dialog_token_allocator + 1);
81975 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
81976 index ca62bfe..6657a03 100644
81977 --- a/net/mac80211/ieee80211_i.h
81978 +++ b/net/mac80211/ieee80211_i.h
81979 @@ -25,6 +25,7 @@
81980 #include <linux/etherdevice.h>
81981 #include <net/cfg80211.h>
81982 #include <net/mac80211.h>
81983 +#include <asm/local.h>
81984 #include "key.h"
81985 #include "sta_info.h"
81986
81987 @@ -635,7 +636,7 @@ struct ieee80211_local {
81988 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
81989 spinlock_t queue_stop_reason_lock;
81990
81991 - int open_count;
81992 + local_t open_count;
81993 int monitors, cooked_mntrs;
81994 /* number of interfaces with corresponding FIF_ flags */
81995 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
81996 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
81997 index 079c500..eb3c6d4 100644
81998 --- a/net/mac80211/iface.c
81999 +++ b/net/mac80211/iface.c
82000 @@ -166,7 +166,7 @@ static int ieee80211_open(struct net_device *dev)
82001 break;
82002 }
82003
82004 - if (local->open_count == 0) {
82005 + if (local_read(&local->open_count) == 0) {
82006 res = drv_start(local);
82007 if (res)
82008 goto err_del_bss;
82009 @@ -196,7 +196,7 @@ static int ieee80211_open(struct net_device *dev)
82010 * Validate the MAC address for this device.
82011 */
82012 if (!is_valid_ether_addr(dev->dev_addr)) {
82013 - if (!local->open_count)
82014 + if (!local_read(&local->open_count))
82015 drv_stop(local);
82016 return -EADDRNOTAVAIL;
82017 }
82018 @@ -292,7 +292,7 @@ static int ieee80211_open(struct net_device *dev)
82019
82020 hw_reconf_flags |= __ieee80211_recalc_idle(local);
82021
82022 - local->open_count++;
82023 + local_inc(&local->open_count);
82024 if (hw_reconf_flags) {
82025 ieee80211_hw_config(local, hw_reconf_flags);
82026 /*
82027 @@ -320,7 +320,7 @@ static int ieee80211_open(struct net_device *dev)
82028 err_del_interface:
82029 drv_remove_interface(local, &conf);
82030 err_stop:
82031 - if (!local->open_count)
82032 + if (!local_read(&local->open_count))
82033 drv_stop(local);
82034 err_del_bss:
82035 sdata->bss = NULL;
82036 @@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_device *dev)
82037 WARN_ON(!list_empty(&sdata->u.ap.vlans));
82038 }
82039
82040 - local->open_count--;
82041 + local_dec(&local->open_count);
82042
82043 switch (sdata->vif.type) {
82044 case NL80211_IFTYPE_AP_VLAN:
82045 @@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_device *dev)
82046
82047 ieee80211_recalc_ps(local, -1);
82048
82049 - if (local->open_count == 0) {
82050 + if (local_read(&local->open_count) == 0) {
82051 ieee80211_clear_tx_pending(local);
82052 ieee80211_stop_device(local);
82053
82054 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
82055 index 2dfe176..74e4388 100644
82056 --- a/net/mac80211/main.c
82057 +++ b/net/mac80211/main.c
82058 @@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
82059 local->hw.conf.power_level = power;
82060 }
82061
82062 - if (changed && local->open_count) {
82063 + if (changed && local_read(&local->open_count)) {
82064 ret = drv_config(local, changed);
82065 /*
82066 * Goal:
82067 diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
82068 index e67eea7..fcc227e 100644
82069 --- a/net/mac80211/mlme.c
82070 +++ b/net/mac80211/mlme.c
82071 @@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
82072 bool have_higher_than_11mbit = false, newsta = false;
82073 u16 ap_ht_cap_flags;
82074
82075 + pax_track_stack();
82076 +
82077 /*
82078 * AssocResp and ReassocResp have identical structure, so process both
82079 * of them in this function.
82080 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
82081 index e535f1c..4d733d1 100644
82082 --- a/net/mac80211/pm.c
82083 +++ b/net/mac80211/pm.c
82084 @@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
82085 }
82086
82087 /* stop hardware - this must stop RX */
82088 - if (local->open_count)
82089 + if (local_read(&local->open_count))
82090 ieee80211_stop_device(local);
82091
82092 local->suspended = true;
82093 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
82094 index b33efc4..0a2efb6 100644
82095 --- a/net/mac80211/rate.c
82096 +++ b/net/mac80211/rate.c
82097 @@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
82098 struct rate_control_ref *ref, *old;
82099
82100 ASSERT_RTNL();
82101 - if (local->open_count)
82102 + if (local_read(&local->open_count))
82103 return -EBUSY;
82104
82105 ref = rate_control_alloc(name, local);
82106 diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
82107 index b1d7904..57e4da7 100644
82108 --- a/net/mac80211/tx.c
82109 +++ b/net/mac80211/tx.c
82110 @@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
82111 return cpu_to_le16(dur);
82112 }
82113
82114 -static int inline is_ieee80211_device(struct ieee80211_local *local,
82115 +static inline int is_ieee80211_device(struct ieee80211_local *local,
82116 struct net_device *dev)
82117 {
82118 return local == wdev_priv(dev->ieee80211_ptr);
82119 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
82120 index 31b1085..48fb26d 100644
82121 --- a/net/mac80211/util.c
82122 +++ b/net/mac80211/util.c
82123 @@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
82124 local->resuming = true;
82125
82126 /* restart hardware */
82127 - if (local->open_count) {
82128 + if (local_read(&local->open_count)) {
82129 /*
82130 * Upon resume hardware can sometimes be goofy due to
82131 * various platform / driver / bus issues, so restarting
82132 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
82133 index 634d14a..b35a608 100644
82134 --- a/net/netfilter/Kconfig
82135 +++ b/net/netfilter/Kconfig
82136 @@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
82137
82138 To compile it as a module, choose M here. If unsure, say N.
82139
82140 +config NETFILTER_XT_MATCH_GRADM
82141 + tristate '"gradm" match support'
82142 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
82143 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
82144 + ---help---
82145 + The gradm match allows to match on grsecurity RBAC being enabled.
82146 + It is useful when iptables rules are applied early on bootup to
82147 + prevent connections to the machine (except from a trusted host)
82148 + while the RBAC system is disabled.
82149 +
82150 config NETFILTER_XT_MATCH_HASHLIMIT
82151 tristate '"hashlimit" match support'
82152 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
82153 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
82154 index 49f62ee..a17b2c6 100644
82155 --- a/net/netfilter/Makefile
82156 +++ b/net/netfilter/Makefile
82157 @@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
82158 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
82159 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
82160 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
82161 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
82162 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
82163 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
82164 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
82165 diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
82166 index 3c7e427..724043c 100644
82167 --- a/net/netfilter/ipvs/ip_vs_app.c
82168 +++ b/net/netfilter/ipvs/ip_vs_app.c
82169 @@ -564,7 +564,7 @@ static const struct file_operations ip_vs_app_fops = {
82170 .open = ip_vs_app_open,
82171 .read = seq_read,
82172 .llseek = seq_lseek,
82173 - .release = seq_release,
82174 + .release = seq_release_net,
82175 };
82176 #endif
82177
82178 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
82179 index 95682e5..457dbac 100644
82180 --- a/net/netfilter/ipvs/ip_vs_conn.c
82181 +++ b/net/netfilter/ipvs/ip_vs_conn.c
82182 @@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
82183 /* if the connection is not template and is created
82184 * by sync, preserve the activity flag.
82185 */
82186 - cp->flags |= atomic_read(&dest->conn_flags) &
82187 + cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
82188 (~IP_VS_CONN_F_INACTIVE);
82189 else
82190 - cp->flags |= atomic_read(&dest->conn_flags);
82191 + cp->flags |= atomic_read_unchecked(&dest->conn_flags);
82192 cp->dest = dest;
82193
82194 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
82195 @@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport,
82196 atomic_set(&cp->refcnt, 1);
82197
82198 atomic_set(&cp->n_control, 0);
82199 - atomic_set(&cp->in_pkts, 0);
82200 + atomic_set_unchecked(&cp->in_pkts, 0);
82201
82202 atomic_inc(&ip_vs_conn_count);
82203 if (flags & IP_VS_CONN_F_NO_CPORT)
82204 @@ -871,7 +871,7 @@ static const struct file_operations ip_vs_conn_fops = {
82205 .open = ip_vs_conn_open,
82206 .read = seq_read,
82207 .llseek = seq_lseek,
82208 - .release = seq_release,
82209 + .release = seq_release_net,
82210 };
82211
82212 static const char *ip_vs_origin_name(unsigned flags)
82213 @@ -934,7 +934,7 @@ static const struct file_operations ip_vs_conn_sync_fops = {
82214 .open = ip_vs_conn_sync_open,
82215 .read = seq_read,
82216 .llseek = seq_lseek,
82217 - .release = seq_release,
82218 + .release = seq_release_net,
82219 };
82220
82221 #endif
82222 @@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
82223
82224 /* Don't drop the entry if its number of incoming packets is not
82225 located in [0, 8] */
82226 - i = atomic_read(&cp->in_pkts);
82227 + i = atomic_read_unchecked(&cp->in_pkts);
82228 if (i > 8 || i < 0) return 0;
82229
82230 if (!todrop_rate[i]) return 0;
82231 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
82232 index b95699f..5fee919 100644
82233 --- a/net/netfilter/ipvs/ip_vs_core.c
82234 +++ b/net/netfilter/ipvs/ip_vs_core.c
82235 @@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
82236 ret = cp->packet_xmit(skb, cp, pp);
82237 /* do not touch skb anymore */
82238
82239 - atomic_inc(&cp->in_pkts);
82240 + atomic_inc_unchecked(&cp->in_pkts);
82241 ip_vs_conn_put(cp);
82242 return ret;
82243 }
82244 @@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
82245 * Sync connection if it is about to close to
82246 * encorage the standby servers to update the connections timeout
82247 */
82248 - pkts = atomic_add_return(1, &cp->in_pkts);
82249 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
82250 if (af == AF_INET &&
82251 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
82252 (((cp->protocol != IPPROTO_TCP ||
82253 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
82254 index 02b2610..2d89424 100644
82255 --- a/net/netfilter/ipvs/ip_vs_ctl.c
82256 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
82257 @@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc,
82258 ip_vs_rs_hash(dest);
82259 write_unlock_bh(&__ip_vs_rs_lock);
82260 }
82261 - atomic_set(&dest->conn_flags, conn_flags);
82262 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
82263
82264 /* bind the service */
82265 if (!dest->svc) {
82266 @@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
82267 " %-7s %-6d %-10d %-10d\n",
82268 &dest->addr.in6,
82269 ntohs(dest->port),
82270 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
82271 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
82272 atomic_read(&dest->weight),
82273 atomic_read(&dest->activeconns),
82274 atomic_read(&dest->inactconns));
82275 @@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
82276 "%-7s %-6d %-10d %-10d\n",
82277 ntohl(dest->addr.ip),
82278 ntohs(dest->port),
82279 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
82280 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
82281 atomic_read(&dest->weight),
82282 atomic_read(&dest->activeconns),
82283 atomic_read(&dest->inactconns));
82284 @@ -1927,7 +1927,7 @@ static const struct file_operations ip_vs_info_fops = {
82285 .open = ip_vs_info_open,
82286 .read = seq_read,
82287 .llseek = seq_lseek,
82288 - .release = seq_release_private,
82289 + .release = seq_release_net,
82290 };
82291
82292 #endif
82293 @@ -1976,7 +1976,7 @@ static const struct file_operations ip_vs_stats_fops = {
82294 .open = ip_vs_stats_seq_open,
82295 .read = seq_read,
82296 .llseek = seq_lseek,
82297 - .release = single_release,
82298 + .release = single_release_net,
82299 };
82300
82301 #endif
82302 @@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
82303
82304 entry.addr = dest->addr.ip;
82305 entry.port = dest->port;
82306 - entry.conn_flags = atomic_read(&dest->conn_flags);
82307 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
82308 entry.weight = atomic_read(&dest->weight);
82309 entry.u_threshold = dest->u_threshold;
82310 entry.l_threshold = dest->l_threshold;
82311 @@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
82312 unsigned char arg[128];
82313 int ret = 0;
82314
82315 + pax_track_stack();
82316 +
82317 if (!capable(CAP_NET_ADMIN))
82318 return -EPERM;
82319
82320 @@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
82321 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
82322
82323 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
82324 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
82325 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
82326 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
82327 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
82328 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
82329 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
82330 index e177f0d..55e8581 100644
82331 --- a/net/netfilter/ipvs/ip_vs_sync.c
82332 +++ b/net/netfilter/ipvs/ip_vs_sync.c
82333 @@ -438,7 +438,7 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
82334
82335 if (opt)
82336 memcpy(&cp->in_seq, opt, sizeof(*opt));
82337 - atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
82338 + atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
82339 cp->state = state;
82340 cp->old_state = cp->state;
82341 /*
82342 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
82343 index 30b3189..e2e4b55 100644
82344 --- a/net/netfilter/ipvs/ip_vs_xmit.c
82345 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
82346 @@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
82347 else
82348 rc = NF_ACCEPT;
82349 /* do not touch skb anymore */
82350 - atomic_inc(&cp->in_pkts);
82351 + atomic_inc_unchecked(&cp->in_pkts);
82352 goto out;
82353 }
82354
82355 @@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
82356 else
82357 rc = NF_ACCEPT;
82358 /* do not touch skb anymore */
82359 - atomic_inc(&cp->in_pkts);
82360 + atomic_inc_unchecked(&cp->in_pkts);
82361 goto out;
82362 }
82363
82364 diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
82365 index d521718..d0fd7a1 100644
82366 --- a/net/netfilter/nf_conntrack_netlink.c
82367 +++ b/net/netfilter/nf_conntrack_netlink.c
82368 @@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlattr *attr,
82369 static int
82370 ctnetlink_parse_tuple(const struct nlattr * const cda[],
82371 struct nf_conntrack_tuple *tuple,
82372 - enum ctattr_tuple type, u_int8_t l3num)
82373 + enum ctattr_type type, u_int8_t l3num)
82374 {
82375 struct nlattr *tb[CTA_TUPLE_MAX+1];
82376 int err;
82377 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
82378 index f900dc3..5e45346 100644
82379 --- a/net/netfilter/nfnetlink_log.c
82380 +++ b/net/netfilter/nfnetlink_log.c
82381 @@ -68,7 +68,7 @@ struct nfulnl_instance {
82382 };
82383
82384 static DEFINE_RWLOCK(instances_lock);
82385 -static atomic_t global_seq;
82386 +static atomic_unchecked_t global_seq;
82387
82388 #define INSTANCE_BUCKETS 16
82389 static struct hlist_head instance_table[INSTANCE_BUCKETS];
82390 @@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_instance *inst,
82391 /* global sequence number */
82392 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
82393 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
82394 - htonl(atomic_inc_return(&global_seq)));
82395 + htonl(atomic_inc_return_unchecked(&global_seq)));
82396
82397 if (data_len) {
82398 struct nlattr *nla;
82399 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
82400 new file mode 100644
82401 index 0000000..b1bac76
82402 --- /dev/null
82403 +++ b/net/netfilter/xt_gradm.c
82404 @@ -0,0 +1,51 @@
82405 +/*
82406 + * gradm match for netfilter
82407 + * Copyright © Zbigniew Krzystolik, 2010
82408 + *
82409 + * This program is free software; you can redistribute it and/or modify
82410 + * it under the terms of the GNU General Public License; either version
82411 + * 2 or 3 as published by the Free Software Foundation.
82412 + */
82413 +#include <linux/module.h>
82414 +#include <linux/moduleparam.h>
82415 +#include <linux/skbuff.h>
82416 +#include <linux/netfilter/x_tables.h>
82417 +#include <linux/grsecurity.h>
82418 +#include <linux/netfilter/xt_gradm.h>
82419 +
82420 +static bool
82421 +gradm_mt(const struct sk_buff *skb, const struct xt_match_param *par)
82422 +{
82423 + const struct xt_gradm_mtinfo *info = par->matchinfo;
82424 + bool retval = false;
82425 + if (gr_acl_is_enabled())
82426 + retval = true;
82427 + return retval ^ info->invflags;
82428 +}
82429 +
82430 +static struct xt_match gradm_mt_reg __read_mostly = {
82431 + .name = "gradm",
82432 + .revision = 0,
82433 + .family = NFPROTO_UNSPEC,
82434 + .match = gradm_mt,
82435 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
82436 + .me = THIS_MODULE,
82437 +};
82438 +
82439 +static int __init gradm_mt_init(void)
82440 +{
82441 + return xt_register_match(&gradm_mt_reg);
82442 +}
82443 +
82444 +static void __exit gradm_mt_exit(void)
82445 +{
82446 + xt_unregister_match(&gradm_mt_reg);
82447 +}
82448 +
82449 +module_init(gradm_mt_init);
82450 +module_exit(gradm_mt_exit);
82451 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
82452 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
82453 +MODULE_LICENSE("GPL");
82454 +MODULE_ALIAS("ipt_gradm");
82455 +MODULE_ALIAS("ip6t_gradm");
82456 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
82457 index 5a7dcdf..24a3578 100644
82458 --- a/net/netlink/af_netlink.c
82459 +++ b/net/netlink/af_netlink.c
82460 @@ -733,7 +733,7 @@ static void netlink_overrun(struct sock *sk)
82461 sk->sk_error_report(sk);
82462 }
82463 }
82464 - atomic_inc(&sk->sk_drops);
82465 + atomic_inc_unchecked(&sk->sk_drops);
82466 }
82467
82468 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
82469 @@ -1964,15 +1964,23 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
82470 struct netlink_sock *nlk = nlk_sk(s);
82471
82472 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d\n",
82473 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82474 + NULL,
82475 +#else
82476 s,
82477 +#endif
82478 s->sk_protocol,
82479 nlk->pid,
82480 nlk->groups ? (u32)nlk->groups[0] : 0,
82481 sk_rmem_alloc_get(s),
82482 sk_wmem_alloc_get(s),
82483 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82484 + NULL,
82485 +#else
82486 nlk->cb,
82487 +#endif
82488 atomic_read(&s->sk_refcnt),
82489 - atomic_read(&s->sk_drops)
82490 + atomic_read_unchecked(&s->sk_drops)
82491 );
82492
82493 }
82494 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
82495 index 7a83495..ab0062f 100644
82496 --- a/net/netrom/af_netrom.c
82497 +++ b/net/netrom/af_netrom.c
82498 @@ -838,6 +838,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
82499 struct sock *sk = sock->sk;
82500 struct nr_sock *nr = nr_sk(sk);
82501
82502 + memset(sax, 0, sizeof(*sax));
82503 lock_sock(sk);
82504 if (peer != 0) {
82505 if (sk->sk_state != TCP_ESTABLISHED) {
82506 @@ -852,7 +853,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
82507 *uaddr_len = sizeof(struct full_sockaddr_ax25);
82508 } else {
82509 sax->fsa_ax25.sax25_family = AF_NETROM;
82510 - sax->fsa_ax25.sax25_ndigis = 0;
82511 sax->fsa_ax25.sax25_call = nr->source_addr;
82512 *uaddr_len = sizeof(struct sockaddr_ax25);
82513 }
82514 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
82515 index 35cfa79..4e78ff7 100644
82516 --- a/net/packet/af_packet.c
82517 +++ b/net/packet/af_packet.c
82518 @@ -2429,7 +2429,11 @@ static int packet_seq_show(struct seq_file *seq, void *v)
82519
82520 seq_printf(seq,
82521 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
82522 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82523 + NULL,
82524 +#else
82525 s,
82526 +#endif
82527 atomic_read(&s->sk_refcnt),
82528 s->sk_type,
82529 ntohs(po->num),
82530 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
82531 index 519ff9d..a422a90 100644
82532 --- a/net/phonet/af_phonet.c
82533 +++ b/net/phonet/af_phonet.c
82534 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(int protocol)
82535 {
82536 struct phonet_protocol *pp;
82537
82538 - if (protocol >= PHONET_NPROTO)
82539 + if (protocol < 0 || protocol >= PHONET_NPROTO)
82540 return NULL;
82541
82542 spin_lock(&proto_tab_lock);
82543 @@ -402,7 +402,7 @@ int __init_or_module phonet_proto_register(int protocol,
82544 {
82545 int err = 0;
82546
82547 - if (protocol >= PHONET_NPROTO)
82548 + if (protocol < 0 || protocol >= PHONET_NPROTO)
82549 return -EINVAL;
82550
82551 err = proto_register(pp->prot, 1);
82552 diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
82553 index ef5c75c..2b6c2fa 100644
82554 --- a/net/phonet/datagram.c
82555 +++ b/net/phonet/datagram.c
82556 @@ -162,7 +162,7 @@ static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb)
82557 if (err < 0) {
82558 kfree_skb(skb);
82559 if (err == -ENOMEM)
82560 - atomic_inc(&sk->sk_drops);
82561 + atomic_inc_unchecked(&sk->sk_drops);
82562 }
82563 return err ? NET_RX_DROP : NET_RX_SUCCESS;
82564 }
82565 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
82566 index 9cdd35e..16cd850 100644
82567 --- a/net/phonet/pep.c
82568 +++ b/net/phonet/pep.c
82569 @@ -348,7 +348,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
82570
82571 case PNS_PEP_CTRL_REQ:
82572 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
82573 - atomic_inc(&sk->sk_drops);
82574 + atomic_inc_unchecked(&sk->sk_drops);
82575 break;
82576 }
82577 __skb_pull(skb, 4);
82578 @@ -362,12 +362,12 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
82579 if (!err)
82580 return 0;
82581 if (err == -ENOMEM)
82582 - atomic_inc(&sk->sk_drops);
82583 + atomic_inc_unchecked(&sk->sk_drops);
82584 break;
82585 }
82586
82587 if (pn->rx_credits == 0) {
82588 - atomic_inc(&sk->sk_drops);
82589 + atomic_inc_unchecked(&sk->sk_drops);
82590 err = -ENOBUFS;
82591 break;
82592 }
82593 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
82594 index aa5b5a9..c09b4f8 100644
82595 --- a/net/phonet/socket.c
82596 +++ b/net/phonet/socket.c
82597 @@ -482,8 +482,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
82598 sk->sk_state,
82599 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
82600 sock_i_uid(sk), sock_i_ino(sk),
82601 - atomic_read(&sk->sk_refcnt), sk,
82602 - atomic_read(&sk->sk_drops), &len);
82603 + atomic_read(&sk->sk_refcnt),
82604 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82605 + NULL,
82606 +#else
82607 + sk,
82608 +#endif
82609 + atomic_read_unchecked(&sk->sk_drops), &len);
82610 }
82611 seq_printf(seq, "%*s\n", 127 - len, "");
82612 return 0;
82613 diff --git a/net/rds/Kconfig b/net/rds/Kconfig
82614 index ec753b3..821187c 100644
82615 --- a/net/rds/Kconfig
82616 +++ b/net/rds/Kconfig
82617 @@ -1,7 +1,7 @@
82618
82619 config RDS
82620 tristate "The RDS Protocol (EXPERIMENTAL)"
82621 - depends on INET && EXPERIMENTAL
82622 + depends on INET && EXPERIMENTAL && BROKEN
82623 ---help---
82624 The RDS (Reliable Datagram Sockets) protocol provides reliable,
82625 sequenced delivery of datagrams over Infiniband, iWARP,
82626 diff --git a/net/rds/cong.c b/net/rds/cong.c
82627 index dd2711d..1c7ed12 100644
82628 --- a/net/rds/cong.c
82629 +++ b/net/rds/cong.c
82630 @@ -77,7 +77,7 @@
82631 * finds that the saved generation number is smaller than the global generation
82632 * number, it wakes up the process.
82633 */
82634 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
82635 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
82636
82637 /*
82638 * Congestion monitoring
82639 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
82640 rdsdebug("waking map %p for %pI4\n",
82641 map, &map->m_addr);
82642 rds_stats_inc(s_cong_update_received);
82643 - atomic_inc(&rds_cong_generation);
82644 + atomic_inc_unchecked(&rds_cong_generation);
82645 if (waitqueue_active(&map->m_waitq))
82646 wake_up(&map->m_waitq);
82647 if (waitqueue_active(&rds_poll_waitq))
82648 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
82649
82650 int rds_cong_updated_since(unsigned long *recent)
82651 {
82652 - unsigned long gen = atomic_read(&rds_cong_generation);
82653 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
82654
82655 if (likely(*recent == gen))
82656 return 0;
82657 diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
82658 index de4a1b1..94ec861 100644
82659 --- a/net/rds/iw_rdma.c
82660 +++ b/net/rds/iw_rdma.c
82661 @@ -181,6 +181,8 @@ int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_i
82662 struct rdma_cm_id *pcm_id;
82663 int rc;
82664
82665 + pax_track_stack();
82666 +
82667 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
82668 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
82669
82670 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
82671 index b5198ae..8b9fb90 100644
82672 --- a/net/rds/tcp.c
82673 +++ b/net/rds/tcp.c
82674 @@ -57,7 +57,7 @@ void rds_tcp_nonagle(struct socket *sock)
82675 int val = 1;
82676
82677 set_fs(KERNEL_DS);
82678 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
82679 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
82680 sizeof(val));
82681 set_fs(oldfs);
82682 }
82683 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
82684 index ab545e0..4079b3b 100644
82685 --- a/net/rds/tcp_send.c
82686 +++ b/net/rds/tcp_send.c
82687 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
82688
82689 oldfs = get_fs();
82690 set_fs(KERNEL_DS);
82691 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
82692 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
82693 sizeof(val));
82694 set_fs(oldfs);
82695 }
82696 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
82697 index a86afce..8657bce 100644
82698 --- a/net/rxrpc/af_rxrpc.c
82699 +++ b/net/rxrpc/af_rxrpc.c
82700 @@ -38,7 +38,7 @@ static const struct proto_ops rxrpc_rpc_ops;
82701 __be32 rxrpc_epoch;
82702
82703 /* current debugging ID */
82704 -atomic_t rxrpc_debug_id;
82705 +atomic_unchecked_t rxrpc_debug_id;
82706
82707 /* count of skbs currently in use */
82708 atomic_t rxrpc_n_skbs;
82709 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
82710 index b4a2209..539106c 100644
82711 --- a/net/rxrpc/ar-ack.c
82712 +++ b/net/rxrpc/ar-ack.c
82713 @@ -174,7 +174,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
82714
82715 _enter("{%d,%d,%d,%d},",
82716 call->acks_hard, call->acks_unacked,
82717 - atomic_read(&call->sequence),
82718 + atomic_read_unchecked(&call->sequence),
82719 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
82720
82721 stop = 0;
82722 @@ -198,7 +198,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
82723
82724 /* each Tx packet has a new serial number */
82725 sp->hdr.serial =
82726 - htonl(atomic_inc_return(&call->conn->serial));
82727 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
82728
82729 hdr = (struct rxrpc_header *) txb->head;
82730 hdr->serial = sp->hdr.serial;
82731 @@ -401,7 +401,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
82732 */
82733 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
82734 {
82735 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
82736 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
82737 }
82738
82739 /*
82740 @@ -627,7 +627,7 @@ process_further:
82741
82742 latest = ntohl(sp->hdr.serial);
82743 hard = ntohl(ack.firstPacket);
82744 - tx = atomic_read(&call->sequence);
82745 + tx = atomic_read_unchecked(&call->sequence);
82746
82747 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
82748 latest,
82749 @@ -840,6 +840,8 @@ void rxrpc_process_call(struct work_struct *work)
82750 u32 abort_code = RX_PROTOCOL_ERROR;
82751 u8 *acks = NULL;
82752
82753 + pax_track_stack();
82754 +
82755 //printk("\n--------------------\n");
82756 _enter("{%d,%s,%lx} [%lu]",
82757 call->debug_id, rxrpc_call_states[call->state], call->events,
82758 @@ -1159,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
82759 goto maybe_reschedule;
82760
82761 send_ACK_with_skew:
82762 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
82763 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
82764 ntohl(ack.serial));
82765 send_ACK:
82766 mtu = call->conn->trans->peer->if_mtu;
82767 @@ -1171,7 +1173,7 @@ send_ACK:
82768 ackinfo.rxMTU = htonl(5692);
82769 ackinfo.jumbo_max = htonl(4);
82770
82771 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
82772 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
82773 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
82774 ntohl(hdr.serial),
82775 ntohs(ack.maxSkew),
82776 @@ -1189,7 +1191,7 @@ send_ACK:
82777 send_message:
82778 _debug("send message");
82779
82780 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
82781 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
82782 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
82783 send_message_2:
82784
82785 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
82786 index bc0019f..e1b4b24 100644
82787 --- a/net/rxrpc/ar-call.c
82788 +++ b/net/rxrpc/ar-call.c
82789 @@ -82,7 +82,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
82790 spin_lock_init(&call->lock);
82791 rwlock_init(&call->state_lock);
82792 atomic_set(&call->usage, 1);
82793 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
82794 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
82795 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
82796
82797 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
82798 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
82799 index 9f1ce84..ff8d061 100644
82800 --- a/net/rxrpc/ar-connection.c
82801 +++ b/net/rxrpc/ar-connection.c
82802 @@ -205,7 +205,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
82803 rwlock_init(&conn->lock);
82804 spin_lock_init(&conn->state_lock);
82805 atomic_set(&conn->usage, 1);
82806 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
82807 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
82808 conn->avail_calls = RXRPC_MAXCALLS;
82809 conn->size_align = 4;
82810 conn->header_size = sizeof(struct rxrpc_header);
82811 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
82812 index 0505cdc..f0748ce 100644
82813 --- a/net/rxrpc/ar-connevent.c
82814 +++ b/net/rxrpc/ar-connevent.c
82815 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
82816
82817 len = iov[0].iov_len + iov[1].iov_len;
82818
82819 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
82820 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
82821 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
82822
82823 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
82824 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
82825 index f98c802..9e8488e 100644
82826 --- a/net/rxrpc/ar-input.c
82827 +++ b/net/rxrpc/ar-input.c
82828 @@ -339,9 +339,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
82829 /* track the latest serial number on this connection for ACK packet
82830 * information */
82831 serial = ntohl(sp->hdr.serial);
82832 - hi_serial = atomic_read(&call->conn->hi_serial);
82833 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
82834 while (serial > hi_serial)
82835 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
82836 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
82837 serial);
82838
82839 /* request ACK generation for any ACK or DATA packet that requests
82840 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
82841 index 7043b29..06edcdf 100644
82842 --- a/net/rxrpc/ar-internal.h
82843 +++ b/net/rxrpc/ar-internal.h
82844 @@ -272,8 +272,8 @@ struct rxrpc_connection {
82845 int error; /* error code for local abort */
82846 int debug_id; /* debug ID for printks */
82847 unsigned call_counter; /* call ID counter */
82848 - atomic_t serial; /* packet serial number counter */
82849 - atomic_t hi_serial; /* highest serial number received */
82850 + atomic_unchecked_t serial; /* packet serial number counter */
82851 + atomic_unchecked_t hi_serial; /* highest serial number received */
82852 u8 avail_calls; /* number of calls available */
82853 u8 size_align; /* data size alignment (for security) */
82854 u8 header_size; /* rxrpc + security header size */
82855 @@ -346,7 +346,7 @@ struct rxrpc_call {
82856 spinlock_t lock;
82857 rwlock_t state_lock; /* lock for state transition */
82858 atomic_t usage;
82859 - atomic_t sequence; /* Tx data packet sequence counter */
82860 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
82861 u32 abort_code; /* local/remote abort code */
82862 enum { /* current state of call */
82863 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
82864 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
82865 */
82866 extern atomic_t rxrpc_n_skbs;
82867 extern __be32 rxrpc_epoch;
82868 -extern atomic_t rxrpc_debug_id;
82869 +extern atomic_unchecked_t rxrpc_debug_id;
82870 extern struct workqueue_struct *rxrpc_workqueue;
82871
82872 /*
82873 diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
82874 index 74697b2..10f9b77 100644
82875 --- a/net/rxrpc/ar-key.c
82876 +++ b/net/rxrpc/ar-key.c
82877 @@ -88,11 +88,11 @@ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr,
82878 return ret;
82879
82880 plen -= sizeof(*token);
82881 - token = kmalloc(sizeof(*token), GFP_KERNEL);
82882 + token = kzalloc(sizeof(*token), GFP_KERNEL);
82883 if (!token)
82884 return -ENOMEM;
82885
82886 - token->kad = kmalloc(plen, GFP_KERNEL);
82887 + token->kad = kzalloc(plen, GFP_KERNEL);
82888 if (!token->kad) {
82889 kfree(token);
82890 return -ENOMEM;
82891 @@ -730,10 +730,10 @@ static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen)
82892 goto error;
82893
82894 ret = -ENOMEM;
82895 - token = kmalloc(sizeof(*token), GFP_KERNEL);
82896 + token = kzalloc(sizeof(*token), GFP_KERNEL);
82897 if (!token)
82898 goto error;
82899 - token->kad = kmalloc(plen, GFP_KERNEL);
82900 + token->kad = kzalloc(plen, GFP_KERNEL);
82901 if (!token->kad)
82902 goto error_free;
82903
82904 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
82905 index 807535f..5b7f19e 100644
82906 --- a/net/rxrpc/ar-local.c
82907 +++ b/net/rxrpc/ar-local.c
82908 @@ -44,7 +44,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
82909 spin_lock_init(&local->lock);
82910 rwlock_init(&local->services_lock);
82911 atomic_set(&local->usage, 1);
82912 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
82913 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
82914 memcpy(&local->srx, srx, sizeof(*srx));
82915 }
82916
82917 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
82918 index cc9102c..7d3888e 100644
82919 --- a/net/rxrpc/ar-output.c
82920 +++ b/net/rxrpc/ar-output.c
82921 @@ -680,9 +680,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
82922 sp->hdr.cid = call->cid;
82923 sp->hdr.callNumber = call->call_id;
82924 sp->hdr.seq =
82925 - htonl(atomic_inc_return(&call->sequence));
82926 + htonl(atomic_inc_return_unchecked(&call->sequence));
82927 sp->hdr.serial =
82928 - htonl(atomic_inc_return(&conn->serial));
82929 + htonl(atomic_inc_return_unchecked(&conn->serial));
82930 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
82931 sp->hdr.userStatus = 0;
82932 sp->hdr.securityIndex = conn->security_ix;
82933 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
82934 index edc026c..4bd4e2d 100644
82935 --- a/net/rxrpc/ar-peer.c
82936 +++ b/net/rxrpc/ar-peer.c
82937 @@ -86,7 +86,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
82938 INIT_LIST_HEAD(&peer->error_targets);
82939 spin_lock_init(&peer->lock);
82940 atomic_set(&peer->usage, 1);
82941 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
82942 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
82943 memcpy(&peer->srx, srx, sizeof(*srx));
82944
82945 rxrpc_assess_MTU_size(peer);
82946 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
82947 index 38047f7..9f48511 100644
82948 --- a/net/rxrpc/ar-proc.c
82949 +++ b/net/rxrpc/ar-proc.c
82950 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
82951 atomic_read(&conn->usage),
82952 rxrpc_conn_states[conn->state],
82953 key_serial(conn->key),
82954 - atomic_read(&conn->serial),
82955 - atomic_read(&conn->hi_serial));
82956 + atomic_read_unchecked(&conn->serial),
82957 + atomic_read_unchecked(&conn->hi_serial));
82958
82959 return 0;
82960 }
82961 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
82962 index 0936e1a..437c640 100644
82963 --- a/net/rxrpc/ar-transport.c
82964 +++ b/net/rxrpc/ar-transport.c
82965 @@ -46,7 +46,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
82966 spin_lock_init(&trans->client_lock);
82967 rwlock_init(&trans->conn_lock);
82968 atomic_set(&trans->usage, 1);
82969 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
82970 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
82971
82972 if (peer->srx.transport.family == AF_INET) {
82973 switch (peer->srx.transport_type) {
82974 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
82975 index 713ac59..306f6ae 100644
82976 --- a/net/rxrpc/rxkad.c
82977 +++ b/net/rxrpc/rxkad.c
82978 @@ -210,6 +210,8 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
82979 u16 check;
82980 int nsg;
82981
82982 + pax_track_stack();
82983 +
82984 sp = rxrpc_skb(skb);
82985
82986 _enter("");
82987 @@ -337,6 +339,8 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
82988 u16 check;
82989 int nsg;
82990
82991 + pax_track_stack();
82992 +
82993 _enter("");
82994
82995 sp = rxrpc_skb(skb);
82996 @@ -609,7 +613,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
82997
82998 len = iov[0].iov_len + iov[1].iov_len;
82999
83000 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
83001 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
83002 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
83003
83004 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
83005 @@ -659,7 +663,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
83006
83007 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
83008
83009 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
83010 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
83011 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
83012
83013 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
83014 diff --git a/net/sctp/auth.c b/net/sctp/auth.c
83015 index 914c419..7a16d2c 100644
83016 --- a/net/sctp/auth.c
83017 +++ b/net/sctp/auth.c
83018 @@ -81,7 +81,7 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
83019 struct sctp_auth_bytes *key;
83020
83021 /* Verify that we are not going to overflow INT_MAX */
83022 - if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes))
83023 + if (key_len > (INT_MAX - sizeof(struct sctp_auth_bytes)))
83024 return NULL;
83025
83026 /* Allocate the shared key */
83027 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
83028 index d093cbf..9fc36fc 100644
83029 --- a/net/sctp/proc.c
83030 +++ b/net/sctp/proc.c
83031 @@ -213,7 +213,12 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
83032 sctp_for_each_hentry(epb, node, &head->chain) {
83033 ep = sctp_ep(epb);
83034 sk = epb->sk;
83035 - seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
83036 + seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ",
83037 +#ifdef CONFIG_GRKERNSEC_HIDESYM
83038 + NULL, NULL,
83039 +#else
83040 + ep, sk,
83041 +#endif
83042 sctp_sk(sk)->type, sk->sk_state, hash,
83043 epb->bind_addr.port,
83044 sock_i_uid(sk), sock_i_ino(sk));
83045 @@ -320,7 +325,12 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
83046 seq_printf(seq,
83047 "%8p %8p %-3d %-3d %-2d %-4d "
83048 "%4d %8d %8d %7d %5lu %-5d %5d ",
83049 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
83050 +#ifdef CONFIG_GRKERNSEC_HIDESYM
83051 + NULL, NULL,
83052 +#else
83053 + assoc, sk,
83054 +#endif
83055 + sctp_sk(sk)->type, sk->sk_state,
83056 assoc->state, hash,
83057 assoc->assoc_id,
83058 assoc->sndbuf_used,
83059 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
83060 index 3a95fcb..c40fc1d 100644
83061 --- a/net/sctp/socket.c
83062 +++ b/net/sctp/socket.c
83063 @@ -5802,7 +5802,6 @@ pp_found:
83064 */
83065 int reuse = sk->sk_reuse;
83066 struct sock *sk2;
83067 - struct hlist_node *node;
83068
83069 SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
83070 if (pp->fastreuse && sk->sk_reuse &&
83071 diff --git a/net/socket.c b/net/socket.c
83072 index d449812..4ac08d3c 100644
83073 --- a/net/socket.c
83074 +++ b/net/socket.c
83075 @@ -87,6 +87,7 @@
83076 #include <linux/wireless.h>
83077 #include <linux/nsproxy.h>
83078 #include <linux/magic.h>
83079 +#include <linux/in.h>
83080
83081 #include <asm/uaccess.h>
83082 #include <asm/unistd.h>
83083 @@ -97,6 +98,21 @@
83084 #include <net/sock.h>
83085 #include <linux/netfilter.h>
83086
83087 +extern void gr_attach_curr_ip(const struct sock *sk);
83088 +extern int gr_handle_sock_all(const int family, const int type,
83089 + const int protocol);
83090 +extern int gr_handle_sock_server(const struct sockaddr *sck);
83091 +extern int gr_handle_sock_server_other(const struct sock *sck);
83092 +extern int gr_handle_sock_client(const struct sockaddr *sck);
83093 +extern int gr_search_connect(struct socket * sock,
83094 + struct sockaddr_in * addr);
83095 +extern int gr_search_bind(struct socket * sock,
83096 + struct sockaddr_in * addr);
83097 +extern int gr_search_listen(struct socket * sock);
83098 +extern int gr_search_accept(struct socket * sock);
83099 +extern int gr_search_socket(const int domain, const int type,
83100 + const int protocol);
83101 +
83102 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
83103 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
83104 unsigned long nr_segs, loff_t pos);
83105 @@ -298,7 +314,7 @@ static int sockfs_get_sb(struct file_system_type *fs_type,
83106 mnt);
83107 }
83108
83109 -static struct vfsmount *sock_mnt __read_mostly;
83110 +struct vfsmount *sock_mnt __read_mostly;
83111
83112 static struct file_system_type sock_fs_type = {
83113 .name = "sockfs",
83114 @@ -1154,6 +1170,8 @@ static int __sock_create(struct net *net, int family, int type, int protocol,
83115 return -EAFNOSUPPORT;
83116 if (type < 0 || type >= SOCK_MAX)
83117 return -EINVAL;
83118 + if (protocol < 0)
83119 + return -EINVAL;
83120
83121 /* Compatibility.
83122
83123 @@ -1283,6 +1301,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
83124 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
83125 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
83126
83127 + if(!gr_search_socket(family, type, protocol)) {
83128 + retval = -EACCES;
83129 + goto out;
83130 + }
83131 +
83132 + if (gr_handle_sock_all(family, type, protocol)) {
83133 + retval = -EACCES;
83134 + goto out;
83135 + }
83136 +
83137 retval = sock_create(family, type, protocol, &sock);
83138 if (retval < 0)
83139 goto out;
83140 @@ -1415,6 +1443,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
83141 if (sock) {
83142 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
83143 if (err >= 0) {
83144 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
83145 + err = -EACCES;
83146 + goto error;
83147 + }
83148 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
83149 + if (err)
83150 + goto error;
83151 +
83152 err = security_socket_bind(sock,
83153 (struct sockaddr *)&address,
83154 addrlen);
83155 @@ -1423,6 +1459,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
83156 (struct sockaddr *)
83157 &address, addrlen);
83158 }
83159 +error:
83160 fput_light(sock->file, fput_needed);
83161 }
83162 return err;
83163 @@ -1446,10 +1483,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
83164 if ((unsigned)backlog > somaxconn)
83165 backlog = somaxconn;
83166
83167 + if (gr_handle_sock_server_other(sock->sk)) {
83168 + err = -EPERM;
83169 + goto error;
83170 + }
83171 +
83172 + err = gr_search_listen(sock);
83173 + if (err)
83174 + goto error;
83175 +
83176 err = security_socket_listen(sock, backlog);
83177 if (!err)
83178 err = sock->ops->listen(sock, backlog);
83179
83180 +error:
83181 fput_light(sock->file, fput_needed);
83182 }
83183 return err;
83184 @@ -1492,6 +1539,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
83185 newsock->type = sock->type;
83186 newsock->ops = sock->ops;
83187
83188 + if (gr_handle_sock_server_other(sock->sk)) {
83189 + err = -EPERM;
83190 + sock_release(newsock);
83191 + goto out_put;
83192 + }
83193 +
83194 + err = gr_search_accept(sock);
83195 + if (err) {
83196 + sock_release(newsock);
83197 + goto out_put;
83198 + }
83199 +
83200 /*
83201 * We don't need try_module_get here, as the listening socket (sock)
83202 * has the protocol module (sock->ops->owner) held.
83203 @@ -1534,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
83204 fd_install(newfd, newfile);
83205 err = newfd;
83206
83207 + gr_attach_curr_ip(newsock->sk);
83208 +
83209 out_put:
83210 fput_light(sock->file, fput_needed);
83211 out:
83212 @@ -1571,6 +1632,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
83213 int, addrlen)
83214 {
83215 struct socket *sock;
83216 + struct sockaddr *sck;
83217 struct sockaddr_storage address;
83218 int err, fput_needed;
83219
83220 @@ -1581,6 +1643,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
83221 if (err < 0)
83222 goto out_put;
83223
83224 + sck = (struct sockaddr *)&address;
83225 +
83226 + if (gr_handle_sock_client(sck)) {
83227 + err = -EACCES;
83228 + goto out_put;
83229 + }
83230 +
83231 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
83232 + if (err)
83233 + goto out_put;
83234 +
83235 err =
83236 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
83237 if (err)
83238 @@ -1882,6 +1955,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
83239 int err, ctl_len, iov_size, total_len;
83240 int fput_needed;
83241
83242 + pax_track_stack();
83243 +
83244 err = -EFAULT;
83245 if (MSG_CMSG_COMPAT & flags) {
83246 if (get_compat_msghdr(&msg_sys, msg_compat))
83247 @@ -2022,7 +2097,7 @@ SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
83248 * kernel msghdr to use the kernel address space)
83249 */
83250
83251 - uaddr = (__force void __user *)msg_sys.msg_name;
83252 + uaddr = (void __force_user *)msg_sys.msg_name;
83253 uaddr_len = COMPAT_NAMELEN(msg);
83254 if (MSG_CMSG_COMPAT & flags) {
83255 err = verify_compat_iovec(&msg_sys, iov,
83256 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
83257 index ac94477..8afe5c3 100644
83258 --- a/net/sunrpc/sched.c
83259 +++ b/net/sunrpc/sched.c
83260 @@ -234,10 +234,10 @@ static int rpc_wait_bit_killable(void *word)
83261 #ifdef RPC_DEBUG
83262 static void rpc_task_set_debuginfo(struct rpc_task *task)
83263 {
83264 - static atomic_t rpc_pid;
83265 + static atomic_unchecked_t rpc_pid;
83266
83267 task->tk_magic = RPC_TASK_MAGIC_ID;
83268 - task->tk_pid = atomic_inc_return(&rpc_pid);
83269 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
83270 }
83271 #else
83272 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
83273 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
83274 index 35fb68b..236a8bf 100644
83275 --- a/net/sunrpc/xprtrdma/svc_rdma.c
83276 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
83277 @@ -59,15 +59,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
83278 static unsigned int min_max_inline = 4096;
83279 static unsigned int max_max_inline = 65536;
83280
83281 -atomic_t rdma_stat_recv;
83282 -atomic_t rdma_stat_read;
83283 -atomic_t rdma_stat_write;
83284 -atomic_t rdma_stat_sq_starve;
83285 -atomic_t rdma_stat_rq_starve;
83286 -atomic_t rdma_stat_rq_poll;
83287 -atomic_t rdma_stat_rq_prod;
83288 -atomic_t rdma_stat_sq_poll;
83289 -atomic_t rdma_stat_sq_prod;
83290 +atomic_unchecked_t rdma_stat_recv;
83291 +atomic_unchecked_t rdma_stat_read;
83292 +atomic_unchecked_t rdma_stat_write;
83293 +atomic_unchecked_t rdma_stat_sq_starve;
83294 +atomic_unchecked_t rdma_stat_rq_starve;
83295 +atomic_unchecked_t rdma_stat_rq_poll;
83296 +atomic_unchecked_t rdma_stat_rq_prod;
83297 +atomic_unchecked_t rdma_stat_sq_poll;
83298 +atomic_unchecked_t rdma_stat_sq_prod;
83299
83300 /* Temporary NFS request map and context caches */
83301 struct kmem_cache *svc_rdma_map_cachep;
83302 @@ -105,7 +105,7 @@ static int read_reset_stat(ctl_table *table, int write,
83303 len -= *ppos;
83304 if (len > *lenp)
83305 len = *lenp;
83306 - if (len && copy_to_user(buffer, str_buf, len))
83307 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
83308 return -EFAULT;
83309 *lenp = len;
83310 *ppos += len;
83311 @@ -149,63 +149,63 @@ static ctl_table svcrdma_parm_table[] = {
83312 {
83313 .procname = "rdma_stat_read",
83314 .data = &rdma_stat_read,
83315 - .maxlen = sizeof(atomic_t),
83316 + .maxlen = sizeof(atomic_unchecked_t),
83317 .mode = 0644,
83318 .proc_handler = &read_reset_stat,
83319 },
83320 {
83321 .procname = "rdma_stat_recv",
83322 .data = &rdma_stat_recv,
83323 - .maxlen = sizeof(atomic_t),
83324 + .maxlen = sizeof(atomic_unchecked_t),
83325 .mode = 0644,
83326 .proc_handler = &read_reset_stat,
83327 },
83328 {
83329 .procname = "rdma_stat_write",
83330 .data = &rdma_stat_write,
83331 - .maxlen = sizeof(atomic_t),
83332 + .maxlen = sizeof(atomic_unchecked_t),
83333 .mode = 0644,
83334 .proc_handler = &read_reset_stat,
83335 },
83336 {
83337 .procname = "rdma_stat_sq_starve",
83338 .data = &rdma_stat_sq_starve,
83339 - .maxlen = sizeof(atomic_t),
83340 + .maxlen = sizeof(atomic_unchecked_t),
83341 .mode = 0644,
83342 .proc_handler = &read_reset_stat,
83343 },
83344 {
83345 .procname = "rdma_stat_rq_starve",
83346 .data = &rdma_stat_rq_starve,
83347 - .maxlen = sizeof(atomic_t),
83348 + .maxlen = sizeof(atomic_unchecked_t),
83349 .mode = 0644,
83350 .proc_handler = &read_reset_stat,
83351 },
83352 {
83353 .procname = "rdma_stat_rq_poll",
83354 .data = &rdma_stat_rq_poll,
83355 - .maxlen = sizeof(atomic_t),
83356 + .maxlen = sizeof(atomic_unchecked_t),
83357 .mode = 0644,
83358 .proc_handler = &read_reset_stat,
83359 },
83360 {
83361 .procname = "rdma_stat_rq_prod",
83362 .data = &rdma_stat_rq_prod,
83363 - .maxlen = sizeof(atomic_t),
83364 + .maxlen = sizeof(atomic_unchecked_t),
83365 .mode = 0644,
83366 .proc_handler = &read_reset_stat,
83367 },
83368 {
83369 .procname = "rdma_stat_sq_poll",
83370 .data = &rdma_stat_sq_poll,
83371 - .maxlen = sizeof(atomic_t),
83372 + .maxlen = sizeof(atomic_unchecked_t),
83373 .mode = 0644,
83374 .proc_handler = &read_reset_stat,
83375 },
83376 {
83377 .procname = "rdma_stat_sq_prod",
83378 .data = &rdma_stat_sq_prod,
83379 - .maxlen = sizeof(atomic_t),
83380 + .maxlen = sizeof(atomic_unchecked_t),
83381 .mode = 0644,
83382 .proc_handler = &read_reset_stat,
83383 },
83384 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
83385 index 9e88438..8ed5cf0 100644
83386 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
83387 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
83388 @@ -495,7 +495,7 @@ next_sge:
83389 svc_rdma_put_context(ctxt, 0);
83390 goto out;
83391 }
83392 - atomic_inc(&rdma_stat_read);
83393 + atomic_inc_unchecked(&rdma_stat_read);
83394
83395 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
83396 chl_map->ch[ch_no].count -= read_wr.num_sge;
83397 @@ -606,7 +606,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
83398 dto_q);
83399 list_del_init(&ctxt->dto_q);
83400 } else {
83401 - atomic_inc(&rdma_stat_rq_starve);
83402 + atomic_inc_unchecked(&rdma_stat_rq_starve);
83403 clear_bit(XPT_DATA, &xprt->xpt_flags);
83404 ctxt = NULL;
83405 }
83406 @@ -626,7 +626,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
83407 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
83408 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
83409 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
83410 - atomic_inc(&rdma_stat_recv);
83411 + atomic_inc_unchecked(&rdma_stat_recv);
83412
83413 /* Build up the XDR from the receive buffers. */
83414 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
83415 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
83416 index f11be72..7aad4e8 100644
83417 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
83418 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
83419 @@ -328,7 +328,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
83420 write_wr.wr.rdma.remote_addr = to;
83421
83422 /* Post It */
83423 - atomic_inc(&rdma_stat_write);
83424 + atomic_inc_unchecked(&rdma_stat_write);
83425 if (svc_rdma_send(xprt, &write_wr))
83426 goto err;
83427 return 0;
83428 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
83429 index 3fa5751..030ba89 100644
83430 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
83431 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
83432 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
83433 return;
83434
83435 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
83436 - atomic_inc(&rdma_stat_rq_poll);
83437 + atomic_inc_unchecked(&rdma_stat_rq_poll);
83438
83439 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
83440 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
83441 @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
83442 }
83443
83444 if (ctxt)
83445 - atomic_inc(&rdma_stat_rq_prod);
83446 + atomic_inc_unchecked(&rdma_stat_rq_prod);
83447
83448 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
83449 /*
83450 @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
83451 return;
83452
83453 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
83454 - atomic_inc(&rdma_stat_sq_poll);
83455 + atomic_inc_unchecked(&rdma_stat_sq_poll);
83456 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
83457 if (wc.status != IB_WC_SUCCESS)
83458 /* Close the transport */
83459 @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
83460 }
83461
83462 if (ctxt)
83463 - atomic_inc(&rdma_stat_sq_prod);
83464 + atomic_inc_unchecked(&rdma_stat_sq_prod);
83465 }
83466
83467 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
83468 @@ -1260,7 +1260,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
83469 spin_lock_bh(&xprt->sc_lock);
83470 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
83471 spin_unlock_bh(&xprt->sc_lock);
83472 - atomic_inc(&rdma_stat_sq_starve);
83473 + atomic_inc_unchecked(&rdma_stat_sq_starve);
83474
83475 /* See if we can opportunistically reap SQ WR to make room */
83476 sq_cq_reap(xprt);
83477 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
83478 index 0b15d72..7934fbb 100644
83479 --- a/net/sysctl_net.c
83480 +++ b/net/sysctl_net.c
83481 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
83482 struct ctl_table *table)
83483 {
83484 /* Allow network administrator to have same access as root. */
83485 - if (capable(CAP_NET_ADMIN)) {
83486 + if (capable_nolog(CAP_NET_ADMIN)) {
83487 int mode = (table->mode >> 6) & 7;
83488 return (mode << 6) | (mode << 3) | mode;
83489 }
83490 diff --git a/net/tipc/link.c b/net/tipc/link.c
83491 index dd4c18b..f40d38d 100644
83492 --- a/net/tipc/link.c
83493 +++ b/net/tipc/link.c
83494 @@ -1418,7 +1418,7 @@ again:
83495
83496 if (!sect_rest) {
83497 sect_rest = msg_sect[++curr_sect].iov_len;
83498 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
83499 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
83500 }
83501
83502 if (sect_rest < fragm_rest)
83503 @@ -1437,7 +1437,7 @@ error:
83504 }
83505 } else
83506 skb_copy_to_linear_data_offset(buf, fragm_crs,
83507 - sect_crs, sz);
83508 + (const void __force_kernel *)sect_crs, sz);
83509 sect_crs += sz;
83510 sect_rest -= sz;
83511 fragm_crs += sz;
83512 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
83513 index 0747d8a..e8bf3f3 100644
83514 --- a/net/tipc/subscr.c
83515 +++ b/net/tipc/subscr.c
83516 @@ -104,7 +104,7 @@ static void subscr_send_event(struct subscription *sub,
83517 {
83518 struct iovec msg_sect;
83519
83520 - msg_sect.iov_base = (void *)&sub->evt;
83521 + msg_sect.iov_base = (void __force_user *)&sub->evt;
83522 msg_sect.iov_len = sizeof(struct tipc_event);
83523
83524 sub->evt.event = htohl(event, sub->swap);
83525 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
83526 index db8d51a..608692d 100644
83527 --- a/net/unix/af_unix.c
83528 +++ b/net/unix/af_unix.c
83529 @@ -745,6 +745,12 @@ static struct sock *unix_find_other(struct net *net,
83530 err = -ECONNREFUSED;
83531 if (!S_ISSOCK(inode->i_mode))
83532 goto put_fail;
83533 +
83534 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
83535 + err = -EACCES;
83536 + goto put_fail;
83537 + }
83538 +
83539 u = unix_find_socket_byinode(net, inode);
83540 if (!u)
83541 goto put_fail;
83542 @@ -765,6 +771,13 @@ static struct sock *unix_find_other(struct net *net,
83543 if (u) {
83544 struct dentry *dentry;
83545 dentry = unix_sk(u)->dentry;
83546 +
83547 + if (!gr_handle_chroot_unix(u->sk_peercred.pid)) {
83548 + err = -EPERM;
83549 + sock_put(u);
83550 + goto fail;
83551 + }
83552 +
83553 if (dentry)
83554 touch_atime(unix_sk(u)->mnt, dentry);
83555 } else
83556 @@ -850,11 +863,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
83557 err = security_path_mknod(&nd.path, dentry, mode, 0);
83558 if (err)
83559 goto out_mknod_drop_write;
83560 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
83561 + err = -EACCES;
83562 + goto out_mknod_drop_write;
83563 + }
83564 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
83565 out_mknod_drop_write:
83566 mnt_drop_write(nd.path.mnt);
83567 if (err)
83568 goto out_mknod_dput;
83569 +
83570 + gr_handle_create(dentry, nd.path.mnt);
83571 +
83572 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
83573 dput(nd.path.dentry);
83574 nd.path.dentry = dentry;
83575 @@ -2211,7 +2231,11 @@ static int unix_seq_show(struct seq_file *seq, void *v)
83576 unix_state_lock(s);
83577
83578 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
83579 +#ifdef CONFIG_GRKERNSEC_HIDESYM
83580 + NULL,
83581 +#else
83582 s,
83583 +#endif
83584 atomic_read(&s->sk_refcnt),
83585 0,
83586 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
83587 diff --git a/net/wireless/core.h b/net/wireless/core.h
83588 index 376798f..109a61f 100644
83589 --- a/net/wireless/core.h
83590 +++ b/net/wireless/core.h
83591 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
83592 struct mutex mtx;
83593
83594 /* rfkill support */
83595 - struct rfkill_ops rfkill_ops;
83596 + rfkill_ops_no_const rfkill_ops;
83597 struct rfkill *rfkill;
83598 struct work_struct rfkill_sync;
83599
83600 diff --git a/net/wireless/wext.c b/net/wireless/wext.c
83601 index a2e4c60..0979cbe 100644
83602 --- a/net/wireless/wext.c
83603 +++ b/net/wireless/wext.c
83604 @@ -816,8 +816,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
83605 */
83606
83607 /* Support for very large requests */
83608 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
83609 - (user_length > descr->max_tokens)) {
83610 + if (user_length > descr->max_tokens) {
83611 /* Allow userspace to GET more than max so
83612 * we can support any size GET requests.
83613 * There is still a limit : -ENOMEM.
83614 @@ -854,22 +853,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
83615 }
83616 }
83617
83618 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
83619 - /*
83620 - * If this is a GET, but not NOMAX, it means that the extra
83621 - * data is not bounded by userspace, but by max_tokens. Thus
83622 - * set the length to max_tokens. This matches the extra data
83623 - * allocation.
83624 - * The driver should fill it with the number of tokens it
83625 - * provided, and it may check iwp->length rather than having
83626 - * knowledge of max_tokens. If the driver doesn't change the
83627 - * iwp->length, this ioctl just copies back max_token tokens
83628 - * filled with zeroes. Hopefully the driver isn't claiming
83629 - * them to be valid data.
83630 - */
83631 - iwp->length = descr->max_tokens;
83632 - }
83633 -
83634 err = handler(dev, info, (union iwreq_data *) iwp, extra);
83635
83636 iwp->length += essid_compat;
83637 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
83638 index cb81ca3..e15d49a 100644
83639 --- a/net/xfrm/xfrm_policy.c
83640 +++ b/net/xfrm/xfrm_policy.c
83641 @@ -586,7 +586,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
83642 hlist_add_head(&policy->bydst, chain);
83643 xfrm_pol_hold(policy);
83644 net->xfrm.policy_count[dir]++;
83645 - atomic_inc(&flow_cache_genid);
83646 + atomic_inc_unchecked(&flow_cache_genid);
83647 if (delpol)
83648 __xfrm_policy_unlink(delpol, dir);
83649 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
83650 @@ -669,7 +669,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u8 type, int dir,
83651 write_unlock_bh(&xfrm_policy_lock);
83652
83653 if (ret && delete) {
83654 - atomic_inc(&flow_cache_genid);
83655 + atomic_inc_unchecked(&flow_cache_genid);
83656 xfrm_policy_kill(ret);
83657 }
83658 return ret;
83659 @@ -710,7 +710,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u8 type, int dir, u32 id,
83660 write_unlock_bh(&xfrm_policy_lock);
83661
83662 if (ret && delete) {
83663 - atomic_inc(&flow_cache_genid);
83664 + atomic_inc_unchecked(&flow_cache_genid);
83665 xfrm_policy_kill(ret);
83666 }
83667 return ret;
83668 @@ -824,7 +824,7 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
83669 }
83670
83671 }
83672 - atomic_inc(&flow_cache_genid);
83673 + atomic_inc_unchecked(&flow_cache_genid);
83674 out:
83675 write_unlock_bh(&xfrm_policy_lock);
83676 return err;
83677 @@ -1088,7 +1088,7 @@ int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
83678 write_unlock_bh(&xfrm_policy_lock);
83679 if (pol) {
83680 if (dir < XFRM_POLICY_MAX)
83681 - atomic_inc(&flow_cache_genid);
83682 + atomic_inc_unchecked(&flow_cache_genid);
83683 xfrm_policy_kill(pol);
83684 return 0;
83685 }
83686 @@ -1477,7 +1477,7 @@ free_dst:
83687 goto out;
83688 }
83689
83690 -static int inline
83691 +static inline int
83692 xfrm_dst_alloc_copy(void **target, void *src, int size)
83693 {
83694 if (!*target) {
83695 @@ -1489,7 +1489,7 @@ xfrm_dst_alloc_copy(void **target, void *src, int size)
83696 return 0;
83697 }
83698
83699 -static int inline
83700 +static inline int
83701 xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
83702 {
83703 #ifdef CONFIG_XFRM_SUB_POLICY
83704 @@ -1501,7 +1501,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
83705 #endif
83706 }
83707
83708 -static int inline
83709 +static inline int
83710 xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
83711 {
83712 #ifdef CONFIG_XFRM_SUB_POLICY
83713 @@ -1537,7 +1537,7 @@ int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl,
83714 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
83715
83716 restart:
83717 - genid = atomic_read(&flow_cache_genid);
83718 + genid = atomic_read_unchecked(&flow_cache_genid);
83719 policy = NULL;
83720 for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
83721 pols[pi] = NULL;
83722 @@ -1680,7 +1680,7 @@ restart:
83723 goto error;
83724 }
83725 if (nx == -EAGAIN ||
83726 - genid != atomic_read(&flow_cache_genid)) {
83727 + genid != atomic_read_unchecked(&flow_cache_genid)) {
83728 xfrm_pols_put(pols, npols);
83729 goto restart;
83730 }
83731 diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
83732 index b95a2d6..85c4d78 100644
83733 --- a/net/xfrm/xfrm_user.c
83734 +++ b/net/xfrm/xfrm_user.c
83735 @@ -1169,6 +1169,8 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
83736 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
83737 int i;
83738
83739 + pax_track_stack();
83740 +
83741 if (xp->xfrm_nr == 0)
83742 return 0;
83743
83744 @@ -1784,6 +1786,8 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
83745 int err;
83746 int n = 0;
83747
83748 + pax_track_stack();
83749 +
83750 if (attrs[XFRMA_MIGRATE] == NULL)
83751 return -EINVAL;
83752
83753 diff --git a/samples/kobject/kset-example.c b/samples/kobject/kset-example.c
83754 index 45b7d56..19e828c 100644
83755 --- a/samples/kobject/kset-example.c
83756 +++ b/samples/kobject/kset-example.c
83757 @@ -87,7 +87,7 @@ static ssize_t foo_attr_store(struct kobject *kobj,
83758 }
83759
83760 /* Our custom sysfs_ops that we will associate with our ktype later on */
83761 -static struct sysfs_ops foo_sysfs_ops = {
83762 +static const struct sysfs_ops foo_sysfs_ops = {
83763 .show = foo_attr_show,
83764 .store = foo_attr_store,
83765 };
83766 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
83767 index 341b589..405aed3 100644
83768 --- a/scripts/Makefile.build
83769 +++ b/scripts/Makefile.build
83770 @@ -59,7 +59,7 @@ endif
83771 endif
83772
83773 # Do not include host rules unless needed
83774 -ifneq ($(hostprogs-y)$(hostprogs-m),)
83775 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
83776 include scripts/Makefile.host
83777 endif
83778
83779 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
83780 index 6f89fbb..53adc9c 100644
83781 --- a/scripts/Makefile.clean
83782 +++ b/scripts/Makefile.clean
83783 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
83784 __clean-files := $(extra-y) $(always) \
83785 $(targets) $(clean-files) \
83786 $(host-progs) \
83787 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
83788 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
83789 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
83790
83791 # as clean-files is given relative to the current directory, this adds
83792 # a $(obj) prefix, except for absolute paths
83793 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
83794 index 1ac414f..a1c1451 100644
83795 --- a/scripts/Makefile.host
83796 +++ b/scripts/Makefile.host
83797 @@ -31,6 +31,7 @@
83798 # Note: Shared libraries consisting of C++ files are not supported
83799
83800 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
83801 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
83802
83803 # C code
83804 # Executables compiled from a single .c file
83805 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
83806 # Shared libaries (only .c supported)
83807 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
83808 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
83809 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
83810 # Remove .so files from "xxx-objs"
83811 host-cobjs := $(filter-out %.so,$(host-cobjs))
83812
83813 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
83814 index 6bf21f8..c0546b3 100644
83815 --- a/scripts/basic/fixdep.c
83816 +++ b/scripts/basic/fixdep.c
83817 @@ -162,7 +162,7 @@ static void grow_config(int len)
83818 /*
83819 * Lookup a value in the configuration string.
83820 */
83821 -static int is_defined_config(const char * name, int len)
83822 +static int is_defined_config(const char * name, unsigned int len)
83823 {
83824 const char * pconfig;
83825 const char * plast = str_config + len_config - len;
83826 @@ -199,7 +199,7 @@ static void clear_config(void)
83827 /*
83828 * Record the use of a CONFIG_* word.
83829 */
83830 -static void use_config(char *m, int slen)
83831 +static void use_config(char *m, unsigned int slen)
83832 {
83833 char s[PATH_MAX];
83834 char *p;
83835 @@ -222,9 +222,9 @@ static void use_config(char *m, int slen)
83836
83837 static void parse_config_file(char *map, size_t len)
83838 {
83839 - int *end = (int *) (map + len);
83840 + unsigned int *end = (unsigned int *) (map + len);
83841 /* start at +1, so that p can never be < map */
83842 - int *m = (int *) map + 1;
83843 + unsigned int *m = (unsigned int *) map + 1;
83844 char *p, *q;
83845
83846 for (; m < end; m++) {
83847 @@ -371,7 +371,7 @@ static void print_deps(void)
83848 static void traps(void)
83849 {
83850 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
83851 - int *p = (int *)test;
83852 + unsigned int *p = (unsigned int *)test;
83853
83854 if (*p != INT_CONF) {
83855 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
83856 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
83857 new file mode 100644
83858 index 0000000..8729101
83859 --- /dev/null
83860 +++ b/scripts/gcc-plugin.sh
83861 @@ -0,0 +1,2 @@
83862 +#!/bin/sh
83863 +echo -e "#include \"gcc-plugin.h\"\n#include \"tree.h\"\n#include \"tm.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
83864 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
83865 index 62a9025..65b82ad 100644
83866 --- a/scripts/mod/file2alias.c
83867 +++ b/scripts/mod/file2alias.c
83868 @@ -72,7 +72,7 @@ static void device_id_check(const char *modname, const char *device_id,
83869 unsigned long size, unsigned long id_size,
83870 void *symval)
83871 {
83872 - int i;
83873 + unsigned int i;
83874
83875 if (size % id_size || size < id_size) {
83876 if (cross_build != 0)
83877 @@ -102,7 +102,7 @@ static void device_id_check(const char *modname, const char *device_id,
83878 /* USB is special because the bcdDevice can be matched against a numeric range */
83879 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
83880 static void do_usb_entry(struct usb_device_id *id,
83881 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
83882 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
83883 unsigned char range_lo, unsigned char range_hi,
83884 struct module *mod)
83885 {
83886 @@ -151,7 +151,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
83887 {
83888 unsigned int devlo, devhi;
83889 unsigned char chi, clo;
83890 - int ndigits;
83891 + unsigned int ndigits;
83892
83893 id->match_flags = TO_NATIVE(id->match_flags);
83894 id->idVendor = TO_NATIVE(id->idVendor);
83895 @@ -368,7 +368,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
83896 for (i = 0; i < count; i++) {
83897 const char *id = (char *)devs[i].id;
83898 char acpi_id[sizeof(devs[0].id)];
83899 - int j;
83900 + unsigned int j;
83901
83902 buf_printf(&mod->dev_table_buf,
83903 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
83904 @@ -398,7 +398,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
83905
83906 for (j = 0; j < PNP_MAX_DEVICES; j++) {
83907 const char *id = (char *)card->devs[j].id;
83908 - int i2, j2;
83909 + unsigned int i2, j2;
83910 int dup = 0;
83911
83912 if (!id[0])
83913 @@ -424,7 +424,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
83914 /* add an individual alias for every device entry */
83915 if (!dup) {
83916 char acpi_id[sizeof(card->devs[0].id)];
83917 - int k;
83918 + unsigned int k;
83919
83920 buf_printf(&mod->dev_table_buf,
83921 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
83922 @@ -699,7 +699,7 @@ static void dmi_ascii_filter(char *d, const char *s)
83923 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
83924 char *alias)
83925 {
83926 - int i, j;
83927 + unsigned int i, j;
83928
83929 sprintf(alias, "dmi*");
83930
83931 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
83932 index 03efeab..0888989 100644
83933 --- a/scripts/mod/modpost.c
83934 +++ b/scripts/mod/modpost.c
83935 @@ -835,6 +835,7 @@ enum mismatch {
83936 INIT_TO_EXIT,
83937 EXIT_TO_INIT,
83938 EXPORT_TO_INIT_EXIT,
83939 + DATA_TO_TEXT
83940 };
83941
83942 struct sectioncheck {
83943 @@ -920,6 +921,12 @@ const struct sectioncheck sectioncheck[] = {
83944 .fromsec = { "__ksymtab*", NULL },
83945 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
83946 .mismatch = EXPORT_TO_INIT_EXIT
83947 +},
83948 +/* Do not reference code from writable data */
83949 +{
83950 + .fromsec = { DATA_SECTIONS, NULL },
83951 + .tosec = { TEXT_SECTIONS, NULL },
83952 + .mismatch = DATA_TO_TEXT
83953 }
83954 };
83955
83956 @@ -1024,10 +1031,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
83957 continue;
83958 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
83959 continue;
83960 - if (sym->st_value == addr)
83961 - return sym;
83962 /* Find a symbol nearby - addr are maybe negative */
83963 d = sym->st_value - addr;
83964 + if (d == 0)
83965 + return sym;
83966 if (d < 0)
83967 d = addr - sym->st_value;
83968 if (d < distance) {
83969 @@ -1268,6 +1275,14 @@ static void report_sec_mismatch(const char *modname, enum mismatch mismatch,
83970 "Fix this by removing the %sannotation of %s "
83971 "or drop the export.\n",
83972 tosym, sec2annotation(tosec), sec2annotation(tosec), tosym);
83973 + case DATA_TO_TEXT:
83974 +/*
83975 + fprintf(stderr,
83976 + "The variable %s references\n"
83977 + "the %s %s%s%s\n",
83978 + fromsym, to, sec2annotation(tosec), tosym, to_p);
83979 +*/
83980 + break;
83981 case NO_MISMATCH:
83982 /* To get warnings on missing members */
83983 break;
83984 @@ -1495,7 +1510,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
83985 static void check_sec_ref(struct module *mod, const char *modname,
83986 struct elf_info *elf)
83987 {
83988 - int i;
83989 + unsigned int i;
83990 Elf_Shdr *sechdrs = elf->sechdrs;
83991
83992 /* Walk through all sections */
83993 @@ -1651,7 +1666,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
83994 va_end(ap);
83995 }
83996
83997 -void buf_write(struct buffer *buf, const char *s, int len)
83998 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
83999 {
84000 if (buf->size - buf->pos < len) {
84001 buf->size += len + SZ;
84002 @@ -1863,7 +1878,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
84003 if (fstat(fileno(file), &st) < 0)
84004 goto close_write;
84005
84006 - if (st.st_size != b->pos)
84007 + if (st.st_size != (off_t)b->pos)
84008 goto close_write;
84009
84010 tmp = NOFAIL(malloc(b->pos));
84011 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
84012 index 09f58e3..4b66092 100644
84013 --- a/scripts/mod/modpost.h
84014 +++ b/scripts/mod/modpost.h
84015 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
84016
84017 struct buffer {
84018 char *p;
84019 - int pos;
84020 - int size;
84021 + unsigned int pos;
84022 + unsigned int size;
84023 };
84024
84025 void __attribute__((format(printf, 2, 3)))
84026 buf_printf(struct buffer *buf, const char *fmt, ...);
84027
84028 void
84029 -buf_write(struct buffer *buf, const char *s, int len);
84030 +buf_write(struct buffer *buf, const char *s, unsigned int len);
84031
84032 struct module {
84033 struct module *next;
84034 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
84035 index ecf9c7d..d52b38e 100644
84036 --- a/scripts/mod/sumversion.c
84037 +++ b/scripts/mod/sumversion.c
84038 @@ -455,7 +455,7 @@ static void write_version(const char *filename, const char *sum,
84039 goto out;
84040 }
84041
84042 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
84043 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
84044 warn("writing sum in %s failed: %s\n",
84045 filename, strerror(errno));
84046 goto out;
84047 diff --git a/scripts/package/mkspec b/scripts/package/mkspec
84048 index 47bdd2f..d4d4e93 100755
84049 --- a/scripts/package/mkspec
84050 +++ b/scripts/package/mkspec
84051 @@ -70,7 +70,7 @@ echo 'mkdir -p $RPM_BUILD_ROOT/boot $RPM_BUILD_ROOT/lib/modules'
84052 echo 'mkdir -p $RPM_BUILD_ROOT/lib/firmware'
84053 echo "%endif"
84054
84055 -echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{_smp_mflags} KBUILD_SRC= modules_install'
84056 +echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{?_smp_mflags} KBUILD_SRC= modules_install'
84057 echo "%ifarch ia64"
84058 echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/efi/vmlinuz-$KERNELRELEASE"
84059 echo 'ln -s '"efi/vmlinuz-$KERNELRELEASE" '$RPM_BUILD_ROOT'"/boot/"
84060 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
84061 index 5c11312..72742b5 100644
84062 --- a/scripts/pnmtologo.c
84063 +++ b/scripts/pnmtologo.c
84064 @@ -237,14 +237,14 @@ static void write_header(void)
84065 fprintf(out, " * Linux logo %s\n", logoname);
84066 fputs(" */\n\n", out);
84067 fputs("#include <linux/linux_logo.h>\n\n", out);
84068 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
84069 + fprintf(out, "static unsigned char %s_data[] = {\n",
84070 logoname);
84071 }
84072
84073 static void write_footer(void)
84074 {
84075 fputs("\n};\n\n", out);
84076 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
84077 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
84078 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
84079 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
84080 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
84081 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
84082 fputs("\n};\n\n", out);
84083
84084 /* write logo clut */
84085 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
84086 + fprintf(out, "static unsigned char %s_clut[] = {\n",
84087 logoname);
84088 write_hex_cnt = 0;
84089 for (i = 0; i < logo_clutsize; i++) {
84090 diff --git a/scripts/tags.sh b/scripts/tags.sh
84091 index d52f7a0..269eb1b 100755
84092 --- a/scripts/tags.sh
84093 +++ b/scripts/tags.sh
84094 @@ -93,6 +93,11 @@ docscope()
84095 cscope -b -f cscope.out
84096 }
84097
84098 +dogtags()
84099 +{
84100 + all_sources | gtags -f -
84101 +}
84102 +
84103 exuberant()
84104 {
84105 all_sources | xargs $1 -a \
84106 @@ -164,6 +169,10 @@ case "$1" in
84107 docscope
84108 ;;
84109
84110 + "gtags")
84111 + dogtags
84112 + ;;
84113 +
84114 "tags")
84115 rm -f tags
84116 xtags ctags
84117 diff --git a/security/Kconfig b/security/Kconfig
84118 index fb363cd..0524cf3 100644
84119 --- a/security/Kconfig
84120 +++ b/security/Kconfig
84121 @@ -4,6 +4,625 @@
84122
84123 menu "Security options"
84124
84125 +source grsecurity/Kconfig
84126 +
84127 +menu "PaX"
84128 +
84129 + config ARCH_TRACK_EXEC_LIMIT
84130 + bool
84131 +
84132 + config PAX_KERNEXEC_PLUGIN
84133 + bool
84134 +
84135 + config PAX_PER_CPU_PGD
84136 + bool
84137 +
84138 + config TASK_SIZE_MAX_SHIFT
84139 + int
84140 + depends on X86_64
84141 + default 47 if !PAX_PER_CPU_PGD
84142 + default 42 if PAX_PER_CPU_PGD
84143 +
84144 + config PAX_ENABLE_PAE
84145 + bool
84146 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
84147 +
84148 +config PAX
84149 + bool "Enable various PaX features"
84150 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
84151 + help
84152 + This allows you to enable various PaX features. PaX adds
84153 + intrusion prevention mechanisms to the kernel that reduce
84154 + the risks posed by exploitable memory corruption bugs.
84155 +
84156 +menu "PaX Control"
84157 + depends on PAX
84158 +
84159 +config PAX_SOFTMODE
84160 + bool 'Support soft mode'
84161 + help
84162 + Enabling this option will allow you to run PaX in soft mode, that
84163 + is, PaX features will not be enforced by default, only on executables
84164 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
84165 + support as they are the only way to mark executables for soft mode use.
84166 +
84167 + Soft mode can be activated by using the "pax_softmode=1" kernel command
84168 + line option on boot. Furthermore you can control various PaX features
84169 + at runtime via the entries in /proc/sys/kernel/pax.
84170 +
84171 +config PAX_EI_PAX
84172 + bool 'Use legacy ELF header marking'
84173 + help
84174 + Enabling this option will allow you to control PaX features on
84175 + a per executable basis via the 'chpax' utility available at
84176 + http://pax.grsecurity.net/. The control flags will be read from
84177 + an otherwise reserved part of the ELF header. This marking has
84178 + numerous drawbacks (no support for soft-mode, toolchain does not
84179 + know about the non-standard use of the ELF header) therefore it
84180 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
84181 + support.
84182 +
84183 + If you have applications not marked by the PT_PAX_FLAGS ELF program
84184 + header and you cannot use XATTR_PAX_FLAGS then you MUST enable this
84185 + option otherwise they will not get any protection.
84186 +
84187 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
84188 + support as well, they will override the legacy EI_PAX marks.
84189 +
84190 +config PAX_PT_PAX_FLAGS
84191 + bool 'Use ELF program header marking'
84192 + help
84193 + Enabling this option will allow you to control PaX features on
84194 + a per executable basis via the 'paxctl' utility available at
84195 + http://pax.grsecurity.net/. The control flags will be read from
84196 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
84197 + has the benefits of supporting both soft mode and being fully
84198 + integrated into the toolchain (the binutils patch is available
84199 + from http://pax.grsecurity.net).
84200 +
84201 + If you have applications not marked by the PT_PAX_FLAGS ELF program
84202 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
84203 + support otherwise they will not get any protection.
84204 +
84205 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
84206 + must make sure that the marks are the same if a binary has both marks.
84207 +
84208 + Note that if you enable the legacy EI_PAX marking support as well,
84209 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
84210 +
84211 +config PAX_XATTR_PAX_FLAGS
84212 + bool 'Use filesystem extended attributes marking'
84213 + depends on EXPERT
84214 + select CIFS_XATTR if CIFS
84215 + select EXT2_FS_XATTR if EXT2_FS
84216 + select EXT3_FS_XATTR if EXT3_FS
84217 + select EXT4_FS_XATTR if EXT4_FS
84218 + select JFFS2_FS_XATTR if JFFS2_FS
84219 + select REISERFS_FS_XATTR if REISERFS_FS
84220 + select UBIFS_FS_XATTR if UBIFS_FS
84221 + help
84222 + Enabling this option will allow you to control PaX features on
84223 + a per executable basis via the 'setfattr' utility. The control
84224 + flags will be read from the user.pax.flags extended attribute of
84225 + the file. This marking has the benefit of supporting binary-only
84226 + applications that self-check themselves (e.g., skype) and would
84227 + not tolerate chpax/paxctl changes. The main drawback is that
84228 + extended attributes are not supported by some filesystems (e.g.,
84229 + isofs, squashfs, tmpfs, udf, vfat) so copying files through such
84230 + filesystems will lose the extended attributes and these PaX markings.
84231 +
84232 + If you have applications not marked by the PT_PAX_FLAGS ELF program
84233 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
84234 + support otherwise they will not get any protection.
84235 +
84236 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
84237 + must make sure that the marks are the same if a binary has both marks.
84238 +
84239 + Note that if you enable the legacy EI_PAX marking support as well,
84240 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
84241 +
84242 +choice
84243 + prompt 'MAC system integration'
84244 + default PAX_HAVE_ACL_FLAGS
84245 + help
84246 + Mandatory Access Control systems have the option of controlling
84247 + PaX flags on a per executable basis, choose the method supported
84248 + by your particular system.
84249 +
84250 + - "none": if your MAC system does not interact with PaX,
84251 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
84252 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
84253 +
84254 + NOTE: this option is for developers/integrators only.
84255 +
84256 + config PAX_NO_ACL_FLAGS
84257 + bool 'none'
84258 +
84259 + config PAX_HAVE_ACL_FLAGS
84260 + bool 'direct'
84261 +
84262 + config PAX_HOOK_ACL_FLAGS
84263 + bool 'hook'
84264 +endchoice
84265 +
84266 +endmenu
84267 +
84268 +menu "Non-executable pages"
84269 + depends on PAX
84270 +
84271 +config PAX_NOEXEC
84272 + bool "Enforce non-executable pages"
84273 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
84274 + help
84275 + By design some architectures do not allow for protecting memory
84276 + pages against execution or even if they do, Linux does not make
84277 + use of this feature. In practice this means that if a page is
84278 + readable (such as the stack or heap) it is also executable.
84279 +
84280 + There is a well known exploit technique that makes use of this
84281 + fact and a common programming mistake where an attacker can
84282 + introduce code of his choice somewhere in the attacked program's
84283 + memory (typically the stack or the heap) and then execute it.
84284 +
84285 + If the attacked program was running with different (typically
84286 + higher) privileges than that of the attacker, then he can elevate
84287 + his own privilege level (e.g. get a root shell, write to files for
84288 + which he does not have write access to, etc).
84289 +
84290 + Enabling this option will let you choose from various features
84291 + that prevent the injection and execution of 'foreign' code in
84292 + a program.
84293 +
84294 + This will also break programs that rely on the old behaviour and
84295 + expect that dynamically allocated memory via the malloc() family
84296 + of functions is executable (which it is not). Notable examples
84297 + are the XFree86 4.x server, the java runtime and wine.
84298 +
84299 +config PAX_PAGEEXEC
84300 + bool "Paging based non-executable pages"
84301 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
84302 + select S390_SWITCH_AMODE if S390
84303 + select S390_EXEC_PROTECT if S390
84304 + select ARCH_TRACK_EXEC_LIMIT if X86_32
84305 + help
84306 + This implementation is based on the paging feature of the CPU.
84307 + On i386 without hardware non-executable bit support there is a
84308 + variable but usually low performance impact, however on Intel's
84309 + P4 core based CPUs it is very high so you should not enable this
84310 + for kernels meant to be used on such CPUs.
84311 +
84312 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
84313 + with hardware non-executable bit support there is no performance
84314 + impact, on ppc the impact is negligible.
84315 +
84316 + Note that several architectures require various emulations due to
84317 + badly designed userland ABIs, this will cause a performance impact
84318 + but will disappear as soon as userland is fixed. For example, ppc
84319 + userland MUST have been built with secure-plt by a recent toolchain.
84320 +
84321 +config PAX_SEGMEXEC
84322 + bool "Segmentation based non-executable pages"
84323 + depends on PAX_NOEXEC && X86_32
84324 + help
84325 + This implementation is based on the segmentation feature of the
84326 + CPU and has a very small performance impact, however applications
84327 + will be limited to a 1.5 GB address space instead of the normal
84328 + 3 GB.
84329 +
84330 +config PAX_EMUTRAMP
84331 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
84332 + default y if PARISC
84333 + help
84334 + There are some programs and libraries that for one reason or
84335 + another attempt to execute special small code snippets from
84336 + non-executable memory pages. Most notable examples are the
84337 + signal handler return code generated by the kernel itself and
84338 + the GCC trampolines.
84339 +
84340 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
84341 + such programs will no longer work under your kernel.
84342 +
84343 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
84344 + utilities to enable trampoline emulation for the affected programs
84345 + yet still have the protection provided by the non-executable pages.
84346 +
84347 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
84348 + your system will not even boot.
84349 +
84350 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
84351 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
84352 + for the affected files.
84353 +
84354 + NOTE: enabling this feature *may* open up a loophole in the
84355 + protection provided by non-executable pages that an attacker
84356 + could abuse. Therefore the best solution is to not have any
84357 + files on your system that would require this option. This can
84358 + be achieved by not using libc5 (which relies on the kernel
84359 + signal handler return code) and not using or rewriting programs
84360 + that make use of the nested function implementation of GCC.
84361 + Skilled users can just fix GCC itself so that it implements
84362 + nested function calls in a way that does not interfere with PaX.
84363 +
84364 +config PAX_EMUSIGRT
84365 + bool "Automatically emulate sigreturn trampolines"
84366 + depends on PAX_EMUTRAMP && PARISC
84367 + default y
84368 + help
84369 + Enabling this option will have the kernel automatically detect
84370 + and emulate signal return trampolines executing on the stack
84371 + that would otherwise lead to task termination.
84372 +
84373 + This solution is intended as a temporary one for users with
84374 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
84375 + Modula-3 runtime, etc) or executables linked to such, basically
84376 + everything that does not specify its own SA_RESTORER function in
84377 + normal executable memory like glibc 2.1+ does.
84378 +
84379 + On parisc you MUST enable this option, otherwise your system will
84380 + not even boot.
84381 +
84382 + NOTE: this feature cannot be disabled on a per executable basis
84383 + and since it *does* open up a loophole in the protection provided
84384 + by non-executable pages, the best solution is to not have any
84385 + files on your system that would require this option.
84386 +
84387 +config PAX_MPROTECT
84388 + bool "Restrict mprotect()"
84389 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
84390 + help
84391 + Enabling this option will prevent programs from
84392 + - changing the executable status of memory pages that were
84393 + not originally created as executable,
84394 + - making read-only executable pages writable again,
84395 + - creating executable pages from anonymous memory,
84396 + - making read-only-after-relocations (RELRO) data pages writable again.
84397 +
84398 + You should say Y here to complete the protection provided by
84399 + the enforcement of non-executable pages.
84400 +
84401 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
84402 + this feature on a per file basis.
84403 +
84404 +config PAX_MPROTECT_COMPAT
84405 + bool "Use legacy/compat protection demoting (read help)"
84406 + depends on PAX_MPROTECT
84407 + default n
84408 + help
84409 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
84410 + by sending the proper error code to the application. For some broken
84411 + userland, this can cause problems with Python or other applications. The
84412 + current implementation however allows for applications like clamav to
84413 + detect if JIT compilation/execution is allowed and to fall back gracefully
84414 + to an interpreter-based mode if it does not. While we encourage everyone
84415 + to use the current implementation as-is and push upstream to fix broken
84416 + userland (note that the RWX logging option can assist with this), in some
84417 + environments this may not be possible. Having to disable MPROTECT
84418 + completely on certain binaries reduces the security benefit of PaX,
84419 + so this option is provided for those environments to revert to the old
84420 + behavior.
84421 +
84422 +config PAX_ELFRELOCS
84423 + bool "Allow ELF text relocations (read help)"
84424 + depends on PAX_MPROTECT
84425 + default n
84426 + help
84427 + Non-executable pages and mprotect() restrictions are effective
84428 + in preventing the introduction of new executable code into an
84429 + attacked task's address space. There remain only two venues
84430 + for this kind of attack: if the attacker can execute already
84431 + existing code in the attacked task then he can either have it
84432 + create and mmap() a file containing his code or have it mmap()
84433 + an already existing ELF library that does not have position
84434 + independent code in it and use mprotect() on it to make it
84435 + writable and copy his code there. While protecting against
84436 + the former approach is beyond PaX, the latter can be prevented
84437 + by having only PIC ELF libraries on one's system (which do not
84438 + need to relocate their code). If you are sure this is your case,
84439 + as is the case with all modern Linux distributions, then leave
84440 + this option disabled. You should say 'n' here.
84441 +
84442 +config PAX_ETEXECRELOCS
84443 + bool "Allow ELF ET_EXEC text relocations"
84444 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
84445 + select PAX_ELFRELOCS
84446 + default y
84447 + help
84448 + On some architectures there are incorrectly created applications
84449 + that require text relocations and would not work without enabling
84450 + this option. If you are an alpha, ia64 or parisc user, you should
84451 + enable this option and disable it once you have made sure that
84452 + none of your applications need it.
84453 +
84454 +config PAX_EMUPLT
84455 + bool "Automatically emulate ELF PLT"
84456 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
84457 + default y
84458 + help
84459 + Enabling this option will have the kernel automatically detect
84460 + and emulate the Procedure Linkage Table entries in ELF files.
84461 + On some architectures such entries are in writable memory, and
84462 + become non-executable leading to task termination. Therefore
84463 + it is mandatory that you enable this option on alpha, parisc,
84464 + sparc and sparc64, otherwise your system would not even boot.
84465 +
84466 + NOTE: this feature *does* open up a loophole in the protection
84467 + provided by the non-executable pages, therefore the proper
84468 + solution is to modify the toolchain to produce a PLT that does
84469 + not need to be writable.
84470 +
84471 +config PAX_DLRESOLVE
84472 + bool 'Emulate old glibc resolver stub'
84473 + depends on PAX_EMUPLT && SPARC
84474 + default n
84475 + help
84476 + This option is needed if userland has an old glibc (before 2.4)
84477 + that puts a 'save' instruction into the runtime generated resolver
84478 + stub that needs special emulation.
84479 +
84480 +config PAX_KERNEXEC
84481 + bool "Enforce non-executable kernel pages"
84482 + depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
84483 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
84484 + select PAX_KERNEXEC_PLUGIN if X86_64
84485 + help
84486 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
84487 + that is, enabling this option will make it harder to inject
84488 + and execute 'foreign' code in kernel memory itself.
84489 +
84490 + Note that on x86_64 kernels there is a known regression when
84491 + this feature and KVM/VMX are both enabled in the host kernel.
84492 +
84493 +choice
84494 + prompt "Return Address Instrumentation Method"
84495 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
84496 + depends on PAX_KERNEXEC_PLUGIN
84497 + help
84498 + Select the method used to instrument function pointer dereferences.
84499 + Note that binary modules cannot be instrumented by this approach.
84500 +
84501 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
84502 + bool "bts"
84503 + help
84504 + This method is compatible with binary only modules but has
84505 + a higher runtime overhead.
84506 +
84507 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
84508 + bool "or"
84509 + depends on !PARAVIRT
84510 + help
84511 + This method is incompatible with binary only modules but has
84512 + a lower runtime overhead.
84513 +endchoice
84514 +
84515 +config PAX_KERNEXEC_PLUGIN_METHOD
84516 + string
84517 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
84518 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
84519 + default ""
84520 +
84521 +config PAX_KERNEXEC_MODULE_TEXT
84522 + int "Minimum amount of memory reserved for module code"
84523 + default "4"
84524 + depends on PAX_KERNEXEC && X86_32 && MODULES
84525 + help
84526 + Due to implementation details the kernel must reserve a fixed
84527 + amount of memory for module code at compile time that cannot be
84528 + changed at runtime. Here you can specify the minimum amount
84529 + in MB that will be reserved. Due to the same implementation
84530 + details this size will always be rounded up to the next 2/4 MB
84531 + boundary (depends on PAE) so the actually available memory for
84532 + module code will usually be more than this minimum.
84533 +
84534 + The default 4 MB should be enough for most users but if you have
84535 + an excessive number of modules (e.g., most distribution configs
84536 + compile many drivers as modules) or use huge modules such as
84537 + nvidia's kernel driver, you will need to adjust this amount.
84538 + A good rule of thumb is to look at your currently loaded kernel
84539 + modules and add up their sizes.
84540 +
84541 +endmenu
84542 +
84543 +menu "Address Space Layout Randomization"
84544 + depends on PAX
84545 +
84546 +config PAX_ASLR
84547 + bool "Address Space Layout Randomization"
84548 + help
84549 + Many if not most exploit techniques rely on the knowledge of
84550 + certain addresses in the attacked program. The following options
84551 + will allow the kernel to apply a certain amount of randomization
84552 + to specific parts of the program thereby forcing an attacker to
84553 + guess them in most cases. Any failed guess will most likely crash
84554 + the attacked program which allows the kernel to detect such attempts
84555 + and react on them. PaX itself provides no reaction mechanisms,
84556 + instead it is strongly encouraged that you make use of Nergal's
84557 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
84558 + (http://www.grsecurity.net/) built-in crash detection features or
84559 + develop one yourself.
84560 +
84561 + By saying Y here you can choose to randomize the following areas:
84562 + - top of the task's kernel stack
84563 + - top of the task's userland stack
84564 + - base address for mmap() requests that do not specify one
84565 + (this includes all libraries)
84566 + - base address of the main executable
84567 +
84568 + It is strongly recommended to say Y here as address space layout
84569 + randomization has negligible impact on performance yet it provides
84570 + a very effective protection.
84571 +
84572 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
84573 + this feature on a per file basis.
84574 +
84575 +config PAX_RANDKSTACK
84576 + bool "Randomize kernel stack base"
84577 + depends on X86_TSC && X86
84578 + help
84579 + By saying Y here the kernel will randomize every task's kernel
84580 + stack on every system call. This will not only force an attacker
84581 + to guess it but also prevent him from making use of possible
84582 + leaked information about it.
84583 +
84584 + Since the kernel stack is a rather scarce resource, randomization
84585 + may cause unexpected stack overflows, therefore you should very
84586 + carefully test your system. Note that once enabled in the kernel
84587 + configuration, this feature cannot be disabled on a per file basis.
84588 +
84589 +config PAX_RANDUSTACK
84590 + bool "Randomize user stack base"
84591 + depends on PAX_ASLR
84592 + help
84593 + By saying Y here the kernel will randomize every task's userland
84594 + stack. The randomization is done in two steps where the second
84595 + one may apply a big amount of shift to the top of the stack and
84596 + cause problems for programs that want to use lots of memory (more
84597 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
84598 + For this reason the second step can be controlled by 'chpax' or
84599 + 'paxctl' on a per file basis.
84600 +
84601 +config PAX_RANDMMAP
84602 + bool "Randomize mmap() base"
84603 + depends on PAX_ASLR
84604 + help
84605 + By saying Y here the kernel will use a randomized base address for
84606 + mmap() requests that do not specify one themselves. As a result
84607 + all dynamically loaded libraries will appear at random addresses
84608 + and therefore be harder to exploit by a technique where an attacker
84609 + attempts to execute library code for his purposes (e.g. spawn a
84610 + shell from an exploited program that is running at an elevated
84611 + privilege level).
84612 +
84613 + Furthermore, if a program is relinked as a dynamic ELF file, its
84614 + base address will be randomized as well, completing the full
84615 + randomization of the address space layout. Attacking such programs
84616 + becomes a guess game. You can find an example of doing this at
84617 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
84618 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
84619 +
84620 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
84621 + feature on a per file basis.
84622 +
84623 +endmenu
84624 +
84625 +menu "Miscellaneous hardening features"
84626 +
84627 +config PAX_MEMORY_SANITIZE
84628 + bool "Sanitize all freed memory"
84629 + help
84630 + By saying Y here the kernel will erase memory pages as soon as they
84631 + are freed. This in turn reduces the lifetime of data stored in the
84632 + pages, making it less likely that sensitive information such as
84633 + passwords, cryptographic secrets, etc stay in memory for too long.
84634 +
84635 + This is especially useful for programs whose runtime is short, long
84636 + lived processes and the kernel itself benefit from this as long as
84637 + they operate on whole memory pages and ensure timely freeing of pages
84638 + that may hold sensitive information.
84639 +
84640 + The tradeoff is performance impact, on a single CPU system kernel
84641 + compilation sees a 3% slowdown, other systems and workloads may vary
84642 + and you are advised to test this feature on your expected workload
84643 + before deploying it.
84644 +
84645 + Note that this feature does not protect data stored in live pages,
84646 + e.g., process memory swapped to disk may stay there for a long time.
84647 +
84648 +config PAX_MEMORY_STACKLEAK
84649 + bool "Sanitize kernel stack"
84650 + depends on X86
84651 + help
84652 + By saying Y here the kernel will erase the kernel stack before it
84653 + returns from a system call. This in turn reduces the information
84654 + that a kernel stack leak bug can reveal.
84655 +
84656 + Note that such a bug can still leak information that was put on
84657 + the stack by the current system call (the one eventually triggering
84658 + the bug) but traces of earlier system calls on the kernel stack
84659 + cannot leak anymore.
84660 +
84661 + The tradeoff is performance impact, on a single CPU system kernel
84662 + compilation sees a 1% slowdown, other systems and workloads may vary
84663 + and you are advised to test this feature on your expected workload
84664 + before deploying it.
84665 +
84666 + Note: full support for this feature requires gcc with plugin support
84667 + so make sure your compiler is at least gcc 4.5.0. Using older gcc
84668 + versions means that functions with large enough stack frames may
84669 + leave uninitialized memory behind that may be exposed to a later
84670 + syscall leaking the stack.
84671 +
84672 +config PAX_MEMORY_UDEREF
84673 + bool "Prevent invalid userland pointer dereference"
84674 + depends on X86 && !UML_X86 && !XEN
84675 + select PAX_PER_CPU_PGD if X86_64
84676 + help
84677 + By saying Y here the kernel will be prevented from dereferencing
84678 + userland pointers in contexts where the kernel expects only kernel
84679 + pointers. This is both a useful runtime debugging feature and a
84680 + security measure that prevents exploiting a class of kernel bugs.
84681 +
84682 + The tradeoff is that some virtualization solutions may experience
84683 + a huge slowdown and therefore you should not enable this feature
84684 + for kernels meant to run in such environments. Whether a given VM
84685 + solution is affected or not is best determined by simply trying it
84686 + out, the performance impact will be obvious right on boot as this
84687 + mechanism engages from very early on. A good rule of thumb is that
84688 + VMs running on CPUs without hardware virtualization support (i.e.,
84689 + the majority of IA-32 CPUs) will likely experience the slowdown.
84690 +
84691 +config PAX_REFCOUNT
84692 + bool "Prevent various kernel object reference counter overflows"
84693 + depends on GRKERNSEC && (X86 || SPARC64)
84694 + help
84695 + By saying Y here the kernel will detect and prevent overflowing
84696 + various (but not all) kinds of object reference counters. Such
84697 + overflows can normally occur due to bugs only and are often, if
84698 + not always, exploitable.
84699 +
84700 + The tradeoff is that data structures protected by an overflowed
84701 + refcount will never be freed and therefore will leak memory. Note
84702 + that this leak also happens even without this protection but in
84703 + that case the overflow can eventually trigger the freeing of the
84704 + data structure while it is still being used elsewhere, resulting
84705 + in the exploitable situation that this feature prevents.
84706 +
84707 + Since this has a negligible performance impact, you should enable
84708 + this feature.
84709 +
84710 +config PAX_USERCOPY
84711 + bool "Harden heap object copies between kernel and userland"
84712 + depends on X86 || PPC || SPARC || ARM
84713 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
84714 + help
84715 + By saying Y here the kernel will enforce the size of heap objects
84716 + when they are copied in either direction between the kernel and
84717 + userland, even if only a part of the heap object is copied.
84718 +
84719 + Specifically, this checking prevents information leaking from the
84720 + kernel heap during kernel to userland copies (if the kernel heap
84721 + object is otherwise fully initialized) and prevents kernel heap
84722 + overflows during userland to kernel copies.
84723 +
84724 + Note that the current implementation provides the strictest bounds
84725 + checks for the SLUB allocator.
84726 +
84727 + Enabling this option also enables per-slab cache protection against
84728 + data in a given cache being copied into/out of via userland
84729 + accessors. Though the whitelist of regions will be reduced over
84730 + time, it notably protects important data structures like task structs.
84731 +
84732 +
84733 + If frame pointers are enabled on x86, this option will also
84734 + restrict copies into and out of the kernel stack to local variables
84735 + within a single frame.
84736 +
84737 + Since this has a negligible performance impact, you should enable
84738 + this feature.
84739 +
84740 +endmenu
84741 +
84742 +endmenu
84743 +
84744 config KEYS
84745 bool "Enable access key retention support"
84746 help
84747 @@ -146,7 +765,7 @@ config INTEL_TXT
84748 config LSM_MMAP_MIN_ADDR
84749 int "Low address space for LSM to protect from user allocation"
84750 depends on SECURITY && SECURITY_SELINUX
84751 - default 65536
84752 + default 32768
84753 help
84754 This is the portion of low virtual memory which should be protected
84755 from userspace allocation. Keeping a user from writing to low pages
84756 diff --git a/security/capability.c b/security/capability.c
84757 index fce07a7..5f12858 100644
84758 --- a/security/capability.c
84759 +++ b/security/capability.c
84760 @@ -890,7 +890,7 @@ static void cap_audit_rule_free(void *lsmrule)
84761 }
84762 #endif /* CONFIG_AUDIT */
84763
84764 -struct security_operations default_security_ops = {
84765 +struct security_operations default_security_ops __read_only = {
84766 .name = "default",
84767 };
84768
84769 diff --git a/security/commoncap.c b/security/commoncap.c
84770 index fe30751..aaba312 100644
84771 --- a/security/commoncap.c
84772 +++ b/security/commoncap.c
84773 @@ -27,6 +27,8 @@
84774 #include <linux/sched.h>
84775 #include <linux/prctl.h>
84776 #include <linux/securebits.h>
84777 +#include <linux/syslog.h>
84778 +#include <net/sock.h>
84779
84780 /*
84781 * If a non-root user executes a setuid-root binary in
84782 @@ -50,9 +52,18 @@ static void warn_setuid_and_fcaps_mixed(char *fname)
84783 }
84784 }
84785
84786 +#ifdef CONFIG_NET
84787 +extern kernel_cap_t gr_cap_rtnetlink(struct sock *sk);
84788 +#endif
84789 +
84790 int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
84791 {
84792 +#ifdef CONFIG_NET
84793 + NETLINK_CB(skb).eff_cap = gr_cap_rtnetlink(sk);
84794 +#else
84795 NETLINK_CB(skb).eff_cap = current_cap();
84796 +#endif
84797 +
84798 return 0;
84799 }
84800
84801 @@ -582,6 +593,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
84802 {
84803 const struct cred *cred = current_cred();
84804
84805 + if (gr_acl_enable_at_secure())
84806 + return 1;
84807 +
84808 if (cred->uid != 0) {
84809 if (bprm->cap_effective)
84810 return 1;
84811 @@ -956,13 +970,18 @@ error:
84812 /**
84813 * cap_syslog - Determine whether syslog function is permitted
84814 * @type: Function requested
84815 + * @from_file: Whether this request came from an open file (i.e. /proc)
84816 *
84817 * Determine whether the current process is permitted to use a particular
84818 * syslog function, returning 0 if permission is granted, -ve if not.
84819 */
84820 -int cap_syslog(int type)
84821 +int cap_syslog(int type, bool from_file)
84822 {
84823 - if ((type != 3 && type != 10) && !capable(CAP_SYS_ADMIN))
84824 + /* /proc/kmsg can open be opened by CAP_SYS_ADMIN */
84825 + if (type != SYSLOG_ACTION_OPEN && from_file)
84826 + return 0;
84827 + if ((type != SYSLOG_ACTION_READ_ALL &&
84828 + type != SYSLOG_ACTION_SIZE_BUFFER) && !capable(CAP_SYS_ADMIN))
84829 return -EPERM;
84830 return 0;
84831 }
84832 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
84833 index 165eb53..b1db4eb 100644
84834 --- a/security/integrity/ima/ima.h
84835 +++ b/security/integrity/ima/ima.h
84836 @@ -84,8 +84,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
84837 extern spinlock_t ima_queue_lock;
84838
84839 struct ima_h_table {
84840 - atomic_long_t len; /* number of stored measurements in the list */
84841 - atomic_long_t violations;
84842 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
84843 + atomic_long_unchecked_t violations;
84844 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
84845 };
84846 extern struct ima_h_table ima_htable;
84847 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
84848 index 3cd58b6..b4c284f 100644
84849 --- a/security/integrity/ima/ima_api.c
84850 +++ b/security/integrity/ima/ima_api.c
84851 @@ -74,7 +74,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
84852 int result;
84853
84854 /* can overflow, only indicator */
84855 - atomic_long_inc(&ima_htable.violations);
84856 + atomic_long_inc_unchecked(&ima_htable.violations);
84857
84858 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
84859 if (!entry) {
84860 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
84861 index 0c72c9c..433e29b 100644
84862 --- a/security/integrity/ima/ima_fs.c
84863 +++ b/security/integrity/ima/ima_fs.c
84864 @@ -27,12 +27,12 @@
84865 static int valid_policy = 1;
84866 #define TMPBUFLEN 12
84867 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
84868 - loff_t *ppos, atomic_long_t *val)
84869 + loff_t *ppos, atomic_long_unchecked_t *val)
84870 {
84871 char tmpbuf[TMPBUFLEN];
84872 ssize_t len;
84873
84874 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
84875 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
84876 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
84877 }
84878
84879 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
84880 index a0880e9..93e4ebb 100644
84881 --- a/security/integrity/ima/ima_queue.c
84882 +++ b/security/integrity/ima/ima_queue.c
84883 @@ -78,7 +78,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
84884 INIT_LIST_HEAD(&qe->later);
84885 list_add_tail_rcu(&qe->later, &ima_measurements);
84886
84887 - atomic_long_inc(&ima_htable.len);
84888 + atomic_long_inc_unchecked(&ima_htable.len);
84889 key = ima_hash_key(entry->digest);
84890 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
84891 return 0;
84892 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
84893 index e031952..c9a535d 100644
84894 --- a/security/keys/keyring.c
84895 +++ b/security/keys/keyring.c
84896 @@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring,
84897 ret = -EFAULT;
84898
84899 for (loop = 0; loop < klist->nkeys; loop++) {
84900 + key_serial_t serial;
84901 key = klist->keys[loop];
84902 + serial = key->serial;
84903
84904 tmp = sizeof(key_serial_t);
84905 if (tmp > buflen)
84906 tmp = buflen;
84907
84908 - if (copy_to_user(buffer,
84909 - &key->serial,
84910 - tmp) != 0)
84911 + if (copy_to_user(buffer, &serial, tmp))
84912 goto error;
84913
84914 buflen -= tmp;
84915 diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
84916 index 931cfda..e71808a 100644
84917 --- a/security/keys/process_keys.c
84918 +++ b/security/keys/process_keys.c
84919 @@ -208,7 +208,7 @@ static int install_process_keyring(void)
84920 ret = install_process_keyring_to_cred(new);
84921 if (ret < 0) {
84922 abort_creds(new);
84923 - return ret != -EEXIST ?: 0;
84924 + return ret != -EEXIST ? ret : 0;
84925 }
84926
84927 return commit_creds(new);
84928 diff --git a/security/min_addr.c b/security/min_addr.c
84929 index d9f9425..c28cef4 100644
84930 --- a/security/min_addr.c
84931 +++ b/security/min_addr.c
84932 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
84933 */
84934 static void update_mmap_min_addr(void)
84935 {
84936 +#ifndef SPARC
84937 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
84938 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
84939 mmap_min_addr = dac_mmap_min_addr;
84940 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
84941 #else
84942 mmap_min_addr = dac_mmap_min_addr;
84943 #endif
84944 +#endif
84945 }
84946
84947 /*
84948 diff --git a/security/root_plug.c b/security/root_plug.c
84949 index 2f7ffa6..0455400 100644
84950 --- a/security/root_plug.c
84951 +++ b/security/root_plug.c
84952 @@ -70,7 +70,7 @@ static int rootplug_bprm_check_security (struct linux_binprm *bprm)
84953 return 0;
84954 }
84955
84956 -static struct security_operations rootplug_security_ops = {
84957 +static struct security_operations rootplug_security_ops __read_only = {
84958 .bprm_check_security = rootplug_bprm_check_security,
84959 };
84960
84961 diff --git a/security/security.c b/security/security.c
84962 index c4c6732..7abf13b 100644
84963 --- a/security/security.c
84964 +++ b/security/security.c
84965 @@ -24,7 +24,7 @@ static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1];
84966 extern struct security_operations default_security_ops;
84967 extern void security_fixup_ops(struct security_operations *ops);
84968
84969 -struct security_operations *security_ops; /* Initialized to NULL */
84970 +struct security_operations *security_ops __read_only; /* Initialized to NULL */
84971
84972 static inline int verify(struct security_operations *ops)
84973 {
84974 @@ -106,7 +106,7 @@ int __init security_module_enable(struct security_operations *ops)
84975 * If there is already a security module registered with the kernel,
84976 * an error will be returned. Otherwise %0 is returned on success.
84977 */
84978 -int register_security(struct security_operations *ops)
84979 +int __init register_security(struct security_operations *ops)
84980 {
84981 if (verify(ops)) {
84982 printk(KERN_DEBUG "%s could not verify "
84983 @@ -199,9 +199,9 @@ int security_quota_on(struct dentry *dentry)
84984 return security_ops->quota_on(dentry);
84985 }
84986
84987 -int security_syslog(int type)
84988 +int security_syslog(int type, bool from_file)
84989 {
84990 - return security_ops->syslog(type);
84991 + return security_ops->syslog(type, from_file);
84992 }
84993
84994 int security_settime(struct timespec *ts, struct timezone *tz)
84995 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
84996 index a106754..ca3a589 100644
84997 --- a/security/selinux/hooks.c
84998 +++ b/security/selinux/hooks.c
84999 @@ -76,6 +76,7 @@
85000 #include <linux/selinux.h>
85001 #include <linux/mutex.h>
85002 #include <linux/posix-timers.h>
85003 +#include <linux/syslog.h>
85004
85005 #include "avc.h"
85006 #include "objsec.h"
85007 @@ -131,7 +132,7 @@ int selinux_enabled = 1;
85008 * Minimal support for a secondary security module,
85009 * just to allow the use of the capability module.
85010 */
85011 -static struct security_operations *secondary_ops;
85012 +static struct security_operations *secondary_ops __read_only;
85013
85014 /* Lists of inode and superblock security structures initialized
85015 before the policy was loaded. */
85016 @@ -2050,29 +2051,30 @@ static int selinux_quota_on(struct dentry *dentry)
85017 return dentry_has_perm(cred, NULL, dentry, FILE__QUOTAON);
85018 }
85019
85020 -static int selinux_syslog(int type)
85021 +static int selinux_syslog(int type, bool from_file)
85022 {
85023 int rc;
85024
85025 - rc = cap_syslog(type);
85026 + rc = cap_syslog(type, from_file);
85027 if (rc)
85028 return rc;
85029
85030 switch (type) {
85031 - case 3: /* Read last kernel messages */
85032 - case 10: /* Return size of the log buffer */
85033 + case SYSLOG_ACTION_READ_ALL: /* Read last kernel messages */
85034 + case SYSLOG_ACTION_SIZE_BUFFER: /* Return size of the log buffer */
85035 rc = task_has_system(current, SYSTEM__SYSLOG_READ);
85036 break;
85037 - case 6: /* Disable logging to console */
85038 - case 7: /* Enable logging to console */
85039 - case 8: /* Set level of messages printed to console */
85040 + case SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging to console */
85041 + case SYSLOG_ACTION_CONSOLE_ON: /* Enable logging to console */
85042 + /* Set level of messages printed to console */
85043 + case SYSLOG_ACTION_CONSOLE_LEVEL:
85044 rc = task_has_system(current, SYSTEM__SYSLOG_CONSOLE);
85045 break;
85046 - case 0: /* Close log */
85047 - case 1: /* Open log */
85048 - case 2: /* Read from log */
85049 - case 4: /* Read/clear last kernel messages */
85050 - case 5: /* Clear ring buffer */
85051 + case SYSLOG_ACTION_CLOSE: /* Close log */
85052 + case SYSLOG_ACTION_OPEN: /* Open log */
85053 + case SYSLOG_ACTION_READ: /* Read from log */
85054 + case SYSLOG_ACTION_READ_CLEAR: /* Read/clear last kernel messages */
85055 + case SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
85056 default:
85057 rc = task_has_system(current, SYSTEM__SYSLOG_MOD);
85058 break;
85059 @@ -5457,7 +5459,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
85060
85061 #endif
85062
85063 -static struct security_operations selinux_ops = {
85064 +static struct security_operations selinux_ops __read_only = {
85065 .name = "selinux",
85066
85067 .ptrace_access_check = selinux_ptrace_access_check,
85068 @@ -5841,7 +5843,9 @@ int selinux_disable(void)
85069 avc_disable();
85070
85071 /* Reset security_ops to the secondary module, dummy or capability. */
85072 + pax_open_kernel();
85073 security_ops = secondary_ops;
85074 + pax_close_kernel();
85075
85076 /* Unregister netfilter hooks. */
85077 selinux_nf_ip_exit();
85078 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
85079 index 13128f9..c23c736 100644
85080 --- a/security/selinux/include/xfrm.h
85081 +++ b/security/selinux/include/xfrm.h
85082 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
85083
85084 static inline void selinux_xfrm_notify_policyload(void)
85085 {
85086 - atomic_inc(&flow_cache_genid);
85087 + atomic_inc_unchecked(&flow_cache_genid);
85088 }
85089 #else
85090 static inline int selinux_xfrm_enabled(void)
85091 diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
85092 index ff17820..d68084c 100644
85093 --- a/security/selinux/ss/services.c
85094 +++ b/security/selinux/ss/services.c
85095 @@ -1715,6 +1715,8 @@ int security_load_policy(void *data, size_t len)
85096 int rc = 0;
85097 struct policy_file file = { data, len }, *fp = &file;
85098
85099 + pax_track_stack();
85100 +
85101 if (!ss_initialized) {
85102 avtab_cache_init();
85103 if (policydb_read(&policydb, fp)) {
85104 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
85105 index c33b6bb..b51f19e 100644
85106 --- a/security/smack/smack_lsm.c
85107 +++ b/security/smack/smack_lsm.c
85108 @@ -157,12 +157,12 @@ static int smack_ptrace_traceme(struct task_struct *ptp)
85109 *
85110 * Returns 0 on success, error code otherwise.
85111 */
85112 -static int smack_syslog(int type)
85113 +static int smack_syslog(int type, bool from_file)
85114 {
85115 int rc;
85116 char *sp = current_security();
85117
85118 - rc = cap_syslog(type);
85119 + rc = cap_syslog(type, from_file);
85120 if (rc != 0)
85121 return rc;
85122
85123 @@ -3073,7 +3073,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
85124 return 0;
85125 }
85126
85127 -struct security_operations smack_ops = {
85128 +struct security_operations smack_ops __read_only = {
85129 .name = "smack",
85130
85131 .ptrace_access_check = smack_ptrace_access_check,
85132 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
85133 index 9548a09..9a5f384 100644
85134 --- a/security/tomoyo/tomoyo.c
85135 +++ b/security/tomoyo/tomoyo.c
85136 @@ -275,7 +275,7 @@ static int tomoyo_dentry_open(struct file *f, const struct cred *cred)
85137 * tomoyo_security_ops is a "struct security_operations" which is used for
85138 * registering TOMOYO.
85139 */
85140 -static struct security_operations tomoyo_security_ops = {
85141 +static struct security_operations tomoyo_security_ops __read_only = {
85142 .name = "tomoyo",
85143 .cred_alloc_blank = tomoyo_cred_alloc_blank,
85144 .cred_prepare = tomoyo_cred_prepare,
85145 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
85146 index 84bb07d..c2ab6b6 100644
85147 --- a/sound/aoa/codecs/onyx.c
85148 +++ b/sound/aoa/codecs/onyx.c
85149 @@ -53,7 +53,7 @@ struct onyx {
85150 spdif_locked:1,
85151 analog_locked:1,
85152 original_mute:2;
85153 - int open_count;
85154 + local_t open_count;
85155 struct codec_info *codec_info;
85156
85157 /* mutex serializes concurrent access to the device
85158 @@ -752,7 +752,7 @@ static int onyx_open(struct codec_info_item *cii,
85159 struct onyx *onyx = cii->codec_data;
85160
85161 mutex_lock(&onyx->mutex);
85162 - onyx->open_count++;
85163 + local_inc(&onyx->open_count);
85164 mutex_unlock(&onyx->mutex);
85165
85166 return 0;
85167 @@ -764,8 +764,7 @@ static int onyx_close(struct codec_info_item *cii,
85168 struct onyx *onyx = cii->codec_data;
85169
85170 mutex_lock(&onyx->mutex);
85171 - onyx->open_count--;
85172 - if (!onyx->open_count)
85173 + if (local_dec_and_test(&onyx->open_count))
85174 onyx->spdif_locked = onyx->analog_locked = 0;
85175 mutex_unlock(&onyx->mutex);
85176
85177 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
85178 index ffd2025..df062c9 100644
85179 --- a/sound/aoa/codecs/onyx.h
85180 +++ b/sound/aoa/codecs/onyx.h
85181 @@ -11,6 +11,7 @@
85182 #include <linux/i2c.h>
85183 #include <asm/pmac_low_i2c.h>
85184 #include <asm/prom.h>
85185 +#include <asm/local.h>
85186
85187 /* PCM3052 register definitions */
85188
85189 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
85190 index d9c9635..bc0a5a2 100644
85191 --- a/sound/core/oss/pcm_oss.c
85192 +++ b/sound/core/oss/pcm_oss.c
85193 @@ -1395,7 +1395,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
85194 }
85195 } else {
85196 tmp = snd_pcm_oss_write2(substream,
85197 - (const char __force *)buf,
85198 + (const char __force_kernel *)buf,
85199 runtime->oss.period_bytes, 0);
85200 if (tmp <= 0)
85201 goto err;
85202 @@ -1483,7 +1483,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
85203 xfer += tmp;
85204 runtime->oss.buffer_used -= tmp;
85205 } else {
85206 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
85207 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
85208 runtime->oss.period_bytes, 0);
85209 if (tmp <= 0)
85210 goto err;
85211 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
85212 index 038232d..7dd9e5c 100644
85213 --- a/sound/core/pcm_compat.c
85214 +++ b/sound/core/pcm_compat.c
85215 @@ -30,7 +30,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
85216 int err;
85217
85218 fs = snd_enter_user();
85219 - err = snd_pcm_delay(substream, &delay);
85220 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
85221 snd_leave_user(fs);
85222 if (err < 0)
85223 return err;
85224 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
85225 index e6d2d97..4843949 100644
85226 --- a/sound/core/pcm_native.c
85227 +++ b/sound/core/pcm_native.c
85228 @@ -2747,11 +2747,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
85229 switch (substream->stream) {
85230 case SNDRV_PCM_STREAM_PLAYBACK:
85231 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
85232 - (void __user *)arg);
85233 + (void __force_user *)arg);
85234 break;
85235 case SNDRV_PCM_STREAM_CAPTURE:
85236 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
85237 - (void __user *)arg);
85238 + (void __force_user *)arg);
85239 break;
85240 default:
85241 result = -EINVAL;
85242 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
85243 index 1f99767..14636533 100644
85244 --- a/sound/core/seq/seq_device.c
85245 +++ b/sound/core/seq/seq_device.c
85246 @@ -63,7 +63,7 @@ struct ops_list {
85247 int argsize; /* argument size */
85248
85249 /* operators */
85250 - struct snd_seq_dev_ops ops;
85251 + struct snd_seq_dev_ops *ops;
85252
85253 /* registred devices */
85254 struct list_head dev_list; /* list of devices */
85255 @@ -332,7 +332,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
85256
85257 mutex_lock(&ops->reg_mutex);
85258 /* copy driver operators */
85259 - ops->ops = *entry;
85260 + ops->ops = entry;
85261 ops->driver |= DRIVER_LOADED;
85262 ops->argsize = argsize;
85263
85264 @@ -462,7 +462,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
85265 dev->name, ops->id, ops->argsize, dev->argsize);
85266 return -EINVAL;
85267 }
85268 - if (ops->ops.init_device(dev) >= 0) {
85269 + if (ops->ops->init_device(dev) >= 0) {
85270 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
85271 ops->num_init_devices++;
85272 } else {
85273 @@ -489,7 +489,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
85274 dev->name, ops->id, ops->argsize, dev->argsize);
85275 return -EINVAL;
85276 }
85277 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
85278 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
85279 dev->status = SNDRV_SEQ_DEVICE_FREE;
85280 dev->driver_data = NULL;
85281 ops->num_init_devices--;
85282 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
85283 index 9284829..ac8e8b2 100644
85284 --- a/sound/drivers/mts64.c
85285 +++ b/sound/drivers/mts64.c
85286 @@ -27,6 +27,7 @@
85287 #include <sound/initval.h>
85288 #include <sound/rawmidi.h>
85289 #include <sound/control.h>
85290 +#include <asm/local.h>
85291
85292 #define CARD_NAME "Miditerminal 4140"
85293 #define DRIVER_NAME "MTS64"
85294 @@ -65,7 +66,7 @@ struct mts64 {
85295 struct pardevice *pardev;
85296 int pardev_claimed;
85297
85298 - int open_count;
85299 + local_t open_count;
85300 int current_midi_output_port;
85301 int current_midi_input_port;
85302 u8 mode[MTS64_NUM_INPUT_PORTS];
85303 @@ -695,7 +696,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
85304 {
85305 struct mts64 *mts = substream->rmidi->private_data;
85306
85307 - if (mts->open_count == 0) {
85308 + if (local_read(&mts->open_count) == 0) {
85309 /* We don't need a spinlock here, because this is just called
85310 if the device has not been opened before.
85311 So there aren't any IRQs from the device */
85312 @@ -703,7 +704,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
85313
85314 msleep(50);
85315 }
85316 - ++(mts->open_count);
85317 + local_inc(&mts->open_count);
85318
85319 return 0;
85320 }
85321 @@ -713,8 +714,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
85322 struct mts64 *mts = substream->rmidi->private_data;
85323 unsigned long flags;
85324
85325 - --(mts->open_count);
85326 - if (mts->open_count == 0) {
85327 + if (local_dec_return(&mts->open_count) == 0) {
85328 /* We need the spinlock_irqsave here because we can still
85329 have IRQs at this point */
85330 spin_lock_irqsave(&mts->lock, flags);
85331 @@ -723,8 +723,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
85332
85333 msleep(500);
85334
85335 - } else if (mts->open_count < 0)
85336 - mts->open_count = 0;
85337 + } else if (local_read(&mts->open_count) < 0)
85338 + local_set(&mts->open_count, 0);
85339
85340 return 0;
85341 }
85342 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
85343 index 01997f2..cbc1195 100644
85344 --- a/sound/drivers/opl4/opl4_lib.c
85345 +++ b/sound/drivers/opl4/opl4_lib.c
85346 @@ -27,7 +27,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
85347 MODULE_DESCRIPTION("OPL4 driver");
85348 MODULE_LICENSE("GPL");
85349
85350 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
85351 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
85352 {
85353 int timeout = 10;
85354 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
85355 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
85356 index 60158e2..0a0cc1a 100644
85357 --- a/sound/drivers/portman2x4.c
85358 +++ b/sound/drivers/portman2x4.c
85359 @@ -46,6 +46,7 @@
85360 #include <sound/initval.h>
85361 #include <sound/rawmidi.h>
85362 #include <sound/control.h>
85363 +#include <asm/local.h>
85364
85365 #define CARD_NAME "Portman 2x4"
85366 #define DRIVER_NAME "portman"
85367 @@ -83,7 +84,7 @@ struct portman {
85368 struct pardevice *pardev;
85369 int pardev_claimed;
85370
85371 - int open_count;
85372 + local_t open_count;
85373 int mode[PORTMAN_NUM_INPUT_PORTS];
85374 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
85375 };
85376 diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
85377 index 02f79d2..8691d43 100644
85378 --- a/sound/isa/cmi8330.c
85379 +++ b/sound/isa/cmi8330.c
85380 @@ -173,7 +173,7 @@ struct snd_cmi8330 {
85381
85382 struct snd_pcm *pcm;
85383 struct snd_cmi8330_stream {
85384 - struct snd_pcm_ops ops;
85385 + snd_pcm_ops_no_const ops;
85386 snd_pcm_open_callback_t open;
85387 void *private_data; /* sb or wss */
85388 } streams[2];
85389 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
85390 index 733b014..56ce96f 100644
85391 --- a/sound/oss/sb_audio.c
85392 +++ b/sound/oss/sb_audio.c
85393 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
85394 buf16 = (signed short *)(localbuf + localoffs);
85395 while (c)
85396 {
85397 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
85398 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
85399 if (copy_from_user(lbuf8,
85400 userbuf+useroffs + p,
85401 locallen))
85402 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
85403 index 3136c88..28ad950 100644
85404 --- a/sound/oss/swarm_cs4297a.c
85405 +++ b/sound/oss/swarm_cs4297a.c
85406 @@ -2577,7 +2577,6 @@ static int __init cs4297a_init(void)
85407 {
85408 struct cs4297a_state *s;
85409 u32 pwr, id;
85410 - mm_segment_t fs;
85411 int rval;
85412 #ifndef CONFIG_BCM_CS4297A_CSWARM
85413 u64 cfg;
85414 @@ -2667,22 +2666,23 @@ static int __init cs4297a_init(void)
85415 if (!rval) {
85416 char *sb1250_duart_present;
85417
85418 +#if 0
85419 + mm_segment_t fs;
85420 fs = get_fs();
85421 set_fs(KERNEL_DS);
85422 -#if 0
85423 val = SOUND_MASK_LINE;
85424 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
85425 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
85426 val = initvol[i].vol;
85427 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
85428 }
85429 + set_fs(fs);
85430 // cs4297a_write_ac97(s, 0x18, 0x0808);
85431 #else
85432 // cs4297a_write_ac97(s, 0x5e, 0x180);
85433 cs4297a_write_ac97(s, 0x02, 0x0808);
85434 cs4297a_write_ac97(s, 0x18, 0x0808);
85435 #endif
85436 - set_fs(fs);
85437
85438 list_add(&s->list, &cs4297a_devs);
85439
85440 diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
85441 index 78288db..0406809 100644
85442 --- a/sound/pci/ac97/ac97_codec.c
85443 +++ b/sound/pci/ac97/ac97_codec.c
85444 @@ -1952,7 +1952,7 @@ static int snd_ac97_dev_disconnect(struct snd_device *device)
85445 }
85446
85447 /* build_ops to do nothing */
85448 -static struct snd_ac97_build_ops null_build_ops;
85449 +static const struct snd_ac97_build_ops null_build_ops;
85450
85451 #ifdef CONFIG_SND_AC97_POWER_SAVE
85452 static void do_update_power(struct work_struct *work)
85453 diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c
85454 index eeb2e23..82bf625 100644
85455 --- a/sound/pci/ac97/ac97_patch.c
85456 +++ b/sound/pci/ac97/ac97_patch.c
85457 @@ -371,7 +371,7 @@ static int patch_yamaha_ymf743_build_spdif(struct snd_ac97 *ac97)
85458 return 0;
85459 }
85460
85461 -static struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
85462 +static const struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
85463 .build_spdif = patch_yamaha_ymf743_build_spdif,
85464 .build_3d = patch_yamaha_ymf7x3_3d,
85465 };
85466 @@ -455,7 +455,7 @@ static int patch_yamaha_ymf753_post_spdif(struct snd_ac97 * ac97)
85467 return 0;
85468 }
85469
85470 -static struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
85471 +static const struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
85472 .build_3d = patch_yamaha_ymf7x3_3d,
85473 .build_post_spdif = patch_yamaha_ymf753_post_spdif
85474 };
85475 @@ -502,7 +502,7 @@ static int patch_wolfson_wm9703_specific(struct snd_ac97 * ac97)
85476 return 0;
85477 }
85478
85479 -static struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
85480 +static const struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
85481 .build_specific = patch_wolfson_wm9703_specific,
85482 };
85483
85484 @@ -533,7 +533,7 @@ static int patch_wolfson_wm9704_specific(struct snd_ac97 * ac97)
85485 return 0;
85486 }
85487
85488 -static struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
85489 +static const struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
85490 .build_specific = patch_wolfson_wm9704_specific,
85491 };
85492
85493 @@ -555,7 +555,7 @@ static int patch_wolfson_wm9705_specific(struct snd_ac97 * ac97)
85494 return 0;
85495 }
85496
85497 -static struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
85498 +static const struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
85499 .build_specific = patch_wolfson_wm9705_specific,
85500 };
85501
85502 @@ -692,7 +692,7 @@ static int patch_wolfson_wm9711_specific(struct snd_ac97 * ac97)
85503 return 0;
85504 }
85505
85506 -static struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
85507 +static const struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
85508 .build_specific = patch_wolfson_wm9711_specific,
85509 };
85510
85511 @@ -886,7 +886,7 @@ static void patch_wolfson_wm9713_resume (struct snd_ac97 * ac97)
85512 }
85513 #endif
85514
85515 -static struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
85516 +static const struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
85517 .build_specific = patch_wolfson_wm9713_specific,
85518 .build_3d = patch_wolfson_wm9713_3d,
85519 #ifdef CONFIG_PM
85520 @@ -991,7 +991,7 @@ static int patch_sigmatel_stac97xx_specific(struct snd_ac97 * ac97)
85521 return 0;
85522 }
85523
85524 -static struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
85525 +static const struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
85526 .build_3d = patch_sigmatel_stac9700_3d,
85527 .build_specific = patch_sigmatel_stac97xx_specific
85528 };
85529 @@ -1038,7 +1038,7 @@ static int patch_sigmatel_stac9708_specific(struct snd_ac97 *ac97)
85530 return patch_sigmatel_stac97xx_specific(ac97);
85531 }
85532
85533 -static struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
85534 +static const struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
85535 .build_3d = patch_sigmatel_stac9708_3d,
85536 .build_specific = patch_sigmatel_stac9708_specific
85537 };
85538 @@ -1267,7 +1267,7 @@ static int patch_sigmatel_stac9758_specific(struct snd_ac97 *ac97)
85539 return 0;
85540 }
85541
85542 -static struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
85543 +static const struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
85544 .build_3d = patch_sigmatel_stac9700_3d,
85545 .build_specific = patch_sigmatel_stac9758_specific
85546 };
85547 @@ -1342,7 +1342,7 @@ static int patch_cirrus_build_spdif(struct snd_ac97 * ac97)
85548 return 0;
85549 }
85550
85551 -static struct snd_ac97_build_ops patch_cirrus_ops = {
85552 +static const struct snd_ac97_build_ops patch_cirrus_ops = {
85553 .build_spdif = patch_cirrus_build_spdif
85554 };
85555
85556 @@ -1399,7 +1399,7 @@ static int patch_conexant_build_spdif(struct snd_ac97 * ac97)
85557 return 0;
85558 }
85559
85560 -static struct snd_ac97_build_ops patch_conexant_ops = {
85561 +static const struct snd_ac97_build_ops patch_conexant_ops = {
85562 .build_spdif = patch_conexant_build_spdif
85563 };
85564
85565 @@ -1575,7 +1575,7 @@ static void patch_ad1881_chained(struct snd_ac97 * ac97, int unchained_idx, int
85566 }
85567 }
85568
85569 -static struct snd_ac97_build_ops patch_ad1881_build_ops = {
85570 +static const struct snd_ac97_build_ops patch_ad1881_build_ops = {
85571 #ifdef CONFIG_PM
85572 .resume = ad18xx_resume
85573 #endif
85574 @@ -1662,7 +1662,7 @@ static int patch_ad1885_specific(struct snd_ac97 * ac97)
85575 return 0;
85576 }
85577
85578 -static struct snd_ac97_build_ops patch_ad1885_build_ops = {
85579 +static const struct snd_ac97_build_ops patch_ad1885_build_ops = {
85580 .build_specific = &patch_ad1885_specific,
85581 #ifdef CONFIG_PM
85582 .resume = ad18xx_resume
85583 @@ -1689,7 +1689,7 @@ static int patch_ad1886_specific(struct snd_ac97 * ac97)
85584 return 0;
85585 }
85586
85587 -static struct snd_ac97_build_ops patch_ad1886_build_ops = {
85588 +static const struct snd_ac97_build_ops patch_ad1886_build_ops = {
85589 .build_specific = &patch_ad1886_specific,
85590 #ifdef CONFIG_PM
85591 .resume = ad18xx_resume
85592 @@ -1896,7 +1896,7 @@ static int patch_ad1981a_specific(struct snd_ac97 * ac97)
85593 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
85594 }
85595
85596 -static struct snd_ac97_build_ops patch_ad1981a_build_ops = {
85597 +static const struct snd_ac97_build_ops patch_ad1981a_build_ops = {
85598 .build_post_spdif = patch_ad198x_post_spdif,
85599 .build_specific = patch_ad1981a_specific,
85600 #ifdef CONFIG_PM
85601 @@ -1952,7 +1952,7 @@ static int patch_ad1981b_specific(struct snd_ac97 *ac97)
85602 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
85603 }
85604
85605 -static struct snd_ac97_build_ops patch_ad1981b_build_ops = {
85606 +static const struct snd_ac97_build_ops patch_ad1981b_build_ops = {
85607 .build_post_spdif = patch_ad198x_post_spdif,
85608 .build_specific = patch_ad1981b_specific,
85609 #ifdef CONFIG_PM
85610 @@ -2091,7 +2091,7 @@ static int patch_ad1888_specific(struct snd_ac97 *ac97)
85611 return patch_build_controls(ac97, snd_ac97_ad1888_controls, ARRAY_SIZE(snd_ac97_ad1888_controls));
85612 }
85613
85614 -static struct snd_ac97_build_ops patch_ad1888_build_ops = {
85615 +static const struct snd_ac97_build_ops patch_ad1888_build_ops = {
85616 .build_post_spdif = patch_ad198x_post_spdif,
85617 .build_specific = patch_ad1888_specific,
85618 #ifdef CONFIG_PM
85619 @@ -2140,7 +2140,7 @@ static int patch_ad1980_specific(struct snd_ac97 *ac97)
85620 return patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1);
85621 }
85622
85623 -static struct snd_ac97_build_ops patch_ad1980_build_ops = {
85624 +static const struct snd_ac97_build_ops patch_ad1980_build_ops = {
85625 .build_post_spdif = patch_ad198x_post_spdif,
85626 .build_specific = patch_ad1980_specific,
85627 #ifdef CONFIG_PM
85628 @@ -2255,7 +2255,7 @@ static int patch_ad1985_specific(struct snd_ac97 *ac97)
85629 ARRAY_SIZE(snd_ac97_ad1985_controls));
85630 }
85631
85632 -static struct snd_ac97_build_ops patch_ad1985_build_ops = {
85633 +static const struct snd_ac97_build_ops patch_ad1985_build_ops = {
85634 .build_post_spdif = patch_ad198x_post_spdif,
85635 .build_specific = patch_ad1985_specific,
85636 #ifdef CONFIG_PM
85637 @@ -2547,7 +2547,7 @@ static int patch_ad1986_specific(struct snd_ac97 *ac97)
85638 ARRAY_SIZE(snd_ac97_ad1985_controls));
85639 }
85640
85641 -static struct snd_ac97_build_ops patch_ad1986_build_ops = {
85642 +static const struct snd_ac97_build_ops patch_ad1986_build_ops = {
85643 .build_post_spdif = patch_ad198x_post_spdif,
85644 .build_specific = patch_ad1986_specific,
85645 #ifdef CONFIG_PM
85646 @@ -2652,7 +2652,7 @@ static int patch_alc650_specific(struct snd_ac97 * ac97)
85647 return 0;
85648 }
85649
85650 -static struct snd_ac97_build_ops patch_alc650_ops = {
85651 +static const struct snd_ac97_build_ops patch_alc650_ops = {
85652 .build_specific = patch_alc650_specific,
85653 .update_jacks = alc650_update_jacks
85654 };
85655 @@ -2804,7 +2804,7 @@ static int patch_alc655_specific(struct snd_ac97 * ac97)
85656 return 0;
85657 }
85658
85659 -static struct snd_ac97_build_ops patch_alc655_ops = {
85660 +static const struct snd_ac97_build_ops patch_alc655_ops = {
85661 .build_specific = patch_alc655_specific,
85662 .update_jacks = alc655_update_jacks
85663 };
85664 @@ -2916,7 +2916,7 @@ static int patch_alc850_specific(struct snd_ac97 *ac97)
85665 return 0;
85666 }
85667
85668 -static struct snd_ac97_build_ops patch_alc850_ops = {
85669 +static const struct snd_ac97_build_ops patch_alc850_ops = {
85670 .build_specific = patch_alc850_specific,
85671 .update_jacks = alc850_update_jacks
85672 };
85673 @@ -2978,7 +2978,7 @@ static int patch_cm9738_specific(struct snd_ac97 * ac97)
85674 return patch_build_controls(ac97, snd_ac97_cm9738_controls, ARRAY_SIZE(snd_ac97_cm9738_controls));
85675 }
85676
85677 -static struct snd_ac97_build_ops patch_cm9738_ops = {
85678 +static const struct snd_ac97_build_ops patch_cm9738_ops = {
85679 .build_specific = patch_cm9738_specific,
85680 .update_jacks = cm9738_update_jacks
85681 };
85682 @@ -3069,7 +3069,7 @@ static int patch_cm9739_post_spdif(struct snd_ac97 * ac97)
85683 return patch_build_controls(ac97, snd_ac97_cm9739_controls_spdif, ARRAY_SIZE(snd_ac97_cm9739_controls_spdif));
85684 }
85685
85686 -static struct snd_ac97_build_ops patch_cm9739_ops = {
85687 +static const struct snd_ac97_build_ops patch_cm9739_ops = {
85688 .build_specific = patch_cm9739_specific,
85689 .build_post_spdif = patch_cm9739_post_spdif,
85690 .update_jacks = cm9739_update_jacks
85691 @@ -3243,7 +3243,7 @@ static int patch_cm9761_specific(struct snd_ac97 * ac97)
85692 return patch_build_controls(ac97, snd_ac97_cm9761_controls, ARRAY_SIZE(snd_ac97_cm9761_controls));
85693 }
85694
85695 -static struct snd_ac97_build_ops patch_cm9761_ops = {
85696 +static const struct snd_ac97_build_ops patch_cm9761_ops = {
85697 .build_specific = patch_cm9761_specific,
85698 .build_post_spdif = patch_cm9761_post_spdif,
85699 .update_jacks = cm9761_update_jacks
85700 @@ -3339,7 +3339,7 @@ static int patch_cm9780_specific(struct snd_ac97 *ac97)
85701 return patch_build_controls(ac97, cm9780_controls, ARRAY_SIZE(cm9780_controls));
85702 }
85703
85704 -static struct snd_ac97_build_ops patch_cm9780_ops = {
85705 +static const struct snd_ac97_build_ops patch_cm9780_ops = {
85706 .build_specific = patch_cm9780_specific,
85707 .build_post_spdif = patch_cm9761_post_spdif /* identical with CM9761 */
85708 };
85709 @@ -3459,7 +3459,7 @@ static int patch_vt1616_specific(struct snd_ac97 * ac97)
85710 return 0;
85711 }
85712
85713 -static struct snd_ac97_build_ops patch_vt1616_ops = {
85714 +static const struct snd_ac97_build_ops patch_vt1616_ops = {
85715 .build_specific = patch_vt1616_specific
85716 };
85717
85718 @@ -3813,7 +3813,7 @@ static int patch_it2646_specific(struct snd_ac97 * ac97)
85719 return 0;
85720 }
85721
85722 -static struct snd_ac97_build_ops patch_it2646_ops = {
85723 +static const struct snd_ac97_build_ops patch_it2646_ops = {
85724 .build_specific = patch_it2646_specific,
85725 .update_jacks = it2646_update_jacks
85726 };
85727 @@ -3847,7 +3847,7 @@ static int patch_si3036_specific(struct snd_ac97 * ac97)
85728 return 0;
85729 }
85730
85731 -static struct snd_ac97_build_ops patch_si3036_ops = {
85732 +static const struct snd_ac97_build_ops patch_si3036_ops = {
85733 .build_specific = patch_si3036_specific,
85734 };
85735
85736 @@ -3914,7 +3914,7 @@ static int patch_ucb1400_specific(struct snd_ac97 * ac97)
85737 return 0;
85738 }
85739
85740 -static struct snd_ac97_build_ops patch_ucb1400_ops = {
85741 +static const struct snd_ac97_build_ops patch_ucb1400_ops = {
85742 .build_specific = patch_ucb1400_specific,
85743 };
85744
85745 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
85746 index 99552fb..4dcc2c5 100644
85747 --- a/sound/pci/hda/hda_codec.h
85748 +++ b/sound/pci/hda/hda_codec.h
85749 @@ -580,7 +580,7 @@ struct hda_bus_ops {
85750 /* notify power-up/down from codec to controller */
85751 void (*pm_notify)(struct hda_bus *bus);
85752 #endif
85753 -};
85754 +} __no_const;
85755
85756 /* template to pass to the bus constructor */
85757 struct hda_bus_template {
85758 @@ -675,6 +675,7 @@ struct hda_codec_ops {
85759 int (*check_power_status)(struct hda_codec *codec, hda_nid_t nid);
85760 #endif
85761 };
85762 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
85763
85764 /* record for amp information cache */
85765 struct hda_cache_head {
85766 @@ -705,7 +706,7 @@ struct hda_pcm_ops {
85767 struct snd_pcm_substream *substream);
85768 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
85769 struct snd_pcm_substream *substream);
85770 -};
85771 +} __no_const;
85772
85773 /* PCM information for each substream */
85774 struct hda_pcm_stream {
85775 @@ -760,7 +761,7 @@ struct hda_codec {
85776 const char *modelname; /* model name for preset */
85777
85778 /* set by patch */
85779 - struct hda_codec_ops patch_ops;
85780 + hda_codec_ops_no_const patch_ops;
85781
85782 /* PCM to create, set by patch_ops.build_pcms callback */
85783 unsigned int num_pcms;
85784 diff --git a/sound/pci/hda/patch_atihdmi.c b/sound/pci/hda/patch_atihdmi.c
85785 index fb684f0..2b11cea 100644
85786 --- a/sound/pci/hda/patch_atihdmi.c
85787 +++ b/sound/pci/hda/patch_atihdmi.c
85788 @@ -177,7 +177,7 @@ static int patch_atihdmi(struct hda_codec *codec)
85789 */
85790 spec->multiout.dig_out_nid = CVT_NID;
85791
85792 - codec->patch_ops = atihdmi_patch_ops;
85793 + memcpy((void *)&codec->patch_ops, &atihdmi_patch_ops, sizeof(atihdmi_patch_ops));
85794
85795 return 0;
85796 }
85797 diff --git a/sound/pci/hda/patch_intelhdmi.c b/sound/pci/hda/patch_intelhdmi.c
85798 index 7c23016..c5bfdd7 100644
85799 --- a/sound/pci/hda/patch_intelhdmi.c
85800 +++ b/sound/pci/hda/patch_intelhdmi.c
85801 @@ -511,10 +511,10 @@ static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res)
85802 cp_ready);
85803
85804 /* TODO */
85805 - if (cp_state)
85806 - ;
85807 - if (cp_ready)
85808 - ;
85809 + if (cp_state) {
85810 + }
85811 + if (cp_ready) {
85812 + }
85813 }
85814
85815
85816 @@ -656,7 +656,7 @@ static int do_patch_intel_hdmi(struct hda_codec *codec)
85817 spec->multiout.dig_out_nid = cvt_nid;
85818
85819 codec->spec = spec;
85820 - codec->patch_ops = intel_hdmi_patch_ops;
85821 + memcpy((void *)&codec->patch_ops, &intel_hdmi_patch_ops, sizeof(intel_hdmi_patch_ops));
85822
85823 snd_hda_eld_proc_new(codec, &spec->sink_eld);
85824
85825 diff --git a/sound/pci/hda/patch_nvhdmi.c b/sound/pci/hda/patch_nvhdmi.c
85826 index 6afdab0..68ed352 100644
85827 --- a/sound/pci/hda/patch_nvhdmi.c
85828 +++ b/sound/pci/hda/patch_nvhdmi.c
85829 @@ -367,7 +367,7 @@ static int patch_nvhdmi_8ch(struct hda_codec *codec)
85830 spec->multiout.max_channels = 8;
85831 spec->multiout.dig_out_nid = Nv_Master_Convert_nid;
85832
85833 - codec->patch_ops = nvhdmi_patch_ops_8ch;
85834 + memcpy((void *)&codec->patch_ops, &nvhdmi_patch_ops_8ch, sizeof(nvhdmi_patch_ops_8ch));
85835
85836 return 0;
85837 }
85838 @@ -386,7 +386,7 @@ static int patch_nvhdmi_2ch(struct hda_codec *codec)
85839 spec->multiout.max_channels = 2;
85840 spec->multiout.dig_out_nid = Nv_Master_Convert_nid;
85841
85842 - codec->patch_ops = nvhdmi_patch_ops_2ch;
85843 + memcpy((void *)&codec->patch_ops, &nvhdmi_patch_ops_2ch, sizeof(nvhdmi_patch_ops_2ch));
85844
85845 return 0;
85846 }
85847 diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
85848 index 01da10b..01bd71f 100644
85849 --- a/sound/pci/hda/patch_sigmatel.c
85850 +++ b/sound/pci/hda/patch_sigmatel.c
85851 @@ -5220,7 +5220,7 @@ again:
85852 snd_hda_codec_write_cache(codec, nid, 0,
85853 AC_VERB_SET_CONNECT_SEL, num_dacs);
85854
85855 - codec->patch_ops = stac92xx_patch_ops;
85856 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
85857
85858 codec->proc_widget_hook = stac92hd_proc_hook;
85859
85860 @@ -5294,7 +5294,7 @@ static int patch_stac92hd71bxx(struct hda_codec *codec)
85861 return -ENOMEM;
85862
85863 codec->spec = spec;
85864 - codec->patch_ops = stac92xx_patch_ops;
85865 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
85866 spec->num_pins = STAC92HD71BXX_NUM_PINS;
85867 switch (codec->vendor_id) {
85868 case 0x111d76b6:
85869 diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
85870 index d063149..01599a4 100644
85871 --- a/sound/pci/ice1712/ice1712.h
85872 +++ b/sound/pci/ice1712/ice1712.h
85873 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
85874 unsigned int mask_flags; /* total mask bits */
85875 struct snd_akm4xxx_ops {
85876 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
85877 - } ops;
85878 + } __no_const ops;
85879 };
85880
85881 struct snd_ice1712_spdif {
85882 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
85883 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
85884 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
85885 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
85886 - } ops;
85887 + } __no_const ops;
85888 };
85889
85890
85891 diff --git a/sound/pci/intel8x0m.c b/sound/pci/intel8x0m.c
85892 index 9e7d12e..3e3bc64 100644
85893 --- a/sound/pci/intel8x0m.c
85894 +++ b/sound/pci/intel8x0m.c
85895 @@ -1264,7 +1264,7 @@ static struct shortname_table {
85896 { 0x5455, "ALi M5455" },
85897 { 0x746d, "AMD AMD8111" },
85898 #endif
85899 - { 0 },
85900 + { 0, },
85901 };
85902
85903 static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
85904 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
85905 index 5518371..45cf7ac 100644
85906 --- a/sound/pci/ymfpci/ymfpci_main.c
85907 +++ b/sound/pci/ymfpci/ymfpci_main.c
85908 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
85909 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
85910 break;
85911 }
85912 - if (atomic_read(&chip->interrupt_sleep_count)) {
85913 - atomic_set(&chip->interrupt_sleep_count, 0);
85914 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
85915 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
85916 wake_up(&chip->interrupt_sleep);
85917 }
85918 __end:
85919 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
85920 continue;
85921 init_waitqueue_entry(&wait, current);
85922 add_wait_queue(&chip->interrupt_sleep, &wait);
85923 - atomic_inc(&chip->interrupt_sleep_count);
85924 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
85925 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
85926 remove_wait_queue(&chip->interrupt_sleep, &wait);
85927 }
85928 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
85929 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
85930 spin_unlock(&chip->reg_lock);
85931
85932 - if (atomic_read(&chip->interrupt_sleep_count)) {
85933 - atomic_set(&chip->interrupt_sleep_count, 0);
85934 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
85935 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
85936 wake_up(&chip->interrupt_sleep);
85937 }
85938 }
85939 @@ -2369,7 +2369,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
85940 spin_lock_init(&chip->reg_lock);
85941 spin_lock_init(&chip->voice_lock);
85942 init_waitqueue_head(&chip->interrupt_sleep);
85943 - atomic_set(&chip->interrupt_sleep_count, 0);
85944 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
85945 chip->card = card;
85946 chip->pci = pci;
85947 chip->irq = -1;
85948 diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
85949 index 0a1b2f6..776bb19 100644
85950 --- a/sound/soc/soc-core.c
85951 +++ b/sound/soc/soc-core.c
85952 @@ -609,7 +609,7 @@ static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
85953 }
85954
85955 /* ASoC PCM operations */
85956 -static struct snd_pcm_ops soc_pcm_ops = {
85957 +static snd_pcm_ops_no_const soc_pcm_ops = {
85958 .open = soc_pcm_open,
85959 .close = soc_codec_close,
85960 .hw_params = soc_pcm_hw_params,
85961 diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c
85962 index 79633ea..9732e90 100644
85963 --- a/sound/usb/usbaudio.c
85964 +++ b/sound/usb/usbaudio.c
85965 @@ -963,12 +963,12 @@ static int snd_usb_pcm_playback_trigger(struct snd_pcm_substream *substream,
85966 switch (cmd) {
85967 case SNDRV_PCM_TRIGGER_START:
85968 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
85969 - subs->ops.prepare = prepare_playback_urb;
85970 + *(void **)&subs->ops.prepare = prepare_playback_urb;
85971 return 0;
85972 case SNDRV_PCM_TRIGGER_STOP:
85973 return deactivate_urbs(subs, 0, 0);
85974 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
85975 - subs->ops.prepare = prepare_nodata_playback_urb;
85976 + *(void **)&subs->ops.prepare = prepare_nodata_playback_urb;
85977 return 0;
85978 default:
85979 return -EINVAL;
85980 @@ -985,15 +985,15 @@ static int snd_usb_pcm_capture_trigger(struct snd_pcm_substream *substream,
85981
85982 switch (cmd) {
85983 case SNDRV_PCM_TRIGGER_START:
85984 - subs->ops.retire = retire_capture_urb;
85985 + *(void **)&subs->ops.retire = retire_capture_urb;
85986 return start_urbs(subs, substream->runtime);
85987 case SNDRV_PCM_TRIGGER_STOP:
85988 return deactivate_urbs(subs, 0, 0);
85989 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
85990 - subs->ops.retire = retire_paused_capture_urb;
85991 + *(void **)&subs->ops.retire = retire_paused_capture_urb;
85992 return 0;
85993 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
85994 - subs->ops.retire = retire_capture_urb;
85995 + *(void **)&subs->ops.retire = retire_capture_urb;
85996 return 0;
85997 default:
85998 return -EINVAL;
85999 @@ -1542,7 +1542,7 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
86000 /* for playback, submit the URBs now; otherwise, the first hwptr_done
86001 * updates for all URBs would happen at the same time when starting */
86002 if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) {
86003 - subs->ops.prepare = prepare_nodata_playback_urb;
86004 + *(void **)&subs->ops.prepare = prepare_nodata_playback_urb;
86005 return start_urbs(subs, runtime);
86006 } else
86007 return 0;
86008 @@ -2228,14 +2228,14 @@ static void init_substream(struct snd_usb_stream *as, int stream, struct audiofo
86009 subs->direction = stream;
86010 subs->dev = as->chip->dev;
86011 if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL) {
86012 - subs->ops = audio_urb_ops[stream];
86013 + memcpy((void *)&subs->ops, &audio_urb_ops[stream], sizeof(subs->ops));
86014 } else {
86015 - subs->ops = audio_urb_ops_high_speed[stream];
86016 + memcpy((void *)&subs->ops, &audio_urb_ops_high_speed[stream], sizeof(subs->ops));
86017 switch (as->chip->usb_id) {
86018 case USB_ID(0x041e, 0x3f02): /* E-Mu 0202 USB */
86019 case USB_ID(0x041e, 0x3f04): /* E-Mu 0404 USB */
86020 case USB_ID(0x041e, 0x3f0a): /* E-Mu Tracker Pre */
86021 - subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
86022 + *(void **)&subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
86023 break;
86024 }
86025 }
86026 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
86027 new file mode 100644
86028 index 0000000..b044b80
86029 --- /dev/null
86030 +++ b/tools/gcc/Makefile
86031 @@ -0,0 +1,21 @@
86032 +#CC := gcc
86033 +#PLUGIN_SOURCE_FILES := pax_plugin.c
86034 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
86035 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
86036 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
86037 +
86038 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
86039 +
86040 +hostlibs-y := constify_plugin.so
86041 +hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
86042 +hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
86043 +hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
86044 +hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
86045 +
86046 +always := $(hostlibs-y)
86047 +
86048 +constify_plugin-objs := constify_plugin.o
86049 +stackleak_plugin-objs := stackleak_plugin.o
86050 +kallocstat_plugin-objs := kallocstat_plugin.o
86051 +kernexec_plugin-objs := kernexec_plugin.o
86052 +checker_plugin-objs := checker_plugin.o
86053 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
86054 new file mode 100644
86055 index 0000000..d41b5af
86056 --- /dev/null
86057 +++ b/tools/gcc/checker_plugin.c
86058 @@ -0,0 +1,171 @@
86059 +/*
86060 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
86061 + * Licensed under the GPL v2
86062 + *
86063 + * Note: the choice of the license means that the compilation process is
86064 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
86065 + * but for the kernel it doesn't matter since it doesn't link against
86066 + * any of the gcc libraries
86067 + *
86068 + * gcc plugin to implement various sparse (source code checker) features
86069 + *
86070 + * TODO:
86071 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
86072 + *
86073 + * BUGS:
86074 + * - none known
86075 + */
86076 +#include "gcc-plugin.h"
86077 +#include "config.h"
86078 +#include "system.h"
86079 +#include "coretypes.h"
86080 +#include "tree.h"
86081 +#include "tree-pass.h"
86082 +#include "flags.h"
86083 +#include "intl.h"
86084 +#include "toplev.h"
86085 +#include "plugin.h"
86086 +//#include "expr.h" where are you...
86087 +#include "diagnostic.h"
86088 +#include "plugin-version.h"
86089 +#include "tm.h"
86090 +#include "function.h"
86091 +#include "basic-block.h"
86092 +#include "gimple.h"
86093 +#include "rtl.h"
86094 +#include "emit-rtl.h"
86095 +#include "tree-flow.h"
86096 +#include "target.h"
86097 +
86098 +extern void c_register_addr_space (const char *str, addr_space_t as);
86099 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
86100 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
86101 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
86102 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
86103 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
86104 +
86105 +extern void print_gimple_stmt(FILE *, gimple, int, int);
86106 +extern rtx emit_move_insn(rtx x, rtx y);
86107 +
86108 +int plugin_is_GPL_compatible;
86109 +
86110 +static struct plugin_info checker_plugin_info = {
86111 + .version = "201111150100",
86112 +};
86113 +
86114 +#define ADDR_SPACE_KERNEL 0
86115 +#define ADDR_SPACE_FORCE_KERNEL 1
86116 +#define ADDR_SPACE_USER 2
86117 +#define ADDR_SPACE_FORCE_USER 3
86118 +#define ADDR_SPACE_IOMEM 0
86119 +#define ADDR_SPACE_FORCE_IOMEM 0
86120 +#define ADDR_SPACE_PERCPU 0
86121 +#define ADDR_SPACE_FORCE_PERCPU 0
86122 +#define ADDR_SPACE_RCU 0
86123 +#define ADDR_SPACE_FORCE_RCU 0
86124 +
86125 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
86126 +{
86127 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
86128 +}
86129 +
86130 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
86131 +{
86132 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
86133 +}
86134 +
86135 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
86136 +{
86137 + return default_addr_space_valid_pointer_mode(mode, as);
86138 +}
86139 +
86140 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
86141 +{
86142 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
86143 +}
86144 +
86145 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
86146 +{
86147 + return default_addr_space_legitimize_address(x, oldx, mode, as);
86148 +}
86149 +
86150 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
86151 +{
86152 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
86153 + return true;
86154 +
86155 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
86156 + return true;
86157 +
86158 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
86159 + return true;
86160 +
86161 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
86162 + return true;
86163 +
86164 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
86165 + return true;
86166 +
86167 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
86168 + return true;
86169 +
86170 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
86171 + return true;
86172 +
86173 + return subset == superset;
86174 +}
86175 +
86176 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
86177 +{
86178 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
86179 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
86180 +
86181 + return op;
86182 +}
86183 +
86184 +static void register_checker_address_spaces(void *event_data, void *data)
86185 +{
86186 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
86187 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
86188 + c_register_addr_space("__user", ADDR_SPACE_USER);
86189 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
86190 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
86191 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
86192 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
86193 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
86194 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
86195 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
86196 +
86197 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
86198 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
86199 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
86200 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
86201 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
86202 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
86203 + targetm.addr_space.convert = checker_addr_space_convert;
86204 +}
86205 +
86206 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
86207 +{
86208 + const char * const plugin_name = plugin_info->base_name;
86209 + const int argc = plugin_info->argc;
86210 + const struct plugin_argument * const argv = plugin_info->argv;
86211 + int i;
86212 +
86213 + if (!plugin_default_version_check(version, &gcc_version)) {
86214 + error(G_("incompatible gcc/plugin versions"));
86215 + return 1;
86216 + }
86217 +
86218 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
86219 +
86220 + for (i = 0; i < argc; ++i)
86221 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
86222 +
86223 + if (TARGET_64BIT == 0)
86224 + return 0;
86225 +
86226 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
86227 +
86228 + return 0;
86229 +}
86230 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
86231 new file mode 100644
86232 index 0000000..704a564
86233 --- /dev/null
86234 +++ b/tools/gcc/constify_plugin.c
86235 @@ -0,0 +1,303 @@
86236 +/*
86237 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
86238 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
86239 + * Licensed under the GPL v2, or (at your option) v3
86240 + *
86241 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
86242 + *
86243 + * Homepage:
86244 + * http://www.grsecurity.net/~ephox/const_plugin/
86245 + *
86246 + * Usage:
86247 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
86248 + * $ gcc -fplugin=constify_plugin.so test.c -O2
86249 + */
86250 +
86251 +#include "gcc-plugin.h"
86252 +#include "config.h"
86253 +#include "system.h"
86254 +#include "coretypes.h"
86255 +#include "tree.h"
86256 +#include "tree-pass.h"
86257 +#include "flags.h"
86258 +#include "intl.h"
86259 +#include "toplev.h"
86260 +#include "plugin.h"
86261 +#include "diagnostic.h"
86262 +#include "plugin-version.h"
86263 +#include "tm.h"
86264 +#include "function.h"
86265 +#include "basic-block.h"
86266 +#include "gimple.h"
86267 +#include "rtl.h"
86268 +#include "emit-rtl.h"
86269 +#include "tree-flow.h"
86270 +
86271 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
86272 +
86273 +int plugin_is_GPL_compatible;
86274 +
86275 +static struct plugin_info const_plugin_info = {
86276 + .version = "201111150100",
86277 + .help = "no-constify\tturn off constification\n",
86278 +};
86279 +
86280 +static void constify_type(tree type);
86281 +static bool walk_struct(tree node);
86282 +
86283 +static tree deconstify_type(tree old_type)
86284 +{
86285 + tree new_type, field;
86286 +
86287 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
86288 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
86289 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
86290 + DECL_FIELD_CONTEXT(field) = new_type;
86291 + TYPE_READONLY(new_type) = 0;
86292 + C_TYPE_FIELDS_READONLY(new_type) = 0;
86293 + return new_type;
86294 +}
86295 +
86296 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
86297 +{
86298 + tree type;
86299 +
86300 + *no_add_attrs = true;
86301 + if (TREE_CODE(*node) == FUNCTION_DECL) {
86302 + error("%qE attribute does not apply to functions", name);
86303 + return NULL_TREE;
86304 + }
86305 +
86306 + if (TREE_CODE(*node) == VAR_DECL) {
86307 + error("%qE attribute does not apply to variables", name);
86308 + return NULL_TREE;
86309 + }
86310 +
86311 + if (TYPE_P(*node)) {
86312 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
86313 + *no_add_attrs = false;
86314 + else
86315 + error("%qE attribute applies to struct and union types only", name);
86316 + return NULL_TREE;
86317 + }
86318 +
86319 + type = TREE_TYPE(*node);
86320 +
86321 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
86322 + error("%qE attribute applies to struct and union types only", name);
86323 + return NULL_TREE;
86324 + }
86325 +
86326 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
86327 + error("%qE attribute is already applied to the type", name);
86328 + return NULL_TREE;
86329 + }
86330 +
86331 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
86332 + error("%qE attribute used on type that is not constified", name);
86333 + return NULL_TREE;
86334 + }
86335 +
86336 + if (TREE_CODE(*node) == TYPE_DECL) {
86337 + TREE_TYPE(*node) = deconstify_type(type);
86338 + TREE_READONLY(*node) = 0;
86339 + return NULL_TREE;
86340 + }
86341 +
86342 + return NULL_TREE;
86343 +}
86344 +
86345 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
86346 +{
86347 + *no_add_attrs = true;
86348 + if (!TYPE_P(*node)) {
86349 + error("%qE attribute applies to types only", name);
86350 + return NULL_TREE;
86351 + }
86352 +
86353 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
86354 + error("%qE attribute applies to struct and union types only", name);
86355 + return NULL_TREE;
86356 + }
86357 +
86358 + *no_add_attrs = false;
86359 + constify_type(*node);
86360 + return NULL_TREE;
86361 +}
86362 +
86363 +static struct attribute_spec no_const_attr = {
86364 + .name = "no_const",
86365 + .min_length = 0,
86366 + .max_length = 0,
86367 + .decl_required = false,
86368 + .type_required = false,
86369 + .function_type_required = false,
86370 + .handler = handle_no_const_attribute,
86371 +#if BUILDING_GCC_VERSION >= 4007
86372 + .affects_type_identity = true
86373 +#endif
86374 +};
86375 +
86376 +static struct attribute_spec do_const_attr = {
86377 + .name = "do_const",
86378 + .min_length = 0,
86379 + .max_length = 0,
86380 + .decl_required = false,
86381 + .type_required = false,
86382 + .function_type_required = false,
86383 + .handler = handle_do_const_attribute,
86384 +#if BUILDING_GCC_VERSION >= 4007
86385 + .affects_type_identity = true
86386 +#endif
86387 +};
86388 +
86389 +static void register_attributes(void *event_data, void *data)
86390 +{
86391 + register_attribute(&no_const_attr);
86392 + register_attribute(&do_const_attr);
86393 +}
86394 +
86395 +static void constify_type(tree type)
86396 +{
86397 + TYPE_READONLY(type) = 1;
86398 + C_TYPE_FIELDS_READONLY(type) = 1;
86399 +}
86400 +
86401 +static bool is_fptr(tree field)
86402 +{
86403 + tree ptr = TREE_TYPE(field);
86404 +
86405 + if (TREE_CODE(ptr) != POINTER_TYPE)
86406 + return false;
86407 +
86408 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
86409 +}
86410 +
86411 +static bool walk_struct(tree node)
86412 +{
86413 + tree field;
86414 +
86415 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
86416 + return false;
86417 +
86418 + if (TYPE_FIELDS(node) == NULL_TREE)
86419 + return false;
86420 +
86421 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
86422 + tree type = TREE_TYPE(field);
86423 + enum tree_code code = TREE_CODE(type);
86424 + if (code == RECORD_TYPE || code == UNION_TYPE) {
86425 + if (!(walk_struct(type)))
86426 + return false;
86427 + } else if (!is_fptr(field) && !TREE_READONLY(field))
86428 + return false;
86429 + }
86430 + return true;
86431 +}
86432 +
86433 +static void finish_type(void *event_data, void *data)
86434 +{
86435 + tree type = (tree)event_data;
86436 +
86437 + if (type == NULL_TREE)
86438 + return;
86439 +
86440 + if (TYPE_READONLY(type))
86441 + return;
86442 +
86443 + if (walk_struct(type))
86444 + constify_type(type);
86445 +}
86446 +
86447 +static unsigned int check_local_variables(void);
86448 +
86449 +struct gimple_opt_pass pass_local_variable = {
86450 + {
86451 + .type = GIMPLE_PASS,
86452 + .name = "check_local_variables",
86453 + .gate = NULL,
86454 + .execute = check_local_variables,
86455 + .sub = NULL,
86456 + .next = NULL,
86457 + .static_pass_number = 0,
86458 + .tv_id = TV_NONE,
86459 + .properties_required = 0,
86460 + .properties_provided = 0,
86461 + .properties_destroyed = 0,
86462 + .todo_flags_start = 0,
86463 + .todo_flags_finish = 0
86464 + }
86465 +};
86466 +
86467 +static unsigned int check_local_variables(void)
86468 +{
86469 + tree var;
86470 + referenced_var_iterator rvi;
86471 +
86472 +#if BUILDING_GCC_VERSION == 4005
86473 + FOR_EACH_REFERENCED_VAR(var, rvi) {
86474 +#else
86475 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
86476 +#endif
86477 + tree type = TREE_TYPE(var);
86478 +
86479 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
86480 + continue;
86481 +
86482 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
86483 + continue;
86484 +
86485 + if (!TYPE_READONLY(type))
86486 + continue;
86487 +
86488 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
86489 +// continue;
86490 +
86491 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
86492 +// continue;
86493 +
86494 + if (walk_struct(type)) {
86495 + error("constified variable %qE cannot be local", var);
86496 + return 1;
86497 + }
86498 + }
86499 + return 0;
86500 +}
86501 +
86502 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
86503 +{
86504 + const char * const plugin_name = plugin_info->base_name;
86505 + const int argc = plugin_info->argc;
86506 + const struct plugin_argument * const argv = plugin_info->argv;
86507 + int i;
86508 + bool constify = true;
86509 +
86510 + struct register_pass_info local_variable_pass_info = {
86511 + .pass = &pass_local_variable.pass,
86512 + .reference_pass_name = "*referenced_vars",
86513 + .ref_pass_instance_number = 0,
86514 + .pos_op = PASS_POS_INSERT_AFTER
86515 + };
86516 +
86517 + if (!plugin_default_version_check(version, &gcc_version)) {
86518 + error(G_("incompatible gcc/plugin versions"));
86519 + return 1;
86520 + }
86521 +
86522 + for (i = 0; i < argc; ++i) {
86523 + if (!(strcmp(argv[i].key, "no-constify"))) {
86524 + constify = false;
86525 + continue;
86526 + }
86527 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
86528 + }
86529 +
86530 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
86531 + if (constify) {
86532 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
86533 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
86534 + }
86535 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
86536 +
86537 + return 0;
86538 +}
86539 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
86540 new file mode 100644
86541 index 0000000..a5eabce
86542 --- /dev/null
86543 +++ b/tools/gcc/kallocstat_plugin.c
86544 @@ -0,0 +1,167 @@
86545 +/*
86546 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
86547 + * Licensed under the GPL v2
86548 + *
86549 + * Note: the choice of the license means that the compilation process is
86550 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
86551 + * but for the kernel it doesn't matter since it doesn't link against
86552 + * any of the gcc libraries
86553 + *
86554 + * gcc plugin to find the distribution of k*alloc sizes
86555 + *
86556 + * TODO:
86557 + *
86558 + * BUGS:
86559 + * - none known
86560 + */
86561 +#include "gcc-plugin.h"
86562 +#include "config.h"
86563 +#include "system.h"
86564 +#include "coretypes.h"
86565 +#include "tree.h"
86566 +#include "tree-pass.h"
86567 +#include "flags.h"
86568 +#include "intl.h"
86569 +#include "toplev.h"
86570 +#include "plugin.h"
86571 +//#include "expr.h" where are you...
86572 +#include "diagnostic.h"
86573 +#include "plugin-version.h"
86574 +#include "tm.h"
86575 +#include "function.h"
86576 +#include "basic-block.h"
86577 +#include "gimple.h"
86578 +#include "rtl.h"
86579 +#include "emit-rtl.h"
86580 +
86581 +extern void print_gimple_stmt(FILE *, gimple, int, int);
86582 +
86583 +int plugin_is_GPL_compatible;
86584 +
86585 +static const char * const kalloc_functions[] = {
86586 + "__kmalloc",
86587 + "kmalloc",
86588 + "kmalloc_large",
86589 + "kmalloc_node",
86590 + "kmalloc_order",
86591 + "kmalloc_order_trace",
86592 + "kmalloc_slab",
86593 + "kzalloc",
86594 + "kzalloc_node",
86595 +};
86596 +
86597 +static struct plugin_info kallocstat_plugin_info = {
86598 + .version = "201111150100",
86599 +};
86600 +
86601 +static unsigned int execute_kallocstat(void);
86602 +
86603 +static struct gimple_opt_pass kallocstat_pass = {
86604 + .pass = {
86605 + .type = GIMPLE_PASS,
86606 + .name = "kallocstat",
86607 + .gate = NULL,
86608 + .execute = execute_kallocstat,
86609 + .sub = NULL,
86610 + .next = NULL,
86611 + .static_pass_number = 0,
86612 + .tv_id = TV_NONE,
86613 + .properties_required = 0,
86614 + .properties_provided = 0,
86615 + .properties_destroyed = 0,
86616 + .todo_flags_start = 0,
86617 + .todo_flags_finish = 0
86618 + }
86619 +};
86620 +
86621 +static bool is_kalloc(const char *fnname)
86622 +{
86623 + size_t i;
86624 +
86625 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
86626 + if (!strcmp(fnname, kalloc_functions[i]))
86627 + return true;
86628 + return false;
86629 +}
86630 +
86631 +static unsigned int execute_kallocstat(void)
86632 +{
86633 + basic_block bb;
86634 +
86635 + // 1. loop through BBs and GIMPLE statements
86636 + FOR_EACH_BB(bb) {
86637 + gimple_stmt_iterator gsi;
86638 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
86639 + // gimple match:
86640 + tree fndecl, size;
86641 + gimple call_stmt;
86642 + const char *fnname;
86643 +
86644 + // is it a call
86645 + call_stmt = gsi_stmt(gsi);
86646 + if (!is_gimple_call(call_stmt))
86647 + continue;
86648 + fndecl = gimple_call_fndecl(call_stmt);
86649 + if (fndecl == NULL_TREE)
86650 + continue;
86651 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
86652 + continue;
86653 +
86654 + // is it a call to k*alloc
86655 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
86656 + if (!is_kalloc(fnname))
86657 + continue;
86658 +
86659 + // is the size arg the result of a simple const assignment
86660 + size = gimple_call_arg(call_stmt, 0);
86661 + while (true) {
86662 + gimple def_stmt;
86663 + expanded_location xloc;
86664 + size_t size_val;
86665 +
86666 + if (TREE_CODE(size) != SSA_NAME)
86667 + break;
86668 + def_stmt = SSA_NAME_DEF_STMT(size);
86669 + if (!def_stmt || !is_gimple_assign(def_stmt))
86670 + break;
86671 + if (gimple_num_ops(def_stmt) != 2)
86672 + break;
86673 + size = gimple_assign_rhs1(def_stmt);
86674 + if (!TREE_CONSTANT(size))
86675 + continue;
86676 + xloc = expand_location(gimple_location(def_stmt));
86677 + if (!xloc.file)
86678 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
86679 + size_val = TREE_INT_CST_LOW(size);
86680 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
86681 + break;
86682 + }
86683 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
86684 +//debug_tree(gimple_call_fn(call_stmt));
86685 +//print_node(stderr, "pax", fndecl, 4);
86686 + }
86687 + }
86688 +
86689 + return 0;
86690 +}
86691 +
86692 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
86693 +{
86694 + const char * const plugin_name = plugin_info->base_name;
86695 + struct register_pass_info kallocstat_pass_info = {
86696 + .pass = &kallocstat_pass.pass,
86697 + .reference_pass_name = "ssa",
86698 + .ref_pass_instance_number = 0,
86699 + .pos_op = PASS_POS_INSERT_AFTER
86700 + };
86701 +
86702 + if (!plugin_default_version_check(version, &gcc_version)) {
86703 + error(G_("incompatible gcc/plugin versions"));
86704 + return 1;
86705 + }
86706 +
86707 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
86708 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
86709 +
86710 + return 0;
86711 +}
86712 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
86713 new file mode 100644
86714 index 0000000..51f747e
86715 --- /dev/null
86716 +++ b/tools/gcc/kernexec_plugin.c
86717 @@ -0,0 +1,348 @@
86718 +/*
86719 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
86720 + * Licensed under the GPL v2
86721 + *
86722 + * Note: the choice of the license means that the compilation process is
86723 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
86724 + * but for the kernel it doesn't matter since it doesn't link against
86725 + * any of the gcc libraries
86726 + *
86727 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
86728 + *
86729 + * TODO:
86730 + *
86731 + * BUGS:
86732 + * - none known
86733 + */
86734 +#include "gcc-plugin.h"
86735 +#include "config.h"
86736 +#include "system.h"
86737 +#include "coretypes.h"
86738 +#include "tree.h"
86739 +#include "tree-pass.h"
86740 +#include "flags.h"
86741 +#include "intl.h"
86742 +#include "toplev.h"
86743 +#include "plugin.h"
86744 +//#include "expr.h" where are you...
86745 +#include "diagnostic.h"
86746 +#include "plugin-version.h"
86747 +#include "tm.h"
86748 +#include "function.h"
86749 +#include "basic-block.h"
86750 +#include "gimple.h"
86751 +#include "rtl.h"
86752 +#include "emit-rtl.h"
86753 +#include "tree-flow.h"
86754 +
86755 +extern void print_gimple_stmt(FILE *, gimple, int, int);
86756 +extern rtx emit_move_insn(rtx x, rtx y);
86757 +
86758 +int plugin_is_GPL_compatible;
86759 +
86760 +static struct plugin_info kernexec_plugin_info = {
86761 + .version = "201111291120",
86762 + .help = "method=[bts|or]\tinstrumentation method\n"
86763 +};
86764 +
86765 +static unsigned int execute_kernexec_fptr(void);
86766 +static unsigned int execute_kernexec_retaddr(void);
86767 +static bool kernexec_cmodel_check(void);
86768 +
86769 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator);
86770 +static void (*kernexec_instrument_retaddr)(rtx);
86771 +
86772 +static struct gimple_opt_pass kernexec_fptr_pass = {
86773 + .pass = {
86774 + .type = GIMPLE_PASS,
86775 + .name = "kernexec_fptr",
86776 + .gate = kernexec_cmodel_check,
86777 + .execute = execute_kernexec_fptr,
86778 + .sub = NULL,
86779 + .next = NULL,
86780 + .static_pass_number = 0,
86781 + .tv_id = TV_NONE,
86782 + .properties_required = 0,
86783 + .properties_provided = 0,
86784 + .properties_destroyed = 0,
86785 + .todo_flags_start = 0,
86786 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
86787 + }
86788 +};
86789 +
86790 +static struct rtl_opt_pass kernexec_retaddr_pass = {
86791 + .pass = {
86792 + .type = RTL_PASS,
86793 + .name = "kernexec_retaddr",
86794 + .gate = kernexec_cmodel_check,
86795 + .execute = execute_kernexec_retaddr,
86796 + .sub = NULL,
86797 + .next = NULL,
86798 + .static_pass_number = 0,
86799 + .tv_id = TV_NONE,
86800 + .properties_required = 0,
86801 + .properties_provided = 0,
86802 + .properties_destroyed = 0,
86803 + .todo_flags_start = 0,
86804 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
86805 + }
86806 +};
86807 +
86808 +static bool kernexec_cmodel_check(void)
86809 +{
86810 + tree section;
86811 +
86812 + if (ix86_cmodel != CM_KERNEL)
86813 + return false;
86814 +
86815 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
86816 + if (!section || !TREE_VALUE(section))
86817 + return true;
86818 +
86819 + section = TREE_VALUE(TREE_VALUE(section));
86820 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
86821 + return true;
86822 +
86823 + return false;
86824 +}
86825 +
86826 +/*
86827 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
86828 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
86829 + */
86830 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator gsi)
86831 +{
86832 + gimple assign_intptr, assign_new_fptr, call_stmt;
86833 + tree intptr, old_fptr, new_fptr, kernexec_mask;
86834 +
86835 + call_stmt = gsi_stmt(gsi);
86836 + old_fptr = gimple_call_fn(call_stmt);
86837 +
86838 + // create temporary unsigned long variable used for bitops and cast fptr to it
86839 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
86840 + add_referenced_var(intptr);
86841 + mark_sym_for_renaming(intptr);
86842 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
86843 + gsi_insert_before(&gsi, assign_intptr, GSI_SAME_STMT);
86844 + update_stmt(assign_intptr);
86845 +
86846 + // apply logical or to temporary unsigned long and bitmask
86847 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
86848 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
86849 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
86850 + gsi_insert_before(&gsi, assign_intptr, GSI_SAME_STMT);
86851 + update_stmt(assign_intptr);
86852 +
86853 + // cast temporary unsigned long back to a temporary fptr variable
86854 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec");
86855 + add_referenced_var(new_fptr);
86856 + mark_sym_for_renaming(new_fptr);
86857 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
86858 + gsi_insert_before(&gsi, assign_new_fptr, GSI_SAME_STMT);
86859 + update_stmt(assign_new_fptr);
86860 +
86861 + // replace call stmt fn with the new fptr
86862 + gimple_call_set_fn(call_stmt, new_fptr);
86863 + update_stmt(call_stmt);
86864 +}
86865 +
86866 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator gsi)
86867 +{
86868 + gimple asm_or_stmt, call_stmt;
86869 + tree old_fptr, new_fptr, input, output;
86870 + VEC(tree, gc) *inputs = NULL;
86871 + VEC(tree, gc) *outputs = NULL;
86872 +
86873 + call_stmt = gsi_stmt(gsi);
86874 + old_fptr = gimple_call_fn(call_stmt);
86875 +
86876 + // create temporary fptr variable
86877 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
86878 + add_referenced_var(new_fptr);
86879 + mark_sym_for_renaming(new_fptr);
86880 +
86881 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
86882 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
86883 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
86884 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
86885 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
86886 + VEC_safe_push(tree, gc, inputs, input);
86887 + VEC_safe_push(tree, gc, outputs, output);
86888 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
86889 + gimple_asm_set_volatile(asm_or_stmt, true);
86890 + gsi_insert_before(&gsi, asm_or_stmt, GSI_SAME_STMT);
86891 + update_stmt(asm_or_stmt);
86892 +
86893 + // replace call stmt fn with the new fptr
86894 + gimple_call_set_fn(call_stmt, new_fptr);
86895 + update_stmt(call_stmt);
86896 +}
86897 +
86898 +/*
86899 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
86900 + */
86901 +static unsigned int execute_kernexec_fptr(void)
86902 +{
86903 + basic_block bb;
86904 + gimple_stmt_iterator gsi;
86905 +
86906 + // 1. loop through BBs and GIMPLE statements
86907 + FOR_EACH_BB(bb) {
86908 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
86909 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
86910 + tree fn;
86911 + gimple call_stmt;
86912 +
86913 + // is it a call ...
86914 + call_stmt = gsi_stmt(gsi);
86915 + if (!is_gimple_call(call_stmt))
86916 + continue;
86917 + fn = gimple_call_fn(call_stmt);
86918 + if (TREE_CODE(fn) == ADDR_EXPR)
86919 + continue;
86920 + if (TREE_CODE(fn) != SSA_NAME)
86921 + gcc_unreachable();
86922 +
86923 + // ... through a function pointer
86924 + fn = SSA_NAME_VAR(fn);
86925 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
86926 + continue;
86927 + fn = TREE_TYPE(fn);
86928 + if (TREE_CODE(fn) != POINTER_TYPE)
86929 + continue;
86930 + fn = TREE_TYPE(fn);
86931 + if (TREE_CODE(fn) != FUNCTION_TYPE)
86932 + continue;
86933 +
86934 + kernexec_instrument_fptr(gsi);
86935 +
86936 +//debug_tree(gimple_call_fn(call_stmt));
86937 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
86938 + }
86939 + }
86940 +
86941 + return 0;
86942 +}
86943 +
86944 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
86945 +static void kernexec_instrument_retaddr_bts(rtx insn)
86946 +{
86947 + rtx btsq;
86948 + rtvec argvec, constraintvec, labelvec;
86949 + int line;
86950 +
86951 + // create asm volatile("btsq $63,(%%rsp)":::)
86952 + argvec = rtvec_alloc(0);
86953 + constraintvec = rtvec_alloc(0);
86954 + labelvec = rtvec_alloc(0);
86955 + line = expand_location(RTL_LOCATION(insn)).line;
86956 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
86957 + MEM_VOLATILE_P(btsq) = 1;
86958 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
86959 + emit_insn_before(btsq, insn);
86960 +}
86961 +
86962 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
86963 +static void kernexec_instrument_retaddr_or(rtx insn)
86964 +{
86965 + rtx orq;
86966 + rtvec argvec, constraintvec, labelvec;
86967 + int line;
86968 +
86969 + // create asm volatile("orq %%r10,(%%rsp)":::)
86970 + argvec = rtvec_alloc(0);
86971 + constraintvec = rtvec_alloc(0);
86972 + labelvec = rtvec_alloc(0);
86973 + line = expand_location(RTL_LOCATION(insn)).line;
86974 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
86975 + MEM_VOLATILE_P(orq) = 1;
86976 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
86977 + emit_insn_before(orq, insn);
86978 +}
86979 +
86980 +/*
86981 + * find all asm level function returns and forcibly set the highest bit of the return address
86982 + */
86983 +static unsigned int execute_kernexec_retaddr(void)
86984 +{
86985 + rtx insn;
86986 +
86987 + // 1. find function returns
86988 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
86989 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
86990 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
86991 + rtx body;
86992 +
86993 + // is it a retn
86994 + if (!JUMP_P(insn))
86995 + continue;
86996 + body = PATTERN(insn);
86997 + if (GET_CODE(body) == PARALLEL)
86998 + body = XVECEXP(body, 0, 0);
86999 + if (GET_CODE(body) != RETURN)
87000 + continue;
87001 + kernexec_instrument_retaddr(insn);
87002 + }
87003 +
87004 +// print_simple_rtl(stderr, get_insns());
87005 +// print_rtl(stderr, get_insns());
87006 +
87007 + return 0;
87008 +}
87009 +
87010 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
87011 +{
87012 + const char * const plugin_name = plugin_info->base_name;
87013 + const int argc = plugin_info->argc;
87014 + const struct plugin_argument * const argv = plugin_info->argv;
87015 + int i;
87016 + struct register_pass_info kernexec_fptr_pass_info = {
87017 + .pass = &kernexec_fptr_pass.pass,
87018 + .reference_pass_name = "ssa",
87019 + .ref_pass_instance_number = 0,
87020 + .pos_op = PASS_POS_INSERT_AFTER
87021 + };
87022 + struct register_pass_info kernexec_retaddr_pass_info = {
87023 + .pass = &kernexec_retaddr_pass.pass,
87024 + .reference_pass_name = "pro_and_epilogue",
87025 + .ref_pass_instance_number = 0,
87026 + .pos_op = PASS_POS_INSERT_AFTER
87027 + };
87028 +
87029 + if (!plugin_default_version_check(version, &gcc_version)) {
87030 + error(G_("incompatible gcc/plugin versions"));
87031 + return 1;
87032 + }
87033 +
87034 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
87035 +
87036 + if (TARGET_64BIT == 0)
87037 + return 0;
87038 +
87039 + for (i = 0; i < argc; ++i) {
87040 + if (!strcmp(argv[i].key, "method")) {
87041 + if (!argv[i].value) {
87042 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
87043 + continue;
87044 + }
87045 + if (!strcmp(argv[i].value, "bts")) {
87046 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
87047 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
87048 + } else if (!strcmp(argv[i].value, "or")) {
87049 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
87050 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
87051 + fix_register("r10", 1, 1);
87052 + } else
87053 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
87054 + continue;
87055 + }
87056 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
87057 + }
87058 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
87059 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
87060 +
87061 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
87062 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
87063 +
87064 + return 0;
87065 +}
87066 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
87067 new file mode 100644
87068 index 0000000..d44f37c
87069 --- /dev/null
87070 +++ b/tools/gcc/stackleak_plugin.c
87071 @@ -0,0 +1,291 @@
87072 +/*
87073 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
87074 + * Licensed under the GPL v2
87075 + *
87076 + * Note: the choice of the license means that the compilation process is
87077 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
87078 + * but for the kernel it doesn't matter since it doesn't link against
87079 + * any of the gcc libraries
87080 + *
87081 + * gcc plugin to help implement various PaX features
87082 + *
87083 + * - track lowest stack pointer
87084 + *
87085 + * TODO:
87086 + * - initialize all local variables
87087 + *
87088 + * BUGS:
87089 + * - none known
87090 + */
87091 +#include "gcc-plugin.h"
87092 +#include "config.h"
87093 +#include "system.h"
87094 +#include "coretypes.h"
87095 +#include "tree.h"
87096 +#include "tree-pass.h"
87097 +#include "flags.h"
87098 +#include "intl.h"
87099 +#include "toplev.h"
87100 +#include "plugin.h"
87101 +//#include "expr.h" where are you...
87102 +#include "diagnostic.h"
87103 +#include "plugin-version.h"
87104 +#include "tm.h"
87105 +#include "function.h"
87106 +#include "basic-block.h"
87107 +#include "gimple.h"
87108 +#include "rtl.h"
87109 +#include "emit-rtl.h"
87110 +
87111 +extern void print_gimple_stmt(FILE *, gimple, int, int);
87112 +
87113 +int plugin_is_GPL_compatible;
87114 +
87115 +static int track_frame_size = -1;
87116 +static const char track_function[] = "pax_track_stack";
87117 +static const char check_function[] = "pax_check_alloca";
87118 +static bool init_locals;
87119 +
87120 +static struct plugin_info stackleak_plugin_info = {
87121 + .version = "201111150100",
87122 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
87123 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
87124 +};
87125 +
87126 +static bool gate_stackleak_track_stack(void);
87127 +static unsigned int execute_stackleak_tree_instrument(void);
87128 +static unsigned int execute_stackleak_final(void);
87129 +
87130 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
87131 + .pass = {
87132 + .type = GIMPLE_PASS,
87133 + .name = "stackleak_tree_instrument",
87134 + .gate = gate_stackleak_track_stack,
87135 + .execute = execute_stackleak_tree_instrument,
87136 + .sub = NULL,
87137 + .next = NULL,
87138 + .static_pass_number = 0,
87139 + .tv_id = TV_NONE,
87140 + .properties_required = PROP_gimple_leh | PROP_cfg,
87141 + .properties_provided = 0,
87142 + .properties_destroyed = 0,
87143 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
87144 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
87145 + }
87146 +};
87147 +
87148 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
87149 + .pass = {
87150 + .type = RTL_PASS,
87151 + .name = "stackleak_final",
87152 + .gate = gate_stackleak_track_stack,
87153 + .execute = execute_stackleak_final,
87154 + .sub = NULL,
87155 + .next = NULL,
87156 + .static_pass_number = 0,
87157 + .tv_id = TV_NONE,
87158 + .properties_required = 0,
87159 + .properties_provided = 0,
87160 + .properties_destroyed = 0,
87161 + .todo_flags_start = 0,
87162 + .todo_flags_finish = TODO_dump_func
87163 + }
87164 +};
87165 +
87166 +static bool gate_stackleak_track_stack(void)
87167 +{
87168 + return track_frame_size >= 0;
87169 +}
87170 +
87171 +static void stackleak_check_alloca(gimple_stmt_iterator gsi)
87172 +{
87173 + gimple check_alloca;
87174 + tree fndecl, fntype, alloca_size;
87175 +
87176 + // insert call to void pax_check_alloca(unsigned long size)
87177 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
87178 + fndecl = build_fn_decl(check_function, fntype);
87179 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
87180 + alloca_size = gimple_call_arg(gsi_stmt(gsi), 0);
87181 + check_alloca = gimple_build_call(fndecl, 1, alloca_size);
87182 + gsi_insert_before(&gsi, check_alloca, GSI_CONTINUE_LINKING);
87183 +}
87184 +
87185 +static void stackleak_add_instrumentation(gimple_stmt_iterator gsi)
87186 +{
87187 + gimple track_stack;
87188 + tree fndecl, fntype;
87189 +
87190 + // insert call to void pax_track_stack(void)
87191 + fntype = build_function_type_list(void_type_node, NULL_TREE);
87192 + fndecl = build_fn_decl(track_function, fntype);
87193 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
87194 + track_stack = gimple_build_call(fndecl, 0);
87195 + gsi_insert_after(&gsi, track_stack, GSI_CONTINUE_LINKING);
87196 +}
87197 +
87198 +#if BUILDING_GCC_VERSION == 4005
87199 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
87200 +{
87201 + tree fndecl;
87202 +
87203 + if (!is_gimple_call(stmt))
87204 + return false;
87205 + fndecl = gimple_call_fndecl(stmt);
87206 + if (!fndecl)
87207 + return false;
87208 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
87209 + return false;
87210 +// print_node(stderr, "pax", fndecl, 4);
87211 + return DECL_FUNCTION_CODE(fndecl) == code;
87212 +}
87213 +#endif
87214 +
87215 +static bool is_alloca(gimple stmt)
87216 +{
87217 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
87218 + return true;
87219 +
87220 +#if BUILDING_GCC_VERSION >= 4007
87221 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
87222 + return true;
87223 +#endif
87224 +
87225 + return false;
87226 +}
87227 +
87228 +static unsigned int execute_stackleak_tree_instrument(void)
87229 +{
87230 + basic_block bb, entry_bb;
87231 + bool prologue_instrumented = false;
87232 +
87233 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
87234 +
87235 + // 1. loop through BBs and GIMPLE statements
87236 + FOR_EACH_BB(bb) {
87237 + gimple_stmt_iterator gsi;
87238 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
87239 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
87240 + if (!is_alloca(gsi_stmt(gsi)))
87241 + continue;
87242 +
87243 + // 2. insert stack overflow check before each __builtin_alloca call
87244 + stackleak_check_alloca(gsi);
87245 +
87246 + // 3. insert track call after each __builtin_alloca call
87247 + stackleak_add_instrumentation(gsi);
87248 + if (bb == entry_bb)
87249 + prologue_instrumented = true;
87250 + }
87251 + }
87252 +
87253 + // 4. insert track call at the beginning
87254 + if (!prologue_instrumented) {
87255 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
87256 + if (dom_info_available_p(CDI_DOMINATORS))
87257 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
87258 + stackleak_add_instrumentation(gsi_start_bb(bb));
87259 + }
87260 +
87261 + return 0;
87262 +}
87263 +
87264 +static unsigned int execute_stackleak_final(void)
87265 +{
87266 + rtx insn;
87267 +
87268 + if (cfun->calls_alloca)
87269 + return 0;
87270 +
87271 + // keep calls only if function frame is big enough
87272 + if (get_frame_size() >= track_frame_size)
87273 + return 0;
87274 +
87275 + // 1. find pax_track_stack calls
87276 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
87277 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
87278 + rtx body;
87279 +
87280 + if (!CALL_P(insn))
87281 + continue;
87282 + body = PATTERN(insn);
87283 + if (GET_CODE(body) != CALL)
87284 + continue;
87285 + body = XEXP(body, 0);
87286 + if (GET_CODE(body) != MEM)
87287 + continue;
87288 + body = XEXP(body, 0);
87289 + if (GET_CODE(body) != SYMBOL_REF)
87290 + continue;
87291 + if (strcmp(XSTR(body, 0), track_function))
87292 + continue;
87293 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
87294 + // 2. delete call
87295 + insn = delete_insn_and_edges(insn);
87296 +#if BUILDING_GCC_VERSION >= 4007
87297 + if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
87298 + insn = delete_insn_and_edges(insn);
87299 +#endif
87300 + }
87301 +
87302 +// print_simple_rtl(stderr, get_insns());
87303 +// print_rtl(stderr, get_insns());
87304 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
87305 +
87306 + return 0;
87307 +}
87308 +
87309 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
87310 +{
87311 + const char * const plugin_name = plugin_info->base_name;
87312 + const int argc = plugin_info->argc;
87313 + const struct plugin_argument * const argv = plugin_info->argv;
87314 + int i;
87315 + struct register_pass_info stackleak_tree_instrument_pass_info = {
87316 + .pass = &stackleak_tree_instrument_pass.pass,
87317 +// .reference_pass_name = "tree_profile",
87318 + .reference_pass_name = "optimized",
87319 + .ref_pass_instance_number = 0,
87320 + .pos_op = PASS_POS_INSERT_AFTER
87321 + };
87322 + struct register_pass_info stackleak_final_pass_info = {
87323 + .pass = &stackleak_final_rtl_opt_pass.pass,
87324 + .reference_pass_name = "final",
87325 + .ref_pass_instance_number = 0,
87326 + .pos_op = PASS_POS_INSERT_BEFORE
87327 + };
87328 +
87329 + if (!plugin_default_version_check(version, &gcc_version)) {
87330 + error(G_("incompatible gcc/plugin versions"));
87331 + return 1;
87332 + }
87333 +
87334 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
87335 +
87336 + for (i = 0; i < argc; ++i) {
87337 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
87338 + if (!argv[i].value) {
87339 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
87340 + continue;
87341 + }
87342 + track_frame_size = atoi(argv[i].value);
87343 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
87344 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
87345 + continue;
87346 + }
87347 + if (!strcmp(argv[i].key, "initialize-locals")) {
87348 + if (argv[i].value) {
87349 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
87350 + continue;
87351 + }
87352 + init_locals = true;
87353 + continue;
87354 + }
87355 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
87356 + }
87357 +
87358 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
87359 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
87360 +
87361 + return 0;
87362 +}
87363 diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
87364 index 83b3dde..835bee7 100644
87365 --- a/usr/gen_init_cpio.c
87366 +++ b/usr/gen_init_cpio.c
87367 @@ -299,7 +299,7 @@ static int cpio_mkfile(const char *name, const char *location,
87368 int retval;
87369 int rc = -1;
87370 int namesize;
87371 - int i;
87372 + unsigned int i;
87373
87374 mode |= S_IFREG;
87375
87376 @@ -383,9 +383,10 @@ static char *cpio_replace_env(char *new_location)
87377 *env_var = *expanded = '\0';
87378 strncat(env_var, start + 2, end - start - 2);
87379 strncat(expanded, new_location, start - new_location);
87380 - strncat(expanded, getenv(env_var), PATH_MAX);
87381 - strncat(expanded, end + 1, PATH_MAX);
87382 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
87383 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
87384 strncpy(new_location, expanded, PATH_MAX);
87385 + new_location[PATH_MAX] = 0;
87386 } else
87387 break;
87388 }
87389 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
87390 index 4f3434f..159bc3e 100644
87391 --- a/virt/kvm/kvm_main.c
87392 +++ b/virt/kvm/kvm_main.c
87393 @@ -2494,7 +2494,7 @@ asmlinkage void kvm_handle_fault_on_reboot(void)
87394 if (kvm_rebooting)
87395 /* spin while reset goes on */
87396 while (true)
87397 - ;
87398 + cpu_relax();
87399 /* Fault while not rebooting. We want the trace. */
87400 BUG();
87401 }
87402 @@ -2714,7 +2714,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
87403 kvm_arch_vcpu_put(vcpu);
87404 }
87405
87406 -int kvm_init(void *opaque, unsigned int vcpu_size,
87407 +int kvm_init(const void *opaque, unsigned int vcpu_size,
87408 struct module *module)
87409 {
87410 int r;
87411 @@ -2767,15 +2767,17 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
87412 /* A kmem cache lets us meet the alignment requirements of fx_save. */
87413 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
87414 __alignof__(struct kvm_vcpu),
87415 - 0, NULL);
87416 + SLAB_USERCOPY, NULL);
87417 if (!kvm_vcpu_cache) {
87418 r = -ENOMEM;
87419 goto out_free_5;
87420 }
87421
87422 - kvm_chardev_ops.owner = module;
87423 - kvm_vm_fops.owner = module;
87424 - kvm_vcpu_fops.owner = module;
87425 + pax_open_kernel();
87426 + *(void **)&kvm_chardev_ops.owner = module;
87427 + *(void **)&kvm_vm_fops.owner = module;
87428 + *(void **)&kvm_vcpu_fops.owner = module;
87429 + pax_close_kernel();
87430
87431 r = misc_register(&kvm_dev);
87432 if (r) {